/* machdep.c revision 305866 */
1/*	$NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $	*/
2
3/*-
4 * Copyright (c) 2004 Olivier Houchard
5 * Copyright (c) 1994-1998 Mark Brinicombe.
6 * Copyright (c) 1994 Brini.
7 * All rights reserved.
8 *
9 * This code is derived from software written for Brini by Mark Brinicombe
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by Mark Brinicombe
22 *	for the NetBSD Project.
23 * 4. The name of the company nor the name of the author may be used to
24 *    endorse or promote products derived from this software without specific
25 *    prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
28 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
29 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * Machine dependent functions for kernel setup
40 *
41 * Created      : 17/09/94
42 * Updated	: 18/04/01 updated for new wscons
43 */
44
45#include "opt_compat.h"
46#include "opt_ddb.h"
47#include "opt_kstack_pages.h"
48#include "opt_platform.h"
49#include "opt_sched.h"
50#include "opt_timer.h"
51
52#include <sys/cdefs.h>
53__FBSDID("$FreeBSD: stable/11/sys/arm/arm/machdep.c 305866 2016-09-16 10:04:28Z kib $");
54
55#include <sys/param.h>
56#include <sys/proc.h>
57#include <sys/systm.h>
58#include <sys/bio.h>
59#include <sys/buf.h>
60#include <sys/bus.h>
61#include <sys/cons.h>
62#include <sys/cpu.h>
63#include <sys/ctype.h>
64#include <sys/devmap.h>
65#include <sys/efi.h>
66#include <sys/exec.h>
67#include <sys/imgact.h>
68#include <sys/kdb.h>
69#include <sys/kernel.h>
70#include <sys/ktr.h>
71#include <sys/linker.h>
72#include <sys/lock.h>
73#include <sys/malloc.h>
74#include <sys/msgbuf.h>
75#include <sys/mutex.h>
76#include <sys/pcpu.h>
77#include <sys/ptrace.h>
78#include <sys/reboot.h>
79#include <sys/boot.h>
80#include <sys/rwlock.h>
81#include <sys/sched.h>
82#include <sys/signalvar.h>
83#include <sys/syscallsubr.h>
84#include <sys/sysctl.h>
85#include <sys/sysent.h>
86#include <sys/sysproto.h>
87#include <sys/uio.h>
88#include <sys/vdso.h>
89
90#include <vm/vm.h>
91#include <vm/pmap.h>
92#include <vm/vm_map.h>
93#include <vm/vm_object.h>
94#include <vm/vm_page.h>
95#include <vm/vm_pager.h>
96
97#include <machine/armreg.h>
98#include <machine/atags.h>
99#include <machine/cpu.h>
100#include <machine/cpuinfo.h>
101#include <machine/debug_monitor.h>
102#include <machine/db_machdep.h>
103#include <machine/frame.h>
104#include <machine/intr.h>
105#include <machine/machdep.h>
106#include <machine/md_var.h>
107#include <machine/metadata.h>
108#include <machine/pcb.h>
109#include <machine/physmem.h>
110#include <machine/platform.h>
111#include <machine/reg.h>
112#include <machine/trap.h>
113#include <machine/undefined.h>
114#include <machine/vfp.h>
115#include <machine/vmparam.h>
116#include <machine/sysarch.h>
117
118#ifdef FDT
119#include <contrib/libfdt/libfdt.h>
120#include <dev/fdt/fdt_common.h>
121#include <dev/ofw/openfirm.h>
122#endif
123
124#ifdef DDB
125#include <ddb/ddb.h>
126
127#if __ARM_ARCH >= 6
128
DB_SHOW_COMMAND(cp15, db_show_cp15)
{

	/*
	 * Dump the identification, control and feature registers from
	 * coprocessor 15, in the same order the hardware numbers them.
	 */
	db_printf("Cpu ID: 0x%08x\n", cp15_midr_get());
	db_printf("Current Cache Lvl ID: 0x%08x\n", cp15_ctr_get());

	db_printf("Ctrl: 0x%08x\n", cp15_sctlr_get());
	db_printf("Aux Ctrl: 0x%08x\n", cp15_actlr_get());

	db_printf("Processor Feat 0: 0x%08x\n", cp15_id_pfr0_get());
	db_printf("Processor Feat 1: 0x%08x\n", cp15_id_pfr1_get());
	db_printf("Debug Feat 0: 0x%08x\n", cp15_id_dfr0_get());
	db_printf("Auxiliary Feat 0: 0x%08x\n", cp15_id_afr0_get());
	db_printf("Memory Model Feat 0: 0x%08x\n", cp15_id_mmfr0_get());
	db_printf("Memory Model Feat 1: 0x%08x\n", cp15_id_mmfr1_get());
	db_printf("Memory Model Feat 2: 0x%08x\n", cp15_id_mmfr2_get());
	db_printf("Memory Model Feat 3: 0x%08x\n", cp15_id_mmfr3_get());
	db_printf("TTB0: 0x%08x\n", cp15_ttbr_get());
}
162
163DB_SHOW_COMMAND(vtop, db_show_vtop)
164{
165	u_int reg;
166
167	if (have_addr) {
168		cp15_ats1cpr_set(addr);
169		reg = cp15_par_get();
170		db_printf("Physical address reg: 0x%08x\n",reg);
171	} else
172		db_printf("show vtop <virt_addr>\n");
173}
174#endif /* __ARM_ARCH >= 6 */
175#endif /* DDB */
176
177#ifdef DEBUG
178#define	debugf(fmt, args...) printf(fmt, ##args)
179#else
180#define	debugf(fmt, args...)
181#endif
182
183#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
184    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \
185    defined(COMPAT_FREEBSD9)
186#error FreeBSD/arm doesn't provide compatibility with releases prior to 10
187#endif
188
/* Per-CPU data; pcpup points at the boot CPU's slot. */
struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

/* Trapframe used for thread0 during early boot. */
static struct trapframe proc0_tf;
/* Address used by the reset code; set up by platform code (0 = unset). */
uint32_t cpu_reset_address = 0;
int cold = 1;			/* Non-zero while still booting. */
vm_offset_t vector_page;	/* KVA of the ARM exception-vector page. */

/*
 * Optional platform-supplied bulk copy/zero routines and the minimum
 * sizes at which they are worth using (0 = never).
 */
int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;

/* Linker-provided end-of-kernel-image symbol. */
extern int *end;
203
#ifdef FDT
/* Environment string block handed to us by loader(8). */
static char *loader_envp;

vm_paddr_t pmap_pa;

#if __ARM_ARCH >= 6
/* KVAs of the system page and the per-exception-mode stacks. */
vm_offset_t systempage;
vm_offset_t irqstack;
vm_offset_t undstack;
vm_offset_t abtstack;
#else
/*
 * This is the number of L2 page tables required for covering max
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks etc.), uprounded to be divisible by 4.
 */
#define KERNEL_PT_MAX	78

static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];

/* VA/PA pairs for the system page, msgbuf and exception-mode stacks. */
struct pv_addr systempage;
static struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
static struct pv_addr kernelstack;
#endif
#endif
232
#if defined(LINUX_BOOT_ABI)
#define LBABI_MAX_BANKS	10

/* Prefix a Linux-style command line must carry for us to honor it. */
#define CMDLINE_GUARD "FreeBSD:"
uint32_t board_id;
struct arm_lbabi_tag *atag_list;
char linux_command_line[LBABI_MAX_COMMAND_LINE + 1];
char atags[LBABI_MAX_COMMAND_LINE * 2];
uint32_t memstart[LBABI_MAX_BANKS];
uint32_t memsize[LBABI_MAX_BANKS];
uint32_t membanks;
#endif
#ifdef MULTIDELAY
/* DELAY() backend registered via arm_set_delay(). */
static delay_func *delay_impl;
static void *delay_arg;
#endif

/* Board identity, exported below through the hw.board sysctl tree. */
static uint32_t board_revision;
/* hex representation of uint64_t */
static char board_serial[32];

SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes");
SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD,
    &board_revision, 0, "Board revision");
SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD,
    board_serial, 0, "Board serial");

/* Non-zero when VFP hardware was detected (set elsewhere). */
int vfp_exists;
SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &vfp_exists, 0, "Floating point support enabled");
263
264void
265board_set_serial(uint64_t serial)
266{
267
268	snprintf(board_serial, sizeof(board_serial)-1,
269		    "%016jx", serial);
270}
271
/* Record the board revision for export via hw.board.revision. */
void
board_set_revision(uint32_t revision)
{

	board_revision = revision;
}
278
279void
280sendsig(catcher, ksi, mask)
281	sig_t catcher;
282	ksiginfo_t *ksi;
283	sigset_t *mask;
284{
285	struct thread *td;
286	struct proc *p;
287	struct trapframe *tf;
288	struct sigframe *fp, frame;
289	struct sigacts *psp;
290	struct sysentvec *sysent;
291	int onstack;
292	int sig;
293	int code;
294
295	td = curthread;
296	p = td->td_proc;
297	PROC_LOCK_ASSERT(p, MA_OWNED);
298	sig = ksi->ksi_signo;
299	code = ksi->ksi_code;
300	psp = p->p_sigacts;
301	mtx_assert(&psp->ps_mtx, MA_OWNED);
302	tf = td->td_frame;
303	onstack = sigonstack(tf->tf_usr_sp);
304
305	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
306	    catcher, sig);
307
308	/* Allocate and validate space for the signal handler context. */
309	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
310	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
311		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
312		    td->td_sigstk.ss_size);
313#if defined(COMPAT_43)
314		td->td_sigstk.ss_flags |= SS_ONSTACK;
315#endif
316	} else
317		fp = (struct sigframe *)td->td_frame->tf_usr_sp;
318
319	/* make room on the stack */
320	fp--;
321
322	/* make the stack aligned */
323	fp = (struct sigframe *)STACKALIGN(fp);
324	/* Populate the siginfo frame. */
325	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
326	frame.sf_si = ksi->ksi_info;
327	frame.sf_uc.uc_sigmask = *mask;
328	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK )
329	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
330	frame.sf_uc.uc_stack = td->td_sigstk;
331	mtx_unlock(&psp->ps_mtx);
332	PROC_UNLOCK(td->td_proc);
333
334	/* Copy the sigframe out to the user's stack. */
335	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
336		/* Process has trashed its stack. Kill it. */
337		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
338		PROC_LOCK(p);
339		sigexit(td, SIGILL);
340	}
341
342	/*
343	 * Build context to run handler in.  We invoke the handler
344	 * directly, only returning via the trampoline.  Note the
345	 * trampoline version numbers are coordinated with machine-
346	 * dependent code in libc.
347	 */
348
349	tf->tf_r0 = sig;
350	tf->tf_r1 = (register_t)&fp->sf_si;
351	tf->tf_r2 = (register_t)&fp->sf_uc;
352
353	/* the trampoline uses r5 as the uc address */
354	tf->tf_r5 = (register_t)&fp->sf_uc;
355	tf->tf_pc = (register_t)catcher;
356	tf->tf_usr_sp = (register_t)fp;
357	sysent = p->p_sysent;
358	if (sysent->sv_sigcode_base != 0)
359		tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base;
360	else
361		tf->tf_usr_lr = (register_t)(sysent->sv_psstrings -
362		    *(sysent->sv_szsigcode));
363	/* Set the mode to enter in the signal handler */
364#if __ARM_ARCH >= 7
365	if ((register_t)catcher & 1)
366		tf->tf_spsr |= PSR_T;
367	else
368		tf->tf_spsr &= ~PSR_T;
369#endif
370
371	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
372	    tf->tf_usr_sp);
373
374	PROC_LOCK(p);
375	mtx_lock(&psp->ps_mtx);
376}
377
/* Kernel VA submap bookkeeping; filled in by vm_ksubmap_init(). */
struct kva_md_info kmi;
379
380/*
381 * arm32_vector_init:
382 *
383 *	Initialize the vector page, and select whether or not to
384 *	relocate the vectors.
385 *
386 *	NOTE: We expect the vector page to be mapped at its expected
387 *	destination.
388 */
389
390extern unsigned int page0[], page0_data[];
391void
392arm_vector_init(vm_offset_t va, int which)
393{
394	unsigned int *vectors = (int *) va;
395	unsigned int *vectors_data = vectors + (page0_data - page0);
396	int vec;
397
398	/*
399	 * Loop through the vectors we're taking over, and copy the
400	 * vector's insn and data word.
401	 */
402	for (vec = 0; vec < ARM_NVEC; vec++) {
403		if ((which & (1 << vec)) == 0) {
404			/* Don't want to take over this vector. */
405			continue;
406		}
407		vectors[vec] = page0[vec];
408		vectors_data[vec] = page0_data[vec];
409	}
410
411	/* Now sync the vectors. */
412	icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int));
413
414	vector_page = va;
415
416	if (va == ARM_VECTORS_HIGH) {
417		/*
418		 * Enable high vectors in the system control reg (SCTLR).
419		 *
420		 * Assume the MD caller knows what it's doing here, and really
421		 * does want the vector page relocated.
422		 *
423		 * Note: This has to be done here (and not just in
424		 * cpu_setup()) because the vector page needs to be
425		 * accessible *before* cpu_startup() is called.
426		 * Think ddb(9) ...
427		 */
428		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
429	}
430}
431
/*
 * Late (SI_SUB_CPU) machine-dependent startup: identify the CPU,
 * initialize the kernel VM submaps, report memory, set up thread0's
 * PCB and, on < ARMv6, the vector page and RAS area.
 */
static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;
#if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE)
	vm_page_t m;
#endif

	identify_arm_cpu();

	vm_ksubmap_init(&kmi);

	/*
	 * Display the RAM layout.
	 */
	printf("real memory  = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count),
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte);
	if (bootverbose) {
		arm_physmem_print_tables();
		devmap_print_table();
	}

	bufinit();
	vm_pager_bufferinit();
	/* Point thread0's saved SP at the top of its kernel stack. */
	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	pmap_set_pcb_pagedir(kernel_pmap, pcb);
#if __ARM_ARCH < 6
	/* Make the vector page read-only now that it is populated. */
	vector_page_setprot(VM_PROT_READ);
	pmap_postinit();
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	/* Back the TLS/RAS page with a fresh zeroed page. */
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
	/* Initialize the restartable-atomic-sequence window to "empty". */
	*(uint32_t *)ARM_RAS_START = 0;
	*(uint32_t *)ARM_RAS_END = 0xffffffff;
#endif
}
478
479SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
480
481/*
482 * Flush the D-cache for non-DMA I/O so that the I-cache can
483 * be made coherent later.
484 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* Write the range back to the point of coherency. */
	dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len);
}
491
492/* Get current clock frequency for the given cpu id. */
493int
494cpu_est_clockrate(int cpu_id, uint64_t *rate)
495{
496
497	return (ENXIO);
498}
499
/*
 * Idle the CPU until work is pending.  With event timers enabled,
 * switch the clock to idle mode around the sleep so a non-busy idle
 * does not take periodic ticks.
 */
void
cpu_idle(int busy)
{

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
	spinlock_enter();
#ifndef NO_EVENTTIMERS
	if (!busy)
		cpu_idleclock();
#endif
	/* Only sleep if nothing became runnable meanwhile. */
	if (!sched_runnable())
		cpu_sleep(0);
#ifndef NO_EVENTTIMERS
	if (!busy)
		cpu_activeclock();
#endif
	spinlock_exit();
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
}
519
/* No special wakeup mechanism; report that none was performed. */
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}
526
527/*
528 * Most ARM platforms don't need to do anything special to init their clocks
529 * (they get initialized during normal device attachment), and by not defining a
530 * cpu_initclocks() function they get this generic one.  Any platform that needs
531 * to do something special can just provide their own implementation, which will
532 * override this one due to the weak linkage.
533 */
void
arm_generic_initclocks(void)
{

#ifndef NO_EVENTTIMERS
#ifdef SMP
	/* The BSP and the APs have distinct event-timer init paths. */
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	else
		cpu_initclocks_ap();
#else
	cpu_initclocks_bsp();
#endif
#endif
}
549__weak_reference(arm_generic_initclocks, cpu_initclocks);
550
551#ifdef MULTIDELAY
/* Register the platform's DELAY() implementation and its argument. */
void
arm_set_delay(delay_func *impl, void *arg)
{

	KASSERT(impl != NULL, ("No DELAY implementation"));
	delay_impl = impl;
	delay_arg = arg;
}
560
/*
 * Busy-wait for approximately usec microseconds via the registered
 * backend.  NOTE(review): delay_impl is NULL until arm_set_delay()
 * runs; a DELAY() before that would fault — confirm platform code
 * registers early enough.
 */
void
DELAY(int usec)
{

	delay_impl(usec, delay_arg);
}
567#endif
568
569int
570fill_regs(struct thread *td, struct reg *regs)
571{
572	struct trapframe *tf = td->td_frame;
573	bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
574	regs->r_sp = tf->tf_usr_sp;
575	regs->r_lr = tf->tf_usr_lr;
576	regs->r_pc = tf->tf_pc;
577	regs->r_cpsr = tf->tf_spsr;
578	return (0);
579}
580int
581fill_fpregs(struct thread *td, struct fpreg *regs)
582{
583	bzero(regs, sizeof(*regs));
584	return (0);
585}
586
587int
588set_regs(struct thread *td, struct reg *regs)
589{
590	struct trapframe *tf = td->td_frame;
591
592	bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
593	tf->tf_usr_sp = regs->r_sp;
594	tf->tf_usr_lr = regs->r_lr;
595	tf->tf_pc = regs->r_pc;
596	tf->tf_spsr &=  ~PSR_FLAGS;
597	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
598	return (0);
599}
600
/* FP register state is not settable here; silently succeed. */
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
	return (0);
}
606
/* Debug registers are not exported; report success with no data. */
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	return (0);
}
/* Debug registers are not settable; silently succeed. */
int
set_dbregs(struct thread *td, struct dbreg *regs)
{
	return (0);
}
617
618
619static int
620ptrace_read_int(struct thread *td, vm_offset_t addr, uint32_t *v)
621{
622
623	if (proc_readmem(td, td->td_proc, addr, v, sizeof(*v)) != sizeof(*v))
624		return (ENOMEM);
625	return (0);
626}
627
628static int
629ptrace_write_int(struct thread *td, vm_offset_t addr, uint32_t v)
630{
631
632	if (proc_writemem(td, td->td_proc, addr, &v, sizeof(v)) != sizeof(v))
633		return (ENOMEM);
634	return (0);
635}
636
637static u_int
638ptrace_get_usr_reg(void *cookie, int reg)
639{
640	int ret;
641	struct thread *td = cookie;
642
643	KASSERT(((reg >= 0) && (reg <= ARM_REG_NUM_PC)),
644	 ("reg is outside range"));
645
646	switch(reg) {
647	case ARM_REG_NUM_PC:
648		ret = td->td_frame->tf_pc;
649		break;
650	case ARM_REG_NUM_LR:
651		ret = td->td_frame->tf_usr_lr;
652		break;
653	case ARM_REG_NUM_SP:
654		ret = td->td_frame->tf_usr_sp;
655		break;
656	default:
657		ret = *((register_t*)&td->td_frame->tf_r0 + reg);
658		break;
659	}
660
661	return (ret);
662}
663
664static u_int
665ptrace_get_usr_int(void* cookie, vm_offset_t offset, u_int* val)
666{
667	struct thread *td = cookie;
668	u_int error;
669
670	error = ptrace_read_int(td, offset, val);
671
672	return (error);
673}
674
675/**
676 * This function parses current instruction opcode and decodes
677 * any possible jump (change in PC) which might occur after
678 * the instruction is executed.
679 *
680 * @param     td                Thread structure of analysed task
681 * @param     cur_instr         Currently executed instruction
682 * @param     alt_next_address  Pointer to the variable where
683 *                              the destination address of the
684 *                              jump instruction shall be stored.
685 *
686 * @return    <0>               when jump is possible
687 *            <EINVAL>          otherwise
688 */
689static int
690ptrace_get_alternative_next(struct thread *td, uint32_t cur_instr,
691    uint32_t *alt_next_address)
692{
693	int error;
694
695	if (inst_branch(cur_instr) || inst_call(cur_instr) ||
696	    inst_return(cur_instr)) {
697		error = arm_predict_branch(td, cur_instr, td->td_frame->tf_pc,
698		    alt_next_address, ptrace_get_usr_reg, ptrace_get_usr_int);
699
700		return (error);
701	}
702
703	return (EINVAL);
704}
705
706int
707ptrace_single_step(struct thread *td)
708{
709	struct proc *p;
710	int error, error_alt;
711	uint32_t cur_instr, alt_next = 0;
712
713	/* TODO: This needs to be updated for Thumb-2 */
714	if ((td->td_frame->tf_spsr & PSR_T) != 0)
715		return (EINVAL);
716
717	KASSERT(td->td_md.md_ptrace_instr == 0,
718	 ("Didn't clear single step"));
719	KASSERT(td->td_md.md_ptrace_instr_alt == 0,
720	 ("Didn't clear alternative single step"));
721	p = td->td_proc;
722	PROC_UNLOCK(p);
723
724	error = ptrace_read_int(td, td->td_frame->tf_pc,
725	    &cur_instr);
726	if (error)
727		goto out;
728
729	error = ptrace_read_int(td, td->td_frame->tf_pc + INSN_SIZE,
730	    &td->td_md.md_ptrace_instr);
731	if (error == 0) {
732		error = ptrace_write_int(td, td->td_frame->tf_pc + INSN_SIZE,
733		    PTRACE_BREAKPOINT);
734		if (error) {
735			td->td_md.md_ptrace_instr = 0;
736		} else {
737			td->td_md.md_ptrace_addr = td->td_frame->tf_pc +
738			    INSN_SIZE;
739		}
740	}
741
742	error_alt = ptrace_get_alternative_next(td, cur_instr, &alt_next);
743	if (error_alt == 0) {
744		error_alt = ptrace_read_int(td, alt_next,
745		    &td->td_md.md_ptrace_instr_alt);
746		if (error_alt) {
747			td->td_md.md_ptrace_instr_alt = 0;
748		} else {
749			error_alt = ptrace_write_int(td, alt_next,
750			    PTRACE_BREAKPOINT);
751			if (error_alt)
752				td->td_md.md_ptrace_instr_alt = 0;
753			else
754				td->td_md.md_ptrace_addr_alt = alt_next;
755		}
756	}
757
758out:
759	PROC_LOCK(p);
760	return ((error != 0) && (error_alt != 0));
761}
762
/*
 * Remove the breakpoint(s) planted by ptrace_single_step(), restoring
 * the saved original instruction words.
 */
int
ptrace_clear_single_step(struct thread *td)
{
	struct proc *p;

	/* TODO: This needs to be updated for Thumb-2 */
	if ((td->td_frame->tf_spsr & PSR_T) != 0)
		return (EINVAL);

	if (td->td_md.md_ptrace_instr != 0) {
		p = td->td_proc;
		/* Drop the proc lock around the user-memory write. */
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr,
		    td->td_md.md_ptrace_instr);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr = 0;
	}

	if (td->td_md.md_ptrace_instr_alt != 0) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr_alt,
		    td->td_md.md_ptrace_instr_alt);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr_alt = 0;
	}

	return (0);
}
792
/* Redirect the traced thread's user PC (for PT_CONTINUE et al.). */
int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	td->td_frame->tf_pc = addr;
	return (0);
}
799
/* No machine-dependent per-CPU initialization is needed on arm. */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}
804
/*
 * Enter a spinlock section: disable IRQ/FIQ on first entry (saving
 * the previous state), maintain a per-thread nesting count, and
 * enter a critical section.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		/* Save interrupt state before the count is visible. */
		cspr = disable_interrupts(PSR_I | PSR_F);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}
820
/*
 * Leave a spinlock section; re-enable interrupts only when the
 * outermost nesting level is exited.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	critical_exit();
	/* Read the saved state before decrementing the count. */
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(cspr);
}
834
835/*
836 * Clear registers on exec
837 */
838void
839exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
840{
841	struct trapframe *tf = td->td_frame;
842
843	memset(tf, 0, sizeof(*tf));
844	tf->tf_usr_sp = stack;
845	tf->tf_usr_lr = imgp->entry_addr;
846	tf->tf_svc_lr = 0x77777777;
847	tf->tf_pc = imgp->entry_addr;
848	tf->tf_spsr = PSR_USR32_MODE;
849}
850
851/*
852 * Get machine context.
853 */
854int
855get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
856{
857	struct trapframe *tf = td->td_frame;
858	__greg_t *gr = mcp->__gregs;
859
860	if (clear_ret & GET_MC_CLEAR_RET) {
861		gr[_REG_R0] = 0;
862		gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C;
863	} else {
864		gr[_REG_R0]   = tf->tf_r0;
865		gr[_REG_CPSR] = tf->tf_spsr;
866	}
867	gr[_REG_R1]   = tf->tf_r1;
868	gr[_REG_R2]   = tf->tf_r2;
869	gr[_REG_R3]   = tf->tf_r3;
870	gr[_REG_R4]   = tf->tf_r4;
871	gr[_REG_R5]   = tf->tf_r5;
872	gr[_REG_R6]   = tf->tf_r6;
873	gr[_REG_R7]   = tf->tf_r7;
874	gr[_REG_R8]   = tf->tf_r8;
875	gr[_REG_R9]   = tf->tf_r9;
876	gr[_REG_R10]  = tf->tf_r10;
877	gr[_REG_R11]  = tf->tf_r11;
878	gr[_REG_R12]  = tf->tf_r12;
879	gr[_REG_SP]   = tf->tf_usr_sp;
880	gr[_REG_LR]   = tf->tf_usr_lr;
881	gr[_REG_PC]   = tf->tf_pc;
882
883	return (0);
884}
885
886/*
887 * Set machine context.
888 *
889 * However, we don't set any but the user modifiable flags, and we won't
890 * touch the cs selector.
891 */
892int
893set_mcontext(struct thread *td, mcontext_t *mcp)
894{
895	struct trapframe *tf = td->td_frame;
896	const __greg_t *gr = mcp->__gregs;
897
898	tf->tf_r0 = gr[_REG_R0];
899	tf->tf_r1 = gr[_REG_R1];
900	tf->tf_r2 = gr[_REG_R2];
901	tf->tf_r3 = gr[_REG_R3];
902	tf->tf_r4 = gr[_REG_R4];
903	tf->tf_r5 = gr[_REG_R5];
904	tf->tf_r6 = gr[_REG_R6];
905	tf->tf_r7 = gr[_REG_R7];
906	tf->tf_r8 = gr[_REG_R8];
907	tf->tf_r9 = gr[_REG_R9];
908	tf->tf_r10 = gr[_REG_R10];
909	tf->tf_r11 = gr[_REG_R11];
910	tf->tf_r12 = gr[_REG_R12];
911	tf->tf_usr_sp = gr[_REG_SP];
912	tf->tf_usr_lr = gr[_REG_LR];
913	tf->tf_pc = gr[_REG_PC];
914	tf->tf_spsr = gr[_REG_CPSR];
915
916	return (0);
917}
918
919/*
920 * MPSAFE
921 */
922int
923sys_sigreturn(td, uap)
924	struct thread *td;
925	struct sigreturn_args /* {
926		const struct __ucontext *sigcntxp;
927	} */ *uap;
928{
929	ucontext_t uc;
930	int spsr;
931
932	if (uap == NULL)
933		return (EFAULT);
934	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
935		return (EFAULT);
936	/*
937	 * Make sure the processor mode has not been tampered with and
938	 * interrupts have not been disabled.
939	 */
940	spsr = uc.uc_mcontext.__gregs[_REG_CPSR];
941	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
942	    (spsr & (PSR_I | PSR_F)) != 0)
943		return (EINVAL);
944		/* Restore register context. */
945	set_mcontext(td, &uc.uc_mcontext);
946
947	/* Restore signal mask. */
948	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
949
950	return (EJUSTRETURN);
951}
952
953
954/*
955 * Construct a PCB from a trapframe. This is called from kdb_trap() where
956 * we want to start a backtrace from the function that caused us to enter
957 * the debugger. We have the context in the trapframe, but base the trace
958 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
959 * enough for a backtrace.
960 */
961void
962makectx(struct trapframe *tf, struct pcb *pcb)
963{
964	pcb->pcb_regs.sf_r4 = tf->tf_r4;
965	pcb->pcb_regs.sf_r5 = tf->tf_r5;
966	pcb->pcb_regs.sf_r6 = tf->tf_r6;
967	pcb->pcb_regs.sf_r7 = tf->tf_r7;
968	pcb->pcb_regs.sf_r8 = tf->tf_r8;
969	pcb->pcb_regs.sf_r9 = tf->tf_r9;
970	pcb->pcb_regs.sf_r10 = tf->tf_r10;
971	pcb->pcb_regs.sf_r11 = tf->tf_r11;
972	pcb->pcb_regs.sf_r12 = tf->tf_r12;
973	pcb->pcb_regs.sf_pc = tf->tf_pc;
974	pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
975	pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
976}
977
978/*
979 * Fake up a boot descriptor table
980 */
/*
 * Build a minimal loader(8)-style preload metadata chain describing
 * the running kernel (and optionally an appended DTB), for boots that
 * did not come through the FreeBSD loader.  Returns the first free
 * address past the kernel (and DTB copy, if any).
 */
vm_offset_t
fake_preload_metadata(struct arm_boot_params *abp __unused, void *dtb_ptr,
    size_t dtb_size)
{
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
	int i = 0;
	static uint32_t fake_preload[35];

	/* Each MODINFO record is a (tag, length, payload...) word triple. */
	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char*)&fake_preload[i++], "kernel");
	i += 1;		/* "kernel"+NUL spans two 32-bit words in total. */
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char*)&fake_preload[i++], "elf kernel");
	i += 2;		/* "elf kernel"+NUL spans three words in total. */
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNVIRTADDR;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
#ifdef DDB
	/*
	 * If the boot trampoline left symbol-table bounds at the start
	 * of the image, publish them and load the ddb symbol table.
	 */
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		db_fetch_ksymtab(zstart, zend);
	} else
#endif
		lastaddr = (vm_offset_t)&end;
	if (dtb_ptr != NULL) {
		/* Copy DTB to KVA space and insert it into module chain. */
		lastaddr = roundup(lastaddr, sizeof(int));
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_DTBP;
		fake_preload[i++] = sizeof(uint32_t);
		fake_preload[i++] = (uint32_t)lastaddr;
		memmove((void *)lastaddr, dtb_ptr, dtb_size);
		lastaddr += dtb_size;
		lastaddr = roundup(lastaddr, sizeof(int));
	}
	/* Terminate the chain with a zero tag/length pair. */
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;

	init_static_kenv(NULL, 0);

	return (lastaddr);
}
1039
/* Initialize CPU 0's pcpu data and bind thread0 to it. */
void
pcpu0_init(void)
{
#if __ARM_ARCH >= 6
	/* curthread must be valid before pcpu_init() runs on >= v6. */
	set_curthread(&thread0);
#endif
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);
}
1049
1050#if defined(LINUX_BOOT_ABI)
1051
1052/* Convert the U-Boot command line into FreeBSD kenv and boot options. */
/* Convert the U-Boot command line into FreeBSD kenv and boot options. */
static void
cmdline_set_env(char *cmdline, const char *guard)
{
	char *cmdline_next, *env;
	size_t size, guard_len;
	int i;

	size = strlen(cmdline);
	/* Skip leading spaces. */
	for (; isspace(*cmdline) && (size > 0); cmdline++)
		size--;

	/* Test and remove guard. */
	if (guard != NULL && guard[0] != '\0') {
		guard_len  =  strlen(guard);
		if (strncasecmp(cmdline, guard, guard_len) != 0)
			return;
		cmdline += guard_len;
		size -= guard_len;
	}

	/* Skip leading spaces. */
	for (; isspace(*cmdline) && (size > 0); cmdline++)
		size--;

	/* Replace ',' with '\0'. */
	/* TODO: implement escaping for ',' character. */
	cmdline_next = cmdline;
	while(strsep(&cmdline_next, ",") != NULL)
		;
	/* The NUL-separated list is now a static kenv block. */
	init_static_kenv(cmdline, 0);
	/* Parse boothowto. */
	for (i = 0; howto_names[i].ev != NULL; i++) {
		env = kern_getenv(howto_names[i].ev);
		if (env != NULL) {
			if (strtoul(env, NULL, 10) != 0)
				boothowto |= howto_names[i].mask;
			freeenv(env);
		}
	}
}
1094
/*
 * Parse Linux-boot-ABI boot parameters: either a DTB pointer in r2,
 * or an ATAG list (memory banks, serial, revision, command line).
 * Returns the first free address past the kernel, or 0 if the
 * register contents do not look like a Linux-ABI boot.
 */
vm_offset_t
linux_parse_boot_param(struct arm_boot_params *abp)
{
	struct arm_lbabi_tag *walker;
	uint32_t revision;
	uint64_t serial;
	int size;
	vm_offset_t lastaddr;
#ifdef FDT
	struct fdt_header *dtb_ptr;
	uint32_t dtb_size;
#endif

	/*
	 * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2
	 * is atags or dtb pointer.  If all of these aren't satisfied,
	 * then punt. Unfortunately, it looks like DT enabled kernels
	 * doesn't uses board type and U-Boot delivers 0 in r1 for them.
	 */
	if (abp->abp_r0 != 0 || abp->abp_r2 == 0)
		return (0);
#ifdef FDT
	/* Test if r2 point to valid DTB. */
	dtb_ptr = (struct fdt_header *)abp->abp_r2;
	if (fdt_check_header(dtb_ptr) == 0) {
		dtb_size = fdt_totalsize(dtb_ptr);
		return (fake_preload_metadata(abp, dtb_ptr, dtb_size));
	}
#endif

	board_id = abp->abp_r1;
	walker = (struct arm_lbabi_tag *)abp->abp_r2;

	/* A valid ATAG list always starts with ATAG_CORE. */
	if (ATAG_TAG(walker) != ATAG_CORE)
		return 0;

	atag_list = walker;
	while (ATAG_TAG(walker) != ATAG_NONE) {
		switch (ATAG_TAG(walker)) {
		case ATAG_CORE:
			break;
		case ATAG_MEM:
			arm_physmem_hardware_region(walker->u.tag_mem.start,
			    walker->u.tag_mem.size);
			break;
		case ATAG_INITRD2:
			break;
		case ATAG_SERIAL:
			/* Serial number arrives as two 32-bit halves. */
			serial = walker->u.tag_sn.high;
			serial <<= 32;
			serial |= walker->u.tag_sn.low;
			board_set_serial(serial);
			break;
		case ATAG_REVISION:
			revision = walker->u.tag_rev.rev;
			board_set_revision(revision);
			break;
		case ATAG_CMDLINE:
			/* Copy at most LBABI_MAX_COMMAND_LINE bytes. */
			size = ATAG_SIZE(walker) -
			    sizeof(struct arm_lbabi_header);
			size = min(size, LBABI_MAX_COMMAND_LINE);
			strncpy(linux_command_line, walker->u.tag_cmd.command,
			    size);
			linux_command_line[size] = '\0';
			break;
		default:
			break;
		}
		walker = ATAG_NEXT(walker);
	}

	/* Save a copy for later */
	bcopy(atag_list, atags,
	    (char *)walker - (char *)atag_list + ATAG_SIZE(walker));

	lastaddr = fake_preload_metadata(abp, NULL, 0);
	cmdline_set_env(linux_command_line, CMDLINE_GUARD);
	return lastaddr;
}
1174#endif
1175
1176#if defined(FREEBSD_BOOT_LOADER)
1177vm_offset_t
1178freebsd_parse_boot_param(struct arm_boot_params *abp)
1179{
1180	vm_offset_t lastaddr = 0;
1181	void *mdp;
1182	void *kmdp;
1183#ifdef DDB
1184	vm_offset_t ksym_start;
1185	vm_offset_t ksym_end;
1186#endif
1187
1188	/*
1189	 * Mask metadata pointer: it is supposed to be on page boundary. If
1190	 * the first argument (mdp) doesn't point to a valid address the
1191	 * bootloader must have passed us something else than the metadata
1192	 * ptr, so we give up.  Also give up if we cannot find metadta section
1193	 * the loader creates that we get all this data out of.
1194	 */
1195
1196	if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL)
1197		return 0;
1198	preload_metadata = mdp;
1199	kmdp = preload_search_by_type("elf kernel");
1200	if (kmdp == NULL)
1201		return 0;
1202
1203	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
1204	loader_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
1205	init_static_kenv(loader_envp, 0);
1206	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
1207#ifdef DDB
1208	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
1209	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
1210	db_fetch_ksymtab(ksym_start, ksym_end);
1211#endif
1212	return lastaddr;
1213}
1214#endif
1215
1216vm_offset_t
1217default_parse_boot_param(struct arm_boot_params *abp)
1218{
1219	vm_offset_t lastaddr;
1220
1221#if defined(LINUX_BOOT_ABI)
1222	if ((lastaddr = linux_parse_boot_param(abp)) != 0)
1223		return lastaddr;
1224#endif
1225#if defined(FREEBSD_BOOT_LOADER)
1226	if ((lastaddr = freebsd_parse_boot_param(abp)) != 0)
1227		return lastaddr;
1228#endif
1229	/* Fall back to hardcoded metadata. */
1230	lastaddr = fake_preload_metadata(abp, NULL, 0);
1231
1232	return lastaddr;
1233}
1234
1235/*
1236 * Stub version of the boot parameter parsing routine.  We are
1237 * called early in initarm, before even VM has been initialized.
1238 * This routine needs to preserve any data that the boot loader
1239 * has passed in before the kernel starts to grow past the end
1240 * of the BSS, traditionally the place boot-loaders put this data.
1241 *
1242 * Since this is called so early, things that depend on the vm system
1243 * being setup (including access to some SoC's serial ports), about
1244 * all that can be done in this routine is to copy the arguments.
1245 *
1246 * This is the default boot parameter parsing routine.  Individual
1247 * kernels/boards can override this weak function with one of their
1248 * own.  We just fake metadata...
1249 */
1250__weak_reference(default_parse_boot_param, parse_boot_param);
1251
1252/*
1253 * Initialize proc0
1254 */
1255void
1256init_proc0(vm_offset_t kstack)
1257{
1258	proc_linkup0(&proc0, &thread0);
1259	thread0.td_kstack = kstack;
1260	thread0.td_pcb = (struct pcb *)
1261		(thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1;
1262	thread0.td_pcb->pcb_flags = 0;
1263	thread0.td_pcb->pcb_vfpcpu = -1;
1264	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
1265	thread0.td_frame = &proc0_tf;
1266	pcpup->pc_curpcb = thread0.td_pcb;
1267}
1268
/*
 * Predict the target of a branch-like ARM instruction for the debugger's
 * single-step machinery.  Decodes 'insn' (fetched at 'pc') and stores the
 * predicted next program counter in *new_pc.
 *
 * cookie    - opaque state passed through to the callbacks.
 * fetch_reg - returns the current value of general-purpose register N.
 * read_int  - reads a 32-bit word from a virtual address; non-zero on fault.
 *
 * Returns 0 on success, the error from read_int when a memory read fails,
 * or EINVAL for an unhandled instruction class.
 */
int
arm_predict_branch(void *cookie, u_int insn, register_t pc, register_t *new_pc,
    u_int (*fetch_reg)(void*, int), u_int (*read_int)(void*, vm_offset_t, u_int*))
{
	u_int addr, nregs, offset = 0;
	int error = 0;

	/* Dispatch on instruction bits 27:24. */
	switch ((insn >> 24) & 0xf) {
	case 0x2:	/* add pc, reg1, #value */
	case 0x0:	/* add pc, reg1, reg2, lsl #offset */
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
		/* Reading r15 yields the instruction address plus 8. */
		if (((insn >> 16) & 0xf) == 15)
			addr += 8;
		/*
		 * NOTE(review): the data-processing immediate bit is
		 * 0x02000000 (bit 25); this mask (0x0200000) tests bit 21.
		 * Looks like a dropped zero -- confirm against the ARM ARM.
		 */
		if (insn & 0x0200000) {
			/*
			 * Immediate operand: 8-bit value rotated right by
			 * twice the 4-bit rotate field.
			 * NOTE(review): when the rotate amount is 0 the
			 * "<< (32 - offset)" shift is by 32 -- undefined
			 * behavior in C; verify intent.
			 */
			offset = (insn >> 7) & 0x1e;
			offset = (insn & 0xff) << (32 - offset) |
			    (insn & 0xff) >> offset;
		} else {

			/* Register operand, possibly shifted. */
			offset = fetch_reg(cookie, insn & 0x0f);
			if ((insn & 0x0000ff0) != 0x00000000) {
				/* Shift amount from a register or immediate. */
				if (insn & 0x10)
					nregs = fetch_reg(cookie,
					    (insn >> 8) & 0xf);
				else
					nregs = (insn >> 7) & 0x1f;
				switch ((insn >> 5) & 3) {
				case 0:
					/* lsl */
					offset = offset << nregs;
					break;
				case 1:
					/* lsr */
					offset = offset >> nregs;
					break;
				default:
					break; /* XXX: asr/ror unhandled */
				}

			}
			*new_pc = addr + offset;
			return (0);

		}

		/*
		 * NOTE(review): the immediate path above does not return and
		 * falls through into the branch decoding below, discarding
		 * the computed addr/offset -- looks unintentional; confirm.
		 */
	case 0xa:	/* b ... */
	case 0xb:	/* bl ... */
		/* Extract and sign-extend the 24-bit word offset (<<2). */
		addr = ((insn << 2) & 0x03ffffff);
		if (addr & 0x02000000)
			addr |= 0xfc000000;
		*new_pc = (pc + 8 + addr);
		return (0);
	case 0x7:	/* ldr pc, [pc, reg, lsl #2] */
		addr = fetch_reg(cookie, insn & 0xf);
		addr = pc + 8 + (addr << 2);
		error = read_int(cookie, addr, &addr);
		*new_pc = addr;
		return (error);
	case 0x1:	/* mov pc, reg */
		*new_pc = fetch_reg(cookie, insn & 0xf);
		return (0);
	case 0x4:
	case 0x5:	/* ldr pc, [reg] */
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
		/* ldr pc, [reg, #offset] */
		if (insn & (1 << 24))
			offset = insn & 0xfff;
		if (insn & 0x00800000)
			addr += offset;
		else
			addr -= offset;
		error = read_int(cookie, addr, &addr);
		*new_pc = addr;

		return (error);
	case 0x8:	/* ldmxx reg, {..., pc} */
	case 0x9:
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
		/* Parallel population count of the 16-bit register list. */
		nregs = (insn  & 0x5555) + ((insn  >> 1) & 0x5555);
		nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333);
		nregs = (nregs + (nregs >> 4)) & 0x0f0f;
		nregs = (nregs + (nregs >> 8)) & 0x001f;
		/* P/U addressing-mode bits pick where pc is loaded from. */
		switch ((insn >> 23) & 0x3) {
		case 0x0:	/* ldmda */
			addr = addr - 0;
			break;
		case 0x1:	/* ldmia */
			addr = addr + 0 + ((nregs - 1) << 2);
			break;
		case 0x2:	/* ldmdb */
			addr = addr - 4;
			break;
		case 0x3:	/* ldmib */
			addr = addr + 4 + ((nregs - 1) << 2);
			break;
		}
		error = read_int(cookie, addr, &addr);
		*new_pc = addr;

		return (error);
	default:
		return (EINVAL);
	}
}
1373
1374#if __ARM_ARCH >= 6
1375void
1376set_stackptrs(int cpu)
1377{
1378
1379	set_stackptr(PSR_IRQ32_MODE,
1380	    irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1381	set_stackptr(PSR_ABT32_MODE,
1382	    abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1383	set_stackptr(PSR_UND32_MODE,
1384	    undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1385}
1386#else
1387void
1388set_stackptrs(int cpu)
1389{
1390
1391	set_stackptr(PSR_IRQ32_MODE,
1392	    irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1393	set_stackptr(PSR_ABT32_MODE,
1394	    abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1395	set_stackptr(PSR_UND32_MODE,
1396	    undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1397}
1398#endif
1399
1400#ifdef EFI
1401#define efi_next_descriptor(ptr, size) \
1402	((struct efi_md *)(((uint8_t *) ptr) + size))
1403
1404static void
1405add_efi_map_entries(struct efi_map_header *efihdr, struct mem_region *mr,
1406    int *mrcnt)
1407{
1408	struct efi_md *map, *p;
1409	const char *type;
1410	size_t efisz, memory_size;
1411	int ndesc, i, j;
1412
1413	static const char *types[] = {
1414		"Reserved",
1415		"LoaderCode",
1416		"LoaderData",
1417		"BootServicesCode",
1418		"BootServicesData",
1419		"RuntimeServicesCode",
1420		"RuntimeServicesData",
1421		"ConventionalMemory",
1422		"UnusableMemory",
1423		"ACPIReclaimMemory",
1424		"ACPIMemoryNVS",
1425		"MemoryMappedIO",
1426		"MemoryMappedIOPortSpace",
1427		"PalCode"
1428	};
1429
1430	*mrcnt = 0;
1431
1432	/*
1433	 * Memory map data provided by UEFI via the GetMemoryMap
1434	 * Boot Services API.
1435	 */
1436	efisz = roundup2(sizeof(struct efi_map_header), 0x10);
1437	map = (struct efi_md *)((uint8_t *)efihdr + efisz);
1438
1439	if (efihdr->descriptor_size == 0)
1440		return;
1441	ndesc = efihdr->memory_size / efihdr->descriptor_size;
1442
1443	if (boothowto & RB_VERBOSE)
1444		printf("%23s %12s %12s %8s %4s\n",
1445		    "Type", "Physical", "Virtual", "#Pages", "Attr");
1446
1447	memory_size = 0;
1448	for (i = 0, j = 0, p = map; i < ndesc; i++,
1449	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
1450		if (boothowto & RB_VERBOSE) {
1451			if (p->md_type <= EFI_MD_TYPE_PALCODE)
1452				type = types[p->md_type];
1453			else
1454				type = "<INVALID>";
1455			printf("%23s %012llx %12p %08llx ", type, p->md_phys,
1456			    p->md_virt, p->md_pages);
1457			if (p->md_attr & EFI_MD_ATTR_UC)
1458				printf("UC ");
1459			if (p->md_attr & EFI_MD_ATTR_WC)
1460				printf("WC ");
1461			if (p->md_attr & EFI_MD_ATTR_WT)
1462				printf("WT ");
1463			if (p->md_attr & EFI_MD_ATTR_WB)
1464				printf("WB ");
1465			if (p->md_attr & EFI_MD_ATTR_UCE)
1466				printf("UCE ");
1467			if (p->md_attr & EFI_MD_ATTR_WP)
1468				printf("WP ");
1469			if (p->md_attr & EFI_MD_ATTR_RP)
1470				printf("RP ");
1471			if (p->md_attr & EFI_MD_ATTR_XP)
1472				printf("XP ");
1473			if (p->md_attr & EFI_MD_ATTR_RT)
1474				printf("RUNTIME");
1475			printf("\n");
1476		}
1477
1478		switch (p->md_type) {
1479		case EFI_MD_TYPE_CODE:
1480		case EFI_MD_TYPE_DATA:
1481		case EFI_MD_TYPE_BS_CODE:
1482		case EFI_MD_TYPE_BS_DATA:
1483		case EFI_MD_TYPE_FREE:
1484			/*
1485			 * We're allowed to use any entry with these types.
1486			 */
1487			break;
1488		default:
1489			continue;
1490		}
1491
1492		j++;
1493		if (j >= FDT_MEM_REGIONS)
1494			break;
1495
1496		mr[j].mr_start = p->md_phys;
1497		mr[j].mr_size = p->md_pages * PAGE_SIZE;
1498		memory_size += mr[j].mr_size;
1499	}
1500
1501	*mrcnt = j;
1502}
1503#endif /* EFI */
1504
1505#ifdef FDT
1506static char *
1507kenv_next(char *cp)
1508{
1509
1510	if (cp != NULL) {
1511		while (*cp != 0)
1512			cp++;
1513		cp++;
1514		if (*cp == 0)
1515			cp = NULL;
1516	}
1517	return (cp);
1518}
1519
1520static void
1521print_kenv(void)
1522{
1523	char *cp;
1524
1525	debugf("loader passed (static) kenv:\n");
1526	if (loader_envp == NULL) {
1527		debugf(" no env, null ptr\n");
1528		return;
1529	}
1530	debugf(" loader_envp = 0x%08x\n", (uint32_t)loader_envp);
1531
1532	for (cp = loader_envp; cp != NULL; cp = kenv_next(cp))
1533		debugf(" %x %s\n", (uint32_t)cp, cp);
1534}
1535
1536#if __ARM_ARCH < 6
/*
 * Machine-dependent startup for __ARM_ARCH < 6: parse boot parameters,
 * discover physical memory from the FDT, hand-build the initial L1/L2
 * page tables, switch to them, and bring up the console and proc0.
 * Returns the initial kernel stack pointer for the SVC mode stack.
 * Statement order here is load-bearing (MMU/TTB/cache sequencing).
 */
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	uint64_t memsize;
	uint32_t l2size;
	char *env;
	void *kmdp;
	u_int l1pagetable;
	int i, j, err_devmap, mem_regions_sz;

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;

	memsize = 0;

	cpuinfo_init();
	set_cpufuncs();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp != NULL)
		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	else
		dtbp = (vm_offset_t)NULL;

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
		panic("Cannot get physical memory regions");
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/* Platform-specific initialisation */
	platform_probe_and_attach();

	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for end of kernel map, one for stacks, msgbuf and
	 * L1 and L2 tables map and one for vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;

	/* Bootstrap allocations start at the first page after the kernel. */
	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)						\
	alloc_pages((var).pv_va, (np));					\
	(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);

#define alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += (np * PAGE_SIZE);					\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	/* The L1 table must be aligned to its own (16KB) size. */
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	/*
	 * Allocate L2 tables; several L2 tables share one page, so only
	 * every (PAGE_SIZE / L2_TABLE_SIZE_REAL)-th one gets fresh pages,
	 * the rest are carved out of the last allocation (index j).
	 */
	for (i = 0, j = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    abp->abp_physaddr;

		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000. This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
	valloc_pages(kernelstack, kstack_pages * MAXCPU);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of kernel text and data using
	 * 1MB section mapping and for the rest of initial kernel address
	 * space use L2 coarse tables.
	 *
	 * Link L2 tables for mapping remainder of kernel (modulo 1MB)
	 * and kernel structures
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0 ; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
	   (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	devmap_bootstrap(l1pagetable, NULL);
	vm_max_kernel_address = platform_lastaddr();

	/*
	 * Switch to the new page tables: open all domains for the switch,
	 * load the new TTB, flush stale TLB entries, then restrict to the
	 * kernel domain only.
	 */
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	cpu_setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/*
	 * Only after the SOC registers block is mapped we can perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);

	platform_gpio_init();

	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	pmap_bootstrap(freemempos, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	dbg_monitor_init();
	kdb_init();

	/* Initial SVC stack pointer for the current (boot) thread. */
	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
1807#else /* __ARM_ARCH < 6 */
/*
 * Machine-dependent startup for __ARM_ARCH >= 6: parse boot parameters,
 * discover physical memory (EFI map or FDT), let the new pmap build and
 * switch to the kernel page tables, then bring up console and proc0.
 * Returns the aligned pcb address for the boot thread.
 * Statement order here is load-bearing (pmap/cache/console sequencing).
 */
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	vm_paddr_t lastaddr;
	vm_offset_t dtbp, kernelstack, dpcpu;
	char *env;
	void *kmdp;
	int err_devmap, mem_regions_sz;
#ifdef EFI
	struct efi_map_header *efihdr;
#endif

	/* get last allocated physical address */
	arm_physmem_kernaddr = abp->abp_physaddr;
	/* Convert the (virtual) kernel end to a physical address. */
	lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;

	set_cpufuncs();
	cpuinfo_init();

	/*
	 * Find the dtb passed in by the boot loader.
	 * NOTE(review): unlike the pre-v6 initarm, kmdp is not checked for
	 * NULL before MD_FETCH here -- presumably a loader-provided kernel
	 * is guaranteed on this path; confirm.
	 */
	kmdp = preload_search_by_type("elf kernel");
	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

#if defined(LINUX_BOOT_ABI)
	/* No loader kenv: fall back to /chosen bootargs from the FDT. */
	if (loader_envp == NULL && fdt_get_chosen_bootargs(linux_command_line,
	    LBABI_MAX_COMMAND_LINE) == 0)
		cmdline_set_env(linux_command_line, CMDLINE_GUARD);
#endif

#ifdef EFI
	/* Prefer the EFI memory map when the loader supplied one. */
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL) {
		add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz);
	} else
#endif
	{
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,NULL) != 0)
			panic("Cannot get physical memory regions");
	}
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/*
	 * Set TEX remapping registers.
	 * Setup kernel page tables and switch to kernel L1 page table.
	 */
	pmap_set_tex();
	pmap_bootstrap_prepare(lastaddr);

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/* Platform-specific initialisation */
	platform_probe_and_attach();
	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/*
	 * Allocate a page for the system page mapped to 0xffff0000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	systempage = pmap_preboot_get_pages(1);

	/* Map the vector page. */
	pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH,  1);
	if (virtual_end >= ARM_VECTORS_HIGH)
		virtual_end = ARM_VECTORS_HIGH - 1;

	/* Allocate dynamic per-cpu area. */
	dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu, 0);

	/* Allocate stacks for all modes */
	irqstack    = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
	abtstack    = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
	undstack    = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU );
	kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU);

	/* Allocate message buffer. */
	msgbufp = (void *)pmap_preboot_get_vpages(
	    round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);
	mutex_init();

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	devmap_bootstrap(0, NULL);
	vm_max_kernel_address = platform_lastaddr();

	/*
	 * Only after the SOC registers block is mapped we can perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);
	platform_gpio_init();
	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	debugf(" lastaddr1: 0x%08x\n", lastaddr);
	print_kenv();

	/*
	 * NOTE(review): the pre-v6 initarm calls freeenv(env) here; this
	 * path does not.  Harmless while only the static kenv exists (the
	 * string is not heap-allocated then), but confirm.
	 */
	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 * NOTE(review): no cache-clean call follows -- the comment above
	 * appears stale, inherited from the pre-v6 path; verify.
	 */
	/* Set stack for exception handlers */
	undefined_init();
	init_proc0(kernelstack);
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	enable_interrupts(PSR_A);
	pmap_bootstrap(0);

	/* Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
		pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	/* Init message buffer. */
	msgbufinit(msgbufp, msgbufsize);
	dbg_monitor_init();
	kdb_init();
	/* Aligned pcb address doubles as the boot thread's stack pointer. */
	return ((void *)STACKALIGN(thread0.td_pcb));

}
1996
1997#endif /* __ARM_ARCH < 6 */
1998#endif /* FDT */
1999