/*	$NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $	*/

/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated      : 18/04/01 updated for new wscons
 */

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_platform.h"
#include "opt_sched.h"
#include "opt_timer.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/arm/arm/machdep.c 266274 2014-05-16 23:27:18Z ian $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/atags.h>
#include <machine/cpu.h>
#include <machine/devmap.h>
#include <machine/frame.h>
#include <machine/intr.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/physmem.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/undefined.h>
#include <machine/vmparam.h>
#include <machine/sysarch.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

#ifdef DEBUG
#define	debugf(fmt, args...) printf(fmt, ##args)
#else
#define	debugf(fmt, args...)
#endif

struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;

extern int *end;
#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

#ifdef FDT
/*
 * This is the number of L2 page tables required for covering max
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks etc.), rounded up to be divisible by 4.
 */
#define KERNEL_PT_MAX	78

static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;

vm_paddr_t pmap_pa;

struct pv_addr systempage;
static struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
static struct pv_addr kernelstack;

#endif

#if defined(LINUX_BOOT_ABI)
#define LBABI_MAX_BANKS	10

uint32_t board_id;
struct arm_lbabi_tag *atag_list;
char linux_command_line[LBABI_MAX_COMMAND_LINE + 1];
char atags[LBABI_MAX_COMMAND_LINE * 2];
uint32_t memstart[LBABI_MAX_BANKS];
uint32_t memsize[LBABI_MAX_BANKS];
uint32_t membanks;
#endif

static uint32_t board_revision;
/* hex representation of uint64_t */
static char board_serial[32];

SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes");
SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD,
    &board_revision, 0, "Board revision");
SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD,
    board_serial, 0, "Board serial");

int vfp_exists;
SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &vfp_exists, 0, "Floating point support enabled");

void
board_set_serial(uint64_t serial)
{

	snprintf(board_serial, sizeof(board_serial)-1,
	    "%016jx", serial);
}

void
board_set_revision(uint32_t revision)
{

	board_revision = revision;
}

void
sendsig(catcher, ksi, mask)
	sig_t catcher;
	ksiginfo_t *ksi;
	sigset_t *mask;
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	int onstack;
	int sig;
	int code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	onstack = sigonstack(tf->tf_usr_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct sigframe *)td->td_frame->tf_usr_sp;

	/* make room on the stack */
	fp--;

	/* make the stack aligned */
	fp = (struct sigframe *)STACKALIGN(fp);
	/* Populate the siginfo frame. */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	frame.sf_uc.uc_stack = td->td_sigstk;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/*
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.  Note the
	 * trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */

	tf->tf_r0 = sig;
	tf->tf_r1 = (register_t)&fp->sf_si;
	tf->tf_r2 = (register_t)&fp->sf_uc;

	/* the trampoline uses r5 as the uc address */
	tf->tf_r5 = (register_t)&fp->sf_uc;
	tf->tf_pc = (register_t)catcher;
	tf->tf_usr_sp = (register_t)fp;
	tf->tf_usr_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
	    tf->tf_usr_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

struct kva_md_info kmi;

/*
 * arm32_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */

extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (int *) va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
}

static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;
#ifdef ARM_TP_ADDRESS
#ifndef ARM_CACHE_LOCK_ENABLE
	vm_page_t m;
#endif
#endif

	identify_arm_cpu();

	vm_ksubmap_init(&kmi);

	/*
	 * Display the RAM layout.
	 */
	printf("real memory  = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(cnt.v_free_count),
	    (uintmax_t)arm32_ptob(cnt.v_free_count) / mbyte);
	if (bootverbose) {
		arm_physmem_print_tables();
		arm_devmap_print_table();
	}

	bufinit();
	vm_pager_bufferinit();
	pcb->un_32.pcb32_und_sp = (u_int)thread0.td_kstack +
	    USPACE_UNDEF_STACK_TOP;
	pcb->un_32.pcb32_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	vector_page_setprot(VM_PROT_READ);
	pmap_set_pcb_pagedir(pmap_kernel(), pcb);
	pmap_postinit();
#ifdef ARM_TP_ADDRESS
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
	*(uint32_t *)ARM_RAS_START = 0;
	*(uint32_t *)ARM_RAS_END = 0xffffffff;
#endif
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	cpu_dcache_wb_range((uintptr_t)ptr, len);
#ifdef ARM_L2_PIPT
	cpu_l2cache_wb_range((uintptr_t)vtophys(ptr), len);
#else
	cpu_l2cache_wb_range((uintptr_t)ptr, len);
#endif
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	return (ENXIO);
}

void
cpu_idle(int busy)
{

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#ifndef NO_EVENTTIMERS
	if (!busy) {
		critical_enter();
		cpu_idleclock();
	}
#endif
	if (!sched_runnable())
		cpu_sleep(0);
#ifndef NO_EVENTTIMERS
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
#endif
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

/*
 * Most ARM platforms don't need to do anything special to init their clocks
 * (they get initialized during normal device attachment), and by not defining
 * a cpu_initclocks() function they get this generic one.  Any platform that
 * needs to do something special can just provide its own implementation,
 * which will override this one due to the weak linkage.
 */
void
arm_generic_initclocks(void)
{

#ifndef NO_EVENTTIMERS
#ifdef SMP
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	else
		cpu_initclocks_ap();
#else
	cpu_initclocks_bsp();
#endif
#endif
}
__weak_reference(arm_generic_initclocks, cpu_initclocks);

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
	regs->r_sp = tf->tf_usr_sp;
	regs->r_lr = tf->tf_usr_lr;
	regs->r_pc = tf->tf_pc;
	regs->r_cpsr = tf->tf_spsr;
	return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{

	bzero(regs, sizeof(*regs));
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
	tf->tf_usr_sp = regs->r_sp;
	tf->tf_usr_lr = regs->r_lr;
	tf->tf_pc = regs->r_pc;
	tf->tf_spsr &= ~PSR_FLAGS;
	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{

	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	return (0);
}

static int
ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
{
	struct iovec iov;
	struct uio uio;

	PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
	iov.iov_base = (caddr_t) v;
	iov.iov_len = sizeof(u_int32_t);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int32_t);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;
	return proc_rwmem(td->td_proc, &uio);
}

static int
ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
{
	struct iovec iov;
	struct uio uio;

	PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
	iov.iov_base = (caddr_t) &v;
	iov.iov_len = sizeof(u_int32_t);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int32_t);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	return proc_rwmem(td->td_proc, &uio);
}

int
ptrace_single_step(struct thread *td)
{
	struct proc *p;
	int error;

	KASSERT(td->td_md.md_ptrace_instr == 0,
	    ("Didn't clear single step"));
	p = td->td_proc;
	PROC_UNLOCK(p);
	error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
	    &td->td_md.md_ptrace_instr);
	if (error)
		goto out;
	error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
	    PTRACE_BREAKPOINT);
	if (error)
		td->td_md.md_ptrace_instr = 0;
	td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
out:
	PROC_LOCK(p);
	return (error);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct proc *p;

	if (td->td_md.md_ptrace_instr) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr,
		    td->td_md.md_ptrace_instr);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr = 0;
	}
	return (0);
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	td->td_frame->tf_pc = addr;
	return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

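/*
 * MD spinlock hooks used by the MI spin-mutex code.  Acquiring the
 * outermost spinlock on a CPU disables interrupts and saves the previous
 * CPSR; releasing the matching outermost spinlock restores it.
 */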
void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		cspr = disable_interrupts(I32_bit | F32_bit);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	critical_exit();
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(cspr);
}

/*
 * Clear registers on exec
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(*tf));
	tf->tf_usr_sp = stack;
	tf->tf_usr_lr = imgp->entry_addr;
	tf->tf_svc_lr = 0x77777777;
	tf->tf_pc = imgp->entry_addr;
	tf->tf_spsr = PSR_USR32_MODE;
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;
	__greg_t *gr = mcp->__gregs;

	if (clear_ret & GET_MC_CLEAR_RET)
		gr[_REG_R0] = 0;
	else
		gr[_REG_R0] = tf->tf_r0;
	gr[_REG_R1] = tf->tf_r1;
	gr[_REG_R2] = tf->tf_r2;
	gr[_REG_R3] = tf->tf_r3;
	gr[_REG_R4] = tf->tf_r4;
	gr[_REG_R5] = tf->tf_r5;
	gr[_REG_R6] = tf->tf_r6;
	gr[_REG_R7] = tf->tf_r7;
	gr[_REG_R8] = tf->tf_r8;
	gr[_REG_R9] = tf->tf_r9;
	gr[_REG_R10] = tf->tf_r10;
	gr[_REG_R11] = tf->tf_r11;
	gr[_REG_R12] = tf->tf_r12;
	gr[_REG_SP] = tf->tf_usr_sp;
	gr[_REG_LR] = tf->tf_usr_lr;
	gr[_REG_PC] = tf->tf_pc;
	gr[_REG_CPSR] = tf->tf_spsr;

	return (0);
}

/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	const __greg_t *gr = mcp->__gregs;

	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_r12 = gr[_REG_R12];
	tf->tf_usr_sp = gr[_REG_SP];
	tf->tf_usr_lr = gr[_REG_LR];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_spsr = gr[_REG_CPSR];

	return (0);
}

/*
 * MPSAFE
 */
int
sys_sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	int spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);
	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	spsr = uc.uc_mcontext.__gregs[_REG_CPSR];
	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (spsr & (I32_bit | F32_bit)) != 0)
		return (EINVAL);
	/* Restore register context. */
	set_mcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe.  This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger.  We have the context in the trapframe, but base the trace
 * on the PCB.  The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	pcb->un_32.pcb32_r8 = tf->tf_r8;
	pcb->un_32.pcb32_r9 = tf->tf_r9;
	pcb->un_32.pcb32_r10 = tf->tf_r10;
	pcb->un_32.pcb32_r11 = tf->tf_r11;
	pcb->un_32.pcb32_r12 = tf->tf_r12;
	pcb->un_32.pcb32_pc = tf->tf_pc;
	pcb->un_32.pcb32_lr = tf->tf_usr_lr;
	pcb->un_32.pcb32_sp = tf->tf_usr_sp;
}

/*
 * Fake up a boot descriptor table
 */
vm_offset_t
fake_preload_metadata(struct arm_boot_params *abp __unused)
{
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
	int i = 0;
	static uint32_t fake_preload[35];

	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char*)&fake_preload[i++], "kernel");
	i += 1;
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char*)&fake_preload[i++], "elf kernel");
	i += 2;
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNVIRTADDR;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
#ifdef DDB
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		ksym_start = zstart;
		ksym_end = zend;
	} else
#endif
		lastaddr = (vm_offset_t)&end;
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;

	return (lastaddr);
}

void
pcpu0_init(void)
{
#if ARM_ARCH_6 || ARM_ARCH_7A || defined(CPU_MV_PJ4B)
	set_curthread(&thread0);
#endif
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);
#ifdef VFP
	PCPU_SET(cpu, 0);
#endif
}

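/*
 * Boot parameter parsers.  Each parser below returns the first address
 * beyond the kernel and any boot data it needs preserved, or 0 if the
 * registers/metadata handed to us are not in the format it understands;
 * default_parse_boot_param() tries them in turn and finally falls back
 * to fake_preload_metadata().
 */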
#if defined(LINUX_BOOT_ABI)
vm_offset_t
linux_parse_boot_param(struct arm_boot_params *abp)
{
	struct arm_lbabi_tag *walker;
	uint32_t revision;
	uint64_t serial;

	/*
	 * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2
	 * is atags or dtb pointer.  If all of these aren't satisfied,
	 * then punt.
	 */
	if (!(abp->abp_r0 == 0 && abp->abp_r1 != 0 && abp->abp_r2 != 0))
		return 0;

	board_id = abp->abp_r1;
	walker = (struct arm_lbabi_tag *)
	    (abp->abp_r2 + KERNVIRTADDR - abp->abp_physaddr);

	/* xxx - Need to also look for binary device tree */
	if (ATAG_TAG(walker) != ATAG_CORE)
		return 0;

	atag_list = walker;
	while (ATAG_TAG(walker) != ATAG_NONE) {
		switch (ATAG_TAG(walker)) {
		case ATAG_CORE:
			break;
		case ATAG_MEM:
			arm_physmem_hardware_region(walker->u.tag_mem.start,
			    walker->u.tag_mem.size);
			break;
		case ATAG_INITRD2:
			break;
		case ATAG_SERIAL:
			serial = walker->u.tag_sn.low |
			    ((uint64_t)walker->u.tag_sn.high << 32);
			board_set_serial(serial);
			break;
		case ATAG_REVISION:
			revision = walker->u.tag_rev.rev;
			board_set_revision(revision);
			break;
		case ATAG_CMDLINE:
			/* XXX open question: Parse this for boothowto? */
			bcopy(walker->u.tag_cmd.command, linux_command_line,
			    ATAG_SIZE(walker));
			break;
		default:
			break;
		}
		walker = ATAG_NEXT(walker);
	}

	/* Save a copy for later */
	bcopy(atag_list, atags,
	    (char *)walker - (char *)atag_list + ATAG_SIZE(walker));

	return fake_preload_metadata(abp);
}
#endif

#if defined(FREEBSD_BOOT_LOADER)
vm_offset_t
freebsd_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr = 0;
	void *mdp;
	void *kmdp;

	/*
	 * Mask metadata pointer: it is supposed to be on page boundary.  If
	 * the first argument (mdp) doesn't point to a valid address the
	 * bootloader must have passed us something other than the metadata
	 * ptr, so we give up.  Also give up if we cannot find the metadata
	 * section the loader creates that we get all this data out of.
	 */

	if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL)
		return 0;
	preload_metadata = mdp;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		return 0;

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
	preload_addr_relocate = KERNVIRTADDR - abp->abp_physaddr;
	return lastaddr;
}
#endif

vm_offset_t
default_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr;

#if defined(LINUX_BOOT_ABI)
	if ((lastaddr = linux_parse_boot_param(abp)) != 0)
		return lastaddr;
#endif
#if defined(FREEBSD_BOOT_LOADER)
	if ((lastaddr = freebsd_parse_boot_param(abp)) != 0)
		return lastaddr;
#endif
	/* Fall back to hardcoded metadata. */
	lastaddr = fake_preload_metadata(abp);

	return lastaddr;
}

/*
 * Stub version of the boot parameter parsing routine.  We are
 * called early in initarm, before even VM has been initialized.
 * This routine needs to preserve any data that the boot loader
 * has passed in before the kernel starts to grow past the end
 * of the BSS, traditionally the place boot-loaders put this data.
 *
 * Since this is called so early, before things that depend on the
 * vm system being set up (including access to some SoCs' serial
 * ports) are available, about all that can be done in this routine
 * is to copy the arguments.
 *
 * This is the default boot parameter parsing routine.  Individual
 * kernels/boards can override this weak function with one of their
 * own.  We just fake metadata...
 */
__weak_reference(default_parse_boot_param, parse_boot_param);

/*
 * Initialize proc0
 */
void
init_proc0(vm_offset_t kstack)
{
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}

#ifdef FDT
static char *
kenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

static void
print_kenv(void)
{
	int len;
	char *cp;

	debugf("loader passed (static) kenv:\n");
	if (kern_envp == NULL) {
		debugf(" no env, null ptr\n");
		return;
	}
	debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp);

	len = 0;
	for (cp = kern_envp; cp != NULL; cp = kenv_next(cp))
		debugf(" %x %s\n", (uint32_t)cp, cp);
}

void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	uint32_t memsize, l2size;
	char *env;
	void *kmdp;
	u_int l1pagetable;
	int i, j, err_devmap, mem_regions_sz;

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;

	memsize = 0;
	set_cpufuncs();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp != NULL)
		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	else
		dtbp = (vm_offset_t)NULL;

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
		panic("Cannot get physical memory regions");
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/* Platform-specific initialisation */
	initarm_early_init();

	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for end of kernel map, one for stacks, msgbuf and
	 * L1 and L2 tables map and one for vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;

	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)						\
	alloc_pages((var).pv_va, (np));					\
	(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);

#define alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += (np * PAGE_SIZE);					\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	for (i = 0, j = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    abp->abp_physaddr;
		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000.  This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
	valloc_pages(kernelstack, KSTACK_PAGES * MAXCPU);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of kernel text and data using
	 * 1MB section mapping and for the rest of initial kernel address
	 * space use L2 coarse tables.
	 *
	 * Link L2 tables for mapping remainder of kernel (modulo 1MB)
	 * and kernel structures.
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0 ; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);

	/* Establish static device mappings. */
	err_devmap = initarm_devmap_init();
	arm_devmap_bootstrap(l1pagetable, NULL);
	vm_max_kernel_address = initarm_lastaddr();

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Now that proper page tables are installed, call cpu_setup() to
	 * enable instruction and data caches and other chip-specific features.
	 */
	cpu_setup("");

	/*
	 * Only after the SoC registers block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);

	initarm_gpio_init();

	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kenv();

	env = getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	initarm_late_init();

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache.  This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync.  A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	/* Set stack for exception handlers */
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_intrnames_init();
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	pmap_bootstrap(freemempos, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Exclude the kernel (and all the things we allocated which
	 * immediately follow the kernel) from the VM allocation pool but not
	 * from crash dumps.  virtual_avail is a global variable which tracks
	 * the kva we've "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
#endif