/*	$NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $	*/

/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated      : 18/04/01 updated for new wscons
 */

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_platform.h"
#include "opt_sched.h"
#include "opt_timer.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/arm/arm/machdep.c 266341 2014-05-17 19:37:04Z ian $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/atags.h>
#include <machine/cpu.h>
#include <machine/devmap.h>
#include <machine/frame.h>
#include <machine/intr.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/physmem.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/undefined.h>
#include <machine/vfp.h>
#include <machine/vmparam.h>
#include <machine/sysarch.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

#ifdef DEBUG
#define	debugf(fmt, args...) printf(fmt, ##args)
#else
#define	debugf(fmt, args...)
#endif

struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;

extern int *end;
#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

#ifdef FDT
/*
 * This is the number of L2 page tables required for covering max
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks, etc.), rounded up to a multiple of 4.
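 *
 * (Each coarse L2 table maps 1 MB of kernel virtual address space, and most
 * of this budget goes to the vm_page array needed to describe a full 4 GB
 * of RAM; see the l2size calculation in initarm() below.)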
 */
#define KERNEL_PT_MAX	78

static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];

vm_paddr_t pmap_pa;

struct pv_addr systempage;
static struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
static struct pv_addr kernelstack;

#endif

#if defined(LINUX_BOOT_ABI)
#define LBABI_MAX_BANKS	10

uint32_t board_id;
struct arm_lbabi_tag *atag_list;
char linux_command_line[LBABI_MAX_COMMAND_LINE + 1];
char atags[LBABI_MAX_COMMAND_LINE * 2];
uint32_t memstart[LBABI_MAX_BANKS];
uint32_t memsize[LBABI_MAX_BANKS];
uint32_t membanks;
#endif

static uint32_t board_revision;
/* hex representation of uint64_t */
static char board_serial[32];

SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes");
SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD,
    &board_revision, 0, "Board revision");
SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD,
    board_serial, 0, "Board serial");

int vfp_exists;
SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &vfp_exists, 0, "Floating point support enabled");

void
board_set_serial(uint64_t serial)
{

	snprintf(board_serial, sizeof(board_serial)-1,
	    "%016jx", serial);
}

void
board_set_revision(uint32_t revision)
{

	board_revision = revision;
}

void
sendsig(catcher, ksi, mask)
	sig_t catcher;
	ksiginfo_t *ksi;
	sigset_t *mask;
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	int onstack;
	int sig;
	int code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	onstack = sigonstack(tf->tf_usr_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct sigframe *)td->td_frame->tf_usr_sp;

	/* make room on the stack */
	fp--;

	/* make the stack aligned */
	fp = (struct sigframe *)STACKALIGN(fp);
	/* Populate the siginfo frame. */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK )
	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	frame.sf_uc.uc_stack = td->td_sigstk;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/*
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.
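	 * (The trampoline here is the small signal-return stub of
	 * sv_szsigcode bytes that the kernel places just below the
	 * ps_strings area in user space; tf_usr_lr is pointed at it
	 * a few lines down.)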
	 * Note the trampoline version numbers are coordinated with
	 * machine-dependent code in libc.
	 */

	tf->tf_r0 = sig;
	tf->tf_r1 = (register_t)&fp->sf_si;
	tf->tf_r2 = (register_t)&fp->sf_uc;

	/* the trampoline uses r5 as the uc address */
	tf->tf_r5 = (register_t)&fp->sf_uc;
	tf->tf_pc = (register_t)catcher;
	tf->tf_usr_sp = (register_t)fp;
	tf->tf_usr_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
	    tf->tf_usr_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

struct kva_md_info kmi;

/*
 * arm_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */

extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (int *) va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
}

static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;
#ifdef ARM_TP_ADDRESS
#ifndef ARM_CACHE_LOCK_ENABLE
	vm_page_t m;
#endif
#endif

	identify_arm_cpu();

	vm_ksubmap_init(&kmi);

	/*
	 * Display the RAM layout.
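	 *
	 * (realmem and cnt.v_free_count are page counts; arm32_ptob()
	 * converts them to byte totals for the lines printed below.)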
	 */
	printf("real memory  = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(cnt.v_free_count),
	    (uintmax_t)arm32_ptob(cnt.v_free_count) / mbyte);
	if (bootverbose) {
		arm_physmem_print_tables();
		arm_devmap_print_table();
	}

	bufinit();
	vm_pager_bufferinit();
	pcb->un_32.pcb32_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	vector_page_setprot(VM_PROT_READ);
	pmap_set_pcb_pagedir(pmap_kernel(), pcb);
	pmap_postinit();
#ifdef ARM_TP_ADDRESS
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
	*(uint32_t *)ARM_RAS_START = 0;
	*(uint32_t *)ARM_RAS_END = 0xffffffff;
#endif
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	cpu_dcache_wb_range((uintptr_t)ptr, len);
#ifdef ARM_L2_PIPT
	cpu_l2cache_wb_range((uintptr_t)vtophys(ptr), len);
#else
	cpu_l2cache_wb_range((uintptr_t)ptr, len);
#endif
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	return (ENXIO);
}

void
cpu_idle(int busy)
{

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#ifndef NO_EVENTTIMERS
	if (!busy) {
		critical_enter();
		cpu_idleclock();
	}
#endif
	if (!sched_runnable())
		cpu_sleep(0);
#ifndef NO_EVENTTIMERS
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
#endif
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

/*
 * Most ARM platforms don't need to do anything special to init their clocks
 * (they get initialized during normal device attachment), and by not defining
 * a cpu_initclocks() function they get this generic one.  Any platform that
 * needs to do something special can just provide its own implementation,
 * which will override this one due to the weak linkage.
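 *
 * (For example, a SoC that has to start its own timer hardware can simply
 * define a strong cpu_initclocks() of its own; the __weak_reference() below
 * then leaves arm_generic_initclocks() unused on that platform.)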
 */
void
arm_generic_initclocks(void)
{

#ifndef NO_EVENTTIMERS
#ifdef SMP
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	else
		cpu_initclocks_ap();
#else
	cpu_initclocks_bsp();
#endif
#endif
}
__weak_reference(arm_generic_initclocks, cpu_initclocks);

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
	regs->r_sp = tf->tf_usr_sp;
	regs->r_lr = tf->tf_usr_lr;
	regs->r_pc = tf->tf_pc;
	regs->r_cpsr = tf->tf_spsr;
	return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{

	bzero(regs, sizeof(*regs));
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
	tf->tf_usr_sp = regs->r_sp;
	tf->tf_usr_lr = regs->r_lr;
	tf->tf_pc = regs->r_pc;
	tf->tf_spsr &= ~PSR_FLAGS;
	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{

	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	return (0);
}


static int
ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
{
	struct iovec iov;
	struct uio uio;

	PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
	iov.iov_base = (caddr_t) v;
	iov.iov_len = sizeof(u_int32_t);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int32_t);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;
	return proc_rwmem(td->td_proc, &uio);
}

static int
ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
{
	struct iovec iov;
	struct uio uio;

	PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
	iov.iov_base = (caddr_t) &v;
	iov.iov_len = sizeof(u_int32_t);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int32_t);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	return proc_rwmem(td->td_proc, &uio);
}

int
ptrace_single_step(struct thread *td)
{
	struct proc *p;
	int error;

	KASSERT(td->td_md.md_ptrace_instr == 0,
	    ("Didn't clear single step"));
	p = td->td_proc;
	PROC_UNLOCK(p);
	error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
	    &td->td_md.md_ptrace_instr);
	if (error)
		goto out;
	error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
	    PTRACE_BREAKPOINT);
	if (error)
		td->td_md.md_ptrace_instr = 0;
	td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
out:
	PROC_LOCK(p);
	return (error);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct proc *p;

	if (td->td_md.md_ptrace_instr) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr,
		    td->td_md.md_ptrace_instr);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr = 0;
	}
	return (0);
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{

	td->td_frame->tf_pc = addr;
	return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		cspr = disable_interrupts(I32_bit | F32_bit);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	critical_exit();
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(cspr);
}

/*
 * Clear registers on exec
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(*tf));
	tf->tf_usr_sp = stack;
	tf->tf_usr_lr = imgp->entry_addr;
	tf->tf_svc_lr = 0x77777777;
	tf->tf_pc = imgp->entry_addr;
	tf->tf_spsr = PSR_USR32_MODE;
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;
	__greg_t *gr = mcp->__gregs;

	if (clear_ret & GET_MC_CLEAR_RET)
		gr[_REG_R0] = 0;
	else
		gr[_REG_R0] = tf->tf_r0;
	gr[_REG_R1] = tf->tf_r1;
	gr[_REG_R2] = tf->tf_r2;
	gr[_REG_R3] = tf->tf_r3;
	gr[_REG_R4] = tf->tf_r4;
	gr[_REG_R5] = tf->tf_r5;
	gr[_REG_R6] = tf->tf_r6;
	gr[_REG_R7] = tf->tf_r7;
	gr[_REG_R8] = tf->tf_r8;
	gr[_REG_R9] = tf->tf_r9;
	gr[_REG_R10] = tf->tf_r10;
	gr[_REG_R11] = tf->tf_r11;
	gr[_REG_R12] = tf->tf_r12;
	gr[_REG_SP] = tf->tf_usr_sp;
	gr[_REG_LR] = tf->tf_usr_lr;
	gr[_REG_PC] = tf->tf_pc;
	gr[_REG_CPSR] = tf->tf_spsr;

	return (0);
}

/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	const __greg_t *gr = mcp->__gregs;

	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_r12 = gr[_REG_R12];
	tf->tf_usr_sp = gr[_REG_SP];
	tf->tf_usr_lr = gr[_REG_LR];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_spsr = gr[_REG_CPSR];

	return (0);
}

/*
 * MPSAFE
 */
int
sys_sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	int spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);
	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	spsr = uc.uc_mcontext.__gregs[_REG_CPSR];
	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (spsr & (I32_bit | F32_bit)) != 0)
		return (EINVAL);
	/* Restore register context. */
	set_mcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}


/*
 * Construct a PCB from a trapframe.  This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger.  We have the context in the trapframe, but base the trace
 * on the PCB.  The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	pcb->un_32.pcb32_r8 = tf->tf_r8;
	pcb->un_32.pcb32_r9 = tf->tf_r9;
	pcb->un_32.pcb32_r10 = tf->tf_r10;
	pcb->un_32.pcb32_r11 = tf->tf_r11;
	pcb->un_32.pcb32_r12 = tf->tf_r12;
	pcb->un_32.pcb32_pc = tf->tf_pc;
	pcb->un_32.pcb32_lr = tf->tf_usr_lr;
	pcb->un_32.pcb32_sp = tf->tf_usr_sp;
}

/*
 * Fake up a boot descriptor table
 */
vm_offset_t
fake_preload_metadata(struct arm_boot_params *abp __unused)
{
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
	int i = 0;
	static uint32_t fake_preload[35];

	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char*)&fake_preload[i++], "kernel");
	i += 1;
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char*)&fake_preload[i++], "elf kernel");
	i += 2;
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNVIRTADDR;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
#ifdef DDB
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		ksym_start = zstart;
		ksym_end = zend;
	} else
#endif
		lastaddr = (vm_offset_t)&end;
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;

	return (lastaddr);
}

void
pcpu0_init(void)
{
#if ARM_ARCH_6 || ARM_ARCH_7A || defined(CPU_MV_PJ4B)
	set_curthread(&thread0);
#endif
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);
#ifdef VFP
	PCPU_SET(cpu, 0);
#endif
}

#if defined(LINUX_BOOT_ABI)
vm_offset_t
linux_parse_boot_param(struct arm_boot_params *abp)
{
	struct arm_lbabi_tag *walker;
	uint32_t revision;
	uint64_t serial;

	/*
	 * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2
	 * is atags or dtb pointer.  If all of these aren't satisfied,
	 * then punt.
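	 *
	 * (Typical Linux-style loaders enter the kernel with r0 = 0,
	 * r1 = the machine type number and r2 = the physical address of
	 * the ATAG list or DTB, which is what the check below relies on.)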
	 */
	if (!(abp->abp_r0 == 0 && abp->abp_r1 != 0 && abp->abp_r2 != 0))
		return 0;

	board_id = abp->abp_r1;
	walker = (struct arm_lbabi_tag *)
	    (abp->abp_r2 + KERNVIRTADDR - abp->abp_physaddr);

	/* xxx - Need to also look for binary device tree */
	if (ATAG_TAG(walker) != ATAG_CORE)
		return 0;

	atag_list = walker;
	while (ATAG_TAG(walker) != ATAG_NONE) {
		switch (ATAG_TAG(walker)) {
		case ATAG_CORE:
			break;
		case ATAG_MEM:
			arm_physmem_hardware_region(walker->u.tag_mem.start,
			    walker->u.tag_mem.size);
			break;
		case ATAG_INITRD2:
			break;
		case ATAG_SERIAL:
			serial = walker->u.tag_sn.low |
			    ((uint64_t)walker->u.tag_sn.high << 32);
			board_set_serial(serial);
			break;
		case ATAG_REVISION:
			revision = walker->u.tag_rev.rev;
			board_set_revision(revision);
			break;
		case ATAG_CMDLINE:
			/* XXX open question: Parse this for boothowto? */
			bcopy(walker->u.tag_cmd.command, linux_command_line,
			    ATAG_SIZE(walker));
			break;
		default:
			break;
		}
		walker = ATAG_NEXT(walker);
	}

	/* Save a copy for later */
	bcopy(atag_list, atags,
	    (char *)walker - (char *)atag_list + ATAG_SIZE(walker));

	return fake_preload_metadata(abp);
}
#endif

#if defined(FREEBSD_BOOT_LOADER)
vm_offset_t
freebsd_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr = 0;
	void *mdp;
	void *kmdp;

	/*
	 * Mask metadata pointer: it is supposed to be on page boundary.  If
	 * the first argument (mdp) doesn't point to a valid address the
	 * bootloader must have passed us something else than the metadata
	 * ptr, so we give up.  Also give up if we cannot find the metadata
	 * section the loader creates that we get all this data out of.
	 */

	if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL)
		return 0;
	preload_metadata = mdp;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		return 0;

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
	preload_addr_relocate = KERNVIRTADDR - abp->abp_physaddr;
	return lastaddr;
}
#endif

vm_offset_t
default_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr;

#if defined(LINUX_BOOT_ABI)
	if ((lastaddr = linux_parse_boot_param(abp)) != 0)
		return lastaddr;
#endif
#if defined(FREEBSD_BOOT_LOADER)
	if ((lastaddr = freebsd_parse_boot_param(abp)) != 0)
		return lastaddr;
#endif
	/* Fall back to hardcoded metadata. */
	lastaddr = fake_preload_metadata(abp);

	return lastaddr;
}

/*
 * Stub version of the boot parameter parsing routine.  We are
 * called early in initarm, before even VM has been initialized.
 * This routine needs to preserve any data that the boot loader
 * has passed in before the kernel starts to grow past the end
 * of the BSS, traditionally the place boot-loaders put this data.
 *
 * Since this is called so early, before anything that depends on the VM
 * system is set up (including access to some SoCs' serial ports), about
 * all that can be done in this routine is to copy the arguments.
 *
 * This is the default boot parameter parsing routine.
 * Individual kernels/boards can override this weak function with one of
 * their own.  We just fake metadata...
 */
__weak_reference(default_parse_boot_param, parse_boot_param);

/*
 * Initialize proc0
 */
void
init_proc0(vm_offset_t kstack)
{
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_vfpcpu = -1;
	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN | VFPSCR_FZ;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}

#ifdef FDT
static char *
kenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

static void
print_kenv(void)
{
	int len;
	char *cp;

	debugf("loader passed (static) kenv:\n");
	if (kern_envp == NULL) {
		debugf(" no env, null ptr\n");
		return;
	}
	debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp);

	len = 0;
	for (cp = kern_envp; cp != NULL; cp = kenv_next(cp))
		debugf(" %x %s\n", (uint32_t)cp, cp);
}

void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	uint32_t memsize, l2size;
	char *env;
	void *kmdp;
	u_int l1pagetable;
	int i, j, err_devmap, mem_regions_sz;

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;

	memsize = 0;
	set_cpufuncs();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp != NULL)
		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	else
		dtbp = (vm_offset_t)NULL;

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
		panic("Cannot get physical memory regions");
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/* Platform-specific initialisation */
	initarm_early_init();

	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for end of kernel map, one for stacks, msgbuf and
	 * L1 and L2 tables map and one for vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;

	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)					\
	alloc_pages((var).pv_va, (np));				\
	(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);

#define alloc_pages(var, np)					\
	(var) = freemempos;					\
	freemempos += (np * PAGE_SIZE);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	for (i = 0, j = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    abp->abp_physaddr;
		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000.  This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
	valloc_pages(kernelstack, KSTACK_PAGES * MAXCPU);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of kernel text and data using
	 * 1MB section mapping and for the rest of initial kernel address
	 * space use L2 coarse tables.
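	 *
	 * (A 1MB L1 section entry maps its whole megabyte with a single
	 * descriptor, while a coarse L2 table maps the same megabyte as
	 * 256 individual 4KB small pages; the loop below links one L2
	 * table per 1MB of the remaining address range.)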
	 *
	 * Link L2 tables for mapping remainder of kernel (modulo 1MB)
	 * and kernel structures
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0 ; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);

	/* Establish static device mappings. */
	err_devmap = initarm_devmap_init();
	arm_devmap_bootstrap(l1pagetable, NULL);
	vm_max_kernel_address = initarm_lastaddr();

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup("");

	/*
	 * Only after the SoC register block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);

	initarm_gpio_init();

	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kenv();

	env = getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	initarm_late_init();

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache.  This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out of sync.
	 * A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_intrnames_init();
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	pmap_bootstrap(freemempos, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
#endif