/* machdep.c revision 331017 */
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001 Jake Burkholder.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
 * from: FreeBSD: src/sys/i386/i386/machdep.c,v 1.477 2001/08/27
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/sparc64/sparc64/machdep.c 331017 2018-03-15 19:08:33Z kevans $");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/cons.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/interrupt.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/timetc.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#include <ddb/ddb.h>

#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cmt.h>
#include <machine/cpu.h>
#include <machine/fireplane.h>
#include <machine/fp.h>
#include <machine/fsr.h>
#include <machine/intr_machdep.h>
#include <machine/jbus.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/ofw_machdep.h>
#include <machine/ofw_mem.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/pstate.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/smp.h>
#include <machine/tick.h>
#include <machine/tlb.h>
#include <machine/tstate.h>
#include <machine/upa.h>
#include <machine/ver.h>

/* Signature of the Open Firmware client-interface entry point. */
typedef int ofw_vec_t(void *);

/* TLB geometry of the boot CPU, read from the OFW device tree. */
int dtlb_slots;
int itlb_slots;
/* Kernel TLB entries passed in by loader(8) via metadata. */
struct tlb_entry *kernel_tlbs;
int kernel_tlb_slots;

int cold = 1;
long Maxmem;
long realmem;

/* Static storage for the BSP: dynamic per-CPU area, pcpu page(s), frame0. */
void *dpcpu0;
char pcpu0[PCPU_PAGES * PAGE_SIZE];
struct trapframe frame0;

/* Boot-time kernel stack for thread0. */
vm_offset_t kstack0;
vm_paddr_t kstack0_phys;

struct kva_md_info kmi;

/* Saved Open Firmware entry vector and trap-table base. */
u_long ofw_vec;
u_long ofw_tba;
u_int tba_taken_over;

char sparc64_model[32];

/* Tunable (machdep.use_vis): use VIS-based block copy/zero when non-zero. */
static int cpu_use_vis = 1;

cpu_block_copy_t *cpu_block_copy;
cpu_block_zero_t *cpu_block_zero;

static phandle_t find_bsp(phandle_t node, uint32_t bspid, u_int cpu_impl);
void sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3,
    ofw_vec_t *vec);
static void sparc64_shutdown_final(void *dummy, int howto);

static void cpu_startup(void *arg);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Compile-time layout checks; assembly and userland depend on these sizes. */
CTASSERT((1 << INT_SHIFT) == sizeof(int));
CTASSERT((1 << PTR_SHIFT) == sizeof(char *));

CTASSERT(sizeof(struct reg) == 256);
CTASSERT(sizeof(struct fpreg) == 272);
CTASSERT(sizeof(struct __mcontext) == 512);

CTASSERT((sizeof(struct pcb) & (64 - 1)) == 0);
CTASSERT((offsetof(struct pcb, pcb_kfp) & (64 - 1)) == 0);
CTASSERT((offsetof(struct pcb, pcb_ufp) & (64 - 1)) == 0);
CTASSERT(sizeof(struct pcb) <= ((KSTACK_PAGES * PAGE_SIZE) / 8));

CTASSERT(sizeof(struct pcpu) <= ((PCPU_PAGES * PAGE_SIZE) / 2));

/*
 * SI_SUB_CPU startup hook: report physical/available memory, finish VM
 * submap and buffer-cache initialization, register the final shutdown
 * handler and identify the boot CPU.
 */
static void
cpu_startup(void *arg)
{
	vm_paddr_t physsz;
	int i;

	/* Sum all physical memory regions discovered via OFW. */
	physsz = 0;
	for (i = 0; i < sparc64_nmemreg; i++)
		physsz += sparc64_memreg[i].mr_size;
	printf("real memory = %lu (%lu MB)\n", physsz,
	    physsz / (1024 * 1024));
	realmem = (long)physsz / PAGE_SIZE;

	vm_ksubmap_init(&kmi);

	bufinit();
	vm_pager_bufferinit();

	EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
	    SHUTDOWN_PRI_LAST);

	printf("avail memory = %lu (%lu MB)\n", vm_cnt.v_free_count * PAGE_SIZE,
	    vm_cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));

	if (bootverbose)
		printf("machine: %s\n", sparc64_model);

	cpu_identify(rdpr(ver), PCPU_GET(clock), curcpu);
}

/*
 * MD per-CPU initialization: set up the free list of interrupt request
 * structures in the pcpu area.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
	struct intr_request *ir;
	int i;

	pcpu->pc_irtail = &pcpu->pc_irhead;
	for (i = 0; i < IR_FREE; i++) {
		ir = &pcpu->pc_irpool[i];
		ir->ir_next = pcpu->pc_irfree;
		pcpu->pc_irfree = ir;
	}
}

/*
 * Enter a spin-lock section: on first (outermost) entry raise %pil to
 * PIL_TICK and remember the previous level, then enter a critical section.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t pil;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		pil = rdpr(pil);
		wrpr(pil, 0, PIL_TICK);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_pil = pil;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

/*
 * Leave a spin-lock section; the outermost exit restores the %pil saved
 * by spinlock_enter().
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t pil;

	td = curthread;
	critical_exit();
	pil = td->td_md.md_saved_pil;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		wrpr(pil, pil, 0);
}

/*
 * Recursively search the OFW device tree (starting at and including
 * "node" and its peers) for the "cpu" node whose port/module ID matches
 * bspid, i.e. the boot processor.  Returns 0 if not found.
 */
static phandle_t
find_bsp(phandle_t node, uint32_t bspid, u_int cpu_impl)
{
	char type[sizeof("cpu")];
	phandle_t child;
	uint32_t portid;

	for (; node != 0; node = OF_peer(node)) {
		child = OF_child(node);
		if (child > 0) {
			/* Interior node: descend first. */
			child = find_bsp(child, bspid, cpu_impl);
			if (child > 0)
				return (child);
		} else {
			if (OF_getprop(node, "device_type", type,
			    sizeof(type)) <= 0)
				continue;
			if (strcmp(type, "cpu") != 0)
				continue;
			if (OF_getprop(node, cpu_portid_prop(cpu_impl),
			    &portid, sizeof(portid)) <= 0)
				continue;
			if (portid == bspid)
				return (node);
		}
	}
	return (0);
}

/*
 * Return the name of the OFW property holding a CPU's port/agent/CPU ID;
 * the property name differs between CPU implementations.
 */
const char *
cpu_portid_prop(u_int cpu_impl)
{

	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return ("upa-portid");
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return ("portid");
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return ("cpuid");
	default:
		return ("");
	}
}

/*
 * Read the module/agent/JBus ID of the executing CPU from the
 * implementation-specific configuration register.
 */
uint32_t
cpu_get_mid(u_int cpu_impl)
{

	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return (UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
		return (FIREPLANE_CR_GET_AID(ldxa(AA_FIREPLANE_CONFIG,
		    ASI_FIREPLANE_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return (JBUS_CR_GET_JID(ldxa(0, ASI_JBUS_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return (INTR_ID_GET_ID(ldxa(AA_INTR_ID, ASI_INTR_ID)));
	default:
		return (0);
	}
}

/*
 * Machine-dependent early kernel initialization, called from the locore
 * startup code on the BSP with loader(8) metadata in "mdp" and the Open
 * Firmware entry vector in "vec".  Brings up the CPU, OFW, pcpu area,
 * VM, interrupts, trap table and console, in that order.
 */
void
sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
{
	char *env;
	struct pcpu *pc;
	vm_offset_t end;
	vm_offset_t va;
	caddr_t kmdp;
	phandle_t root;
	u_int cpu_impl;

	end = 0;
	kmdp = NULL;

	/*
	 * Find out what kind of CPU we have first, for anything that changes
	 * behaviour.
	 */
	cpu_impl = VER_IMPL(rdpr(ver));

	/*
	 * Do CPU-specific initialization.
	 */
	if (cpu_impl >= CPU_IMPL_ULTRASPARCIII)
		cheetah_init(cpu_impl);
	else if (cpu_impl == CPU_IMPL_SPARC64V)
		zeus_init(cpu_impl);

	/*
	 * Clear (S)TICK timer (including NPT).
	 */
	tick_clear(cpu_impl);

	/*
	 * UltraSparc II[e,i] based systems come up with the tick interrupt
	 * enabled and a handler that resets the tick counter, causing DELAY()
	 * to not work properly when used early in boot.
	 * UltraSPARC III based systems come up with the system tick interrupt
	 * enabled, causing an interrupt storm on startup since they are not
	 * handled.
	 */
	tick_stop(cpu_impl);

	/*
	 * Set up Open Firmware entry points.
	 */
	ofw_tba = rdpr(tba);
	ofw_vec = (u_long)vec;

	/*
	 * Parse metadata if present and fetch parameters.  Must be before the
	 * console is inited so cninit() gets the right value of boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *),
			    0);
			end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
			kernel_tlb_slots = MD_FETCH(kmdp, MODINFOMD_DTLB_SLOTS,
			    int);
			kernel_tlbs = (void *)preload_search_info(kmdp,
			    MODINFO_METADATA | MODINFOMD_DTLB);
		}
	}

	init_param1();

	/*
	 * Initialize Open Firmware (needed for console).
	 */
	OF_install(OFW_STD_DIRECT, 0);
	OF_init(ofw_entry);

	/*
	 * Prime our per-CPU data page for use.  Note, we are using it for
	 * our stack, so don't pass the real size (PAGE_SIZE) to pcpu_init
	 * or it'll zero it out from under us.
	 */
	pc = (struct pcpu *)(pcpu0 + (PCPU_PAGES * PAGE_SIZE)) - 1;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_addr = (vm_offset_t)pcpu0;
	pc->pc_impl = cpu_impl;
	pc->pc_mid = cpu_get_mid(cpu_impl);
	pc->pc_tlb_ctx = TLB_CTX_USER_MIN;
	pc->pc_tlb_ctx_min = TLB_CTX_USER_MIN;
	pc->pc_tlb_ctx_max = TLB_CTX_USER_MAX;

	/*
	 * Determine the OFW node and frequency of the BSP (and ensure the
	 * BSP is in the device tree in the first place).
	 */
	root = OF_peer(0);
	pc->pc_node = find_bsp(root, pc->pc_mid, cpu_impl);
	if (pc->pc_node == 0)
		OF_panic("%s: cannot find boot CPU node", __func__);
	if (OF_getprop(pc->pc_node, "clock-frequency", &pc->pc_clock,
	    sizeof(pc->pc_clock)) <= 0)
		OF_panic("%s: cannot determine boot CPU clock", __func__);

	/*
	 * Panic if there is no metadata.  Most likely the kernel was booted
	 * directly, instead of through loader(8).
	 */
	if (mdp == NULL || kmdp == NULL || end == 0 ||
	    kernel_tlb_slots == 0 || kernel_tlbs == NULL)
		OF_panic("%s: missing loader metadata.\nThis probably means "
		    "you are not using loader(8).", __func__);

	/*
	 * Work around the broken loader behavior of not demapping no
	 * longer used kernel TLB slots when unloading the kernel or
	 * modules.
	 */
	for (va = KERNBASE + (kernel_tlb_slots - 1) * PAGE_SIZE_4M;
	    va >= roundup2(end, PAGE_SIZE_4M); va -= PAGE_SIZE_4M) {
		if (bootverbose)
			OF_printf("demapping unused kernel TLB slot "
			    "(va %#lx - %#lx)\n", va, va + PAGE_SIZE_4M - 1);
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
		    ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
		    ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
		kernel_tlb_slots--;
	}

	/*
	 * Determine the TLB slot maxima, which are expected to be
	 * equal across all CPUs.
	 * NB: for cheetah-class CPUs, these properties only refer
	 * to the t16s.
	 */
	if (OF_getprop(pc->pc_node, "#dtlb-entries", &dtlb_slots,
	    sizeof(dtlb_slots)) == -1)
		OF_panic("%s: cannot determine number of dTLB slots",
		    __func__);
	if (OF_getprop(pc->pc_node, "#itlb-entries", &itlb_slots,
	    sizeof(itlb_slots)) == -1)
		OF_panic("%s: cannot determine number of iTLB slots",
		    __func__);

	/*
	 * Initialize and enable the caches.  Note that this may include
	 * applying workarounds.
	 */
	cache_init(pc);
	cache_enable(cpu_impl);
	uma_set_align(pc->pc_cache.dc_linesize - 1);

	/* Select block copy/zero routines; VIS variants where supported. */
	cpu_block_copy = bcopy;
	cpu_block_zero = bzero;
	getenv_int("machdep.use_vis", &cpu_use_vis);
	if (cpu_use_vis) {
		switch (cpu_impl) {
		case CPU_IMPL_SPARC64:
		case CPU_IMPL_ULTRASPARCI:
		case CPU_IMPL_ULTRASPARCII:
		case CPU_IMPL_ULTRASPARCIIi:
		case CPU_IMPL_ULTRASPARCIIe:
		case CPU_IMPL_ULTRASPARCIII:	/* NB: we've disabled P$. */
		case CPU_IMPL_ULTRASPARCIIIp:
		case CPU_IMPL_ULTRASPARCIIIi:
		case CPU_IMPL_ULTRASPARCIV:
		case CPU_IMPL_ULTRASPARCIVp:
		case CPU_IMPL_ULTRASPARCIIIip:
			cpu_block_copy = spitfire_block_copy;
			cpu_block_zero = spitfire_block_zero;
			break;
		case CPU_IMPL_SPARC64V:
			cpu_block_copy = zeus_block_copy;
			cpu_block_zero = zeus_block_zero;
			break;
		}
	}

#ifdef SMP
	mp_init();
#endif

	/*
	 * Initialize virtual memory and calculate physmem.
	 */
	pmap_bootstrap(cpu_impl);

	/*
	 * Initialize tunables.
	 */
	init_param2(physmem);
	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Initialize the interrupt tables.
	 */
	intr_init1();

	/*
	 * Initialize proc0, set kstack0, frame0, curthread and curpcb.
	 */
	proc_linkup0(&proc0, &thread0);
	proc0.p_md.md_sigtramp = NULL;
	proc0.p_md.md_utrap = NULL;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	frame0.tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_PRIV;
	thread0.td_frame = &frame0;
	pc->pc_curthread = &thread0;
	pc->pc_curpcb = thread0.td_pcb;

	/*
	 * Initialize global registers.
	 */
	cpu_setregs(pc);

	/*
	 * Take over the trap table via the PROM.  Using the PROM for this
	 * is necessary in order to set obp-control-relinquished to true
	 * within the PROM so obtaining /virtual-memory/translations doesn't
	 * trigger a fatal reset error or worse things further down the road.
	 * XXX it should be possible to use this solely instead of writing
	 * %tba in cpu_setregs().  Doing so causes a hang however.
	 *
	 * NB: the low-level console drivers require a working DELAY() and
	 * some compiler optimizations may cause the curthread accesses of
	 * mutex(9) to be factored out even if the latter aren't actually
	 * called.  Both of these require PCPU_REG to be set.  However, we
	 * can't set PCPU_REG without also taking over the trap table or the
	 * firmware will overwrite it.
	 */
	sun4u_set_traptable(tl0_base);

	/*
	 * Initialize the dynamic per-CPU area for the BSP and the message
	 * buffer (after setting the trap table).
	 */
	dpcpu_init(dpcpu0, 0);
	msgbufinit(msgbufp, msgbufsize);

	/*
	 * Initialize mutexes.
	 */
	mutex_init();

	/*
	 * Initialize console now that we have a reasonable set of system
	 * services.
	 */
	cninit();

	/*
	 * Finish the interrupt initialization now that mutexes work and
	 * enable them.
	 */
	intr_init2();
	wrpr(pil, 0, 0);
	wrpr(pstate, 0, PSTATE_KERNEL);

	OF_getprop(root, "name", sparc64_model, sizeof(sparc64_model) - 1);

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}

/*
 * Deliver a signal to the current thread: build a sigframe on the user
 * stack (or alternate signal stack), copy it out, and redirect the
 * trapframe to the process's signal trampoline.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct trapframe *tf;
	struct sigframe *sfp;
	struct sigacts *psp;
	struct sigframe sf;
	struct thread *td;
	struct frame *fp;
	struct proc *p;
	u_long sp;
	int oonstack;
	int sig;

	oonstack = 0;
	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	/* %sp is stored biased by SPOFF on sparc64. */
	sp = tf->tf_sp + SPOFF;
	oonstack = sigonstack(sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Make sure we have a signal trampoline to return to. */
	if (p->p_md.md_sigtramp == NULL) {
		/*
		 * No signal trampoline... kill the process.
		 */
		CTR0(KTR_SIG, "sendsig: no sigtramp");
		printf("sendsig: %s is too old, rebuild it\n", p->p_comm);
		sigexit(td, sig);
		/* NOTREACHED */
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe));
	} else
		sfp = (struct sigframe *)sp - 1;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	fp = (struct frame *)sfp - 1;

	/* Build the argument list for the signal handler. */
	tf->tf_out[0] = sig;
	tf->tf_out[2] = (register_t)&sfp->sf_uc;
	tf->tf_out[4] = (register_t)catcher;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		tf->tf_out[1] = (register_t)&sfp->sf_si;

		/* Fill in POSIX parts. */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig;	/* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		tf->tf_out[1] = ksi->ksi_code;
		tf->tf_out[3] = (register_t)ksi->ksi_addr;
	}

	/* Copy the sigframe out to the user's stack. */
	if (rwindow_save(td) != 0 || copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    suword(&fp->fr_in[6], tf->tf_out[6]) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

	/* Resume execution at the signal trampoline. */
	tf->tf_tpc = (u_long)p->p_md.md_sigtramp;
	tf->tf_tnpc = tf->tf_tpc + 4;
	tf->tf_sp = (u_long)fp - SPOFF;

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#lx sp=%#lx", td, tf->tf_tpc,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

#ifndef _SYS_SYSPROTO_H_
struct sigreturn_args {
	/*
	 * NOTE(review): member is named "ucp" here but the function body
	 * below dereferences uap->sigcntxp (the sysproto.h name) -- this
	 * fallback declaration looks inconsistent; confirm against
	 * sys/sysproto.h before relying on it.
	 */
	ucontext_t *ucp;
};
#endif

/*
 * MPSAFE
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	struct proc *p;
	mcontext_t *mc;
	ucontext_t uc;
	int error;

	p = td->td_proc;
	/* Spill register windows before touching the user context. */
	if (rwindow_save(td)) {
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	mc = &uc.uc_mcontext;
	error = set_mcontext(td, mc);
	if (error != 0)
		return (error);

	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	CTR4(KTR_SIG, "sigreturn: return td=%p pc=%#lx sp=%#lx tstate=%#lx",
	    td, mc->_mc_tpc, mc->_mc_sp, mc->_mc_tstate);
	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe.  This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger.  We have the context in the trapframe, but base the trace
 * on the PCB.  The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_pc = tf->tf_tpc;
	pcb->pcb_sp = tf->tf_sp;
}

/*
 * Export the thread's machine context (for getcontext(2)/signal
 * delivery); GET_MC_CLEAR_RET zeroes the syscall return registers.
 */
int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
	struct trapframe *tf;
	struct pcb *pcb;

	tf = td->td_frame;
	pcb = td->td_pcb;
	/*
	 * Copy the registers which will be restored by tl0_ret() from the
	 * trapframe.
	 * Note that we skip %g7 which is used as the userland TLS register
	 * and %wstate.
	 */
	mc->_mc_flags = _MC_VERSION;
	mc->mc_global[1] = tf->tf_global[1];
	mc->mc_global[2] = tf->tf_global[2];
	mc->mc_global[3] = tf->tf_global[3];
	mc->mc_global[4] = tf->tf_global[4];
	mc->mc_global[5] = tf->tf_global[5];
	mc->mc_global[6] = tf->tf_global[6];
	if (flags & GET_MC_CLEAR_RET) {
		mc->mc_out[0] = 0;
		mc->mc_out[1] = 0;
	} else {
		mc->mc_out[0] = tf->tf_out[0];
		mc->mc_out[1] = tf->tf_out[1];
	}
	mc->mc_out[2] = tf->tf_out[2];
	mc->mc_out[3] = tf->tf_out[3];
	mc->mc_out[4] = tf->tf_out[4];
	mc->mc_out[5] = tf->tf_out[5];
	mc->mc_out[6] = tf->tf_out[6];
	mc->mc_out[7] = tf->tf_out[7];
	mc->_mc_fprs = tf->tf_fprs;
	mc->_mc_fsr = tf->tf_fsr;
	mc->_mc_gsr = tf->tf_gsr;
	mc->_mc_tnpc = tf->tf_tnpc;
	mc->_mc_tpc = tf->tf_tpc;
	mc->_mc_tstate = tf->tf_tstate;
	mc->_mc_y = tf->tf_y;
	/* Snapshot the FPU state atomically w.r.t. preemption. */
	critical_enter();
	if ((tf->tf_fprs & FPRS_FEF) != 0) {
		savefpctx(pcb->pcb_ufp);
		tf->tf_fprs &= ~FPRS_FEF;
		pcb->pcb_flags |= PCB_FEF;
	}
	if ((pcb->pcb_flags & PCB_FEF) != 0) {
		bcopy(pcb->pcb_ufp, mc->mc_fp, sizeof(mc->mc_fp));
		mc->_mc_fprs |= FPRS_FEF;
	}
	critical_exit();
	return (0);
}

/*
 * Install a machine context (for setcontext(2)/sigreturn(2)); rejects
 * contexts with an insecure %tstate or a mismatched version.
 */
int
set_mcontext(struct thread *td, mcontext_t *mc)
{
	struct trapframe *tf;
	struct pcb *pcb;

	if (!TSTATE_SECURE(mc->_mc_tstate) ||
	    (mc->_mc_flags & ((1L << _MC_VERSION_BITS) - 1)) != _MC_VERSION)
		return (EINVAL);
	tf = td->td_frame;
	pcb = td->td_pcb;
	/* Make sure the windows are spilled first. */
	flushw();
	/*
	 * Copy the registers which will be restored by tl0_ret() to the
	 * trapframe.
	 * Note that we skip %g7 which is used as the userland TLS register
	 * and %wstate.
	 */
	tf->tf_global[1] = mc->mc_global[1];
	tf->tf_global[2] = mc->mc_global[2];
	tf->tf_global[3] = mc->mc_global[3];
	tf->tf_global[4] = mc->mc_global[4];
	tf->tf_global[5] = mc->mc_global[5];
	tf->tf_global[6] = mc->mc_global[6];
	tf->tf_out[0] = mc->mc_out[0];
	tf->tf_out[1] = mc->mc_out[1];
	tf->tf_out[2] = mc->mc_out[2];
	tf->tf_out[3] = mc->mc_out[3];
	tf->tf_out[4] = mc->mc_out[4];
	tf->tf_out[5] = mc->mc_out[5];
	tf->tf_out[6] = mc->mc_out[6];
	tf->tf_out[7] = mc->mc_out[7];
	tf->tf_fprs = mc->_mc_fprs;
	tf->tf_fsr = mc->_mc_fsr;
	tf->tf_gsr = mc->_mc_gsr;
	tf->tf_tnpc = mc->_mc_tnpc;
	tf->tf_tpc = mc->_mc_tpc;
	tf->tf_tstate = mc->_mc_tstate;
	tf->tf_y = mc->_mc_y;
	if ((mc->_mc_fprs & FPRS_FEF) != 0) {
		/* FP state will be reloaded lazily on next FP use. */
		tf->tf_fprs = 0;
		bcopy(mc->mc_fp, pcb->pcb_ufp, sizeof(pcb->pcb_ufp));
		pcb->pcb_flags |= PCB_FEF;
	}
	return (0);
}

/*
 * Exit the kernel and execute a firmware call that will not return, as
 * specified by the arguments.
 */
void
cpu_shutdown(void *args)
{

#ifdef SMP
	cpu_mp_shutdown();
#endif
	ofw_exit(args);
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	struct pcpu *pc;

	pc = pcpu_find(cpu_id);
	if (pc == NULL || rate == NULL)
		return (EINVAL);
	*rate = pc->pc_clock;
	return (0);
}

/*
 * Duplicate OF_exit() with a different firmware call function that restores
 * the trap table, otherwise a RED state exception is triggered in at least
 * some firmware versions.
 */
void
cpu_halt(void)
{
	static struct {
		cell_t name;
		cell_t nargs;
		cell_t nreturns;
	} args = {
		(cell_t)"exit",
		0,
		0
	};

	cpu_shutdown(&args);
}

/*
 * shutdown_final handler: power off via the firmware's SUNW,power-off
 * service, or return to the firmware on halt.
 */
static void
sparc64_shutdown_final(void *dummy, int howto)
{
	static struct {
		cell_t name;
		cell_t nargs;
		cell_t nreturns;
	} args = {
		(cell_t)"SUNW,power-off",
		0,
		0
	};

	/* Turn the power off? */
	if ((howto & RB_POWEROFF) != 0)
		cpu_shutdown(&args);
	/* In case of halt, return to the firmware. */
	if ((howto & RB_HALT) != 0)
		cpu_halt();
}

void
cpu_idle(int busy)
{

	/* Insert code to halt (until next interrupt) for the idle loop. */
}

int
cpu_idle_wakeup(int cpu)
{

	return (1);
}

/*
 * ptrace support: redirect a thread's user PC (and the delay-slot nPC).
 */
int
ptrace_set_pc(struct thread *td, u_long addr)
{

	td->td_frame->tf_tpc = addr;
	td->td_frame->tf_tnpc = addr + 4;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{

	/* TODO; */
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

	/* TODO; */
	return (0);
}

/*
 * Set up the registers of a freshly exec'd process: clear the PCB and
 * trapframe and point the PC at the image entry point.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf;
	struct pcb *pcb;
	struct proc *p;
	u_long sp;

	/* XXX no cpu_exec */
	p = td->td_proc;
	p->p_md.md_sigtramp = NULL;
	if (p->p_md.md_utrap != NULL) {
		utrap_free(p->p_md.md_utrap);
		p->p_md.md_utrap = NULL;
	}

	pcb = td->td_pcb;
	tf = td->td_frame;
	sp = rounddown(stack, 16);
	bzero(pcb, sizeof(*pcb));
	bzero(tf, sizeof(*tf));
	tf->tf_out[0] = stack;
	tf->tf_out[3] = p->p_sysent->sv_psstrings;
	tf->tf_out[6] = sp - SPOFF - sizeof(struct frame);
	tf->tf_tnpc = imgp->entry_addr + 4;
	tf->tf_tpc = imgp->entry_addr;
	/*
	 * While we could adhere to the memory model indicated in the ELF
	 * header, it turns out that just always using TSO performs best.
	 */
	tf->tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_MM_TSO;

	td->td_retval[0] = tf->tf_out[0];
	td->td_retval[1] = tf->tf_out[1];
}

/* Copy a thread's trapframe into a struct reg (layouts match; see CTASSERTs). */
int
fill_regs(struct thread *td, struct reg *regs)
{

	bcopy(td->td_frame, regs, sizeof(*regs));
	return (0);
}

/*
 * Install a struct reg into a thread's trapframe, refusing insecure
 * %tstate values and preserving the kernel-managed %wstate.
 */
int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	if (!TSTATE_SECURE(regs->r_tstate))
		return (EINVAL);
	tf = td->td_frame;
	regs->r_wstate = tf->tf_wstate;
	bcopy(regs, tf, sizeof(*regs));
	return (0);
}

/* Hardware debug registers are not supported on sparc64. */
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}

/* Export the userland FPU state from the PCB/trapframe. */
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct trapframe *tf;
	struct pcb *pcb;

	pcb = td->td_pcb;
	tf = td->td_frame;
	bcopy(pcb->pcb_ufp, fpregs->fr_regs, sizeof(fpregs->fr_regs));
	fpregs->fr_fsr = tf->tf_fsr;
	fpregs->fr_gsr = tf->tf_gsr;
	return (0);
}

/* Install userland FPU state; clears FPRS_FEF so it is reloaded lazily. */
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct trapframe *tf;
	struct pcb *pcb;

	pcb = td->td_pcb;
	tf = td->td_frame;
	tf->tf_fprs &= ~FPRS_FEF;
	bcopy(fpregs->fr_regs, pcb->pcb_ufp, sizeof(pcb->pcb_ufp));
	tf->tf_fsr = fpregs->fr_fsr;
	tf->tf_gsr = fpregs->fr_gsr;
	return (0);
}

/*
 * Allocate a reference-counted user trap table descriptor (refcount
 * starts at 1; see utrap_hold()/utrap_free()).
 */
struct md_utrap *
utrap_alloc(void)
{
	struct md_utrap *ut;

	ut = malloc(sizeof(struct md_utrap), M_SUBPROC, M_WAITOK | M_ZERO);
	ut->ut_refcnt = 1;
	return (ut);
}

/*
 * Drop a reference on a user trap table; frees it when the last
 * reference goes away.  NULL is accepted as a no-op.
 */
void
utrap_free(struct md_utrap *ut)
{
	int refcnt;

	if (ut == NULL)
		return;
	mtx_pool_lock(mtxpool_sleep, ut);
	ut->ut_refcnt--;
	refcnt = ut->ut_refcnt;
	mtx_pool_unlock(mtxpool_sleep, ut);
	if (refcnt == 0)
		free(ut, M_SUBPROC);
}

/*
 * Acquire an additional reference on a user trap table (e.g. across
 * fork); NULL is accepted and returned unchanged.
 */
struct md_utrap *
utrap_hold(struct md_utrap *ut)
{

	if (ut == NULL)
		return (NULL);
	mtx_pool_lock(mtxpool_sleep, ut);
	ut->ut_refcnt++;
	mtx_pool_unlock(mtxpool_sleep, ut);
	return (ut);
}