/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/i386/i386/machdep.c 282065 2015-04-27 08:02:12Z kib $");

#include "opt_apic.h"
#include "opt_atalk.h"
#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_platform.h"
#include "opt_xbox.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP
#include <sys/smp.h>
#endif
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>
#else
#include <isa/rtc.h>
#endif

#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/vm86.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef FDT
#include <x86/fdt.h>
#endif

#ifdef DEV_APIC
#include <machine/apicvar.h>
#endif

#ifdef DEV_ISA
#include <x86/isa/icu.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>

int arch_i386_is_xbox = 0;
uint32_t arch_i386_xbox_memsize = 0;
#endif

#ifdef XEN
/* XEN includes */
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>
#include <xen/xen_intr.h>

void Xhypervisor_callback(void);
void failsafe_callback(void);

extern trap_info_t trap_table[];
struct proc_ldt default_proc_ldt;
extern int init_first;
int running_xen = 1;
extern unsigned long physfree;
#endif /* XEN */

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern register_t init386(int first);
extern void dblfault_handler(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpusave, size_t xfpusave_len);
static int  set_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpustate, size_t xfpustate_len);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

/* Intel ICH registers */
#define ICH_PMBASE	0x400
#define ICH_SMI_EN	(ICH_PMBASE + 0x30)

int	_udatasel, _ucodesel;
u_int	basemem;

#ifdef PC98
int	need_pre_dma_flush;	/* If 1, use wbinvd before DMA transfer. */
int	need_post_dma_flush;	/* If 1, use invd after DMA transfer. */

static int	ispc98 = 1;
SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");
#endif

int cold = 1;

#ifdef COMPAT_43
static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif

long Maxmem = 0;
long realmem = 0;

#ifdef PAE
FEATURE(pae, "Physical Address Extensions");
#endif

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

static void
cpu_startup(dummy)
	void *dummy;
{
	uintmax_t memsize;
	char *sysenv;

#ifndef PC98
	/*
	 * On MacBooks, we need to disallow the legacy USB circuit to
	 * generate an SMI# because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook4,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}
#endif /* !PC98 */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif

	/*
	 * Display physical memory if SMBIOS reports reasonable amount.
	 */
	memsize = 0;
	sysenv = getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
		freeenv(sysenv);
	}
	if (memsize < ptoa((uintmax_t)cnt.v_free_count))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory  = %ju (%ju MB)\n", memsize, memsize >> 20);
	realmem = atop(memsize);
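
	/*
	 * Note: atop() converts a byte count to a page count, so realmem
	 * is kept in pages while the figure printed above is in bytes.
	 */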

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
#ifndef XEN
	cpu_setregs();
#endif
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by call
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
#ifdef COMPAT_43
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
		sf.sf_addr = 0;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	if (p->p_sysent->sv_sigcode_base != 0) {
		regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
		    szosigcode;
	} else {
		/* a.out sysentvec does not use shared page */
		regs->tf_eip = p->p_sysent->sv_psstrings - szosigcode;
	}
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
	    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	bzero(sf.sf_uc.uc_mcontext.__spare__,
	    sizeof(sf.sf_uc.uc_mcontext.__spare__));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
	    szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif	/* COMPAT_FREEBSD4 */

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	struct segment_descriptor *sdp;
	char *xfpusave;
	size_t xfpusave_len;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

#ifdef CPU_ENABLE_SSE
	if (cpu_max_ext_state_size > sizeof(union savefpu) && use_xsave) {
		xfpusave_len = cpu_max_ext_state_size - sizeof(union savefpu);
		xfpusave = __builtin_alloca(xfpusave_len);
	} else {
#else
	{
#endif
		xfpusave_len = 0;
		xfpusave = NULL;
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
	fpstate_drop(td);
	/*
	 * Unconditionally fill the fsbase and gsbase into the mcontext.
	 */
	sdp = &td->td_pcb->pcb_fsd;
	sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	bzero(sf.sf_uc.uc_mcontext.mc_spare2,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare2));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - 128;
	if (xfpusave != NULL) {
		sp -= xfpusave_len;
		sp = (char *)((unsigned int)sp & ~0x3F);
		sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
	}
	sp -= sizeof(struct sigframe);

	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
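
	/*
	 * Layout note (descriptive only): the frame is carved out 128
	 * bytes below the interrupted %esp, or at the top of the
	 * alternate stack; any extended FPU state is placed first,
	 * aligned down to 64 bytes, and the sigframe itself sits below
	 * it, aligned down to 16 bytes.
	 */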
	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    (xfpusave != NULL && copyout(xfpusave,
	    (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
	    != 0)) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = p->p_sysent->sv_sigcode_base;
	if (regs->tf_eip == 0)
		regs->tf_eip = p->p_sysent->sv_psstrings - szsigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
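
/*
 * A note on the security checks in the sigreturn flavors below:
 * EFL_SECURE() accepts a new eflags value only if it differs from the
 * current one in PSL_USERCHANGE bits (arithmetic flags, direction,
 * trace and similar), so privileged bits such as PSL_IOPL cannot be
 * forged, and CS_SECURE() accepts only user-privilege code selectors
 * (ISPL(cs) == SEL_UPL).
 */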
816 * 817 * MPSAFE 818 */ 819#ifdef COMPAT_43 820int 821osigreturn(td, uap) 822 struct thread *td; 823 struct osigreturn_args /* { 824 struct osigcontext *sigcntxp; 825 } */ *uap; 826{ 827 struct osigcontext sc; 828 struct trapframe *regs; 829 struct osigcontext *scp; 830 int eflags, error; 831 ksiginfo_t ksi; 832 833 regs = td->td_frame; 834 error = copyin(uap->sigcntxp, &sc, sizeof(sc)); 835 if (error != 0) 836 return (error); 837 scp = ≻ 838 eflags = scp->sc_ps; 839 if (eflags & PSL_VM) { 840 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 841 struct vm86_kernel *vm86; 842 843 /* 844 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 845 * set up the vm86 area, and we can't enter vm86 mode. 846 */ 847 if (td->td_pcb->pcb_ext == 0) 848 return (EINVAL); 849 vm86 = &td->td_pcb->pcb_ext->ext_vm86; 850 if (vm86->vm86_inited == 0) 851 return (EINVAL); 852 853 /* Go back to user mode if both flags are set. */ 854 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) { 855 ksiginfo_init_trap(&ksi); 856 ksi.ksi_signo = SIGBUS; 857 ksi.ksi_code = BUS_OBJERR; 858 ksi.ksi_addr = (void *)regs->tf_eip; 859 trapsignal(td, &ksi); 860 } 861 862 if (vm86->vm86_has_vme) { 863 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 864 (eflags & VME_USERCHANGE) | PSL_VM; 865 } else { 866 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 867 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | 868 (eflags & VM_USERCHANGE) | PSL_VM; 869 } 870 tf->tf_vm86_ds = scp->sc_ds; 871 tf->tf_vm86_es = scp->sc_es; 872 tf->tf_vm86_fs = scp->sc_fs; 873 tf->tf_vm86_gs = scp->sc_gs; 874 tf->tf_ds = _udatasel; 875 tf->tf_es = _udatasel; 876 tf->tf_fs = _udatasel; 877 } else { 878 /* 879 * Don't allow users to change privileged or reserved flags. 880 */ 881 if (!EFL_SECURE(eflags, regs->tf_eflags)) { 882 return (EINVAL); 883 } 884 885 /* 886 * Don't allow users to load a valid privileged %cs. Let the 887 * hardware check for invalid selectors, excess privilege in 888 * other selectors, invalid %eip's and invalid %esp's. 889 */ 890 if (!CS_SECURE(scp->sc_cs)) { 891 ksiginfo_init_trap(&ksi); 892 ksi.ksi_signo = SIGBUS; 893 ksi.ksi_code = BUS_OBJERR; 894 ksi.ksi_trapno = T_PROTFLT; 895 ksi.ksi_addr = (void *)regs->tf_eip; 896 trapsignal(td, &ksi); 897 return (EINVAL); 898 } 899 regs->tf_ds = scp->sc_ds; 900 regs->tf_es = scp->sc_es; 901 regs->tf_fs = scp->sc_fs; 902 } 903 904 /* Restore remaining registers. 
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	    SIGPROCMASK_OLD);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * MPSAFE
 */
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct trapframe *regs;
	struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			uprintf("pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}
#endif	/* COMPAT_FREEBSD4 */

/*
 * MPSAFE
 */
int
sys_sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct proc *p;
	struct trapframe *regs;
	ucontext_t *ucp;
	char *xfpustate;
	size_t xfpustate_len;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	p = td->td_proc;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
		uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
		    td->td_name, ucp->uc_mcontext.mc_flags);
		return (EINVAL);
	}
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
			xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
			if (xfpustate_len > cpu_max_ext_state_size -
			    sizeof(union savefpu)) {
				uprintf(
			    "pid %d (%s): sigreturn xfpustate_len = 0x%zx\n",
				    p->p_pid, td->td_name, xfpustate_len);
				return (EINVAL);
			}
			xfpustate = __builtin_alloca(xfpustate_len);
			error = copyin(
			    (const void *)uc.uc_mcontext.mc_xfpustate,
			    xfpustate, xfpustate_len);
			if (error != 0) {
				uprintf(
			"pid %d (%s): sigreturn copying xfpustate failed\n",
				    p->p_pid, td->td_name);
				return (error);
			}
		} else {
			xfpustate = NULL;
			xfpustate_len = 0;
		}
		ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate,
		    xfpustate_len);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	uint64_t tsc1, tsc2;
	uint64_t acnt, mcnt, perf;
	register_t reg;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);

	/*
	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9) based logic fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

#ifdef SMP
	if (smp_cpus > 1) {
		/* Schedule ourselves on the indicated cpu. */
		thread_lock(curthread);
		sched_bind(curthread, cpu_id);
		thread_unlock(curthread);
	}
#endif
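
	/*
	 * How the estimate below works (illustrative numbers only):
	 * DELAY(1000) busy-waits for 1 ms, so tsc2 - tsc1 is the TSC
	 * tick count per millisecond and scaling by 1000 gives ticks
	 * per second.  On invariant-TSC CPUs the APERF/MPERF ratio
	 * folds the average effective frequency back in, e.g.
	 * acnt / mcnt == 1/2 (the CPU ran at half speed) halves the
	 * estimate.
	 */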
	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		tsc1 = rdtsc();
		DELAY(1000);
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		tsc2 = rdtsc();
		intr_restore(reg);
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
	} else {
		tsc1 = rdtsc();
		DELAY(1000);
		tsc2 = rdtsc();
		intr_restore(reg);
		*rate = (tsc2 - tsc1) * 1000;
	}

#ifdef SMP
	if (smp_cpus > 1) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
#endif

	return (0);
}

#ifdef XEN

static void
idle_block(void)
{

	HYPERVISOR_sched_op(SCHEDOP_block, 0);
}

void
cpu_halt(void)
{
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}

int scheduler_running;

static void
cpu_idle_hlt(sbintime_t sbt)
{

	scheduler_running = 1;
	enable_intr();
	idle_block();
}

#else
/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		halt();
}

#endif

void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */
static int	cpu_ident_amdc1e = 0;	/* AMD C1E supported. */
static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");

#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2

#ifndef PC98
static void
cpu_idle_acpi(sbintime_t sbt)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook(sbt);
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}
#endif /* !PC98 */

#ifndef XEN
static void
cpu_idle_hlt(sbintime_t sbt)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/*
	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.  Finally, please note
	 * that on x86 this works fine because sti delays interrupt
	 * recognition until the instruction following it has executed,
	 * while IF is set to 1 immediately; a pending interrupt is
	 * therefore delivered only once hlt is reached, and wakes it.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}
#endif
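
/*
 * A short note on the MONITOR/MWAIT protocol used by cpu_idle_mwait()
 * below: the monitor is armed on the per-CPU state word, the word is
 * re-checked, and only then does the CPU execute mwait.  A plain store
 * to that word from another CPU (see cpu_idle_wakeup()) terminates the
 * wait, so no wakeup IPI is needed.
 */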

/*
 * MWAIT cpu power states.  Lower 4 bits are sub-states.
 */
#define	MWAIT_C0	0xf0
#define	MWAIT_C1	0x00
#define	MWAIT_C2	0x10
#define	MWAIT_C3	0x20
#define	MWAIT_C4	0x30

static void
cpu_idle_mwait(sbintime_t sbt)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_MWAIT;

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable()) {
		enable_intr();
		*state = STATE_RUNNING;
		return;
	}
	cpu_monitor(state, 0, 0);
	if (*state == STATE_MWAIT)
		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
	else
		enable_intr();
	*state = STATE_RUNNING;
}

static void
cpu_idle_spin(sbintime_t sbt)
{
	int *state;
	int i;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_RUNNING;

	/*
	 * The sched_runnable() call is racy, but missing the check on
	 * one pass of the loop has little impact, if any (and it is
	 * much better than not checking at all).
	 */
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
}

/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)

static void
cpu_probe_amdc1e(void)
{

	/*
	 * Detect the presence of C1E capability mostly on latest
	 * dual-cores (or future) k8 family.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
	}
}

#if defined(PC98) || defined(XEN)
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_hlt;
#else
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;
#endif

void
cpu_idle(int busy)
{
#ifndef XEN
	uint64_t msr;
#endif
	sbintime_t sbt = -1;

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#if defined(MP_WATCHDOG) && !defined(XEN)
	ap_watchdog(PCPU_GET(cpuid));
#endif
#ifndef XEN
	/* If we are busy - try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			goto out;
		}
	}
#endif

	/* If we have time - switch timers into idle mode. */
	if (!busy) {
		critical_enter();
		sbt = cpu_idleclock();
	}

#ifndef XEN
	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
	}
#endif

	/* Call main idle method. */
	cpu_idle_fn(sbt);

	/* Switch timers back into active mode. */
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
#ifndef XEN
out:
#endif
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{
	struct pcpu *pcpu;
	int *state;

	pcpu = pcpu_find(cpu);
	state = (int *)pcpu->pc_monitorbuf;
	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (*state == STATE_SLEEPING)
		return (0);
	if (*state == STATE_MWAIT)
		*state = STATE_RUNNING;
	return (1);
}

/*
 * Ordered by speed/power consumption.
 */
struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_hlt, "hlt" },
#ifndef PC98
	{ cpu_idle_acpi, "acpi" },
#endif
	{ NULL, NULL }
};

static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
#ifndef PC98
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
#endif
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;
	char *p;
	int i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
#ifndef PC98
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
#endif
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");
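
/*
 * Example (from a root shell; output is illustrative, since the list
 * depends on CPU features and on whether an ACPI idle hook is set):
 *
 *	# sysctl machdep.idle_available
 *	machdep.idle_available: spin, mwait, hlt, acpi
 *	# sysctl machdep.idle=hlt
 */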

/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pc->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = imgp->entry_addr;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = imgp->ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == curpcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");

static char bootmethod[16] = "BIOS";
SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
    "System firmware boot method");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;

#ifdef XEN
union descriptor *gdt;
union descriptor *ldt;
#else
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#endif
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern  vm_offset_t	proc0kstack;


/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUFS_SEL	2 %fs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUGS_SEL	3 %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUCODE_SEL	6 Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUDATA_SEL	7 Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
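/*
 * Note on the encoding used in these entries: with .ssd_gran = 1 the
 * 20-bit limit is counted in 4 KB pages, so .ssd_limit = 0xfffff
 * describes a flat 4 GB segment; with .ssd_gran = 0 (the TSS and LDT
 * descriptors below) the limit is counted in bytes.
 */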
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
#ifndef XEN
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{
	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = (int) ldt,
	.ssd_limit = sizeof(ldt)-1,
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = (int) ldt,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = (int) &dblfault_tss,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GNDIS_SEL	18 NDIS Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
#endif /* !XEN */
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16 ;
}

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm),
#ifdef KDTRACE_HOOKS
	IDTVEC(dtrace_ret),
#endif
#ifdef XENHVM
	IDTVEC(xen_intr_upcall),
#endif
	IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = (ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}
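
/*
 * Example ddb(4) session (output is illustrative): "show idt" lists
 * only the vectors whose handler differs from the reserved default,
 * e.g.
 *
 *	db> show idt
 *	  0	Xdiv
 *	  1	Xdbg
 *	  3	Xbpt
 */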
*/ 2039DB_SHOW_COMMAND(sysregs, db_show_sysregs) 2040{ 2041 uint64_t idtr, gdtr; 2042 2043 idtr = ridt(); 2044 db_printf("idtr\t0x%08x/%04x\n", 2045 (u_int)(idtr >> 16), (u_int)idtr & 0xffff); 2046 gdtr = rgdt(); 2047 db_printf("gdtr\t0x%08x/%04x\n", 2048 (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff); 2049 db_printf("ldtr\t0x%04x\n", rldt()); 2050 db_printf("tr\t0x%04x\n", rtr()); 2051 db_printf("cr0\t0x%08x\n", rcr0()); 2052 db_printf("cr2\t0x%08x\n", rcr2()); 2053 db_printf("cr3\t0x%08x\n", rcr3()); 2054 db_printf("cr4\t0x%08x\n", rcr4()); 2055} 2056#endif 2057 2058void 2059sdtossd(sd, ssd) 2060 struct segment_descriptor *sd; 2061 struct soft_segment_descriptor *ssd; 2062{ 2063 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 2064 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 2065 ssd->ssd_type = sd->sd_type; 2066 ssd->ssd_dpl = sd->sd_dpl; 2067 ssd->ssd_p = sd->sd_p; 2068 ssd->ssd_def32 = sd->sd_def32; 2069 ssd->ssd_gran = sd->sd_gran; 2070} 2071 2072#if !defined(PC98) && !defined(XEN) 2073static int 2074add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp) 2075{ 2076 int i, insert_idx, physmap_idx; 2077 2078 physmap_idx = *physmap_idxp; 2079 2080 if (boothowto & RB_VERBOSE) 2081 printf("SMAP type=%02x base=%016llx len=%016llx\n", 2082 smap->type, smap->base, smap->length); 2083 2084 if (smap->type != SMAP_TYPE_MEMORY) 2085 return (1); 2086 2087 if (smap->length == 0) 2088 return (1); 2089 2090#ifndef PAE 2091 if (smap->base > 0xffffffff) { 2092 printf("%uK of memory above 4GB ignored\n", 2093 (u_int)(smap->length / 1024)); 2094 return (1); 2095 } 2096#endif 2097 2098 /* 2099 * Find insertion point while checking for overlap. Start off by 2100 * assuming the new entry will be added to the end. 2101 */ 2102 insert_idx = physmap_idx + 2; 2103 for (i = 0; i <= physmap_idx; i += 2) { 2104 if (smap->base < physmap[i + 1]) { 2105 if (smap->base + smap->length <= physmap[i]) { 2106 insert_idx = i; 2107 break; 2108 } 2109 if (boothowto & RB_VERBOSE) 2110 printf( 2111 "Overlapping memory regions, ignoring second region\n"); 2112 return (1); 2113 } 2114 } 2115 2116 /* See if we can prepend to the next entry. */ 2117 if (insert_idx <= physmap_idx && 2118 smap->base + smap->length == physmap[insert_idx]) { 2119 physmap[insert_idx] = smap->base; 2120 return (1); 2121 } 2122 2123 /* See if we can append to the previous entry. */ 2124 if (insert_idx > 0 && smap->base == physmap[insert_idx - 1]) { 2125 physmap[insert_idx - 1] += smap->length; 2126 return (1); 2127 } 2128 2129 physmap_idx += 2; 2130 *physmap_idxp = physmap_idx; 2131 if (physmap_idx == PHYSMAP_SIZE) { 2132 printf( 2133 "Too many segments in the physical address map, giving up\n"); 2134 return (0); 2135 } 2136 2137 /* 2138 * Move the last 'N' entries down to make room for the new 2139 * entry if needed. 2140 */ 2141 for (i = physmap_idx; i > insert_idx; i -= 2) { 2142 physmap[i] = physmap[i - 2]; 2143 physmap[i + 1] = physmap[i - 1]; 2144 } 2145 2146 /* Insert the new entry. 
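 * (physmap[] holds one base/end pair per range: slot i is a start address
 * and slot i + 1 its exclusive end, with physmap_idx indexing the base of
 * the last pair; that is why the insertion and shift loops above walk the
 * array two entries at a time.)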
*/ 2147 physmap[insert_idx] = smap->base; 2148 physmap[insert_idx + 1] = smap->base + smap->length; 2149 return (1); 2150} 2151#endif /* !PC98 && !XEN */ 2152 2153#ifndef XEN 2154static void 2155basemem_setup(void) 2156{ 2157 vm_paddr_t pa; 2158 pt_entry_t *pte; 2159 int i; 2160 2161 if (basemem > 640) { 2162 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 2163 basemem); 2164 basemem = 640; 2165 } 2166 2167 /* 2168 * XXX if biosbasemem is now < 640, there is a `hole' 2169 * between the end of base memory and the start of 2170 * ISA memory. The hole may be empty or it may 2171 * contain BIOS code or data. Map it read/write so 2172 * that the BIOS can write to it. (Memory from 0 to 2173 * the physical end of the kernel is mapped read-only 2174 * to begin with and then parts of it are remapped. 2175 * The parts that aren't remapped form holes that 2176 * remain read-only and are unused by the kernel. 2177 * The base memory area is below the physical end of 2178 * the kernel and right now forms a read-only hole. 2179 * The part of it from PAGE_SIZE to 2180 * (trunc_page(biosbasemem * 1024) - 1) will be 2181 * remapped and used by the kernel later.) 2182 * 2183 * This code is similar to the code used in 2184 * pmap_mapdev, but since no memory needs to be 2185 * allocated we simply change the mapping. 2186 */ 2187 for (pa = trunc_page(basemem * 1024); 2188 pa < ISA_HOLE_START; pa += PAGE_SIZE) 2189 pmap_kenter(KERNBASE + pa, pa); 2190 2191 /* 2192 * Map pages between basemem and ISA_HOLE_START, if any, r/w into 2193 * the vm86 page table so that vm86 can scribble on them using 2194 * the vm86 map too. XXX: why 2 ways for this and only 1 way for 2195 * page 0, at least as initialized here? 2196 */ 2197 pte = (pt_entry_t *)vm86paddr; 2198 for (i = basemem / 4; i < 160; i++) 2199 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 2200} 2201#endif /* !XEN */ 2202 2203/* 2204 * Populate the (physmap) array with base/bound pairs describing the 2205 * available physical memory in the system, then test this memory and 2206 * build the phys_avail array describing the actually-available memory. 2207 * 2208 * If we cannot accurately determine the physical memory map, then use 2209 * value from the 0xE801 call, and failing that, the RTC. 2210 * 2211 * Total memory size may be set by the kernel environment variable 2212 * hw.physmem or the compile-time define MAXMEM. 2213 * 2214 * XXX first should be vm_paddr_t. 2215 */ 2216#ifdef PC98 2217static void 2218getmemsize(int first) 2219{ 2220 int off, physmap_idx, pa_indx, da_indx; 2221 u_long physmem_tunable, memtest; 2222 vm_paddr_t physmap[PHYSMAP_SIZE]; 2223 pt_entry_t *pte; 2224 quad_t dcons_addr, dcons_size; 2225 int i; 2226 int pg_n; 2227 u_int extmem; 2228 u_int under16; 2229 vm_paddr_t pa; 2230 2231 bzero(physmap, sizeof(physmap)); 2232 2233 /* XXX - some of EPSON machines can't use PG_N */ 2234 pg_n = PG_N; 2235 if (pc98_machine_type & M_EPSON_PC98) { 2236 switch (epson_machine_id) { 2237#ifdef WB_CACHE 2238 default: 2239#endif 2240 case EPSON_PC486_HX: 2241 case EPSON_PC486_HG: 2242 case EPSON_PC486_HA: 2243 pg_n = 0; 2244 break; 2245 } 2246 } 2247 2248 under16 = pc98_getmemsize(&basemem, &extmem); 2249 basemem_setup(); 2250 2251 physmap[0] = 0; 2252 physmap[1] = basemem * 1024; 2253 physmap_idx = 2; 2254 physmap[physmap_idx] = 0x100000; 2255 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024; 2256 2257 /* 2258 * Now, physmap contains a map of physical memory. 
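 * (At this point on PC98 that is at most the two ranges built just above:
 * physmap[0..1] covering conventional memory from 0 to basemem * 1024, and
 * physmap[2..3] covering extended memory starting at 1MB, i.e. 0x100000.)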
2259 */ 2260 2261#ifdef SMP 2262 /* make hole for AP bootstrap code */ 2263 physmap[1] = mp_bootaddress(physmap[1]); 2264#endif 2265 2266 /* 2267 * Maxmem isn't the "maximum memory", it's one larger than the 2268 * highest page of the physical address space. It should be 2269 * called something like "Maxphyspage". We may adjust this 2270 * based on ``hw.physmem'' and the results of the memory test. 2271 */ 2272 Maxmem = atop(physmap[physmap_idx + 1]); 2273 2274#ifdef MAXMEM 2275 Maxmem = MAXMEM / 4; 2276#endif 2277 2278 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable)) 2279 Maxmem = atop(physmem_tunable); 2280 2281 /* 2282 * By default keep the memtest enabled. Use a general name so that 2283 * one could eventually do more with the code than just disable it. 2284 */ 2285 memtest = 1; 2286 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest); 2287 2288 if (atop(physmap[physmap_idx + 1]) != Maxmem && 2289 (boothowto & RB_VERBOSE)) 2290 printf("Physical memory use set to %ldK\n", Maxmem * 4); 2291 2292 /* 2293 * If Maxmem has been increased beyond what the system has detected, 2294 * extend the last memory segment to the new limit. 2295 */ 2296 if (atop(physmap[physmap_idx + 1]) < Maxmem) 2297 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem); 2298 2299 /* 2300 * We need to divide chunk if Maxmem is larger than 16MB and 2301 * under 16MB area is not full of memory. 2302 * (1) system area (15-16MB region) is cut off 2303 * (2) extended memory is only over 16MB area (ex. Melco "HYPERMEMORY") 2304 */ 2305 if ((under16 != 16 * 1024) && (extmem > 15 * 1024)) { 2306 /* 15M - 16M region is cut off, so need to divide chunk */ 2307 physmap[physmap_idx + 1] = under16 * 1024; 2308 physmap_idx += 2; 2309 physmap[physmap_idx] = 0x1000000; 2310 physmap[physmap_idx + 1] = physmap[2] + extmem * 1024; 2311 } 2312 2313 /* call pmap initialization to make new kernel address space */ 2314 pmap_bootstrap(first); 2315 2316 /* 2317 * Size up each available chunk of physical memory. 2318 */ 2319 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 2320 pa_indx = 0; 2321 da_indx = 1; 2322 phys_avail[pa_indx++] = physmap[0]; 2323 phys_avail[pa_indx] = physmap[0]; 2324 dump_avail[da_indx] = physmap[0]; 2325 pte = CMAP3; 2326 2327 /* 2328 * Get dcons buffer address 2329 */ 2330 if (getenv_quad("dcons.addr", &dcons_addr) == 0 || 2331 getenv_quad("dcons.size", &dcons_size) == 0) 2332 dcons_addr = 0; 2333 2334 /* 2335 * physmap is in bytes, so when converting to page boundaries, 2336 * round up the start address and round down the end address. 2337 */ 2338 for (i = 0; i <= physmap_idx; i += 2) { 2339 vm_paddr_t end; 2340 2341 end = ptoa((vm_paddr_t)Maxmem); 2342 if (physmap[i + 1] < end) 2343 end = trunc_page(physmap[i + 1]); 2344 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 2345 int tmp, page_bad, full; 2346 int *ptr = (int *)CADDR3; 2347 2348 full = FALSE; 2349 /* 2350 * block out kernel memory as not available. 
2351 */ 2352 if (pa >= KERNLOAD && pa < first) 2353 goto do_dump_avail; 2354 2355 /* 2356 * block out dcons buffer 2357 */ 2358 if (dcons_addr > 0 2359 && pa >= trunc_page(dcons_addr) 2360 && pa < dcons_addr + dcons_size) 2361 goto do_dump_avail; 2362 2363 page_bad = FALSE; 2364 if (memtest == 0) 2365 goto skip_memtest; 2366 2367 /* 2368 * map page into kernel: valid, read/write,non-cacheable 2369 */ 2370 *pte = pa | PG_V | PG_RW | pg_n; 2371 invltlb(); 2372 2373 tmp = *(int *)ptr; 2374 /* 2375 * Test for alternating 1's and 0's 2376 */ 2377 *(volatile int *)ptr = 0xaaaaaaaa; 2378 if (*(volatile int *)ptr != 0xaaaaaaaa) 2379 page_bad = TRUE; 2380 /* 2381 * Test for alternating 0's and 1's 2382 */ 2383 *(volatile int *)ptr = 0x55555555; 2384 if (*(volatile int *)ptr != 0x55555555) 2385 page_bad = TRUE; 2386 /* 2387 * Test for all 1's 2388 */ 2389 *(volatile int *)ptr = 0xffffffff; 2390 if (*(volatile int *)ptr != 0xffffffff) 2391 page_bad = TRUE; 2392 /* 2393 * Test for all 0's 2394 */ 2395 *(volatile int *)ptr = 0x0; 2396 if (*(volatile int *)ptr != 0x0) 2397 page_bad = TRUE; 2398 /* 2399 * Restore original value. 2400 */ 2401 *(int *)ptr = tmp; 2402 2403skip_memtest: 2404 /* 2405 * Adjust array of valid/good pages. 2406 */ 2407 if (page_bad == TRUE) 2408 continue; 2409 /* 2410 * If this good page is a continuation of the 2411 * previous set of good pages, then just increase 2412 * the end pointer. Otherwise start a new chunk. 2413 * Note that "end" points one higher than end, 2414 * making the range >= start and < end. 2415 * If we're also doing a speculative memory 2416 * test and we at or past the end, bump up Maxmem 2417 * so that we keep going. The first bad page 2418 * will terminate the loop. 2419 */ 2420 if (phys_avail[pa_indx] == pa) { 2421 phys_avail[pa_indx] += PAGE_SIZE; 2422 } else { 2423 pa_indx++; 2424 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 2425 printf( 2426 "Too many holes in the physical address space, giving up\n"); 2427 pa_indx--; 2428 full = TRUE; 2429 goto do_dump_avail; 2430 } 2431 phys_avail[pa_indx++] = pa; /* start */ 2432 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 2433 } 2434 physmem++; 2435do_dump_avail: 2436 if (dump_avail[da_indx] == pa) { 2437 dump_avail[da_indx] += PAGE_SIZE; 2438 } else { 2439 da_indx++; 2440 if (da_indx == DUMP_AVAIL_ARRAY_END) { 2441 da_indx--; 2442 goto do_next; 2443 } 2444 dump_avail[da_indx++] = pa; /* start */ 2445 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */ 2446 } 2447do_next: 2448 if (full) 2449 break; 2450 } 2451 } 2452 *pte = 0; 2453 invltlb(); 2454 2455 /* 2456 * XXX 2457 * The last chunk must contain at least one page plus the message 2458 * buffer to avoid complicating other code (message buffer address 2459 * calculation, etc.). 2460 */ 2461 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 2462 round_page(msgbufsize) >= phys_avail[pa_indx]) { 2463 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 2464 phys_avail[pa_indx--] = 0; 2465 phys_avail[pa_indx--] = 0; 2466 } 2467 2468 Maxmem = atop(phys_avail[pa_indx]); 2469 2470 /* Trim off space for the message buffer. */ 2471 phys_avail[pa_indx] -= round_page(msgbufsize); 2472 2473 /* Map the message buffer. 
*/ 2474 for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE) 2475 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] + 2476 off); 2477 2478 PT_UPDATES_FLUSH(); 2479} 2480#else /* PC98 */ 2481static void 2482getmemsize(int first) 2483{ 2484 int has_smap, off, physmap_idx, pa_indx, da_indx; 2485 u_long physmem_tunable, memtest; 2486 vm_paddr_t physmap[PHYSMAP_SIZE]; 2487 pt_entry_t *pte; 2488 quad_t dcons_addr, dcons_size; 2489#ifndef XEN 2490 int hasbrokenint12, i, res; 2491 u_int extmem; 2492 struct vm86frame vmf; 2493 struct vm86context vmc; 2494 vm_paddr_t pa; 2495 struct bios_smap *smap, *smapbase, *smapend; 2496 u_int32_t smapsize; 2497 caddr_t kmdp; 2498#endif 2499 2500 has_smap = 0; 2501#if defined(XEN) 2502 Maxmem = xen_start_info->nr_pages - init_first; 2503 physmem = Maxmem; 2504 basemem = 0; 2505 physmap[0] = init_first << PAGE_SHIFT; 2506 physmap[1] = ptoa(Maxmem) - round_page(msgbufsize); 2507 physmap_idx = 0; 2508#else 2509#ifdef XBOX 2510 if (arch_i386_is_xbox) { 2511 /* 2512 * We queried the memory size before, so chop off 4MB for 2513 * the framebuffer and inform the OS of this. 2514 */ 2515 physmap[0] = 0; 2516 physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE; 2517 physmap_idx = 0; 2518 goto physmap_done; 2519 } 2520#endif 2521 bzero(&vmf, sizeof(vmf)); 2522 bzero(physmap, sizeof(physmap)); 2523 basemem = 0; 2524 2525 /* 2526 * Check if the loader supplied an SMAP memory map. If so, 2527 * use that and do not make any VM86 calls. 2528 */ 2529 physmap_idx = 0; 2530 smapbase = NULL; 2531 kmdp = preload_search_by_type("elf kernel"); 2532 if (kmdp == NULL) 2533 kmdp = preload_search_by_type("elf32 kernel"); 2534 if (kmdp != NULL) 2535 smapbase = (struct bios_smap *)preload_search_info(kmdp, 2536 MODINFO_METADATA | MODINFOMD_SMAP); 2537 if (smapbase != NULL) { 2538 /* 2539 * subr_module.c says: 2540 * "Consumer may safely assume that size value precedes data." 2541 * i.e.: an int32_t immediately precedes SMAP. 2542 */ 2543 smapsize = *((u_int32_t *)smapbase - 1); 2544 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize); 2545 has_smap = 1; 2546 2547 for (smap = smapbase; smap < smapend; smap++) 2548 if (!add_smap_entry(smap, physmap, &physmap_idx)) 2549 break; 2550 goto have_smap; 2551 } 2552 2553 /* 2554 * Some newer BIOSes have a broken INT 12H implementation 2555 * which causes a kernel panic immediately. In this case, we 2556 * need to use the SMAP to determine the base memory size. 2557 */ 2558 hasbrokenint12 = 0; 2559 TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12); 2560 if (hasbrokenint12 == 0) { 2561 /* Use INT12 to determine base memory size. */ 2562 vm86_intcall(0x12, &vmf); 2563 basemem = vmf.vmf_ax; 2564 basemem_setup(); 2565 } 2566 2567 /* 2568 * Fetch the memory map with INT 15:E820. Map page 1 R/W into 2569 * the kernel page table so we can use it as a buffer. The 2570 * kernel will unmap this page later. 
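 * (Each INT 15h/E820 call returns a single SMAP entry; %ebx acts as a
 * continuation cookie, so the loop below keeps calling until the BIOS
 * hands back %ebx == 0 or an entry fails to add.)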
2571 */ 2572 pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT); 2573 vmc.npages = 0; 2574 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT)); 2575 res = vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di); 2576 KASSERT(res != 0, ("vm86_getptr() failed: address not found")); 2577 2578 vmf.vmf_ebx = 0; 2579 do { 2580 vmf.vmf_eax = 0xE820; 2581 vmf.vmf_edx = SMAP_SIG; 2582 vmf.vmf_ecx = sizeof(struct bios_smap); 2583 i = vm86_datacall(0x15, &vmf, &vmc); 2584 if (i || vmf.vmf_eax != SMAP_SIG) 2585 break; 2586 has_smap = 1; 2587 if (!add_smap_entry(smap, physmap, &physmap_idx)) 2588 break; 2589 } while (vmf.vmf_ebx != 0); 2590 2591have_smap: 2592 /* 2593 * If we didn't fetch the "base memory" size from INT12, 2594 * figure it out from the SMAP (or just guess). 2595 */ 2596 if (basemem == 0) { 2597 for (i = 0; i <= physmap_idx; i += 2) { 2598 if (physmap[i] == 0x00000000) { 2599 basemem = physmap[i + 1] / 1024; 2600 break; 2601 } 2602 } 2603 2604 /* XXX: If we couldn't find basemem from SMAP, just guess. */ 2605 if (basemem == 0) 2606 basemem = 640; 2607 basemem_setup(); 2608 } 2609 2610 if (physmap[1] != 0) 2611 goto physmap_done; 2612 2613 /* 2614 * If we failed to find an SMAP, figure out the extended 2615 * memory size. We will then build a simple memory map with 2616 * two segments, one for "base memory" and the second for 2617 * "extended memory". Note that "extended memory" starts at a 2618 * physical address of 1MB and that both basemem and extmem 2619 * are in units of 1KB. 2620 * 2621 * First, try to fetch the extended memory size via INT 15:E801. 2622 */ 2623 vmf.vmf_ax = 0xE801; 2624 if (vm86_intcall(0x15, &vmf) == 0) { 2625 extmem = vmf.vmf_cx + vmf.vmf_dx * 64; 2626 } else { 2627 /* 2628 * If INT15:E801 fails, this is our last ditch effort 2629 * to determine the extended memory size. Currently 2630 * we prefer the RTC value over INT15:88. 2631 */ 2632#if 0 2633 vmf.vmf_ah = 0x88; 2634 vm86_intcall(0x15, &vmf); 2635 extmem = vmf.vmf_ax; 2636#else 2637 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8); 2638#endif 2639 } 2640 2641 /* 2642 * Special hack for chipsets that still remap the 384k hole when 2643 * there's 16MB of memory - this really confuses people that 2644 * are trying to use bus mastering ISA controllers with the 2645 * "16MB limit"; they only have 16MB, but the remapping puts 2646 * them beyond the limit. 2647 * 2648 * If extended memory is between 15-16MB (16-17MB phys address range), 2649 * chop it to 15MB. 2650 */ 2651 if ((extmem > 15 * 1024) && (extmem < 16 * 1024)) 2652 extmem = 15 * 1024; 2653 2654 physmap[0] = 0; 2655 physmap[1] = basemem * 1024; 2656 physmap_idx = 2; 2657 physmap[physmap_idx] = 0x100000; 2658 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024; 2659 2660physmap_done: 2661#endif 2662 /* 2663 * Now, physmap contains a map of physical memory. 2664 */ 2665 2666#ifdef SMP 2667 /* make hole for AP bootstrap code */ 2668 physmap[1] = mp_bootaddress(physmap[1]); 2669#endif 2670 2671 /* 2672 * Maxmem isn't the "maximum memory", it's one larger than the 2673 * highest page of the physical address space. It should be 2674 * called something like "Maxphyspage". We may adjust this 2675 * based on ``hw.physmem'' and the results of the memory test. 
2676 */ 2677 Maxmem = atop(physmap[physmap_idx + 1]); 2678 2679#ifdef MAXMEM 2680 Maxmem = MAXMEM / 4; 2681#endif 2682 2683 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable)) 2684 Maxmem = atop(physmem_tunable); 2685 2686 /* 2687 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend 2688 * the amount of memory in the system. 2689 */ 2690 if (has_smap && Maxmem > atop(physmap[physmap_idx + 1])) 2691 Maxmem = atop(physmap[physmap_idx + 1]); 2692 2693 /* 2694 * By default enable the memory test on real hardware, and disable 2695 * it if we appear to be running in a VM. This avoids touching all 2696 * pages unnecessarily, which doesn't matter on real hardware but is 2697 * bad for shared VM hosts. Use a general name so that 2698 * one could eventually do more with the code than just disable it. 2699 */ 2700 memtest = (vm_guest > VM_GUEST_NO) ? 0 : 1; 2701 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest); 2702 2703 if (atop(physmap[physmap_idx + 1]) != Maxmem && 2704 (boothowto & RB_VERBOSE)) 2705 printf("Physical memory use set to %ldK\n", Maxmem * 4); 2706 2707 /* 2708 * If Maxmem has been increased beyond what the system has detected, 2709 * extend the last memory segment to the new limit. 2710 */ 2711 if (atop(physmap[physmap_idx + 1]) < Maxmem) 2712 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem); 2713 2714 /* call pmap initialization to make new kernel address space */ 2715 pmap_bootstrap(first); 2716 2717 /* 2718 * Size up each available chunk of physical memory. 2719 */ 2720 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 2721 pa_indx = 0; 2722 da_indx = 1; 2723 phys_avail[pa_indx++] = physmap[0]; 2724 phys_avail[pa_indx] = physmap[0]; 2725 dump_avail[da_indx] = physmap[0]; 2726 pte = CMAP3; 2727 2728 /* 2729 * Get dcons buffer address 2730 */ 2731 if (getenv_quad("dcons.addr", &dcons_addr) == 0 || 2732 getenv_quad("dcons.size", &dcons_size) == 0) 2733 dcons_addr = 0; 2734 2735#ifndef XEN 2736 /* 2737 * physmap is in bytes, so when converting to page boundaries, 2738 * round up the start address and round down the end address. 2739 */ 2740 for (i = 0; i <= physmap_idx; i += 2) { 2741 vm_paddr_t end; 2742 2743 end = ptoa((vm_paddr_t)Maxmem); 2744 if (physmap[i + 1] < end) 2745 end = trunc_page(physmap[i + 1]); 2746 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 2747 int tmp, page_bad, full; 2748 int *ptr = (int *)CADDR3; 2749 2750 full = FALSE; 2751 /* 2752 * block out kernel memory as not available. 
2753 */ 2754 if (pa >= KERNLOAD && pa < first) 2755 goto do_dump_avail; 2756 2757 /* 2758 * block out dcons buffer 2759 */ 2760 if (dcons_addr > 0 2761 && pa >= trunc_page(dcons_addr) 2762 && pa < dcons_addr + dcons_size) 2763 goto do_dump_avail; 2764 2765 page_bad = FALSE; 2766 if (memtest == 0) 2767 goto skip_memtest; 2768 2769 /* 2770 * map page into kernel: valid, read/write,non-cacheable 2771 */ 2772 *pte = pa | PG_V | PG_RW | PG_N; 2773 invltlb(); 2774 2775 tmp = *(int *)ptr; 2776 /* 2777 * Test for alternating 1's and 0's 2778 */ 2779 *(volatile int *)ptr = 0xaaaaaaaa; 2780 if (*(volatile int *)ptr != 0xaaaaaaaa) 2781 page_bad = TRUE; 2782 /* 2783 * Test for alternating 0's and 1's 2784 */ 2785 *(volatile int *)ptr = 0x55555555; 2786 if (*(volatile int *)ptr != 0x55555555) 2787 page_bad = TRUE; 2788 /* 2789 * Test for all 1's 2790 */ 2791 *(volatile int *)ptr = 0xffffffff; 2792 if (*(volatile int *)ptr != 0xffffffff) 2793 page_bad = TRUE; 2794 /* 2795 * Test for all 0's 2796 */ 2797 *(volatile int *)ptr = 0x0; 2798 if (*(volatile int *)ptr != 0x0) 2799 page_bad = TRUE; 2800 /* 2801 * Restore original value. 2802 */ 2803 *(int *)ptr = tmp; 2804 2805skip_memtest: 2806 /* 2807 * Adjust array of valid/good pages. 2808 */ 2809 if (page_bad == TRUE) 2810 continue; 2811 /* 2812 * If this good page is a continuation of the 2813 * previous set of good pages, then just increase 2814 * the end pointer. Otherwise start a new chunk. 2815 * Note that "end" points one higher than end, 2816 * making the range >= start and < end. 2817 * If we're also doing a speculative memory 2818 * test and we at or past the end, bump up Maxmem 2819 * so that we keep going. The first bad page 2820 * will terminate the loop. 2821 */ 2822 if (phys_avail[pa_indx] == pa) { 2823 phys_avail[pa_indx] += PAGE_SIZE; 2824 } else { 2825 pa_indx++; 2826 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 2827 printf( 2828 "Too many holes in the physical address space, giving up\n"); 2829 pa_indx--; 2830 full = TRUE; 2831 goto do_dump_avail; 2832 } 2833 phys_avail[pa_indx++] = pa; /* start */ 2834 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 2835 } 2836 physmem++; 2837do_dump_avail: 2838 if (dump_avail[da_indx] == pa) { 2839 dump_avail[da_indx] += PAGE_SIZE; 2840 } else { 2841 da_indx++; 2842 if (da_indx == DUMP_AVAIL_ARRAY_END) { 2843 da_indx--; 2844 goto do_next; 2845 } 2846 dump_avail[da_indx++] = pa; /* start */ 2847 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */ 2848 } 2849do_next: 2850 if (full) 2851 break; 2852 } 2853 } 2854 *pte = 0; 2855 invltlb(); 2856#else 2857 phys_avail[0] = physfree; 2858 phys_avail[1] = xen_start_info->nr_pages*PAGE_SIZE; 2859 dump_avail[0] = 0; 2860 dump_avail[1] = xen_start_info->nr_pages*PAGE_SIZE; 2861 2862#endif 2863 2864 /* 2865 * XXX 2866 * The last chunk must contain at least one page plus the message 2867 * buffer to avoid complicating other code (message buffer address 2868 * calculation, etc.). 2869 */ 2870 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 2871 round_page(msgbufsize) >= phys_avail[pa_indx]) { 2872 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 2873 phys_avail[pa_indx--] = 0; 2874 phys_avail[pa_indx--] = 0; 2875 } 2876 2877 Maxmem = atop(phys_avail[pa_indx]); 2878 2879 /* Trim off space for the message buffer. */ 2880 phys_avail[pa_indx] -= round_page(msgbufsize); 2881 2882 /* Map the message buffer. 
*/ 2883 for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE) 2884 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] + 2885 off); 2886 2887 PT_UPDATES_FLUSH(); 2888} 2889#endif /* PC98 */ 2890 2891#ifdef XEN 2892#define MTOPSIZE (1<<(14 + PAGE_SHIFT)) 2893 2894register_t 2895init386(first) 2896 int first; 2897{ 2898 unsigned long gdtmachpfn; 2899 int error, gsel_tss, metadata_missing, x, pa; 2900 struct pcpu *pc; 2901#ifdef CPU_ENABLE_SSE 2902 struct xstate_hdr *xhdr; 2903#endif 2904 struct callback_register event = { 2905 .type = CALLBACKTYPE_event, 2906 .address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)Xhypervisor_callback }, 2907 }; 2908 struct callback_register failsafe = { 2909 .type = CALLBACKTYPE_failsafe, 2910 .address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback }, 2911 }; 2912 2913 thread0.td_kstack = proc0kstack; 2914 thread0.td_kstack_pages = KSTACK_PAGES; 2915 2916 /* 2917 * This may be done better later if it gets more high level 2918 * components in it. If so just link td->td_proc here. 2919 */ 2920 proc_linkup0(&proc0, &thread0); 2921 2922 metadata_missing = 0; 2923 if (xen_start_info->mod_start) { 2924 preload_metadata = (caddr_t)xen_start_info->mod_start; 2925 preload_bootstrap_relocate(KERNBASE); 2926 } else { 2927 metadata_missing = 1; 2928 } 2929 if (envmode == 1) 2930 kern_envp = static_env; 2931 else if ((caddr_t)xen_start_info->cmd_line) 2932 kern_envp = xen_setbootenv((caddr_t)xen_start_info->cmd_line); 2933 2934 boothowto |= xen_boothowto(kern_envp); 2935 2936 /* Init basic tunables, hz etc */ 2937 init_param1(); 2938 2939 /* 2940 * XEN occupies a portion of the upper virtual address space 2941 * At its base it manages an array mapping machine page frames 2942 * to physical page frames - hence we need to be able to 2943 * access 4GB - (64MB - 4MB + 64k) 2944 */ 2945 gdt_segs[GPRIV_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE); 2946 gdt_segs[GUFS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE); 2947 gdt_segs[GUGS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE); 2948 gdt_segs[GCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE); 2949 gdt_segs[GDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE); 2950 gdt_segs[GUCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE); 2951 gdt_segs[GUDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE); 2952 gdt_segs[GBIOSLOWMEM_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE); 2953 2954 pc = &__pcpu[0]; 2955 gdt_segs[GPRIV_SEL].ssd_base = (int) pc; 2956 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss; 2957 2958 PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V | PG_RW); 2959 bzero(gdt, PAGE_SIZE); 2960 for (x = 0; x < NGDT; x++) 2961 ssdtosd(&gdt_segs[x], &gdt[x].sd); 2962 2963 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN); 2964 2965 gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT; 2966 PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V); 2967 PANIC_IF(HYPERVISOR_set_gdt(&gdtmachpfn, 512) != 0); 2968 lgdt(&r_gdt); 2969 gdtset = 1; 2970 2971 if ((error = HYPERVISOR_set_trap_table(trap_table)) != 0) { 2972 panic("set_trap_table failed - error %d\n", error); 2973 } 2974 2975 error = HYPERVISOR_callback_op(CALLBACKOP_register, &event); 2976 if (error == 0) 2977 error = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe); 2978#if CONFIG_XEN_COMPAT <= 0x030002 2979 if (error == -ENOXENSYS) 2980 HYPERVISOR_set_callbacks(GSEL(GCODE_SEL, SEL_KPL), 2981 (unsigned long)Xhypervisor_callback, 2982 GSEL(GCODE_SEL, SEL_KPL), (unsigned 
long)failsafe_callback); 2983#endif 2984 pcpu_init(pc, 0, sizeof(struct pcpu)); 2985 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE) 2986 pmap_kenter(pa + KERNBASE, pa); 2987 dpcpu_init((void *)(first + KERNBASE), 0); 2988 first += DPCPU_SIZE; 2989 physfree += DPCPU_SIZE; 2990 init_first += DPCPU_SIZE / PAGE_SIZE; 2991 2992 PCPU_SET(prvspace, pc); 2993 PCPU_SET(curthread, &thread0); 2994 2995 /* 2996 * Initialize mutexes. 2997 * 2998 * icu_lock: in order to allow an interrupt to occur in a critical 2999 * section, to set pcpu->ipending (etc...) properly, we 3000 * must be able to get the icu lock, so it can't be 3001 * under witness. 3002 */ 3003 mutex_init(); 3004 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE); 3005 3006 /* make ldt memory segments */ 3007 PT_SET_MA(ldt, xpmap_ptom(VTOP(ldt)) | PG_V | PG_RW); 3008 bzero(ldt, PAGE_SIZE); 3009 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1); 3010 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1); 3011 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++) 3012 ssdtosd(&ldt_segs[x], &ldt[x].sd); 3013 3014 default_proc_ldt.ldt_base = (caddr_t)ldt; 3015 default_proc_ldt.ldt_len = 6; 3016 _default_ldt = (int)&default_proc_ldt; 3017 PCPU_SET(currentldt, _default_ldt); 3018 PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW); 3019 xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0])); 3020 3021#if defined(XEN_PRIVILEGED) 3022 /* 3023 * Initialize the i8254 before the console so that console 3024 * initialization can use DELAY(). 3025 */ 3026 i8254_init(); 3027#endif 3028 3029 /* 3030 * Initialize the console before we print anything out. 3031 */ 3032 cninit(); 3033 3034 if (metadata_missing) 3035 printf("WARNING: loader(8) metadata is missing!\n"); 3036 3037#ifdef DEV_ISA 3038#ifdef DEV_ATPIC 3039 elcr_probe(); 3040 atpic_startup(); 3041#else 3042 /* Reset and mask the atpics and leave them shut down. */ 3043 atpic_reset(); 3044 3045 /* 3046 * Point the ICU spurious interrupt vectors at the APIC spurious 3047 * interrupt handler. 
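 * (An 8259 reports spurious interrupts on its IR7 pin, which maps to IRQ 7
 * on the master and IRQ 15 on the slave, so those two vectors need a
 * harmless handler even with the ATPIC driver compiled out.)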
3048 */ 3049 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL, 3050 GSEL(GCODE_SEL, SEL_KPL)); 3051 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL, 3052 GSEL(GCODE_SEL, SEL_KPL)); 3053#endif 3054#endif 3055 3056#ifdef DDB 3057 ksym_start = bootinfo.bi_symtab; 3058 ksym_end = bootinfo.bi_esymtab; 3059#endif 3060 3061 kdb_init(); 3062 3063#ifdef KDB 3064 if (boothowto & RB_KDB) 3065 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); 3066#endif 3067 3068 finishidentcpu(); /* Final stage of CPU initialization */ 3069 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, 3070 GSEL(GCODE_SEL, SEL_KPL)); 3071 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, 3072 GSEL(GCODE_SEL, SEL_KPL)); 3073 initializecpu(); /* Initialize CPU registers */ 3074 initializecpucache(); 3075 3076 /* pointer to selector slot for %fs/%gs */ 3077 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd); 3078 3079 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 = 3080 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)]; 3081 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 = 3082 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL); 3083#if defined(PAE) || defined(PAE_TABLES) 3084 dblfault_tss.tss_cr3 = (int)IdlePDPT; 3085#else 3086 dblfault_tss.tss_cr3 = (int)IdlePTD; 3087#endif 3088 dblfault_tss.tss_eip = (int)dblfault_handler; 3089 dblfault_tss.tss_eflags = PSL_KERNEL; 3090 dblfault_tss.tss_ds = dblfault_tss.tss_es = 3091 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL); 3092 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL); 3093 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL); 3094 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL); 3095 3096 vm86_initialize(); 3097 getmemsize(first); 3098 init_param2(physmem); 3099 3100 /* now running on new page tables, configured,and u/iom is accessible */ 3101 3102 msgbufinit(msgbufp, msgbufsize); 3103#ifdef DEV_NPX 3104 npxinit(true); 3105#endif 3106 /* 3107 * Set up thread0 pcb after npxinit calculated pcb + fpu save 3108 * area size. Zero out the extended state header in fpu save 3109 * area. 3110 */ 3111 thread0.td_pcb = get_pcb_td(&thread0); 3112 bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size); 3113#ifdef CPU_ENABLE_SSE 3114 if (use_xsave) { 3115 xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) + 3116 1); 3117 xhdr->xstate_bv = xsave_mask; 3118 } 3119#endif 3120 PCPU_SET(curpcb, thread0.td_pcb); 3121 /* make an initial tss so cpu can get interrupt stack on syscall! 
*/ 3122 /* Note: -16 is so we can grow the trapframe if we came from vm86 */ 3123 PCPU_SET(common_tss.tss_esp0, (vm_offset_t)thread0.td_pcb - 16); 3124 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL)); 3125 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 3126 HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL), 3127 PCPU_GET(common_tss.tss_esp0)); 3128 3129 /* transfer to user mode */ 3130 3131 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL); 3132 _udatasel = GSEL(GUDATA_SEL, SEL_UPL); 3133 3134 /* setup proc 0's pcb */ 3135 thread0.td_pcb->pcb_flags = 0; 3136#if defined(PAE) || defined(PAE_TABLES) 3137 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT; 3138#else 3139 thread0.td_pcb->pcb_cr3 = (int)IdlePTD; 3140#endif 3141 thread0.td_pcb->pcb_ext = 0; 3142 thread0.td_frame = &proc0_tf; 3143 thread0.td_pcb->pcb_fsd = PCPU_GET(fsgs_gdt)[0]; 3144 thread0.td_pcb->pcb_gsd = PCPU_GET(fsgs_gdt)[1]; 3145 3146 cpu_probe_amdc1e(); 3147 3148 /* Location of kernel stack for locore */ 3149 return ((register_t)thread0.td_pcb); 3150} 3151 3152#else 3153register_t 3154init386(first) 3155 int first; 3156{ 3157 struct gate_descriptor *gdp; 3158 int gsel_tss, metadata_missing, x, pa; 3159 struct pcpu *pc; 3160#ifdef CPU_ENABLE_SSE 3161 struct xstate_hdr *xhdr; 3162#endif 3163 3164 thread0.td_kstack = proc0kstack; 3165 thread0.td_kstack_pages = KSTACK_PAGES; 3166 3167 /* 3168 * This may be done better later if it gets more high level 3169 * components in it. If so just link td->td_proc here. 3170 */ 3171 proc_linkup0(&proc0, &thread0); 3172 3173#ifdef PC98 3174 /* 3175 * Initialize DMAC 3176 */ 3177 pc98_init_dmac(); 3178#endif 3179 3180 metadata_missing = 0; 3181 if (bootinfo.bi_modulep) { 3182 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE; 3183 preload_bootstrap_relocate(KERNBASE); 3184 } else { 3185 metadata_missing = 1; 3186 } 3187 if (envmode == 1) 3188 kern_envp = static_env; 3189 else if (bootinfo.bi_envp) 3190 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE; 3191 3192 /* Init basic tunables, hz etc */ 3193 init_param1(); 3194 3195 /* 3196 * Make gdt memory segments. All segments cover the full 4GB 3197 * of address space and permissions are enforced at page level. 3198 */ 3199 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1); 3200 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1); 3201 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1); 3202 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1); 3203 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1); 3204 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1); 3205 3206 pc = &__pcpu[0]; 3207 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1); 3208 gdt_segs[GPRIV_SEL].ssd_base = (int) pc; 3209 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss; 3210 3211 for (x = 0; x < NGDT; x++) 3212 ssdtosd(&gdt_segs[x], &gdt[x].sd); 3213 3214 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 3215 r_gdt.rd_base = (int) gdt; 3216 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN); 3217 lgdt(&r_gdt); 3218 3219 pcpu_init(pc, 0, sizeof(struct pcpu)); 3220 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE) 3221 pmap_kenter(pa + KERNBASE, pa); 3222 dpcpu_init((void *)(first + KERNBASE), 0); 3223 first += DPCPU_SIZE; 3224 PCPU_SET(prvspace, pc); 3225 PCPU_SET(curthread, &thread0); 3226 3227 /* 3228 * Initialize mutexes. 3229 * 3230 * icu_lock: in order to allow an interrupt to occur in a critical 3231 * section, to set pcpu->ipending (etc...) properly, we 3232 * must be able to get the icu lock, so it can't be 3233 * under witness. 
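 * (Hence the MTX_NOWITNESS flag below, which exempts icu_lock from
 * witness(4) lock-order checking; MTX_NOPROFILE likewise keeps this hot
 * interrupt-path lock out of lock profiling.)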
3234 */ 3235 mutex_init(); 3236 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE); 3237 3238 /* make ldt memory segments */ 3239 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1); 3240 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1); 3241 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++) 3242 ssdtosd(&ldt_segs[x], &ldt[x].sd); 3243 3244 _default_ldt = GSEL(GLDT_SEL, SEL_KPL); 3245 lldt(_default_ldt); 3246 PCPU_SET(currentldt, _default_ldt); 3247 3248 /* exceptions */ 3249 for (x = 0; x < NIDT; x++) 3250 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, 3251 GSEL(GCODE_SEL, SEL_KPL)); 3252 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, 3253 GSEL(GCODE_SEL, SEL_KPL)); 3254 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL, 3255 GSEL(GCODE_SEL, SEL_KPL)); 3256 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL, 3257 GSEL(GCODE_SEL, SEL_KPL)); 3258 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL, 3259 GSEL(GCODE_SEL, SEL_KPL)); 3260 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, 3261 GSEL(GCODE_SEL, SEL_KPL)); 3262 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, 3263 GSEL(GCODE_SEL, SEL_KPL)); 3264 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, 3265 GSEL(GCODE_SEL, SEL_KPL)); 3266 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL 3267 , GSEL(GCODE_SEL, SEL_KPL)); 3268 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL)); 3269 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, 3270 GSEL(GCODE_SEL, SEL_KPL)); 3271 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, 3272 GSEL(GCODE_SEL, SEL_KPL)); 3273 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, 3274 GSEL(GCODE_SEL, SEL_KPL)); 3275 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, 3276 GSEL(GCODE_SEL, SEL_KPL)); 3277 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, 3278 GSEL(GCODE_SEL, SEL_KPL)); 3279 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, 3280 GSEL(GCODE_SEL, SEL_KPL)); 3281 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, 3282 GSEL(GCODE_SEL, SEL_KPL)); 3283 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, 3284 GSEL(GCODE_SEL, SEL_KPL)); 3285 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, 3286 GSEL(GCODE_SEL, SEL_KPL)); 3287 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL, 3288 GSEL(GCODE_SEL, SEL_KPL)); 3289 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL, 3290 GSEL(GCODE_SEL, SEL_KPL)); 3291#ifdef KDTRACE_HOOKS 3292 setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYS386TGT, SEL_UPL, 3293 GSEL(GCODE_SEL, SEL_KPL)); 3294#endif 3295#ifdef XENHVM 3296 setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall), SDT_SYS386IGT, SEL_UPL, 3297 GSEL(GCODE_SEL, SEL_KPL)); 3298#endif 3299 3300 r_idt.rd_limit = sizeof(idt0) - 1; 3301 r_idt.rd_base = (int) idt; 3302 lidt(&r_idt); 3303 3304#ifdef XBOX 3305 /* 3306 * The following code queries the PCI ID of 0:0:0. For the XBOX, 3307 * This should be 0x10de / 0x02a5. 3308 * 3309 * This is exactly what Linux does. 3310 */ 3311 outl(0xcf8, 0x80000000); 3312 if (inl(0xcfc) == 0x02a510de) { 3313 arch_i386_is_xbox = 1; 3314 pic16l_setled(XBOX_LED_GREEN); 3315 3316 /* 3317 * We are an XBOX, but we may have either 64MB or 128MB of 3318 * memory. The PCI host bridge should be programmed for this, 3319 * so we just query it. 3320 */ 3321 outl(0xcf8, 0x80000084); 3322 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64; 3323 } 3324#endif /* XBOX */ 3325 3326 /* 3327 * Initialize the i8254 before the console so that console 3328 * initialization can use DELAY(). 
3329 */ 3330 i8254_init(); 3331 3332 /* 3333 * Initialize the console before we print anything out. 3334 */ 3335 cninit(); 3336 3337 if (metadata_missing) 3338 printf("WARNING: loader(8) metadata is missing!\n"); 3339 3340#ifdef DEV_ISA 3341#ifdef DEV_ATPIC 3342#ifndef PC98 3343 elcr_probe(); 3344#endif 3345 atpic_startup(); 3346#else 3347 /* Reset and mask the atpics and leave them shut down. */ 3348 atpic_reset(); 3349 3350 /* 3351 * Point the ICU spurious interrupt vectors at the APIC spurious 3352 * interrupt handler. 3353 */ 3354 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL, 3355 GSEL(GCODE_SEL, SEL_KPL)); 3356 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL, 3357 GSEL(GCODE_SEL, SEL_KPL)); 3358#endif 3359#endif 3360 3361#ifdef DDB 3362 ksym_start = bootinfo.bi_symtab; 3363 ksym_end = bootinfo.bi_esymtab; 3364#endif 3365 3366 kdb_init(); 3367 3368#ifdef KDB 3369 if (boothowto & RB_KDB) 3370 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); 3371#endif 3372 3373 finishidentcpu(); /* Final stage of CPU initialization */ 3374 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, 3375 GSEL(GCODE_SEL, SEL_KPL)); 3376 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, 3377 GSEL(GCODE_SEL, SEL_KPL)); 3378 initializecpu(); /* Initialize CPU registers */ 3379 initializecpucache(); 3380 3381 /* pointer to selector slot for %fs/%gs */ 3382 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd); 3383 3384 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 = 3385 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)]; 3386 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 = 3387 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL); 3388#if defined(PAE) || defined(PAE_TABLES) 3389 dblfault_tss.tss_cr3 = (int)IdlePDPT; 3390#else 3391 dblfault_tss.tss_cr3 = (int)IdlePTD; 3392#endif 3393 dblfault_tss.tss_eip = (int)dblfault_handler; 3394 dblfault_tss.tss_eflags = PSL_KERNEL; 3395 dblfault_tss.tss_ds = dblfault_tss.tss_es = 3396 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL); 3397 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL); 3398 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL); 3399 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL); 3400 3401 vm86_initialize(); 3402 getmemsize(first); 3403 init_param2(physmem); 3404 3405 /* now running on new page tables, configured,and u/iom is accessible */ 3406 3407 msgbufinit(msgbufp, msgbufsize); 3408#ifdef DEV_NPX 3409 npxinit(true); 3410#endif 3411 /* 3412 * Set up thread0 pcb after npxinit calculated pcb + fpu save 3413 * area size. Zero out the extended state header in fpu save 3414 * area. 3415 */ 3416 thread0.td_pcb = get_pcb_td(&thread0); 3417 bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size); 3418#ifdef CPU_ENABLE_SSE 3419 if (use_xsave) { 3420 xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) + 3421 1); 3422 xhdr->xstate_bv = xsave_mask; 3423 } 3424#endif 3425 PCPU_SET(curpcb, thread0.td_pcb); 3426 /* make an initial tss so cpu can get interrupt stack on syscall! 
*/ 3427 /* Note: -16 is so we can grow the trapframe if we came from vm86 */ 3428 PCPU_SET(common_tss.tss_esp0, (vm_offset_t)thread0.td_pcb - 16); 3429 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL)); 3430 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 3431 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd); 3432 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt)); 3433 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16); 3434 ltr(gsel_tss); 3435 3436 /* make a call gate to reenter kernel with */ 3437 gdp = &ldt[LSYS5CALLS_SEL].gd; 3438 3439 x = (int) &IDTVEC(lcall_syscall); 3440 gdp->gd_looffset = x; 3441 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL); 3442 gdp->gd_stkcpy = 1; 3443 gdp->gd_type = SDT_SYS386CGT; 3444 gdp->gd_dpl = SEL_UPL; 3445 gdp->gd_p = 1; 3446 gdp->gd_hioffset = x >> 16; 3447 3448 /* XXX does this work? */ 3449 /* XXX yes! */ 3450 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL]; 3451 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL]; 3452 3453 /* transfer to user mode */ 3454 3455 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL); 3456 _udatasel = GSEL(GUDATA_SEL, SEL_UPL); 3457 3458 /* setup proc 0's pcb */ 3459 thread0.td_pcb->pcb_flags = 0; 3460#if defined(PAE) || defined(PAE_TABLES) 3461 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT; 3462#else 3463 thread0.td_pcb->pcb_cr3 = (int)IdlePTD; 3464#endif 3465 thread0.td_pcb->pcb_ext = 0; 3466 thread0.td_frame = &proc0_tf; 3467 3468 cpu_probe_amdc1e(); 3469 3470#ifdef FDT 3471 x86_init_fdt(); 3472#endif 3473 3474 /* Location of kernel stack for locore */ 3475 return ((register_t)thread0.td_pcb); 3476} 3477#endif 3478 3479void 3480cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) 3481{ 3482 3483 pcpu->pc_acpi_id = 0xffffffff; 3484} 3485 3486#ifndef PC98 3487static int 3488smap_sysctl_handler(SYSCTL_HANDLER_ARGS) 3489{ 3490 struct bios_smap *smapbase; 3491 struct bios_smap_xattr smap; 3492 caddr_t kmdp; 3493 uint32_t *smapattr; 3494 int count, error, i; 3495 3496 /* Retrieve the system memory map from the loader. 
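 * (The loader passes the raw BIOS map as MODINFOMD_SMAP metadata, and
 * subr_module.c guarantees a u_int32_t byte count immediately before the
 * payload; that is where 'count' is derived from below.)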
*/ 3497 kmdp = preload_search_by_type("elf kernel"); 3498 if (kmdp == NULL) 3499 kmdp = preload_search_by_type("elf32 kernel"); 3500 if (kmdp == NULL) 3501 return (0); 3502 smapbase = (struct bios_smap *)preload_search_info(kmdp, 3503 MODINFO_METADATA | MODINFOMD_SMAP); 3504 if (smapbase == NULL) 3505 return (0); 3506 smapattr = (uint32_t *)preload_search_info(kmdp, 3507 MODINFO_METADATA | MODINFOMD_SMAP_XATTR); 3508 count = *((u_int32_t *)smapbase - 1) / sizeof(*smapbase); 3509 error = 0; 3510 for (i = 0; i < count; i++) { 3511 smap.base = smapbase[i].base; 3512 smap.length = smapbase[i].length; 3513 smap.type = smapbase[i].type; 3514 if (smapattr != NULL) 3515 smap.xattr = smapattr[i]; 3516 else 3517 smap.xattr = 0; 3518 error = SYSCTL_OUT(req, &smap, sizeof(smap)); 3519 } 3520 return (error); 3521} 3522SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0, 3523 smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data"); 3524#endif /* !PC98 */ 3525 3526void 3527spinlock_enter(void) 3528{ 3529 struct thread *td; 3530 register_t flags; 3531 3532 td = curthread; 3533 if (td->td_md.md_spinlock_count == 0) { 3534 flags = intr_disable(); 3535 td->td_md.md_spinlock_count = 1; 3536 td->td_md.md_saved_flags = flags; 3537 } else 3538 td->td_md.md_spinlock_count++; 3539 critical_enter(); 3540} 3541 3542void 3543spinlock_exit(void) 3544{ 3545 struct thread *td; 3546 register_t flags; 3547 3548 td = curthread; 3549 critical_exit(); 3550 flags = td->td_md.md_saved_flags; 3551 td->td_md.md_spinlock_count--; 3552 if (td->td_md.md_spinlock_count == 0) 3553 intr_restore(flags); 3554} 3555 3556#if defined(I586_CPU) && !defined(NO_F00F_HACK) 3557static void f00f_hack(void *unused); 3558SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL); 3559 3560static void 3561f00f_hack(void *unused) 3562{ 3563 struct gate_descriptor *new_idt; 3564 vm_offset_t tmp; 3565 3566 if (!has_f00f_bug) 3567 return; 3568 3569 GIANT_REQUIRED; 3570 3571 printf("Intel Pentium detected, installing workaround for F00F bug\n"); 3572 3573 tmp = kmem_malloc(kernel_arena, PAGE_SIZE * 2, M_WAITOK | M_ZERO); 3574 if (tmp == 0) 3575 panic("kmem_malloc returned 0"); 3576 3577 /* Put the problematic entry (#6) at the end of the lower page. */ 3578 new_idt = (struct gate_descriptor*) 3579 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor)); 3580 bcopy(idt, new_idt, sizeof(idt0)); 3581 r_idt.rd_base = (u_int)new_idt; 3582 lidt(&r_idt); 3583 idt = new_idt; 3584 pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ); 3585} 3586#endif /* defined(I586_CPU) && !NO_F00F_HACK */ 3587 3588/* 3589 * Construct a PCB from a trapframe. This is called from kdb_trap() where 3590 * we want to start a backtrace from the function that caused us to enter 3591 * the debugger. We have the context in the trapframe, but base the trace 3592 * on the PCB. The PCB doesn't have to be perfect, as long as it contains 3593 * enough for a backtrace. 3594 */ 3595void 3596makectx(struct trapframe *tf, struct pcb *pcb) 3597{ 3598 3599 pcb->pcb_edi = tf->tf_edi; 3600 pcb->pcb_esi = tf->tf_esi; 3601 pcb->pcb_ebp = tf->tf_ebp; 3602 pcb->pcb_ebx = tf->tf_ebx; 3603 pcb->pcb_eip = tf->tf_eip; 3604 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? 
tf->tf_esp : (int)(tf + 1) - 8; 3605} 3606 3607int 3608ptrace_set_pc(struct thread *td, u_long addr) 3609{ 3610 3611 td->td_frame->tf_eip = addr; 3612 return (0); 3613} 3614 3615int 3616ptrace_single_step(struct thread *td) 3617{ 3618 td->td_frame->tf_eflags |= PSL_T; 3619 return (0); 3620} 3621 3622int 3623ptrace_clear_single_step(struct thread *td) 3624{ 3625 td->td_frame->tf_eflags &= ~PSL_T; 3626 return (0); 3627} 3628 3629int 3630fill_regs(struct thread *td, struct reg *regs) 3631{ 3632 struct pcb *pcb; 3633 struct trapframe *tp; 3634 3635 tp = td->td_frame; 3636 pcb = td->td_pcb; 3637 regs->r_gs = pcb->pcb_gs; 3638 return (fill_frame_regs(tp, regs)); 3639} 3640 3641int 3642fill_frame_regs(struct trapframe *tp, struct reg *regs) 3643{ 3644 regs->r_fs = tp->tf_fs; 3645 regs->r_es = tp->tf_es; 3646 regs->r_ds = tp->tf_ds; 3647 regs->r_edi = tp->tf_edi; 3648 regs->r_esi = tp->tf_esi; 3649 regs->r_ebp = tp->tf_ebp; 3650 regs->r_ebx = tp->tf_ebx; 3651 regs->r_edx = tp->tf_edx; 3652 regs->r_ecx = tp->tf_ecx; 3653 regs->r_eax = tp->tf_eax; 3654 regs->r_eip = tp->tf_eip; 3655 regs->r_cs = tp->tf_cs; 3656 regs->r_eflags = tp->tf_eflags; 3657 regs->r_esp = tp->tf_esp; 3658 regs->r_ss = tp->tf_ss; 3659 return (0); 3660} 3661 3662int 3663set_regs(struct thread *td, struct reg *regs) 3664{ 3665 struct pcb *pcb; 3666 struct trapframe *tp; 3667 3668 tp = td->td_frame; 3669 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) || 3670 !CS_SECURE(regs->r_cs)) 3671 return (EINVAL); 3672 pcb = td->td_pcb; 3673 tp->tf_fs = regs->r_fs; 3674 tp->tf_es = regs->r_es; 3675 tp->tf_ds = regs->r_ds; 3676 tp->tf_edi = regs->r_edi; 3677 tp->tf_esi = regs->r_esi; 3678 tp->tf_ebp = regs->r_ebp; 3679 tp->tf_ebx = regs->r_ebx; 3680 tp->tf_edx = regs->r_edx; 3681 tp->tf_ecx = regs->r_ecx; 3682 tp->tf_eax = regs->r_eax; 3683 tp->tf_eip = regs->r_eip; 3684 tp->tf_cs = regs->r_cs; 3685 tp->tf_eflags = regs->r_eflags; 3686 tp->tf_esp = regs->r_esp; 3687 tp->tf_ss = regs->r_ss; 3688 pcb->pcb_gs = regs->r_gs; 3689 return (0); 3690} 3691 3692#ifdef CPU_ENABLE_SSE 3693static void 3694fill_fpregs_xmm(sv_xmm, sv_87) 3695 struct savexmm *sv_xmm; 3696 struct save87 *sv_87; 3697{ 3698 register struct env87 *penv_87 = &sv_87->sv_env; 3699 register struct envxmm *penv_xmm = &sv_xmm->sv_env; 3700 int i; 3701 3702 bzero(sv_87, sizeof(*sv_87)); 3703 3704 /* FPU control/status */ 3705 penv_87->en_cw = penv_xmm->en_cw; 3706 penv_87->en_sw = penv_xmm->en_sw; 3707 penv_87->en_tw = penv_xmm->en_tw; 3708 penv_87->en_fip = penv_xmm->en_fip; 3709 penv_87->en_fcs = penv_xmm->en_fcs; 3710 penv_87->en_opcode = penv_xmm->en_opcode; 3711 penv_87->en_foo = penv_xmm->en_foo; 3712 penv_87->en_fos = penv_xmm->en_fos; 3713 3714 /* FPU registers */ 3715 for (i = 0; i < 8; ++i) 3716 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc; 3717} 3718 3719static void 3720set_fpregs_xmm(sv_87, sv_xmm) 3721 struct save87 *sv_87; 3722 struct savexmm *sv_xmm; 3723{ 3724 register struct env87 *penv_87 = &sv_87->sv_env; 3725 register struct envxmm *penv_xmm = &sv_xmm->sv_env; 3726 int i; 3727 3728 /* FPU control/status */ 3729 penv_xmm->en_cw = penv_87->en_cw; 3730 penv_xmm->en_sw = penv_87->en_sw; 3731 penv_xmm->en_tw = penv_87->en_tw; 3732 penv_xmm->en_fip = penv_87->en_fip; 3733 penv_xmm->en_fcs = penv_87->en_fcs; 3734 penv_xmm->en_opcode = penv_87->en_opcode; 3735 penv_xmm->en_foo = penv_87->en_foo; 3736 penv_xmm->en_fos = penv_87->en_fos; 3737 3738 /* FPU registers */ 3739 for (i = 0; i < 8; ++i) 3740 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i]; 3741} 3742#endif /* CPU_ENABLE_SSE 
*/ 3743 3744int 3745fill_fpregs(struct thread *td, struct fpreg *fpregs) 3746{ 3747 3748 KASSERT(td == curthread || TD_IS_SUSPENDED(td) || 3749 P_SHOULDSTOP(td->td_proc), 3750 ("not suspended thread %p", td)); 3751#ifdef DEV_NPX 3752 npxgetregs(td); 3753#else 3754 bzero(fpregs, sizeof(*fpregs)); 3755#endif 3756#ifdef CPU_ENABLE_SSE 3757 if (cpu_fxsr) 3758 fill_fpregs_xmm(&get_pcb_user_save_td(td)->sv_xmm, 3759 (struct save87 *)fpregs); 3760 else 3761#endif /* CPU_ENABLE_SSE */ 3762 bcopy(&get_pcb_user_save_td(td)->sv_87, fpregs, 3763 sizeof(*fpregs)); 3764 return (0); 3765} 3766 3767int 3768set_fpregs(struct thread *td, struct fpreg *fpregs) 3769{ 3770 3771#ifdef CPU_ENABLE_SSE 3772 if (cpu_fxsr) 3773 set_fpregs_xmm((struct save87 *)fpregs, 3774 &get_pcb_user_save_td(td)->sv_xmm); 3775 else 3776#endif /* CPU_ENABLE_SSE */ 3777 bcopy(fpregs, &get_pcb_user_save_td(td)->sv_87, 3778 sizeof(*fpregs)); 3779#ifdef DEV_NPX 3780 npxuserinited(td); 3781#endif 3782 return (0); 3783} 3784 3785/* 3786 * Get machine context. 3787 */ 3788int 3789get_mcontext(struct thread *td, mcontext_t *mcp, int flags) 3790{ 3791 struct trapframe *tp; 3792 struct segment_descriptor *sdp; 3793 3794 tp = td->td_frame; 3795 3796 PROC_LOCK(curthread->td_proc); 3797 mcp->mc_onstack = sigonstack(tp->tf_esp); 3798 PROC_UNLOCK(curthread->td_proc); 3799 mcp->mc_gs = td->td_pcb->pcb_gs; 3800 mcp->mc_fs = tp->tf_fs; 3801 mcp->mc_es = tp->tf_es; 3802 mcp->mc_ds = tp->tf_ds; 3803 mcp->mc_edi = tp->tf_edi; 3804 mcp->mc_esi = tp->tf_esi; 3805 mcp->mc_ebp = tp->tf_ebp; 3806 mcp->mc_isp = tp->tf_isp; 3807 mcp->mc_eflags = tp->tf_eflags; 3808 if (flags & GET_MC_CLEAR_RET) { 3809 mcp->mc_eax = 0; 3810 mcp->mc_edx = 0; 3811 mcp->mc_eflags &= ~PSL_C; 3812 } else { 3813 mcp->mc_eax = tp->tf_eax; 3814 mcp->mc_edx = tp->tf_edx; 3815 } 3816 mcp->mc_ebx = tp->tf_ebx; 3817 mcp->mc_ecx = tp->tf_ecx; 3818 mcp->mc_eip = tp->tf_eip; 3819 mcp->mc_cs = tp->tf_cs; 3820 mcp->mc_esp = tp->tf_esp; 3821 mcp->mc_ss = tp->tf_ss; 3822 mcp->mc_len = sizeof(*mcp); 3823 get_fpcontext(td, mcp, NULL, 0); 3824 sdp = &td->td_pcb->pcb_fsd; 3825 mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase; 3826 sdp = &td->td_pcb->pcb_gsd; 3827 mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase; 3828 mcp->mc_flags = 0; 3829 mcp->mc_xfpustate = 0; 3830 mcp->mc_xfpustate_len = 0; 3831 bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2)); 3832 return (0); 3833} 3834 3835/* 3836 * Set machine context. 3837 * 3838 * However, we don't set any but the user modifiable flags, and we won't 3839 * touch the cs selector. 
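 * (PSL_USERCHANGE is the mask of eflags bits a process may legitimately
 * modify; everything else, IOPL for instance, is preserved from the
 * current trapframe so a forged context cannot escalate privilege.)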
3840 */ 3841int 3842set_mcontext(struct thread *td, mcontext_t *mcp) 3843{ 3844 struct trapframe *tp; 3845 char *xfpustate; 3846 int eflags, ret; 3847 3848 tp = td->td_frame; 3849 if (mcp->mc_len != sizeof(*mcp) || 3850 (mcp->mc_flags & ~_MC_FLAG_MASK) != 0) 3851 return (EINVAL); 3852 eflags = (mcp->mc_eflags & PSL_USERCHANGE) | 3853 (tp->tf_eflags & ~PSL_USERCHANGE); 3854 if (mcp->mc_flags & _MC_HASFPXSTATE) { 3855 if (mcp->mc_xfpustate_len > cpu_max_ext_state_size - 3856 sizeof(union savefpu)) 3857 return (EINVAL); 3858 xfpustate = __builtin_alloca(mcp->mc_xfpustate_len); 3859 ret = copyin((void *)mcp->mc_xfpustate, xfpustate, 3860 mcp->mc_xfpustate_len); 3861 if (ret != 0) 3862 return (ret); 3863 } else 3864 xfpustate = NULL; 3865 ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len); 3866 if (ret != 0) 3867 return (ret); 3868 tp->tf_fs = mcp->mc_fs; 3869 tp->tf_es = mcp->mc_es; 3870 tp->tf_ds = mcp->mc_ds; 3871 tp->tf_edi = mcp->mc_edi; 3872 tp->tf_esi = mcp->mc_esi; 3873 tp->tf_ebp = mcp->mc_ebp; 3874 tp->tf_ebx = mcp->mc_ebx; 3875 tp->tf_edx = mcp->mc_edx; 3876 tp->tf_ecx = mcp->mc_ecx; 3877 tp->tf_eax = mcp->mc_eax; 3878 tp->tf_eip = mcp->mc_eip; 3879 tp->tf_eflags = eflags; 3880 tp->tf_esp = mcp->mc_esp; 3881 tp->tf_ss = mcp->mc_ss; 3882 td->td_pcb->pcb_gs = mcp->mc_gs; 3883 return (0); 3884} 3885 3886static void 3887get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave, 3888 size_t xfpusave_len) 3889{ 3890#ifdef CPU_ENABLE_SSE 3891 size_t max_len, len; 3892#endif 3893 3894#ifndef DEV_NPX 3895 mcp->mc_fpformat = _MC_FPFMT_NODEV; 3896 mcp->mc_ownedfp = _MC_FPOWNED_NONE; 3897 bzero(mcp->mc_fpstate, sizeof(mcp->mc_fpstate)); 3898#else 3899 mcp->mc_ownedfp = npxgetregs(td); 3900 bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0], 3901 sizeof(mcp->mc_fpstate)); 3902 mcp->mc_fpformat = npxformat(); 3903#ifdef CPU_ENABLE_SSE 3904 if (!use_xsave || xfpusave_len == 0) 3905 return; 3906 max_len = cpu_max_ext_state_size - sizeof(union savefpu); 3907 len = xfpusave_len; 3908 if (len > max_len) { 3909 len = max_len; 3910 bzero(xfpusave + max_len, len - max_len); 3911 } 3912 mcp->mc_flags |= _MC_HASFPXSTATE; 3913 mcp->mc_xfpustate_len = len; 3914 bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len); 3915#endif 3916#endif 3917} 3918 3919static int 3920set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate, 3921 size_t xfpustate_len) 3922{ 3923 union savefpu *fpstate; 3924 int error; 3925 3926 if (mcp->mc_fpformat == _MC_FPFMT_NODEV) 3927 return (0); 3928 else if (mcp->mc_fpformat != _MC_FPFMT_387 && 3929 mcp->mc_fpformat != _MC_FPFMT_XMM) 3930 return (EINVAL); 3931 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) { 3932 /* We don't care what state is left in the FPU or PCB. */ 3933 fpstate_drop(td); 3934 error = 0; 3935 } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU || 3936 mcp->mc_ownedfp == _MC_FPOWNED_PCB) { 3937#ifdef DEV_NPX 3938 fpstate = (union savefpu *)&mcp->mc_fpstate; 3939#ifdef CPU_ENABLE_SSE 3940 if (cpu_fxsr) 3941 fpstate->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask; 3942#endif 3943 error = npxsetregs(td, fpstate, xfpustate, xfpustate_len); 3944#else 3945 error = EINVAL; 3946#endif 3947 } else 3948 return (EINVAL); 3949 return (error); 3950} 3951 3952static void 3953fpstate_drop(struct thread *td) 3954{ 3955 3956 KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu")); 3957 critical_enter(); 3958#ifdef DEV_NPX 3959 if (PCPU_GET(fpcurthread) == td) 3960 npxdrop(); 3961#endif 3962 /* 3963 * XXX force a full drop of the npx. 

static void
fpstate_drop(struct thread *td)
{

	KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
	critical_enter();
#ifdef DEV_NPX
	if (PCPU_GET(fpcurthread) == td)
		npxdrop();
#endif
	/*
	 * XXX force a full drop of the npx.  The above only drops it if we
	 * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
	 *
	 * XXX I don't much like npxgetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of npxgetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
	    PCB_NPXUSERINITDONE);
	critical_exit();
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[4] = rdr4();
		dbregs->dr[5] = rdr5();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[4] = 0;
		dbregs->dr[5] = 0;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	return (0);
}
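
/*
 * Illustrative sketch (not compiled here; the encoding follows the
 * DBREG_DR7_* accessors from <machine/reg.h> as used below): building
 * a %dr7 value that set_dbregs() will accept.  Breakpoint i keeps its
 * enable bits at bit 2*i in the low byte, its 2-bit access code at bit
 * 16 + 4*i, and its 2-bit length code at bit 18 + 4*i; access code
 * 0x02 and length code 0x02 are the undefined encodings that
 * set_dbregs() rejects.
 */
#if 0
	u_int dr7;

	/*
	 * Watch 4 bytes for writes in slot 0: length code 0x03
	 * (4 bytes), access code 0x01 (write-only), local enable bit
	 * for breakpoint 0.
	 */
	dr7 = (0x03 << 18) | (0x01 << 16) | 0x01;
	/* DBREG_DR7_LEN(dr7, 0) == 0x03, DBREG_DR7_ACCESS(dr7, 0) == 0x01 */
#endif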

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr4(dbregs->dr[4]);
		load_dr5(dbregs->dr[5]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP.
		 */
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
		}

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within
		 * the process's address space.  If a process could do this,
		 * it could halt the system by setting a breakpoint in the
		 * kernel (if ddb was enabled).  Thus, we need to make sure
		 * that no breakpoints are being enabled for addresses
		 * outside the process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0 otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * None of the local (L0-L3) or global (G0-G3) breakpoint
		 * enable bits in %dr7 is set, so the trap cannot have been
		 * caused by the hardware debug registers.
		 */
		return (0);
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits (B0-B3) in %dr6 is set, so
		 * this trap was not caused by any of the debug registers.
		 */
		return (0);
	}

	/*
	 * At least one of the breakpoints was hit; check which ones and
	 * whether any of their addresses are in user space.
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space.
			 */
			return (nbp);
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return (0);
}

#ifdef KDB

/*
 * Provide inb() and outb() as functions.  They are normally only available
 * as inline functions, and thus cannot be called from the debugger.
 */

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);

u_char
inb_(u_short port)
{
	return (inb(port));
}

void
outb_(u_short port, u_char data)
{
	outb(port, data);
}

#endif /* KDB */
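
/*
 * Illustrative sketch (a hypothetical stand-alone userland program, not
 * part of the kernel build): exercising fill_dbregs()/set_dbregs()
 * above through ptrace(2) to plant a 4-byte write watchpoint in a
 * traced, stopped child.  set_dbregs() validates the %dr7 encoding and
 * rejects watched addresses at or above VM_MAXUSER_ADDRESS.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <machine/reg.h>

static int
set_write_watchpoint(pid_t pid, void *addr)
{
	struct dbreg db;

	/* Read the child's current debug registers (fill_dbregs()). */
	if (ptrace(PT_GETDBREGS, pid, (caddr_t)&db, 0) == -1)
		return (-1);
	db.dr[0] = (unsigned int)addr;	/* address to watch */
	/* Same dr7 encoding that set_dbregs() accepts: 4-byte write
	 * watchpoint, locally enabled in slot 0. */
	db.dr[7] = (0x03 << 18) | (0x01 << 16) | 0x01;
	return (ptrace(PT_SETDBREGS, pid, (caddr_t)&db, 0));
}
#endif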