/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/i386/i386/vm_machdep.c 301428 2016-06-05 07:34:10Z dchagin $");

#include "opt_isa.h"
#include "opt_npx.h"
#include "opt_reset.h"
#include "opt_cpu.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#ifdef CPU_ELAN
#include <machine/elan_mmcr.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#ifdef XEN
#include <xen/hypervisor.h>
#endif
#ifdef PC98
#include <pc98/cbus/cbus.h>
#else
#include <x86/isa/isa.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>
#endif

#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define	CPU_ENABLE_SSE
#endif

_Static_assert(OFFSETOF_CURTHREAD == offsetof(struct pcpu, pc_curthread),
    "OFFSETOF_CURTHREAD does not correspond with offset of pc_curthread.");
_Static_assert(OFFSETOF_CURPCB == offsetof(struct pcpu, pc_curpcb),
    "OFFSETOF_CURPCB does not correspond with offset of pc_curpcb.");

static void	cpu_reset_real(void);
#ifdef SMP
static void	cpu_reset_proxy(void);
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif

static int nsfbufs;
static int nsfbufspeak;
static int nsfbufsused;

SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");

static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int	sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
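
/*
 * Top-of-kernel-stack layout assumed by the accessors below (derived
 * from the arithmetic in get_pcb_td() and cpu_thread_alloc()):
 *
 *	td_kstack + td_kstack_pages * PAGE_SIZE:
 *	-> XSAVE_AREA_ALIGN-aligned user FPU save area (pcb_user_save)
 *	-> struct pcb
 *	-> 16 byte gap (room to expand the frame for vm86 mode)
 *	-> struct trapframe (td_frame)
 *	-> the rest of the kernel stack grows down from here
 */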
union savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
	KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
	return ((union savefpu *)p);
}

union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
	vm_offset_t p;

	p = (vm_offset_t)(pcb + 1);
	return ((union savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
	    sizeof(struct pcb);
	return ((struct pcb *)p);
}

void *
alloc_fpusave(int flags)
{
	void *res;
#ifdef CPU_ENABLE_SSE
	struct savefpu_ymm *sf;
#endif

	res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
#ifdef CPU_ENABLE_SSE
	if (use_xsave) {
		sf = (struct savefpu_ymm *)res;
		bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
		sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
	}
#endif
	return (res);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt, *pldt1;

			mtx_lock_spin(&dt_lock);
			if ((pldt1 = mdp1->md_ldt) != NULL &&
			    pldt1->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_deref(pldt1);
			} else
				mtx_unlock_spin(&dt_lock);
		}
		return;
	}

	/* Ensure that td1's pcb is up to date. */
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
#ifdef DEV_NPX
	critical_enter();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(td1->td_pcb->pcb_save);
	critical_exit();
#endif

	/* Point the pcb to the top of the stack */
	pcb2 = get_pcb_td(td2);
	td2->td_pcb = pcb2;

	/* Copy td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Properly initialize pcb_save */
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
	    cpu_max_ext_state_size);

	/* Point mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));
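
	/*
	 * Child return values: %eax = 0 with carry clear indicates
	 * success.  %edx = 1 presumably preserves the historical
	 * fork(2) convention in which %edx distinguished the child (1)
	 * from the parent (0); the parent side is set via td_retval[].
	 */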
	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
	td2->td_frame->tf_edx = 1;

	/*
	 * If the parent process has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame unless the debugger had set PF_FORK
	 * on the parent.  Otherwise, the child will receive a (likely
	 * unexpected) SIGTRAP when it executes the first instruction after
	 * returning to userland.
	 */
	if ((p1->p_pfsflags & PF_FORK) == 0)
		td2->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on the stack.  These are the kernel mode register
	 * values.
	 */
#if defined(PAE) || defined(PAE_TABLES)
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl = PSL_KERNEL;		/* ints disabled */
	/*-
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&dt_lock);
	if (mdp2->md_ldt != NULL) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&dt_lock);

	/* Set up to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	/*
	 * XXX XEN: need to check how PSL_USER is handled.
	 */
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int)func;	/* function */
	td->td_pcb->pcb_ebx = (int)arg;		/* first arg */
}

void
cpu_exit(struct thread *td)
{

	/*
	 * If this process has a custom LDT, release it.  Reset pcb_gs
	 * and %gs before we free it in case they refer to an LDT entry.
	 */
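	/*
	 * Note the asymmetric unlocking below: user_ldt_free() is
	 * entered with dt_lock held and is presumably responsible for
	 * dropping it itself, so only the branch that skips the free
	 * unlocks explicitly.
	 */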
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	} else
		mtx_unlock_spin(&dt_lock);
}

void
cpu_thread_exit(struct thread *td)
{

#ifdef DEV_NPX
	critical_enter();
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
	critical_exit();
#endif

	/* Disable any hardware breakpoints. */
	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != NULL) {
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_arena, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = NULL;
	}
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;
#ifdef CPU_ENABLE_SSE
	struct xstate_hdr *xhdr;
#endif

	td->td_pcb = pcb = get_pcb_td(td);
	td->td_frame = (struct trapframe *)((caddr_t)pcb - 16) - 1;
	pcb->pcb_ext = NULL;
	pcb->pcb_save = get_pcb_user_save_pcb(pcb);
#ifdef CPU_ENABLE_SSE
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
		bzero(xhdr, sizeof(*xhdr));
		xhdr->xstate_bv = xsave_mask;
	}
#endif
}

void
cpu_thread_free(struct thread *td)
{

	cpu_thread_clean(td);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{

	switch (error) {
	case 0:
		td->td_frame->tf_eax = td->td_retval[0];
		td->td_frame->tf_edx = td->td_retval[1];
		td->td_frame->tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
		 * 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		td->td_frame->tf_eip -= td->td_frame->tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
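		/*
		 * SV_ABI_ERRNO() lets a non-native ABI (the Linuxulator,
		 * for example) translate the native errno value into the
		 * equivalent for the process's syscall vector; for native
		 * processes it should pass the value through unchanged.
		 */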
		td->td_frame->tf_eax = SV_ABI_ERRNO(td->td_proc, error);
		td->td_frame->tf_eflags |= PSL_C;
		break;
	}
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back
 * to userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that come from other
 * sources, such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
	    PCB_KERNNPX);
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
	    cpu_max_ext_state_size);

	/*
	 * Create a new fresh stack for the new thread.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * If the current thread has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame.  Otherwise, the new thread will
	 * receive a (likely unexpected) SIGTRAP when it executes the first
	 * instruction after returning to userland.
	 */
	td->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on the stack.  These are the kernel mode register
	 * values.
	 */
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;		    /* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;			    /* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl &= ~(PSL_I);	/* interrupts must be disabled */
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_cr3:	cloned above.
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;

	/* Set up to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the uts
	 * function.  The -4 after aligning leaves a slot for the fake
	 * return address, so the argument stored below lands where
	 * entry() expects its first parameter.
	 */
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
	td->td_frame->tf_eip = (int)entry;

	/*
	 * Pass the address of the mailbox for this kse to the uts
	 * function as a parameter on the stack.
	 */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)arg);
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct segment_descriptor sd;
	uint32_t base;

	/*
	 * Construct a descriptor and store it in the pcb for
	 * the next context switch.  Also store it in the gdt
	 * so that the load of tf_fs into %fs will activate it
	 * at return to userland.
	 */
	base = (uint32_t)tls_base;
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sd.sd_hilimit = 0xf;
	sd.sd_type  = SDT_MEMRWA;
	sd.sd_dpl   = SEL_UPL;
	sd.sd_p     = 1;
	sd.sd_xx    = 0;
	sd.sd_def32 = 1;
	sd.sd_gran  = 1;
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	if (td == curthread) {
		PCPU_GET(fsgs_gdt)[1] = sd;
		load_gs(GSEL(GUGS_SEL, SEL_UPL));
	}
	critical_exit();
	return (0);
}

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}
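
/*
 * cpu_reset() called on an AP proxies the actual reset to the BSP:
 * the AP records its cpuid in cpu_reset_proxyid, restarts CPU 0 with
 * cpu_reset_proxy() as the restart function, and spins until the BSP
 * announces itself by setting cpu_reset_proxy_active to 1; the AP
 * acknowledges by setting it to 2, after which the BSP stops the AP
 * and performs the reset itself.
 */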
#ifdef SMP
static void
cpu_reset_proxy(void)
{
	cpuset_t tcrp;

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to see that we've started */
	CPU_SETOF(cpu_reset_proxyid, &tcrp);
	stop_cpus(tcrp);
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif

void
cpu_reset(void)
{
#ifdef XBOX
	if (arch_i386_is_xbox) {
		/* Kick the PIC16L, it can reboot the box */
		pic16l_reboot();
		for (;;);
	}
#endif

#ifdef SMP
	cpuset_t map;
	u_int cnt;

	if (smp_started) {
		map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &map);
		CPU_NAND(&map, &stopped_cpus);
		if (!CPU_EMPTY(&map)) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);
		}

		if (PCPU_GET(cpuid) != 0) {
			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");

			/* Restart CPU #0. */
			/* XXX: restart_cpus(1 << 0); */
			CPU_SETOF(0, &started_cpus);
			wmb();

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			enable_intr();
			cpu_reset_proxy_active = 2;

			while (1);
			/* NOTREACHED */
		}

		DELAY(1000000);
	}
#endif
	cpu_reset_real();
	/* NOTREACHED */
}

static void
cpu_reset_real(void)
{
	struct region_descriptor null_idt;
#ifndef PC98
	int b;
#endif

	disable_intr();
#ifdef XEN
	if (smp_processor_id() == 0)
		HYPERVISOR_shutdown(SHUTDOWN_reboot);
	else
		HYPERVISOR_shutdown(SHUTDOWN_poweroff);
#endif
#ifdef CPU_ELAN
	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
#endif

	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
		outl(0xcfc, 0xf);
	}

#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);	/* SHUT0 = 0. */
		outb(0x37, 0x0b);	/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);	/* Reset. */
#else
#if !defined(BROKEN_KEYBOARD_RESET)
	/*
	 * Attempt to do a CPU reset via the keyboard controller;
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
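	/* 0xFE written to the 8042 command port pulses its CPU reset line. */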
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
#endif

	/*
	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
	 * transitions from 0 to 1.  Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset.  We try a "hard" reset.  The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2.  The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * a reset.
	 */
	outb(0xcf9, 0x2);
	outb(0xcf9, 0x6);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	/*
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
	 * preserve bit 1 while setting bit 0.  We also must clear bit
	 * 0 before setting it if it isn't already clear.
	 */
	b = inb(0x92);
	if (b != 0xff) {
		if ((b & 0x1) != 0)
			outb(0x92, b & 0xfe);
		outb(0x92, b | 0x1);
		DELAY(500000);	/* wait 0.5 sec to see if that did it */
	}
#endif /* PC98 */

	printf("No known reset method worked, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */

	/* Wipe the IDT. */
	null_idt.rd_limit = 0;
	null_idt.rd_base = 0;
	lidt(&null_idt);

	/* "good night, sweet prince .... <THUNK!>" */
	breakpoint();

	/* NOTREACHED */
	while (1);
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly old) mapping of the page by an sf buffer exists.  Returns
 * TRUE when a mapping was found and the cache invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	boolean_t ret;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	ret = FALSE;
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			/*
			 * Use pmap_qenter() to update the pte for the
			 * existing mapping; in particular, the PAT
			 * settings are recalculated.
			 */
			pmap_qenter(sf->kva, &m, 1);
			pmap_invalidate_cache_range(sf->kva, sf->kva +
			    PAGE_SIZE, FALSE);
			ret = TRUE;
			break;
		}
	}
	mtx_unlock(&sf_buf_lock);
	return (ret);
}
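
/*
 * As sketched from the code below, sf->cpumask records the CPUs whose
 * TLBs are known to be consistent with the sf_buf's current mapping.
 * sf_buf_alloc() invalidates lazily, sending invlpg only to CPUs not
 * in the mask, and clears the mask when it installs a new mapping
 * over one that may still be cached (the old PTE had PG_V and PG_A).
 */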
/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	pt_entry_t opte, *ptep;
	struct sf_head *hash_list;
	struct sf_buf *sf;
#ifdef SMP
	cpuset_t other_cpus;
	u_int cpuid;
#endif
	int error;

	KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0,
	    ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned"));
	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
#ifdef SMP
			goto shootdown;
#else
			goto done;
#endif
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);

	/*
	 * Update the sf_buf's virtual-to-physical mapping, flushing the
	 * virtual address from the TLB.  Since the reference count for
	 * the sf_buf's old mapping was zero, that mapping is not
	 * currently in use.  Consequently, there is no need to exchange
	 * the old and new PTEs atomically, even under PAE.
	 */
	ptep = vtopte(sf->kva);
	opte = *ptep;
#ifdef XEN
	PT_SET_MA(sf->kva, xpmap_ptom(VM_PAGE_TO_PHYS(m)) | pgeflag
	    | PG_RW | PG_V | pmap_cache_bits(m->md.pat_mode, 0));
#else
	*ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V |
	    pmap_cache_bits(m->md.pat_mode, 0);
#endif

	/*
	 * Avoid unnecessary TLB invalidations: If the sf_buf's old
	 * virtual-to-physical mapping was not used, then any processor
	 * that has invalidated the sf_buf's virtual address from its TLB
	 * since the last used mapping need not invalidate again.
	 */
#ifdef SMP
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		CPU_ZERO(&sf->cpumask);
shootdown:
	sched_pin();
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
		CPU_SET(cpuid, &sf->cpumask);
		invlpg(sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_NAND(&other_cpus, &sf->cpumask);
		if (!CPU_EMPTY(&other_cpus)) {
			CPU_OR(&sf->cpumask, &other_cpus);
			smp_masked_invlpg(other_cpus, sf->kva);
		}
	}
	sched_unpin();
#else
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		pmap_invalidate_page(kernel_pmap, sf->kva);
#endif
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}
/*
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero.  A freed sf_buf still,
 * however, retains its virtual-to-physical mapping until it is
 * recycled or reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
#ifdef XEN
		/*
		 * Xen doesn't like having dangling R/W mappings
		 */
		pmap_qremove(sf->kva, 1);
		sf->m = NULL;
		LIST_REMOVE(sf, list_entry);
#endif
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return (0);
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return (1);
}