vm_machdep.c revision 282065

/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/i386/i386/vm_machdep.c 282065 2015-04-27 08:02:12Z kib $");

#include "opt_isa.h"
#include "opt_npx.h"
#include "opt_reset.h"
#include "opt_cpu.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#ifdef CPU_ELAN
#include <machine/elan_mmcr.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#ifdef XEN
#include <xen/hypervisor.h>
#endif
#ifdef PC98
#include <pc98/cbus/cbus.h>
#else
#include <x86/isa/isa.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>
#endif

#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

_Static_assert(OFFSETOF_CURTHREAD == offsetof(struct pcpu, pc_curthread),
    "OFFSETOF_CURTHREAD does not correspond with offset of pc_curthread.");
_Static_assert(OFFSETOF_CURPCB == offsetof(struct pcpu, pc_curpcb),
    "OFFSETOF_CURPCB does not correspond with offset of pc_curpcb.");

static void	cpu_reset_real(void);
#ifdef SMP
static void	cpu_reset_proxy(void);
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif

static int nsfbufs;
static int nsfbufspeak;
static int nsfbufsused;

SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");

static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int	sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
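
/*
 * The three helpers below carve fixed areas out of the top of each
 * kernel stack.  A sketch of the layout they assume (inferred from the
 * arithmetic in the functions themselves, highest address first):
 *
 *	td_kstack + td_kstack_pages * PAGE_SIZE:
 *		[ cpu_max_ext_state_size bytes ]  user FPU/XSAVE save area,
 *						  64-byte aligned
 *		[ sizeof(struct pcb) bytes     ]  the pcb itself; note that
 *						  pcb + 1 == FPU save area
 *		[ 16 bytes of slop             ]  room to expand the trap
 *						  frame for vm86 mode
 *		[ struct trapframe             ]
 *		... remainder of the kernel stack, growing downwards ...
 */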

union savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    cpu_max_ext_state_size;
	KASSERT((p % 64) == 0, ("Unaligned pcb_user_save area"));
	return ((union savefpu *)p);
}

union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
	vm_offset_t p;

	p = (vm_offset_t)(pcb + 1);
	return ((union savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    cpu_max_ext_state_size - sizeof(struct pcb);
	return ((struct pcb *)p);
}

void *
alloc_fpusave(int flags)
{
	void *res;
#ifdef CPU_ENABLE_SSE
	struct savefpu_ymm *sf;
#endif

	res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
#ifdef CPU_ENABLE_SSE
	if (use_xsave) {
		sf = (struct savefpu_ymm *)res;
		bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
		sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
	}
#endif
	return (res);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(td1, p2, td2, flags)
	register struct thread *td1;
	register struct proc *p2;
	struct thread *td2;
	int flags;
{
	register struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt, *pldt1;

			mtx_lock_spin(&dt_lock);
			if ((pldt1 = mdp1->md_ldt) != NULL &&
			    pldt1->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_deref(pldt1);
			} else
				mtx_unlock_spin(&dt_lock);
		}
		return;
	}

	/* Ensure that td1's pcb is up to date. */
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
#ifdef DEV_NPX
	critical_enter();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(td1->td_pcb->pcb_save);
	critical_exit();
#endif

	/* Point the pcb to the top of the stack */
	pcb2 = get_pcb_td(td2);
	td2->td_pcb = pcb2;

	/* Copy td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Properly initialize pcb_save */
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
	    cpu_max_ext_state_size);

	/* Point mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
	td2->td_frame->tf_edx = 1;

	/*
	 * If the parent process has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame unless the debugger had set PF_FORK
	 * on the parent.  Otherwise, the child will receive a (likely
	 * unexpected) SIGTRAP when it executes the first instruction after
	 * returning to userland.
	 */
	if ((p1->p_pfsflags & PF_FORK) == 0)
		td2->td_frame->tf_eflags &= ~PSL_T;
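
	/*
	 * How the register values set below are consumed (informational;
	 * the authoritative sequence is the assembler in swtch.s and
	 * exception.s, not this sketch): cpu_switch() loads %esp from
	 * pcb_esp and returns through pcb_eip, i.e. into fork_trampoline(),
	 * which in effect does
	 *
	 *	pushl	%esp		# trap frame, third argument
	 *	pushl	%ebx		# new thread, second argument
	 *	pushl	%esi		# callout, first argument
	 *	call	fork_exit
	 *
	 * so that fork_exit() releases the spin count set near the end of
	 * this function and then calls fork_return(td2, frame) to finish
	 * the return to user mode.
	 */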

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#if defined(PAE) || defined(PAE_TABLES)
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl = PSL_KERNEL;		/* ints disabled */
	/*-
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&dt_lock);
	if (mdp2->md_ldt != NULL) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&dt_lock);

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	/*
	 * XXX XEN need to check on PSL_USER is handled
	 */
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(td, func, arg)
	struct thread *td;
	void (*func)(void *);
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int) func;	/* function */
	td->td_pcb->pcb_ebx = (int) arg;	/* first arg */
}
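
/*
 * Illustrative usage (hedged): kproc_create(9) and friends reach this
 * function to point a newly forked kernel thread at its main loop
 * instead of fork_return().  The names below are hypothetical:
 *
 *	static void
 *	example_kproc_main(void *arg)		(hypothetical handler)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		for (;;)
 *			example_service(sc);	(never returns)
 *	}
 *
 *	cpu_set_fork_handler(td, example_kproc_main, sc);
 *
 * Since the handler never returns, the thread never heads back toward
 * user mode, which is how kernel threads "stay in kernel mode".
 */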

void
cpu_exit(struct thread *td)
{

	/*
	 * If this process has a custom LDT, release it.  Reset pcb->pcb_gs
	 * and %gs before we free it in case they refer to an LDT entry.
	 */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	} else
		mtx_unlock_spin(&dt_lock);
}

void
cpu_thread_exit(struct thread *td)
{

#ifdef DEV_NPX
	critical_enter();
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
	critical_exit();
#endif

	/* Disable any hardware breakpoints. */
	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != NULL) {
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_arena, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = NULL;
	}
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;
#ifdef CPU_ENABLE_SSE
	struct xstate_hdr *xhdr;
#endif

	td->td_pcb = pcb = get_pcb_td(td);
	td->td_frame = (struct trapframe *)((caddr_t)pcb - 16) - 1;
	pcb->pcb_ext = NULL;
	pcb->pcb_save = get_pcb_user_save_pcb(pcb);
#ifdef CPU_ENABLE_SSE
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
		bzero(xhdr, sizeof(*xhdr));
		xhdr->xstate_bv = xsave_mask;
	}
#endif
}

void
cpu_thread_free(struct thread *td)
{

	cpu_thread_clean(td);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{

	switch (error) {
	case 0:
		td->td_frame->tf_eax = td->td_retval[0];
		td->td_frame->tf_edx = td->td_retval[1];
		td->td_frame->tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
		 * 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		td->td_frame->tf_eip -= td->td_frame->tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
		if (td->td_proc->p_sysent->sv_errsize) {
			if (error >= td->td_proc->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = td->td_proc->p_sysent->sv_errtbl[error];
		}
		td->td_frame->tf_eax = error;
		td->td_frame->tf_eflags |= PSL_C;
		break;
	}
}
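
/*
 * For reference, the user-visible convention that cpu_set_syscall_retval()
 * implements (a sketch; libc's i386 syscall stubs are the authoritative
 * consumer): the kernel clears %eflags.CF on success and returns results
 * in %eax/%edx, or sets CF and puts the error number in %eax on failure,
 * so a stub is roughly
 *
 *	int	$0x80		# enter the kernel
 *	jb	error_path	# CF set: %eax holds errno
 *	ret			# CF clear: %eax/%edx hold the result
 *
 * ERESTART never reaches user mode: tf_eip is wound back by the size of
 * the trapping instruction (2 bytes for int $0x80, 7 for lcall), which
 * the syscall entry code saved in tf_err, so the same syscall re-executes.
 */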

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back to
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
	    PCB_KERNNPX);
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
	    cpu_max_ext_state_size);

	/*
	 * Create a new fresh stack for the new thread.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * If the current thread has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame.  Otherwise, the new thread will
	 * receive a (likely unexpected) SIGTRAP when it executes the first
	 * instruction after returning to userland.
	 */
	td->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;		    /* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;			    /* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl &= ~(PSL_I);	/* interrupts must be disabled */
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_cr3:	cloned above.
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the uts
	 * function.  The stack is arranged as if entry had just been
	 * reached by a call: the word at %esp is the (unused) return
	 * address slot and the argument goes in the word above it.
	 */
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
	td->td_frame->tf_eip = (int)entry;

	/*
	 * Pass the address of the mailbox for this kse to the uts
	 * function as a parameter on the stack.
	 */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)arg);
}
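
/*
 * Background for cpu_set_user_tls() below: i386 exposes per-thread TLS by
 * aiming a user-visible GDT segment at the TLS base, so userland reaches
 * its thread control block as %gs:0.  The 32-bit base is split across the
 * descriptor fields; a worked example with a hypothetical base of
 * 0x2834f000:
 *
 *	sd_lobase = 0x2834f000 & 0xffffff	= 0x34f000  (low 24 bits)
 *	sd_hibase = (0x2834f000 >> 24) & 0xff	= 0x28	    (high 8 bits)
 *
 * The limit is all-ones with page granularity (sd_lolimit, sd_hilimit and
 * sd_gran), giving a 4GB segment so that negative offsets from the base
 * simply wrap around, as the comment on sd_lolimit notes.
 */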

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct segment_descriptor sd;
	uint32_t base;

	/*
	 * Construct a descriptor and store it in the pcb for
	 * the next context switch.  Also store it in the gdt
	 * so that the load of tf_fs into %fs will activate it
	 * at return to userland.
	 */
	base = (uint32_t)tls_base;
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sd.sd_hilimit = 0xf;
	sd.sd_type  = SDT_MEMRWA;
	sd.sd_dpl   = SEL_UPL;
	sd.sd_p     = 1;
	sd.sd_xx    = 0;
	sd.sd_def32 = 1;
	sd.sd_gran  = 1;
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	if (td == curthread) {
		PCPU_GET(fsgs_gdt)[1] = sd;
		load_gs(GSEL(GUGS_SEL, SEL_UPL));
	}
	critical_exit();
	return (0);
}

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

#ifdef SMP
static void
cpu_reset_proxy()
{
	cpuset_t tcrp;

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to see that we've started */
	CPU_SETOF(cpu_reset_proxyid, &tcrp);
	stop_cpus(tcrp);
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif
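
/*
 * Note on the handshake between cpu_reset_proxy() above and cpu_reset()
 * below (inferred from the code, not an authoritative description):
 * cpu_reset_proxy_active is a small shared state machine.
 *
 *	0: armed by cpu_reset() before it restarts the BSP;
 *	1: the proxy announces it is running, so the requesting CPU
 *	   knows the BSP woke up;
 *	2: the requesting CPU acknowledges and parks itself, after which
 *	   the proxy stops that CPU and performs the real reset.
 *
 * The dance exists because the actual reset is expected to be performed
 * from the BSP (CPU 0); a reset requested on any other CPU is forwarded
 * there.
 */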

void
cpu_reset()
{
#ifdef XBOX
	if (arch_i386_is_xbox) {
		/* Kick the PIC16L, it can reboot the box */
		pic16l_reboot();
		for (;;);
	}
#endif

#ifdef SMP
	cpuset_t map;
	u_int cnt;

	if (smp_started) {
		map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &map);
		CPU_NAND(&map, &stopped_cpus);
		if (!CPU_EMPTY(&map)) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);
		}

		if (PCPU_GET(cpuid) != 0) {
			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");

			/* Restart CPU #0. */
			/* XXX: restart_cpus(1 << 0); */
			CPU_SETOF(0, &started_cpus);
			wmb();

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			enable_intr();
			cpu_reset_proxy_active = 2;

			while (1);
			/* NOTREACHED */
		}

		DELAY(1000000);
	}
#endif
	cpu_reset_real();
	/* NOTREACHED */
}

static void
cpu_reset_real()
{
	struct region_descriptor null_idt;
#ifndef PC98
	int b;
#endif

	disable_intr();
#ifdef XEN
	if (smp_processor_id() == 0)
		HYPERVISOR_shutdown(SHUTDOWN_reboot);
	else
		HYPERVISOR_shutdown(SHUTDOWN_poweroff);
#endif
#ifdef CPU_ELAN
	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
#endif

	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
		outl(0xcfc, 0xf);
	}

#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);	/* SHUT0 = 0. */
		outb(0x37, 0x0b);	/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);	/* Reset. */
#else
#if !defined(BROKEN_KEYBOARD_RESET)
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
#endif

	/*
	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
	 * transitions from 0 to 1.  Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset.  We try a "hard" reset.  The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2.  The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * a reset.
	 */
	outb(0xcf9, 0x2);
	outb(0xcf9, 0x6);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	/*
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
	 * preserve bit 1 while setting bit 0.  We also must clear bit
	 * 0 before setting it if it isn't already clear.
	 */
	b = inb(0x92);
	if (b != 0xff) {
		if ((b & 0x1) != 0)
			outb(0x92, b & 0xfe);
		outb(0x92, b | 0x1);
		DELAY(500000);	/* wait 0.5 sec to see if that did it */
	}
#endif /* PC98 */

	printf("No known reset method worked, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */

	/* Wipe the IDT. */
	null_idt.rd_limit = 0;
	null_idt.rd_base = 0;
	lidt(&null_idt);

	/* "good night, sweet prince .... <THUNK!>" */
	breakpoint();

	/* NOTREACHED */
	while(1);
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
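
/*
 * The consumer pattern served by the sf_buf routines below, sketched from
 * the sf_buf(9) interface: a caller needing a temporary kernel mapping of
 * a vm_page does roughly
 *
 *	struct sf_buf *sf;
 *
 *	sf = sf_buf_alloc(m, 0);	(may sleep on the free list)
 *	bcopy((void *)sf_buf_kva(sf), buffer, PAGE_SIZE);
 *	sf_buf_free(sf);		(mapping stays cached)
 *
 * Because a free sf_buf keeps its last page mapped until the buffer is
 * recycled, re-allocating the same page is a hash hit that can skip the
 * PTE rewrite and most of the TLB traffic in sf_buf_alloc().
 */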

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly old) mapping of the page by an sf buffer exists.  Returns
 * TRUE when the mapping was found and the cache invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	boolean_t ret;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	ret = FALSE;
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			/*
			 * Use pmap_qenter to update the pte for
			 * existing mapping, in particular, the PAT
			 * settings are recalculated.
			 */
			pmap_qenter(sf->kva, &m, 1);
			pmap_invalidate_cache_range(sf->kva, sf->kva +
			    PAGE_SIZE, FALSE);
			ret = TRUE;
			break;
		}
	}
	mtx_unlock(&sf_buf_lock);
	return (ret);
}

/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	pt_entry_t opte, *ptep;
	struct sf_head *hash_list;
	struct sf_buf *sf;
#ifdef SMP
	cpuset_t other_cpus;
	u_int cpuid;
#endif
	int error;

	KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0,
	    ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned"));
	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
#ifdef SMP
			goto shootdown;
#else
			goto done;
#endif
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);

	/*
	 * Update the sf_buf's virtual-to-physical mapping, flushing the
	 * virtual address from the TLB.  Since the reference count for
	 * the sf_buf's old mapping was zero, that mapping is not
	 * currently in use.  Consequently, there is no need to exchange
	 * the old and new PTEs atomically, even under PAE.
	 */
	ptep = vtopte(sf->kva);
	opte = *ptep;
#ifdef XEN
	PT_SET_MA(sf->kva, xpmap_ptom(VM_PAGE_TO_PHYS(m)) | pgeflag
	    | PG_RW | PG_V | pmap_cache_bits(m->md.pat_mode, 0));
#else
	*ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V |
	    pmap_cache_bits(m->md.pat_mode, 0);
#endif

	/*
	 * Avoid unnecessary TLB invalidations: If the sf_buf's old
	 * virtual-to-physical mapping was not used, then any processor
	 * that has invalidated the sf_buf's virtual address from its TLB
	 * since the last used mapping need not invalidate again.
	 */
#ifdef SMP
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		CPU_ZERO(&sf->cpumask);
shootdown:
	sched_pin();
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
		CPU_SET(cpuid, &sf->cpumask);
		invlpg(sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_NAND(&other_cpus, &sf->cpumask);
		if (!CPU_EMPTY(&other_cpus)) {
			CPU_OR(&sf->cpumask, &other_cpus);
			smp_masked_invlpg(other_cpus, sf->kva);
		}
	}
	sched_unpin();
#else
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		pmap_invalidate_page(kernel_pmap, sf->kva);
#endif
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}
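
/*
 * Design note on sf->cpumask, as used in sf_buf_alloc() above (an
 * inference from the code, not an authoritative description): the mask
 * tracks which CPUs are known to hold a valid TLB entry for sf->kva.
 * Installing a new mapping whose old PTE was valid and accessed clears
 * the mask; CPUs then re-add themselves lazily, invalidating their own
 * TLB entry only on first use.  SFB_CPUPRIVATE callers, which must keep
 * the thread pinned, fix up only the local TLB and avoid the
 * smp_masked_invlpg() IPIs altogether.
 */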

/*
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero.  A freed sf_buf still,
 * however, retains its virtual-to-physical mapping until it is
 * recycled or reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
#ifdef XEN
		/*
		 * Xen doesn't like having dangling R/W mappings
		 */
		pmap_qremove(sf->kva, 1);
		sf->m = NULL;
		LIST_REMOVE(sf, list_entry);
#endif
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return 0;
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}