mp_machdep.c revision 261985
/*-
 * Copyright (c) 2001-2005 Marcel Moolenaar
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/mp_machdep.c 261985 2014-02-16 19:12:50Z marcel $");

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/uuid.h>

#include <machine/atomic.h>
#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/intr.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/pal.h>
#include <machine/pcb.h>
#include <machine/sal.h>
#include <machine/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

extern uint64_t bdata[];

MALLOC_DEFINE(M_SMP, "SMP", "SMP related allocations");

void ia64_ap_startup(void);

#define	SAPIC_ID_GET_ID(x)	((u_int)((x) >> 8) & 0xff)
#define	SAPIC_ID_GET_EID(x)	((u_int)(x) & 0xff)
#define	SAPIC_ID_SET(id, eid)	((u_int)(((id) & 0xff) << 8) | ((eid) & 0xff))
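
/*
 * Worked example of the id/eid packing above: for a processor with
 * SAPIC id=1 and eid=0, SAPIC_ID_SET(1, 0) yields 0x100, and unpacking
 * gives SAPIC_ID_GET_ID(0x100) == 1 and SAPIC_ID_GET_EID(0x100) == 0.
 * The packed value matches what IA64_LID_GET_SAPIC_ID() extracts from
 * the local LID register in cpu_mp_add() and ipi_send() below.
 */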

/* State used to wake and bootstrap APs. */
struct ia64_ap_state ia64_ap_state;

int ia64_ipi_ast;
int ia64_ipi_hardclock;
int ia64_ipi_highfp;
int ia64_ipi_nmi;
int ia64_ipi_preempt;
int ia64_ipi_rndzvs;
int ia64_ipi_stop;

static u_int
sz2shft(uint64_t sz)
{
	uint64_t s;
	u_int shft;

	shft = 12;	/* Start with 4K */
	s = 1 << shft;
	while (s < sz) {
		shft++;
		s <<= 1;
	}
	return (shft);
}

static u_int
ia64_ih_ast(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_nasts);
	CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
	return (0);
}

static u_int
ia64_ih_hardclock(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_nhardclocks);
	CTR1(KTR_SMP, "IPI_HARDCLOCK, cpuid=%d", PCPU_GET(cpuid));
	hardclockintr();
	return (0);
}

static u_int
ia64_ih_highfp(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_nhighfps);
	ia64_highfp_save_ipi();
	return (0);
}

static u_int
ia64_ih_preempt(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_npreempts);
	CTR1(KTR_SMP, "IPI_PREEMPT, cpuid=%d", PCPU_GET(cpuid));
	sched_preempt(curthread);
	return (0);
}

static u_int
ia64_ih_rndzvs(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_nrdvs);
	CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
	smp_rendezvous_action();
	return (0);
}

static u_int
ia64_ih_stop(struct thread *td, u_int xiv, struct trapframe *tf)
{
	u_int cpuid;

	PCPU_INC(md.stats.pcs_nstops);
	cpuid = PCPU_GET(cpuid);

	savectx(PCPU_PTR(md.pcb));

	CPU_SET_ATOMIC(cpuid, &stopped_cpus);
	while (!CPU_ISSET(cpuid, &started_cpus))
		cpu_spinwait();
	CPU_CLR_ATOMIC(cpuid, &started_cpus);
	CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
	return (0);
}

struct cpu_group *
cpu_topo(void)
{

	return smp_topo_none();
}

static void
ia64_store_mca_state(void* arg)
{
	struct pcpu *pc = arg;
	struct thread *td = curthread;

	/*
	 * ia64_mca_save_state() is CPU-sensitive, so bind ourself to our
	 * target CPU.
	 */
	thread_lock(td);
	sched_bind(td, pc->pc_cpuid);
	thread_unlock(td);

	ia64_mca_init_ap();

	/*
	 * Get and save the CPU specific MCA records. Should we get the
	 * MCA state for each processor, or just the CMC state?
	 */
	ia64_mca_save_state(SAL_INFO_MCA);
	ia64_mca_save_state(SAL_INFO_CMC);

	kproc_exit(0);
}
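
/*
 * AP bootstrap handshake (summary of the flow below): cpu_mp_start()
 * fills ia64_ap_state with per-CPU parameters and sends ia64_ipi_wakeup.
 * The woken AP checkpoints its progress in as_trace (0x100, 0x108 and
 * 0x110 mark successive stages of ia64_ap_startup()), sets as_awake,
 * and then spins on as_spin until cpu_mp_unleash() releases all APs at
 * SI_SUB_KICK_SCHEDULER time, after which each AP increments as_awake
 * once more and enters the scheduler through sched_throw().
 */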

void
ia64_ap_startup(void)
{
	uint64_t vhpt;

	ia64_ap_state.as_trace = 0x100;

	ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1);
	ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (LOG2_ID_PAGE_SIZE << 2));
	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (LOG2_ID_PAGE_SIZE << 2));
	ia64_srlz_d();

	pcpup = ia64_ap_state.as_pcpu;
	ia64_set_k4((intptr_t)pcpup);

	ia64_ap_state.as_trace = 0x108;

	vhpt = PCPU_GET(md.vhpt);
	map_vhpt(vhpt);
	ia64_set_pta(vhpt + (1 << 8) + (pmap_vhpt_log2size << 2) + 1);
	ia64_srlz_i();

	ia64_ap_state.as_trace = 0x110;

	ia64_ap_state.as_awake = 1;
	ia64_ap_state.as_delay = 0;

	map_pal_code();
	map_gateway_page();

	ia64_set_fpsr(IA64_FPSR_DEFAULT);

	/* Wait until it's time for us to be unleashed */
	while (ia64_ap_state.as_spin)
		cpu_spinwait();

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	atomic_add_int(&ia64_ap_state.as_awake, 1);
	while (!smp_started)
		cpu_spinwait();

	CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid));

	cpu_initclocks();

	ia64_set_tpr(0);
	ia64_srlz_d();

	ia64_enable_intr();

	sched_throw(NULL);
	/* NOTREACHED */
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * Count the number of processors in the system by walking the ACPI
	 * tables. Note that we record the actual number of processors, even
	 * if this is larger than MAXCPU. We only activate MAXCPU processors.
	 */
	mp_ncpus = ia64_count_cpus();

	/*
	 * Set the largest cpuid we're going to use. This is necessary for
	 * VM initialization.
	 */
	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * If there's only 1 processor, or we don't have a wake-up vector,
	 * we're not going to enable SMP. Note that no wake-up vector can
	 * also mean that the wake-up mechanism is not supported. In this
	 * case we can have multiple processors, but we simply can't wake
	 * them up...
	 */
	return (mp_ncpus > 1 && ia64_ipi_wakeup != 0);
}
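
/*
 * cpu_mp_add() below is invoked once for every processor discovered in
 * the ACPI tables. The processor whose packed SAPIC id/eid matches the
 * local LID register is the BSP and claims cpuid 0; every other
 * processor is numbered from smp_cpus in discovery order and is given
 * its own pcpu structure and dynamic per-CPU (DPCPU) area.
 */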

void
cpu_mp_add(u_int acpi_id, u_int id, u_int eid)
{
	struct pcpu *pc;
	void *dpcpu;
	u_int cpuid, sapic_id;

	sapic_id = SAPIC_ID_SET(id, eid);
	cpuid = (IA64_LID_GET_SAPIC_ID(ia64_get_lid()) == sapic_id)
	    ? 0 : smp_cpus++;

	KASSERT(!CPU_ISSET(cpuid, &all_cpus),
	    ("%s: cpu%d already in CPU map", __func__, acpi_id));

	if (cpuid != 0) {
		pc = (struct pcpu *)malloc(sizeof(*pc), M_SMP, M_WAITOK);
		pcpu_init(pc, cpuid, sizeof(*pc));
		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
		    M_WAITOK | M_ZERO);
		dpcpu_init(dpcpu, cpuid);
	} else
		pc = pcpup;

	cpu_pcpu_setup(pc, acpi_id, sapic_id);

	CPU_SET(pc->pc_cpuid, &all_cpus);
}

void
cpu_mp_announce()
{
	struct pcpu *pc;
	uint32_t sapic_id;
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		pc = pcpu_find(i);
		if (pc != NULL) {
			sapic_id = IA64_LID_GET_SAPIC_ID(pc->pc_md.lid);
			printf("cpu%d: ACPI Id=%x, SAPIC Id=%x, SAPIC Eid=%x",
			    i, pc->pc_acpi_id, SAPIC_ID_GET_ID(sapic_id),
			    SAPIC_ID_GET_EID(sapic_id));
			if (i == 0)
				printf(" (BSP)\n");
			else
				printf("\n");
		}
	}
}
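
/*
 * Wake-up protocol for cpu_mp_start() below: os_boot_rendez is
 * registered with the SAL via SAL_SET_VECTORS, with the physical address
 * of ia64_ap_state as its argument. The as_*_pte/as_*_itir pairs
 * describe the translations an AP must insert before it can run kernel
 * code: the PBVM page table, the kernel text (read/execute) and the
 * kernel data (read/write) mappings. Each AP then gets a bounded wake-up
 * window: as_delay starts at 2000 and is decremented in 1ms steps, so
 * the BSP waits roughly two seconds before declaring the AP lost.
 */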

void
cpu_mp_start()
{
	struct ia64_sal_result result;
	struct ia64_fdesc *fd;
	struct pcpu *pc;
	uintptr_t state;
	u_char *stp;

	state = ia64_tpa((uintptr_t)&ia64_ap_state);
	fd = (struct ia64_fdesc *) os_boot_rendez;
	result = ia64_sal_entry(SAL_SET_VECTORS, SAL_OS_BOOT_RENDEZ,
	    ia64_tpa(fd->func), state, 0, 0, 0, 0);

	ia64_ap_state.as_pgtbl_pte = PTE_PRESENT | PTE_MA_WB |
	    PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RW |
	    (bootinfo->bi_pbvm_pgtbl & PTE_PPN_MASK);
	ia64_ap_state.as_pgtbl_itir = sz2shft(bootinfo->bi_pbvm_pgtblsz) << 2;
	ia64_ap_state.as_text_va = IA64_PBVM_BASE;
	ia64_ap_state.as_text_pte = PTE_PRESENT | PTE_MA_WB |
	    PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RX |
	    (ia64_tpa(IA64_PBVM_BASE) & PTE_PPN_MASK);
	ia64_ap_state.as_text_itir = bootinfo->bi_text_mapped << 2;
	ia64_ap_state.as_data_va = (uintptr_t)bdata;
	ia64_ap_state.as_data_pte = PTE_PRESENT | PTE_MA_WB |
	    PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RW |
	    (ia64_tpa((uintptr_t)bdata) & PTE_PPN_MASK);
	ia64_ap_state.as_data_itir = bootinfo->bi_data_mapped << 2;

	/* Keep 'em spinning until we unleash them... */
	ia64_ap_state.as_spin = 1;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		pc->pc_md.current_pmap = kernel_pmap;
		/* The BSP is obviously running already. */
		if (pc->pc_cpuid == 0) {
			pc->pc_md.awake = 1;
			continue;
		}

		ia64_ap_state.as_pcpu = pc;
		pc->pc_md.vhpt = pmap_alloc_vhpt();
		if (pc->pc_md.vhpt == 0) {
			printf("SMP: WARNING: unable to allocate VHPT"
			    " for cpu%d", pc->pc_cpuid);
			continue;
		}

		stp = malloc(KSTACK_PAGES * PAGE_SIZE, M_SMP, M_WAITOK);
		ia64_ap_state.as_kstack = stp;
		ia64_ap_state.as_kstack_top = stp + KSTACK_PAGES * PAGE_SIZE;

		ia64_ap_state.as_trace = 0;
		ia64_ap_state.as_delay = 2000;
		ia64_ap_state.as_awake = 0;

		if (bootverbose)
			printf("SMP: waking up cpu%d\n", pc->pc_cpuid);

		/* Here she goes... */
		ipi_send(pc, ia64_ipi_wakeup);
		do {
			DELAY(1000);
		} while (--ia64_ap_state.as_delay > 0);

		pc->pc_md.awake = ia64_ap_state.as_awake;

		if (!ia64_ap_state.as_awake) {
			printf("SMP: WARNING: cpu%d did not wake up (code "
			    "%#lx)\n", pc->pc_cpuid,
			    ia64_ap_state.as_trace - state);
		}
	}
}

static void
cpu_mp_unleash(void *dummy)
{
	struct pcpu *pc;
	int cpus;

	if (mp_ncpus <= 1)
		return;

	/* Allocate XIVs for IPIs */
	ia64_ipi_ast = ia64_xiv_alloc(PI_DULL, IA64_XIV_IPI, ia64_ih_ast);
	ia64_ipi_hardclock = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI,
	    ia64_ih_hardclock);
	ia64_ipi_highfp = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_highfp);
	ia64_ipi_preempt = ia64_xiv_alloc(PI_SOFT, IA64_XIV_IPI,
	    ia64_ih_preempt);
	ia64_ipi_rndzvs = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_rndzvs);
	ia64_ipi_stop = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI, ia64_ih_stop);

	/* Reserve the NMI vector for IPI_STOP_HARD if possible */
	ia64_ipi_nmi = (ia64_xiv_reserve(2, IA64_XIV_IPI, ia64_ih_stop) != 0)
	    ? ia64_ipi_stop : 0x400;	/* DM=NMI, Vector=n/a */

	cpus = 0;
	smp_cpus = 0;
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpus++;
		if (pc->pc_md.awake) {
			kproc_create(ia64_store_mca_state, pc, NULL, 0, 0,
			    "mca %u", pc->pc_cpuid);
			smp_cpus++;
		}
	}

	ia64_ap_state.as_awake = 1;
	ia64_ap_state.as_spin = 0;

	while (ia64_ap_state.as_awake != smp_cpus)
		cpu_spinwait();

	if (smp_cpus != cpus || cpus != mp_ncpus) {
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
		    mp_ncpus, cpus, smp_cpus);
	}

	smp_active = 1;
	smp_started = 1;

	/*
	 * Now that all CPUs are up and running, bind interrupts to each of
	 * them.
	 */
	ia64_bind_intr();
}
SYSINIT(start_aps, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, cpu_mp_unleash, NULL);

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(cpuset_t cpus, int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (CPU_ISSET(pc->pc_cpuid, &cpus))
			ipi_send(pc, ipi);
	}
}

/*
 * send an IPI to a specific CPU.
 */
void
ipi_cpu(int cpu, u_int ipi)
{

	ipi_send(cpuid_to_pcpu[cpu], ipi);
}

/*
 * send an IPI to all CPUs EXCEPT myself.
 */
void
ipi_all_but_self(int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup)
			ipi_send(pc, ipi);
	}
}

/*
 * Send an IPI to the specified processor.
 */
void
ipi_send(struct pcpu *cpu, int xiv)
{
	u_int sapic_id;

	KASSERT(xiv != 0, ("ipi_send"));

	sapic_id = IA64_LID_GET_SAPIC_ID(cpu->pc_md.lid);

	ia64_mf();
	ia64_st8(&(ia64_pib->ib_ipi[sapic_id][0]), xiv);
	ia64_mf_a();
	CTR3(KTR_SMP, "ipi_send(%p, %d): cpuid=%d", cpu, xiv, PCPU_GET(cpuid));
}
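
/*
 * Usage sketch (illustrative only; the IPI numbers come from the XIVs
 * allocated in cpu_mp_unleash() above):
 *
 *	ipi_cpu(1, ia64_ipi_ast);		poke cpu1's AST handler
 *	ipi_all_but_self(ia64_ipi_stop);	park all other CPUs
 *
 * A CPU parked by ia64_ih_stop() sits in stopped_cpus until the
 * initiator sets its bit in started_cpus. The MI stop_cpus() and
 * restart_cpus() wrappers drive that handshake, so most callers go
 * through them (or the machine/smp.h IPI_* aliases) rather than calling
 * these functions directly.
 */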