mp_machdep.c revision 268181
/*-
 * Copyright (c) 2001-2005 Marcel Moolenaar
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/mp_machdep.c 268181 2014-07-02 21:53:34Z marcel $");

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/uuid.h>

#include <machine/atomic.h>
#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/intr.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/pal.h>
#include <machine/pcb.h>
#include <machine/sal.h>
#include <machine/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

extern uint64_t bdata[];

extern int smp_disabled;

MALLOC_DEFINE(M_SMP, "SMP", "SMP related allocations");

void ia64_ap_startup(void);

#define	SAPIC_ID_GET_ID(x)	((u_int)((x) >> 8) & 0xff)
#define	SAPIC_ID_GET_EID(x)	((u_int)(x) & 0xff)
#define	SAPIC_ID_SET(id, eid)	((u_int)(((id) & 0xff) << 8) | ((eid) & 0xff))
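/*
 * For example, SAPIC_ID_SET(0x12, 0x34) packs ID 0x12 and EID 0x34 into
 * 0x1234, from which SAPIC_ID_GET_ID() recovers 0x12 and
 * SAPIC_ID_GET_EID() recovers 0x34.
 */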
/* State used to wake and bootstrap APs. */
struct ia64_ap_state ia64_ap_state;

int ia64_ipi_ast;
int ia64_ipi_hardclock;
int ia64_ipi_highfp;
int ia64_ipi_nmi;
int ia64_ipi_preempt;
int ia64_ipi_rndzvs;
int ia64_ipi_stop;

static u_int
sz2shft(uint64_t sz)
{
	uint64_t s;
	u_int shft;

	shft = 12;	/* Start with 4K */
	s = 1 << shft;
	while (s < sz) {
		shft++;
		s <<= 1;
	}
	return (shft);
}
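/*
 * A worked example: sz2shft(16384) returns 14 (16K == 1 << 14), and a
 * size that is not a power of two rounds up to the next power of two,
 * so sz2shft(12288) also returns 14.
 */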
static u_int
ia64_ih_ast(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_nasts);
	CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
	return (0);
}

static u_int
ia64_ih_hardclock(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_nhardclocks);
	CTR1(KTR_SMP, "IPI_HARDCLOCK, cpuid=%d", PCPU_GET(cpuid));
	hardclockintr();
	return (0);
}

static u_int
ia64_ih_highfp(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_nhighfps);
	ia64_highfp_save_ipi();
	return (0);
}

static u_int
ia64_ih_preempt(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_npreempts);
	CTR1(KTR_SMP, "IPI_PREEMPT, cpuid=%d", PCPU_GET(cpuid));
	sched_preempt(curthread);
	return (0);
}

static u_int
ia64_ih_rndzvs(struct thread *td, u_int xiv, struct trapframe *tf)
{

	PCPU_INC(md.stats.pcs_nrdvs);
	CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
	smp_rendezvous_action();
	return (0);
}

static u_int
ia64_ih_stop(struct thread *td, u_int xiv, struct trapframe *tf)
{
	u_int cpuid;

	PCPU_INC(md.stats.pcs_nstops);
	cpuid = PCPU_GET(cpuid);

	savectx(PCPU_PTR(md.pcb));

	CPU_SET_ATOMIC(cpuid, &stopped_cpus);
	while (!CPU_ISSET(cpuid, &started_cpus))
		cpu_spinwait();
	CPU_CLR_ATOMIC(cpuid, &started_cpus);
	CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
	return (0);
}

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}

static void
ia64_store_mca_state(void *arg)
{
	struct pcpu *pc = arg;
	struct thread *td = curthread;

	/*
	 * ia64_mca_save_state() is CPU-sensitive, so bind ourselves to our
	 * target CPU.
	 */
	thread_lock(td);
	sched_bind(td, pc->pc_cpuid);
	thread_unlock(td);

	ia64_mca_init_ap();

	/*
	 * Get and save the CPU-specific MCA records. Should we get the
	 * MCA state for each processor, or just the CMC state?
	 */
	ia64_mca_save_state(SAL_INFO_MCA);
	ia64_mca_save_state(SAL_INFO_CMC);

	kproc_exit(0);
}

void
ia64_ap_startup(void)
{
	uint64_t vhpt;

	ia64_ap_state.as_trace = 0x100;

	ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1);
	ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (LOG2_ID_PAGE_SIZE << 2));
	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (LOG2_ID_PAGE_SIZE << 2));
	ia64_srlz_d();

	pcpup = ia64_ap_state.as_pcpu;
	ia64_set_k4((intptr_t)pcpup);

	ia64_ap_state.as_trace = 0x108;

	vhpt = PCPU_GET(md.vhpt);
	map_vhpt(vhpt);
	ia64_set_pta(vhpt + (1 << 8) + (pmap_vhpt_log2size << 2) + 1);
	ia64_srlz_i();

	ia64_ap_state.as_trace = 0x110;

	ia64_ap_state.as_awake = 1;
	ia64_ap_state.as_delay = 0;

	map_pal_code();
	map_gateway_page();

	ia64_set_fpsr(IA64_FPSR_DEFAULT);

	/* Wait until it's time for us to be unleashed. */
	while (ia64_ap_state.as_spin)
		cpu_spinwait();

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	atomic_add_int(&ia64_ap_state.as_awake, 1);
	while (!smp_started)
		cpu_spinwait();

	CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid));

	cpu_initclocks();

	ia64_set_tpr(0);
	ia64_srlz_d();

	sched_throw(NULL);
	/* NOTREACHED */
}
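/*
 * A sketch of the wake-up handshake between ia64_ap_startup() above and
 * cpu_mp_start()/cpu_mp_unleash() below: the BSP registers
 * os_boot_rendez with SAL, fills ia64_ap_state with the PTEs and kernel
 * stack the AP needs, sets as_spin and sends ia64_ipi_wakeup.  The AP
 * records breadcrumbs in as_trace (reported if the wake-up fails), sets
 * as_awake once its MMU state is in place, and spins on as_spin.  When
 * cpu_mp_unleash() clears as_spin, each AP increments as_awake, and the
 * BSP waits for the count to reach smp_cpus before declaring SMP
 * started.
 */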
void
cpu_mp_setmaxid(void)
{

	/*
	 * Count the number of processors in the system by walking the ACPI
	 * tables. Note that we record the actual number of processors, even
	 * if this is larger than MAXCPU. We only activate MAXCPU processors.
	 */
	mp_ncpus = ia64_count_cpus();

	/*
	 * Set the largest cpuid we're going to use. This is necessary for
	 * VM initialization.
	 */
	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * If there's only 1 processor, or we don't have a wake-up vector,
	 * we're not going to enable SMP. Note that no wake-up vector can
	 * also mean that the wake-up mechanism is not supported. In this
	 * case we can have multiple processors, but we simply can't wake
	 * them up...
	 */
	return (mp_ncpus > 1 && ia64_ipi_wakeup != 0);
}

void
cpu_mp_add(u_int acpi_id, u_int id, u_int eid)
{
	struct pcpu *pc;
	void *dpcpu;
	u_int cpuid, sapic_id;

	if (smp_disabled)
		return;

	sapic_id = SAPIC_ID_SET(id, eid);
	cpuid = (IA64_LID_GET_SAPIC_ID(ia64_get_lid()) == sapic_id) ? 0 :
	    smp_cpus++;

	KASSERT(!CPU_ISSET(cpuid, &all_cpus),
	    ("%s: cpu%d already in CPU map", __func__, cpuid));

	if (cpuid != 0) {
		pc = (struct pcpu *)malloc(sizeof(*pc), M_SMP, M_WAITOK);
		pcpu_init(pc, cpuid, sizeof(*pc));
		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
		    M_WAITOK | M_ZERO);
		dpcpu_init(dpcpu, cpuid);
	} else
		pc = pcpup;

	cpu_pcpu_setup(pc, acpi_id, sapic_id);

	CPU_SET(pc->pc_cpuid, &all_cpus);
}

void
cpu_mp_announce(void)
{
	struct pcpu *pc;
	uint32_t sapic_id;
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		pc = pcpu_find(i);
		if (pc != NULL) {
			sapic_id = IA64_LID_GET_SAPIC_ID(pc->pc_md.lid);
			printf("cpu%d: ACPI Id=%x, SAPIC Id=%x, SAPIC Eid=%x",
			    i, pc->pc_acpi_id, SAPIC_ID_GET_ID(sapic_id),
			    SAPIC_ID_GET_EID(sapic_id));
			if (i == 0)
				printf(" (BSP)\n");
			else
				printf("\n");
		}
	}
}

void
cpu_mp_start(void)
{
	struct ia64_sal_result result;
	struct ia64_fdesc *fd;
	struct pcpu *pc;
	uintptr_t state;
	u_char *stp;

	state = ia64_tpa((uintptr_t)&ia64_ap_state);
	fd = (struct ia64_fdesc *) os_boot_rendez;
	result = ia64_sal_entry(SAL_SET_VECTORS, SAL_OS_BOOT_RENDEZ,
	    ia64_tpa(fd->func), state, 0, 0, 0, 0);

	ia64_ap_state.as_pgtbl_pte = PTE_PRESENT | PTE_MA_WB |
	    PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RW |
	    (bootinfo->bi_pbvm_pgtbl & PTE_PPN_MASK);
	ia64_ap_state.as_pgtbl_itir = sz2shft(bootinfo->bi_pbvm_pgtblsz) << 2;
	ia64_ap_state.as_text_va = IA64_PBVM_BASE;
	ia64_ap_state.as_text_pte = PTE_PRESENT | PTE_MA_WB |
	    PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RX |
	    (ia64_tpa(IA64_PBVM_BASE) & PTE_PPN_MASK);
	ia64_ap_state.as_text_itir = bootinfo->bi_text_mapped << 2;
	ia64_ap_state.as_data_va = (uintptr_t)bdata;
	ia64_ap_state.as_data_pte = PTE_PRESENT | PTE_MA_WB |
	    PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RW |
	    (ia64_tpa((uintptr_t)bdata) & PTE_PPN_MASK);
	ia64_ap_state.as_data_itir = bootinfo->bi_data_mapped << 2;

	/* Keep 'em spinning until we unleash them... */
	ia64_ap_state.as_spin = 1;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		pc->pc_md.current_pmap = kernel_pmap;
		/* The BSP is obviously running already. */
		if (pc->pc_cpuid == 0) {
			pc->pc_md.awake = 1;
			continue;
		}

		ia64_ap_state.as_pcpu = pc;
		pc->pc_md.vhpt = pmap_alloc_vhpt();
		if (pc->pc_md.vhpt == 0) {
			printf("SMP: WARNING: unable to allocate VHPT"
			    " for cpu%d", pc->pc_cpuid);
			continue;
		}

		stp = malloc(KSTACK_PAGES * PAGE_SIZE, M_SMP, M_WAITOK);
		ia64_ap_state.as_kstack = stp;
		ia64_ap_state.as_kstack_top = stp + KSTACK_PAGES * PAGE_SIZE;

		ia64_ap_state.as_trace = 0;
		ia64_ap_state.as_delay = 2000;
		ia64_ap_state.as_awake = 0;

		if (bootverbose)
			printf("SMP: waking up cpu%d\n", pc->pc_cpuid);

		/* Here she goes... */
		ipi_send(pc, ia64_ipi_wakeup);
		do {
			DELAY(1000);
		} while (--ia64_ap_state.as_delay > 0);

		pc->pc_md.awake = ia64_ap_state.as_awake;

		if (!ia64_ap_state.as_awake) {
			printf("SMP: WARNING: cpu%d did not wake up (code "
			    "%#lx)\n", pc->pc_cpuid,
			    ia64_ap_state.as_trace - state);
		}
	}
}

static void
cpu_mp_unleash(void *dummy)
{
	struct pcpu *pc;
	int cpus;

	if (mp_ncpus <= 1)
		return;

	/* Allocate XIVs for IPIs. */
	ia64_ipi_ast = ia64_xiv_alloc(PI_DULL, IA64_XIV_IPI, ia64_ih_ast);
	ia64_ipi_hardclock = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI,
	    ia64_ih_hardclock);
	ia64_ipi_highfp = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_highfp);
	ia64_ipi_preempt = ia64_xiv_alloc(PI_SOFT, IA64_XIV_IPI,
	    ia64_ih_preempt);
	ia64_ipi_rndzvs = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_rndzvs);
	ia64_ipi_stop = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI, ia64_ih_stop);

	/* Reserve the NMI vector for IPI_STOP_HARD if possible. */
	ia64_ipi_nmi = (ia64_xiv_reserve(2, IA64_XIV_IPI, ia64_ih_stop) != 0)
	    ? ia64_ipi_stop : 0x400;	/* DM=NMI, Vector=n/a */

	cpus = 0;
	smp_cpus = 0;
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpus++;
		if (pc->pc_md.awake) {
			kproc_create(ia64_store_mca_state, pc, NULL, 0, 0,
			    "mca %u", pc->pc_cpuid);
			smp_cpus++;
		}
	}

	ia64_ap_state.as_awake = 1;
	ia64_ap_state.as_spin = 0;

	while (ia64_ap_state.as_awake != smp_cpus)
		cpu_spinwait();

	if (smp_cpus != cpus || cpus != mp_ncpus) {
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
		    mp_ncpus, cpus, smp_cpus);
	}

	/* XXX Atomic set operation? */
	smp_started = 1;

	/*
	 * Now that all CPUs are up and running, bind interrupts to each of
	 * them.
	 */
	ia64_bind_intr();
}
SYSINIT(start_aps, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, cpu_mp_unleash, NULL);
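/*
 * The routines below all funnel into ipi_send(), which delivers an IPI
 * by storing the external interrupt vector (XIV) into the target
 * processor's slot in the processor interrupt block; the slot is
 * selected by the SAPIC ID taken from the target's pc_md.lid, and the
 * memory fences order the store relative to surrounding memory traffic.
 */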
/*
 * Send an IPI to a set of CPUs.
 */
void
ipi_selected(cpuset_t cpus, int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (CPU_ISSET(pc->pc_cpuid, &cpus))
			ipi_send(pc, ipi);
	}
}

/*
 * Send an IPI to a specific CPU.
 */
void
ipi_cpu(int cpu, u_int ipi)
{

	ipi_send(cpuid_to_pcpu[cpu], ipi);
}

/*
 * Send an IPI to all CPUs EXCEPT myself.
 */
void
ipi_all_but_self(int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup)
			ipi_send(pc, ipi);
	}
}

/*
 * Send an IPI to the specified processor.
 */
void
ipi_send(struct pcpu *cpu, int xiv)
{
	u_int sapic_id;

	KASSERT(xiv != 0, ("ipi_send"));

	sapic_id = IA64_LID_GET_SAPIC_ID(cpu->pc_md.lid);

	ia64_mf();
	ia64_st8(&(ia64_pib->ib_ipi[sapic_id][0]), xiv);
	ia64_mf_a();
	CTR3(KTR_SMP, "ipi_send(%p, %d): cpuid=%d", cpu, xiv, PCPU_GET(cpuid));
}
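/*
 * Illustrative usage: posting an AST to CPU 1 can be written as either
 * of the equivalent calls below, since ipi_cpu() simply resolves the
 * cpuid to its pcpu before calling ipi_send():
 *
 *	ipi_cpu(1, ia64_ipi_ast);
 *	ipi_send(cpuid_to_pcpu[1], ia64_ipi_ast);
 */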