xlp_machdep.c revision 331722
1/*- 2 * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights 3 * reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in 13 * the documentation and/or other materials provided with the 14 * distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE 20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 26 * THE POSSIBILITY OF SUCH DAMAGE. 
27 * 28 * NETLOGIC_BSD */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: stable/11/sys/mips/nlm/xlp_machdep.c 331722 2018-03-29 02:50:57Z eadler $"); 32 33#include "opt_ddb.h" 34#include "opt_platform.h" 35 36#include <sys/param.h> 37#include <sys/bus.h> 38#include <sys/conf.h> 39#include <sys/rtprio.h> 40#include <sys/systm.h> 41#include <sys/interrupt.h> 42#include <sys/limits.h> 43#include <sys/lock.h> 44#include <sys/malloc.h> 45#include <sys/mutex.h> 46#include <sys/random.h> 47 48#include <sys/cons.h> /* cinit() */ 49#include <sys/kdb.h> 50#include <sys/reboot.h> 51#include <sys/queue.h> 52#include <sys/smp.h> 53#include <sys/timetc.h> 54 55#include <vm/vm.h> 56#include <vm/vm_page.h> 57 58#include <machine/cpu.h> 59#include <machine/cpufunc.h> 60#include <machine/cpuinfo.h> 61#include <machine/tlb.h> 62#include <machine/cpuregs.h> 63#include <machine/frame.h> 64#include <machine/hwfunc.h> 65#include <machine/md_var.h> 66#include <machine/asm.h> 67#include <machine/pmap.h> 68#include <machine/trap.h> 69#include <machine/clock.h> 70#include <machine/fls64.h> 71#include <machine/intr_machdep.h> 72#include <machine/smp.h> 73 74#include <mips/nlm/hal/mips-extns.h> 75#include <mips/nlm/hal/haldefs.h> 76#include <mips/nlm/hal/iomap.h> 77#include <mips/nlm/hal/sys.h> 78#include <mips/nlm/hal/pic.h> 79#include <mips/nlm/hal/uart.h> 80#include <mips/nlm/hal/mmu.h> 81#include <mips/nlm/hal/bridge.h> 82#include <mips/nlm/hal/cpucontrol.h> 83#include <mips/nlm/hal/cop2.h> 84 85#include <mips/nlm/clock.h> 86#include <mips/nlm/interrupt.h> 87#include <mips/nlm/board.h> 88#include <mips/nlm/xlp.h> 89#include <mips/nlm/msgring.h> 90 91#ifdef FDT 92#include <dev/fdt/fdt_common.h> 93#include <dev/ofw/openfirm.h> 94#endif 95 96/* 4KB static data aread to keep a copy of the bootload env until 97 the dynamic kenv is setup */ 98char boot1_env[4096]; 99 100uint64_t xlp_cpu_frequency; 101uint64_t xlp_io_base = MIPS_PHYS_TO_DIRECT_UNCACHED(XLP_DEFAULT_IO_BASE); 102 103int 
xlp_ncores;
int	xlp_threads_per_core;
uint32_t xlp_hw_thread_mask;	/* bit per hw thread: 4 threads/core, core i owns bits [4i..4i+3] */
int	xlp_cpuid_to_hwtid[MAXCPU];	/* FreeBSD cpuid -> hardware thread id (-1 if unused) */
int	xlp_hwtid_to_cpuid[MAXCPU];	/* hardware thread id -> FreeBSD cpuid (-1 if unused) */
uint64_t xlp_pic_base;

/* MMU partitioning mode derived from threads/core; consumed by xlp_enable_threads() */
static int xlp_mmuval;

extern uint32_t _end;
extern char XLPResetEntry[], XLPResetEntryEnd[];

/*
 * Per-core setup: program the LSU and scheduler defeature control
 * registers.  Enables unaligned access and L2HPE, speculative unmapped
 * L2 requests (SUE), and applies A0-silicon errata workarounds
 * (clear S1RCM, stop BRU accepting ALU ops).
 */
static void
xlp_setup_core(void)
{
	uint64_t reg;

	reg = nlm_mfcr(LSU_DEFEATURE);
	/* Enable Unaligned and L2HPE */
	reg |= (1 << 30) | (1 << 23);
	/*
	 * Experimental : Enable SUE
	 * Speculative Unmap Enable. Enable speculative L2 cache request for
	 * unmapped access.
	 */
	reg |= (1ull << 31);
	/* Clear S1RCM - A0 errata */
	reg &= ~0xeull;
	nlm_mtcr(LSU_DEFEATURE, reg);

	reg = nlm_mfcr(SCHED_DEFEATURE);
	/* Experimental: Disable BRU accepting ALU ops - A0 errata */
	reg |= (1 << 24);
	nlm_mtcr(SCHED_DEFEATURE, reg);
}

/*
 * Per-thread MMU setup.  Thread 0 of each core additionally configures
 * the core-wide extended TLB features (extended pagemask, large
 * variable TLB, extended TLB, MMU partitioning).  Every thread then
 * turns on RIE/XIE/ELPA in the PageGrain register.
 */
static void
xlp_setup_mmu(void)
{
	uint32_t pagegrain;

	if (nlm_threadid() == 0) {
		nlm_setup_extended_pagemask(0);
		nlm_large_variable_tlb_en(1);
		nlm_extended_tlb_en(1);
		nlm_mmu_setup(0, 0, 0);
	}

	/* Enable no-read, no-exec, large-physical-address */
	pagegrain = mips_rd_pagegrain();
	pagegrain |= (1U << 31) |	/* RIE */
	    (1 << 30) |			/* XIE */
	    (1 << 29);			/* ELPA */
	mips_wr_pagegrain(pagegrain);
}

/*
 * Enable the RSA block on every node whose SYS block is present.
 */
static void
xlp_enable_blocks(void)
{
	uint64_t sysbase;
	int i;

	for (i = 0; i < XLP_MAX_NODES; i++) {
		if (!nlm_dev_exists(XLP_IO_SYS_OFFSET(i)))
			continue;
		sysbase = nlm_get_sys_regbase(i);
		nlm_sys_enable_block(sysbase, DFS_DEVICE_RSA);
	}
}

/*
 * Derive the CPU topology from the boot-supplied hw thread mask and the
 * SYS block reset register: sets xlp_ncores, xlp_threads_per_core,
 * xlp_mmuval and the cpuid<->hwtid maps.  Only 1, 2 or 4 threads per
 * core are supported, and every populated core must use the same thread
 * mask as core 0; anything else panics.
 */
static void
xlp_parse_mmu_options(void)
{
	uint64_t sysbase;
	uint32_t cpu_map = xlp_hw_thread_mask;
	uint32_t core0_thr_mask, core_thr_mask, cpu_rst_mask;
	int i, j, k;

#ifdef SMP
	if (cpu_map == 0)
		cpu_map = 0xffffffff;
#else /* Uniprocessor! */
	if (cpu_map == 0)
		cpu_map = 0x1;
	else if (cpu_map != 0x1) {
		printf("WARNING: Starting uniprocessor kernel on cpumask [0x%lx]!\n"
		    "WARNING: Other CPUs will be unused.\n", (u_long)cpu_map);
		cpu_map = 0x1;
	}
#endif

	xlp_ncores = 1;
	core0_thr_mask = cpu_map & 0xf;
	switch (core0_thr_mask) {
	case 1:
		xlp_threads_per_core = 1;
		xlp_mmuval = 0;
		break;
	case 3:
		xlp_threads_per_core = 2;
		xlp_mmuval = 2;
		break;
	case 0xf:
		xlp_threads_per_core = 4;
		xlp_mmuval = 3;
		break;
	default:
		goto unsupp;
	}

	/* Try to find the enabled cores from SYS block */
	sysbase = nlm_get_sys_regbase(0);
	cpu_rst_mask = nlm_read_sys_reg(sysbase, SYS_CPU_RESET) & 0xff;

	/* XLP 416 does not report this correctly, fix */
	if (nlm_processor_id() == CHIP_PROCESSOR_ID_XLP_416)
		cpu_rst_mask = 0xe;

	/* Take out cores which do not exist on chip */
	for (i = 1; i < XLP_MAX_CORES; i++) {
		if ((cpu_rst_mask & (1 << i)) == 0)
			cpu_map &= ~(0xfu << (4 * i));
	}

	/* Verify other cores' CPU masks */
	for (i = 1; i < XLP_MAX_CORES; i++) {
		core_thr_mask = (cpu_map >> (4 * i)) & 0xf;
		if (core_thr_mask == 0)
			continue;
		if (core_thr_mask != core0_thr_mask)
			goto unsupp;
		xlp_ncores++;
	}

	xlp_hw_thread_mask = cpu_map;
	/* setup hardware processor id to cpu id mapping */
	for (i = 0; i< MAXCPU; i++)
		xlp_cpuid_to_hwtid[i] =
		    xlp_hwtid_to_cpuid[i] = -1;
	/* assign cpuids densely, in hwtid order, over enabled threads only */
	for (i = 0, k = 0; i < XLP_MAX_CORES; i++) {
		if (((cpu_map >> (i * 4)) & 0xf) == 0)
			continue;
		for (j = 0; j < xlp_threads_per_core; j++) {
			xlp_cpuid_to_hwtid[k] = i * 4 + j;
			xlp_hwtid_to_cpuid[i * 4 + j] = k;
			k++;
		}
	}

	return;

unsupp:
	printf("ERROR : Unsupported CPU mask [use 1,2 or 4 threads per core].\n"
	    "\tcore0 thread mask [%lx], boot cpu mask [%lx].\n",
	    (u_long)core0_thr_mask, (u_long)cpu_map);
	panic("Invalid CPU mask - halting.\n");
	return;
}

/* Parse cmd line args as env - copied from ar71xx */
static void
xlp_parse_bootargs(char *cmdline)
{
	char *n, *v;

	while ((v = strsep(&cmdline, " \n")) != NULL) {
		if (*v == '\0')
			continue;
		if (*v == '-') {
			/* dash-option cluster: map each letter to a boothowto flag */
			while (*v != '\0') {
				v++;
				switch (*v) {
				case 'a': boothowto |= RB_ASKNAME; break;
				case 'd': boothowto |= RB_KDB; break;
				case 'g': boothowto |= RB_GDB; break;
				case 's': boothowto |= RB_SINGLE; break;
				case 'v': boothowto |= RB_VERBOSE; break;
				}
			}
		} else {
			/* "name=value" sets env var; bare "name" defaults to "1" */
			n = strsep(&v, "=");
			if (v == NULL)
				kern_setenv(n, "1");
			else
				kern_setenv(n, v);
		}
	}
}

#ifdef FDT
/*
 * FDT flavor: arg is the device tree blob pointer.  Installs and
 * initializes OpenFirmware over the FDT, then pulls "cpumask" and
 * "bootargs" from the /chosen node.
 */
static void
xlp_bootargs_init(__register_t arg)
{
	char buf[2048]; /* early stack is big enough */
	void *dtbp;
	phandle_t chosen;
	ihandle_t mask;

	dtbp = (void *)(intptr_t)arg;
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not passed as argument try
	 * to use the statically embedded one.
	 */
	if (dtbp == NULL)
		dtbp = &fdt_static_dtb;
#endif
	/* no console yet: on OFW setup failure just spin */
	if (OF_install(OFW_FDT, 0) == FALSE)
		while (1);
	if (OF_init((void *)dtbp) != 0)
		while (1);
	OF_interpret("perform-fixup", 0);

	chosen = OF_finddevice("/chosen");
	if (OF_getprop(chosen, "cpumask", &mask, sizeof(mask)) != -1) {
		xlp_hw_thread_mask = mask;
	}

	if (OF_getprop(chosen, "bootargs", buf, sizeof(buf)) != -1)
		xlp_parse_bootargs(buf);
}
#else
/*
 * arg is a pointer to the environment block, the format of the block is
 * a=xyz\0b=pqr\0\0
 */
static void
xlp_bootargs_init(__register_t arg)
{
	char buf[2048]; /* early stack is big enough */
	char *p, *v, *n;
	uint32_t mask;

	/*
	 * provide backward compat for passing cpu mask as arg
	 * (an odd value cannot be a valid pointer, so treat it as the mask)
	 */
	if (arg & 1) {
		xlp_hw_thread_mask = arg;
		return;
	}

	/* copy each "name[=value]" entry into the kernel environment */
	p = (void *)(intptr_t)arg;
	while (*p != '\0') {
		strlcpy(buf, p, sizeof(buf));
		v = buf;
		n = strsep(&v, "=");
		if (v == NULL)
			kern_setenv(n, "1");
		else
			kern_setenv(n, v);
		p += strlen(p) + 1;
	}

	/* CPU mask can be passed thru env */
	if (getenv_uint("cpumask", &mask) != 0)
		xlp_hw_thread_mask = mask;

	/* command line argument */
	v = kern_getenv("bootargs");
	if (v != NULL) {
		strlcpy(buf, v, sizeof(buf));
		xlp_parse_bootargs(buf);
		freeenv(v);
	}
}
#endif

/*
 * Generic MIPS kernel bring-up: parameters, CPU/pmap/proc0/mutex init,
 * and (with DDB) an early debugger entry if -d was given.
 */
static void
mips_init(void)
{
	init_param1();
	init_param2(physmem);

	mips_cpu_init();
	cpuinfo.cache_coherent_dma = TRUE;
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB) {
		kdb_enter("Boot flags requested debugger", NULL);
	}
#endif
}

/*
 * Timecounter read routine.  The PIC timer counts down, so the value is
 * inverted to present a monotonically increasing count.
 */
unsigned int
platform_get_timecount(struct timecounter *tc __unused)
{
	uint64_t count = nlm_pic_read_timer(xlp_pic_base, PIC_CLOCK_TIMER);

	return (unsigned int)~count;
}

static void
xlp_pic_init(void)
{
	struct timecounter pic_timecounter = {
		platform_get_timecount, /* get_timecount */
		0,                      /* no poll_pps */
		~0U,                    /* counter_mask */
		XLP_IO_CLK,             /* frequency */
		"XLRPIC",               /* name */
		2000,                   /* quality (adjusted in code) */
	};
	int i;
	int maxirt;

	xlp_pic_base = nlm_get_pic_regbase(0);  /* TODO: Add other nodes */
	maxirt = nlm_read_reg(nlm_get_pic_pcibase(nlm_nodeid()),
	    XLP_PCI_DEVINFO_REG0);
	printf("Initializing PIC...@%jx %d IRTs\n", (uintmax_t)xlp_pic_base,
	    maxirt);
	/* Bind all PIC irqs to cpu 0 */
	for (i = 0; i < maxirt; i++)
		nlm_pic_write_irt(xlp_pic_base, i, 0, 0, 1, 0,
		    1, 0, 0x1);

	/* free-running countdown timer used as the system timecounter */
	nlm_pic_set_timer(xlp_pic_base, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
	platform_timecounter = &pic_timecounter;
}

/*
 * Highest usable physical address: 64GB with 64-bit physaddrs (2GB on
 * the simulator), just under 4GB otherwise.
 */
#if defined(__mips_n32) || defined(__mips_n64) /* PHYSADDR_64_BIT */
#ifdef XLP_SIM
#define	XLP_MEM_LIM	0x200000000ULL
#else
#define	XLP_MEM_LIM	0x10000000000ULL
#endif
#else
#define	XLP_MEM_LIM	0xfffff000UL
#endif

/*
 * Physical ranges [start, end) that must never be handed to the VM
 * system.  Entries are sorted, non-overlapping start/end pairs.
 */
static vm_paddr_t xlp_mem_excl[] = {
	0,          0,          /* for kernel image region, see xlp_mem_init */
	0x0c000000, 0x14000000, /* uboot area, cms queue and other stuff */
	0x1fc00000, 0x1fd00000, /* reset vec */
	0x1e000000, 0x1e200000, /* poe buffers */
};

/*
 * Split the range [mstart, mend) around the xlp_mem_excl[] regions and
 * append the surviving sub-ranges to avail[] as start/end pairs.
 * Returns the number of vm_paddr_t entries written (always even).
 * Relies on xlp_mem_excl[] being sorted and non-overlapping.
 */
static int
mem_exclude_add(vm_paddr_t *avail, vm_paddr_t mstart, vm_paddr_t mend)
{
	int i, pos;

	pos = 0;
	for (i = 0; i < nitems(xlp_mem_excl); i += 2) {
		if (mstart > xlp_mem_excl[i + 1])
			continue;
		if (mstart < xlp_mem_excl[i]) {
			/* usable gap before this excluded region */
			avail[pos++] = mstart;
			if (mend < xlp_mem_excl[i])
				avail[pos++] = mend;
			else
				avail[pos++] = xlp_mem_excl[i];
		}
		/* continue scanning past the excluded region */
		mstart = xlp_mem_excl[i + 1];
		if (mend <= mstart)
			break;
	}
	if (mstart < mend) {
		avail[pos++] = mstart;
		avail[pos++] = mend;
	}
	return (pos);
}

/*
 * Discover usable memory from the bridge DRAM BARs, subtract the
 * excluded regions, and populate phys_avail[]/dump_avail[] and the
 * physmem/realmem page counts.
 */
static void
xlp_mem_init(void)
{
	vm_paddr_t physsz, tmp;
	uint64_t
bridgebase, base, lim, val;
	int i, j, k, n;

	/* update kernel image area in exclude regions */
	tmp = (vm_paddr_t)MIPS_KSEG0_TO_PHYS(&_end);
	tmp = round_page(tmp) + 0x20000; /* round up */
	xlp_mem_excl[1] = tmp;

	printf("Memory (from DRAM BARs):\n");
	bridgebase = nlm_get_bridge_regbase(0); /* TODO: Add other nodes */
	physsz = 0;
	for (i = 0, j = 0; i < 8; i++) {
		/* BAR base and limit registers hold bits 32:13 of a 1MB-granular address */
		val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_BAR(i));
		val = (val >> 12) & 0xfffff;
		base = val << 20;
		val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_LIMIT(i));
		val = (val >> 12) & 0xfffff;
		if (val == 0)   /* BAR not enabled */
			continue;
		lim = (val + 1) << 20;
		printf("  BAR %d: %#jx - %#jx : ", i, (intmax_t)base,
		    (intmax_t)lim);

		/* skip malformed or out-of-range BARs; clamp those crossing the limit */
		if (lim <= base) {
			printf("\tskipped - malformed %#jx -> %#jx\n",
			    (intmax_t)base, (intmax_t)lim);
			continue;
		} else if (base >= XLP_MEM_LIM) {
			printf(" skipped - outside usable limit %#jx.\n",
			    (intmax_t)XLP_MEM_LIM);
			continue;
		} else if (lim >= XLP_MEM_LIM) {
			lim = XLP_MEM_LIM;
			printf(" truncated to %#jx.\n", (intmax_t)XLP_MEM_LIM);
		} else
			printf(" usable\n");

		/* exclude unusable regions from BAR and add rest */
		n = mem_exclude_add(&phys_avail[j], base, lim);
		for (k = j; k < j + n; k += 2) {
			physsz += phys_avail[k + 1] - phys_avail[k];
			printf("\tMem[%d]: %#jx - %#jx\n", k/2,
			    (intmax_t)phys_avail[k], (intmax_t)phys_avail[k+1]);
		}
		j = k;
	}

	/* setup final entry with 0 */
	phys_avail[j] = phys_avail[j + 1] = 0;

	/* copy phys_avail to dump_avail */
	for (i = 0; i <= j + 1; i++)
		dump_avail[i] = phys_avail[i];

	realmem = physmem = btoc(physsz);
}

/*
 * Boot entry for the platform: runs on the boot CPU with the arguments
 * the loader passed in.  a0 is consumed by xlp_bootargs_init(); the
 * ordering below (pcpu, env, clocks, console, core/MMU setup, memory,
 * board info, mips_init, PIC) is load-bearing.
 */
void
platform_start(__register_t a0 __unused,
    __register_t a1 __unused,
    __register_t a2 __unused,
    __register_t a3 __unused)
{

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/* initialize
console so that we have printf */
	boothowto |= (RB_SERIAL | RB_MULTIPLE);	/* Use multiple consoles */

	init_static_kenv(boot1_env, sizeof(boot1_env));
	xlp_bootargs_init(a0);

	/* clockrate used by delay, so initialize it here */
	xlp_cpu_frequency = xlp_get_cpu_frequency(0, 0);
	cpu_clock = xlp_cpu_frequency / 1000000;
	mips_timer_early_init(xlp_cpu_frequency);

	/* Init console please */
	cninit();

	/* Early core init and fixes for errata */
	xlp_setup_core();

	xlp_parse_mmu_options();
	xlp_mem_init();

	/* install the AP reset vector trampoline */
	bcopy(XLPResetEntry, (void *)MIPS_RESET_EXC_VEC,
	      XLPResetEntryEnd - XLPResetEntry);
#ifdef SMP
	/*
	 * We will enable the other threads in core 0 here
	 * so that the TLB and cache info is correct when
	 * mips_init runs
	 */
	xlp_enable_threads(xlp_mmuval);
#endif
	/* setup for the startup core */
	xlp_setup_mmu();

	xlp_enable_blocks();

	/* Read/Guess/setup board information */
	nlm_board_info_setup();

	/* MIPS generic init */
	mips_init();

	/*
	 * XLP specific post initialization
	 * initialize other on chip stuff
	 */
	xlp_pic_init();

	mips_timer_init_params(xlp_cpu_frequency, 0);
}

void
platform_cpu_init()
{
}

/*
 * Reset the chip via the SYS block and spin in 'wait' until it takes
 * effect.
 */
void
platform_reset(void)
{
	uint64_t sysbase = nlm_get_sys_regbase(0);

	nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
	for( ; ; )
		__asm __volatile("wait");
}

#ifdef SMP
/*
 * XLP threads are started simultaneously when we enable threads, this will
 * ensure that the threads are blocked in platform_init_ap, until they are
 * ready to proceed to smp_init_secondary()
 */
static volatile int thr_unblock[4];

/*
 * Start application processor 'cpuid'.  Thread 0 of a core powers the
 * whole core up (clock, reset release, coherency poll); the core's
 * other threads are merely unblocked, since they were already started
 * when the core woke (see comment above thr_unblock).
 */
int
platform_start_ap(int cpuid)
{
	uint32_t coremask, val;
	uint64_t sysbase = nlm_get_sys_regbase(0);
	int hwtid = xlp_cpuid_to_hwtid[cpuid];
	int core, thr;

	core = hwtid / 4;
	thr = hwtid % 4;
	if (thr == 0) {
		/* First thread in core, do core wake up */
		coremask = 1u << core;

		/* Enable core clock */
		val = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
		val &= ~coremask;
		nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, val);

		/* Remove CPU Reset */
		val = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
		val &= ~coremask & 0xff;
		nlm_write_sys_reg(sysbase, SYS_CPU_RESET, val);

		if (bootverbose)
			printf("Waking up core %d ...", core);

		/* Poll for CPU to mark itself coherent */
		do {
			val = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
		} while ((val & coremask) != 0);
		if (bootverbose)
			printf("Done\n");
	} else {
		/* otherwise release the threads stuck in platform_init_ap */
		thr_unblock[thr] = 1;
	}

	return (0);
}

/*
 * Early per-AP initialization, run on the AP itself: core/MMU setup,
 * coprocessor enables, and unmasking of the IPI/timer/msgring
 * interrupts.  Non-zero threads spin on thr_unblock[] until
 * platform_start_ap() releases them.
 */
void
platform_init_ap(int cpuid)
{
	uint32_t stat;
	int thr;

	/* The first thread has to setup the MMU and enable other threads */
	thr = nlm_threadid();
	if (thr == 0) {
		xlp_setup_core();
		xlp_enable_threads(xlp_mmuval);
	} else {
		/*
		 * FIXME busy wait here eats too many cycles, especially
		 * in the core 0 while bootup
		 */
		while (thr_unblock[thr] == 0)
			__asm__ __volatile__ ("nop;nop;nop;nop");
		thr_unblock[thr] = 0;
	}

	xlp_setup_mmu();
	stat = mips_rd_status();
	KASSERT((stat & MIPS_SR_INT_IE) == 0,
	    ("Interrupts enabled in %s!", __func__));
	/* enable COP0 and COP2 (XLP message ring coprocessor) access */
	stat |= MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT;
	mips_wr_status(stat);

	/* mask everything in the EIMR, then open just what we need */
	nlm_write_c0_eimr(0ull);
	xlp_enable_irq(IRQ_IPI);
	xlp_enable_irq(IRQ_TIMER);
	xlp_enable_irq(IRQ_MSGRING);

	return;
}

/* Interrupt number used for inter-processor interrupts. */
int
platform_ipi_intrnum(void)
{

	return (IRQ_IPI);
}

/* Send an IPI to the hardware thread backing 'cpuid' via the PIC. */
void
platform_ipi_send(int cpuid)
{

	nlm_pic_send_ipi(xlp_pic_base, xlp_cpuid_to_hwtid[cpuid],
	    platform_ipi_intrnum(), 0);
}

/* Nothing to do: XLP IPIs need no explicit acknowledge here. */
void
platform_ipi_clear(void)
{
}

int
710platform_processor_id(void) 711{ 712 713 return (xlp_hwtid_to_cpuid[nlm_cpuid()]); 714} 715 716void 717platform_cpu_mask(cpuset_t *mask) 718{ 719 int i, s; 720 721 CPU_ZERO(mask); 722 s = xlp_ncores * xlp_threads_per_core; 723 for (i = 0; i < s; i++) 724 CPU_SET(i, mask); 725} 726 727struct cpu_group * 728platform_smp_topo() 729{ 730 731 return (smp_topo_2level(CG_SHARE_L2, xlp_ncores, CG_SHARE_L1, 732 xlp_threads_per_core, CG_FLAG_THREAD)); 733} 734#endif 735