/*-
 * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
 * reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * NETLOGIC_BSD */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/mips/nlm/xlp_machdep.c 261455 2014-02-04 03:36:42Z eadler $");

#include "opt_ddb.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/random.h>

#include <sys/cons.h>		/* cninit() */
#include <sys/kdb.h>
#include <sys/reboot.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/timetc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/tlb.h>
#include <machine/cpuregs.h>
#include <machine/frame.h>
#include <machine/hwfunc.h>
#include <machine/md_var.h>
#include <machine/asm.h>
#include <machine/pmap.h>
#include <machine/trap.h>
#include <machine/clock.h>
#include <machine/fls64.h>
#include <machine/intr_machdep.h>
#include <machine/smp.h>

#include <mips/nlm/hal/mips-extns.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/sys.h>
#include <mips/nlm/hal/pic.h>
#include <mips/nlm/hal/uart.h>
#include <mips/nlm/hal/mmu.h>
#include <mips/nlm/hal/bridge.h>
#include <mips/nlm/hal/cpucontrol.h>
#include <mips/nlm/hal/cop2.h>

#include <mips/nlm/clock.h>
#include <mips/nlm/interrupt.h>
#include <mips/nlm/board.h>
#include <mips/nlm/xlp.h>
#include <mips/nlm/msgring.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

/*
 * 4KB static data area to keep a copy of the bootloader env until
 * the dynamic kenv is set up.
 */
char boot1_env[4096];

uint64_t xlp_cpu_frequency;
uint64_t xlp_io_base = MIPS_PHYS_TO_DIRECT_UNCACHED(XLP_DEFAULT_IO_BASE);

int xlp_ncores;
int xlp_threads_per_core;
uint32_t xlp_hw_thread_mask;
int xlp_cpuid_to_hwtid[MAXCPU];
int xlp_hwtid_to_cpuid[MAXCPU];
uint64_t xlp_pic_base;

static int xlp_mmuval;

extern uint32_t _end;
extern char XLPResetEntry[], XLPResetEntryEnd[];

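/*
 * Per-core setup, run by the first thread of each core: tweak the LSU
 * and scheduler "defeature" control registers to enable unaligned access
 * and L2HPE, and apply the A0-errata workarounds noted inline below.
 */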
static void
xlp_setup_core(void)
{
	uint64_t reg;

	reg = nlm_mfcr(LSU_DEFEATURE);
	/* Enable Unaligned and L2HPE */
	reg |= (1 << 30) | (1 << 23);
	/*
	 * Experimental: Enable SUE
	 * Speculative Unmap Enable. Enable speculative L2 cache request for
	 * unmapped access.
	 */
	reg |= (1ull << 31);
	/* Clear S1RCM - A0 errata */
	reg &= ~0xeull;
	nlm_mtcr(LSU_DEFEATURE, reg);

	reg = nlm_mfcr(SCHED_DEFEATURE);
	/* Experimental: Disable BRU accepting ALU ops - A0 errata */
	reg |= (1 << 24);
	nlm_mtcr(SCHED_DEFEATURE, reg);
}

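/*
 * Per-thread MMU setup.  Thread 0 of a core configures the core-wide TLB
 * features (extended page mask, large variable TLB, extended TLB and
 * nlm_mmu_setup()); every thread then enables RI/XI (no-read/no-exec)
 * and large physical addressing (ELPA) in its own PageGrain register.
 */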
static void
xlp_setup_mmu(void)
{
	uint32_t pagegrain;

	if (nlm_threadid() == 0) {
		nlm_setup_extended_pagemask(0);
		nlm_large_variable_tlb_en(1);
		nlm_extended_tlb_en(1);
		nlm_mmu_setup(0, 0, 0);
	}

	/* Enable no-read, no-exec, large-physical-address */
	pagegrain = mips_rd_pagegrain();
	pagegrain |= (1U << 31)	|	/* RIE */
	    (1 << 30)		|	/* XIE */
	    (1 << 29);			/* ELPA */
	mips_wr_pagegrain(pagegrain);
}

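/*
 * Enable the RSA block (DFS_DEVICE_RSA) through the SYS unit on every
 * node that is present.
 */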
static void
xlp_enable_blocks(void)
{
	uint64_t sysbase;
	int i;

	for (i = 0; i < XLP_MAX_NODES; i++) {
		if (!nlm_dev_exists(XLP_IO_SYS_OFFSET(i)))
			continue;
		sysbase = nlm_get_sys_regbase(i);
		nlm_sys_enable_block(sysbase, DFS_DEVICE_RSA);
	}
}

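/*
 * Derive xlp_ncores, xlp_threads_per_core, xlp_mmuval and the
 * cpuid <-> hwtid maps from the boot CPU mask.  Each 4-bit nibble of the
 * mask is one core's thread mask and must match core 0's (1, 2 or 4
 * threads per core).  For example, a mask of 0x33 means two cores with
 * threads 0-1 enabled on each.
 */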
static void
xlp_parse_mmu_options(void)
{
	uint64_t sysbase;
	uint32_t cpu_map = xlp_hw_thread_mask;
	uint32_t core0_thr_mask, core_thr_mask, cpu_rst_mask;
	int i, j, k;

#ifdef SMP
	if (cpu_map == 0)
		cpu_map = 0xffffffff;
#else /* Uniprocessor! */
	if (cpu_map == 0)
		cpu_map = 0x1;
	else if (cpu_map != 0x1) {
		printf("WARNING: Starting uniprocessor kernel on cpumask [0x%lx]!\n"
		    "WARNING: Other CPUs will be unused.\n", (u_long)cpu_map);
		cpu_map = 0x1;
	}
#endif

	xlp_ncores = 1;
	core0_thr_mask = cpu_map & 0xf;
	switch (core0_thr_mask) {
	case 1:
		xlp_threads_per_core = 1;
		xlp_mmuval = 0;
		break;
	case 3:
		xlp_threads_per_core = 2;
		xlp_mmuval = 2;
		break;
	case 0xf:
		xlp_threads_per_core = 4;
		xlp_mmuval = 3;
		break;
	default:
		goto unsupp;
	}

	/* Try to find the enabled cores from SYS block */
	sysbase = nlm_get_sys_regbase(0);
	cpu_rst_mask = nlm_read_sys_reg(sysbase, SYS_CPU_RESET) & 0xff;

	/* XLP 416 does not report this correctly, fix */
	if (nlm_processor_id() == CHIP_PROCESSOR_ID_XLP_416)
		cpu_rst_mask = 0xe;

	/* Take out cores which do not exist on chip */
	for (i = 1; i < XLP_MAX_CORES; i++) {
		if ((cpu_rst_mask & (1 << i)) == 0)
			cpu_map &= ~(0xfu << (4 * i));
	}

	/* Verify other cores' CPU masks */
	for (i = 1; i < XLP_MAX_CORES; i++) {
		core_thr_mask = (cpu_map >> (4 * i)) & 0xf;
		if (core_thr_mask == 0)
			continue;
		if (core_thr_mask != core0_thr_mask)
			goto unsupp;
		xlp_ncores++;
	}

	xlp_hw_thread_mask = cpu_map;
	/* setup hardware processor id to cpu id mapping */
	for (i = 0; i < MAXCPU; i++)
		xlp_cpuid_to_hwtid[i] =
		    xlp_hwtid_to_cpuid[i] = -1;
	for (i = 0, k = 0; i < XLP_MAX_CORES; i++) {
		if (((cpu_map >> (i * 4)) & 0xf) == 0)
			continue;
		for (j = 0; j < xlp_threads_per_core; j++) {
			xlp_cpuid_to_hwtid[k] = i * 4 + j;
			xlp_hwtid_to_cpuid[i * 4 + j] = k;
			k++;
		}
	}

	return;

unsupp:
	printf("ERROR: Unsupported CPU mask [use 1, 2 or 4 threads per core].\n"
	    "\tcore0 thread mask [%lx], boot cpu mask [%lx].\n",
	    (u_long)core0_thr_mask, (u_long)cpu_map);
	panic("Invalid CPU mask - halting.\n");
	return;
}

/* Parse cmd line args as env - copied from ar71xx */
static void
xlp_parse_bootargs(char *cmdline)
{
	char *n, *v;

	while ((v = strsep(&cmdline, " \n")) != NULL) {
		if (*v == '\0')
			continue;
		if (*v == '-') {
			while (*v != '\0') {
				v++;
				switch (*v) {
				case 'a': boothowto |= RB_ASKNAME; break;
				case 'd': boothowto |= RB_KDB; break;
				case 'g': boothowto |= RB_GDB; break;
				case 's': boothowto |= RB_SINGLE; break;
				case 'v': boothowto |= RB_VERBOSE; break;
				}
			}
		} else {
			n = strsep(&v, "=");
			if (v == NULL)
				setenv(n, "1");
			else
				setenv(n, v);
		}
	}
}

#ifdef FDT
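/*
 * FDT case: the argument is the device tree blob pointer.  Install and
 * initialize OFW/FDT support, then pick up the CPU mask and the boot
 * arguments from the /chosen node.
 */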
static void
xlp_bootargs_init(__register_t arg)
{
	char	buf[2048]; /* early stack is big enough */
	void	*dtbp;
	phandle_t chosen;
	ihandle_t mask;

	dtbp = (void *)(intptr_t)arg;
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not passed as an argument,
	 * try to use the statically embedded one.
	 */
	if (dtbp == NULL)
		dtbp = &fdt_static_dtb;
#endif
	if (OF_install(OFW_FDT, 0) == FALSE)
		while (1);
	if (OF_init((void *)dtbp) != 0)
		while (1);
	if (fdt_immr_addr(xlp_io_base) != 0)
		while (1);
	OF_interpret("perform-fixup", 0);

	chosen = OF_finddevice("/chosen");
	if (OF_getprop(chosen, "cpumask", &mask, sizeof(mask)) != -1) {
		xlp_hw_thread_mask = mask;
	}

	if (OF_getprop(chosen, "bootargs", buf, sizeof(buf)) != -1)
		xlp_parse_bootargs(buf);
}
#else
/*
 * arg is a pointer to the environment block, the format of the block is
 * a=xyz\0b=pqr\0\0
 */
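/*
 * For example, a (hypothetical) block of "bootargs=-s\0cpumask=0xf\0\0"
 * would set the kenv variables "bootargs" and "cpumask", both of which
 * are consumed below.
 */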
static void
xlp_bootargs_init(__register_t arg)
{
	char	buf[2048]; /* early stack is big enough */
	char	*p, *v, *n;
	uint32_t mask;

	/*
	 * provide backward compat for passing cpu mask as arg
	 */
	if (arg & 1) {
		xlp_hw_thread_mask = arg;
		return;
	}

	p = (void *)(intptr_t)arg;
	while (*p != '\0') {
		strlcpy(buf, p, sizeof(buf));
		v = buf;
		n = strsep(&v, "=");
		if (v == NULL)
			setenv(n, "1");
		else
			setenv(n, v);
		p += strlen(p) + 1;
	}

	/* CPU mask can be passed thru env */
	if (getenv_uint("cpumask", &mask) != 0)
		xlp_hw_thread_mask = mask;

	/* command line argument */
	v = getenv("bootargs");
	if (v != NULL) {
		strlcpy(buf, v, sizeof(buf));
		xlp_parse_bootargs(buf);
		freeenv(v);
	}
}
#endif

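/*
 * Standard FreeBSD/MIPS early MD initialization: tunables, pmap
 * bootstrap, proc0 and the initial mutexes, plus the kernel debugger
 * when DDB is compiled in.
 */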
static void
mips_init(void)
{
	init_param1();
	init_param2(physmem);

	mips_cpu_init();
	cpuinfo.cache_coherent_dma = TRUE;
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB) {
		kdb_enter("Boot flags requested debugger", NULL);
	}
#endif
}

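/*
 * The PIC system timer is loaded with ~0 in xlp_pic_init() below and
 * counts down, so the value read is complemented to present an
 * up-counting timecounter.
 */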
unsigned int
platform_get_timecount(struct timecounter *tc __unused)
{
	uint64_t count = nlm_pic_read_timer(xlp_pic_base, PIC_CLOCK_TIMER);

	return (unsigned int)~count;
}

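/*
 * PIC setup for node 0: read the number of interrupt routing table (IRT)
 * entries from the PIC's device info register, route all IRT entries to
 * cpu 0, and start the PIC clock timer used by the timecounter above.
 */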
static void
xlp_pic_init(void)
{
	struct timecounter pic_timecounter = {
		platform_get_timecount, /* get_timecount */
		0,                      /* no poll_pps */
		~0U,                    /* counter_mask */
		XLP_IO_CLK,             /* frequency */
		"XLRPIC",               /* name */
		2000,                   /* quality (adjusted in code) */
	};
	int i;
	int maxirt;

	xlp_pic_base = nlm_get_pic_regbase(0);  /* TODO: Add other nodes */
	maxirt = nlm_read_reg(nlm_get_pic_pcibase(nlm_nodeid()),
	    XLP_PCI_DEVINFO_REG0);
	printf("Initializing PIC...@%jx %d IRTs\n", (uintmax_t)xlp_pic_base,
	    maxirt);
	/* Bind all PIC irqs to cpu 0 */
	for (i = 0; i < maxirt; i++)
		nlm_pic_write_irt(xlp_pic_base, i, 0, 0, 1, 0,
		    1, 0, 0x1);

	nlm_pic_set_timer(xlp_pic_base, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
	platform_timecounter = &pic_timecounter;
}

#if defined(__mips_n32) || defined(__mips_n64) /* PHYSADDR_64_BIT */
#ifdef XLP_SIM
#define	XLP_MEM_LIM	0x200000000ULL
#else
#define	XLP_MEM_LIM	0x10000000000ULL
#endif
#else
#define	XLP_MEM_LIM	0xfffff000UL
#endif
static vm_paddr_t xlp_mem_excl[] = {
	0,          0,		/* for kernel image region, see xlp_mem_init */
	0x0c000000, 0x14000000,	/* uboot area, cms queue and other stuff */
	0x1e000000, 0x1e200000,	/* poe buffers */
	0x1fc00000, 0x1fd00000,	/* reset vec */
};

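/*
 * Split the range [mstart, mend) around the exclusion table above and
 * append the remaining usable sub-ranges to avail[] as (start, end)
 * pairs; returns the number of entries written.  The entries in
 * xlp_mem_excl[] must be sorted by address and non-overlapping.
 */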
static int
mem_exclude_add(vm_paddr_t *avail, vm_paddr_t mstart, vm_paddr_t mend)
{
	int nreg = sizeof(xlp_mem_excl) / sizeof(xlp_mem_excl[0]);
	int i, pos;

	pos = 0;
	for (i = 0; i < nreg; i += 2) {
		if (mstart > xlp_mem_excl[i + 1])
			continue;
		if (mstart < xlp_mem_excl[i]) {
			avail[pos++] = mstart;
			if (mend < xlp_mem_excl[i])
				avail[pos++] = mend;
			else
				avail[pos++] = xlp_mem_excl[i];
		}
		mstart = xlp_mem_excl[i + 1];
		if (mend <= mstart)
			break;
	}
	if (mstart < mend) {
		avail[pos++] = mstart;
		avail[pos++] = mend;
	}
	return (pos);
}

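/*
 * Discover physical memory from the node 0 bridge DRAM BARs.  Each BAR
 * encodes a base and a limit in 1MB units; every enabled BAR below
 * XLP_MEM_LIM is carved around the exclusion table and added to
 * phys_avail[]/dump_avail[], and physmem/realmem are set from the total.
 */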
static void
xlp_mem_init(void)
{
	vm_paddr_t physsz, tmp;
	uint64_t bridgebase, base, lim, val;
	int i, j, k, n;

	/* update kernel image area in exclude regions */
	tmp = (vm_paddr_t)MIPS_KSEG0_TO_PHYS(&_end);
	tmp = round_page(tmp) + 0x20000; /* round up */
	xlp_mem_excl[1] = tmp;

	printf("Memory (from DRAM BARs):\n");
	bridgebase = nlm_get_bridge_regbase(0); /* TODO: Add other nodes */
	physsz = 0;
	for (i = 0, j = 0; i < 8; i++) {
		val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_BAR(i));
		val = (val >> 12) & 0xfffff;
		base = val << 20;
		val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_LIMIT(i));
		val = (val >> 12) & 0xfffff;
		if (val == 0)	/* BAR not enabled */
			continue;
		lim = (val + 1) << 20;
		printf("  BAR %d: %#jx - %#jx : ", i, (intmax_t)base,
		    (intmax_t)lim);

		if (lim <= base) {
			printf("\tskipped - malformed %#jx -> %#jx\n",
			    (intmax_t)base, (intmax_t)lim);
			continue;
		} else if (base >= XLP_MEM_LIM) {
			printf(" skipped - outside usable limit %#jx.\n",
			    (intmax_t)XLP_MEM_LIM);
			continue;
		} else if (lim >= XLP_MEM_LIM) {
			lim = XLP_MEM_LIM;
			printf(" truncated to %#jx.\n", (intmax_t)XLP_MEM_LIM);
		} else
			printf(" usable\n");

		/* exclude unusable regions from BAR and add rest */
		n = mem_exclude_add(&phys_avail[j], base, lim);
		for (k = j; k < j + n; k += 2) {
			physsz += phys_avail[k + 1] - phys_avail[k];
			printf("\tMem[%d]: %#jx - %#jx\n", k/2,
			    (intmax_t)phys_avail[k], (intmax_t)phys_avail[k+1]);
		}
		j = k;
	}

	/* setup final entry with 0 */
	phys_avail[j] = phys_avail[j + 1] = 0;

	/* copy phys_avail to dump_avail */
	for (i = 0; i <= j + 1; i++)
		dump_avail[i] = phys_avail[i];

	realmem = physmem = btoc(physsz);
}

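/*
 * Early entry point for the boot CPU.  a0 carries either the FDT blob /
 * environment pointer or (for backward compatibility) a CPU mask; see
 * xlp_bootargs_init() above.
 */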
void
platform_start(__register_t a0 __unused,
    __register_t a1 __unused,
    __register_t a2 __unused,
    __register_t a3 __unused)
{

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/* initialize console so that we have printf */
	boothowto |= (RB_SERIAL | RB_MULTIPLE);	/* Use multiple consoles */

	init_static_kenv(boot1_env, sizeof(boot1_env));
	xlp_bootargs_init(a0);

	/* clockrate used by delay, so initialize it here */
	xlp_cpu_frequency = xlp_get_cpu_frequency(0, 0);
	cpu_clock = xlp_cpu_frequency / 1000000;
	mips_timer_early_init(xlp_cpu_frequency);

	/* Initialize the console */
	cninit();

	/* Early core init and fixes for errata */
	xlp_setup_core();

	xlp_parse_mmu_options();
	xlp_mem_init();

	bcopy(XLPResetEntry, (void *)MIPS_RESET_EXC_VEC,
	    XLPResetEntryEnd - XLPResetEntry);
#ifdef SMP
	/*
	 * We will enable the other threads in core 0 here
	 * so that the TLB and cache info is correct when
	 * mips_init runs
	 */
	xlp_enable_threads(xlp_mmuval);
#endif
	/* setup for the startup core */
	xlp_setup_mmu();

	xlp_enable_blocks();

	/* Read/Guess/setup board information */
	nlm_board_info_setup();

	/* MIPS generic init */
	mips_init();

	/*
	 * XLP-specific post-initialization: set up the remaining
	 * on-chip blocks.
	 */
	xlp_pic_init();

	mips_timer_init_params(xlp_cpu_frequency, 0);
}

void
platform_cpu_init(void)
{
}

void
platform_reset(void)
{
	uint64_t sysbase = nlm_get_sys_regbase(0);

	nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
	for (;;)
		__asm __volatile("wait");
}

#ifdef SMP
/*
 * XLP threads are started simultaneously when we enable threads; this
 * keeps the threads blocked in platform_init_ap() until they are
 * released to proceed to smp_init_secondary().
 */
static volatile int thr_unblock[4];

int
platform_start_ap(int cpuid)
{
	uint32_t coremask, val;
	uint64_t sysbase = nlm_get_sys_regbase(0);
	int hwtid = xlp_cpuid_to_hwtid[cpuid];
	int core, thr;

	core = hwtid / 4;
	thr = hwtid % 4;
	if (thr == 0) {
		/* First thread in core, do core wake up */
		coremask = 1u << core;

		/* Enable core clock */
		val = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
		val &= ~coremask;
		nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, val);

		/* Remove CPU Reset */
		val = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
		val &= ~coremask & 0xff;
		nlm_write_sys_reg(sysbase, SYS_CPU_RESET, val);

		if (bootverbose)
			printf("Waking up core %d ...", core);

		/* Poll for CPU to mark itself coherent */
		do {
			val = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
		} while ((val & coremask) != 0);
		if (bootverbose)
			printf("Done\n");
	} else {
		/* otherwise release the threads stuck in platform_init_ap */
		thr_unblock[thr] = 1;
	}

	return (0);
}

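/*
 * Runs on each AP before smp_init_secondary().  Thread 0 of a newly
 * woken core does the per-core setup and enables its sibling threads;
 * the siblings spin on thr_unblock[] until released by
 * platform_start_ap().  Every thread then sets up its MMU, enables
 * COP0/COP2 access and enables the IPI, timer and message ring
 * interrupts.
 */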
void
platform_init_ap(int cpuid)
{
	uint32_t stat;
	int thr;

	/* The first thread has to setup the MMU and enable other threads */
	thr = nlm_threadid();
	if (thr == 0) {
		xlp_setup_core();
		xlp_enable_threads(xlp_mmuval);
	} else {
		/*
		 * FIXME: this busy wait eats too many cycles, especially
		 * on core 0 during bootup.
		 */
		while (thr_unblock[thr] == 0)
			__asm__ __volatile__ ("nop;nop;nop;nop");
		thr_unblock[thr] = 0;
	}

	xlp_setup_mmu();
	stat = mips_rd_status();
	KASSERT((stat & MIPS_SR_INT_IE) == 0,
	    ("Interrupts enabled in %s!", __func__));
	stat |= MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT;
	mips_wr_status(stat);

	nlm_write_c0_eimr(0ull);
	xlp_enable_irq(IRQ_IPI);
	xlp_enable_irq(IRQ_TIMER);
	xlp_enable_irq(IRQ_MSGRING);

	return;
}

int
platform_ipi_intrnum(void)
{

	return (IRQ_IPI);
}

void
platform_ipi_send(int cpuid)
{

	nlm_pic_send_ipi(xlp_pic_base, xlp_cpuid_to_hwtid[cpuid],
	    platform_ipi_intrnum(), 0);
}

void
platform_ipi_clear(void)
{
}

int
platform_processor_id(void)
{

	return (xlp_hwtid_to_cpuid[nlm_cpuid()]);
}

void
platform_cpu_mask(cpuset_t *mask)
{
	int i, s;

	CPU_ZERO(mask);
	s = xlp_ncores * xlp_threads_per_core;
	for (i = 0; i < s; i++)
		CPU_SET(i, mask);
}

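/*
 * Report the topology as two levels: cores sharing the L2 cache, with
 * the hardware threads of each core sharing the L1.
 */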
struct cpu_group *
platform_smp_topo(void)
{

	return (smp_topo_2level(CG_SHARE_L2, xlp_ncores, CG_SHARE_L1,
	    xlp_threads_per_core, CG_FLAG_THREAD));
}
#endif