mp_machdep.c revision 266203
/*-
 * Copyright (c) 2011 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/arm/arm/mp_machdep.c 266203 2014-05-16 00:14:50Z ian $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/ktr.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/smp.h>
#include <machine/pcb.h>
#include <machine/pte.h>
#include <machine/physmem.h>
#include <machine/intr.h>
#include <machine/vmparam.h>
#ifdef VFP
#include <machine/vfp.h>
#endif
#ifdef CPU_MV_PJ4B
#include <arm/mv/mvwin.h>
#include <dev/fdt/fdt_common.h>
#endif

#include "opt_smp.h"

void *temp_pagetable;
extern struct pcpu __pcpu[];
/* Used to hold the APs until we are ready to release them. */
struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

/* # of Application processors */
volatile int mp_naps;

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

static int ipi_handler(void *arg);
void set_stackptrs(int cpu);

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

/* Determine whether we are running on an MP machine. */
int
cpu_mp_probe(void)
{
	CPU_SETOF(0, &all_cpus);

	return (platform_mp_probe());
}

/* Wait for the APs started by the platform code to report in. */
static int
check_ap(void)
{
	uint32_t ms;

	for (ms = 0; ms < 2000; ++ms) {
		if ((mp_naps + 1) == mp_ncpus)
			return (0);		/* success */
		else
			DELAY(1000);
	}

	return (-2);
}

extern unsigned char _end[];

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
	int error, i;
	vm_offset_t temp_pagetable_va;
	vm_paddr_t addr, addr_end;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* Reserve memory for application processors */
	for (i = 0; i < (mp_ncpus - 1); i++)
		dpcpu[i] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
		    M_WAITOK | M_ZERO);
	temp_pagetable_va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE,
	    M_TEMP, 0, 0x0, 0xffffffff, L1_TABLE_SIZE, 0);
	addr = arm_physmem_kernaddr;
	addr_end = (vm_offset_t)&_end - KERNVIRTADDR + arm_physmem_kernaddr;
	addr_end &= ~L1_S_OFFSET;
	addr_end += L1_S_SIZE;
	bzero((void *)temp_pagetable_va, L1_TABLE_SIZE);
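	/*
	 * The loop below fills the temporary L1 table with 1MB section
	 * mappings that cover the kernel image twice: once at its physical
	 * address (an identity mapping, so an AP can enable its MMU while
	 * still executing from physical addresses) and once at the normal
	 * kernel virtual address.  For example, the first kernel section at
	 * arm_physmem_kernaddr gets one entry at index
	 * (arm_physmem_kernaddr >> L1_S_SHIFT) and one at index
	 * (KERNVIRTADDR >> L1_S_SHIFT), which is 0xc00 if KERNVIRTADDR is
	 * the usual 0xc0000000.
	 */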
	for (addr = arm_physmem_kernaddr; addr <= addr_end; addr += L1_S_SIZE) {
		((int *)(temp_pagetable_va))[addr >> L1_S_SHIFT] =
		    L1_TYPE_S | L1_SHARED | L1_S_C | L1_S_B |
		    L1_S_AP(AP_KRW) | L1_S_DOM(PMAP_DOMAIN_KERNEL) | addr;
		((int *)(temp_pagetable_va))[(addr -
		    arm_physmem_kernaddr + KERNVIRTADDR) >> L1_S_SHIFT] =
		    L1_TYPE_S | L1_SHARED | L1_S_C | L1_S_B |
		    L1_S_AP(AP_KRW) | L1_S_DOM(PMAP_DOMAIN_KERNEL) | addr;
	}

#if defined(CPU_MV_PJ4B)
	/* Add the Armada XP registers required for snoop filter initialization. */
	((int *)(temp_pagetable_va))[MV_BASE >> L1_S_SHIFT] =
	    L1_TYPE_S | L1_SHARED | L1_S_B | L1_S_AP(AP_KRW) | fdt_immr_pa;
#endif

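	/*
	 * The APs start with the MMU and caches disabled, so publish the
	 * physical address of the temporary table (the platform start-up
	 * code is expected to hand it to the AP entry point) and write the
	 * table back to main memory where the APs can see it.
	 */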
	temp_pagetable = (void *)vtophys(temp_pagetable_va);
	cpu_idcache_wbinv_all();
	cpu_l2cache_wbinv_all();

	/* Initialize boot code and start up processors */
	platform_mp_start_ap();

	/* Check whether the APs started properly. */
	error = check_ap();
	if (error)
		printf("WARNING: Some APs failed to start\n");
	else
		for (i = 1; i < mp_ncpus; i++)
			CPU_SET(i, &all_cpus);

	contigfree((void *)temp_pagetable_va, L1_TABLE_SIZE, M_TEMP);
}

/* Introduce the rest of the cores to the world. */
void
cpu_mp_announce(void)
{

}

extern vm_paddr_t pmap_pa;
void
init_secondary(int cpu)
{
	struct pcpu *pc;
	uint32_t loop_counter;
	int start = 0, end = 0;

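	/*
	 * Each AP enters here from the platform start-up code, still running
	 * on the temporary page table built in cpu_mp_start().  Invalidate
	 * the stale caches, run the per-CPU setup, and switch to the real
	 * kernel L1 table before touching any pmap-managed mappings.
	 */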
	cpu_idcache_inv_all();

	cpu_setup(NULL);
	setttb(pmap_pa);
	cpu_tlb_flushID();

	pc = &__pcpu[cpu];

	/*
	 * pcpu_init() updates the pcpu queue, so it must not be executed in
	 * parallel on several cores.
	 */
	while (mp_naps < (cpu - 1))
		;

	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu[cpu - 1], cpu);

	/* Provide stack pointers for other processor modes. */
	set_stackptrs(cpu);

	/* Signal our startup to BSP */
	atomic_add_rel_32(&mp_naps, 1);

	/* Spin until the BSP releases the APs */
	while (!aps_ready)
		;

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pc->pc_curthread = pc->pc_idlethread;
	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
	set_curthread(pc->pc_idlethread);
#ifdef VFP
	pc->pc_cpu = cpu;

	vfp_init();
#endif

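	/*
	 * Count ourselves in under ap_boot_mtx so the updates of smp_cpus
	 * are serialized; the last AP to arrive flips smp_started, which
	 * releases the other APs spinning below and tells the rest of the
	 * kernel that IPIs and cross-CPU scheduling are available.
	 */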
	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdowns, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Enable IPIs */
#ifdef IPI_IRQ_START
	start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
	end = IPI_IRQ_END;
#else
	end = IPI_IRQ_START;
#endif
#endif

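	/*
	 * Unmask the IRQ range the platform uses to deliver IPIs.  The
	 * IPI_IRQ_START/IPI_IRQ_END values come from the platform
	 * configuration; when only IPI_IRQ_START is defined a single IRQ is
	 * used, and with neither defined this falls back to IRQ 0.
	 */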
	for (int i = start; i <= end; i++)
		arm_unmask_irq(i);
	enable_interrupts(I32_bit);

	loop_counter = 0;
	while (smp_started == 0) {
		DELAY(100);
		loop_counter++;
		if (loop_counter == 1000)
			CTR0(KTR_SMP, "AP still waiting for smp_started");
	}
	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	CTR0(KTR_SMP, "go into scheduler");
	platform_mp_init_secondary();

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

static int
ipi_handler(void *arg)
{
	u_int	cpu, ipi;

	cpu = PCPU_GET(cpuid);

	ipi = pic_ipi_get((int)arg);

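	/*
	 * pic_ipi_get() returns 0x3ff when no IPI is pending; on GIC-based
	 * platforms this is the "spurious interrupt" ID (1023).  Keep
	 * draining and acknowledging IPIs until that sentinel comes back.
	 */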
	while (ipi != 0x3ff) {
		switch (ipi) {
		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;

		case IPI_AST:
			CTR0(KTR_SMP, "IPI_AST");
			break;

		case IPI_STOP:
		case IPI_STOP_HARD:
			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add it in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

			savectx(&stoppcbs[cpu]);

			/* Indicate we are stopped */
			CPU_SET_ATOMIC(cpu, &stopped_cpus);

			/* Wait for restart */
			while (!CPU_ISSET(cpu, &started_cpus))
				cpu_spinwait();

			CPU_CLR_ATOMIC(cpu, &started_cpus);
			CPU_CLR_ATOMIC(cpu, &stopped_cpus);
			CTR0(KTR_SMP, "IPI_STOP (restart)");
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		case IPI_TLB:
			CTR1(KTR_SMP, "%s: IPI_TLB", __func__);
			cpufuncs.cf_tlb_flushID();
			break;
		default:
			panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
		}

		pic_ipi_clear(ipi);
		ipi = pic_ipi_get(-1);
	}

	return (FILTER_HANDLED);
}

static void
release_aps(void *dummy __unused)
{
	uint32_t loop_counter;
	int start = 0, end = 0;

	if (mp_ncpus == 1)
		return;
#ifdef IPI_IRQ_START
	start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
	end = IPI_IRQ_END;
#else
	end = IPI_IRQ_START;
#endif
#endif

	for (int i = start; i <= end; i++) {
		/*
		 * Register the IPI filter for this IRQ, passing the IRQ
		 * number as the handler argument.  Note that the intr code
		 * hands a handler whose argument is NULL the trap frame
		 * pointer instead, which is what a zero IRQ number would
		 * become here.
		 */
		arm_setup_irqhandler("ipi", ipi_handler, NULL, (void *)i, i,
		    INTR_TYPE_MISC | INTR_EXCL, NULL);

		/* Enable the IPI IRQ. */
		arm_unmask_irq(i);
	}
	atomic_store_rel_int(&aps_ready, 1);

	printf("Release APs\n");

	for (loop_counter = 0; loop_counter < 2000; loop_counter++) {
		if (smp_started)
			return;
		DELAY(1000);
	}
	printf("APs not started\n");
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

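/*
 * Report the CPU topology to the scheduler: a single level in which all
 * cores share one L2 cache.  This matches typical designs of this era
 * (e.g. a Cortex-A9 MPCore cluster with a shared outer L2); a platform
 * with a different cache layout would need its own topology here.
 */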
struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_1level(CG_SHARE_L2, 1, 0));
}

void
cpu_mp_setmaxid(void)
{

	platform_mp_setmaxid();
}

/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	platform_ipi_send(other_cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	platform_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	platform_ipi_send(cpus, ipi);
}

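/*
 * Ask every other core to flush its TLB.  The IPI is only sent once the
 * secondary CPUs are up (smp_started); the receiving side is the IPI_TLB
 * case in ipi_handler() above, which runs cpufuncs.cf_tlb_flushID().
 */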
void
tlb_broadcast(int ipi)
{

	if (smp_started)
		ipi_all_but_self(ipi);
}
