mp_cpudep.c revision 265974
/*-
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/powerpc/aim/mp_cpudep.c 265974 2014-05-13 19:12:53Z ian $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/trap.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>

void *ap_pcpu;

static register_t bsp_state[8] __aligned(8);
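/*
 * Note on bsp_state layout (see cpudep_save_config() below): on the 970
 * family it holds HID0, HID1, HID4 and HID5 (as four 64-bit values on
 * powerpc64, or as high/low 32-bit word pairs on a 32-bit kernel); on the
 * 74xx parts it holds HID0, HID1, L2CR and, for MPC745x, L3CR; on Cell it
 * holds TSRL.
 */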

static void cpudep_save_config(void *dummy);
SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);

void
cpudep_ap_early_bootstrap(void)
{
	register_t reg;

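	/*
	 * SPRG0 holds the per-CPU data pointer on FreeBSD/powerpc; load it
	 * with the value the BSP stashed in ap_pcpu so PCPU accesses work
	 * from here on.
	 */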
	__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
	powerpc_sync();

	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Restore HID4 and HID5, which are necessary for the MMU */

		__asm __volatile("ld %0, 16(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID4), "r"(bsp_state));
		__asm __volatile("ld %0, 24(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID5), "r"(bsp_state));
		powerpc_sync();
		break;
	}
}

uintptr_t
cpudep_ap_bootstrap(void)
{
	register_t msr, sp;

	msr = PSL_KERNSET & ~PSL_EE;
	mtmsr(msr);

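	/*
	 * The kernel keeps the curthread pointer in a fixed GPR (r13 on
	 * powerpc64, r2 on 32-bit); seed it here for this AP.
	 */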
	pcpup->pc_curthread = pcpup->pc_idlethread;
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread));
#else
	__asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread));
#endif
	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
	sp = pcpup->pc_curpcb->pcb_sp;

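	/* The caller installs this value as the AP's initial stack pointer. */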
	return (sp);
}

static register_t
mpc74xx_l2_enable(register_t l2cr_config)
{
	register_t ccr, bit;
	uint16_t	vers;

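	/*
	 * The 7400/7410 report a global invalidate in progress via
	 * L2CR[L2IP]; the later 74xx parts instead leave L2CR[L2I] set
	 * until the invalidate completes, so pick the bit to poll on.
	 */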
	vers = mfpvr() >> 16;
	switch (vers) {
	case MPC7400:
	case MPC7410:
		bit = L2CR_L2IP;
		break;
	default:
		bit = L2CR_L2I;
		break;
	}

	ccr = mfspr(SPR_L2CR);
	if (ccr & L2CR_L2E)
		return (ccr);

	/* Configure L2 cache. */
	ccr = l2cr_config & ~L2CR_L2E;
	mtspr(SPR_L2CR, ccr | L2CR_L2I);
	do {
		ccr = mfspr(SPR_L2CR);
	} while (ccr & bit);
	powerpc_sync();
	mtspr(SPR_L2CR, l2cr_config);
	powerpc_sync();

	return (l2cr_config);
}

static register_t
mpc745x_l3_enable(register_t l3cr_config)
{
	register_t ccr;

	ccr = mfspr(SPR_L3CR);
	if (ccr & L3CR_L3E)
		return (ccr);

	/* Configure L3 cache. */
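	/*
	 * Sequence used below: program L3CR with the cache disabled, enable
	 * the L3 clock, run a global invalidate and poll until L3I clears,
	 * then toggle L3CLKEN off and back on (with short delays) before
	 * finally setting L3E.
	 */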
	ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN);
	mtspr(SPR_L3CR, ccr);
	ccr |= 0x4000000;       /* Magic, but documented. */
	mtspr(SPR_L3CR, ccr);
	ccr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, ccr);
	mtspr(SPR_L3CR, ccr | L3CR_L3I);
	while (mfspr(SPR_L3CR) & L3CR_L3I)
		;
	mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN);
	powerpc_sync();
	DELAY(100);
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();
	DELAY(100);
	ccr |= L3CR_L3E;
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();

	return (ccr);
}

static register_t
mpc74xx_l1d_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_DCE)
		return (hid);

	/* Enable L1 D-cache */
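	/*
	 * HID0_DCFI requests a flash invalidate; it is written together
	 * with the enable bit in the single mtspr below.
	 */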
	hid |= HID0_DCE;
	powerpc_sync();
	mtspr(SPR_HID0, hid | HID0_DCFI);
	powerpc_sync();

	return (hid);
}

static register_t
mpc74xx_l1i_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_ICE)
		return (hid);

	/* Enable L1 I-cache */
	hid |= HID0_ICE;
	isync();
	mtspr(SPR_HID0, hid | HID0_ICFI);
	isync();

	return (hid);
}

static void
cpudep_save_config(void *dummy)
{
	uint16_t	vers;

	vers = mfpvr() >> 16;

	switch (vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		#ifdef __powerpc64__
		bsp_state[0] = mfspr(SPR_HID0);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[2] = mfspr(SPR_HID4);
		bsp_state[3] = mfspr(SPR_HID5);
		#else
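		/*
		 * 32-bit kernel: each 64-bit HID value is stored as a
		 * high/low word pair so the 64-bit loads on the AP side
		 * can read it back directly (big-endian layout).
		 */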
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[2]),"=r" (bsp_state[3]) : "K" (SPR_HID1));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5));
		#endif

		powerpc_sync();

		break;
	case IBMCELLBE:
		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			bsp_state[0] = mfspr(SPR_HID0);
			bsp_state[1] = mfspr(SPR_HID1);
			bsp_state[2] = mfspr(SPR_HID4);
			bsp_state[3] = mfspr(SPR_HID6);

			bsp_state[4] = mfspr(SPR_CELL_TSCR);
		}
		#endif

		bsp_state[5] = mfspr(SPR_CELL_TSRL);

		break;
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* Only MPC745x CPUs have an L3 cache. */
		bsp_state[3] = mfspr(SPR_L3CR);

		/* Fallthrough */
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
		bsp_state[2] = mfspr(SPR_L2CR);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[0] = mfspr(SPR_HID0);
		break;
	}
}

void
cpudep_ap_setup(void)
{
	register_t	reg;
	uint16_t	vers;

	vers = mfpvr() >> 16;

	/* The following is needed for restoring from sleep. */
#ifdef __powerpc64__
	/* Writing to the time base register is hypervisor-privileged */
	if (mfmsr() & PSL_HV)
		mttb(0);
#else
	mttb(0);
#endif
	switch (vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Set HIOR to 0 */
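		/*
		 * SPR 311 is the 970's HIOR (interrupt offset register);
		 * clearing it makes this AP take exceptions at the
		 * architected vector offsets, matching the BSP.
		 */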
		__asm __volatile("mtspr 311,%0" :: "r"(0));
		powerpc_sync();

		/*
		 * The 970 has strange rules about how to update HID registers.
		 * See Table 2-3, 970MP manual.
		 */
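		/*
		 * The sequences below follow that recipe: HID0 is written
		 * once and then read back six times, and HID1 is written
		 * twice, each bracketed by sync/isync.
		 */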

		__asm __volatile("mtasr %0; sync" :: "r"(0));
		__asm __volatile(" \
			ld	%0,0(%2);				\
			sync; isync;					\
			mtspr	%1, %0;					\
			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
			sync; isync"
		    : "=r"(reg) : "K"(SPR_HID0), "r"(bsp_state));
		__asm __volatile("ld %0, 8(%2); sync; isync;	\
		    mtspr %1, %0; mtspr %1, %0; sync; isync"
		    : "=r"(reg) : "K"(SPR_HID1), "r"(bsp_state));
		__asm __volatile("ld %0, 16(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID4), "r"(bsp_state));
		__asm __volatile("ld %0, 24(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID5), "r"(bsp_state));

		powerpc_sync();
		break;
	case IBMCELLBE:
		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			mtspr(SPR_HID0, bsp_state[0]);
			mtspr(SPR_HID1, bsp_state[1]);
			mtspr(SPR_HID4, bsp_state[2]);
			mtspr(SPR_HID6, bsp_state[3]);

			mtspr(SPR_CELL_TSCR, bsp_state[4]);
		}
		#endif

		mtspr(SPR_CELL_TSRL, bsp_state[5]);

		break;
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* XXX: Program the CPU ID into PIR */
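		/* SPR 1023 is the PIR (processor ID register) on these parts. */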
		__asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));

		powerpc_sync();
		isync();

		mtspr(SPR_HID0, bsp_state[0]); isync();
		mtspr(SPR_HID1, bsp_state[1]); isync();

		/* Now enable the L3 cache. */
		switch (vers) {
		case MPC7450:
		case MPC7455:
		case MPC7457:
			/* Only MPC745x CPUs have an L3 cache. */
			reg = mpc745x_l3_enable(bsp_state[3]);
		default:
			break;
		}

		reg = mpc74xx_l2_enable(bsp_state[2]);
		reg = mpc74xx_l1d_enable();
		reg = mpc74xx_l1i_enable();

		break;
	default:
#ifdef __powerpc64__
		if (!(mfmsr() & PSL_HV)) /* Rely on HV to have set things up */
			break;
#endif
		printf("WARNING: Unknown CPU type. Cache performance may be "
		    "suboptimal.\n");
		break;
	}
}