/* cpufunc.c revision 317002 */
1/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2
3/*-
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 *    products derived from this software without specific prior written
22 *    permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * RiscBSD kernel project
37 *
38 * cpufuncs.c
39 *
40 * C functions for supporting CPU / MMU / TLB specific operations.
41 *
42 * Created      : 30/01/97
43 */
44#include <sys/cdefs.h>
45__FBSDID("$FreeBSD: stable/11/sys/arm/arm/cpufunc.c 317002 2017-04-16 06:35:09Z mmel $");
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/bus.h>
52#include <machine/bus.h>
53#include <machine/cpu.h>
54#include <machine/disassem.h>
55
56#include <vm/vm.h>
57#include <vm/pmap.h>
58#include <vm/uma.h>
59
60#include <machine/cpufunc.h>
61
62#if defined(CPU_XSCALE_81342)
63#include <arm/xscale/i8134x/i81342reg.h>
64#endif
65
66#ifdef CPU_XSCALE_IXP425
67#include <arm/xscale/ixp425/ixp425reg.h>
68#include <arm/xscale/ixp425/ixp425var.h>
69#endif
70
/* PRIMARY CACHE VARIABLES */

/* L1 instruction cache geometry, filled in by get_cachetype_cp15(). */
int	arm_picache_size;	/* total size in bytes */
int	arm_picache_line_size;	/* line size in bytes; 0 if not present */
int	arm_picache_ways;

/* L1 data cache geometry; also used for a unified L1 cache. */
int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

/* Cache type field from the CP15 cache type register, and unified flag. */
int	arm_pcache_type;
int	arm_pcache_unified;

/* D-cache line size used for alignment, and (line size - 1) mask. */
int	arm_dcache_align;
int	arm_dcache_align_mask;

/* ARMv7 cache identification: cache level ID word, per-level size words
 * (indexed by (level << 1) | is_icache), and level-of-coherency. */
u_int	arm_cache_level;
u_int	arm_cache_type[14];
u_int	arm_cache_loc;
89
90#ifdef CPU_ARM9
/*
 * Dispatch table for ARM9 cores.  Positional initializer for struct
 * cpu_functions; the trailing comment on each entry names the member it
 * fills.  L2 cache slots are stubbed with cpufunc_nullop.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	arm9_setttb,			/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
139#endif /* CPU_ARM9 */
140
141#if defined(CPU_ARM9E)
/*
 * Dispatch table for ARMv5 "E" cores (e.g. ARM926EJ-S): ARMv4 TLB ops
 * with armv5_ec cache routines.  L2 slots are stubbed with cpufunc_nullop.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv5_ec_setttb,		/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
191
/*
 * Dispatch table for Marvell Sheeva (Feroceon) cores: armv5_ec routines
 * plus Sheeva-specific ranged D-cache and real L2 cache operations.
 * NOTE: fixed the slot comment below that previously repeated
 * "idcache_wbinv_all" for the idcache_wbinv_range entry.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	sheeva_setttb,			/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all    */
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range  */
	sheeva_l2cache_inv_range,	/* l2cache_inv_range    */
	sheeva_l2cache_wb_range,	/* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
240#endif /* CPU_ARM9E */
241
242#ifdef CPU_MV_PJ4B
/*
 * Dispatch table for Marvell PJ4B (ARMv7) cores, using designated
 * initializers; unset members default to NULL.  L2 ops are no-op stubs.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* MMU functions */
	.cf_control = cpufunc_control,
	.cf_setttb = armv7_setttb,

	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_drain_writebuf = armv7_drain_writebuf,
	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */
	.cf_setup = pj4bv7_setup
};
262#endif /* CPU_MV_PJ4B */
263
264#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
265
/*
 * Dispatch table for Intel XScale (PXA2x0 / IXP425) cores.  Uses an
 * explicit cpwait and XScale-specific cache "purge" (write back +
 * invalidate), "flush" (invalidate) and "clean" (write back) routines.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	xscale_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
313#endif
314/* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
315
316#ifdef CPU_XSCALE_81342
/*
 * Dispatch table for the XScale "core 3" (i81342): like xscale_cpufuncs
 * but with core-3 variants and real L2 cache operations.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	xscalec3_setttb,		/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
364#endif /* CPU_XSCALE_81342 */
365
366
367#if defined(CPU_FA526)
368struct cpu_functions fa526_cpufuncs = {
369	/* CPU functions */
370
371	cpufunc_nullop,			/* cpwait		*/
372
373	/* MMU functions */
374
375	cpufunc_control,		/* control		*/
376	fa526_setttb,			/* setttb		*/
377
378	/* TLB functions */
379
380	armv4_tlb_flushID,		/* tlb_flushID		*/
381	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
382	armv4_tlb_flushD,		/* tlb_flushD		*/
383	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
384
385	/* Cache operations */
386
387	fa526_icache_sync_range,	/* icache_sync_range	*/
388
389	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
390	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
391	fa526_dcache_inv_range,		/* dcache_inv_range	*/
392	fa526_dcache_wb_range,		/* dcache_wb_range	*/
393
394	armv4_idcache_inv_all,		/* idcache_inv_all	*/
395	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
396	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
397	cpufunc_nullop,			/* l2cache_wbinv_all	*/
398	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
399	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
400	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
401	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
402
403	/* Other functions */
404
405	armv4_drain_writebuf,		/* drain_writebuf	*/
406
407	fa526_cpu_sleep,		/* sleep		*/
408
409	/* Soft functions */
410
411
412	fa526_context_switch,		/* context_switch	*/
413
414	fa526_setup			/* cpu setup 		*/
415};
416#endif	/* CPU_FA526 */
417
418#if defined(CPU_ARM1176)
/*
 * Dispatch table for ARM1176 cores (ARMv6), using designated initializers;
 * unset members default to NULL.  L2 ops are no-op stubs.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* MMU functions */
	.cf_control = cpufunc_control,
	.cf_setttb = arm11x6_setttb,

	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_drain_writebuf = arm11_drain_writebuf,
	.cf_sleep = arm11x6_sleep,

	/* Soft functions */
	.cf_setup = arm11x6_setup
};
438#endif /*CPU_ARM1176 */
439
440#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A and Krait (ARMv7) cores, using designated
 * initializers; unset members default to NULL.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* MMU functions */
	.cf_control = cpufunc_control,
	.cf_setttb = armv7_setttb,

	/* Cache operations */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	.cf_l2cache_wbinv_all = cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_drain_writebuf = armv7_drain_writebuf,
	.cf_sleep = armv7_cpu_sleep,

	/* Soft functions */
	.cf_setup = cortexa_setup
};
465#endif /* CPU_CORTEXA */
466
467/*
468 * Global constants also used by locore.s
469 */
470
struct cpu_functions cpufuncs;	/* active dispatch table; set by set_cpufuncs() */
u_int cputype;			/* masked CPU id from cpu_ident() */
#if __ARM_ARCH <= 5
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore-v4.s */
#endif
476
477#if defined(CPU_ARM9) ||	\
478  defined (CPU_ARM9E) ||	\
479  defined(CPU_ARM1176) ||	\
480  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
481  defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||			\
482  defined(CPU_XSCALE_81342) || \
483  defined(CPU_CORTEXA) || defined(CPU_KRAIT)
484
/* Global cache line sizes, use 32 as default */
/* Refined from the ARMv7 cache type register in get_cachetype_cp15(). */
int	arm_dcache_min_line_size = 32;
int	arm_icache_min_line_size = 32;
int	arm_idcache_min_line_size = 32;

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  Consumed by the ARM9 set/index loop setup in
   set_cpufuncs(). */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
497
/*
 * Probe cache geometry from the CP15 identification registers and fill in
 * the arm_*cache* globals above.  Handles both the ARMv7 format (cache
 * level walk) and the older pre-v7 cache type register format.
 */
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	/* Read the CP15 cache type register. */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	cpuid = cpu_ident();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Resolve minimal cache line sizes */
		arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
		arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
		arm_idcache_min_line_size =
		    min(arm_icache_min_line_size, arm_dcache_min_line_size);

		/* Read the cache level ID word (CP15 opc1=1, c0,c0,1). */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		i = 0;
		/* Walk the 3-bit per-level type fields, up to 7 levels. */
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				sel = i << 1;
				/* Select the D/unified cache at this level, */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				/* then read its size/geometry word. */
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			clevel >>= 3;
		}
	} else {
		/* Pre-ARMv7 cache type register format. */
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		/* Log2 values consumed by the ARM9 set/index loops. */
		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

		/*
		 * NOTE(review): this label sits inside the else arm; the
		 * "ctype == cpuid" goto above jumps here, so that path only
		 * recomputes the alignment mask (arm_dcache_align may still
		 * hold 0 in that case, yielding an all-ones mask).
		 */
	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
607#endif /* ARM9 || XSCALE */
608
609/*
610 * Cannot panic here as we may not have a console yet ...
611 */
612
/*
 * Identify the running CPU and install the matching cpu_functions
 * dispatch table into the global 'cpufuncs', probe cache geometry, and
 * initialize pmap PTE handling for that CPU family.  Returns 0 on
 * success; panics if no compiled-in support matches the CPU id (the
 * ARCHITECTURE_NOT_PRESENT return after the panic is not reached).
 */
int
set_cpufuncs(void)
{
	cputype = cpu_ident();
	cputype &= CPU_ID_CPU_MASK;

#ifdef CPU_ARM9
	/* ARM Ltd or TI implementor with a 0x9xx part number. */
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/*
		 * Precompute the set/index loop increments and bounds used
		 * by the arm9 D-cache routines, from the log2 geometry
		 * values filled in by get_cachetype_cp15().
		 */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
	/* Marvell Feroceon/Sheeva cores. */
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	/* All Cortex-A and Krait variants share one table. */
	switch(cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_CORTEXA53:
	case CPU_ID_CORTEXA57:
	case CPU_ID_CORTEXA72:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
		goto out;
	default:
		break;
	}
#endif /* CPU_CORTEXA */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526)
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		goto out;
	}
#endif	/* CPU_FA526 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);	/* not reached: panic() does not return */
out:
	/* Propagate the probed cache-line mask to UMA allocation alignment. */
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
759
760/*
761 * CPU Setup code
762 */
763
764#ifdef CPU_ARM9
765void
766arm9_setup(void)
767{
768	int cpuctrl, cpuctrlmask;
769
770	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
771	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
772	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
773	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
774	    CPU_CONTROL_ROUNDROBIN;
775	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
776		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
777		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
778		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
779		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
780		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
781		 | CPU_CONTROL_ROUNDROBIN;
782
783#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
784	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
785#endif
786
787#ifdef __ARMEB__
788	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
789#endif
790	if (vector_page == ARM_VECTORS_HIGH)
791		cpuctrl |= CPU_CONTROL_VECRELOC;
792
793	/* Clear out the cache */
794	cpu_idcache_wbinv_all();
795
796	/* Set the control register (SCTLR)   */
797	cpu_control(cpuctrlmask, cpuctrl);
798
799}
800#endif	/* CPU_ARM9 */
801
802#if defined(CPU_ARM9E)
803void
804arm10_setup(void)
805{
806	int cpuctrl, cpuctrlmask;
807
808	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
809	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
810	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
811	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
812	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
813	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
814	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
815	    | CPU_CONTROL_BPRD_ENABLE
816	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
817
818#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
819	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
820#endif
821
822#ifdef __ARMEB__
823	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
824#endif
825
826	/* Clear out the cache */
827	cpu_idcache_wbinv_all();
828
829	/* Now really make sure they are clean.  */
830	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
831
832	if (vector_page == ARM_VECTORS_HIGH)
833		cpuctrl |= CPU_CONTROL_VECRELOC;
834
835	/* Set the control register */
836	cpu_control(0xffffffff, cpuctrl);
837
838	/* And again. */
839	cpu_idcache_wbinv_all();
840}
841#endif	/* CPU_ARM9E || CPU_ARM10 */
842
843#if defined(CPU_ARM1176) \
844 || defined(CPU_MV_PJ4B) \
845 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Enable the performance-monitor cycle counter (CCNT), optionally
 * granting userland read/write access to the PMU registers.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
871#endif
872
873#if defined(CPU_ARM1176)
874void
875arm11x6_setup(void)
876{
877	uint32_t auxctrl, auxctrl_wax;
878	uint32_t tmp, tmp2;
879	uint32_t cpuid;
880
881	cpuid = cpu_ident();
882
883	auxctrl = 0;
884	auxctrl_wax = ~0;
885
886	/*
887	 * Enable an errata workaround
888	 */
889	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
890		auxctrl = ARM1176_AUXCTL_PHD;
891		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
892	}
893
894	tmp = cp15_actlr_get();
895	tmp2 = tmp;
896	tmp &= auxctrl_wax;
897	tmp |= auxctrl;
898	if (tmp != tmp2)
899		cp15_actlr_set(tmp);
900
901	cpu_scc_setup_ccnt();
902}
903#endif  /* CPU_ARM1176 */
904
905#ifdef CPU_MV_PJ4B
/*
 * Marvell PJ4B CPU setup: run the PJ4B-specific configuration hook and
 * enable the performance-monitor cycle counter.
 */
void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
913#endif /* CPU_MV_PJ4B */
914
915#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
916
/*
 * Cortex-A / Krait CPU setup: only the performance-monitor cycle counter
 * needs enabling here.
 */
void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
923#endif  /* CPU_CORTEXA */
924
925#if defined(CPU_FA526)
926void
927fa526_setup(void)
928{
929	int cpuctrl, cpuctrlmask;
930
931	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
932		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
933		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
934		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
935		| CPU_CONTROL_BPRD_ENABLE;
936	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
937		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
938		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
939		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
940		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
941		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
942		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
943
944#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
945	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
946#endif
947
948#ifdef __ARMEB__
949	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
950#endif
951
952	if (vector_page == ARM_VECTORS_HIGH)
953		cpuctrl |= CPU_CONTROL_VECRELOC;
954
955	/* Clear out the cache */
956	cpu_idcache_wbinv_all();
957
958	/* Set the control register */
959	cpu_control(0xffffffff, cpuctrl);
960}
961#endif	/* CPU_FA526 */
962
963#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
964  defined(CPU_XSCALE_81342)
/*
 * Configure the system control and auxiliary control registers for
 * XScale cores (PXA2x0, IXP425, i81342): enable the MMU, caches, write
 * buffer and branch prediction, and set the write-coalescing and (core 3)
 * low-locality / mini-data cache attributes.
 */
void
xscale_setup(void)
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	/* Mask kept for the (disabled) selective cpu_control call below. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
		 CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	/* Write the auxiliary control register back. */
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
1030#endif	/* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
1031