/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/

/*-
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm/arm/cpufunc.c 314530 2017-03-02 01:18:46Z ian $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/disassem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>

#include <machine/cpufunc.h>

#if defined(CPU_XSCALE_81342)
#include <arm/xscale/i8134x/i81342reg.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#endif

/* PRIMARY CACHE VARIABLES */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;
int	arm_pcache_unified;

int	arm_dcache_align;
int	arm_dcache_align_mask;

u_int	arm_cache_level;
u_int	arm_cache_type[14];
u_int	arm_cache_loc;
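
/*
 * These values are consumed by pmap and the per-CPU cache maintenance
 * routines.  As an illustrative (not verbatim) example, a range
 * operation typically rounds its bounds out to whole cache lines
 * before issuing per-line ops:
 *
 *	vm_offset_t eva = (va + len + arm_dcache_align_mask) &
 *	    ~arm_dcache_align_mask;
 *	va &= ~arm_dcache_align_mask;
 *	... one cache op per line from va up to eva ...
 */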

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	arm9_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/
};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv5_ec_setttb,		/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};

struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	sheeva_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all	*/
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range	*/
	sheeva_l2cache_inv_range,	/* l2cache_inv_range	*/
	sheeva_l2cache_wb_range,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
#endif /* CPU_ARM9E */

#ifdef CPU_MV_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	armv7_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv7_setttb,			/* setttb		*/

	/* TLB functions */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv7_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	armv7_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
};
#endif /* CPU_MV_PJ4B */

#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)

struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	xscale_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/

	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */

#ifdef CPU_XSCALE_81342
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	xscalec3_setttb,		/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/

	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_81342 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	fa526_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup		*/
};
#endif	/* CPU_FA526 */

#if defined(CPU_ARM1176)
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	arm11x6_setttb,                 /* setttb               */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	arm11_drain_writebuf,           /* drain_writebuf       */

	arm11x6_sleep,                  /* sleep                */

	/* Soft functions */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
#endif /* CPU_ARM1176 */

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	armv7_setttb,                   /* setttb               */

	/*
	 * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
	 * whether the hardware implements separate I+D or not, so we use the
	 * same 'ID' functions for all 3 variations.
	 */

	armv7_tlb_flushID,              /* tlb_flushID          */
	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
	armv7_tlb_flushID,              /* tlb_flushD           */
	armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */

	/* Cache operations */

	armv7_icache_sync_range,        /* icache_sync_range    */

	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv7_dcache_inv_range,         /* dcache_inv_range     */
	armv7_dcache_wb_range,          /* dcache_wb_range      */

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
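	/*
	 * As an illustrative sketch (not the verbatim pl310 driver code),
	 * installing those ops would look roughly like:
	 *
	 *	cpufuncs.cf_l2cache_wbinv_all = pl310_wbinv_all;
	 *	cpufuncs.cf_l2cache_wbinv_range = pl310_wbinv_range;
	 *	...
	 *
	 * where the pl310_* names are hypothetical here.
	 */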
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	armv7_drain_writebuf,           /* drain_writebuf       */

	armv7_cpu_sleep,                /* sleep                */

	/* Soft functions */

	armv7_context_switch,           /* context_switch       */
	cortexa_setup                   /* cpu setup            */
};
#endif /* CPU_CORTEXA || CPU_KRAIT */

/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;
#if __ARM_ARCH <= 5
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore-v4.s */
#endif
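
/*
 * The cpu_*() operations used throughout the kernel are thin wrappers
 * around this table; in cpufunc.h they expand to indirect calls along
 * the lines of (illustrative):
 *
 *	#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
 *	#define	cpu_setttb(t)		cpufuncs.cf_setttb(t)
 */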

#if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM1176) ||	\
    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
    defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||			\
    defined(CPU_XSCALE_81342) || defined(CPU_CORTEXA) ||		\
    defined(CPU_KRAIT)

/* Global cache line sizes, use 32 as default */
int	arm_dcache_min_line_size = 32;
int	arm_icache_min_line_size = 32;
int	arm_idcache_min_line_size = 32;

static void get_cachetype_cp15(void);

/*
 * Additional cache information local to this file.  Log2 of some of the
 * above numbers.
 */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;

static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	cpuid = cpu_ident();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Resolve minimal cache line sizes */
		arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
		arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
		arm_idcache_min_line_size =
		    min(arm_icache_min_line_size, arm_dcache_min_line_size);

		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
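		/*
		 * Walk CLIDR three bits at a time; each field gives the
		 * type of cache at that level.  For each I/D/unified
		 * cache present, select it in CSSELR and record the raw
		 * CCSIDR contents.  Per the ARMv7 ARM those fields
		 * decode roughly as (illustrative):
		 *
		 *	line size = 1 << ((ccsidr & 0x7) + 4)	(bytes)
		 *	ways      = ((ccsidr >> 3) & 0x3ff) + 1
		 *	sets      = ((ccsidr >> 13) & 0x7fff) + 1
		 */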
		i = 0;
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				sel = i << 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			clevel >>= 3;
		}
	} else {
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */
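		/*
		 * In brief (pre-ARMv7 cache type register format): the
		 * I-size and D-size fields each carry len (bits 1:0),
		 * M (bit 2), assoc (bits 5:3) and size (bits 8:6), and
		 * decode roughly as (illustrative):
		 *
		 *	line size  = 1 << (len + 3)	(bytes)
		 *	cache size = mult << (size + 8)
		 *	    (mult is 3 if M is set, else 2)
		 */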

		arm_pcache_type = CPU_CT_CTYPE(ctype);

		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
#endif /* ARM9 || ARM9E || ARM1176 || XSCALE || FA526 || PJ4B || CORTEXA || KRAIT */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	cputype = cpu_ident();
	cputype &= CPU_ID_CPU_MASK;
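	/*
	 * For reference (illustrative, per the ARM ARM): the main ID
	 * register returned by cpu_ident() encodes the implementer in
	 * bits 31:24 and the primary part number in bits 15:4; the
	 * tests below match on combinations of those fields.
	 */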

#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
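		/*
		 * These precomputed values drive the arm9 set/way loops;
		 * a whole-cache clean walks every index/set pair with a
		 * clean+invalidate per line, roughly (illustrative):
		 *
		 *	for (idx = 0; idx <= arm9_dcache_index_max;
		 *	    idx += arm9_dcache_index_inc)
		 *		for (set = 0; set <= arm9_dcache_sets_max;
		 *		    set += arm9_dcache_sets_inc)
		 *			mcr p15, 0, (idx | set), c7, c14, 2
		 */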
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	switch (cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
		goto out;
	default:
		break;
	}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526)
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		goto out;
	}
#endif	/* CPU_FA526 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
	    cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return (ARCHITECTURE_NOT_PRESENT);
out:
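	/*
	 * Tell UMA the real cache line size so that UMA_ALIGN_CACHE
	 * zones are aligned to actual hardware lines (this matters for
	 * non-coherent DMA on ARM).
	 */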
	uma_set_align(arm_dcache_align_mask);
	return (0);
}

/*
 * CPU Setup code
 */

#ifdef CPU_ARM9
void
arm9_setup(void)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
	    | CPU_CONTROL_ROUNDROBIN;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
	    | CPU_CONTROL_ROUNDROBIN;
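
	/*
	 * cpu_control(mask, bits) rewrites SCTLR; with bits a subset of
	 * mask, as used throughout this file, the effect is roughly
	 * new = (old & ~mask) | bits.
	 */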

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register (SCTLR) */
	cpu_control(cpuctrlmask, cpuctrl);
}
#endif	/* CPU_ARM9 */

#if defined(CPU_ARM9E)
void
arm10_setup(void)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
#endif	/* CPU_ARM9E */

#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static __inline void
cpu_scc_setup_ccnt(void)
{
	/*
	 * This is how you give userland access to the CCNT and PMCn
	 * registers.
	 * BEWARE! This gives write access also, which may not be what
	 * you want!
	 */
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/*
	 * Set up the PMCCNTR register as a cycle counter:
	 * set PMINTENCLR to 0xFFFFFFFF to block interrupts,
	 * set PMCR[2,0] to enable counters and reset CCNT,
	 * set PMCNTENSET to 0x80000000 to enable CCNT.
	 */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
#endif
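
/*
 * With _PMC_USER_READ_WRITE_ defined, userland can then sample the
 * cycle counter directly; on ARMv7 that read looks roughly like
 * (illustrative):
 *
 *	uint32_t ccnt;
 *	__asm __volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (ccnt));
 */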

#if defined(CPU_ARM1176)
void
arm11x6_setup(void)
{
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t cpuid;

	cpuid = cpu_ident();

	auxctrl = 0;
	auxctrl_wax = ~0;

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	tmp = cp15_actlr_get();
	tmp2 = tmp;
	tmp &= auxctrl_wax;
	tmp |= auxctrl;
	if (tmp != tmp2)
		cp15_actlr_set(tmp);

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_ARM1176 */

#ifdef CPU_MV_PJ4B
void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_FA526)
void
fa526_setup(void)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);
}
#endif	/* CPU_FA526 */

#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_81342)
void
xscale_setup(void)
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC
	    | CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
#endif	/* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_81342 */
1116