1/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2
3/*-
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 *    products derived from this software without specific prior written
22 *    permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * RiscBSD kernel project
37 *
38 * cpufuncs.c
39 *
40 * C functions for supporting CPU / MMU / TLB specific operations.
41 *
42 * Created      : 30/01/97
43 */
44#include <sys/cdefs.h>
45__FBSDID("$FreeBSD$");
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/bus.h>
52#include <machine/bus.h>
53#include <machine/cpu.h>
54#include <machine/disassem.h>
55
56#include <vm/vm.h>
57#include <vm/pmap.h>
58#include <vm/uma.h>
59
60#include <machine/cpuconf.h>
61#include <machine/cpufunc.h>
62#include <machine/bootconfig.h>
63
64#ifdef CPU_XSCALE_80200
65#include <arm/xscale/i80200/i80200reg.h>
66#include <arm/xscale/i80200/i80200var.h>
67#endif
68
69#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
70#include <arm/xscale/i80321/i80321reg.h>
71#include <arm/xscale/i80321/i80321var.h>
72#endif
73
74/*
75 * Some definitions in i81342reg.h clash with i80321reg.h.
76 * This only happens for the LINT kernel. As it happens,
77 * we don't need anything from i81342reg.h that we already
78 * got from somewhere else during a LINT compile.
79 */
80#if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
81#include <arm/xscale/i8134x/i81342reg.h>
82#endif
83
84#ifdef CPU_XSCALE_IXP425
85#include <arm/xscale/ixp425/ixp425reg.h>
86#include <arm/xscale/ixp425/ixp425var.h>
87#endif
88
/* PRIMARY CACHE VARIABLES */
/* L1 instruction-cache geometry, filled in by get_cachetype_cp15(). */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

/* L1 data-cache geometry; also describes a unified cache when present. */
int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;	/* CPU_CT_CTYPE() field of the cache type reg */
int	arm_pcache_unified;	/* 1 if the L1 cache is unified (CT 'S' bit clear) */

/* D-cache line size and its mask, used for cache maintenance alignment. */
int	arm_dcache_align;
int	arm_dcache_align_mask;

/* ARMv7 cache hierarchy info snapshotted from CP15 (see get_cachetype_cp15). */
u_int	arm_cache_level;	/* raw cache-level ID register value */
u_int	arm_cache_type[14];	/* per-level/per-type cache size ID values */
u_int	arm_cache_loc;		/* level of coherency extracted from above */

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
int ctrl;
111
112#ifdef CPU_ARM9
/*
 * Dispatch table for ARM9TDMI-class cores: ARM9-specific cache/TLB
 * primitives backed by the generic armv4 helpers; no L2 cache ops.
 * Entries are positional; the trailing comment names each slot.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm9_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all	*/
	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
174#endif /* CPU_ARM9 */
175
176#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * Dispatch table for ARMv5 "Enhanced Cache" cores (ARM9E/ARM10 class):
 * armv5_ec cache primitives plus armv4/arm10 TLB helpers; no L2 ops.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv5_ec_setttb,		/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
239
/*
 * Dispatch table for Marvell Sheeva cores: ARMv5-EC base with
 * Sheeva-specific range operations and a real L2 cache implementation.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	sheeva_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all    */
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range  */
	sheeva_l2cache_inv_range,	/* l2cache_inv_range    */
	sheeva_l2cache_wb_range,	/* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
301#endif /* CPU_ARM9E || CPU_ARM10 */
302
303#ifdef CPU_ARM10
/*
 * Dispatch table for ARM10 cores: ARM10-specific cache/TLB primitives
 * on top of the generic armv4 helpers; no L2 cache operations.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm10_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm10_icache_sync_all,		/* icache_sync_all	*/
	arm10_icache_sync_range,	/* icache_sync_range	*/

	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm10_dcache_inv_range,		/* dcache_inv_range	*/
	arm10_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
365#endif /* CPU_ARM10 */
366
367#ifdef CPU_MV_PJ4B
/*
 * Dispatch table for Marvell PJ4B (ARMv7-compatible) cores: armv7
 * cache/TLB primitives with PJ4B-specific prefetch/branch-target ops.
 * ARMv7 TLB maintenance is unified, so the 'ID' flush routines serve
 * the I-only and D-only slots as well.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	arm11_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	pj4b_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushI		*/
	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	pj4b_drain_readbuf,		/* flush_prefetchbuf	*/
	arm11_drain_writebuf,		/* drain_writebuf	*/
	pj4b_flush_brnchtgt_all,	/* flush_brnchtgt_C	*/
	pj4b_flush_brnchtgt_va,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm11_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
};
428#endif /* CPU_MV_PJ4B */
429
430#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
431  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
432  defined(CPU_XSCALE_80219)
433
/*
 * Dispatch table for Intel XScale cores (80200/80321/PXA2x0/IXP425/
 * 80219): XScale-specific control, cache and sleep primitives over the
 * generic armv4 TLB/write-buffer helpers; no L2 cache operations.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
494#endif
495/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
496   CPU_XSCALE_80219 */
497
498#ifdef CPU_XSCALE_81342
/*
 * Dispatch table for the XScale core 3 (81342): like xscale_cpufuncs
 * but with core-3 cache variants and a real L2 cache implementation.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
559#endif /* CPU_XSCALE_81342 */
560
561
562#if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * Dispatch table for Faraday FA526/FA626TE cores: FA526-specific
 * cache/TLB/sleep primitives over the generic armv4 helpers; no L2.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	fa526_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	fa526_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_all,		/* icache_sync_all	*/
	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	fa526_flush_prefetchbuf,	/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	fa526_flush_brnchtgt_E,		/* flush_brnchtgt_E	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup 		*/
};
623#endif	/* CPU_FA526 || CPU_FA626TE */
624
625#if defined(CPU_ARM1136)
/*
 * Dispatch table for ARM1136 cores: arm11x6 cache/setup primitives over
 * the generic arm11/armv6 helpers; no L2 cache operations.
 */
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11_sleep,                  	/* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
687#endif /* CPU_ARM1136 */
688#if defined(CPU_ARM1176)
/*
 * Dispatch table for ARM1176 cores: identical to the ARM1136 table
 * except for the 1176-specific sleep routine.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11x6_sleep,                  /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
#endif /* CPU_ARM1176 */
751
752#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A and Qualcomm Krait cores: generic armv7
 * cache/TLB primitives; L2 slots start as no-ops and are replaced at
 * runtime when an external L2 controller (e.g. PL310) is enabled.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	armv7_setttb,                   /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/*
	 * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
	 * whether the hardware implements separate I+D or not, so we use the
	 * same 'ID' functions for all 3 variations.
	 */

	armv7_tlb_flushID,              /* tlb_flushID          */
	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
	armv7_tlb_flushID,              /* tlb_flushI           */
	armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
	armv7_tlb_flushID,              /* tlb_flushD           */
	armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */

	/* Cache operations */

	armv7_icache_sync_all, 	        /* icache_sync_all      */
	armv7_icache_sync_range,        /* icache_sync_range    */

	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv7_dcache_inv_range,         /* dcache_inv_range     */
	armv7_dcache_wb_range,          /* dcache_wb_range      */

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv7_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	armv7_sleep,                    /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	armv7_context_switch,           /* context_switch       */

	cortexa_setup                     /* cpu setup            */
};
#endif /* CPU_CORTEXA || CPU_KRAIT */
823
824/*
825 * Global constants also used by locore.s
826 */
827
828struct cpu_functions cpufuncs;
829u_int cputype;
830u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
831
832#if defined(CPU_ARM9) ||	\
833  defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) ||	\
834  defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
835  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
836  defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||			\
837  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
838  defined(CPU_CORTEXA) || defined(CPU_KRAIT)
839
/* Probe CP15 cache-type/size registers into the arm_*cache globals. */
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers (set sizes, associativity, line size of the D-cache). */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
847
848static void
849get_cachetype_cp15()
850{
851	u_int ctype, isize, dsize, cpuid;
852	u_int clevel, csize, i, sel;
853	u_int multiplier;
854	u_char type;
855
856	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
857		: "=r" (ctype));
858
859	cpuid = cpufunc_id();
860	/*
861	 * ...and thus spake the ARM ARM:
862	 *
863	 * If an <opcode2> value corresponding to an unimplemented or
864	 * reserved ID register is encountered, the System Control
865	 * processor returns the value of the main ID register.
866	 */
867	if (ctype == cpuid)
868		goto out;
869
870	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
871		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
872		    : "=r" (clevel));
873		arm_cache_level = clevel;
874		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
875		i = 0;
876		while ((type = (clevel & 0x7)) && i < 7) {
877			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
878			    type == CACHE_SEP_CACHE) {
879				sel = i << 1;
880				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
881				    : : "r" (sel));
882				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
883				    : "=r" (csize));
884				arm_cache_type[sel] = csize;
885				arm_dcache_align = 1 <<
886				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
887				arm_dcache_align_mask = arm_dcache_align - 1;
888			}
889			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
890				sel = (i << 1) | 1;
891				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
892				    : : "r" (sel));
893				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
894				    : "=r" (csize));
895				arm_cache_type[sel] = csize;
896			}
897			i++;
898			clevel >>= 3;
899		}
900	} else {
901		if ((ctype & CPU_CT_S) == 0)
902			arm_pcache_unified = 1;
903
904		/*
905		 * If you want to know how this code works, go read the ARM ARM.
906		 */
907
908		arm_pcache_type = CPU_CT_CTYPE(ctype);
909
910		if (arm_pcache_unified == 0) {
911			isize = CPU_CT_ISIZE(ctype);
912			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
913			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
914			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
915				if (isize & CPU_CT_xSIZE_M)
916					arm_picache_line_size = 0; /* not present */
917				else
918					arm_picache_ways = 1;
919			} else {
920				arm_picache_ways = multiplier <<
921				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
922			}
923			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
924		}
925
926		dsize = CPU_CT_DSIZE(ctype);
927		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
928		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
929		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
930			if (dsize & CPU_CT_xSIZE_M)
931				arm_pdcache_line_size = 0; /* not present */
932			else
933				arm_pdcache_ways = 1;
934		} else {
935			arm_pdcache_ways = multiplier <<
936			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
937		}
938		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
939
940		arm_dcache_align = arm_pdcache_line_size;
941
942		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
943		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
944		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
945		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
946
947	out:
948		arm_dcache_align_mask = arm_dcache_align - 1;
949	}
950}
951#endif /* ARM9 || XSCALE */
952
953/*
954 * Cannot panic here as we may not have a console yet ...
955 */
956
/*
 * Identify the CPU from its ID register and install the matching
 * cpufuncs switch table, probe the cache geometry via
 * get_cachetype_cp15(), and select the pmap PTE initialization
 * routine for this core.  Returns 0 on success; panics when the CPU
 * type is not supported by this kernel configuration.
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	/* Keep only the ID fields used for the matching below. */
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/*
		 * Precompute the set/index stepping values consumed by
		 * the arm9 cache maintenance routines from the cache
		 * geometry probed above.
		 */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Same set/index precomputation as for the ARM9 above. */
		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm10_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm10_dcache_sets_inc;
		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM10 */
#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1136JS
	    || cputype == CPU_ID_ARM1136JSR1
	    || cputype == CPU_ID_ARM1176JZS) {
#ifdef CPU_ARM1136
		if (cputype == CPU_ID_ARM1136JS
		    || cputype == CPU_ID_ARM1136JSR1)
			cpufuncs = arm1136_cpufuncs;
#endif
#ifdef CPU_ARM1176
		if (cputype == CPU_ID_ARM1176JZS)
			cpufuncs = arm1176_cpufuncs;
#endif
		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();

		goto out;
	}
#endif /* CPU_ARM1136 || CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	if (cputype == CPU_ID_CORTEXA7 ||
	    cputype == CPU_ID_CORTEXA8R1 ||
	    cputype == CPU_ID_CORTEXA8R2 ||
	    cputype == CPU_ID_CORTEXA8R3 ||
	    cputype == CPU_ID_CORTEXA9R1 ||
	    cputype == CPU_ID_CORTEXA9R2 ||
	    cputype == CPU_ID_CORTEXA9R3 ||
	    cputype == CPU_ID_CORTEXA15R0 ||
	    cputype == CPU_ID_CORTEXA15R1 ||
	    cputype == CPU_ID_CORTEXA15R2 ||
	    cputype == CPU_ID_CORTEXA15R3 ||
	    cputype == CPU_ID_KRAIT ) {
		cpufuncs = cortexa_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;
		goto out;
	}
#endif /* CPU_CORTEXA */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_mmu_v6();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526) || defined(CPU_FA626TE)
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_FA526 || CPU_FA626TE */

#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		/* Need the full ID again: the stepping was masked off above. */
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);	/* not reached if panic() does not return */
out:
	/*
	 * Presumably aligns UMA allocations to the dcache line size
	 * probed above -- see uma_set_align().
	 */
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
1210
1211/*
1212 * Fixup routines for data and prefetch aborts.
1213 *
1214 * Several compile time symbols are used
1215 *
1216 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1217 * correction of registers after a fault.
1218 */
1219
1220
1221/*
1222 * Null abort fixup routine.
1223 * For use when no fixup is required.
1224 */
1225int
1226cpufunc_null_fixup(arg)
1227	void *arg;
1228{
1229	return(ABORT_FIXUP_OK);
1230}
1231
1232/*
1233 * CPU Setup code
1234 */
1235
1236#if defined (CPU_ARM9) || \
1237  defined(CPU_ARM9E) || \
1238  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
1239  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
1240  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1241  defined(CPU_ARM10) ||  defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
1242  defined(CPU_FA526) || defined(CPU_FA626TE)
1243
/* Actions applied by parse_cpu_options(): ignore, OR in, or clear (BIC). */
#define IGN	0
#define OR	1
#define BIC	2

/*
 * One boot-option keyword and the control-register bit(s) it governs.
 * co_falseop/co_trueop select the action taken when the boolean option
 * is false/true respectively.
 */
struct cpu_option {
	char	*co_name;	/* option keyword; NULL terminates a table */
	int	co_falseop;	/* action when option is false (IGN/OR/BIC) */
	int	co_trueop;	/* action when option is true (IGN/OR/BIC) */
	int	co_value;	/* control-register bit mask affected */
};

static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1256
1257static u_int
1258parse_cpu_options(args, optlist, cpuctrl)
1259	char *args;
1260	struct cpu_option *optlist;
1261	u_int cpuctrl;
1262{
1263	int integer;
1264
1265	if (args == NULL)
1266		return(cpuctrl);
1267
1268	while (optlist->co_name) {
1269		if (get_bootconf_option(args, optlist->co_name,
1270		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1271			if (integer) {
1272				if (optlist->co_trueop == OR)
1273					cpuctrl |= optlist->co_value;
1274				else if (optlist->co_trueop == BIC)
1275					cpuctrl &= ~optlist->co_value;
1276			} else {
1277				if (optlist->co_falseop == OR)
1278					cpuctrl |= optlist->co_value;
1279				else if (optlist->co_falseop == BIC)
1280					cpuctrl &= ~optlist->co_value;
1281			}
1282		}
1283		++optlist;
1284	}
1285	return(cpuctrl);
1286}
1287#endif /* CPU_ARM9 || XSCALE*/
1288
1289#ifdef CPU_ARM9
/* Boot options recognized by arm9_setup(); see parse_cpu_options(). */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1301
1302void
1303arm9_setup(args)
1304	char *args;
1305{
1306	int cpuctrl, cpuctrlmask;
1307
1308	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1309	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1310	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1311	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1312	    CPU_CONTROL_ROUNDROBIN;
1313	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1314		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1315		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1316		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1317		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1318		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1319		 | CPU_CONTROL_ROUNDROBIN;
1320
1321#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1322	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1323#endif
1324
1325	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1326
1327#ifdef __ARMEB__
1328	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1329#endif
1330	if (vector_page == ARM_VECTORS_HIGH)
1331		cpuctrl |= CPU_CONTROL_VECRELOC;
1332
1333	/* Clear out the cache */
1334	cpu_idcache_wbinv_all();
1335
1336	/* Set the control register */
1337	cpu_control(cpuctrlmask, cpuctrl);
1338	ctrl = cpuctrl;
1339
1340}
1341#endif	/* CPU_ARM9 */
1342
1343#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/* Boot options recognized by arm10_setup(); see parse_cpu_options(). */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1355
1356void
1357arm10_setup(args)
1358	char *args;
1359{
1360	int cpuctrl, cpuctrlmask;
1361
1362	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1363	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1364	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1365	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1366	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1367	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1368	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1369	    | CPU_CONTROL_BPRD_ENABLE
1370	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1371
1372#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1373	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1374#endif
1375
1376	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1377
1378#ifdef __ARMEB__
1379	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1380#endif
1381
1382	/* Clear out the cache */
1383	cpu_idcache_wbinv_all();
1384
1385	/* Now really make sure they are clean.  */
1386	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1387
1388	if (vector_page == ARM_VECTORS_HIGH)
1389		cpuctrl |= CPU_CONTROL_VECRELOC;
1390
1391	/* Set the control register */
1392	ctrl = cpuctrl;
1393	cpu_control(0xffffffff, cpuctrl);
1394
1395	/* And again. */
1396	cpu_idcache_wbinv_all();
1397}
1398#endif	/* CPU_ARM9E || CPU_ARM10 */
1399
1400#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
/* Boot options recognized by arm11x6_setup(); see parse_cpu_options(). */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1409
/*
 * Program the control and auxiliary-control registers for
 * ARM1136/ARM1176 cores, applying boot options and the per-stepping
 * errata workarounds below.  'args' is the boot argument string
 * (may be NULL).
 */
void
arm11x6_setup(char *args)
{
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t sbz=0;		/* "should be zero" scratch for the mcr below */
	uint32_t cpuid;

	cpuid = cpufunc_id();

	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_SYST_ENABLE |
		CPU_CONTROL_IC_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	cpuctrl_wax =
		(3 << 30) | /* SBZ */
		(1 << 29) | /* FA */
		(1 << 28) | /* TR */
		(3 << 26) | /* SBZ */
		(3 << 19) | /* SBZ */
		(1 << 17);  /* SBZ */

	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Default: no auxiliary-control change (write-as-existing all bits). */
	auxctrl = 0;
	auxctrl_wax = ~0;
	/*
	 * This options enables the workaround for the 364296 ARM1136
	 * r0pX errata (possible cache data corruption with
	 * hit-under-miss enabled). It sets the undocumented bit 31 in
	 * the auxiliary control register and the FI bit in the control
	 * register, thus disabling hit-under-miss without putting the
	 * processor into full low interrupt latency mode. ARM11MPCore
	 * is not affected.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
		cpuctrl |= CPU_CONTROL_FI_ENABLE;
		auxctrl = ARM1136_AUXCTL_PFI;
		auxctrl_wax = ~ARM1136_AUXCTL_PFI;
	}

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted.  */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(~cpuctrl_wax, cpuctrl);

	/*
	 * Read-modify-write the auxiliary control register: keep the
	 * write-as-existing bits, OR in the new ones, and only write
	 * back (mcrne) when the value actually changed.
	 */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"and	%1, %0, %2\n\t"
			"orr	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(auxctrl_wax), "r"(auxctrl));

	/* And again. */
	cpu_idcache_wbinv_all();
}
1504#endif  /* CPU_ARM1136 || CPU_ARM1176 */
1505
1506#ifdef CPU_MV_PJ4B
1507void
1508pj4bv7_setup(args)
1509	char *args;
1510{
1511	int cpuctrl;
1512
1513	pj4b_config();
1514
1515	cpuctrl = CPU_CONTROL_MMU_ENABLE;
1516#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1517	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1518#endif
1519	cpuctrl |= CPU_CONTROL_DC_ENABLE;
1520	cpuctrl |= (0xf << 3);
1521	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1522	cpuctrl |= CPU_CONTROL_IC_ENABLE;
1523	if (vector_page == ARM_VECTORS_HIGH)
1524		cpuctrl |= CPU_CONTROL_VECRELOC;
1525	cpuctrl |= (0x5 << 16) | (1 < 22);
1526	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1527
1528	/* Clear out the cache */
1529	cpu_idcache_wbinv_all();
1530
1531	/* Set the control register */
1532	ctrl = cpuctrl;
1533	cpu_control(0xFFFFFFFF, cpuctrl);
1534
1535	/* And again. */
1536	cpu_idcache_wbinv_all();
1537}
1538#endif /* CPU_MV_PJ4B */
1539
1540#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1541
1542void
1543cortexa_setup(char *args)
1544{
1545	int cpuctrl, cpuctrlmask;
1546
1547	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
1548	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
1549	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
1550	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
1551	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
1552	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
1553
1554	cpuctrl = CPU_CONTROL_MMU_ENABLE |
1555	    CPU_CONTROL_IC_ENABLE |
1556	    CPU_CONTROL_DC_ENABLE |
1557	    CPU_CONTROL_BPRD_ENABLE;
1558
1559#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1560	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1561#endif
1562
1563	/* Switch to big endian */
1564#ifdef __ARMEB__
1565	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1566#endif
1567
1568	/* Check if the vector page is at the high address (0xffff0000) */
1569	if (vector_page == ARM_VECTORS_HIGH)
1570		cpuctrl |= CPU_CONTROL_VECRELOC;
1571
1572	/* Clear out the cache */
1573	cpu_idcache_wbinv_all();
1574
1575	/* Set the control register */
1576	ctrl = cpuctrl;
1577	cpu_control(cpuctrlmask, cpuctrl);
1578
1579	/* And again. */
1580	cpu_idcache_wbinv_all();
1581#ifdef SMP
1582	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
1583#endif
1584}
1585#endif  /* CPU_CORTEXA */
1586
1587#if defined(CPU_FA526) || defined(CPU_FA626TE)
/* Boot options recognized by fa526_setup(); see parse_cpu_options(). */
struct cpu_option fa526_options[] = {
#ifdef COMPAT_12
	/* Legacy (pre-"cpu." prefix) spellings kept for compatibility. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1602
1603void
1604fa526_setup(char *args)
1605{
1606	int cpuctrl, cpuctrlmask;
1607
1608	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1609		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1610		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1611		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1612		| CPU_CONTROL_BPRD_ENABLE;
1613	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1614		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1615		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1616		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1617		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1618		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1619		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1620
1621#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1622	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1623#endif
1624
1625	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
1626
1627#ifdef __ARMEB__
1628	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1629#endif
1630
1631	if (vector_page == ARM_VECTORS_HIGH)
1632		cpuctrl |= CPU_CONTROL_VECRELOC;
1633
1634	/* Clear out the cache */
1635	cpu_idcache_wbinv_all();
1636
1637	/* Set the control register */
1638	ctrl = cpuctrl;
1639	cpu_control(0xffffffff, cpuctrl);
1640}
1641#endif	/* CPU_FA526 || CPU_FA626TE */
1642
1643#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1644  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1645  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/* Boot options recognized by xscale_setup(); see parse_cpu_options(). */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	/* Legacy (pre-"cpu." prefix) spellings kept for compatibility. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1660
1661void
1662xscale_setup(args)
1663	char *args;
1664{
1665	uint32_t auxctl;
1666	int cpuctrl, cpuctrlmask;
1667
1668	/*
1669	 * The XScale Write Buffer is always enabled.  Our option
1670	 * is to enable/disable coalescing.  Note that bits 6:3
1671	 * must always be enabled.
1672	 */
1673
1674	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1675		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1676		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1677		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1678		 | CPU_CONTROL_BPRD_ENABLE;
1679	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1680		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1681		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1682		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1683		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1684		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1685		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
1686		 CPU_CONTROL_L2_ENABLE;
1687
1688#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1689	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1690#endif
1691
1692	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1693
1694#ifdef __ARMEB__
1695	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1696#endif
1697
1698	if (vector_page == ARM_VECTORS_HIGH)
1699		cpuctrl |= CPU_CONTROL_VECRELOC;
1700#ifdef CPU_XSCALE_CORE3
1701	cpuctrl |= CPU_CONTROL_L2_ENABLE;
1702#endif
1703
1704	/* Clear out the cache */
1705	cpu_idcache_wbinv_all();
1706
1707	/*
1708	 * Set the control register.  Note that bits 6:3 must always
1709	 * be set to 1.
1710	 */
1711	ctrl = cpuctrl;
1712/*	cpu_control(cpuctrlmask, cpuctrl);*/
1713	cpu_control(0xffffffff, cpuctrl);
1714
1715	/* Make sure write coalescing is turned on */
1716	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1717		: "=r" (auxctl));
1718#ifdef XSCALE_NO_COALESCE_WRITES
1719	auxctl |= XSCALE_AUXCTL_K;
1720#else
1721	auxctl &= ~XSCALE_AUXCTL_K;
1722#endif
1723#ifdef CPU_XSCALE_CORE3
1724	auxctl |= XSCALE_AUXCTL_LLR;
1725	auxctl |= XSCALE_AUXCTL_MD_MASK;
1726#endif
1727	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1728		: : "r" (auxctl));
1729}
1730#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
1731	   CPU_XSCALE_80219 */
1732