/* cpufunc.c revision 266046 */
1145519Sdarrenr/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2145510Sdarrenr
3145510Sdarrenr/*-
4255332Scy * arm7tdmi support code Copyright (c) 2001 John Fremlin
5145510Sdarrenr * arm8 support code Copyright (c) 1997 ARM Limited
6145510Sdarrenr * arm8 support code Copyright (c) 1997 Causality Limited
7145510Sdarrenr * arm9 support code Copyright (C) 2001 ARM Ltd
8145510Sdarrenr * Copyright (c) 1997 Mark Brinicombe.
9145510Sdarrenr * Copyright (c) 1997 Causality Limited
10145510Sdarrenr * All rights reserved.
11145510Sdarrenr *
12145510Sdarrenr * Redistribution and use in source and binary forms, with or without
13145510Sdarrenr * modification, are permitted provided that the following conditions
14145510Sdarrenr * are met:
15145510Sdarrenr * 1. Redistributions of source code must retain the above copyright
16145510Sdarrenr *    notice, this list of conditions and the following disclaimer.
17145510Sdarrenr * 2. Redistributions in binary form must reproduce the above copyright
18145510Sdarrenr *    notice, this list of conditions and the following disclaimer in the
19145510Sdarrenr *    documentation and/or other materials provided with the distribution.
20145510Sdarrenr * 3. All advertising materials mentioning features or use of this software
21145510Sdarrenr *    must display the following acknowledgement:
22145510Sdarrenr *	This product includes software developed by Causality Limited.
23145510Sdarrenr * 4. The name of Causality Limited may not be used to endorse or promote
24145510Sdarrenr *    products derived from this software without specific prior written
25145510Sdarrenr *    permission.
26145510Sdarrenr *
27145510Sdarrenr * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28145510Sdarrenr * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29145510Sdarrenr * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30145510Sdarrenr * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31145510Sdarrenr * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32145510Sdarrenr * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33145510Sdarrenr * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34145510Sdarrenr * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35145510Sdarrenr * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36145510Sdarrenr * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37145510Sdarrenr * SUCH DAMAGE.
38145510Sdarrenr *
39145510Sdarrenr * RiscBSD kernel project
40145510Sdarrenr *
41145510Sdarrenr * cpufuncs.c
42145510Sdarrenr *
43145510Sdarrenr * C functions for supporting CPU / MMU / TLB specific operations.
44145510Sdarrenr *
45145510Sdarrenr * Created      : 30/01/97
46145510Sdarrenr */
47145510Sdarrenr#include <sys/cdefs.h>
48145510Sdarrenr__FBSDID("$FreeBSD: stable/10/sys/arm/arm/cpufunc.c 266046 2014-05-14 16:32:27Z ian $");
49145510Sdarrenr
50145510Sdarrenr#include <sys/param.h>
51145510Sdarrenr#include <sys/systm.h>
52145510Sdarrenr#include <sys/lock.h>
53145510Sdarrenr#include <sys/mutex.h>
54145510Sdarrenr#include <sys/bus.h>
55145510Sdarrenr#include <machine/bus.h>
56145510Sdarrenr#include <machine/cpu.h>
57145510Sdarrenr#include <machine/disassem.h>
58145510Sdarrenr
59145510Sdarrenr#include <vm/vm.h>
60145510Sdarrenr#include <vm/pmap.h>
61145510Sdarrenr#include <vm/uma.h>
62145510Sdarrenr
63145510Sdarrenr#include <machine/cpuconf.h>
64145510Sdarrenr#include <machine/cpufunc.h>
65145510Sdarrenr#include <machine/bootconfig.h>
66145510Sdarrenr
67145510Sdarrenr#ifdef CPU_XSCALE_80200
68145510Sdarrenr#include <arm/xscale/i80200/i80200reg.h>
69145510Sdarrenr#include <arm/xscale/i80200/i80200var.h>
70145510Sdarrenr#endif
71145510Sdarrenr
72145510Sdarrenr#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73145510Sdarrenr#include <arm/xscale/i80321/i80321reg.h>
74145510Sdarrenr#include <arm/xscale/i80321/i80321var.h>
75145510Sdarrenr#endif
76145510Sdarrenr
77145510Sdarrenr/*
78145510Sdarrenr * Some definitions in i81342reg.h clash with i80321reg.h.
79145510Sdarrenr * This only happens for the LINT kernel. As it happens,
80145510Sdarrenr * we don't need anything from i81342reg.h that we already
81145510Sdarrenr * got from somewhere else during a LINT compile.
82255332Scy */
83255332Scy#if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
84145510Sdarrenr#include <arm/xscale/i8134x/i81342reg.h>
85145510Sdarrenr#endif
86145510Sdarrenr
87145510Sdarrenr#ifdef CPU_XSCALE_IXP425
88145510Sdarrenr#include <arm/xscale/ixp425/ixp425reg.h>
89145510Sdarrenr#include <arm/xscale/ixp425/ixp425var.h>
90145510Sdarrenr#endif
91145510Sdarrenr
/* PRIMARY CACHE VARIABLES */
int	arm_picache_size;		/* primary I-cache geometry */
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;	/* primary cache type / unified flag */
int	arm_pcache_unified;

int	arm_dcache_align;	/* D-cache line size and its mask */
int	arm_dcache_align_mask;

/* Cache hierarchy description; filled in elsewhere (not in this chunk). */
u_int	arm_cache_level;
u_int	arm_cache_type[14];
u_int	arm_cache_loc;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
int ctrl;	/* NOTE(review): presumably a saved control-register value; set outside this chunk — confirm */
114145510Sdarrenr
#ifdef CPU_ARM7TDMI
/*
 * cpu_functions dispatch table for ARM7TDMI cores.  Each entry fills the
 * struct cpu_functions slot named in its trailing comment.  The separate
 * I/D TLB and cache slots are all wired to the combined "ID" routines,
 * and the L2 slots are no-ops.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	/* Unlike most tables here, this one installs late_abort_fixup. */
	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7TDMI */
176
#ifdef CPU_ARM8
/*
 * cpu_functions dispatch table for ARM8 cores.  Trailing comments name the
 * struct cpu_functions slot each entry fills; L2 slots are no-ops.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
	/* XXX: purge (wb+inv) used where invalidate-only would suffice. */
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/

	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
#endif	/* CPU_ARM8 */
237
#ifdef CPU_ARM9
/*
 * cpu_functions dispatch table for ARM9 cores: ARM9-specific cache/TTB
 * routines combined with the generic armv4 TLB and write-buffer helpers.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm9_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	/* No single-entry I-flush available; falls back to full I-flush. */
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all	*/
	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */
299
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * cpu_functions dispatch table for ARMv5 "E-capable" cores (ARM9E/ARM10
 * class): armv5_ec cache routines with armv4/arm10 TLB helpers.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv5_ec_setttb,		/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
361
/*
 * cpu_functions dispatch table for Marvell Sheeva cores.  Shares the
 * armv5_ec I-cache and whole-cache routines but uses sheeva-specific
 * ranged D-cache ops, real L2 cache routines, and a real sleep hook.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	sheeva_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all    */
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range  */
	sheeva_l2cache_inv_range,	/* l2cache_inv_range    */
	sheeva_l2cache_wb_range,	/* l2cache_wb_range     */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
#endif /* CPU_ARM9E || CPU_ARM10 */
422
#ifdef CPU_ARM10
/*
 * cpu_functions dispatch table for ARM10 cores: arm10-specific cache and
 * TTB routines with the generic armv4 TLB helpers; L2 slots are no-ops.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm10_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm10_icache_sync_all,		/* icache_sync_all	*/
	arm10_icache_sync_range,	/* icache_sync_range	*/

	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm10_dcache_inv_range,		/* dcache_inv_range	*/
	arm10_dcache_wb_range,		/* dcache_wb_range	*/

	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
#endif /* CPU_ARM10 */
484
#ifdef CPU_MV_PJ4B
/*
 * cpu_functions dispatch table for Marvell PJ4B (ARMv7) cores: armv7
 * TLB/cache primitives plus pj4b-specific TTB, read-buffer and
 * branch-target-flush hooks.  cpwait is a write-buffer drain here.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	arm11_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	pj4b_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushI		*/
	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	pj4b_drain_readbuf,		/* flush_prefetchbuf	*/
	arm11_drain_writebuf,		/* drain_writebuf	*/
	pj4b_flush_brnchtgt_all,	/* flush_brnchtgt_C	*/
	pj4b_flush_brnchtgt_va,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm11_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
};
#endif /* CPU_MV_PJ4B */
545
#ifdef CPU_SA110
/*
 * cpu_functions dispatch table for StrongARM SA-110 cores: shared sa1
 * cache/TTB routines with armv4 TLB helpers; L2 slots are no-ops.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
	/* XXX: purge (wb+inv) used where invalidate-only would suffice. */
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
#endif	/* CPU_SA110 */
606
#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * cpu_functions dispatch table for StrongARM SA-1100/SA-1110 cores.
 * Mostly identical to sa110_cpufuncs, but adds a read-buffer drain for
 * flush_prefetchbuf and real sleep/context-switch hooks.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
	/* XXX: purge (wb+inv) used where invalidate-only would suffice. */
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sa11x0_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa11x0_context_switch,		/* context_switch	*/

	sa11x0_setup			/* cpu setup		*/
};
#endif	/* CPU_SA1100 || CPU_SA1110 */
667
#ifdef CPU_IXP12X0
/*
 * cpu_functions dispatch table for Intel IXP12x0 cores.  Uses the shared
 * sa1 cache/TTB routines with IXP12x0-specific read-buffer drain,
 * context switch and setup hooks.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
	/* XXX: purge (wb+inv) used where invalidate-only would suffice. */
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	ixp12x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	ixp12x0_context_switch,		/* context_switch	*/

	ixp12x0_setup			/* cpu setup		*/
};
#endif	/* CPU_IXP12X0 */
728
#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_80219)

/*
 * cpu_functions dispatch table shared by the XScale variants listed in
 * the guard above.  Uses xscale-specific control/TTB/cache/sleep hooks
 * plus a real cpwait (coprocessor-write drain); L2 slots are no-ops.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif
/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
   CPU_XSCALE_80219 */
794
#ifdef CPU_XSCALE_81342
/*
 * cpu_functions dispatch table for the XScale core-3 based 81342.
 * Differs from xscale_cpufuncs mainly in using xscalec3 cache/TTB
 * routines and real L2 cache operations.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_81342 */
855
856
#if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * cpu_functions dispatch table for Faraday FA526/FA626TE cores: fa526
 * cache/TTB/branch-target/sleep hooks with armv4 TLB helpers; L2 slots
 * are no-ops.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	fa526_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	fa526_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_all,		/* icache_sync_all	*/
	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	fa526_flush_prefetchbuf,	/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	fa526_flush_brnchtgt_E,		/* flush_brnchtgt_E	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup 		*/
};
#endif	/* CPU_FA526 || CPU_FA626TE */
917
#if defined(CPU_ARM1136)
/*
 * cpu_functions dispatch table for ARM1136 cores: arm11x6 cache/TTB/
 * prefetch routines with arm11 TLB, write-buffer, sleep and context
 * switch helpers; L2 slots are no-ops.
 */
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11_sleep,                  	/* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
#endif /* CPU_ARM1136 */
#if defined(CPU_ARM1176)
/*
 * Function dispatch table for ARM1176 cores.  Identical to the ARM1136
 * table except for the sleep entry (arm11x6_sleep vs arm11_sleep).
 * Entries are positional initializers for struct cpu_functions; the
 * trailing comment on each line names the member — keep order intact.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	/* No L2 cache handling here; the l2cache slots are no-ops. */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11x6_sleep,                  /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
#endif /*CPU_ARM1176 */
1040
#if defined(CPU_CORTEXA)
/*
 * Function dispatch table for Cortex-A cores (A7/A8/A9/A15 as matched in
 * set_cpufuncs()).  Entries are positional initializers for struct
 * cpu_functions; the trailing comment names the member — keep order intact.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	armv7_setttb,                   /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	armv7_tlb_flushID,              /* tlb_flushID          */
	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	/*
	 * NOTE(review): the icache_sync_all slot is backed by
	 * armv7_idcache_wbinv_all, which operates on both caches — a
	 * conservative superset of an I-cache sync.  Presumably deliberate
	 * (no armv7_icache_sync_all here); confirm against newer revisions.
	 */
	armv7_idcache_wbinv_all,         /* icache_sync_all      */
	armv7_icache_sync_range,        /* icache_sync_range    */

	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv7_dcache_inv_range,         /* dcache_inv_range     */
	armv7_dcache_wb_range,          /* dcache_wb_range      */

	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv7_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11_sleep,                    /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	armv7_context_switch,           /* context_switch       */

	cortexa_setup                     /* cpu setup            */
};
#endif /* CPU_CORTEXA */
1106
1107/*
1108 * Global constants also used by locore.s
1109 */
1110
1111struct cpu_functions cpufuncs;
1112u_int cputype;
1113u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
1114
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) ||	\
  defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) ||	\
  defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
  defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||			\
  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
  defined(CPU_CORTEXA)

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;

/*
 * Read the CP15 cache type register(s) and fill in the global cache
 * description variables (arm_pcache_*, arm_pdcache_*, arm_picache_*,
 * arm_dcache_align*, arm_dcache_l2_*).  Handles both the ARMv7
 * CLIDR/CCSIDR scheme and the older pre-v7 cache type register format.
 */
static void
get_cachetype_cp15()
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	/* CP15 c0, opc2 1: cache type register. */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	cpuid = cpufunc_id();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;	/* no cache type register; jumps to the common tail below */

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* ARMv7: walk the CLIDR, reading one CCSIDR per level. */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		i = 0;
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				/* Select the data/unified cache of this level (CSSELR). */
				sel = i << 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
				/* Overwritten each level; the last (outermost) d-cache wins. */
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				/* Select the instruction cache of this level. */
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			clevel >>= 3;	/* next 3-bit level field of CLIDR */
		}
	} else {
		/* Pre-v7 cache type register format. */
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

	/*
	 * NOTE: this label sits inside the else block; the 'goto out'
	 * above legally jumps in here, so the mask is always derived
	 * from whatever arm_dcache_align currently holds.
	 */
	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM1136 || ARM1176 ||
	  XSCALE || FA526 || FA626TE || PJ4B || CORTEXA */
1235
#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
    defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU id (CPU_ID_CPU_MASK form); 0 terminates the table */
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

/* Per-CPU cache geometry, consumed by get_cachetype_table() below. */
struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
1260
1261static void get_cachetype_table(void);
1262
1263static void
1264get_cachetype_table()
1265{
1266	int i;
1267	u_int32_t cpuid = cpufunc_id();
1268
1269	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
1270		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
1271			arm_pcache_type = cachetab[i].ct_pcache_type;
1272			arm_pcache_unified = cachetab[i].ct_pcache_unified;
1273			arm_pdcache_size = cachetab[i].ct_pdcache_size;
1274			arm_pdcache_line_size =
1275			    cachetab[i].ct_pdcache_line_size;
1276			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
1277			arm_picache_size = cachetab[i].ct_picache_size;
1278			arm_picache_line_size =
1279			    cachetab[i].ct_picache_line_size;
1280			arm_picache_ways = cachetab[i].ct_picache_ways;
1281		}
1282	}
1283	arm_dcache_align = arm_pdcache_line_size;
1284
1285	arm_dcache_align_mask = arm_dcache_align - 1;
1286}
1287
1288#endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
1289
1290/*
1291 * Cannot panic here as we may not have a console yet ...
1292 */
1293
1294int
1295set_cpufuncs()
1296{
1297	cputype = cpufunc_id();
1298	cputype &= CPU_ID_CPU_MASK;
1299
1300	/*
1301	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
1302	 * CPU type where we want to use it by default, then we set it.
1303	 */
1304
1305#ifdef CPU_ARM7TDMI
1306	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1307	    CPU_ID_IS7(cputype) &&
1308	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
1309		cpufuncs = arm7tdmi_cpufuncs;
1310		cpu_reset_needs_v4_MMU_disable = 0;
1311		get_cachetype_cp15();
1312		pmap_pte_init_generic();
1313		goto out;
1314	}
1315#endif
1316#ifdef CPU_ARM8
1317	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1318	    (cputype & 0x0000f000) == 0x00008000) {
1319		cpufuncs = arm8_cpufuncs;
1320		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
1321		get_cachetype_cp15();
1322		pmap_pte_init_arm8();
1323		goto out;
1324	}
1325#endif	/* CPU_ARM8 */
1326#ifdef CPU_ARM9
1327	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
1328	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
1329	    (cputype & 0x0000f000) == 0x00009000) {
1330		cpufuncs = arm9_cpufuncs;
1331		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1332		get_cachetype_cp15();
1333		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1334		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
1335		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
1336		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1337		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
1338#ifdef ARM9_CACHE_WRITE_THROUGH
1339		pmap_pte_init_arm9();
1340#else
1341		pmap_pte_init_generic();
1342#endif
1343		goto out;
1344	}
1345#endif /* CPU_ARM9 */
1346#if defined(CPU_ARM9E) || defined(CPU_ARM10)
1347	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
1348	    cputype == CPU_ID_MV88FR571_41) {
1349		uint32_t sheeva_ctrl;
1350
1351		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
1352		    MV_L2_ENABLE);
1353		/*
1354		 * Workaround for Marvell MV78100 CPU: Cache prefetch
1355		 * mechanism may affect the cache coherency validity,
1356		 * so it needs to be disabled.
1357		 *
1358		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
1359		 * L2 Prefetching Mechanism) for details.
1360		 */
1361		if (cputype == CPU_ID_MV88FR571_VD ||
1362		    cputype == CPU_ID_MV88FR571_41)
1363			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
1364
1365		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
1366
1367		cpufuncs = sheeva_cpufuncs;
1368		get_cachetype_cp15();
1369		pmap_pte_init_generic();
1370		goto out;
1371	} else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
1372		cpufuncs = armv5_ec_cpufuncs;
1373		get_cachetype_cp15();
1374		pmap_pte_init_generic();
1375		goto out;
1376	}
1377#endif /* CPU_ARM9E || CPU_ARM10 */
1378#ifdef CPU_ARM10
1379	if (/* cputype == CPU_ID_ARM1020T || */
1380	    cputype == CPU_ID_ARM1020E) {
1381		/*
1382		 * Select write-through cacheing (this isn't really an
1383		 * option on ARM1020T).
1384		 */
1385		cpufuncs = arm10_cpufuncs;
1386		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1387		get_cachetype_cp15();
1388		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1389		arm10_dcache_sets_max =
1390		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1391		    arm10_dcache_sets_inc;
1392		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1393		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
1394		pmap_pte_init_generic();
1395		goto out;
1396	}
1397#endif /* CPU_ARM10 */
1398#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
1399	if (cputype == CPU_ID_ARM1136JS
1400	    || cputype == CPU_ID_ARM1136JSR1
1401	    || cputype == CPU_ID_ARM1176JZS) {
1402#ifdef CPU_ARM1136
1403		if (cputype == CPU_ID_ARM1136JS
1404		    || cputype == CPU_ID_ARM1136JSR1)
1405			cpufuncs = arm1136_cpufuncs;
1406#endif
1407#ifdef CPU_ARM1176
1408		if (cputype == CPU_ID_ARM1176JZS)
1409			cpufuncs = arm1176_cpufuncs;
1410#endif
1411		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
1412		get_cachetype_cp15();
1413
1414		pmap_pte_init_mmu_v6();
1415
1416		goto out;
1417	}
1418#endif /* CPU_ARM1136 || CPU_ARM1176 */
1419#ifdef CPU_CORTEXA
1420	if (cputype == CPU_ID_CORTEXA7 ||
1421	    cputype == CPU_ID_CORTEXA8R1 ||
1422	    cputype == CPU_ID_CORTEXA8R2 ||
1423	    cputype == CPU_ID_CORTEXA8R3 ||
1424	    cputype == CPU_ID_CORTEXA9R1 ||
1425	    cputype == CPU_ID_CORTEXA9R2 ||
1426	    cputype == CPU_ID_CORTEXA9R3 ||
1427	    cputype == CPU_ID_CORTEXA15 ) {
1428		cpufuncs = cortexa_cpufuncs;
1429		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
1430		get_cachetype_cp15();
1431
1432		pmap_pte_init_mmu_v6();
1433		/* Use powersave on this CPU. */
1434		cpu_do_powersave = 1;
1435		goto out;
1436	}
1437#endif /* CPU_CORTEXA */
1438
1439#if defined(CPU_MV_PJ4B)
1440	if (cputype == CPU_ID_MV88SV581X_V7 ||
1441	    cputype == CPU_ID_MV88SV584X_V7 ||
1442	    cputype == CPU_ID_ARM_88SV581X_V7) {
1443		cpufuncs = pj4bv7_cpufuncs;
1444		get_cachetype_cp15();
1445		pmap_pte_init_mmu_v6();
1446		goto out;
1447	}
1448#endif /* CPU_MV_PJ4B */
1449#ifdef CPU_SA110
1450	if (cputype == CPU_ID_SA110) {
1451		cpufuncs = sa110_cpufuncs;
1452		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
1453		get_cachetype_table();
1454		pmap_pte_init_sa1();
1455		goto out;
1456	}
1457#endif	/* CPU_SA110 */
1458#ifdef CPU_SA1100
1459	if (cputype == CPU_ID_SA1100) {
1460		cpufuncs = sa11x0_cpufuncs;
1461		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1462		get_cachetype_table();
1463		pmap_pte_init_sa1();
1464		/* Use powersave on this CPU. */
1465		cpu_do_powersave = 1;
1466
1467		goto out;
1468	}
1469#endif	/* CPU_SA1100 */
1470#ifdef CPU_SA1110
1471	if (cputype == CPU_ID_SA1110) {
1472		cpufuncs = sa11x0_cpufuncs;
1473		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1474		get_cachetype_table();
1475		pmap_pte_init_sa1();
1476		/* Use powersave on this CPU. */
1477		cpu_do_powersave = 1;
1478
1479		goto out;
1480	}
1481#endif	/* CPU_SA1110 */
1482#if defined(CPU_FA526) || defined(CPU_FA626TE)
1483	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
1484		cpufuncs = fa526_cpufuncs;
1485		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1486		get_cachetype_cp15();
1487		pmap_pte_init_generic();
1488
1489		/* Use powersave on this CPU. */
1490		cpu_do_powersave = 1;
1491
1492		goto out;
1493	}
1494#endif	/* CPU_FA526 || CPU_FA626TE */
1495#ifdef CPU_IXP12X0
1496        if (cputype == CPU_ID_IXP1200) {
1497                cpufuncs = ixp12x0_cpufuncs;
1498                cpu_reset_needs_v4_MMU_disable = 1;
1499                get_cachetype_table();
1500                pmap_pte_init_sa1();
1501		goto out;
1502        }
1503#endif  /* CPU_IXP12X0 */
1504#ifdef CPU_XSCALE_80200
1505	if (cputype == CPU_ID_80200) {
1506		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1507
1508		i80200_icu_init();
1509
1510#if defined(XSCALE_CCLKCFG)
1511		/*
1512		 * Crank CCLKCFG to maximum legal value.
1513		 */
1514		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1515			:
1516			: "r" (XSCALE_CCLKCFG));
1517#endif
1518
1519		/*
1520		 * XXX Disable ECC in the Bus Controller Unit; we
1521		 * don't really support it, yet.  Clear any pending
1522		 * error indications.
1523		 */
1524		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1525			:
1526			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1527
1528		cpufuncs = xscale_cpufuncs;
1529		/*
1530		 * i80200 errata: Step-A0 and A1 have a bug where
1531		 * D$ dirty bits are not cleared on "invalidate by
1532		 * address".
1533		 *
1534		 * Workaround: Clean cache line before invalidating.
1535		 */
1536		if (rev == 0 || rev == 1)
1537			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1538
1539		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1540		get_cachetype_cp15();
1541		pmap_pte_init_xscale();
1542		goto out;
1543	}
1544#endif /* CPU_XSCALE_80200 */
1545#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1546	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1547	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1548	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1549		cpufuncs = xscale_cpufuncs;
1550		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1551		get_cachetype_cp15();
1552		pmap_pte_init_xscale();
1553		goto out;
1554	}
1555#endif /* CPU_XSCALE_80321 */
1556
1557#if defined(CPU_XSCALE_81342)
1558	if (cputype == CPU_ID_81342) {
1559		cpufuncs = xscalec3_cpufuncs;
1560		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1561		get_cachetype_cp15();
1562		pmap_pte_init_xscale();
1563		goto out;
1564	}
1565#endif /* CPU_XSCALE_81342 */
1566#ifdef CPU_XSCALE_PXA2X0
1567	/* ignore core revision to test PXA2xx CPUs */
1568	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1569	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1570	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1571
1572		cpufuncs = xscale_cpufuncs;
1573		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1574		get_cachetype_cp15();
1575		pmap_pte_init_xscale();
1576
1577		/* Use powersave on this CPU. */
1578		cpu_do_powersave = 1;
1579
1580		goto out;
1581	}
1582#endif /* CPU_XSCALE_PXA2X0 */
1583#ifdef CPU_XSCALE_IXP425
1584	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1585            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
1586
1587		cpufuncs = xscale_cpufuncs;
1588		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1589		get_cachetype_cp15();
1590		pmap_pte_init_xscale();
1591
1592		goto out;
1593	}
1594#endif /* CPU_XSCALE_IXP425 */
1595	/*
1596	 * Bzzzz. And the answer was ...
1597	 */
1598	panic("No support for this CPU type (%08x) in kernel", cputype);
1599	return(ARCHITECTURE_NOT_PRESENT);
1600out:
1601	uma_set_align(arm_dcache_align_mask);
1602	return (0);
1603}
1604
1605/*
1606 * Fixup routines for data and prefetch aborts.
1607 *
1608 * Several compile time symbols are used
1609 *
1610 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1611 * correction of registers after a fault.
1612 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1613 * when defined should use late aborts
1614 */
1615
1616
1617/*
1618 * Null abort fixup routine.
1619 * For use when no fixup is required.
1620 */
1621int
1622cpufunc_null_fixup(arg)
1623	void *arg;
1624{
1625	return(ABORT_FIXUP_OK);
1626}
1627
1628
#if defined(CPU_ARM7TDMI)

/*
 * Debug helpers for the abort fixup code below.  With
 * DEBUG_FAULT_CORRECTION defined they print the fixup progress and
 * disassemble the faulting instruction; otherwise they expand to
 * nothing.  DFC_PRINTF takes a parenthesized printf argument list:
 * DFC_PRINTF(("fmt", args)).
 */
#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif
1638
1639/*
1640 * "Early" data abort fixup.
1641 *
1642 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1643 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1644 *
1645 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1646 */
1647int
1648early_abort_fixup(arg)
1649	void *arg;
1650{
1651	struct trapframe *frame = arg;
1652	u_int fault_pc;
1653	u_int fault_instruction;
1654	int saved_lr = 0;
1655
1656	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1657
1658		/* Ok an abort in SVC mode */
1659
1660		/*
1661		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1662		 * as the fault happened in svc mode but we need it in the
1663		 * usr slot so we can treat the registers as an array of ints
1664		 * during fixing.
1665		 * NOTE: This PC is in the position but writeback is not
1666		 * allowed on r15.
1667		 * Doing it like this is more efficient than trapping this
1668		 * case in all possible locations in the following fixup code.
1669		 */
1670
1671		saved_lr = frame->tf_usr_lr;
1672		frame->tf_usr_lr = frame->tf_svc_lr;
1673
1674		/*
1675		 * Note the trapframe does not have the SVC r13 so a fault
1676		 * from an instruction with writeback to r13 in SVC mode is
1677		 * not allowed. This should not happen as the kstack is
1678		 * always valid.
1679		 */
1680	}
1681
1682	/* Get fault address and status from the CPU */
1683
1684	fault_pc = frame->tf_pc;
1685	fault_instruction = *((volatile unsigned int *)fault_pc);
1686
1687	/* Decode the fault instruction and fix the registers as needed */
1688
1689	if ((fault_instruction & 0x0e000000) == 0x08000000) {
1690		int base;
1691		int loop;
1692		int count;
1693		int *registers = &frame->tf_r0;
1694
1695		DFC_PRINTF(("LDM/STM\n"));
1696		DFC_DISASSEMBLE(fault_pc);
1697		if (fault_instruction & (1 << 21)) {
1698			DFC_PRINTF(("This instruction must be corrected\n"));
1699			base = (fault_instruction >> 16) & 0x0f;
1700			if (base == 15)
1701				return ABORT_FIXUP_FAILED;
1702			/* Count registers transferred */
1703			count = 0;
1704			for (loop = 0; loop < 16; ++loop) {
1705				if (fault_instruction & (1<<loop))
1706					++count;
1707			}
1708			DFC_PRINTF(("%d registers used\n", count));
1709			DFC_PRINTF(("Corrected r%d by %d bytes ",
1710				       base, count * 4));
1711			if (fault_instruction & (1 << 23)) {
1712				DFC_PRINTF(("down\n"));
1713				registers[base] -= count * 4;
1714			} else {
1715				DFC_PRINTF(("up\n"));
1716				registers[base] += count * 4;
1717			}
1718		}
1719	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1720		int base;
1721		int offset;
1722		int *registers = &frame->tf_r0;
1723
1724		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1725
1726		DFC_DISASSEMBLE(fault_pc);
1727
1728		/* Only need to fix registers if write back is turned on */
1729
1730		if ((fault_instruction & (1 << 21)) != 0) {
1731			base = (fault_instruction >> 16) & 0x0f;
1732			if (base == 13 &&
1733			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1734				return ABORT_FIXUP_FAILED;
1735			if (base == 15)
1736				return ABORT_FIXUP_FAILED;
1737
1738			offset = (fault_instruction & 0xff) << 2;
1739			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1740			if ((fault_instruction & (1 << 23)) != 0)
1741				offset = -offset;
1742			registers[base] += offset;
1743			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1744		}
1745	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1746		return ABORT_FIXUP_FAILED;
1747
1748	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1749
1750		/* Ok an abort in SVC mode */
1751
1752		/*
1753		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1754		 * as the fault happened in svc mode but we need it in the
1755		 * usr slot so we can treat the registers as an array of ints
1756		 * during fixing.
1757		 * NOTE: This PC is in the position but writeback is not
1758		 * allowed on r15.
1759		 * Doing it like this is more efficient than trapping this
1760		 * case in all possible locations in the prior fixup code.
1761		 */
1762
1763		frame->tf_svc_lr = frame->tf_usr_lr;
1764		frame->tf_usr_lr = saved_lr;
1765
1766		/*
1767		 * Note the trapframe does not have the SVC r13 so a fault
1768		 * from an instruction with writeback to r13 in SVC mode is
1769		 * not allowed. This should not happen as the kstack is
1770		 * always valid.
1771		 */
1772	}
1773
1774	return(ABORT_FIXUP_OK);
1775}
1776#endif	/* CPU_ARM2/250/3/6/7 */
1777
1778
1779#if defined(CPU_ARM7TDMI)
1780/*
1781 * "Late" (base updated) data abort fixup
1782 *
1783 * For ARM6 (in late-abort mode) and ARM7.
1784 *
1785 * In this model, all data-transfer instructions need fixing up.  We defer
1786 * LDM, STM, LDC and STC fixup to the early-abort handler.
1787 */
1788int
1789late_abort_fixup(arg)
1790	void *arg;
1791{
1792	struct trapframe *frame = arg;
1793	u_int fault_pc;
1794	u_int fault_instruction;
1795	int saved_lr = 0;
1796
1797	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1798
1799		/* Ok an abort in SVC mode */
1800
1801		/*
1802		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1803		 * as the fault happened in svc mode but we need it in the
1804		 * usr slot so we can treat the registers as an array of ints
1805		 * during fixing.
1806		 * NOTE: This PC is in the position but writeback is not
1807		 * allowed on r15.
1808		 * Doing it like this is more efficient than trapping this
1809		 * case in all possible locations in the following fixup code.
1810		 */
1811
1812		saved_lr = frame->tf_usr_lr;
1813		frame->tf_usr_lr = frame->tf_svc_lr;
1814
1815		/*
1816		 * Note the trapframe does not have the SVC r13 so a fault
1817		 * from an instruction with writeback to r13 in SVC mode is
1818		 * not allowed. This should not happen as the kstack is
1819		 * always valid.
1820		 */
1821	}
1822
1823	/* Get fault address and status from the CPU */
1824
1825	fault_pc = frame->tf_pc;
1826	fault_instruction = *((volatile unsigned int *)fault_pc);
1827
1828	/* Decode the fault instruction and fix the registers as needed */
1829
1830	/* Was is a swap instruction ? */
1831
1832	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1833		DFC_DISASSEMBLE(fault_pc);
1834	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1835
1836		/* Was is a ldr/str instruction */
1837		/* This is for late abort only */
1838
1839		int base;
1840		int offset;
1841		int *registers = &frame->tf_r0;
1842
1843		DFC_DISASSEMBLE(fault_pc);
1844
1845		/* This is for late abort only */
1846
1847		if ((fault_instruction & (1 << 24)) == 0
1848		    || (fault_instruction & (1 << 21)) != 0) {
1849			/* postindexed ldr/str with no writeback */
1850
1851			base = (fault_instruction >> 16) & 0x0f;
1852			if (base == 13 &&
1853			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1854				return ABORT_FIXUP_FAILED;
1855			if (base == 15)
1856				return ABORT_FIXUP_FAILED;
1857			DFC_PRINTF(("late abt fix: r%d=%08x : ",
1858				       base, registers[base]));
1859			if ((fault_instruction & (1 << 25)) == 0) {
1860				/* Immediate offset - easy */
1861
1862				offset = fault_instruction & 0xfff;
1863				if ((fault_instruction & (1 << 23)))
1864					offset = -offset;
1865				registers[base] += offset;
1866				DFC_PRINTF(("imm=%08x ", offset));
1867			} else {
1868				/* offset is a shifted register */
1869				int shift;
1870
1871				offset = fault_instruction & 0x0f;
1872				if (offset == base)
1873					return ABORT_FIXUP_FAILED;
1874
1875				/*
1876				 * Register offset - hard we have to
1877				 * cope with shifts !
1878				 */
1879				offset = registers[offset];
1880
1881				if ((fault_instruction & (1 << 4)) == 0)
1882					/* shift with amount */
1883					shift = (fault_instruction >> 7) & 0x1f;
1884				else {
1885					/* shift with register */
1886					if ((fault_instruction & (1 << 7)) != 0)
1887						/* undefined for now so bail out */
1888						return ABORT_FIXUP_FAILED;
1889					shift = ((fault_instruction >> 8) & 0xf);
1890					if (base == shift)
1891						return ABORT_FIXUP_FAILED;
1892					DFC_PRINTF(("shift reg=%d ", shift));
1893					shift = registers[shift];
1894				}
1895				DFC_PRINTF(("shift=%08x ", shift));
1896				switch (((fault_instruction >> 5) & 0x3)) {
1897				case 0 : /* Logical left */
1898					offset = (int)(((u_int)offset) << shift);
1899					break;
1900				case 1 : /* Logical Right */
1901					if (shift == 0) shift = 32;
1902					offset = (int)(((u_int)offset) >> shift);
1903					break;
1904				case 2 : /* Arithmetic Right */
1905					if (shift == 0) shift = 32;
1906					offset = (int)(((int)offset) >> shift);
1907					break;
1908				case 3 : /* Rotate right (rol or rxx) */
1909					return ABORT_FIXUP_FAILED;
1910					break;
1911				}
1912
1913				DFC_PRINTF(("abt: fixed LDR/STR with "
1914					       "register offset\n"));
1915				if ((fault_instruction & (1 << 23)))
1916					offset = -offset;
1917				DFC_PRINTF(("offset=%08x ", offset));
1918				registers[base] += offset;
1919			}
1920			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1921		}
1922	}
1923
1924	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1925
1926		/* Ok an abort in SVC mode */
1927
1928		/*
1929		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1930		 * as the fault happened in svc mode but we need it in the
1931		 * usr slot so we can treat the registers as an array of ints
1932		 * during fixing.
1933		 * NOTE: This PC is in the position but writeback is not
1934		 * allowed on r15.
1935		 * Doing it like this is more efficient than trapping this
1936		 * case in all possible locations in the prior fixup code.
1937		 */
1938
1939		frame->tf_svc_lr = frame->tf_usr_lr;
1940		frame->tf_usr_lr = saved_lr;
1941
1942		/*
1943		 * Note the trapframe does not have the SVC r13 so a fault
1944		 * from an instruction with writeback to r13 in SVC mode is
1945		 * not allowed. This should not happen as the kstack is
1946		 * always valid.
1947		 */
1948	}
1949
1950	/*
1951	 * Now let the early-abort fixup routine have a go, in case it
1952	 * was an LDM, STM, LDC or STC that faulted.
1953	 */
1954
1955	return early_abort_fixup(arg);
1956}
1957#endif	/* CPU_ARM7TDMI */
1958
1959/*
1960 * CPU Setup code
1961 */
1962
1963#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1964  defined(CPU_ARM9E) || \
1965  defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||	\
1966  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
1967  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
1968  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1969  defined(CPU_ARM10) ||  defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
1970  defined(CPU_FA526) || defined(CPU_FA626TE)
1971
1972#define IGN	0
1973#define OR	1
1974#define BIC	2
1975
/*
 * Table entry for parse_cpu_options(): maps a boolean boot-argument
 * name to the CPU control-register bit(s) it sets or clears.
 */
struct cpu_option {
	char	*co_name;	/* boot option name looked up in the args string */
	int	co_falseop;	/* action (IGN/OR/BIC) when the option is false */
	int	co_trueop;	/* action (IGN/OR/BIC) when the option is true */
	int	co_value;	/* CPU_CONTROL_* bit(s) the action applies to */
};
1982
1983static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1984
1985static u_int
1986parse_cpu_options(args, optlist, cpuctrl)
1987	char *args;
1988	struct cpu_option *optlist;
1989	u_int cpuctrl;
1990{
1991	int integer;
1992
1993	if (args == NULL)
1994		return(cpuctrl);
1995
1996	while (optlist->co_name) {
1997		if (get_bootconf_option(args, optlist->co_name,
1998		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1999			if (integer) {
2000				if (optlist->co_trueop == OR)
2001					cpuctrl |= optlist->co_value;
2002				else if (optlist->co_trueop == BIC)
2003					cpuctrl &= ~optlist->co_value;
2004			} else {
2005				if (optlist->co_falseop == OR)
2006					cpuctrl |= optlist->co_value;
2007				else if (optlist->co_falseop == BIC)
2008					cpuctrl &= ~optlist->co_value;
2009			}
2010		}
2011		++optlist;
2012	}
2013	return(cpuctrl);
2014}
2015#endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
2016
2017#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot-argument options shared by the ARM7TDMI and ARM8 setup routines
 * (see arm7tdmi_setup()/arm8_setup()): unified-cache and write-buffer
 * enable/disable knobs, applied by parse_cpu_options().
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2029
2030#endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2031
2032#ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI-specific boot-argument options, applied by arm7tdmi_setup()
 * after the shared arm678_options table: unified-cache, write-buffer
 * and FPA coprocessor clock (CPCLK) knobs.
 */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
2044
2045void
2046arm7tdmi_setup(args)
2047	char *args;
2048{
2049	int cpuctrl;
2050
2051	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2052		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2053		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2054
2055	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2056	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2057
2058#ifdef __ARMEB__
2059	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2060#endif
2061
2062	/* Clear out the cache */
2063	cpu_idcache_wbinv_all();
2064
2065	/* Set the control register */
2066	ctrl = cpuctrl;
2067	cpu_control(0xffffffff, cpuctrl);
2068}
2069#endif	/* CPU_ARM7TDMI */
2070
2071#ifdef CPU_ARM8
/*
 * ARM8-specific boot-argument options, applied by arm8_setup() after
 * the shared arm678_options table: unified-cache, write-buffer and
 * branch-prediction knobs.
 */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2084
2085void
2086arm8_setup(args)
2087	char *args;
2088{
2089	int integer;
2090	int cpuctrl, cpuctrlmask;
2091	int clocktest;
2092	int setclock = 0;
2093
2094	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2095		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2096		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2097	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2098		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2099		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2100		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2101		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2102
2103#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2104	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2105#endif
2106
2107	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2108	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2109
2110#ifdef __ARMEB__
2111	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2112#endif
2113
2114	/* Get clock configuration */
2115	clocktest = arm8_clock_config(0, 0) & 0x0f;
2116
2117	/* Special ARM8 clock and test configuration */
2118	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2119		clocktest = 0;
2120		setclock = 1;
2121	}
2122	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2123		if (integer)
2124			clocktest |= 0x01;
2125		else
2126			clocktest &= ~(0x01);
2127		setclock = 1;
2128	}
2129	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2130		if (integer)
2131			clocktest |= 0x02;
2132		else
2133			clocktest &= ~(0x02);
2134		setclock = 1;
2135	}
2136	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2137		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
2138		setclock = 1;
2139	}
2140	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2141		clocktest |= (integer & 7) << 5;
2142		setclock = 1;
2143	}
2144
2145	/* Clear out the cache */
2146	cpu_idcache_wbinv_all();
2147
2148	/* Set the control register */
2149	ctrl = cpuctrl;
2150	cpu_control(0xffffffff, cpuctrl);
2151
2152	/* Set the clock/test register */
2153	if (setclock)
2154		arm8_clock_config(0x7f, clocktest);
2155}
2156#endif	/* CPU_ARM8 */
2157
2158#ifdef CPU_ARM9
/*
 * ARM9 boot-argument options, applied by arm9_setup(): split I/D cache
 * and write-buffer enable/disable knobs.
 */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2170
2171void
2172arm9_setup(args)
2173	char *args;
2174{
2175	int cpuctrl, cpuctrlmask;
2176
2177	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2178	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2179	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2180	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
2181	    CPU_CONTROL_ROUNDROBIN;
2182	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2183		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2184		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2185		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2186		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2187		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2188		 | CPU_CONTROL_ROUNDROBIN;
2189
2190#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2191	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2192#endif
2193
2194	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2195
2196#ifdef __ARMEB__
2197	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2198#endif
2199	if (vector_page == ARM_VECTORS_HIGH)
2200		cpuctrl |= CPU_CONTROL_VECRELOC;
2201
2202	/* Clear out the cache */
2203	cpu_idcache_wbinv_all();
2204
2205	/* Set the control register */
2206	cpu_control(cpuctrlmask, cpuctrl);
2207	ctrl = cpuctrl;
2208
2209}
2210#endif	/* CPU_ARM9 */
2211
2212#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * ARM9E/ARM10 boot-argument options, applied by arm10_setup(): split
 * I/D cache and write-buffer enable/disable knobs.
 */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2224
2225void
2226arm10_setup(args)
2227	char *args;
2228{
2229	int cpuctrl, cpuctrlmask;
2230
2231	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2232	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2233	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2234	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2235	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2236	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2237	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2238	    | CPU_CONTROL_BPRD_ENABLE
2239	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2240
2241#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2242	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2243#endif
2244
2245	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2246
2247#ifdef __ARMEB__
2248	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2249#endif
2250
2251	/* Clear out the cache */
2252	cpu_idcache_wbinv_all();
2253
2254	/* Now really make sure they are clean.  */
2255	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2256
2257	if (vector_page == ARM_VECTORS_HIGH)
2258		cpuctrl |= CPU_CONTROL_VECRELOC;
2259
2260	/* Set the control register */
2261	ctrl = cpuctrl;
2262	cpu_control(0xffffffff, cpuctrl);
2263
2264	/* And again. */
2265	cpu_idcache_wbinv_all();
2266}
2267#endif	/* CPU_ARM9E || CPU_ARM10 */
2268
2269#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
/*
 * ARM1136/ARM1176 boot-argument options, applied by arm11x6_setup():
 * split I/D cache enable/disable knobs.
 */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2278
/*
 * Configure the control and auxiliary-control registers for an
 * ARM1136/ARM1176 core, including errata workarounds selected by the
 * CPU ID.  Only the bits outside cpuctrl_wax ("write as existing") are
 * modified in the control register; the auxiliary control register is
 * rewritten only if the computed value differs from the current one.
 */
void
arm11x6_setup(char *args)
{
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t sbz=0;
	uint32_t cpuid;

	cpuid = cpufunc_id();

	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_SYST_ENABLE |
		CPU_CONTROL_IC_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	cpuctrl_wax =
		(3 << 30) | /* SBZ */
		(1 << 29) | /* FA */
		(1 << 28) | /* TR */
		(3 << 26) | /* SBZ */
		(3 << 19) | /* SBZ */
		(1 << 17);  /* SBZ */

	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the vectors when the vector page lives at 0xffff0000. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	auxctrl = 0;
	auxctrl_wax = ~0;
	/*
	 * This options enables the workaround for the 364296 ARM1136
	 * r0pX errata (possible cache data corruption with
	 * hit-under-miss enabled). It sets the undocumented bit 31 in
	 * the auxiliary control register and the FI bit in the control
	 * register, thus disabling hit-under-miss without putting the
	 * processor into full low interrupt latency mode. ARM11MPCore
	 * is not affected.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
		cpuctrl |= CPU_CONTROL_FI_ENABLE;
		auxctrl = ARM1136_AUXCTL_PFI;
		auxctrl_wax = ~ARM1136_AUXCTL_PFI;
	}

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted.  */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(~cpuctrl_wax, cpuctrl);

	/*
	 * Read the auxiliary control register, merge in the desired bits
	 * (keeping the auxctrl_wax bits as they are), and write it back
	 * only when the merged value differs (the teq/mcrne pair).
	 */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"and	%1, %0, %2\n\t"
			"orr	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(auxctrl_wax), "r"(auxctrl));

	/* And again. */
	cpu_idcache_wbinv_all();
}
2373#endif  /* CPU_ARM1136 || CPU_ARM1176 */
2374
2375#ifdef CPU_MV_PJ4B
2376void
2377pj4bv7_setup(args)
2378	char *args;
2379{
2380	int cpuctrl;
2381
2382	pj4b_config();
2383
2384	cpuctrl = CPU_CONTROL_MMU_ENABLE;
2385#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2386	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2387#endif
2388	cpuctrl |= CPU_CONTROL_DC_ENABLE;
2389	cpuctrl |= (0xf << 3);
2390	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
2391	cpuctrl |= CPU_CONTROL_IC_ENABLE;
2392	if (vector_page == ARM_VECTORS_HIGH)
2393		cpuctrl |= CPU_CONTROL_VECRELOC;
2394	cpuctrl |= (0x5 << 16) | (1 < 22);
2395	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
2396
2397	/* Clear out the cache */
2398	cpu_idcache_wbinv_all();
2399
2400	/* Set the control register */
2401	ctrl = cpuctrl;
2402	cpu_control(0xFFFFFFFF, cpuctrl);
2403
2404	/* And again. */
2405	cpu_idcache_wbinv_all();
2406}
2407#endif /* CPU_MV_PJ4B */
2408
2409#ifdef CPU_CORTEXA
2410
2411void
2412cortexa_setup(char *args)
2413{
2414	int cpuctrl, cpuctrlmask;
2415
2416	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
2417	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
2418	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
2419	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
2420	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
2421	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
2422
2423	cpuctrl = CPU_CONTROL_MMU_ENABLE |
2424	    CPU_CONTROL_IC_ENABLE |
2425	    CPU_CONTROL_DC_ENABLE |
2426	    CPU_CONTROL_BPRD_ENABLE;
2427
2428#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2429	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2430#endif
2431
2432	/* Switch to big endian */
2433#ifdef __ARMEB__
2434	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2435#endif
2436
2437	/* Check if the vector page is at the high address (0xffff0000) */
2438	if (vector_page == ARM_VECTORS_HIGH)
2439		cpuctrl |= CPU_CONTROL_VECRELOC;
2440
2441	/* Clear out the cache */
2442	cpu_idcache_wbinv_all();
2443
2444	/* Set the control register */
2445	ctrl = cpuctrl;
2446	cpu_control(cpuctrlmask, cpuctrl);
2447
2448	/* And again. */
2449	cpu_idcache_wbinv_all();
2450#ifdef SMP
2451	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
2452#endif
2453}
2454#endif  /* CPU_CORTEXA */
2455
2456
2457#ifdef CPU_SA110
/*
 * SA-110 boot-argument options, applied by sa110_setup(): split I/D
 * cache and write-buffer enable/disable knobs.
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2473
2474void
2475sa110_setup(args)
2476	char *args;
2477{
2478	int cpuctrl, cpuctrlmask;
2479
2480	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2481		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2482		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2483		 | CPU_CONTROL_WBUF_ENABLE;
2484	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2485		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2486		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2487		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2488		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2489		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2490		 | CPU_CONTROL_CPCLK;
2491
2492#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2493	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2494#endif
2495
2496	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
2497
2498#ifdef __ARMEB__
2499	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2500#endif
2501
2502	/* Clear out the cache */
2503	cpu_idcache_wbinv_all();
2504
2505	/* Set the control register */
2506	ctrl = cpuctrl;
2507/*	cpu_control(cpuctrlmask, cpuctrl);*/
2508	cpu_control(0xffffffff, cpuctrl);
2509
2510	/*
2511	 * enable clockswitching, note that this doesn't read or write to r0,
2512	 * r0 is just to make it valid asm
2513	 */
2514	__asm ("mcr 15, 0, r0, c15, c1, 2");
2515}
2516#endif	/* CPU_SA110 */
2517
2518#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * SA-1100/SA-1110 boot-argument options, applied by sa11x0_setup():
 * split I/D cache and write-buffer enable/disable knobs.
 */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2534
2535void
2536sa11x0_setup(args)
2537	char *args;
2538{
2539	int cpuctrl, cpuctrlmask;
2540
2541	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2542		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2543		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2544		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2545	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2546		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2547		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2548		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2549		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2550		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2551		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2552
2553#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2554	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2555#endif
2556
2557
2558	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2559
2560#ifdef __ARMEB__
2561	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2562#endif
2563
2564	if (vector_page == ARM_VECTORS_HIGH)
2565		cpuctrl |= CPU_CONTROL_VECRELOC;
2566	/* Clear out the cache */
2567	cpu_idcache_wbinv_all();
2568	/* Set the control register */
2569	ctrl = cpuctrl;
2570	cpu_control(0xffffffff, cpuctrl);
2571}
2572#endif	/* CPU_SA1100 || CPU_SA1110 */
2573
2574#if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * FA526/FA626TE boot-argument options, applied by fa526_setup():
 * cache and write-buffer enable/disable knobs.
 */
struct cpu_option fa526_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2589
2590void
2591fa526_setup(char *args)
2592{
2593	int cpuctrl, cpuctrlmask;
2594
2595	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2596		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2597		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2598		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2599		| CPU_CONTROL_BPRD_ENABLE;
2600	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2601		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2602		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2603		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2604		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2605		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2606		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2607
2608#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2609	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2610#endif
2611
2612	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
2613
2614#ifdef __ARMEB__
2615	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2616#endif
2617
2618	if (vector_page == ARM_VECTORS_HIGH)
2619		cpuctrl |= CPU_CONTROL_VECRELOC;
2620
2621	/* Clear out the cache */
2622	cpu_idcache_wbinv_all();
2623
2624	/* Set the control register */
2625	ctrl = cpuctrl;
2626	cpu_control(0xffffffff, cpuctrl);
2627}
2628#endif	/* CPU_FA526 || CPU_FA626TE */
2629
2630
2631#if defined(CPU_IXP12X0)
/*
 * IXP12x0 boot-argument options, applied by ixp12x0_setup(): split
 * I/D cache and write-buffer enable/disable knobs.
 */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2643
2644void
2645ixp12x0_setup(args)
2646	char *args;
2647{
2648	int cpuctrl, cpuctrlmask;
2649
2650
2651	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2652		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2653		 | CPU_CONTROL_IC_ENABLE;
2654
2655	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2656		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2657		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2658		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2659		 | CPU_CONTROL_VECRELOC;
2660
2661#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2662	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2663#endif
2664
2665	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2666
2667#ifdef __ARMEB__
2668	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2669#endif
2670
2671	if (vector_page == ARM_VECTORS_HIGH)
2672		cpuctrl |= CPU_CONTROL_VECRELOC;
2673
2674	/* Clear out the cache */
2675	cpu_idcache_wbinv_all();
2676
2677	/* Set the control register */
2678	ctrl = cpuctrl;
2679	/* cpu_control(0xffffffff, cpuctrl); */
2680	cpu_control(cpuctrlmask, cpuctrl);
2681}
2682#endif /* CPU_IXP12X0 */
2683
2684#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2685  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2686  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * XScale boot-argument options, applied by xscale_setup(): split I/D
 * cache and branch-prediction enable/disable knobs.
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2701
2702void
2703xscale_setup(args)
2704	char *args;
2705{
2706	uint32_t auxctl;
2707	int cpuctrl, cpuctrlmask;
2708
2709	/*
2710	 * The XScale Write Buffer is always enabled.  Our option
2711	 * is to enable/disable coalescing.  Note that bits 6:3
2712	 * must always be enabled.
2713	 */
2714
2715	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2716		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2717		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2718		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2719		 | CPU_CONTROL_BPRD_ENABLE;
2720	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2721		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2722		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2723		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2724		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2725		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2726		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
2727		 CPU_CONTROL_L2_ENABLE;
2728
2729#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2730	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2731#endif
2732
2733	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2734
2735#ifdef __ARMEB__
2736	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2737#endif
2738
2739	if (vector_page == ARM_VECTORS_HIGH)
2740		cpuctrl |= CPU_CONTROL_VECRELOC;
2741#ifdef CPU_XSCALE_CORE3
2742	cpuctrl |= CPU_CONTROL_L2_ENABLE;
2743#endif
2744
2745	/* Clear out the cache */
2746	cpu_idcache_wbinv_all();
2747
2748	/*
2749	 * Set the control register.  Note that bits 6:3 must always
2750	 * be set to 1.
2751	 */
2752	ctrl = cpuctrl;
2753/*	cpu_control(cpuctrlmask, cpuctrl);*/
2754	cpu_control(0xffffffff, cpuctrl);
2755
2756	/* Make sure write coalescing is turned on */
2757	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2758		: "=r" (auxctl));
2759#ifdef XSCALE_NO_COALESCE_WRITES
2760	auxctl |= XSCALE_AUXCTL_K;
2761#else
2762	auxctl &= ~XSCALE_AUXCTL_K;
2763#endif
2764#ifdef CPU_XSCALE_CORE3
2765	auxctl |= XSCALE_AUXCTL_LLR;
2766	auxctl |= XSCALE_AUXCTL_MD_MASK;
2767#endif
2768	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2769		: : "r" (auxctl));
2770}
2771#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
2772	   CPU_XSCALE_80219 */
2773