1/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2
3/*-
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 *    products derived from this software without specific prior written
22 *    permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * RiscBSD kernel project
37 *
38 * cpufuncs.c
39 *
40 * C functions for supporting CPU / MMU / TLB specific operations.
41 *
42 * Created      : 30/01/97
43 */
44#include <sys/cdefs.h>
45__FBSDID("$FreeBSD: stable/10/sys/arm/arm/cpufunc.c 266332 2014-05-17 17:54:38Z ian $");
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/bus.h>
52#include <machine/bus.h>
53#include <machine/cpu.h>
54#include <machine/disassem.h>
55
56#include <vm/vm.h>
57#include <vm/pmap.h>
58#include <vm/uma.h>
59
60#include <machine/cpuconf.h>
61#include <machine/cpufunc.h>
62#include <machine/bootconfig.h>
63
64#ifdef CPU_XSCALE_80200
65#include <arm/xscale/i80200/i80200reg.h>
66#include <arm/xscale/i80200/i80200var.h>
67#endif
68
69#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
70#include <arm/xscale/i80321/i80321reg.h>
71#include <arm/xscale/i80321/i80321var.h>
72#endif
73
74/*
75 * Some definitions in i81342reg.h clash with i80321reg.h.
76 * This only happens for the LINT kernel. As it happens,
77 * we don't need anything from i81342reg.h that we already
78 * got from somewhere else during a LINT compile.
79 */
80#if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
81#include <arm/xscale/i8134x/i81342reg.h>
82#endif
83
84#ifdef CPU_XSCALE_IXP425
85#include <arm/xscale/ixp425/ixp425reg.h>
86#include <arm/xscale/ixp425/ixp425var.h>
87#endif
88
89/* PRIMARY CACHE VARIABLES */
90int	arm_picache_size;
91int	arm_picache_line_size;
92int	arm_picache_ways;
93
94int	arm_pdcache_size;	/* and unified */
95int	arm_pdcache_line_size;
96int	arm_pdcache_ways;
97
98int	arm_pcache_type;
99int	arm_pcache_unified;
100
101int	arm_dcache_align;
102int	arm_dcache_align_mask;
103
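/*
 * Raw ARMv7 cache hierarchy information: get_cachetype_cp15() stores the
 * CLIDR value in arm_cache_level, the Level of Coherence (LoC) in
 * arm_cache_loc, and one CCSIDR value per {cache level, I/D} selector
 * in arm_cache_type[].
 */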
104u_int	arm_cache_level;
105u_int	arm_cache_type[14];
106u_int	arm_cache_loc;
107
108/* 1 == use cpu_sleep(), 0 == don't */
109int cpu_do_powersave;
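/* Copy of the control register value programmed by the cpu setup routine. */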
110int ctrl;
111
112#ifdef CPU_ARM9
113struct cpu_functions arm9_cpufuncs = {
114	/* CPU functions */
115
116	cpufunc_id,			/* id			*/
117	cpufunc_nullop,			/* cpwait		*/
118
119	/* MMU functions */
120
121	cpufunc_control,		/* control		*/
122	cpufunc_domains,		/* Domain		*/
123	arm9_setttb,			/* Setttb		*/
124	cpufunc_faultstatus,		/* Faultstatus		*/
125	cpufunc_faultaddress,		/* Faultaddress		*/
126
127	/* TLB functions */
128
129	armv4_tlb_flushID,		/* tlb_flushID		*/
130	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
131	armv4_tlb_flushI,		/* tlb_flushI		*/
132	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
133	armv4_tlb_flushD,		/* tlb_flushD		*/
134	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
135
136	/* Cache operations */
137
138	arm9_icache_sync_all,		/* icache_sync_all	*/
139	arm9_icache_sync_range,		/* icache_sync_range	*/
140
141	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
142	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
143	arm9_dcache_inv_range,		/* dcache_inv_range	*/
144	arm9_dcache_wb_range,		/* dcache_wb_range	*/
145
146	armv4_idcache_inv_all,		/* idcache_inv_all	*/
147	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
148	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
149	cpufunc_nullop,			/* l2cache_wbinv_all	*/
150	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
151	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
152	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
153
154	/* Other functions */
155
156	cpufunc_nullop,			/* flush_prefetchbuf	*/
157	armv4_drain_writebuf,		/* drain_writebuf	*/
158	cpufunc_nullop,			/* flush_brnchtgt_C	*/
159	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
160
161	(void *)cpufunc_nullop,		/* sleep		*/
162
163	/* Soft functions */
164
165	cpufunc_null_fixup,		/* dataabt_fixup	*/
166	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
167
168	arm9_context_switch,		/* context_switch	*/
169
170	arm9_setup			/* cpu setup		*/
171
172};
173#endif /* CPU_ARM9 */
174
175#if defined(CPU_ARM9E) || defined(CPU_ARM10)
176struct cpu_functions armv5_ec_cpufuncs = {
177	/* CPU functions */
178
179	cpufunc_id,			/* id			*/
180	cpufunc_nullop,			/* cpwait		*/
181
182	/* MMU functions */
183
184	cpufunc_control,		/* control		*/
185	cpufunc_domains,		/* Domain		*/
186	armv5_ec_setttb,		/* Setttb		*/
187	cpufunc_faultstatus,		/* Faultstatus		*/
188	cpufunc_faultaddress,		/* Faultaddress		*/
189
190	/* TLB functions */
191
192	armv4_tlb_flushID,		/* tlb_flushID		*/
193	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
194	armv4_tlb_flushI,		/* tlb_flushI		*/
195	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
196	armv4_tlb_flushD,		/* tlb_flushD		*/
197	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
198
199	/* Cache operations */
200
201	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
202	armv5_ec_icache_sync_range,	/* icache_sync_range	*/
203
204	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
205	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
206	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
207	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/
208
209	armv4_idcache_inv_all,		/* idcache_inv_all	*/
210	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
211	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/
212
213	cpufunc_nullop,                 /* l2cache_wbinv_all    */
214	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
215	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
216	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
217
218	/* Other functions */
219
220	cpufunc_nullop,			/* flush_prefetchbuf	*/
221	armv4_drain_writebuf,		/* drain_writebuf	*/
222	cpufunc_nullop,			/* flush_brnchtgt_C	*/
223	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
224
225	(void *)cpufunc_nullop,		/* sleep		*/
226
227	/* Soft functions */
228
229	cpufunc_null_fixup,		/* dataabt_fixup	*/
230	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
231
232	arm10_context_switch,		/* context_switch	*/
233
234	arm10_setup			/* cpu setup		*/
235
236};
237
238struct cpu_functions sheeva_cpufuncs = {
239	/* CPU functions */
240
241	cpufunc_id,			/* id			*/
242	cpufunc_nullop,			/* cpwait		*/
243
244	/* MMU functions */
245
246	cpufunc_control,		/* control		*/
247	cpufunc_domains,		/* Domain		*/
248	sheeva_setttb,			/* Setttb		*/
249	cpufunc_faultstatus,		/* Faultstatus		*/
250	cpufunc_faultaddress,		/* Faultaddress		*/
251
252	/* TLB functions */
253
254	armv4_tlb_flushID,		/* tlb_flushID		*/
255	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
256	armv4_tlb_flushI,		/* tlb_flushI		*/
257	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
258	armv4_tlb_flushD,		/* tlb_flushD		*/
259	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
260
261	/* Cache operations */
262
263	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
264	armv5_ec_icache_sync_range,	/* icache_sync_range	*/
265
266	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
267	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
268	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
269	sheeva_dcache_wb_range,		/* dcache_wb_range	*/
270
271	armv4_idcache_inv_all,		/* idcache_inv_all	*/
272	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
273	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/
274
275	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all    */
276	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range  */
277	sheeva_l2cache_inv_range,	/* l2cache_inv_range    */
278	sheeva_l2cache_wb_range,	/* l2cache_wb_range     */
279
280	/* Other functions */
281
282	cpufunc_nullop,			/* flush_prefetchbuf	*/
283	armv4_drain_writebuf,		/* drain_writebuf	*/
284	cpufunc_nullop,			/* flush_brnchtgt_C	*/
285	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
286
287	sheeva_cpu_sleep,		/* sleep		*/
288
289	/* Soft functions */
290
291	cpufunc_null_fixup,		/* dataabt_fixup	*/
292	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
293
294	arm10_context_switch,		/* context_switch	*/
295
296	arm10_setup			/* cpu setup		*/
297};
298#endif /* CPU_ARM9E || CPU_ARM10 */
299
300#ifdef CPU_ARM10
301struct cpu_functions arm10_cpufuncs = {
302	/* CPU functions */
303
304	cpufunc_id,			/* id			*/
305	cpufunc_nullop,			/* cpwait		*/
306
307	/* MMU functions */
308
309	cpufunc_control,		/* control		*/
310	cpufunc_domains,		/* Domain		*/
311	arm10_setttb,			/* Setttb		*/
312	cpufunc_faultstatus,		/* Faultstatus		*/
313	cpufunc_faultaddress,		/* Faultaddress		*/
314
315	/* TLB functions */
316
317	armv4_tlb_flushID,		/* tlb_flushID		*/
318	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
319	armv4_tlb_flushI,		/* tlb_flushI		*/
320	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
321	armv4_tlb_flushD,		/* tlb_flushD		*/
322	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
323
324	/* Cache operations */
325
326	arm10_icache_sync_all,		/* icache_sync_all	*/
327	arm10_icache_sync_range,	/* icache_sync_range	*/
328
329	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
330	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
331	arm10_dcache_inv_range,		/* dcache_inv_range	*/
332	arm10_dcache_wb_range,		/* dcache_wb_range	*/
333
334	armv4_idcache_inv_all,		/* idcache_inv_all	*/
335	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
336	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/
337	cpufunc_nullop,			/* l2cache_wbinv_all	*/
338	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
339	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
340	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
341
342	/* Other functions */
343
344	cpufunc_nullop,			/* flush_prefetchbuf	*/
345	armv4_drain_writebuf,		/* drain_writebuf	*/
346	cpufunc_nullop,			/* flush_brnchtgt_C	*/
347	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
348
349	(void *)cpufunc_nullop,		/* sleep		*/
350
351	/* Soft functions */
352
353	cpufunc_null_fixup,		/* dataabt_fixup	*/
354	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
355
356	arm10_context_switch,		/* context_switch	*/
357
358	arm10_setup			/* cpu setup		*/
359
360};
361#endif /* CPU_ARM10 */
362
363#ifdef CPU_MV_PJ4B
364struct cpu_functions pj4bv7_cpufuncs = {
365	/* CPU functions */
366
367	cpufunc_id,			/* id			*/
368	arm11_drain_writebuf,		/* cpwait		*/
369
370	/* MMU functions */
371
372	cpufunc_control,		/* control		*/
373	cpufunc_domains,		/* Domain		*/
374	pj4b_setttb,			/* Setttb		*/
375	cpufunc_faultstatus,		/* Faultstatus		*/
376	cpufunc_faultaddress,		/* Faultaddress		*/
377
378	/* TLB functions */
379
380	armv7_tlb_flushID,		/* tlb_flushID		*/
381	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
382	armv7_tlb_flushID,		/* tlb_flushI		*/
383	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
384	armv7_tlb_flushID,		/* tlb_flushD		*/
385	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/
386
387	/* Cache operations */
388	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
389	armv7_icache_sync_range,	/* icache_sync_range	*/
390
391	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
392	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
393	armv7_dcache_inv_range,		/* dcache_inv_range	*/
394	armv7_dcache_wb_range,		/* dcache_wb_range	*/
395
396	armv7_idcache_inv_all,		/* idcache_inv_all	*/
397	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
398	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/
399
400	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
401	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
402	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
403	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
404
405	/* Other functions */
406
407	pj4b_drain_readbuf,		/* flush_prefetchbuf	*/
408	arm11_drain_writebuf,		/* drain_writebuf	*/
409	pj4b_flush_brnchtgt_all,	/* flush_brnchtgt_C	*/
410	pj4b_flush_brnchtgt_va,		/* flush_brnchtgt_E	*/
411
412	(void *)cpufunc_nullop,		/* sleep		*/
413
414	/* Soft functions */
415
416	cpufunc_null_fixup,		/* dataabt_fixup	*/
417	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
418
419	arm11_context_switch,		/* context_switch	*/
420
421	pj4bv7_setup			/* cpu setup		*/
422};
423#endif /* CPU_MV_PJ4B */
424
425#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
426  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
427  defined(CPU_XSCALE_80219)
428
429struct cpu_functions xscale_cpufuncs = {
430	/* CPU functions */
431
432	cpufunc_id,			/* id			*/
433	xscale_cpwait,			/* cpwait		*/
434
435	/* MMU functions */
436
437	xscale_control,			/* control		*/
438	cpufunc_domains,		/* domain		*/
439	xscale_setttb,			/* setttb		*/
440	cpufunc_faultstatus,		/* faultstatus		*/
441	cpufunc_faultaddress,		/* faultaddress		*/
442
443	/* TLB functions */
444
445	armv4_tlb_flushID,		/* tlb_flushID		*/
446	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
447	armv4_tlb_flushI,		/* tlb_flushI		*/
448	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
449	armv4_tlb_flushD,		/* tlb_flushD		*/
450	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
451
452	/* Cache operations */
453
454	xscale_cache_syncI,		/* icache_sync_all	*/
455	xscale_cache_syncI_rng,		/* icache_sync_range	*/
456
457	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
458	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
459	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
460	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/
461
462	xscale_cache_flushID,		/* idcache_inv_all	*/
463	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
464	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
465	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
466	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
467	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
468	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
469
470	/* Other functions */
471
472	cpufunc_nullop,			/* flush_prefetchbuf	*/
473	armv4_drain_writebuf,		/* drain_writebuf	*/
474	cpufunc_nullop,			/* flush_brnchtgt_C	*/
475	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
476
477	xscale_cpu_sleep,		/* sleep		*/
478
479	/* Soft functions */
480
481	cpufunc_null_fixup,		/* dataabt_fixup	*/
482	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
483
484	xscale_context_switch,		/* context_switch	*/
485
486	xscale_setup			/* cpu setup		*/
487};
488#endif
489/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
490   CPU_XSCALE_IXP425 || CPU_XSCALE_80219 */
491
492#ifdef CPU_XSCALE_81342
493struct cpu_functions xscalec3_cpufuncs = {
494	/* CPU functions */
495
496	cpufunc_id,			/* id			*/
497	xscale_cpwait,			/* cpwait		*/
498
499	/* MMU functions */
500
501	xscale_control,			/* control		*/
502	cpufunc_domains,		/* domain		*/
503	xscalec3_setttb,		/* setttb		*/
504	cpufunc_faultstatus,		/* faultstatus		*/
505	cpufunc_faultaddress,		/* faultaddress		*/
506
507	/* TLB functions */
508
509	armv4_tlb_flushID,		/* tlb_flushID		*/
510	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
511	armv4_tlb_flushI,		/* tlb_flushI		*/
512	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
513	armv4_tlb_flushD,		/* tlb_flushD		*/
514	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
515
516	/* Cache operations */
517
518	xscalec3_cache_syncI,		/* icache_sync_all	*/
519	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/
520
521	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
522	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
523	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
524	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/
525
526	xscale_cache_flushID,		/* idcache_inv_all	*/
527	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
528	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
529	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
530	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
531	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
532	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
533
534	/* Other functions */
535
536	cpufunc_nullop,			/* flush_prefetchbuf	*/
537	armv4_drain_writebuf,		/* drain_writebuf	*/
538	cpufunc_nullop,			/* flush_brnchtgt_C	*/
539	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
540
541	xscale_cpu_sleep,		/* sleep		*/
542
543	/* Soft functions */
544
545	cpufunc_null_fixup,		/* dataabt_fixup	*/
546	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
547
548	xscalec3_context_switch,	/* context_switch	*/
549
550	xscale_setup			/* cpu setup		*/
551};
552#endif /* CPU_XSCALE_81342 */
553
554
555#if defined(CPU_FA526) || defined(CPU_FA626TE)
556struct cpu_functions fa526_cpufuncs = {
557	/* CPU functions */
558
559	cpufunc_id,			/* id			*/
560	cpufunc_nullop,			/* cpwait		*/
561
562	/* MMU functions */
563
564	cpufunc_control,		/* control		*/
565	cpufunc_domains,		/* domain		*/
566	fa526_setttb,			/* setttb		*/
567	cpufunc_faultstatus,		/* faultstatus		*/
568	cpufunc_faultaddress,		/* faultaddress		*/
569
570	/* TLB functions */
571
572	armv4_tlb_flushID,		/* tlb_flushID		*/
573	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
574	armv4_tlb_flushI,		/* tlb_flushI		*/
575	fa526_tlb_flushI_SE,		/* tlb_flushI_SE	*/
576	armv4_tlb_flushD,		/* tlb_flushD		*/
577	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
578
579	/* Cache operations */
580
581	fa526_icache_sync_all,		/* icache_sync_all	*/
582	fa526_icache_sync_range,	/* icache_sync_range	*/
583
584	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
585	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
586	fa526_dcache_inv_range,		/* dcache_inv_range	*/
587	fa526_dcache_wb_range,		/* dcache_wb_range	*/
588
589	armv4_idcache_inv_all,		/* idcache_inv_all	*/
590	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
591	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
592	cpufunc_nullop,			/* l2cache_wbinv_all	*/
593	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
594	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
595	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
596
597	/* Other functions */
598
599	fa526_flush_prefetchbuf,	/* flush_prefetchbuf	*/
600	armv4_drain_writebuf,		/* drain_writebuf	*/
601	cpufunc_nullop,			/* flush_brnchtgt_C	*/
602	fa526_flush_brnchtgt_E,		/* flush_brnchtgt_E	*/
603
604	fa526_cpu_sleep,		/* sleep		*/
605
606	/* Soft functions */
607
608	cpufunc_null_fixup,		/* dataabt_fixup	*/
609	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
610
611	fa526_context_switch,		/* context_switch	*/
612
613	fa526_setup			/* cpu setup 		*/
614};
615#endif	/* CPU_FA526 || CPU_FA626TE */
616
617#if defined(CPU_ARM1136)
618struct cpu_functions arm1136_cpufuncs = {
619	/* CPU functions */
620
621	cpufunc_id,                     /* id                   */
622	cpufunc_nullop,                 /* cpwait               */
623
624	/* MMU functions */
625
626	cpufunc_control,                /* control              */
627	cpufunc_domains,                /* Domain               */
628	arm11x6_setttb,                 /* Setttb               */
629	cpufunc_faultstatus,            /* Faultstatus          */
630	cpufunc_faultaddress,           /* Faultaddress         */
631
632	/* TLB functions */
633
634	arm11_tlb_flushID,              /* tlb_flushID          */
635	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
636	arm11_tlb_flushI,               /* tlb_flushI           */
637	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
638	arm11_tlb_flushD,               /* tlb_flushD           */
639	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
640
641	/* Cache operations */
642
643	arm11x6_icache_sync_all,        /* icache_sync_all      */
644	arm11x6_icache_sync_range,      /* icache_sync_range    */
645
646	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
647	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
648	armv6_dcache_inv_range,         /* dcache_inv_range     */
649	armv6_dcache_wb_range,          /* dcache_wb_range      */
650
651	armv6_idcache_inv_all,		/* idcache_inv_all	*/
652	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
653	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
654
655	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
656	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
657	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
658	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
659
660	/* Other functions */
661
662	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
663	arm11_drain_writebuf,           /* drain_writebuf       */
664	cpufunc_nullop,                 /* flush_brnchtgt_C     */
665	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
666
667	arm11_sleep,                  	/* sleep                */
668
669	/* Soft functions */
670
671	cpufunc_null_fixup,             /* dataabt_fixup        */
672	cpufunc_null_fixup,             /* prefetchabt_fixup    */
673
674	arm11_context_switch,           /* context_switch       */
675
676	arm11x6_setup                   /* cpu setup            */
677};
678#endif /* CPU_ARM1136 */
679#if defined(CPU_ARM1176)
680struct cpu_functions arm1176_cpufuncs = {
681	/* CPU functions */
682
683	cpufunc_id,                     /* id                   */
684	cpufunc_nullop,                 /* cpwait               */
685
686	/* MMU functions */
687
688	cpufunc_control,                /* control              */
689	cpufunc_domains,                /* Domain               */
690	arm11x6_setttb,                 /* Setttb               */
691	cpufunc_faultstatus,            /* Faultstatus          */
692	cpufunc_faultaddress,           /* Faultaddress         */
693
694	/* TLB functions */
695
696	arm11_tlb_flushID,              /* tlb_flushID          */
697	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
698	arm11_tlb_flushI,               /* tlb_flushI           */
699	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
700	arm11_tlb_flushD,               /* tlb_flushD           */
701	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
702
703	/* Cache operations */
704
705	arm11x6_icache_sync_all,        /* icache_sync_all      */
706	arm11x6_icache_sync_range,      /* icache_sync_range    */
707
708	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
709	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
710	armv6_dcache_inv_range,         /* dcache_inv_range     */
711	armv6_dcache_wb_range,          /* dcache_wb_range      */
712
713	armv6_idcache_inv_all,		/* idcache_inv_all	*/
714	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
715	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
716
717	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
718	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
719	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
720	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
721
722	/* Other functions */
723
724	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
725	arm11_drain_writebuf,           /* drain_writebuf       */
726	cpufunc_nullop,                 /* flush_brnchtgt_C     */
727	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
728
729	arm11x6_sleep,                  /* sleep                */
730
731	/* Soft functions */
732
733	cpufunc_null_fixup,             /* dataabt_fixup        */
734	cpufunc_null_fixup,             /* prefetchabt_fixup    */
735
736	arm11_context_switch,           /* context_switch       */
737
738	arm11x6_setup                   /* cpu setup            */
739};
740#endif /*CPU_ARM1176 */
741
742#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
743struct cpu_functions cortexa_cpufuncs = {
744	/* CPU functions */
745
746	cpufunc_id,                     /* id                   */
747	cpufunc_nullop,                 /* cpwait               */
748
749	/* MMU functions */
750
751	cpufunc_control,                /* control              */
752	cpufunc_domains,                /* Domain               */
753	armv7_setttb,                   /* Setttb               */
754	cpufunc_faultstatus,            /* Faultstatus          */
755	cpufunc_faultaddress,           /* Faultaddress         */
756
757	/*
758	 * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
759	 * whether the hardware implements separate I+D or not, so we use the
760	 * same 'ID' functions for all 3 variations.
761	 */
762
763	armv7_tlb_flushID,              /* tlb_flushID          */
764	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
765	armv7_tlb_flushID,              /* tlb_flushI           */
766	armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
767	armv7_tlb_flushID,              /* tlb_flushD           */
768	armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */
769
770	/* Cache operations */
771
772	armv7_idcache_wbinv_all,         /* icache_sync_all      */
773	armv7_icache_sync_range,        /* icache_sync_range    */
774
775	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
776	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
777	armv7_dcache_inv_range,         /* dcache_inv_range     */
778	armv7_dcache_wb_range,          /* dcache_wb_range      */
779
780	armv7_idcache_inv_all,		/* idcache_inv_all	*/
781	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
782	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */
783
784	/*
785	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
786	 * L2 cache controller is actually enabled.
787	 */
788	cpufunc_nullop,                 /* l2cache_wbinv_all    */
789	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
790	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
791	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
792
793	/* Other functions */
794
795	cpufunc_nullop,                 /* flush_prefetchbuf    */
796	armv7_drain_writebuf,           /* drain_writebuf       */
797	cpufunc_nullop,                 /* flush_brnchtgt_C     */
798	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
799
800	armv7_sleep,                    /* sleep                */
801
802	/* Soft functions */
803
804	cpufunc_null_fixup,             /* dataabt_fixup        */
805	cpufunc_null_fixup,             /* prefetchabt_fixup    */
806
807	armv7_context_switch,           /* context_switch       */
808
809	cortexa_setup                     /* cpu setup            */
810};
811#endif /* CPU_CORTEXA || CPU_KRAIT */
812
813/*
814 * Global constants also used by locore.s
815 */
816
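/*
 * cpufuncs is the dispatch table behind the cpu_*() wrappers in
 * <machine/cpufunc.h>; set_cpufuncs() copies the cpu_functions table
 * matching the CPU identified at boot time into it.
 */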
817struct cpu_functions cpufuncs;
818u_int cputype;
819u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
820
821#if defined(CPU_ARM9) ||	\
822  defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) ||	\
823  defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
824  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
825  defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||			\
826  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
827  defined(CPU_CORTEXA) || defined(CPU_KRAIT)
828
829static void get_cachetype_cp15(void);
830
831/* Additional cache information local to this file.  Log2 of some of the
832   above numbers.  */
833static int	arm_dcache_l2_nsets;
834static int	arm_dcache_l2_assoc;
835static int	arm_dcache_l2_linesize;
836
837static void
838get_cachetype_cp15(void)
839{
840	u_int ctype, isize, dsize, cpuid;
841	u_int clevel, csize, i, sel;
842	u_int multiplier;
843	u_char type;
844
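	/* Read the CP15 Cache Type Register (c0, c0, 1). */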
845	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
846		: "=r" (ctype));
847
848	cpuid = cpufunc_id();
849	/*
850	 * ...and thus spake the ARM ARM:
851	 *
852	 * If an <opcode2> value corresponding to an unimplemented or
853	 * reserved ID register is encountered, the System Control
854	 * processor returns the value of the main ID register.
855	 */
856	if (ctype == cpuid)
857		goto out;
858
859	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
860		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
861		    : "=r" (clevel));
862		arm_cache_level = clevel;
863		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
864		i = 0;
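		/*
		 * Walk the 3-bit per-level fields of the CLIDR.  For each
		 * data/unified (and each instruction) cache found, select it
		 * in the CSSELR (mcr p15, 2, ...) and read its geometry from
		 * the CCSIDR (mrc p15, 1, ...).
		 */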
865		while ((type = (clevel & 0x7)) && i < 7) {
866			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
867			    type == CACHE_SEP_CACHE) {
868				sel = i << 1;
869				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
870				    : : "r" (sel));
871				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
872				    : "=r" (csize));
873				arm_cache_type[sel] = csize;
874				arm_dcache_align = 1 <<
875				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
876				arm_dcache_align_mask = arm_dcache_align - 1;
877			}
878			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
879				sel = (i << 1) | 1;
880				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
881				    : : "r" (sel));
882				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
883				    : "=r" (csize));
884				arm_cache_type[sel] = csize;
885			}
886			i++;
887			clevel >>= 3;
888		}
889	} else {
890		if ((ctype & CPU_CT_S) == 0)
891			arm_pcache_unified = 1;
892
893		/*
894		 * If you want to know how this code works, go read the ARM ARM.
895		 */
896
897		arm_pcache_type = CPU_CT_CTYPE(ctype);
898
899		if (arm_pcache_unified == 0) {
900			isize = CPU_CT_ISIZE(ctype);
901			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
902			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
903			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
904				if (isize & CPU_CT_xSIZE_M)
905					arm_picache_line_size = 0; /* not present */
906				else
907					arm_picache_ways = 1;
908			} else {
909				arm_picache_ways = multiplier <<
910				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
911			}
912			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
913		}
914
915		dsize = CPU_CT_DSIZE(ctype);
916		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
917		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
918		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
919			if (dsize & CPU_CT_xSIZE_M)
920				arm_pdcache_line_size = 0; /* not present */
921			else
922				arm_pdcache_ways = 1;
923		} else {
924			arm_pdcache_ways = multiplier <<
925			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
926		}
927		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
928
929		arm_dcache_align = arm_pdcache_line_size;
930
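		/*
		 * Log2 versions of the D-cache geometry, from the CTR
		 * encodings: line size is 2^(LEN+3) bytes and
		 * nsets = size / (assoc * linesize) = 2^(6 + SIZE - ASSOC - LEN).
		 * A multiplier of 3 rounds the associativity up to the next
		 * power of two.
		 */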
931		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
932		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
933		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
934		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
935
936	out:
937		arm_dcache_align_mask = arm_dcache_align - 1;
938	}
939}
940#endif /* ARM9 || ARM9E || ARM10 || ARM11 || XSCALE || FA526 || PJ4B || CORTEXA || KRAIT */
941
942/*
943 * Cannot panic here as we may not have a console yet ...
944 */
945
946int
947set_cpufuncs(void)
948{
949	cputype = cpufunc_id();
950	cputype &= CPU_ID_CPU_MASK;
951
952	/*
953	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
954	 * CPU type where we want to use it by default, then we set it.
955	 */
956
957#ifdef CPU_ARM9
958	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
959	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
960	    (cputype & 0x0000f000) == 0x00009000) {
961		cpufuncs = arm9_cpufuncs;
962		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
963		get_cachetype_cp15();
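		/*
		 * Precompute the stepping values used by the arm9 set/way
		 * cache maintenance routines: the set index advances by one
		 * cache line, the way index lives in the top bits of the word.
		 */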
964		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
965		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
966		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
967		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
968		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
969#ifdef ARM9_CACHE_WRITE_THROUGH
970		pmap_pte_init_arm9();
971#else
972		pmap_pte_init_generic();
973#endif
974		goto out;
975	}
976#endif /* CPU_ARM9 */
977#if defined(CPU_ARM9E) || defined(CPU_ARM10)
978	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
979	    cputype == CPU_ID_MV88FR571_41) {
980		uint32_t sheeva_ctrl;
981
982		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
983		    MV_L2_ENABLE);
984		/*
985		 * Workaround for Marvell MV78100 CPU: the cache prefetch
986		 * mechanism may violate cache coherency, so it needs to
987		 * be disabled.
988		 *
989		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
990		 * L2 Prefetching Mechanism) for details.
991		 */
992		if (cputype == CPU_ID_MV88FR571_VD ||
993		    cputype == CPU_ID_MV88FR571_41)
994			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
995
996		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
997
998		cpufuncs = sheeva_cpufuncs;
999		get_cachetype_cp15();
1000		pmap_pte_init_generic();
1001		goto out;
1002	} else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
1003		cpufuncs = armv5_ec_cpufuncs;
1004		get_cachetype_cp15();
1005		pmap_pte_init_generic();
1006		goto out;
1007	}
1008#endif /* CPU_ARM9E || CPU_ARM10 */
1009#ifdef CPU_ARM10
1010	if (/* cputype == CPU_ID_ARM1020T || */
1011	    cputype == CPU_ID_ARM1020E) {
1012		/*
1013		 * Select write-through caching (this isn't really an
1014		 * option on ARM1020T).
1015		 */
1016		cpufuncs = arm10_cpufuncs;
1017		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1018		get_cachetype_cp15();
1019		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1020		arm10_dcache_sets_max =
1021		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1022		    arm10_dcache_sets_inc;
1023		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1024		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
1025		pmap_pte_init_generic();
1026		goto out;
1027	}
1028#endif /* CPU_ARM10 */
1029#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
1030	if (cputype == CPU_ID_ARM1136JS
1031	    || cputype == CPU_ID_ARM1136JSR1
1032	    || cputype == CPU_ID_ARM1176JZS) {
1033#ifdef CPU_ARM1136
1034		if (cputype == CPU_ID_ARM1136JS
1035		    || cputype == CPU_ID_ARM1136JSR1)
1036			cpufuncs = arm1136_cpufuncs;
1037#endif
1038#ifdef CPU_ARM1176
1039		if (cputype == CPU_ID_ARM1176JZS)
1040			cpufuncs = arm1176_cpufuncs;
1041#endif
1042		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
1043		get_cachetype_cp15();
1044
1045		pmap_pte_init_mmu_v6();
1046
1047		goto out;
1048	}
1049#endif /* CPU_ARM1136 || CPU_ARM1176 */
1050#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1051	if (cputype == CPU_ID_CORTEXA7 ||
1052	    cputype == CPU_ID_CORTEXA8R1 ||
1053	    cputype == CPU_ID_CORTEXA8R2 ||
1054	    cputype == CPU_ID_CORTEXA8R3 ||
1055	    cputype == CPU_ID_CORTEXA9R1 ||
1056	    cputype == CPU_ID_CORTEXA9R2 ||
1057	    cputype == CPU_ID_CORTEXA9R3 ||
1058	    cputype == CPU_ID_CORTEXA15 ||
1059	    cputype == CPU_ID_KRAIT) {
1060		cpufuncs = cortexa_cpufuncs;
1061		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
1062		get_cachetype_cp15();
1063
1064		pmap_pte_init_mmu_v6();
1065		/* Use powersave on this CPU. */
1066		cpu_do_powersave = 1;
1067		goto out;
1068	}
1069#endif /* CPU_CORTEXA || CPU_KRAIT */
1070
1071#if defined(CPU_MV_PJ4B)
1072	if (cputype == CPU_ID_MV88SV581X_V7 ||
1073	    cputype == CPU_ID_MV88SV584X_V7 ||
1074	    cputype == CPU_ID_ARM_88SV581X_V7) {
1075		cpufuncs = pj4bv7_cpufuncs;
1076		get_cachetype_cp15();
1077		pmap_pte_init_mmu_v6();
1078		goto out;
1079	}
1080#endif /* CPU_MV_PJ4B */
1081
1082#if defined(CPU_FA526) || defined(CPU_FA626TE)
1083	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
1084		cpufuncs = fa526_cpufuncs;
1085		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1086		get_cachetype_cp15();
1087		pmap_pte_init_generic();
1088
1089		/* Use powersave on this CPU. */
1090		cpu_do_powersave = 1;
1091
1092		goto out;
1093	}
1094#endif	/* CPU_FA526 || CPU_FA626TE */
1095
1096#ifdef CPU_XSCALE_80200
1097	if (cputype == CPU_ID_80200) {
1098		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1099
1100		i80200_icu_init();
1101
1102#if defined(XSCALE_CCLKCFG)
1103		/*
1104		 * Crank CCLKCFG to maximum legal value.
1105		 */
1106		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1107			:
1108			: "r" (XSCALE_CCLKCFG));
1109#endif
1110
1111		/*
1112		 * XXX Disable ECC in the Bus Controller Unit; we
1113		 * don't really support it, yet.  Clear any pending
1114		 * error indications.
1115		 */
1116		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1117			:
1118			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1119
1120		cpufuncs = xscale_cpufuncs;
1121		/*
1122		 * i80200 errata: Step-A0 and A1 have a bug where
1123		 * D$ dirty bits are not cleared on "invalidate by
1124		 * address".
1125		 *
1126		 * Workaround: Clean cache line before invalidating.
1127		 */
1128		if (rev == 0 || rev == 1)
1129			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1130
1131		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1132		get_cachetype_cp15();
1133		pmap_pte_init_xscale();
1134		goto out;
1135	}
1136#endif /* CPU_XSCALE_80200 */
1137#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1138	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1139	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1140	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1141		cpufuncs = xscale_cpufuncs;
1142		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1143		get_cachetype_cp15();
1144		pmap_pte_init_xscale();
1145		goto out;
1146	}
1147#endif /* CPU_XSCALE_80321 || CPU_XSCALE_80219 */
1148
1149#if defined(CPU_XSCALE_81342)
1150	if (cputype == CPU_ID_81342) {
1151		cpufuncs = xscalec3_cpufuncs;
1152		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1153		get_cachetype_cp15();
1154		pmap_pte_init_xscale();
1155		goto out;
1156	}
1157#endif /* CPU_XSCALE_81342 */
1158#ifdef CPU_XSCALE_PXA2X0
1159	/* ignore core revision to test PXA2xx CPUs */
1160	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1161	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1162	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1163
1164		cpufuncs = xscale_cpufuncs;
1165		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1166		get_cachetype_cp15();
1167		pmap_pte_init_xscale();
1168
1169		/* Use powersave on this CPU. */
1170		cpu_do_powersave = 1;
1171
1172		goto out;
1173	}
1174#endif /* CPU_XSCALE_PXA2X0 */
1175#ifdef CPU_XSCALE_IXP425
1176	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1177	    cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
1178
1179		cpufuncs = xscale_cpufuncs;
1180		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1181		get_cachetype_cp15();
1182		pmap_pte_init_xscale();
1183
1184		goto out;
1185	}
1186#endif /* CPU_XSCALE_IXP425 */
1187	/*
1188	 * Bzzzz. And the answer was ...
1189	 */
1190	panic("No support for this CPU type (%08x) in kernel", cputype);
1191	return(ARCHITECTURE_NOT_PRESENT);
1192out:
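	/*
	 * Default UMA allocations to D-cache line alignment so that
	 * independently managed buffers do not share a cache line.
	 */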
1193	uma_set_align(arm_dcache_align_mask);
1194	return (0);
1195}
1196
1197/*
1198 * Fixup routines for data and prefetch aborts.
1199 *
1200 * The following compile-time symbols are used:
1201 *
1202 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1203 * correction of registers after a fault.
1204 */
1205
1206
1207/*
1208 * Null abort fixup routine.
1209 * For use when no fixup is required.
1210 */
1211int
1212cpufunc_null_fixup(void *arg)
1214{
1215	return(ABORT_FIXUP_OK);
1216}
1217
1218/*
1219 * CPU Setup code
1220 */
1221
1222#if defined (CPU_ARM9) || \
1223  defined(CPU_ARM9E) || \
1224  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
1225  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
1226  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1227  defined(CPU_ARM10) ||  defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
1228  defined(CPU_FA526) || defined(CPU_FA626TE)
1229
1230#define IGN	0
1231#define OR	1
1232#define BIC	2
1233
1234struct cpu_option {
1235	char	*co_name;
1236	int	co_falseop;
1237	int	co_trueop;
1238	int	co_value;
1239};
1240
1241static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1242
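/*
 * Scan the boot argument string for each option in the table.  When an
 * option is present, its boolean value selects co_trueop or co_falseop,
 * which either ORs the co_value bits into the control word or BICs
 * (clears) them out of it.
 */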
1243static u_int
1244parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
1248{
1249	int integer;
1250
1251	if (args == NULL)
1252		return(cpuctrl);
1253
1254	while (optlist->co_name) {
1255		if (get_bootconf_option(args, optlist->co_name,
1256		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1257			if (integer) {
1258				if (optlist->co_trueop == OR)
1259					cpuctrl |= optlist->co_value;
1260				else if (optlist->co_trueop == BIC)
1261					cpuctrl &= ~optlist->co_value;
1262			} else {
1263				if (optlist->co_falseop == OR)
1264					cpuctrl |= optlist->co_value;
1265				else if (optlist->co_falseop == BIC)
1266					cpuctrl &= ~optlist->co_value;
1267			}
1268		}
1269		++optlist;
1270	}
1271	return(cpuctrl);
1272}
1273#endif /* CPU_ARM9 || ARM9E || ARM10 || ARM11 || XSCALE || FA526 */
1274
1275#ifdef CPU_ARM9
1276struct cpu_option arm9_options[] = {
1277	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1278	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1279	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1280	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1281	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1282	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1283	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1284	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1285	{ NULL,			IGN, IGN, 0 }
1286};
1287
1288void
1289arm9_setup(char *args)
1291{
1292	int cpuctrl, cpuctrlmask;
1293
1294	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1295	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1296	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1297	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1298	    CPU_CONTROL_ROUNDROBIN;
1299	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1300		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1301		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1302		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1303		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1304		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1305		 | CPU_CONTROL_ROUNDROBIN;
1306
1307#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1308	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1309#endif
1310
1311	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1312
1313#ifdef __ARMEB__
1314	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1315#endif
1316	if (vector_page == ARM_VECTORS_HIGH)
1317		cpuctrl |= CPU_CONTROL_VECRELOC;
1318
1319	/* Clear out the cache */
1320	cpu_idcache_wbinv_all();
1321
1322	/* Set the control register */
1323	cpu_control(cpuctrlmask, cpuctrl);
1324	ctrl = cpuctrl;
1325
1326}
1327#endif	/* CPU_ARM9 */
1328
1329#if defined(CPU_ARM9E) || defined(CPU_ARM10)
1330struct cpu_option arm10_options[] = {
1331	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1332	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1333	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1334	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1335	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1336	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1337	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1338	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1339	{ NULL,			IGN, IGN, 0 }
1340};
1341
1342void
1343arm10_setup(char *args)
1345{
1346	int cpuctrl, cpuctrlmask;
1347
1348	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1349	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1350	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1351	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1352	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1353	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1354	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1355	    | CPU_CONTROL_BPRD_ENABLE
1356	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1357
1358#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1359	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1360#endif
1361
1362	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1363
1364#ifdef __ARMEB__
1365	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1366#endif
1367
1368	/* Clear out the cache */
1369	cpu_idcache_wbinv_all();
1370
1371	/* Now really make sure they are clean.  */
1372	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1373
1374	if (vector_page == ARM_VECTORS_HIGH)
1375		cpuctrl |= CPU_CONTROL_VECRELOC;
1376
1377	/* Set the control register */
1378	ctrl = cpuctrl;
1379	cpu_control(0xffffffff, cpuctrl);
1380
1381	/* And again. */
1382	cpu_idcache_wbinv_all();
1383}
1384#endif	/* CPU_ARM9E || CPU_ARM10 */
1385
1386#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
1387struct cpu_option arm11_options[] = {
1388	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1389	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1390	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1391	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1392	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1393	{ NULL,			IGN, IGN, 0 }
1394};
1395
1396void
1397arm11x6_setup(char *args)
1398{
1399	int cpuctrl, cpuctrl_wax;
1400	uint32_t auxctrl, auxctrl_wax;
1401	uint32_t tmp, tmp2;
1402	uint32_t sbz=0;
1403	uint32_t cpuid;
1404
1405	cpuid = cpufunc_id();
1406
1407	cpuctrl =
1408		CPU_CONTROL_MMU_ENABLE  |
1409		CPU_CONTROL_DC_ENABLE   |
1410		CPU_CONTROL_WBUF_ENABLE |
1411		CPU_CONTROL_32BP_ENABLE |
1412		CPU_CONTROL_32BD_ENABLE |
1413		CPU_CONTROL_LABT_ENABLE |
1414		CPU_CONTROL_SYST_ENABLE |
1415		CPU_CONTROL_IC_ENABLE;
1416
1417	/*
1418	 * "Write as existing" bits: these keep their current value;
1419	 * the inverse of this word is the mask passed to cpu_control().
1420	 */
1421	cpuctrl_wax =
1422		(3 << 30) | /* SBZ */
1423		(1 << 29) | /* FA */
1424		(1 << 28) | /* TR */
1425		(3 << 26) | /* SBZ */
1426		(3 << 19) | /* SBZ */
1427		(1 << 17);  /* SBZ */
1428
1429	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1430	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1431
1432	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
1433
1434#ifdef __ARMEB__
1435	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1436#endif
1437
1438	if (vector_page == ARM_VECTORS_HIGH)
1439		cpuctrl |= CPU_CONTROL_VECRELOC;
1440
1441	auxctrl = 0;
1442	auxctrl_wax = ~0;
1443	/*
1444	 * This option enables the workaround for the 364296 ARM1136
1445	 * r0pX errata (possible cache data corruption with
1446	 * hit-under-miss enabled). It sets the undocumented bit 31 in
1447	 * the auxiliary control register and the FI bit in the control
1448	 * register, thus disabling hit-under-miss without putting the
1449	 * processor into full low interrupt latency mode. ARM11MPCore
1450	 * is not affected.
1451	 */
1452	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
1453		cpuctrl |= CPU_CONTROL_FI_ENABLE;
1454		auxctrl = ARM1136_AUXCTL_PFI;
1455		auxctrl_wax = ~ARM1136_AUXCTL_PFI;
1456	}
1457
1458	/*
1459	 * Enable an erratum workaround
1460	 */
1461	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
1462		auxctrl = ARM1176_AUXCTL_PHD;
1463		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
1464	}
1465
1466	/* Clear out the cache */
1467	cpu_idcache_wbinv_all();
1468
1469	/* Now really make sure they are clean.  */
1470	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
1471
1472	/* Allow detection code to find the VFP if it's fitted.  */
1473	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
1474
1475	/* Set the control register */
1476	ctrl = cpuctrl;
1477	cpu_control(~cpuctrl_wax, cpuctrl);
1478
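	/*
	 * Read-modify-write the Auxiliary Control Register: preserve the
	 * bits selected by auxctrl_wax, OR in auxctrl, and write the result
	 * back only if it differs from the current value.
	 */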
1479	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
1480			"and	%1, %0, %2\n\t"
1481			"orr	%1, %1, %3\n\t"
1482			"teq	%0, %1\n\t"
1483			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
1484			: "=r"(tmp), "=r"(tmp2) :
1485			  "r"(auxctrl_wax), "r"(auxctrl));
1486
1487	/* And again. */
1488	cpu_idcache_wbinv_all();
1489}
1490#endif  /* CPU_ARM1136 || CPU_ARM1176 */
1491
1492#ifdef CPU_MV_PJ4B
1493void
1494pj4bv7_setup(char *args)
1496{
1497	int cpuctrl;
1498
1499	pj4b_config();
1500
1501	cpuctrl = CPU_CONTROL_MMU_ENABLE;
1502#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1503	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1504#endif
1505	cpuctrl |= CPU_CONTROL_DC_ENABLE;
1506	cpuctrl |= (0xf << 3);
1507	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1508	cpuctrl |= CPU_CONTROL_IC_ENABLE;
1509	if (vector_page == ARM_VECTORS_HIGH)
1510		cpuctrl |= CPU_CONTROL_VECRELOC;
1511	cpuctrl |= (0x5 << 16) | (1 << 22);
1512	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1513
1514	/* Clear out the cache */
1515	cpu_idcache_wbinv_all();
1516
1517	/* Set the control register */
1518	ctrl = cpuctrl;
1519	cpu_control(0xFFFFFFFF, cpuctrl);
1520
1521	/* And again. */
1522	cpu_idcache_wbinv_all();
1523}
1524#endif /* CPU_MV_PJ4B */
1525
1526#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1527
1528void
1529cortexa_setup(char *args)
1530{
1531	int cpuctrl, cpuctrlmask;
1532
1533	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
1534	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
1535	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
1536	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
1537	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
1538	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
1539
1540	cpuctrl = CPU_CONTROL_MMU_ENABLE |
1541	    CPU_CONTROL_IC_ENABLE |
1542	    CPU_CONTROL_DC_ENABLE |
1543	    CPU_CONTROL_BPRD_ENABLE;
1544
1545#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1546	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1547#endif
1548
1549	/* Switch to big endian */
1550#ifdef __ARMEB__
1551	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1552#endif
1553
1554	/* Check if the vector page is at the high address (0xffff0000) */
1555	if (vector_page == ARM_VECTORS_HIGH)
1556		cpuctrl |= CPU_CONTROL_VECRELOC;
1557
1558	/* Clear out the cache */
1559	cpu_idcache_wbinv_all();
1560
1561	/* Set the control register */
1562	ctrl = cpuctrl;
1563	cpu_control(cpuctrlmask, cpuctrl);
1564
1565	/* And again. */
1566	cpu_idcache_wbinv_all();
1567#ifdef SMP
1568	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
1569#endif
1570}
1571#endif  /* CPU_CORTEXA || CPU_KRAIT */
1572
1573#if defined(CPU_FA526) || defined(CPU_FA626TE)
1574struct cpu_option fa526_options[] = {
1575#ifdef COMPAT_12
1576	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE |
1577					   CPU_CONTROL_DC_ENABLE) },
1578	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1579#endif	/* COMPAT_12 */
1580	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE |
1581					   CPU_CONTROL_DC_ENABLE) },
1582	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE |
1583					   CPU_CONTROL_DC_ENABLE) },
1584	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1585	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1586	{ NULL,			IGN, IGN, 0 }
1587};
1588
1589void
1590fa526_setup(char *args)
1591{
1592	int cpuctrl, cpuctrlmask;
1593
1594	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1595		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1596		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1597		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1598		| CPU_CONTROL_BPRD_ENABLE;
1599	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1600		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1601		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1602		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1603		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1604		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1605		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1606
1607#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1608	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1609#endif
1610
1611	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
1612
1613#ifdef __ARMEB__
1614	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1615#endif
1616
1617	if (vector_page == ARM_VECTORS_HIGH)
1618		cpuctrl |= CPU_CONTROL_VECRELOC;
1619
1620	/* Clear out the cache */
1621	cpu_idcache_wbinv_all();
1622
1623	/* Set the control register */
1624	ctrl = cpuctrl;
1625	cpu_control(0xffffffff, cpuctrl);
1626}
1627#endif	/* CPU_FA526 || CPU_FA626TE */
1628
1629#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1630  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1631  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
1632struct cpu_option xscale_options[] = {
1633#ifdef COMPAT_12
1634	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1635	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1636#endif	/* COMPAT_12 */
1637	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1638	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1639	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1640	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1641	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1642	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1643	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1644	{ NULL,			IGN, IGN, 0 }
1645};
1646
1647void
1648xscale_setup(char *args)
1650{
1651	uint32_t auxctl;
1652	int cpuctrl, cpuctrlmask;
1653
1654	/*
1655	 * The XScale Write Buffer is always enabled.  Our option
1656	 * is to enable/disable coalescing.  Note that bits 6:3
1657	 * must always be enabled.
1658	 */
1659
1660	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1661		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1662		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1663		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1664		 | CPU_CONTROL_BPRD_ENABLE;
1665	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1666		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1667		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1668		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1669		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1670		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1671		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
1672		 CPU_CONTROL_L2_ENABLE;
1673
1674#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1675	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1676#endif
1677
1678	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1679
1680#ifdef __ARMEB__
1681	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1682#endif
1683
1684	if (vector_page == ARM_VECTORS_HIGH)
1685		cpuctrl |= CPU_CONTROL_VECRELOC;
1686#ifdef CPU_XSCALE_CORE3
1687	cpuctrl |= CPU_CONTROL_L2_ENABLE;
1688#endif
1689
1690	/* Clear out the cache */
1691	cpu_idcache_wbinv_all();
1692
1693	/*
1694	 * Set the control register.  Note that bits 6:3 must always
1695	 * be set to 1.
1696	 */
1697	ctrl = cpuctrl;
1698/*	cpu_control(cpuctrlmask, cpuctrl);*/
1699	cpu_control(0xffffffff, cpuctrl);
1700
1701	/* Make sure write coalescing is turned on */
1702	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1703		: "=r" (auxctl));
1704#ifdef XSCALE_NO_COALESCE_WRITES
1705	auxctl |= XSCALE_AUXCTL_K;
1706#else
1707	auxctl &= ~XSCALE_AUXCTL_K;
1708#endif
1709#ifdef CPU_XSCALE_CORE3
1710	auxctl |= XSCALE_AUXCTL_LLR;
1711	auxctl |= XSCALE_AUXCTL_MD_MASK;
1712#endif
1713	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1714		: : "r" (auxctl));
1715}
1716#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
1717	   CPU_XSCALE_IXP425 || CPU_XSCALE_80219 || CPU_XSCALE_81342 */
1718