/*	$NetBSD: cpufunc_asm_armv5.S,v 1.3 2007/01/06 00:50:54 christos Exp $	*/

/*
 * Copyright (c) 2002, 2005 ARM Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARMv5 assembly functions for manipulating caches.
 * These routines can be used by any core that supports the set/index
 * operations.
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD: stable/10/sys/arm/arm/cpufunc_asm_armv5.S 275767 2014-12-14 16:28:53Z andrew $");

/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache first: it is virtually
 * indexed and tagged, so its contents are keyed to the virtual
 * address mappings that are about to change.
 */
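/*
 * Usage sketch (hedged): from C the kernel normally reaches this
 * routine through the cpu_functions dispatch table rather than by
 * calling it directly.  The prototype, as assumed here, is:
 *
 *	void armv5_setttb(u_int ttb);	// ttb = physical address of the
 *					// new L1 translation table
 */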
ENTRY(armv5_setttb)
	stmfd	sp!, {r0, lr}
	bl	_C_LABEL(armv5_idcache_wbinv_all)
	ldmfd	sp!, {r0, lr}

	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */

	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
	RET
END(armv5_setttb)

/*
 * Cache operations.  For the entire cache we use the set/index
 * operations; the operand layout is described in the note below.
 */
	s_max	.req r0
	i_max	.req r1
	s_inc	.req r2
	i_inc	.req r3
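
/*
 * Layout note (a sketch of the ARMv5 set/index encoding as we
 * understand it): the Rd operand of the c7 set/index MCRs carries
 * the way ("index") number in its uppermost bits and the set number
 * in low-order bits just above the line offset.  The armv5_dcache_*
 * variables at the bottom of this file are stored pre-shifted into
 * those fields, so a set value and an index value can simply be
 * ORed together to form a valid operand.
 */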

ENTRY_NP(armv5_icache_sync_range)
	ldr	ip, .Larmv5_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_icache_sync_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
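	/*
	 * Worked example: with a 32-byte line (r3 = 0x1f), a call with
	 * r0 = 0x1013, r1 = 0x20 becomes r0 = 0x1000, r1 = 0x32, so the
	 * loop below operates on lines 0x1000 and 0x1020, exactly the
	 * lines the original range overlapped.
	 */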
1:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv5_icache_sync_range)

ENTRY_NP(armv5_icache_sync_all)
.Larmv5_icache_sync_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache cleaning code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to clean Dcache. */

.Larmv5_dcache_wb:
	ldr	ip, .Larmv5_cache_data
	ldmia	ip, {s_max, i_max, s_inc, i_inc}
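	/*
	 * Nested loop over the whole cache: the outer loop steps the set
	 * number down from s_max to 0, the inner loop steps the way index
	 * down from i_max.  When the index bits reach zero the tst falls
	 * through, and the extra MCR after the inner loop handles index 0.
	 */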
1:
	orr	ip, s_max, i_max
2:
	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
	sub	ip, ip, i_inc
	tst	ip, i_max		/* Index 0 is last one */
	bne	2b			/* Next index */
	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
	subs	s_max, s_max, s_inc
	bpl	1b			/* Next set */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv5_icache_sync_all)

.Larmv5_line_size:
	.word	_C_LABEL(arm_pdcache_line_size)

ENTRY(armv5_dcache_wb_range)
	ldr	ip, .Larmv5_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_dcache_wb
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv5_dcache_wb_range)

ENTRY(armv5_dcache_wbinv_range)
	ldr	ip, .Larmv5_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv5_dcache_wbinv_range)

/*
 * Note, we must not invalidate everything: a blanket invalidate would
 * discard dirty lines belonging to unrelated data.  If the range is
 * too big we must use wb-inv of the entire cache instead.
 */
ENTRY(armv5_dcache_inv_range)
	ldr	ip, .Larmv5_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c6, 1	/* Invalidate D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv5_dcache_inv_range)

ENTRY(armv5_idcache_wbinv_range)
	ldr	ip, .Larmv5_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_idcache_wbinv_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv5_idcache_wbinv_range)

ENTRY_NP(armv5_idcache_wbinv_all)
.Larmv5_idcache_wbinv_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache purging code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to purge Dcache. */

EENTRY(armv5_dcache_wbinv_all)
.Larmv5_dcache_wbinv_all:
	ldr	ip, .Larmv5_cache_data
	ldmia	ip, {s_max, i_max, s_inc, i_inc}
1:
	orr	ip, s_max, i_max
2:
	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
	sub	ip, ip, i_inc
	tst	ip, i_max		/* Index 0 is last one */
	bne	2b			/* Next index */
	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
	subs	s_max, s_max, s_inc
	bpl	1b			/* Next set */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
EEND(armv5_dcache_wbinv_all)
END(armv5_idcache_wbinv_all)

.Larmv5_cache_data:
	.word	_C_LABEL(armv5_dcache_sets_max)

	.bss

/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x)	_DATA_OBJECT(_C_LABEL(x))

/*
 * Parameters for the cache cleaning code.  Note that the order of these
 * four variables is assumed by the ldmia in the code above, which is
 * why they are declared here in the assembler file.
 */
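/*
 * A hedged sketch (C) of how a CPU setup routine might initialize
 * these from the cache geometry; line_shift, nsets and assoc_shift
 * are illustrative placeholders, not names defined by this file:
 *
 *	armv5_dcache_sets_inc  = 1U << line_shift;
 *	armv5_dcache_sets_max  = (nsets - 1) << line_shift;
 *	armv5_dcache_index_inc = 1U << (32 - assoc_shift);
 *	armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
 */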
	.align 0
C_OBJECT(armv5_dcache_sets_max)
	.space	4
C_OBJECT(armv5_dcache_index_max)
	.space	4
C_OBJECT(armv5_dcache_sets_inc)
	.space	4
C_OBJECT(armv5_dcache_index_inc)
	.space	4