/*	$NetBSD: cpufunc_asm_armv6.S,v 1.4 2010/12/10 02:06:22 bsh Exp $	*/

/*
 * Copyright (c) 2002, 2005 ARM Limited
 * Portions Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARMv6 assembly functions for manipulating caches.
 * These routines can be used by any core that supports the mcrr address
 * range operations.
 */

/*
 * $FreeBSD: stable/10/sys/arm/arm/cpufunc_asm_armv6.S 266203 2014-05-16 00:14:50Z ian $
 */

#include <machine/asm.h>

	.arch	armv6
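
/*
 * The .arch directive above lets the assembler accept ARMv6-only
 * instructions, notably the mcrr range operations used below, even if
 * the toolchain's default target architecture is older.
 */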
/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache, since it is indexed by
 * virtual addresses that are about to change.
 */
ENTRY(armv6_setttb)
#ifdef PMAP_CACHE_VIVT
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	mcr	p15, 0, r0, c7, c14, 0	/* clean and invalidate D cache */
#endif
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */

	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */

	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
	RET
END(armv6_setttb)
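
/*
 * These entry points are normally not called directly; they are
 * installed in the cpu_functions dispatch table and reached through
 * the cpufunc wrappers.  A minimal sketch of a caller, assuming the
 * usual FreeBSD <machine/cpufunc.h> wrappers:
 *
 *	cpu_idcache_wbinv_all();	wb+inv caches before switching
 *	cpu_setttb(new_ttb);		dispatches to armv6_setttb here
 *
 * where new_ttb is a hypothetical physical address of the new L1
 * translation table.  armv6_setttb itself already performs the cache
 * and TLB maintenance shown above.
 */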

/*
 * Cache operations.
 */

/* LINTSTUB: void armv6_icache_sync_range(vaddr_t, vsize_t); */
ENTRY_NP(armv6_icache_sync_range)
	add	r1, r1, r0		/* r0 = start, r1 = end of range */
	sub	r1, r1, #1		/* make the end address inclusive */
	mcrr	p15, 0, r1, r0, c5	/* invalidate I cache range */
	mcrr	p15, 0, r1, r0, c12	/* clean D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_icache_sync_range)
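
/*
 * The ARMv6 block-transfer (mcrr) cache operations take an inclusive
 * end address in the first register and the start address in the
 * second, which is why each range function in this file converts its
 * (start, length) arguments into (start, start + length - 1).  For
 * example, start 0x8000 with length 0x100 gives an end address of
 * 0x80ff.  The CRm field selects the operation: c5 invalidates an
 * I cache range, c6 invalidates a D cache range, c12 cleans a D cache
 * range, and c14 cleans and invalidates a D cache range.
 */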

/* LINTSTUB: void armv6_icache_sync_all(void); */
ENTRY_NP(armv6_icache_sync_all)
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so it is safe to flush the Icache and then clean the
	 * Dcache.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	mcr	p15, 0, r0, c7, c10, 0	/* Clean D cache */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_icache_sync_all)
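
/*
 * An I cache "sync" is what self-modifying code needs after storing
 * new instructions: the stores are cleaned out of the D cache so that
 * instruction fetches can see them, and stale lines in the I cache are
 * discarded.
 */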

/* LINTSTUB: void armv6_dcache_wb_range(vaddr_t, vsize_t); */
ENTRY(armv6_dcache_wb_range)
	add	r1, r1, r0		/* r0 = start, r1 = end of range */
	sub	r1, r1, #1		/* make the end address inclusive */
	mcrr	p15, 0, r1, r0, c12	/* clean D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_dcache_wb_range)
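
/*
 * The three D cache range primitives differ only in what happens to
 * the affected lines: wb_range cleans (writes back) dirty lines but
 * leaves them valid, wbinv_range cleans and then discards them, and
 * inv_range discards them without writing anything back.  The last is
 * only safe when the cached contents are dead, typically a buffer
 * that a device has filled, or is about to fill, by DMA.
 */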

/* LINTSTUB: void armv6_dcache_wbinv_range(vaddr_t, vsize_t); */
ENTRY(armv6_dcache_wbinv_range)
	add	r1, r1, r0		/* r0 = start, r1 = end of range */
	sub	r1, r1, #1		/* make the end address inclusive */
	mcrr	p15, 0, r1, r0, c14	/* clean and invalidate D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_dcache_wbinv_range)

/*
 * Note: we must not simply invalidate everything here.  Invalidation
 * discards dirty lines, so if the range is too big to iterate we must
 * fall back to a wb-inv (clean and invalidate) of the entire cache
 * instead.
 *
 * LINTSTUB: void armv6_dcache_inv_range(vaddr_t, vsize_t);
 */
ENTRY(armv6_dcache_inv_range)
	add	r1, r1, r0		/* r0 = start, r1 = end of range */
	sub	r1, r1, #1		/* make the end address inclusive */
	mcrr	p15, 0, r1, r0, c6	/* invalidate D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_dcache_inv_range)
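
/*
 * Because invalidation works on whole cache lines, an inv_range whose
 * endpoints are not line-aligned can discard unrelated dirty data in
 * the lines straddling the boundaries; callers are expected to pass
 * line-aligned ranges.
 */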

/* LINTSTUB: void armv6_idcache_wbinv_range(vaddr_t, vsize_t); */
ENTRY(armv6_idcache_wbinv_range)
	add	r1, r1, r0		/* r0 = start, r1 = end of range */
	sub	r1, r1, #1		/* make the end address inclusive */
	mcrr	p15, 0, r1, r0, c5	/* invalidate I cache range */
	mcrr	p15, 0, r1, r0, c14	/* clean and invalidate D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_idcache_wbinv_range)

/* LINTSTUB: void armv6_idcache_wbinv_all(void); */
ENTRY_NP(armv6_idcache_wbinv_all)
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache purging code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to purge Dcache. */

/* LINTSTUB: void armv6_dcache_wbinv_all(void); */
ENTRY(armv6_dcache_wbinv_all)
	mcr	p15, 0, r0, c7, c14, 0	/* clean & invalidate D cache */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_idcache_wbinv_all)
END(armv6_dcache_wbinv_all)
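
/*
 * armv6_idcache_wbinv_all deliberately falls through into
 * armv6_dcache_wbinv_all, so the two entry points share one tail and
 * both END() markers close the shared code above.
 */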

ENTRY(armv6_idcache_inv_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0	/* invalidate all I+D cache */
	RET
END(armv6_idcache_inv_all)
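
/*
 * Invalidating both caches without writing anything back discards any
 * dirty data they hold, so this is only safe when the caches are known
 * to contain nothing live, e.g. during early bring-up.
 */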