/*	$NetBSD: cpufunc_asm_armv6.S,v 1.4 2010/12/10 02:06:22 bsh Exp $	*/

/*
 * Copyright (c) 2002, 2005 ARM Limited
 * Portions Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARMv6 assembly functions for manipulating caches.
 * These routines can be used by any core that supports the mcrr address
 * range operations.
 */
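
/*
 * The range routines below all use the ARMv6 operate-on-MVA-range form
 * of mcrr:
 *
 *	mcrr	p15, 0, <end_va>, <start_va>, cN
 *
 * where cN selects the operation: c5 invalidates the I cache range,
 * c6 invalidates the D cache range, c12 cleans the D cache range, and
 * c14 cleans and invalidates the D cache range.  The end address is
 * inclusive, which is why each routine computes r1 = r0 + length - 1
 * from its (address, length) arguments before issuing the operation.
 */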

/*
 * $FreeBSD: stable/10/sys/arm/arm/cpufunc_asm_armv6.S 269796 2014-08-11 01:29:28Z ian $
 */

#include <machine/asm.h>

	.arch	armv6

/*
 * Functions to set the MMU Translation Table Base register.
 *
 * ARMv6 data caches are physically tagged, so unlike earlier cores we
 * do not have to clean and flush them before switching to the new
 * mapping; draining the write buffer and invalidating the TLBs is
 * sufficient.
 */
ENTRY(armv6_setttb)
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */

	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */

	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
	RET
END(armv6_setttb)
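
/*
 * A minimal usage sketch (not part of the original file; "new_l1_pa"
 * is a hypothetical symbol naming the physical address of the new L1
 * translation table, which must be 16KB-aligned):
 *
 *	ldr	r0, =new_l1_pa		@ pa of the new L1 table
 *	bl	armv6_setttb		@ drain WB, set TTB, flush TLBs
 */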

/*
 * Cache operations.
 */

/* LINTSTUB: void armv6_icache_sync_range(vaddr_t, vsize_t); */
ENTRY_NP(armv6_icache_sync_range)
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c5	/* invalidate I cache range */
	mcrr	p15, 0, r1, r0, c12	/* clean D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_icache_sync_range)
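
/*
 * A usage sketch (not from the original file): after new instructions
 * have been stored through the data side, the affected range must be
 * synced before it is executed.  "patch_site" is a hypothetical label:
 *
 *	adr	r0, patch_site		@ start of the modified code
 *	mov	r1, #4			@ size of the modification
 *	bl	armv6_icache_sync_range
 */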

/* LINTSTUB: void armv6_icache_sync_all(void); */
ENTRY_NP(armv6_icache_sync_all)
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache cleaning code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	mcr	p15, 0, r0, c7, c10, 0	/* Clean D cache */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_icache_sync_all)

/* LINTSTUB: void armv6_dcache_wb_range(vaddr_t, vsize_t); */
ENTRY(armv6_dcache_wb_range)
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c12	/* clean D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_dcache_wb_range)
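
/*
 * Typical use (a sketch, not from the original file): write a buffer
 * back to memory before a device reads it by DMA.  "buf" and "BUFSIZE"
 * are hypothetical:
 *
 *	ldr	r0, =buf		@ start of the DMA buffer
 *	mov	r1, #BUFSIZE		@ its size in bytes
 *	bl	armv6_dcache_wb_range	@ push dirty lines to memory
 */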

/* LINTSTUB: void armv6_dcache_wbinv_range(vaddr_t, vsize_t); */
ENTRY(armv6_dcache_wbinv_range)
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c14	/* clean and invalidate D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_dcache_wbinv_range)

/*
 * Note that we must not simply invalidate everything: invalidating
 * without writing back discards dirty data, so unrelated lines would
 * lose it.  If the range is too big, the fallback must be a wb-inv of
 * the entire cache, never a whole-cache invalidate.
 *
 * LINTSTUB: void armv6_dcache_inv_range(vaddr_t, vsize_t);
 */
ENTRY(armv6_dcache_inv_range)
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c6	/* invalidate D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_dcache_inv_range)
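
/*
 * A sketch of the guard implied by the note above (not in the original
 * file; the 32KB threshold is a hypothetical cache size):
 *
 *	cmp	r1, #0x8000		@ range larger than the cache?
 *	bhs	armv6_dcache_wbinv_all	@ yes: wb-inv everything instead
 *	b	armv6_dcache_inv_range	@ no: invalidate just the range
 */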

/* LINTSTUB: void armv6_idcache_wbinv_range(vaddr_t, vsize_t); */
ENTRY(armv6_idcache_wbinv_range)
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c5	/* invalidate I cache range */
	mcrr	p15, 0, r1, r0, c14	/* clean & invalidate D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(armv6_idcache_wbinv_range)

/* LINTSTUB: void armv6_idcache_wbinv_all(void); */
ENTRY_NP(armv6_idcache_wbinv_all)
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache purging code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to purge Dcache. */

/* LINTSTUB: void armv6_dcache_wbinv_all(void); */
EENTRY(armv6_dcache_wbinv_all)
	mcr	p15, 0, r0, c7, c14, 0	/* clean & invalidate D cache */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
EEND(armv6_dcache_wbinv_all)
END(armv6_idcache_wbinv_all)

/*
 * Invalidate both caches without writing anything back.  This is only
 * safe when the D cache contains no dirty data that is still needed,
 * for example during early bootstrap.
 */
ENTRY(armv6_idcache_inv_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0	/* invalidate all I+D cache */
	RET
END(armv6_idcache_inv_all)