cpuinfo.c revision 325810
/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm/arm/cpuinfo.c 325810 2017-11-14 16:03:07Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cpuinfo.h>
#include <machine/elf.h>
#include <machine/md_var.h>

struct cpuinfo cpuinfo =
{
	/* Use safe defaults for start */
	.dcache_line_size = 32,
	.dcache_line_mask = 31,
	.icache_line_size = 32,
	.icache_line_mask = 31,
};
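
/*
 * The 32-byte line defaults above are conservative placeholders;
 * cpuinfo_init() replaces them with the real line sizes parsed from
 * CTR below.
 */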

/* Read and parse the CPU ID scheme. */
void
cpuinfo_init(void)
{
#if __ARM_ARCH >= 6
	uint32_t tmp;
#endif

	cpuinfo.midr = cp15_midr_get();
	/* Test the old ID schemes first. */
	if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
		if (CPU_ID_ISOLD(cpuinfo.midr)) {
			/* Obsolete ARMv2 or ARMv3 CPU. */
			cpuinfo.midr = 0;
			return;
		}
		if (CPU_ID_IS7(cpuinfo.midr)) {
			if ((cpuinfo.midr & (1 << 23)) == 0) {
				/* Obsolete ARMv3 CPU. */
				cpuinfo.midr = 0;
				return;
			}
			/* ARMv4T CPU. */
			cpuinfo.architecture = 1;
			cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
		} else {
			/* ARM's new ID scheme. */
			cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
			cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
		}
	} else {
		/* Non-ARM implementer -> must use the new ID scheme. */
		cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
		cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
	}
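	/*
	 * MIDR layout in the new ID scheme:
	 *   [31:24] implementer   [23:20] variant      [19:16] architecture
	 *   [15:4]  part number   [3:0]   revision
	 * Note that 'cpuinfo.revision' holds the variant field and
	 * 'cpuinfo.patch' holds the revision field.
	 */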
	/* Parse the rest of MIDR. */
	cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
	cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
	cpuinfo.patch = cpuinfo.midr & 0x0F;

	/* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR). */
	cpuinfo.ctr = cp15_ctr_get();
	cpuinfo.tcmtr = cp15_tcmtr_get();
#if __ARM_ARCH >= 6
	cpuinfo.tlbtr = cp15_tlbtr_get();
	cpuinfo.mpidr = cp15_mpidr_get();
	cpuinfo.revidr = cp15_revidr_get();
#endif

	/* Stop here if the CPU does not use the ARMv7 ID scheme. */
	if (cpuinfo.architecture != 0xF)
		return;
#if __ARM_ARCH >= 6
	cpuinfo.id_pfr0 = cp15_id_pfr0_get();
	cpuinfo.id_pfr1 = cp15_id_pfr1_get();
	cpuinfo.id_dfr0 = cp15_id_dfr0_get();
	cpuinfo.id_afr0 = cp15_id_afr0_get();
	cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
	cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
	cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
	cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
	cpuinfo.id_isar0 = cp15_id_isar0_get();
	cpuinfo.id_isar1 = cp15_id_isar1_get();
	cpuinfo.id_isar2 = cp15_id_isar2_get();
	cpuinfo.id_isar3 = cp15_id_isar3_get();
	cpuinfo.id_isar4 = cp15_id_isar4_get();
	cpuinfo.id_isar5 = cp15_id_isar5_get();

/* Not yet - CBAR only exists on ARM SMP Cortex-A CPUs:
	cpuinfo.cbar = cp15_cbar_get();
*/
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.ccsidr = cp15_ccsidr_get();
		cpuinfo.clidr = cp15_clidr_get();
	}

	/* Test if REVIDR is implemented; an unimplemented REVIDR reads as MIDR. */
	if (cpuinfo.revidr == cpuinfo.midr)
		cpuinfo.revidr = 0;

	/* Parsed bits of the registers above. */
	/* id_mmfr0 */
	cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF;
	cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
	cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
	cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
	/* id_mmfr2 */
	cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
	/* id_mmfr3 */
	cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
	cpuinfo.maintenance_broadcast = (cpuinfo.id_mmfr3 >> 12) & 0xF;
	/* id_pfr1 */
	cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
	cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
	cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;

	/* L1 cache line sizes. */
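	/*
	 * In the ARMv7 CTR format, the [ID]minLine fields give log2 of the
	 * line size in 4-byte words (bytes = 4 << field); the older format's
	 * LEN field gives log2 of the line size in 8-byte units
	 * (bytes = 8 << field).
	 */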
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
	} else {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
	}
	cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
	cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;
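	/*
	 * The masks are line_size - 1 with line_size a power of two, so,
	 * for example, va &= ~cpuinfo.dcache_line_mask aligns an address
	 * down to a D-cache line boundary.
	 */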

	/* Fill AT_HWCAP bits. */
	elf_hwcap |= HWCAP_HALF | HWCAP_FAST_MULT; /* Required for all CPUs */
	elf_hwcap |= HWCAP_TLS | HWCAP_EDSP;	   /* Required for v6+ CPUs */

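	/*
	 * ID_ISAR0[27:24] (Divide_instrs): 1 means SDIV/UDIV exist in the
	 * Thumb instruction set only, 2 means in the ARM set as well.
	 */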
	tmp = (cpuinfo.id_isar0 >> 24) & 0xF;	/* Divide_instrs */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_IDIVT;
	if (tmp >= 2)
		elf_hwcap |= HWCAP_IDIVA;

	tmp = (cpuinfo.id_pfr0 >> 4) & 0xF;	/* State1 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMB;

	tmp = (cpuinfo.id_pfr0 >> 12) & 0xF;	/* State3 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMBEE;

	tmp = (cpuinfo.id_mmfr0 >> 0) & 0xF;	/* VMSA */
	if (tmp >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* Fill AT_HWCAP2 bits. */
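	/*
	 * ID_ISAR5[7:4] (AES): 1 means the AES instructions are present,
	 * 2 means VMULL.P64 is present as well, hence the two-step test.
	 */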
	tmp = (cpuinfo.id_isar5 >> 4) & 0xF;	/* AES */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_AES;
	if (tmp >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;

	tmp = (cpuinfo.id_isar5 >> 8) & 0xF;	/* SHA1 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	tmp = (cpuinfo.id_isar5 >> 12) & 0xF;	/* SHA2 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	tmp = (cpuinfo.id_isar5 >> 16) & 0xF;	/* CRC32 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
#endif
}

/*
 * Get the bits that must be set or cleared in the ACTLR register.
 * Note: The bits in the ACTLR register are IMPLEMENTATION DEFINED.
 * It is expected that the SCU is in an operational state before this
 * function is called.
 */
void
cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
{
	*actlr_mask = 0;
	*actlr_set = 0;

	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
		switch (cpuinfo.part_number) {
		case CPU_ARCH_CORTEX_A73:
		case CPU_ARCH_CORTEX_A72:
		case CPU_ARCH_CORTEX_A57:
		case CPU_ARCH_CORTEX_A53:
			/* Nothing to do for AArch32. */
			break;
		case CPU_ARCH_CORTEX_A17:
		case CPU_ARCH_CORTEX_A12: /* A12 is merged into A17. */
			/*
			 * Enable SMP mode.
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A15:
			/*
			 * Enable snoop-delayed exclusive handling.
			 * Enable SMP mode.
			 */
			*actlr_mask = (1U << 31) | (1 << 6);
			*actlr_set = (1U << 31) | (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A9:
			/*
			 * Disable exclusive L1/L2 cache control.
			 * Enable SMP mode.
			 * Enable cache and TLB maintenance broadcast.
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_CORTEX_A8:
			/*
			 * Enable L2 cache.
			 * Enable L1 data cache hardware alias checks.
			 */
			*actlr_mask = (1 << 1) | (1 << 0);
			*actlr_set = (1 << 1);
			break;
		case CPU_ARCH_CORTEX_A7:
			/*
			 * Enable SMP mode.
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A5:
			/*
			 * Disable exclusive L1/L2 cache control.
			 * Enable SMP mode.
			 * Enable cache and TLB maintenance broadcast.
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_ARM1176:
			/*
			 * Restrict cache size to 16KB.
			 * Enable the return stack.
			 * Enable dynamic branch prediction.
			 * Enable static branch prediction.
			 */
			*actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			break;
		}
		return;
	}
}
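
/*
 * Illustrative sketch (not part of the original file): a caller, e.g. CPU
 * bring-up code, would typically apply the modifier to ACTLR along these
 * lines, assuming cp15_actlr_get()/cp15_actlr_set() accessors are
 * available:
 *
 *	uint32_t actlr, mask, set;
 *
 *	cpuinfo_get_actlr_modifier(&mask, &set);
 *	actlr = cp15_actlr_get();
 *	actlr &= ~mask;		// clear every bit named in the mask
 *	actlr |= set;		// then set the subset that must be 1
 *	cp15_actlr_set(actlr);
 */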
285