/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm/arm/cpuinfo.c 331968 2018-04-04 01:56:46Z mmel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/cpuinfo.h>
#include <machine/elf.h>
#include <machine/md_var.h>

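/*
 * Implemented outside this file: reinit_mmu() switches to the final kernel
 * translation table (ttb) and clears/sets the given bits in ACTLR (see
 * cpuinfo_reinit_mmu() below).
 */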
#if __ARM_ARCH >= 6
void reinit_mmu(uint32_t ttb, uint32_t aux_clr, uint32_t aux_set);
#endif

struct cpuinfo cpuinfo =
{
	/* Use safe defaults initially. */
	.dcache_line_size = 32,
	.dcache_line_mask = 31,
	.icache_line_size = 32,
	.icache_line_mask = 31,
};

static SYSCTL_NODE(_hw, OID_AUTO, cpu, CTLFLAG_RD, 0,
    "CPU");
static SYSCTL_NODE(_hw_cpu, OID_AUTO, quirks, CTLFLAG_RD, 0,
    "CPU quirks");

/*
 * Tunable CPU quirks.
 * Be careful: ACTLR is normally writable only from the secure world (state),
 * so if the CPU was not started in secure state a write to ACTLR can cause
 * an exception!
 * These quirks are intended for optimizing CPU performance, not for applying
 * errata workarounds: a CPU with unfixed errata cannot be expected to run
 * the kernel reliably before the quirks are applied.
 */
static uint32_t cpu_quirks_actlr_mask;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_mask,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_mask, 0,
    "Bits to be masked in ACTLR");

static uint32_t cpu_quirks_actlr_set;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_set,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_set, 0,
    "Bits to be set in ACTLR");
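
/*
 * Usage sketch: setting, e.g.,
 *     hw.cpu.quirks.actlr_mask="0x40"
 *     hw.cpu.quirks.actlr_set="0x40"
 * in loader.conf makes cpuinfo_reinit_mmu() force ACTLR bit 6 (the SMP bit
 * on many Cortex-A cores) to 1; a bit present only in the mask is expected
 * to be cleared.
 */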

/* Read and parse the CPU ID scheme. */
void
cpuinfo_init(void)
{
#if __ARM_ARCH >= 6
	uint32_t tmp;
#endif

	/*
	 * Fetch the CPU quirk tunables early. The standard fetch for tunable
	 * sysctls is handled by SYSINIT, which is too late for the boot CPU.
	 * Keep the names in sync with the sysctls above.
	 */
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_mask", &cpu_quirks_actlr_mask);
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_set", &cpu_quirks_actlr_set);

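	/*
	 * MIDR layout (new ID scheme): implementer [31:24], variant [23:20],
	 * architecture [19:16], primary part number [15:4], revision [3:0].
	 */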
	cpuinfo.midr = cp15_midr_get();
	/* Test old-style ID schemes first */
	if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
		if (CPU_ID_ISOLD(cpuinfo.midr)) {
			/* obsolete ARMv2 or ARMv3 CPU */
			cpuinfo.midr = 0;
			return;
		}
		if (CPU_ID_IS7(cpuinfo.midr)) {
			if ((cpuinfo.midr & (1 << 23)) == 0) {
				/* obsolete ARMv3 CPU */
				cpuinfo.midr = 0;
				return;
			}
			/* ARMv4T CPU */
			cpuinfo.architecture = 1;
			cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
		} else {
			/* ARM, new ID scheme */
			cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
			cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
		}
	} else {
		/* non-ARM implementer -> must use the new ID scheme */
		cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
		cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
	}
	/* Parse the rest of MIDR */
	cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
	cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
	cpuinfo.patch = cpuinfo.midr & 0x0F;

	/* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
	cpuinfo.ctr = cp15_ctr_get();
	cpuinfo.tcmtr = cp15_tcmtr_get();
#if __ARM_ARCH >= 6
	cpuinfo.tlbtr = cp15_tlbtr_get();
	cpuinfo.mpidr = cp15_mpidr_get();
	cpuinfo.revidr = cp15_revidr_get();
#endif

	/* Stop here if the CPU does not use the ARMv7 CPUID scheme. */
	if (cpuinfo.architecture != 0xF)
		return;
#if __ARM_ARCH >= 6
	cpuinfo.id_pfr0 = cp15_id_pfr0_get();
	cpuinfo.id_pfr1 = cp15_id_pfr1_get();
	cpuinfo.id_dfr0 = cp15_id_dfr0_get();
	cpuinfo.id_afr0 = cp15_id_afr0_get();
	cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
	cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
	cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
	cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
	cpuinfo.id_isar0 = cp15_id_isar0_get();
	cpuinfo.id_isar1 = cp15_id_isar1_get();
	cpuinfo.id_isar2 = cp15_id_isar2_get();
	cpuinfo.id_isar3 = cp15_id_isar3_get();
	cpuinfo.id_isar4 = cp15_id_isar4_get();
	cpuinfo.id_isar5 = cp15_id_isar5_get();

/* Not yet - CBAR only exists on ARM SMP Cortex-A CPUs
	cpuinfo.cbar = cp15_cbar_get();
*/
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.ccsidr = cp15_ccsidr_get();
		cpuinfo.clidr = cp15_clidr_get();
	}

	/* If REVIDR is not implemented it reads as MIDR; ignore it then. */
	if (cpuinfo.revidr == cpuinfo.midr)
		cpuinfo.revidr = 0;

	/* parsed bits of above registers */
	/* id_mmfr0 */
	cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF;
	cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
	cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
	cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
	/* id_mmfr2 */
	cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
	/* id_mmfr3 */
	cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
	cpuinfo.maintenance_broadcast = (cpuinfo.id_mmfr3 >> 12) & 0xF;
	/* id_pfr1 */
	cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
	cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
	cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;

	/* L1 Cache sizes */
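	/*
	 * CTR line-size fields are log2-encoded: the ARMv7 format stores
	 * log2(words) in DminLine/IminLine (4-byte words, hence +2), while
	 * the older format field yields a line of 1 << (len + 3) bytes.
	 */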
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
	} else {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
	}
	cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
	cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;

	/* Fill AT_HWCAP bits. */
	elf_hwcap |= HWCAP_HALF | HWCAP_FAST_MULT; /* Required for all CPUs */
	elf_hwcap |= HWCAP_TLS | HWCAP_EDSP;	   /* Required for v6+ CPUs */

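	/*
	 * ID_ISAR0[27:24] (Divide_instrs): 1 = SDIV/UDIV in the Thumb
	 * instruction set, 2 = in both Thumb and ARM.
	 */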
	tmp = (cpuinfo.id_isar0 >> 24) & 0xF;	/* Divide_instrs */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_IDIVT;
	if (tmp >= 2)
		elf_hwcap |= HWCAP_IDIVA;

	tmp = (cpuinfo.id_pfr0 >> 4) & 0xF;	/* State1 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMB;

	tmp = (cpuinfo.id_pfr0 >> 12) & 0xF;	/* State3 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMBEE;

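	/* ID_MMFR0[3:0] (VMSA) value 5 or higher indicates LPAE support. */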
	tmp = (cpuinfo.id_mmfr0 >> 0) & 0xF;	/* VMSA */
	if (tmp >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* Fill AT_HWCAP2 bits. */
	tmp = (cpuinfo.id_isar5 >> 4) & 0xF;	/* AES */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_AES;
	if (tmp >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;

	tmp = (cpuinfo.id_isar5 >> 8) & 0xF;	/* SHA1 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	tmp = (cpuinfo.id_isar5 >> 12) & 0xF;	/* SHA2 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	tmp = (cpuinfo.id_isar5 >> 16) & 0xF;	/* CRC32 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
#endif
}

#if __ARM_ARCH >= 6
/*
 * Get the bits that must be set or cleared in the ACTLR register.
 * Note: the bits in the ACTLR register are IMPLEMENTATION DEFINED.
 * It is expected that the SCU is in an operational state before this
 * function is called.
 */
static void
cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
{

	*actlr_mask = 0;
	*actlr_set = 0;

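	/*
	 * reinit_mmu() is expected to clear the bits in *actlr_mask and then
	 * set the bits in *actlr_set, so a bit present in both is forced to 1.
	 */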
	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
		switch (cpuinfo.part_number) {
		case CPU_ARCH_CORTEX_A73:
		case CPU_ARCH_CORTEX_A72:
		case CPU_ARCH_CORTEX_A57:
		case CPU_ARCH_CORTEX_A53:
			/* Nothing to do for AArch32 */
			break;
		case CPU_ARCH_CORTEX_A17:
		case CPU_ARCH_CORTEX_A12: /* A12 is merged into A17 */
			/*
			 * Enable SMP mode
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A15:
			/*
			 * Enable snoop-delayed exclusive handling
			 * Enable SMP mode
			 */
			*actlr_mask = (1U << 31) | (1 << 6);
			*actlr_set = (1U << 31) | (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A9:
			/*
			 * Disable exclusive L1/L2 cache control
			 * Enable SMP mode
			 * Enable Cache and TLB maintenance broadcast
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_CORTEX_A8:
			/*
			 * Enable L2 cache
			 * Enable L1 data cache hardware alias checks
			 */
			*actlr_mask = (1 << 1) | (1 << 0);
			*actlr_set = (1 << 1);
			break;
		case CPU_ARCH_CORTEX_A7:
			/*
			 * Enable SMP mode
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A5:
			/*
			 * Disable exclusive L1/L2 cache control
			 * Enable SMP mode
			 * Enable Cache and TLB maintenance broadcast
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_ARM1176:
			/*
			 * Restrict cache size to 16KB
			 * Enable the return stack
			 * Enable dynamic branch prediction
			 * Enable static branch prediction
			 */
			*actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			break;
		}
		return;
	}
}

/* Reinitialize MMU to final kernel mapping and apply all CPU quirks. */
void
cpuinfo_reinit_mmu(uint32_t ttb)
{
	uint32_t actlr_mask;
	uint32_t actlr_set;

	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
	actlr_mask |= cpu_quirks_actlr_mask;
	actlr_set |= cpu_quirks_actlr_set;
	reinit_mmu(ttb, actlr_mask, actlr_set);
}

#endif /* __ARM_ARCH >= 6 */