/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2014 Ian Lepore <ian@freebsd.org>
 * Copyright (c) 2012 Mark Tinguely
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm/arm/vfp.c 330897 2018-03-14 03:19:51Z eadler $");

#ifdef VFP
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>

#include <machine/armreg.h>
#include <machine/elf.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/undefined.h>
#include <machine/vfp.h>

/* function prototypes */
static int vfp_bounce(u_int, u_int, struct trapframe *, int);
static void vfp_restore(struct vfp_state *);

extern int vfp_exists;
static struct undefined_handler vfp10_uh, vfp11_uh;
/* If true the VFP unit has 32 double registers, otherwise it has 16 */
static int is_d32;

/*
 * About .fpu directives in this file...
 *
 * We should need only .fpu vfpv3, but clang 3.5 has a quirk where setting
 * vfpv3 doesn't imply that vfp2 features are also available -- both have to be
 * explicitly set to get all the features of both.  This is probably a bug in
 * clang, so it may get fixed and require changes here some day.  Other changes
 * are probably coming in clang too, because there is email and there are open
 * PRs indicating they want to completely disable the ability to use .fpu and
 * similar directives in inline asm.  That would be catastrophic for us;
 * hopefully they will come to their senses.  There was also some discussion of
 * a new syntax such as .push fpu=vfpv3; ...; .pop fpu; which would be ideal
 * for us, better than what we have now, really.
 *
 * For gcc, each .fpu directive completely overrides the prior directive, unlike
 * with clang, but luckily on gcc saying v3 implies all the v2 features as well.
 */

#define fmxr(reg, val) \
    __asm __volatile("	.fpu vfpv2\n .fpu vfpv3\n"			\
		     "	vmsr	" __STRING(reg) ", %0"   :: "r"(val));

#define fmrx(reg) \
({ u_int val = 0;\
    __asm __volatile(" .fpu vfpv2\n .fpu vfpv3\n"			\
		     "	vmrs	%0, " __STRING(reg) : "=r"(val));	\
    val; \
})
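
/*
 * Illustrative sketch only (not compiled): the fmrx/fmxr macros above can be
 * combined for a read-modify-write of a VFP system register.  The hypothetical
 * helper below, for example, would enable flush-to-zero mode in FPSCR.
 */
#if 0
static void
vfp_example_set_fz(void)
{
	u_int fpscr;

	fpscr = fmrx(fpscr);			/* read the current FPSCR */
	fmxr(fpscr, fpscr | VFPSCR_FZ);		/* write it back with FZ set */
}
#endif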

static u_int
get_coprocessorACR(void)
{
	u_int val;
	__asm __volatile("mrc p15, 0, %0, c1, c0, 2" : "=r" (val) : : "cc");
	return val;
}

static void
set_coprocessorACR(u_int val)
{
	__asm __volatile("mcr p15, 0, %0, c1, c0, 2\n\t"
	 : : "r" (val) : "cc");
	isb();
}

/* Called for each CPU. */
void
vfp_init(void)
{
	u_int fpsid, fpexc, tmp;
	u_int coproc, vfp_arch;

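	/*
	 * Enable access to coprocessors 10 and 11 (the VFP / Advanced SIMD
	 * register file) in the Coprocessor Access Control Register; without
	 * this, every VFP instruction traps as undefined.
	 */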
	coproc = get_coprocessorACR();
	coproc |= COPROC10 | COPROC11;
	set_coprocessorACR(coproc);

	fpsid = fmrx(fpsid);		/* read the vfp system id */
	fpexc = fmrx(fpexc);		/* read the vfp exception reg */

	if (!(fpsid & VFPSID_HARDSOFT_IMP)) {
		vfp_exists = 1;
		is_d32 = 0;
		PCPU_SET(vfpsid, fpsid);	/* save the fpsid */
		elf_hwcap |= HWCAP_VFP;

		vfp_arch =
		    (fpsid & VFPSID_SUBVERSION2_MASK) >> VFPSID_SUBVERSION_OFF;

		if (vfp_arch >= VFP_ARCH3) {
			tmp = fmrx(mvfr0);
			PCPU_SET(vfpmvfr0, tmp);
			elf_hwcap |= HWCAP_VFPv3;

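			/*
			 * MVFR0's register-bank field reads 2 when the unit
			 * implements 32 doubleword registers (d0-d31) and 1
			 * when it implements only 16 (d0-d15).
			 */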
			if ((tmp & VMVFR0_RB_MASK) == 2) {
				elf_hwcap |= HWCAP_VFPD32;
				is_d32 = 1;
			} else
				elf_hwcap |= HWCAP_VFPv3D16;

			tmp = fmrx(mvfr1);
			PCPU_SET(vfpmvfr1, tmp);

			if (PCPU_GET(cpuid) == 0) {
				if ((tmp & VMVFR1_FZ_MASK) == 0x1) {
					/* Denormals arithmetic support */
					initial_fpscr &= ~VFPSCR_FZ;
					thread0.td_pcb->pcb_vfpstate.fpscr =
					    initial_fpscr;
				}
			}

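			/*
			 * MVFR1 also advertises Advanced SIMD (NEON) support
			 * through its load/store, integer, and single-precision
			 * fields, and VFPv4 fused multiply-accumulate through
			 * its FMAC field.
			 */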
			if ((tmp & VMVFR1_LS_MASK) >> VMVFR1_LS_OFF == 1 &&
			    (tmp & VMVFR1_I_MASK) >> VMVFR1_I_OFF == 1 &&
			    (tmp & VMVFR1_SP_MASK) >> VMVFR1_SP_OFF == 1)
				elf_hwcap |= HWCAP_NEON;
			if ((tmp & VMVFR1_FMAC_MASK) >> VMVFR1_FMAC_OFF == 1)
				elf_hwcap |= HWCAP_VFPv4;
		}

		/*
		 * Initialize the coprocessor 10 and 11 handlers.  These are
		 * called to restore the registers and enable the VFP hardware.
		 */
		if (vfp10_uh.uh_handler == NULL) {
			vfp10_uh.uh_handler = vfp_bounce;
			vfp11_uh.uh_handler = vfp_bounce;
			install_coproc_handler_static(10, &vfp10_uh);
			install_coproc_handler_static(11, &vfp11_uh);
		}
	}
}

SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);

/*
 * Start the VFP unit, restore the VFP registers from the PCB and retry
 * the instruction.
 */
static int
vfp_bounce(u_int addr, u_int insn, struct trapframe *frame, int code)
{
	u_int cpu, fpexc;
	struct pcb *curpcb;
	ksiginfo_t ksi;

	if ((code & FAULT_USER) == 0)
		panic("undefined floating point instruction in supervisor mode");

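	/*
	 * Prevent a context switch while we examine FPEXC and bind the VFP
	 * state to this thread and CPU; preemption here could leave
	 * fpcurthread and the hardware state out of sync.
	 */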
	critical_enter();

	/*
	 * If the VFP is already on and we got an undefined instruction, then
	 * something tried to execute a truly invalid instruction that maps to
	 * the VFP.
	 */
	fpexc = fmrx(fpexc);
	if (fpexc & VFPEXC_EN) {
		/* Clear any exceptions */
		fmxr(fpexc, fpexc & ~(VFPEXC_EX | VFPEXC_FP2V));

		/* kill the process - we do not handle emulation */
		critical_exit();

		if (fpexc & VFPEXC_EX) {
			/* We have an exception, signal a SIGFPE */
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGFPE;
			if (fpexc & VFPEXC_UFC)
				ksi.ksi_code = FPE_FLTUND;
			else if (fpexc & VFPEXC_OFC)
				ksi.ksi_code = FPE_FLTOVF;
			else if (fpexc & VFPEXC_IOC)
				ksi.ksi_code = FPE_FLTINV;
			ksi.ksi_addr = (void *)addr;
			trapsignal(curthread, &ksi);
			return (0);
		}

		return (1);
	}

	/*
	 * If the last time this thread used the VFP it was on this core, and
	 * the last thread to use the VFP on this core was this thread, then the
	 * VFP state is valid, otherwise restore this thread's state to the VFP.
	 */
	fmxr(fpexc, fpexc | VFPEXC_EN);
	curpcb = curthread->td_pcb;
	cpu = PCPU_GET(cpuid);
	if (curpcb->pcb_vfpcpu != cpu || curthread != PCPU_GET(fpcurthread)) {
		vfp_restore(&curpcb->pcb_vfpstate);
		curpcb->pcb_vfpcpu = cpu;
		PCPU_SET(fpcurthread, curthread);
	}

	critical_exit();
	return (0);
}

/*
 * Restore the given state to the VFP hardware.
 */
static void
vfp_restore(struct vfp_state *vfpsave)
{
	uint32_t fpexc;

	/* On vfpv3 we may need to restore FPINST and FPINST2 */
	fpexc = vfpsave->fpexec;
	if (fpexc & VFPEXC_EX) {
		fmxr(fpinst, vfpsave->fpinst);
		if (fpexc & VFPEXC_FP2V)
			fmxr(fpinst2, vfpsave->fpinst2);
	}
	fmxr(fpscr, vfpsave->fpscr);

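	/*
	 * Load d0-d15 unconditionally.  On a -D32 unit the "ne" condition
	 * (is_d32 != 0) also loads d16-d31; on a -D16 unit the "eq" add skips
	 * the 16 unimplemented doubleword slots (16 * 8 = 128 bytes), so the
	 * pointer advances by the same amount either way.
	 */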
	__asm __volatile(
	    " .fpu	vfpv2\n"
	    " .fpu	vfpv3\n"
	    " vldmia	%0!, {d0-d15}\n"	/* d0-d15 */
	    " cmp	%1, #0\n"		/* -D16 or -D32? */
	    " vldmiane	%0!, {d16-d31}\n"	/* d16-d31 */
	    " addeq	%0, %0, #128\n"		/* skip missing regs */
	    : "+&r" (vfpsave) : "r" (is_d32) : "cc"
	    );

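	/*
	 * Write the saved FPEXC last: it re-enables the unit and, when
	 * VFPEXC_EX is set, re-arms the deferred-exception state just loaded
	 * into FPINST/FPINST2.
	 */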
	fmxr(fpexc, fpexc);
}

/*
 * If the VFP is on, save its current state and turn it off if requested to do
 * so.  If the VFP is not on, the values at *vfpsave are left unchanged.  The
 * caller is responsible for preventing a context switch while this is running.
 */
void
vfp_store(struct vfp_state *vfpsave, boolean_t disable_vfp)
{
	uint32_t fpexc;

	fpexc = fmrx(fpexc);		/* Is the vfp enabled? */
	if (fpexc & VFPEXC_EN) {
		vfpsave->fpexec = fpexc;
		vfpsave->fpscr = fmrx(fpscr);

		/* On vfpv3 we may need to save FPINST and FPINST2 */
		if (fpexc & VFPEXC_EX) {
			vfpsave->fpinst = fmrx(fpinst);
			if (fpexc & VFPEXC_FP2V)
				vfpsave->fpinst2 = fmrx(fpinst2);
			fpexc &= ~VFPEXC_EX;
		}

		__asm __volatile(
		    " .fpu	vfpv2\n"
		    " .fpu	vfpv3\n"
		    " vstmia	%0!, {d0-d15}\n"	/* d0-d15 */
		    " cmp	%1, #0\n"		/* -D16 or -D32? */
		    " vstmiane	%0!, {d16-d31}\n"	/* d16-d31 */
		    " addeq	%0, %0, #128\n"		/* skip missing regs */
		    : "+&r" (vfpsave) : "r" (is_d32) : "cc"
		    );

		if (disable_vfp)
			fmxr(fpexc, fpexc & ~VFPEXC_EN);
	}
}
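
/*
 * Illustrative sketch only (not compiled): per the contract documented in the
 * comment above vfp_store(), a hypothetical caller has to keep the thread from
 * being switched out while the state is saved, for example by wrapping the
 * call in a critical section.
 */
#if 0
static void
vfp_example_save(struct thread *td)
{
	critical_enter();
	vfp_store(&td->td_pcb->pcb_vfpstate, 1);	/* save and disable */
	critical_exit();
}
#endif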

/*
 * The current thread is dying.  If the state currently in the hardware belongs
 * to the current thread, set fpcurthread to NULL to indicate that the VFP
 * hardware state does not belong to any thread.  If the VFP is on, turn it off.
 * Called only from cpu_throw(), so we don't have to worry about a context
 * switch here.
 */
void
vfp_discard(struct thread *td)
{
	u_int tmp;

	if (PCPU_GET(fpcurthread) == td)
		PCPU_SET(fpcurthread, NULL);

	tmp = fmrx(fpexc);
	if (tmp & VFPEXC_EN)
		fmxr(fpexc, tmp & ~VFPEXC_EN);
}

#endif /* VFP */