/*
 * Copyright (C) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD: stable/10/lib/libc/arm/aeabi/aeabi_vfp_float.S 266314 2014-05-17 14:22:37Z ian $");

#include "aeabi_vfp.h"

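/*
 * The AEABI_ENTRY/AEABI_END and LOAD_/UNLOAD_ register macros come from
 * aeabi_vfp.h.  Under the soft-float variant of the AAPCS the float and
 * double arguments and results travel in the core registers, so the macros
 * are expected to shuffle values between r0-r3 and the VFP s/d registers
 * as needed; see aeabi_vfp.h for the exact definitions used by each ABI.
 */
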
.fpu	vfp
.syntax	unified

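/*
 * In the __aeabi_fcmp* helpers below, vcmp.f32 sets the FPSCR flags
 * (N for less than; Z and C for equal; C alone for greater than; C and V
 * for an unordered result, i.e. a NaN operand) and vmrs copies them into
 * the APSR so ordinary conditional moves can build the integer result.
 * Note that the LT condition (N != V) is also true for an unordered
 * result, so MI is used where "less than" must exclude NaN.
 */
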
/* int __aeabi_fcmpeq(float, float) */
AEABI_ENTRY(fcmpeq)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	movne    r0, #0
	moveq    r0, #1
	RET
AEABI_END(fcmpeq)

/* int __aeabi_fcmplt(float, float) */
AEABI_ENTRY(fcmplt)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	movcs    r0, #0
	movmi    r0, #1
	RET
AEABI_END(fcmplt)

/* int __aeabi_fcmple(float, float) */
AEABI_ENTRY(fcmple)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	movhi    r0, #0
	movls    r0, #1
	RET
AEABI_END(fcmple)

/* int __aeabi_fcmpge(float, float) */
AEABI_ENTRY(fcmpge)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	movlt    r0, #0
	movge    r0, #1
	RET
AEABI_END(fcmpge)

/* int __aeabi_fcmpgt(float, float) */
AEABI_ENTRY(fcmpgt)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	movle    r0, #0
	movgt    r0, #1
	RET
AEABI_END(fcmpgt)

/* int __aeabi_fcmpun(float, float) */
AEABI_ENTRY(fcmpun)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	movvc    r0, #0
	movvs    r0, #1
	RET
AEABI_END(fcmpun)

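/*
 * __aeabi_f2iz converts to a signed integer with truncation (rounding
 * toward zero); the ftosizs form used below always rounds toward zero,
 * independent of the rounding mode currently selected in the FPSCR.
 */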
/* int __aeabi_f2iz(float) */
AEABI_ENTRY(f2iz)
	LOAD_SREG(s0, r0)
#if 0
	/*
	 * This should be the correct instruction, but binutils incorrectly
	 * encodes it as the version that uses the FPSCR to determine the
	 * rounding mode.  When binutils is fixed we can use this again.
	 */
	vcvt.s32.f32 s0, s0
#else
	ftosizs      s0, s0
#endif
	vmov         r0, s0
	RET
AEABI_END(f2iz)

/* double __aeabi_f2d(float) */
AEABI_ENTRY(f2d)
	LOAD_SREG(s0, r0)
	vcvt.f64.f32 d0, s0
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(f2d)

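/*
 * The integer argument to __aeabi_i2f arrives in r0 under both the
 * soft-float and hard-float calling conventions, so no LOAD_ macro is
 * needed before moving it into a VFP register for the conversion.
 */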
/* float __aeabi_i2f(int) */
AEABI_ENTRY(i2f)
	vmov         s0, r0
	vcvt.f32.s32 s0, s0
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(i2f)

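/*
 * The remaining helpers are thin wrappers around the corresponding
 * single-precision VFP arithmetic instructions.
 */
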
/* float __aeabi_fadd(float, float) */
AEABI_ENTRY(fadd)
	LOAD_SREGS(s0, s1, r0, r1)
	vadd.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fadd)

/* float __aeabi_fdiv(float, float) */
AEABI_ENTRY(fdiv)
	LOAD_SREGS(s0, s1, r0, r1)
	vdiv.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fdiv)

/* float __aeabi_fmul(float, float) */
AEABI_ENTRY(fmul)
	LOAD_SREGS(s0, s1, r0, r1)
	vmul.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fmul)

/* float __aeabi_fsub(float, float) */
AEABI_ENTRY(fsub)
	LOAD_SREGS(s0, s1, r0, r1)
	vsub.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fsub)