/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_explode.c	8.1 (Berkeley) 6/11/93
 *	$NetBSD: fpu_explode.c,v 1.5 2000/08/03 18:32:08 eeh Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * FPU subroutines: `explode' the machine's `packed binary' format numbers
 * into our internal format.
 */

#include <sys/param.h>

#ifdef FPU_DEBUG
#include <stdio.h>
#endif

#include <machine/frame.h>
#include <machine/fp.h>
#include <machine/fsr.h>
#include <machine/ieee.h>
#include <machine/instr.h>

#include "fpu_arith.h"
#include "fpu_emu.h"
#include "fpu_extern.h"
#include "__sparc_utrap_private.h"

/*
 * N.B.: in all of the following, we assume the FP format is
 *
 *	---------------------------
 *	| s | exponent | fraction |
 *	---------------------------
 *
 * (which represents -1**s * 1.fraction * 2**exponent), so that the
 * sign bit is way at the top (bit 31), the exponent is next, and
 * then the remaining bits mark the fraction.  A zero exponent means
 * zero or denormalized (0.fraction rather than 1.fraction), and the
 * maximum possible exponent, 2bias+1, signals inf (fraction==0) or NaN.
 *
 * Since the sign bit is always the topmost bit---this holds even for
 * integers---we set that outside all the *tof functions.  Each function
 * returns the class code for the new number (but note that we use
 * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate).
 */

/*
 * int -> fpn.
 */
int
__fpu_itof(fp, i)
	struct fpn *fp;
	u_int i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.  Convert negative
	 * numbers to sign-and-magnitude.  Note that this relies on
	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG;
	/*
	 * The sign bit decides whether i should be interpreted as
	 * a signed or unsigned entity.
	 */
	if (fp->fp_sign && (int)i < 0)
		fp->fp_mant[0] = -i;
	else
		fp->fp_mant[0] = i;
	fp->fp_mant[1] = 0;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	__fpu_norm(fp);
	return (FPC_NUM);
}
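
/*
 * Minimal sketch, not compiled: the sign-and-magnitude step above in
 * isolation.  fpu_explode() has already copied bit 31 of the register
 * image into fp_sign, so all __fpu_itof() needs to store is the
 * magnitude, obtained by negating the two's-complement value.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned int i = (unsigned int)-5;	/* raw register image */
	unsigned int sign = i >> 31;		/* what fpu_explode records */
	unsigned int mag = sign ? -i : i;	/* what __fpu_itof stores */

	/* prints: sign=1 magnitude=5 */
	printf("sign=%u magnitude=%u\n", sign, mag);
	return (0);
}
#endif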

/*
 * 64-bit int -> fpn.
 */
int
__fpu_xtof(fp, i)
	struct fpn *fp;
	u_int64_t i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.  Convert negative
	 * numbers to sign-and-magnitude.  Note that this relies on
	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG2;
	/*
	 * The sign bit decides whether i should be interpreted as
	 * a signed or unsigned entity.
	 */
	if (fp->fp_sign && (int64_t)i < 0)
		*((int64_t *)fp->fp_mant) = -i;
	else
		*((int64_t *)fp->fp_mant) = i;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	__fpu_norm(fp);
	return (FPC_NUM);
}

#define	mask(nbits) ((1L << (nbits)) - 1)

/*
 * All external floating formats convert to internal in the same manner,
 * as defined here.  Note that only normals get an implied 1.0 inserted.
 */
#define	FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \
	if (exp == 0) { \
		if (allfrac == 0) \
			return (FPC_ZERO); \
		fp->fp_exp = 1 - expbias; \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		__fpu_norm(fp); \
		return (FPC_NUM); \
	} \
	if (exp == (2 * expbias + 1)) { \
		if (allfrac == 0) \
			return (FPC_INF); \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		return (FPC_QNAN); \
	} \
	fp->fp_exp = exp - expbias; \
	fp->fp_mant[0] = FP_1 | f0; \
	fp->fp_mant[1] = f1; \
	fp->fp_mant[2] = f2; \
	fp->fp_mant[3] = f3; \
	return (FPC_NUM)
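
/*
 * Sketch, not compiled: the three cases FP_TOF distinguishes, written out
 * for single precision (bias 127).  A zero exponent is either zero or a
 * denormal that __fpu_norm() will normalize with no implied 1 bit, an
 * all-ones exponent (2 * 127 + 1 == 255) is infinity or a NaN, and
 * everything else is a normal number that gets the implied 1.0 (FP_1)
 * ORed into the mantissa.  The FPC_* class codes come from fpu_emu.h;
 * classify_single itself is only an illustration.
 */
#if 0
static int
classify_single(u_int exp, u_int frac)
{

	if (exp == 0)
		return (frac == 0 ? FPC_ZERO : FPC_NUM);	/* zero/denormal */
	if (exp == 2 * 127 + 1)
		return (frac == 0 ? FPC_INF : FPC_QNAN);	/* inf/NaN */
	return (FPC_NUM);					/* normal */
}
#endif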

/*
 * 32-bit single precision -> fpn.
 * We assume a single occupies at most (64-FP_LG) bits in the internal
 * format: i.e., needs at most fp_mant[0] and fp_mant[1].
 */
int
__fpu_stof(fp, i)
	struct fpn *fp;
	u_int i;
{
	int exp;
	u_int frac, f0, f1;
#define SNG_SHIFT (SNG_FRACBITS - FP_LG)

	exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS);
	frac = i & mask(SNG_FRACBITS);
	f0 = frac >> SNG_SHIFT;
	f1 = frac << (32 - SNG_SHIFT);
	FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0);
}
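
/*
 * Sketch, not compiled: the shift arithmetic in __fpu_stof() above.  The
 * top FP_LG fraction bits land in fp_mant[0] just below the implied-1
 * position and the remaining SNG_SHIFT bits fill the top of fp_mant[1];
 * recombining the two pieces recovers the original 23-bit fraction, which
 * is why a single needs only two mantissa words.  The helper name
 * split_and_rejoin is ours, not part of the emulator.
 */
#if 0
static u_int
split_and_rejoin(u_int frac)
{
	u_int f0, f1;

	f0 = frac >> SNG_SHIFT;			/* high bits -> fp_mant[0] */
	f1 = frac << (32 - SNG_SHIFT);		/* low bits -> top of fp_mant[1] */
	return ((f0 << SNG_SHIFT) | (f1 >> (32 - SNG_SHIFT)));	/* == frac */
}
#endif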

/*
 * 64-bit double -> fpn.
 * We assume this uses at most (96-FP_LG) bits.
 */
int
__fpu_dtof(fp, i, j)
	struct fpn *fp;
	u_int i, j;
{
	int exp;
	u_int frac, f0, f1, f2;
#define DBL_SHIFT (DBL_FRACBITS - 32 - FP_LG)

	exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS);
	frac = i & mask(DBL_FRACBITS - 32);
	f0 = frac >> DBL_SHIFT;
	f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT);
	f2 = j << (32 - DBL_SHIFT);
	frac |= j;
	FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0);
}
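
/*
 * Sketch, not compiled: the 52 double-precision fraction bits straddle the
 * two 32-bit register words handed to __fpu_dtof(), 20 high bits in i and
 * 32 low bits in j.  Recombining them into one 64-bit value makes the
 * straddle explicit; the function above re-splits the same bits on
 * FP_LG-bit boundaries instead.  dbl_fraction is only an illustration.
 */
#if 0
static u_int64_t
dbl_fraction(u_int i, u_int j)
{

	return (((u_int64_t)(i & mask(DBL_FRACBITS - 32)) << 32) | j);
}
#endif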

/*
 * 128-bit extended -> fpn.
 */
int
__fpu_qtof(fp, i, j, k, l)
	struct fpn *fp;
	u_int i, j, k, l;
{
	int exp;
	u_int frac, f0, f1, f2, f3;
#define EXT_SHIFT (-(EXT_FRACBITS - 3 * 32 - FP_LG))	/* left shift! */

	/*
	 * The 16 fraction bits in the first word are shifted left by
	 * EXT_SHIFT so that the topmost fraction bit lands just below
	 * the implied-1 (FP_1) position; the lower words shift along
	 * with it.
	 */
	exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
	frac = i & mask(EXT_FRACBITS - 3 * 32);
	f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
	f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
	f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
	f3 = l << EXT_SHIFT;
	frac |= j | k | l;
	FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
}

/*
 * Explode the contents of a register / regpair / regquad.
 * If the input is a signalling NaN, an NV (invalid) exception
 * will be set.  (Note that nothing but NV can occur until ALU
 * operations are performed.)
 */
void
__fpu_explode(fe, fp, type, reg)
	struct fpemu *fe;
	struct fpn *fp;
	int type, reg;
{
	u_int64_t l0, l1;
	u_int32_t s;

	if (type == FTYPE_LNG || type == FTYPE_DBL || type == FTYPE_EXT) {
		l0 = __fpu_getreg64(reg & ~1);
		fp->fp_sign = l0 >> 63;
	} else {
		s = __fpu_getreg(reg);
		fp->fp_sign = s >> 31;
	}
	fp->fp_sticky = 0;
	switch (type) {
	case FTYPE_LNG:
		s = __fpu_xtof(fp, l0);
		break;

	case FTYPE_INT:
		s = __fpu_itof(fp, s);
		break;

	case FTYPE_SNG:
		s = __fpu_stof(fp, s);
		break;

	case FTYPE_DBL:
		s = __fpu_dtof(fp, l0 >> 32, l0 & 0xffffffff);
		break;

	case FTYPE_EXT:
		l1 = __fpu_getreg64((reg & ~1) + 2);
		s = __fpu_qtof(fp, l0 >> 32, l0 & 0xffffffff, l1 >> 32,
		    l1 & 0xffffffff);
		break;

	default:
		__utrap_panic("fpu_explode");
	}

	if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
		/*
		 * Input is a signalling NaN.  All operations that return
		 * an input NaN operand put it through a ``NaN conversion'',
		 * which basically just means ``turn on the quiet bit''.
		 * We do this here so that all NaNs internally look quiet
		 * (we can tell signalling ones by their class).
		 */
		fp->fp_mant[0] |= FP_QUIETBIT;
		fe->fe_cx = FSR_NV;	/* assert invalid operand */
		s = FPC_SNAN;
	}
	fp->fp_class = s;
	DPRINTF(FPE_REG, ("fpu_explode: %%%c%d => ", (type == FTYPE_LNG) ? 'x' :
		((type == FTYPE_INT) ? 'i' :
			((type == FTYPE_SNG) ? 's' :
				((type == FTYPE_DBL) ? 'd' :
					((type == FTYPE_EXT) ? 'q' : '?')))),
		reg));
	DUMPFPN(FPE_REG, fp);
	DPRINTF(FPE_REG, ("\n"));
}
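
/*
 * Sketch, not compiled: the quiet/signalling distinction in the packed
 * single format, which the FP_QUIETBIT test above mirrors on the internal
 * mantissa.  On SPARC, a set most-significant fraction bit marks a quiet
 * NaN; a clear one (with a nonzero fraction) marks a signalling NaN,
 * which fpu_explode quiets and flags with FSR_NV.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned int snan = 0x7f800001;		/* exp all ones, frac MSB clear */
	unsigned int qnan = snan | 0x00400000;	/* turn on the quiet bit */

	/* prints: 0x7f800001 -> 0x7fc00001 */
	printf("0x%08x -> 0x%08x\n", snan, qnan);
	return (0);
}
#endif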