/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_explode.c	8.1 (Berkeley) 6/11/93
 *	$NetBSD: fpu_explode.c,v 1.5 2000/08/03 18:32:08 eeh Exp $
 */

#include <sys/cdefs.h>

/*
 * FPU subroutines: `explode' the machine's `packed binary' format numbers
 * into our internal format.
 */

#include <sys/param.h>

#ifdef FPU_DEBUG
#include <stdio.h>
#endif

#include "fsr.h"

#include "fpu_arith.h"
#include "fpu_emu.h"
#include "fpu_extern.h"
#include "ieee.h"
#include "instr.h"


#ifdef _KERNEL_MODE
extern void panic(const char*, ...);
#else
#include <OS.h>
#endif

/*
 * N.B.: in all of the following, we assume the FP format is
 *
 *	---------------------------
 *	| s | exponent | fraction |
 *	---------------------------
 *
 * (which represents -1**s * 1.fraction * 2**exponent), so that the
 * sign bit is way at the top (bit 31), the exponent is next, and
 * then the remaining bits mark the fraction.  A zero exponent means
 * zero or denormalized (0.fraction rather than 1.fraction), and the
 * maximum possible exponent, 2bias+1, signals inf (fraction==0) or NaN.
 *
 * Since the sign bit is always the topmost bit---this holds even for
 * integers---we set that outside all the *tof functions.  Each function
 * returns the class code for the new number (but note that we use
 * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate).
 */
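
/*
 * Concretely, the packed formats handled below are laid out as
 * (sign, exponent, fraction) = (1, 8, 23) bits with bias 127 for
 * singles, (1, 11, 52) with bias 1023 for doubles, and (1, 15, 112)
 * with bias 16383 for the 128-bit extended (quad) format.
 */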

/*
 * int -> fpn.
 */
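/*
 * For example, __fpu_itof(fp, 5) stores 5 in fp_mant[0] with exponent
 * FP_LG; __fpu_norm() then shifts the mantissa up until the FP_1 bit
 * is set, leaving an exponent of 2 (5 == 1.01 * 2^2 in binary).
 */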
int
__fpu_itof(fp, i)
	struct fpn *fp;
	uint32_t i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.  Convert negative
	 * numbers to sign-and-magnitude.  Note that this relies on
	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG;
	/*
	 * The sign bit decides whether i should be interpreted as
	 * a signed or unsigned entity.
	 */
	if (fp->fp_sign && (int)i < 0)
		fp->fp_mant[0] = -i;
	else
		fp->fp_mant[0] = i;
	fp->fp_mant[1] = 0;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	__fpu_norm(fp);
	return (FPC_NUM);
}

/*
 * 64-bit int -> fpn.
 */
int
__fpu_xtof(fp, i)
	struct fpn *fp;
	uint64_t i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG; since a 64-bit source
	 * spans two mantissa words, set the exponent to FP_LG2 (the
	 * 64-bit counterpart of FP_LG) and let normalization fix it
	 * up.  Convert negative numbers to sign-and-magnitude.  Note
	 * that this relies on fpu_norm()'s handling of `supernormals';
	 * see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG2;
	/*
	 * The sign bit decides whether i should be interpreted as
	 * a signed or unsigned entity.
	 */
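	/*
	 * The 64-bit store below fills fp_mant[0] (high word) and
	 * fp_mant[1] (low word); this assumes the big-endian word
	 * order of the SPARC machines this emulator comes from.
	 */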
	if (fp->fp_sign && (int64_t)i < 0)
		*((int64_t *)fp->fp_mant) = -i;
	else
		*((int64_t *)fp->fp_mant) = i;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	__fpu_norm(fp);
	return (FPC_NUM);
}

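/* mask(nbits) is the nbits low-order bits set, e.g. mask(8) == 0xff. */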
#define	mask(nbits) ((1L << (nbits)) - 1)

/*
 * All external floating formats convert to internal in the same manner,
 * as defined here.  Note that only normals get an implied 1.0 inserted.
 */
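/*
 * Three cases: a zero exponent is either zero (all-zero fraction) or a
 * denormal, which is normalized here; the maximum exponent (2*bias+1)
 * is inf or NaN depending on the fraction; anything else is a normal
 * number, which gets the implied FP_1 bit or'ed into fp_mant[0] and
 * its exponent unbiased.
 */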
#define	FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \
	if (exp == 0) { \
		if (allfrac == 0) \
			return (FPC_ZERO); \
		fp->fp_exp = 1 - expbias; \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		__fpu_norm(fp); \
		return (FPC_NUM); \
	} \
	if (exp == (2 * expbias + 1)) { \
		if (allfrac == 0) \
			return (FPC_INF); \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		return (FPC_QNAN); \
	} \
	fp->fp_exp = exp - expbias; \
	fp->fp_mant[0] = FP_1 | f0; \
	fp->fp_mant[1] = f1; \
	fp->fp_mant[2] = f2; \
	fp->fp_mant[3] = f3; \
	return (FPC_NUM)

/*
 * 32-bit single precision -> fpn.
 * We assume a single occupies at most (64-FP_LG) bits in the internal
 * format: i.e., needs at most fp_mant[0] and fp_mant[1].
 */
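/*
 * As a worked example, 1.0f (0x3f800000) has exponent field 127 ==
 * SNG_EXP_BIAS and an all-zero fraction, so FP_TOF leaves fp_exp = 0
 * and fp_mant[0] = FP_1, i.e. exactly 1.0 in the internal format.
 */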
int
__fpu_stof(fp, i)
	struct fpn *fp;
	uint32_t i;
{
	int exp;
	uint32_t frac, f0, f1;
#define SNG_SHIFT (SNG_FRACBITS - FP_LG)

	exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS);
	frac = i & mask(SNG_FRACBITS);
	f0 = frac >> SNG_SHIFT;
	f1 = frac << (32 - SNG_SHIFT);
	FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0);
}

/*
 * 64-bit double -> fpn.
 * We assume this uses at most (96-FP_LG) bits.
 */
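/*
 * The 20 high fraction bits arrive in `i' and the 32 low bits in `j';
 * DBL_SHIFT redistributes them across fp_mant[0..2] so that the
 * leading fraction bit lands just below the FP_1 position.
 */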
int
__fpu_dtof(fp, i, j)
	struct fpn *fp;
	uint32_t i, j;
{
	int exp;
	uint32_t frac, f0, f1, f2;
#define DBL_SHIFT (DBL_FRACBITS - 32 - FP_LG)

	exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS);
	frac = i & mask(DBL_FRACBITS - 32);
	f0 = frac >> DBL_SHIFT;
	f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT);
	f2 = j << (32 - DBL_SHIFT);
	frac |= j;
	FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0);
}

/*
 * 128-bit extended -> fpn.
 */
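/*
 * The 16 high fraction bits arrive in `i' and the remaining 96 in
 * `j', `k' and `l'.  Since 16 is a little less than FP_LG, the whole
 * fraction is shifted left by EXT_SHIFT (= FP_LG - 16) bits so that
 * its leading bit lands just below the FP_1 position.
 */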
int
__fpu_qtof(fp, i, j, k, l)
	struct fpn *fp;
	uint32_t i, j, k, l;
{
	int exp;
	uint32_t frac, f0, f1, f2, f3;
#define EXT_SHIFT (-(EXT_FRACBITS - 3 * 32 - FP_LG))	/* left shift! */

	/*
	 * The ext fraction does not quite `line up' with the internal
	 * format; shift everything left by EXT_SHIFT bits to align it.
	 */
	exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
	frac = i & mask(EXT_FRACBITS - 3 * 32);
	f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
	f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
	f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
	f3 = l << EXT_SHIFT;
	frac |= j | k | l;
	FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
}

/*
 * Explode the contents of a register / register pair / register quad.
 * If the input is a signalling NaN, an NV (invalid) exception
 * will be set.  (Note that nothing but NV can occur until ALU
 * operations are performed.)
 */
void
__fpu_explode(fe, fp, type, reg)
	struct fpemu *fe;
	struct fpn *fp;
	int type, reg;
{
	uint64_t l0 = 0, l1;
	uint32_t s = 0;

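	/*
	 * 64-bit and wider operands are fetched as even/odd register
	 * pairs, hence the `reg & ~1' below (a quad's second half
	 * comes from the pair two registers up).  `s' first holds the
	 * raw 32-bit register image and is then reused for the class
	 * code returned by the conversion routines.
	 */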
	if (type == FTYPE_LNG || type == FTYPE_DBL || type == FTYPE_EXT) {
		l0 = __fpu_getreg64(reg & ~1);
		fp->fp_sign = l0 >> 63;
	} else {
		s = __fpu_getreg(reg);
		fp->fp_sign = s >> 31;
	}
	fp->fp_sticky = 0;
	switch (type) {
	case FTYPE_LNG:
		s = __fpu_xtof(fp, l0);
		break;

	case FTYPE_INT:
		s = __fpu_itof(fp, s);
		break;

	case FTYPE_SNG:
		s = __fpu_stof(fp, s);
		break;

	case FTYPE_DBL:
		s = __fpu_dtof(fp, l0 >> 32, l0 & 0xffffffff);
		break;

	case FTYPE_EXT:
		l1 = __fpu_getreg64((reg & ~1) + 2);
		s = __fpu_qtof(fp, l0 >> 32, l0 & 0xffffffff, l1 >> 32,
		    l1 & 0xffffffff);
		break;

	default:
#ifdef _KERNEL_MODE
		panic("fpu_explode");
#else
		debugger("fpu_explode");
#endif
	}

	if (s == (uint32_t)FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
		/*
		 * Input is a signalling NaN.  All operations that return
		 * an input NaN operand put it through a ``NaN conversion'',
		 * which basically just means ``turn on the quiet bit''.
		 * We do this here so that all NaNs internally look quiet
		 * (we can tell signalling ones by their class).
		 */
		fp->fp_mant[0] |= FP_QUIETBIT;
		fe->fe_cx = FSR_NV;	/* assert invalid operand */
		s = FPC_SNAN;
	}
	fp->fp_class = s;
	DPRINTF(FPE_REG, ("fpu_explode: %%%c%d => ", (type == FTYPE_LNG) ? 'x' :
		((type == FTYPE_INT) ? 'i' :
			((type == FTYPE_SNG) ? 's' :
				((type == FTYPE_DBL) ? 'd' :
					((type == FTYPE_EXT) ? 'q' : '?')))),
		reg));
	DUMPFPN(FPE_REG, fp);
	DPRINTF(FPE_REG, ("\n"));
}
