//===-- lib/fp_lib.h - Floating-point utilities -------------------*- C -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a configuration header for soft-float routines in compiler-rt.
// This file does not provide any part of the compiler-rt interface, but defines
// many useful constants and utility routines that are used in the
// implementation of the soft-float routines in compiler-rt.
//
// Assumes that float, double and long double correspond to the IEEE-754
// binary32, binary64 and binary128 types, respectively, and that integer
// endianness matches floating-point endianness on the target platform.
//
//===----------------------------------------------------------------------===//

#ifndef FP_LIB_HEADER
#define FP_LIB_HEADER

#include "int_lib.h"
#include "int_math.h"
#include "int_types.h"
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

#if defined SINGLE_PRECISION

typedef uint16_t half_rep_t;
typedef uint32_t rep_t;
typedef uint64_t twice_rep_t;
typedef int32_t srep_t;
typedef float fp_t;
#define HALF_REP_C UINT16_C
#define REP_C UINT32_C
#define significandBits 23
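// These constants describe the binary32 layout: 1 sign bit, 8 exponent bits,
// and a 23-bit trailing significand.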

static __inline int rep_clz(rep_t a) { return clzsi(a); }

// 32x32 --> 64 bit multiply
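// For example, wideMultiply(0x80000000, 0x80000000, &hi, &lo) produces
// hi = 0x40000000 and lo = 0, since 2^31 * 2^31 = 2^62.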
static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
  const uint64_t product = (uint64_t)a * b;
  *hi = product >> 32;
  *lo = product;
}
COMPILER_RT_ABI fp_t __addsf3(fp_t a, fp_t b);

#elif defined DOUBLE_PRECISION

typedef uint32_t half_rep_t;
typedef uint64_t rep_t;
typedef int64_t srep_t;
typedef double fp_t;
#define HALF_REP_C UINT32_C
#define REP_C UINT64_C
#define significandBits 52
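// These constants describe the binary64 layout: 1 sign bit, 11 exponent bits,
// and a 52-bit trailing significand.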

static __inline int rep_clz(rep_t a) {
#if defined __LP64__
  return __builtin_clzl(a);
#else
  if (a & REP_C(0xffffffff00000000))
    return clzsi(a >> 32);
  else
    return 32 + clzsi(a & REP_C(0xffffffff));
#endif
}

#define loWord(a) (a & 0xffffffffU)
#define hiWord(a) (a >> 32)

// 64x64 -> 128 wide multiply for platforms that don't have such an operation;
// many 64-bit platforms have this operation, but they tend to have hardware
// floating-point, so we don't bother with a special case for them here.
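// The product is assembled from four 32x32 -> 64 partial products:
//   a * b = hiWord(a)*hiWord(b)*2^64
//         + (hiWord(a)*loWord(b) + loWord(a)*hiWord(b))*2^32
//         + loWord(a)*loWord(b)
// The terms of weight 2^32 are accumulated in 64 bits so that the carry into
// the high word can be read out of the accumulator's upper half.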
static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
  // Each of the component 32x32 -> 64 products
  const uint64_t plolo = loWord(a) * loWord(b);
  const uint64_t plohi = loWord(a) * hiWord(b);
  const uint64_t philo = hiWord(a) * loWord(b);
  const uint64_t phihi = hiWord(a) * hiWord(b);
  // Sum terms that contribute to lo in a way that allows us to get the carry
  const uint64_t r0 = loWord(plolo);
  const uint64_t r1 = hiWord(plolo) + loWord(plohi) + loWord(philo);
  *lo = r0 + (r1 << 32);
  // Sum terms contributing to hi with the carry from lo
  *hi = hiWord(plohi) + hiWord(philo) + hiWord(r1) + phihi;
}
#undef loWord
#undef hiWord

COMPILER_RT_ABI fp_t __adddf3(fp_t a, fp_t b);

#elif defined QUAD_PRECISION
#if defined(CRT_HAS_F128) && defined(CRT_HAS_128BIT)
typedef uint64_t half_rep_t;
typedef __uint128_t rep_t;
typedef __int128_t srep_t;
typedef tf_float fp_t;
#define HALF_REP_C UINT64_C
#define REP_C (__uint128_t)
#if defined(CRT_HAS_IEEE_TF)
// Note: Since there is no explicit way to tell the compiler that a constant is
// a 128-bit integer, we let REP_C cast its argument to a 128-bit integer.
#define significandBits 112
#define TF_MANT_DIG (significandBits + 1)
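// These constants describe the binary128 layout: 1 sign bit, 15 exponent bits,
// and a 112-bit trailing significand.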

static __inline int rep_clz(rep_t a) {
  const union {
    __uint128_t ll;
#if _YUGA_BIG_ENDIAN
    struct {
      uint64_t high, low;
    } s;
#else
    struct {
      uint64_t low, high;
    } s;
#endif
  } uu = {.ll = a};

  uint64_t word;
  uint64_t add;

  if (uu.s.high) {
    word = uu.s.high;
    add = 0;
  } else {
    word = uu.s.low;
    add = 64;
  }
  return __builtin_clzll(word) + add;
}

#define Word_LoMask UINT64_C(0x00000000ffffffff)
#define Word_HiMask UINT64_C(0xffffffff00000000)
#define Word_FullMask UINT64_C(0xffffffffffffffff)
#define Word_1(a) (uint64_t)((a >> 96) & Word_LoMask)
#define Word_2(a) (uint64_t)((a >> 64) & Word_LoMask)
#define Word_3(a) (uint64_t)((a >> 32) & Word_LoMask)
#define Word_4(a) (uint64_t)(a & Word_LoMask)
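// Word_1 through Word_4 split a 128-bit operand into four 32-bit digits,
// most significant digit first, for the schoolbook multiplication below.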

// 128x128 -> 256 wide multiply for platforms that don't have such an operation;
// many 64-bit platforms have this operation, but they tend to have hardware
// floating-point, so we don't bother with a special case for them here.
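// As in the double-precision case, the sixteen 32x32 -> 64 partial products
// are grouped by weight: sumN collects the products whose contribution starts
// at bit 32*N. The low 128 bits of the result are written to *lo and the high
// 128 bits to *hi.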
static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {

  const uint64_t product11 = Word_1(a) * Word_1(b);
  const uint64_t product12 = Word_1(a) * Word_2(b);
  const uint64_t product13 = Word_1(a) * Word_3(b);
  const uint64_t product14 = Word_1(a) * Word_4(b);
  const uint64_t product21 = Word_2(a) * Word_1(b);
  const uint64_t product22 = Word_2(a) * Word_2(b);
  const uint64_t product23 = Word_2(a) * Word_3(b);
  const uint64_t product24 = Word_2(a) * Word_4(b);
  const uint64_t product31 = Word_3(a) * Word_1(b);
  const uint64_t product32 = Word_3(a) * Word_2(b);
  const uint64_t product33 = Word_3(a) * Word_3(b);
  const uint64_t product34 = Word_3(a) * Word_4(b);
  const uint64_t product41 = Word_4(a) * Word_1(b);
  const uint64_t product42 = Word_4(a) * Word_2(b);
  const uint64_t product43 = Word_4(a) * Word_3(b);
  const uint64_t product44 = Word_4(a) * Word_4(b);

  const __uint128_t sum0 = (__uint128_t)product44;
  const __uint128_t sum1 = (__uint128_t)product34 + (__uint128_t)product43;
  const __uint128_t sum2 =
      (__uint128_t)product24 + (__uint128_t)product33 + (__uint128_t)product42;
  const __uint128_t sum3 = (__uint128_t)product14 + (__uint128_t)product23 +
                           (__uint128_t)product32 + (__uint128_t)product41;
  const __uint128_t sum4 =
      (__uint128_t)product13 + (__uint128_t)product22 + (__uint128_t)product31;
  const __uint128_t sum5 = (__uint128_t)product12 + (__uint128_t)product21;
  const __uint128_t sum6 = (__uint128_t)product11;

  const __uint128_t r0 = (sum0 & Word_FullMask) + ((sum1 & Word_LoMask) << 32);
  const __uint128_t r1 = (sum0 >> 64) + ((sum1 >> 32) & Word_FullMask) +
                         (sum2 & Word_FullMask) + ((sum3 << 32) & Word_HiMask);

  *lo = r0 + (r1 << 64);
  *hi = (r1 >> 64) + (sum1 >> 96) + (sum2 >> 64) + (sum3 >> 32) + sum4 +
        (sum5 << 32) + (sum6 << 64);
}
#undef Word_1
#undef Word_2
#undef Word_3
#undef Word_4
#undef Word_HiMask
#undef Word_LoMask
#undef Word_FullMask
#endif // defined(CRT_HAS_IEEE_TF)
#else
typedef long double fp_t;
#endif // defined(CRT_HAS_F128) && defined(CRT_HAS_128BIT)
#else
#error SINGLE_PRECISION, DOUBLE_PRECISION or QUAD_PRECISION must be defined.
#endif

#if defined(SINGLE_PRECISION) || defined(DOUBLE_PRECISION) ||                  \
    (defined(QUAD_PRECISION) && defined(CRT_HAS_TF_MODE))
#define typeWidth (sizeof(rep_t) * CHAR_BIT)

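// toRep and fromRep reinterpret the bits of an fp_t as an unsigned integer of
// the same width (and back) by type punning through a union, which is
// well-defined in C.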
static __inline rep_t toRep(fp_t x) {
  const union {
    fp_t f;
    rep_t i;
  } rep = {.f = x};
  return rep.i;
}

static __inline fp_t fromRep(rep_t x) {
  const union {
    fp_t f;
    rep_t i;
  } rep = {.i = x};
  return rep.f;
}

#if !defined(QUAD_PRECISION) || defined(CRT_HAS_IEEE_TF)
#define exponentBits (typeWidth - significandBits - 1)
#define maxExponent ((1 << exponentBits) - 1)
#define exponentBias (maxExponent >> 1)

#define implicitBit (REP_C(1) << significandBits)
#define significandMask (implicitBit - 1U)
#define signBit (REP_C(1) << (significandBits + exponentBits))
#define absMask (signBit - 1U)
#define exponentMask (absMask ^ significandMask)
#define oneRep ((rep_t)exponentBias << significandBits)
#define infRep exponentMask
#define quietBit (implicitBit >> 1)
#define qnanRep (exponentMask | quietBit)

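// For SINGLE_PRECISION, the macros above evaluate to exponentBits = 8,
// exponentBias = 127, implicitBit = 0x00800000, significandMask = 0x007fffff,
// signBit = 0x80000000, exponentMask = 0x7f800000, and qnanRep = 0x7fc00000.

// normalize shifts a nonzero subnormal significand left until its leading bit
// reaches the implicitBit position and returns the exponent (1 - shift) that
// keeps the represented value unchanged; e.g. for binary32, a significand of
// 0x00000001 becomes 0x00800000 and -22 is returned.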
static __inline int normalize(rep_t *significand) {
  const int shift = rep_clz(*significand) - rep_clz(implicitBit);
  *significand <<= shift;
  return 1 - shift;
}

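// wideLeftShift shifts the two-word quantity hi:lo left by count bits. Note
// that it appears to assume 0 < count < typeWidth; a zero count would shift
// *lo right by typeWidth, which is undefined behavior in C.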
static __inline void wideLeftShift(rep_t *hi, rep_t *lo, int count) {
  *hi = *hi << count | *lo >> (typeWidth - count);
  *lo = *lo << count;
}

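// wideRightShiftWithSticky shifts the two-word quantity hi:lo right by count
// bits, ORing any bits shifted out into the least significant bit of *lo (the
// "sticky" bit) so that a later rounding step can still tell the discarded
// bits were nonzero.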
static __inline void wideRightShiftWithSticky(rep_t *hi, rep_t *lo,
                                              unsigned int count) {
  if (count < typeWidth) {
    const bool sticky = (*lo << (typeWidth - count)) != 0;
    *lo = *hi << (typeWidth - count) | *lo >> count | sticky;
    *hi = *hi >> count;
  } else if (count < 2 * typeWidth) {
    const bool sticky = *hi << (2 * typeWidth - count) | *lo;
    *lo = *hi >> (count - typeWidth) | sticky;
    *hi = 0;
  } else {
    const bool sticky = *hi | *lo;
    *lo = sticky;
    *hi = 0;
  }
}

// Implements logb methods (logb, logbf, logbl) for IEEE-754. This avoids
// pulling in a libm dependency from compiler-rt, but is not meant to replace
// it (i.e. code calling logb() should get the one from libm, not this), hence
// the __compiler_rt prefix.
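// The result is the unbiased exponent of x returned as an fp_t, e.g.
// __compiler_rt_logbX(8.0) == 3.0 and __compiler_rt_logbX(0.03125) == -5.0.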
static __inline fp_t __compiler_rt_logbX(fp_t x) {
  rep_t rep = toRep(x);
  int exp = (rep & exponentMask) >> significandBits;

  // Abnormal cases:
  // 1) +/- inf returns +inf; NaN returns NaN
  // 2) 0.0 returns -inf
  if (exp == maxExponent) {
    if (((rep & signBit) == 0) || (x != x)) {
      return x; // NaN or +inf: return x
    } else {
      return -x; // -inf: return -x
    }
  } else if (x == 0.0) {
    // 0.0: return -inf
    return fromRep(infRep | signBit);
  }

  if (exp != 0) {
    // Normal number
    return exp - exponentBias; // Unbias exponent
  } else {
    // Subnormal number; normalize and repeat
    rep &= absMask;
    const int shift = 1 - normalize(&rep);
    exp = (rep & exponentMask) >> significandBits;
    return exp - exponentBias - shift; // Unbias exponent
  }
}

// Avoid using scalbn from libm. Unlike libc/libm scalbn, this function never
// sets errno on underflow/overflow.
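// Computes x * 2^y with the usual IEEE-754 overflow, underflow, and rounding
// behavior, e.g. __compiler_rt_scalbnX(3.0, 4) == 48.0.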
static __inline fp_t __compiler_rt_scalbnX(fp_t x, int y) {
  const rep_t rep = toRep(x);
  int exp = (rep & exponentMask) >> significandBits;

  if (x == 0.0 || exp == maxExponent)
    return x; // +/- 0.0, NaN, or inf: return x

  // Normalize subnormal input.
  rep_t sig = rep & significandMask;
  if (exp == 0) {
    exp += normalize(&sig);
    sig &= ~implicitBit; // clear the implicit bit again
  }

  if (__builtin_sadd_overflow(exp, y, &exp)) {
    // Saturate the exponent, which will guarantee an underflow/overflow below.
    exp = (y >= 0) ? INT_MAX : INT_MIN;
  }

  // Return this value: [+/-] 1.sig * 2 ** (exp - exponentBias).
  const rep_t sign = rep & signBit;
  if (exp >= maxExponent) {
    // Overflow, which could produce infinity or the largest-magnitude finite
    // value, depending on the rounding mode.
    return fromRep(sign | ((rep_t)(maxExponent - 1) << significandBits)) * 2.0f;
  } else if (exp <= 0) {
    // Subnormal or underflow. Use floating-point multiply to handle truncation
    // correctly.
    fp_t tmp = fromRep(sign | (REP_C(1) << significandBits) | sig);
    exp += exponentBias - 1;
    if (exp < 1)
      exp = 1;
    tmp *= fromRep((rep_t)exp << significandBits);
    return tmp;
  } else
    return fromRep(sign | ((rep_t)exp << significandBits) | sig);
}

#endif // !defined(QUAD_PRECISION) || defined(CRT_HAS_IEEE_TF)

// Avoid using fmax from libm.
static __inline fp_t __compiler_rt_fmaxX(fp_t x, fp_t y) {
  // If either argument is NaN, return the other argument. If both are NaN,
  // arbitrarily return the second one. Otherwise, if both arguments are +/-0,
  // arbitrarily return the first one.
  return (crt_isnan(x) || x < y) ? y : x;
}

#endif

#if defined(SINGLE_PRECISION)

static __inline fp_t __compiler_rt_logbf(fp_t x) {
  return __compiler_rt_logbX(x);
}
static __inline fp_t __compiler_rt_scalbnf(fp_t x, int y) {
  return __compiler_rt_scalbnX(x, y);
}
static __inline fp_t __compiler_rt_fmaxf(fp_t x, fp_t y) {
#if defined(__aarch64__)
  // Use __builtin_fmaxf which turns into an fmaxnm instruction on AArch64.
  return __builtin_fmaxf(x, y);
#else
  // __builtin_fmaxf frequently turns into a libm call, so inline the function.
  return __compiler_rt_fmaxX(x, y);
#endif
}

#elif defined(DOUBLE_PRECISION)

static __inline fp_t __compiler_rt_logb(fp_t x) {
  return __compiler_rt_logbX(x);
}
static __inline fp_t __compiler_rt_scalbn(fp_t x, int y) {
  return __compiler_rt_scalbnX(x, y);
}
static __inline fp_t __compiler_rt_fmax(fp_t x, fp_t y) {
#if defined(__aarch64__)
  // Use __builtin_fmax which turns into an fmaxnm instruction on AArch64.
  return __builtin_fmax(x, y);
#else
  // __builtin_fmax frequently turns into a libm call, so inline the function.
  return __compiler_rt_fmaxX(x, y);
#endif
}

#elif defined(QUAD_PRECISION) && defined(CRT_HAS_TF_MODE)
// The generic implementation only works for ieee754 floating point. For other
// floating point types, continue to rely on the libm implementation for now.
#if defined(CRT_HAS_IEEE_TF)
static __inline tf_float __compiler_rt_logbtf(tf_float x) {
  return __compiler_rt_logbX(x);
}
static __inline tf_float __compiler_rt_scalbntf(tf_float x, int y) {
  return __compiler_rt_scalbnX(x, y);
}
static __inline tf_float __compiler_rt_fmaxtf(tf_float x, tf_float y) {
  return __compiler_rt_fmaxX(x, y);
}
#define __compiler_rt_logbl __compiler_rt_logbtf
#define __compiler_rt_scalbnl __compiler_rt_scalbntf
#define __compiler_rt_fmaxl __compiler_rt_fmaxtf
#define crt_fabstf crt_fabsf128
#define crt_copysigntf crt_copysignf128
#elif defined(CRT_LDBL_128BIT)
static __inline tf_float __compiler_rt_logbtf(tf_float x) {
  return crt_logbl(x);
}
static __inline tf_float __compiler_rt_scalbntf(tf_float x, int y) {
  return crt_scalbnl(x, y);
}
static __inline tf_float __compiler_rt_fmaxtf(tf_float x, tf_float y) {
  return crt_fmaxl(x, y);
}
#define __compiler_rt_logbl crt_logbl
#define __compiler_rt_scalbnl crt_scalbnl
#define __compiler_rt_fmaxl crt_fmaxl
#define crt_fabstf crt_fabsl
#define crt_copysigntf crt_copysignl
#else
#error Unsupported TF mode type
#endif

#endif // *_PRECISION

#endif // FP_LIB_HEADER