//===-- lib/fp_lib.h - Floating-point utilities -------------------*- C -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a configuration header for the soft-float routines in
// compiler-rt. It does not provide any part of the compiler-rt interface, but
// defines the constants and utility routines that are used to implement the
// soft-float routines.
//
// Assumes that float, double and long double correspond to the IEEE-754
// binary32, binary64 and binary128 types, respectively, and that integer
// endianness matches floating-point endianness on the target platform.
//
//===----------------------------------------------------------------------===//

#ifndef FP_LIB_HEADER
#define FP_LIB_HEADER

#include "int_lib.h"
#include "int_math.h"
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

// x86_64 FreeBSD prior to v9.3 defines fixed-width types incorrectly in
// 32-bit mode.
#if defined(__FreeBSD__) && defined(__i386__)
#include <sys/param.h>
#if __FreeBSD_version < 903000 // v9.3
#define uint64_t unsigned long long
#define int64_t long long
#undef UINT64_C
#define UINT64_C(c) (c##ULL)
#endif
#endif

#if defined SINGLE_PRECISION

typedef uint16_t half_rep_t;
typedef uint32_t rep_t;
typedef uint64_t twice_rep_t;
typedef int32_t srep_t;
typedef float fp_t;
#define HALF_REP_C UINT16_C
#define REP_C UINT32_C
#define significandBits 23

static __inline int rep_clz(rep_t a) { return clzsi(a); }

// 32x32 --> 64 bit multiply
static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
  const uint64_t product = (uint64_t)a * b;
  *hi = product >> 32;
  *lo = product;
}
COMPILER_RT_ABI fp_t __addsf3(fp_t a, fp_t b);

#elif defined DOUBLE_PRECISION

typedef uint32_t half_rep_t;
typedef uint64_t rep_t;
typedef int64_t srep_t;
typedef double fp_t;
#define HALF_REP_C UINT32_C
#define REP_C UINT64_C
#define significandBits 52

static __inline int rep_clz(rep_t a) {
#if defined __LP64__
  return __builtin_clzl(a);
#else
  if (a & REP_C(0xffffffff00000000))
    return clzsi(a >> 32);
  else
    return 32 + clzsi(a & REP_C(0xffffffff));
#endif
}

#define loWord(a) (a & 0xffffffffU)
#define hiWord(a) (a >> 32)

// 64x64 -> 128 wide multiply for platforms that don't have such an operation;
// many 64-bit platforms have this operation, but they tend to have hardware
// floating-point, so we don't bother with a special case for them here.
static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
  // Each of the component 32x32 -> 64 products
  const uint64_t plolo = loWord(a) * loWord(b);
  const uint64_t plohi = loWord(a) * hiWord(b);
  const uint64_t philo = hiWord(a) * loWord(b);
  const uint64_t phihi = hiWord(a) * hiWord(b);
  // Sum terms that contribute to lo in a way that allows us to get the carry
  const uint64_t r0 = loWord(plolo);
  const uint64_t r1 = hiWord(plolo) + loWord(plohi) + loWord(philo);
  *lo = r0 + (r1 << 32);
  // Sum terms contributing to hi with the carry from lo
  *hi = hiWord(plohi) + hiWord(philo) + hiWord(r1) + phihi;
}
#undef loWord
#undef hiWord
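
// A minimal self-check sketch, not part of the original header: it compares
// wideMultiply against a native __uint128_t product where the compiler
// provides one. The FP_LIB_EXAMPLES guard and the function name are
// hypothetical, so this is never built by default.
#if defined(FP_LIB_EXAMPLES) && defined(__SIZEOF_INT128__)
static __inline bool wideMultiplyMatchesNative(rep_t a, rep_t b) {
  rep_t hi, lo;
  wideMultiply(a, b, &hi, &lo);
  const __uint128_t ref = (__uint128_t)a * b;
  return hi == (rep_t)(ref >> 64) && lo == (rep_t)ref;
}
#endif // FP_LIB_EXAMPLES && __SIZEOF_INT128__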

COMPILER_RT_ABI fp_t __adddf3(fp_t a, fp_t b);

#elif defined QUAD_PRECISION
#if __LDBL_MANT_DIG__ == 113 && defined(__SIZEOF_INT128__)
#define CRT_LDBL_128BIT
typedef uint64_t half_rep_t;
typedef __uint128_t rep_t;
typedef __int128_t srep_t;
typedef long double fp_t;
#define HALF_REP_C UINT64_C
#define REP_C (__uint128_t)
// Note: Since there is no explicit way to tell the compiler that a constant is
// a 128-bit integer, we cast the constant to a 128-bit integer instead.
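// For example, REP_C(1) << 100 expands to (__uint128_t)(1) << 100; without the
// cast, the literal would be a plain int and the shift would be undefined.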
#define significandBits 112

static __inline int rep_clz(rep_t a) {
  const union {
    __uint128_t ll;
#if _YUGA_BIG_ENDIAN
    struct {
      uint64_t high, low;
    } s;
#else
    struct {
      uint64_t low, high;
    } s;
#endif
  } uu = {.ll = a};

  uint64_t word;
  uint64_t add;

  if (uu.s.high) {
    word = uu.s.high;
    add = 0;
  } else {
    word = uu.s.low;
    add = 64;
  }
  return __builtin_clzll(word) + add;
}

#define Word_LoMask UINT64_C(0x00000000ffffffff)
#define Word_HiMask UINT64_C(0xffffffff00000000)
#define Word_FullMask UINT64_C(0xffffffffffffffff)
#define Word_1(a) (uint64_t)((a >> 96) & Word_LoMask)
#define Word_2(a) (uint64_t)((a >> 64) & Word_LoMask)
#define Word_3(a) (uint64_t)((a >> 32) & Word_LoMask)
#define Word_4(a) (uint64_t)(a & Word_LoMask)

// 128x128 -> 256 wide multiply for platforms that don't have such an operation;
// many 64-bit platforms have this operation, but they tend to have hardware
// floating-point, so we don't bother with a special case for them here.
static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {

  const uint64_t product11 = Word_1(a) * Word_1(b);
  const uint64_t product12 = Word_1(a) * Word_2(b);
  const uint64_t product13 = Word_1(a) * Word_3(b);
  const uint64_t product14 = Word_1(a) * Word_4(b);
  const uint64_t product21 = Word_2(a) * Word_1(b);
  const uint64_t product22 = Word_2(a) * Word_2(b);
  const uint64_t product23 = Word_2(a) * Word_3(b);
  const uint64_t product24 = Word_2(a) * Word_4(b);
  const uint64_t product31 = Word_3(a) * Word_1(b);
  const uint64_t product32 = Word_3(a) * Word_2(b);
  const uint64_t product33 = Word_3(a) * Word_3(b);
  const uint64_t product34 = Word_3(a) * Word_4(b);
  const uint64_t product41 = Word_4(a) * Word_1(b);
  const uint64_t product42 = Word_4(a) * Word_2(b);
  const uint64_t product43 = Word_4(a) * Word_3(b);
  const uint64_t product44 = Word_4(a) * Word_4(b);

  const __uint128_t sum0 = (__uint128_t)product44;
  const __uint128_t sum1 = (__uint128_t)product34 + (__uint128_t)product43;
  const __uint128_t sum2 =
      (__uint128_t)product24 + (__uint128_t)product33 + (__uint128_t)product42;
  const __uint128_t sum3 = (__uint128_t)product14 + (__uint128_t)product23 +
                           (__uint128_t)product32 + (__uint128_t)product41;
  const __uint128_t sum4 =
      (__uint128_t)product13 + (__uint128_t)product22 + (__uint128_t)product31;
  const __uint128_t sum5 = (__uint128_t)product12 + (__uint128_t)product21;
  const __uint128_t sum6 = (__uint128_t)product11;

  const __uint128_t r0 = (sum0 & Word_FullMask) + ((sum1 & Word_LoMask) << 32);
  const __uint128_t r1 = (sum0 >> 64) + ((sum1 >> 32) & Word_FullMask) +
                         (sum2 & Word_FullMask) + ((sum3 << 32) & Word_HiMask);

  *lo = r0 + (r1 << 64);
  *hi = (r1 >> 64) + (sum1 >> 96) + (sum2 >> 64) + (sum3 >> 32) + sum4 +
        (sum5 << 32) + (sum6 << 64);
}
#undef Word_1
#undef Word_2
#undef Word_3
#undef Word_4
#undef Word_HiMask
#undef Word_LoMask
#undef Word_FullMask
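
// A minimal usage sketch, not part of the original header; the FP_LIB_EXAMPLES
// guard and the function name are hypothetical. 2^64 * 2^64 = 2^128, so the
// high half of the result must be 1 and the low half 0, while any product of
// two 64-bit inputs must land entirely in the low half.
#if defined(FP_LIB_EXAMPLES)
static __inline bool wideMultiplyQuadExample(void) {
  rep_t hi, lo;
  wideMultiply((rep_t)1 << 64, (rep_t)1 << 64, &hi, &lo);
  if (hi != 1 || lo != 0)
    return false;
  wideMultiply(REP_C(0xFFFFFFFFFFFFFFFF), REP_C(0xFFFFFFFFFFFFFFFF), &hi, &lo);
  return hi == 0 &&
         lo == (rep_t)UINT64_C(0xFFFFFFFFFFFFFFFF) *
                   UINT64_C(0xFFFFFFFFFFFFFFFF);
}
#endif // FP_LIB_EXAMPLES
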
#endif // __LDBL_MANT_DIG__ == 113 && __SIZEOF_INT128__
#else
#error SINGLE_PRECISION, DOUBLE_PRECISION or QUAD_PRECISION must be defined.
#endif

#if defined(SINGLE_PRECISION) || defined(DOUBLE_PRECISION) ||                  \
    defined(CRT_LDBL_128BIT)
#define typeWidth (sizeof(rep_t) * CHAR_BIT)
#define exponentBits (typeWidth - significandBits - 1)
#define maxExponent ((1 << exponentBits) - 1)
#define exponentBias (maxExponent >> 1)

#define implicitBit (REP_C(1) << significandBits)
#define significandMask (implicitBit - 1U)
#define signBit (REP_C(1) << (significandBits + exponentBits))
#define absMask (signBit - 1U)
#define exponentMask (absMask ^ significandMask)
#define oneRep ((rep_t)exponentBias << significandBits)
#define infRep exponentMask
#define quietBit (implicitBit >> 1)
#define qnanRep (exponentMask | quietBit)

static __inline rep_t toRep(fp_t x) {
  const union {
    fp_t f;
    rep_t i;
  } rep = {.f = x};
  return rep.i;
}

static __inline fp_t fromRep(rep_t x) {
  const union {
    fp_t f;
    rep_t i;
  } rep = {.i = x};
  return rep.f;
}

static __inline int normalize(rep_t *significand) {
  const int shift = rep_clz(*significand) - rep_clz(implicitBit);
  *significand <<= shift;
  return 1 - shift;
}

static __inline void wideLeftShift(rep_t *hi, rep_t *lo, int count) {
  *hi = *hi << count | *lo >> (typeWidth - count);
  *lo = *lo << count;
}

static __inline void wideRightShiftWithSticky(rep_t *hi, rep_t *lo,
                                              unsigned int count) {
  if (count < typeWidth) {
    const bool sticky = (*lo << (typeWidth - count)) != 0;
    *lo = *hi << (typeWidth - count) | *lo >> count | sticky;
    *hi = *hi >> count;
  } else if (count < 2 * typeWidth) {
    const bool sticky = *hi << (2 * typeWidth - count) | *lo;
    *lo = *hi >> (count - typeWidth) | sticky;
    *hi = 0;
  } else {
    const bool sticky = *hi | *lo;
    *lo = sticky;
    *hi = 0;
  }
}

// Implements logb methods (logb, logbf, logbl) for IEEE-754. This avoids
// pulling in a libm dependency from compiler-rt, but is not meant to replace
// it (i.e. code calling logb() should get the one from libm, not this), hence
// the __compiler_rt prefix.
static __inline fp_t __compiler_rt_logbX(fp_t x) {
  rep_t rep = toRep(x);
  int exp = (rep & exponentMask) >> significandBits;

  // Abnormal cases:
  // 1) +/- inf returns +inf; NaN returns NaN
  // 2) 0.0 returns -inf
  if (exp == maxExponent) {
    if (((rep & signBit) == 0) || (x != x)) {
      return x; // NaN or +inf: return x
    } else {
      return -x; // -inf: return -x
    }
  } else if (x == 0.0) {
    // 0.0: return -inf
    return fromRep(infRep | signBit);
  }

  if (exp != 0) {
    // Normal number
    return exp - exponentBias; // Unbias exponent
  } else {
    // Subnormal number; normalize and repeat
    rep &= absMask;
    const int shift = 1 - normalize(&rep);
    exp = (rep & exponentMask) >> significandBits;
    return exp - exponentBias - shift; // Unbias exponent
  }
}
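
// A minimal usage sketch, not part of the original header; the FP_LIB_EXAMPLES
// guard and the function name are hypothetical. logb returns the unbiased
// exponent, so 8.0 = 1.0 * 2^3 gives 3 and 0.75 = 1.5 * 2^-1 gives -1; both
// values are exact in every supported format.
#if defined(FP_LIB_EXAMPLES)
static __inline bool logbExample(void) {
  return __compiler_rt_logbX((fp_t)8.0) == (fp_t)3.0 &&
         __compiler_rt_logbX((fp_t)0.75) == (fp_t)-1.0;
}
#endif // FP_LIB_EXAMPLES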

// Avoid using scalbn from libm. Unlike libc/libm scalbn, this function never
// sets errno on underflow/overflow.
static __inline fp_t __compiler_rt_scalbnX(fp_t x, int y) {
  const rep_t rep = toRep(x);
  int exp = (rep & exponentMask) >> significandBits;

  if (x == 0.0 || exp == maxExponent)
    return x; // +/- 0.0, NaN, or inf: return x

  // Normalize subnormal input.
  rep_t sig = rep & significandMask;
  if (exp == 0) {
    exp += normalize(&sig);
    sig &= ~implicitBit; // clear the implicit bit again
  }

  if (__builtin_sadd_overflow(exp, y, &exp)) {
    // Saturate the exponent, which will guarantee an underflow/overflow below.
    exp = (y >= 0) ? INT_MAX : INT_MIN;
  }

  // Return this value: [+/-] 1.sig * 2 ** (exp - exponentBias).
  const rep_t sign = rep & signBit;
  if (exp >= maxExponent) {
    // Overflow, which could produce infinity or the largest-magnitude value,
    // depending on the rounding mode.
    return fromRep(sign | ((rep_t)(maxExponent - 1) << significandBits)) * 2.0f;
  } else if (exp <= 0) {
    // Subnormal or underflow. Use floating-point multiply to handle truncation
    // correctly.
    fp_t tmp = fromRep(sign | (REP_C(1) << significandBits) | sig);
    exp += exponentBias - 1;
    if (exp < 1)
      exp = 1;
    tmp *= fromRep((rep_t)exp << significandBits);
    return tmp;
  } else
    return fromRep(sign | ((rep_t)exp << significandBits) | sig);
}
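
// A minimal usage sketch, not part of the original header; the FP_LIB_EXAMPLES
// guard and the function name are hypothetical. For in-range results,
// scalbn(x, y) scales x by 2**y exactly: scalbn(0.75, 4) == 12.0 and
// scalbn(1.0, -1) == 0.5.
#if defined(FP_LIB_EXAMPLES)
static __inline bool scalbnExample(void) {
  return __compiler_rt_scalbnX((fp_t)0.75, 4) == (fp_t)12.0 &&
         __compiler_rt_scalbnX((fp_t)1.0, -1) == (fp_t)0.5;
}
#endif // FP_LIB_EXAMPLES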

// Avoid using fmax from libm.
static __inline fp_t __compiler_rt_fmaxX(fp_t x, fp_t y) {
  // If either argument is NaN, return the other argument. If both are NaN,
  // arbitrarily return the second one. Otherwise, if both arguments are +/-0,
  // arbitrarily return the first one.
  return (crt_isnan(x) || x < y) ? y : x;
}
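
// A minimal usage sketch, not part of the original header; the FP_LIB_EXAMPLES
// guard and the function name are hypothetical. Unlike a bare (x < y ? y : x),
// a NaN first argument yields the numeric second argument.
#if defined(FP_LIB_EXAMPLES)
static __inline bool fmaxExample(void) {
  const fp_t qnan = fromRep(qnanRep);
  return __compiler_rt_fmaxX(qnan, (fp_t)1.0) == (fp_t)1.0 &&
         __compiler_rt_fmaxX((fp_t)2.0, (fp_t)-3.0) == (fp_t)2.0;
}
#endif // FP_LIB_EXAMPLES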

#endif

#if defined(SINGLE_PRECISION)

static __inline fp_t __compiler_rt_logbf(fp_t x) {
  return __compiler_rt_logbX(x);
}
static __inline fp_t __compiler_rt_scalbnf(fp_t x, int y) {
  return __compiler_rt_scalbnX(x, y);
}
static __inline fp_t __compiler_rt_fmaxf(fp_t x, fp_t y) {
#if defined(__aarch64__)
  // Use __builtin_fmaxf which turns into an fmaxnm instruction on AArch64.
  return __builtin_fmaxf(x, y);
#else
  // __builtin_fmaxf frequently turns into a libm call, so inline the function.
  return __compiler_rt_fmaxX(x, y);
#endif
}

#elif defined(DOUBLE_PRECISION)

static __inline fp_t __compiler_rt_logb(fp_t x) {
  return __compiler_rt_logbX(x);
}
static __inline fp_t __compiler_rt_scalbn(fp_t x, int y) {
  return __compiler_rt_scalbnX(x, y);
}
static __inline fp_t __compiler_rt_fmax(fp_t x, fp_t y) {
#if defined(__aarch64__)
  // Use __builtin_fmax which turns into an fmaxnm instruction on AArch64.
  return __builtin_fmax(x, y);
#else
  // __builtin_fmax frequently turns into a libm call, so inline the function.
  return __compiler_rt_fmaxX(x, y);
#endif
}

#elif defined(QUAD_PRECISION)

#if defined(CRT_LDBL_128BIT)
static __inline fp_t __compiler_rt_logbl(fp_t x) {
  return __compiler_rt_logbX(x);
}
static __inline fp_t __compiler_rt_scalbnl(fp_t x, int y) {
  return __compiler_rt_scalbnX(x, y);
}
static __inline fp_t __compiler_rt_fmaxl(fp_t x, fp_t y) {
  return __compiler_rt_fmaxX(x, y);
}
#else
// The generic implementation only works for IEEE-754 floating point. For other
// floating-point types, continue to rely on the libm implementation for now.
static __inline long double __compiler_rt_logbl(long double x) {
  return crt_logbl(x);
}
static __inline long double __compiler_rt_scalbnl(long double x, int y) {
  return crt_scalbnl(x, y);
}
static __inline long double __compiler_rt_fmaxl(long double x, long double y) {
  return crt_fmaxl(x, y);
}
#endif // CRT_LDBL_128BIT

#endif // *_PRECISION

#endif // FP_LIB_HEADER