Diffstat (limited to 'lib/builtins/fp_extend_impl.inc')
-rw-r--r--  lib/builtins/fp_extend_impl.inc  129
1 file changed, 64 insertions, 65 deletions
diff --git a/lib/builtins/fp_extend_impl.inc b/lib/builtins/fp_extend_impl.inc
index b785cc7687ad..d1c9c02a00c5 100644
--- a/lib/builtins/fp_extend_impl.inc
+++ b/lib/builtins/fp_extend_impl.inc
@@ -1,9 +1,8 @@
//=-lib/fp_extend_impl.inc - low precision -> high precision conversion -*-- -//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -28,81 +27,81 @@
//
// Finally, the following assumptions are made:
//
-// 1. floating-point types and integer types have the same endianness on the
-// target platform
+// 1. Floating-point types and integer types have the same endianness on the
+// target platform.
//
-// 2. quiet NaNs, if supported, are indicated by the leading bit of the
-// significand field being set
+// 2. Quiet NaNs, if supported, are indicated by the leading bit of the
+// significand field being set.
//
//===----------------------------------------------------------------------===//
#include "fp_extend.h"

static __inline dst_t __extendXfYf2__(src_t a) {
-    // Various constants whose values follow from the type parameters.
-    // Any reasonable optimizer will fold and propagate all of these.
-    const int srcBits = sizeof(src_t)*CHAR_BIT;
-    const int srcExpBits = srcBits - srcSigBits - 1;
-    const int srcInfExp = (1 << srcExpBits) - 1;
-    const int srcExpBias = srcInfExp >> 1;
+  // Various constants whose values follow from the type parameters.
+  // Any reasonable optimizer will fold and propagate all of these.
+  const int srcBits = sizeof(src_t) * CHAR_BIT;
+  const int srcExpBits = srcBits - srcSigBits - 1;
+  const int srcInfExp = (1 << srcExpBits) - 1;
+  const int srcExpBias = srcInfExp >> 1;

-    const src_rep_t srcMinNormal = SRC_REP_C(1) << srcSigBits;
-    const src_rep_t srcInfinity = (src_rep_t)srcInfExp << srcSigBits;
-    const src_rep_t srcSignMask = SRC_REP_C(1) << (srcSigBits + srcExpBits);
-    const src_rep_t srcAbsMask = srcSignMask - 1;
-    const src_rep_t srcQNaN = SRC_REP_C(1) << (srcSigBits - 1);
-    const src_rep_t srcNaNCode = srcQNaN - 1;
+  const src_rep_t srcMinNormal = SRC_REP_C(1) << srcSigBits;
+  const src_rep_t srcInfinity = (src_rep_t)srcInfExp << srcSigBits;
+  const src_rep_t srcSignMask = SRC_REP_C(1) << (srcSigBits + srcExpBits);
+  const src_rep_t srcAbsMask = srcSignMask - 1;
+  const src_rep_t srcQNaN = SRC_REP_C(1) << (srcSigBits - 1);
+  const src_rep_t srcNaNCode = srcQNaN - 1;

-    const int dstBits = sizeof(dst_t)*CHAR_BIT;
-    const int dstExpBits = dstBits - dstSigBits - 1;
-    const int dstInfExp = (1 << dstExpBits) - 1;
-    const int dstExpBias = dstInfExp >> 1;
+  const int dstBits = sizeof(dst_t) * CHAR_BIT;
+  const int dstExpBits = dstBits - dstSigBits - 1;
+  const int dstInfExp = (1 << dstExpBits) - 1;
+  const int dstExpBias = dstInfExp >> 1;

-    const dst_rep_t dstMinNormal = DST_REP_C(1) << dstSigBits;
+  const dst_rep_t dstMinNormal = DST_REP_C(1) << dstSigBits;

-    // Break a into a sign and representation of the absolute value
-    const src_rep_t aRep = srcToRep(a);
-    const src_rep_t aAbs = aRep & srcAbsMask;
-    const src_rep_t sign = aRep & srcSignMask;
-    dst_rep_t absResult;
+  // Break a into a sign and representation of the absolute value.
+  const src_rep_t aRep = srcToRep(a);
+  const src_rep_t aAbs = aRep & srcAbsMask;
+  const src_rep_t sign = aRep & srcSignMask;
+  dst_rep_t absResult;

-    // If sizeof(src_rep_t) < sizeof(int), the subtraction result is promoted
-    // to (signed) int. To avoid that, explicitly cast to src_rep_t.
-    if ((src_rep_t)(aAbs - srcMinNormal) < srcInfinity - srcMinNormal) {
-        // a is a normal number.
-        // Extend to the destination type by shifting the significand and
-        // exponent into the proper position and rebiasing the exponent.
-        absResult = (dst_rep_t)aAbs << (dstSigBits - srcSigBits);
-        absResult += (dst_rep_t)(dstExpBias - srcExpBias) << dstSigBits;
-    }
+  // If sizeof(src_rep_t) < sizeof(int), the subtraction result is promoted
+  // to (signed) int. To avoid that, explicitly cast to src_rep_t.
+  if ((src_rep_t)(aAbs - srcMinNormal) < srcInfinity - srcMinNormal) {
+    // a is a normal number.
+    // Extend to the destination type by shifting the significand and
+    // exponent into the proper position and rebiasing the exponent.
+    absResult = (dst_rep_t)aAbs << (dstSigBits - srcSigBits);
+    absResult += (dst_rep_t)(dstExpBias - srcExpBias) << dstSigBits;
+  }

-    else if (aAbs >= srcInfinity) {
-        // a is NaN or infinity.
-        // Conjure the result by beginning with infinity, then setting the qNaN
-        // bit (if needed) and right-aligning the rest of the trailing NaN
-        // payload field.
-        absResult = (dst_rep_t)dstInfExp << dstSigBits;
-        absResult |= (dst_rep_t)(aAbs & srcQNaN) << (dstSigBits - srcSigBits);
-        absResult |= (dst_rep_t)(aAbs & srcNaNCode) << (dstSigBits - srcSigBits);
-    }
+  else if (aAbs >= srcInfinity) {
+    // a is NaN or infinity.
+    // Conjure the result by beginning with infinity, then setting the qNaN
+    // bit (if needed) and right-aligning the rest of the trailing NaN
+    // payload field.
+    absResult = (dst_rep_t)dstInfExp << dstSigBits;
+    absResult |= (dst_rep_t)(aAbs & srcQNaN) << (dstSigBits - srcSigBits);
+    absResult |= (dst_rep_t)(aAbs & srcNaNCode) << (dstSigBits - srcSigBits);
+  }

-    else if (aAbs) {
-        // a is denormal.
-        // renormalize the significand and clear the leading bit, then insert
-        // the correct adjusted exponent in the destination type.
-        const int scale = src_rep_t_clz(aAbs) - src_rep_t_clz(srcMinNormal);
-        absResult = (dst_rep_t)aAbs << (dstSigBits - srcSigBits + scale);
-        absResult ^= dstMinNormal;
-        const int resultExponent = dstExpBias - srcExpBias - scale + 1;
-        absResult |= (dst_rep_t)resultExponent << dstSigBits;
-    }
+  else if (aAbs) {
+    // a is denormal.
+    // renormalize the significand and clear the leading bit, then insert
+    // the correct adjusted exponent in the destination type.
+    const int scale = src_rep_t_clz(aAbs) - src_rep_t_clz(srcMinNormal);
+    absResult = (dst_rep_t)aAbs << (dstSigBits - srcSigBits + scale);
+    absResult ^= dstMinNormal;
+    const int resultExponent = dstExpBias - srcExpBias - scale + 1;
+    absResult |= (dst_rep_t)resultExponent << dstSigBits;
+  }

-    else {
-        // a is zero.
-        absResult = 0;
-    }
+  else {
+    // a is zero.
+    absResult = 0;
+  }

-    // Apply the signbit to (dst_t)abs(a).
-    const dst_rep_t result = absResult | (dst_rep_t)sign << (dstBits - srcBits);
-    return dstFromRep(result);
+  // Apply the signbit to the absolute value.
+  const dst_rep_t result = absResult | (dst_rep_t)sign << (dstBits - srcBits);
+  return dstFromRep(result);
}
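
As a rough, self-contained sketch of the "normal number" path the comments above describe (shift the significand into place, rebias the exponent, then re-attach the sign bit), the following program widens a normal float to a double with the same bit manipulations. The hard-coded widths and biases (23/52 significand bits, 127/1023 exponent biases), the fixed masks, and the main() harness are illustrative assumptions for the float -> double case only; they are not part of fp_extend_impl.inc, which derives its constants from the type parameters.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative constants for float -> double; fp_extend_impl.inc computes
       these from src_t/dst_t and the *SigBits parameters instead. */
    int main(void) {
      const int srcSigBits = 23, dstSigBits = 52;
      const int srcExpBias = 127, dstExpBias = 1023;

      const float a = 3.5f; /* a normal number */

      uint32_t aRep;
      memcpy(&aRep, &a, sizeof aRep); /* like srcToRep: reinterpret the bits */

      const uint32_t aAbs = aRep & 0x7fffffffu; /* clear the sign bit */
      const uint64_t sign = aRep & 0x80000000u; /* keep only the sign bit */

      /* Shift the significand and exponent into place, then add the bias
         difference to the exponent field. */
      uint64_t absResult = (uint64_t)aAbs << (dstSigBits - srcSigBits);
      absResult += (uint64_t)(dstExpBias - srcExpBias) << dstSigBits;

      /* Re-attach the sign bit; dstBits - srcBits == 32 for this case. */
      const uint64_t resultRep = absResult | (sign << 32);

      double result;
      memcpy(&result, &resultRep, sizeof result); /* like dstFromRep */

      printf("%f -> %f\n", (double)a, result); /* prints: 3.500000 -> 3.500000 */
      return 0;
    }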