author    Christoph Moench-Tegeder <cmt@FreeBSD.org>  2021-10-02 21:08:43 +0000
committer Christoph Moench-Tegeder <cmt@FreeBSD.org>  2021-10-02 21:08:43 +0000
commit    edae8a1c040482b1cc559eee4f6d626288d54965 (patch)
tree      16a0b9cac8490caee34d51dae315f5cd68845d25
parent    bb34cce5effc6313580bd02c3b58a3e6a4e6dd1c (diff)
www/firefox: fix build on 32bit
this brings the double_t-related typedefs in line with our definitions
and brings back the rounding helpers for those.

PR:             258804
Reported by:    Felix Palmen
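For context (an illustration, not part of the commit): C99 ties double_t to the format in which double expressions are evaluated, so on FreeBSD/amd64 it is plain double, while on 32-bit x86 with x87 arithmetic it is long double; the __LP64__ guard added in the patch below follows that split. A minimal sketch using only standard headers (nothing FreeBSD-specific is assumed) shows the difference when compiled for each target:

/*
 * Illustration only, not part of the patch: double_t follows the
 * floating-point evaluation format, so it is plain double on LP64 x86
 * but long double on 32-bit x86 with x87 arithmetic.  Building and
 * running this on both targets shows the difference the __LP64__
 * guard below accounts for.
 */
#include <float.h>
#include <math.h>
#include <stdio.h>

int
main(void)
{
	printf("FLT_EVAL_METHOD  = %d\n", (int)FLT_EVAL_METHOD);
	printf("sizeof(double_t) = %zu\n", sizeof(double_t));
	printf("sizeof(double)   = %zu\n", sizeof(double));
	return (0);
}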
-rw-r--r--   www/firefox/files/patch-bug1729459_comment12   93
1 file changed, 93 insertions(+), 0 deletions(-)
diff --git a/www/firefox/files/patch-bug1729459_comment12 b/www/firefox/files/patch-bug1729459_comment12
new file mode 100644
index 000000000000..dda42170f71b
--- /dev/null
+++ b/www/firefox/files/patch-bug1729459_comment12
@@ -0,0 +1,93 @@
+--- modules/fdlibm/src/math_private.h.orig 2021-09-30 19:32:33.764224000 +0200
++++ modules/fdlibm/src/math_private.h 2021-10-02 22:15:33.265122000 +0200
+@@ -30,7 +30,11 @@
+ * Adapted from https://github.com/freebsd/freebsd-src/search?q=__double_t
+ */
+
+-typedef double __double_t;
++#ifdef __LP64__
++typedef double __double_t;
++#else
++typedef long double __double_t;
++#endif
+ typedef __double_t double_t;
+
+ /*
+@@ -630,7 +634,37 @@
+ return ((double)(x + 0x1.8p52) - 0x1.8p52);
+ }
+
++static inline float
++rnintf(__float_t x)
++{
++ /*
++ * As for rnint(), except we could just call that to handle the
++ * extra precision case, usually without losing efficiency.
++ */
++ return ((float)(x + 0x1.8p23F) - 0x1.8p23F);
++}
++
++#ifdef LDBL_MANT_DIG
+ /*
++ * The complications for extra precision are smaller for rnintl() since it
++ * can safely assume that the rounding precision has been increased from
++ * its default to FP_PE on x86. We don't exploit that here to get small
++ * optimizations from limiting the range to double. We just need it for
++ * the magic number to work with long doubles. ld128 callers should use
++ * rnint() instead of this if possible. ld80 callers should prefer
++ * rnintl() since for amd64 this avoids swapping the register set, while
++ * for i386 it makes no difference (assuming FP_PE), and for other arches
++ * it makes little difference.
++ */
++static inline long double
++rnintl(long double x)
++{
++ return (x + __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2 -
++ __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2);
++}
++#endif /* LDBL_MANT_DIG */
++
++/*
+ * irint() and i64rint() give the same result as casting to their integer
+ * return type provided their arg is a floating point integer. They can
+ * sometimes be more efficient because no rounding is required.
+@@ -644,6 +678,39 @@
+ sizeof(x) == sizeof(long double) ? irintl(x) : (int)(x))
+ #else
+ #define irint(x) ((int)(x))
++#endif
++
++#define i64rint(x) ((int64_t)(x)) /* only needed for ld128 so not opt. */
++
++#if defined(__i386__) && defined(__GNUCLIKE_ASM)
++static __inline int
++irintf(float x)
++{
++ int n;
++
++ __asm("fistl %0" : "=m" (n) : "t" (x));
++ return (n);
++}
++
++static __inline int
++irintd(double x)
++{
++ int n;
++
++ __asm("fistl %0" : "=m" (n) : "t" (x));
++ return (n);
++}
++#endif
++
++#if (defined(__amd64__) || defined(__i386__)) && defined(__GNUCLIKE_ASM)
++static __inline int
++irintl(long double x)
++{
++ int n;
++
++ __asm("fistl %0" : "=m" (n) : "t" (x));
++ return (n);
++}
+ #endif
+
+ #ifdef DEBUG
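A note on the restored rounding helpers (an illustration, not part of the commit): rnint(), rnintf() and rnintl() all rely on the same magic-number trick, adding and then subtracting 1.5 * 2^(mantissa digits) so that the default round-to-nearest mode discards the fractional bits without a libm call. A minimal standalone sketch of the double flavour, checked against rint(); it assumes round-to-nearest and arguments well below 2^52:

/*
 * Illustration only, not part of the patch: the magic-number rounding
 * used by rnint() above.  Adding 0x1.8p52 (1.5 * 2^52) pushes the
 * fractional bits out of a double's 53-bit mantissa, so the addition
 * rounds to the nearest integer; subtracting the constant again leaves
 * that integer.  Valid for |x| well below 2^52 in the default
 * round-to-nearest mode.
 */
#include <math.h>
#include <stddef.h>
#include <stdio.h>

static double
rnint_demo(double x)
{
	return ((double)(x + 0x1.8p52) - 0x1.8p52);
}

int
main(void)
{
	const double samples[] = { 0.4, 0.5, 1.5, 2.5, -1.5, 1000000.25 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("x = %12.2f  rnint_demo = %9.1f  rint = %9.1f\n",
		    samples[i], rnint_demo(samples[i]), rint(samples[i]));
	return (0);
}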