diff options
author | Andrew Turner <andrew@FreeBSD.org> | 2024-02-29 11:39:12 +0000 |
---|---|---|
committer | Andrew Turner <andrew@FreeBSD.org> | 2024-02-29 11:39:12 +0000 |
commit | 5a02ffc32e777041dd2dad4e651ed2a0865a0a5d (patch) | |
tree | c208745cf2910a3730a2b1b42e812bbac6be8942 /contrib/arm-optimized-routines/pl/math/sv_log_2u5.c | |
parent | b52dceb838116391996909ff50b49f950ee01f48 (diff) | |
parent | edc5c0de794f521eb620d2b6cbaee2434442a8f3 (diff) | |
download | src-5a02ffc32e777041dd2dad4e651ed2a0865a0a5d.tar.gz src-5a02ffc32e777041dd2dad4e651ed2a0865a0a5d.zip |
Update the Arm Optimized Routine library to v24.01
Sponsored by: Arm Ltd
Diffstat (limited to 'contrib/arm-optimized-routines/pl/math/sv_log_2u5.c')
-rw-r--r-- | contrib/arm-optimized-routines/pl/math/sv_log_2u5.c | 101 |
1 file changed, 46 insertions(+), 55 deletions(-)
diff --git a/contrib/arm-optimized-routines/pl/math/sv_log_2u5.c b/contrib/arm-optimized-routines/pl/math/sv_log_2u5.c index 7f06fd31ebf1..2530c9e3f62c 100644 --- a/contrib/arm-optimized-routines/pl/math/sv_log_2u5.c +++ b/contrib/arm-optimized-routines/pl/math/sv_log_2u5.c @@ -9,77 +9,68 @@ #include "pl_sig.h" #include "pl_test.h" -#if SV_SUPPORTED +#define P(i) sv_f64 (__v_log_data.poly[i]) +#define N (1 << V_LOG_TABLE_BITS) +#define Off (0x3fe6900900000000) +#define MaxTop (0x7ff) +#define MinTop (0x001) +#define ThreshTop (0x7fe) /* MaxTop - MinTop. */ -#define A(i) __sv_log_data.poly[i] -#define Ln2 (0x1.62e42fefa39efp-1) -#define N (1 << SV_LOG_TABLE_BITS) -#define OFF (0x3fe6900900000000) - -double -optr_aor_log_f64 (double); - -static NOINLINE sv_f64_t -__sv_log_specialcase (sv_f64_t x, sv_f64_t y, svbool_t cmp) +static svfloat64_t NOINLINE +special_case (svfloat64_t x, svfloat64_t y, svbool_t cmp) { - return sv_call_f64 (optr_aor_log_f64, x, y, cmp); + return sv_call_f64 (log, x, y, cmp); } -/* SVE port of Neon log algorithm from math/. +/* SVE port of AdvSIMD log algorithm. Maximum measured error is 2.17 ulp: - __sv_log(0x1.a6129884398a3p+0) got 0x1.ffffff1cca043p-2 - want 0x1.ffffff1cca045p-2. */ -sv_f64_t -__sv_log_x (sv_f64_t x, const svbool_t pg) + SV_NAME_D1 (log)(0x1.a6129884398a3p+0) got 0x1.ffffff1cca043p-2 + want 0x1.ffffff1cca045p-2. */ +svfloat64_t SV_NAME_D1 (log) (svfloat64_t x, const svbool_t pg) { - sv_u64_t ix = sv_as_u64_f64 (x); - sv_u64_t top = svlsr_n_u64_x (pg, ix, 48); - svbool_t cmp = svcmpge_u64 (pg, svsub_n_u64_x (pg, top, 0x0010), - sv_u64 (0x7ff0 - 0x0010)); + svuint64_t ix = svreinterpret_u64 (x); + svuint64_t top = svlsr_x (pg, ix, 52); + svbool_t cmp = svcmpge (pg, svsub_x (pg, top, MinTop), sv_u64 (ThreshTop)); - /* x = 2^k z; where z is in range [OFF,2*OFF) and exact. + /* x = 2^k z; where z is in range [Off,2*Off) and exact. The range is split into N subintervals. The ith subinterval contains z and c is near its center. 
*/ - sv_u64_t tmp = svsub_n_u64_x (pg, ix, OFF); - /* Equivalent to (tmp >> (52 - SV_LOG_TABLE_BITS)) % N, since N is a power - of 2. */ - sv_u64_t i - = svand_n_u64_x (pg, svlsr_n_u64_x (pg, tmp, (52 - SV_LOG_TABLE_BITS)), - N - 1); - sv_s64_t k - = svasr_n_s64_x (pg, sv_as_s64_u64 (tmp), 52); /* Arithmetic shift. */ - sv_u64_t iz = svsub_u64_x (pg, ix, svand_n_u64_x (pg, tmp, 0xfffULL << 52)); - sv_f64_t z = sv_as_f64_u64 (iz); + svuint64_t tmp = svsub_x (pg, ix, Off); + /* Calculate table index = (tmp >> (52 - V_LOG_TABLE_BITS)) % N. + The actual value of i is double this due to table layout. */ + svuint64_t i + = svand_x (pg, svlsr_x (pg, tmp, (51 - V_LOG_TABLE_BITS)), (N - 1) << 1); + svint64_t k + = svasr_x (pg, svreinterpret_s64 (tmp), 52); /* Arithmetic shift. */ + svuint64_t iz = svsub_x (pg, ix, svand_x (pg, tmp, 0xfffULL << 52)); + svfloat64_t z = svreinterpret_f64 (iz); /* Lookup in 2 global lists (length N). */ - sv_f64_t invc = sv_lookup_f64_x (pg, __sv_log_data.invc, i); - sv_f64_t logc = sv_lookup_f64_x (pg, __sv_log_data.logc, i); + svfloat64_t invc = svld1_gather_index (pg, &__v_log_data.table[0].invc, i); + svfloat64_t logc = svld1_gather_index (pg, &__v_log_data.table[0].logc, i); /* log(x) = log1p(z/c-1) + log(c) + k*Ln2. */ - sv_f64_t r = sv_fma_f64_x (pg, z, invc, sv_f64 (-1.0)); - sv_f64_t kd = sv_to_f64_s64_x (pg, k); + svfloat64_t r = svmad_x (pg, invc, z, -1); + svfloat64_t kd = svcvt_f64_x (pg, k); /* hi = r + log(c) + k*Ln2. */ - sv_f64_t hi = sv_fma_n_f64_x (pg, Ln2, kd, svadd_f64_x (pg, logc, r)); + svfloat64_t hi = svmla_x (pg, svadd_x (pg, logc, r), kd, __v_log_data.ln2); /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi. 
*/ - sv_f64_t r2 = svmul_f64_x (pg, r, r); - sv_f64_t y = sv_fma_n_f64_x (pg, A (3), r, sv_f64 (A (2))); - sv_f64_t p = sv_fma_n_f64_x (pg, A (1), r, sv_f64 (A (0))); - y = sv_fma_n_f64_x (pg, A (4), r2, y); - y = sv_fma_f64_x (pg, y, r2, p); - y = sv_fma_f64_x (pg, y, r2, hi); + svfloat64_t r2 = svmul_x (pg, r, r); + svfloat64_t y = svmla_x (pg, P (2), r, P (3)); + svfloat64_t p = svmla_x (pg, P (0), r, P (1)); + y = svmla_x (pg, y, r2, P (4)); + y = svmla_x (pg, p, r2, y); if (unlikely (svptest_any (pg, cmp))) - return __sv_log_specialcase (x, y, cmp); - return y; + return special_case (x, svmla_x (svnot_z (pg, cmp), hi, r2, y), cmp); + return svmla_x (pg, hi, r2, y); } -PL_ALIAS (__sv_log_x, _ZGVsMxv_log) - PL_SIG (SV, D, 1, log, 0.01, 11.1) -PL_TEST_ULP (__sv_log, 1.68) -PL_TEST_INTERVAL (__sv_log, -0.0, -0x1p126, 100) -PL_TEST_INTERVAL (__sv_log, 0x1p-149, 0x1p-126, 4000) -PL_TEST_INTERVAL (__sv_log, 0x1p-126, 0x1p-23, 50000) -PL_TEST_INTERVAL (__sv_log, 0x1p-23, 1.0, 50000) -PL_TEST_INTERVAL (__sv_log, 1.0, 100, 50000) -PL_TEST_INTERVAL (__sv_log, 100, inf, 50000) -#endif // SV_SUPPORTED +PL_TEST_ULP (SV_NAME_D1 (log), 1.68) +PL_TEST_INTERVAL (SV_NAME_D1 (log), -0.0, -inf, 1000) +PL_TEST_INTERVAL (SV_NAME_D1 (log), 0, 0x1p-149, 1000) +PL_TEST_INTERVAL (SV_NAME_D1 (log), 0x1p-149, 0x1p-126, 4000) +PL_TEST_INTERVAL (SV_NAME_D1 (log), 0x1p-126, 0x1p-23, 50000) +PL_TEST_INTERVAL (SV_NAME_D1 (log), 0x1p-23, 1.0, 50000) +PL_TEST_INTERVAL (SV_NAME_D1 (log), 1.0, 100, 50000) +PL_TEST_INTERVAL (SV_NAME_D1 (log), 100, inf, 50000) |