AArch64: Improve codegen for SVE pow

Move constants to struct.  Improve memory access with indexed/unpredicated
instructions.  Eliminate register spills.  Speedup on Neoverse V1: 24%.

Reviewed-by: Wilco Dijkstra  <Wilco.Dijkstra@arm.com>
Author:    Yat Long Poon
Committer: Wilco Dijkstra
Date:      2025-02-13 18:02:01 +00:00
Commit:    0b195651db
Parent:    f5ff34cb3c

@@ -44,19 +44,18 @@
 /* Data is defined in v_pow_log_data.c.  */
 #define N_LOG (1 << V_POW_LOG_TABLE_BITS)
-#define A __v_pow_log_data.poly
 #define Off 0x3fe6955500000000
 
 /* Data is defined in v_pow_exp_data.c.  */
 #define N_EXP (1 << V_POW_EXP_TABLE_BITS)
 #define SignBias (0x800 << V_POW_EXP_TABLE_BITS)
-#define C __v_pow_exp_data.poly
 #define SmallExp 0x3c9 /* top12(0x1p-54).  */
 #define BigExp 0x408   /* top12(512.).  */
 #define ThresExp 0x03f /* BigExp - SmallExp.  */
 #define HugeExp 0x409  /* top12(1024.).  */
 
 /* Constants associated with pow.  */
+#define SmallBoundX 0x1p-126
 #define SmallPowX 0x001 /* top12(0x1p-126).  */
 #define BigPowX 0x7ff   /* top12(INFINITY).  */
 #define ThresPowX 0x7fe /* BigPowX - SmallPowX.  */
@@ -64,6 +63,31 @@
 #define BigPowY 0x43e   /* top12(0x1.749p62).  */
 #define ThresPowY 0x080 /* BigPowY - SmallPowY.  */
 
+static const struct data
+{
+  double log_c0, log_c2, log_c4, log_c6, ln2_hi, ln2_lo;
+  double log_c1, log_c3, log_c5, off;
+  double n_over_ln2, exp_c2, ln2_over_n_hi, ln2_over_n_lo;
+  double exp_c0, exp_c1;
+} data = {
+  .log_c0 = -0x1p-1,
+  .log_c1 = -0x1.555555555556p-1,
+  .log_c2 = 0x1.0000000000006p-1,
+  .log_c3 = 0x1.999999959554ep-1,
+  .log_c4 = -0x1.555555529a47ap-1,
+  .log_c5 = -0x1.2495b9b4845e9p0,
+  .log_c6 = 0x1.0002b8b263fc3p0,
+  .off = Off,
+  .exp_c0 = 0x1.fffffffffffd4p-2,
+  .exp_c1 = 0x1.5555571d6ef9p-3,
+  .exp_c2 = 0x1.5555576a5adcep-5,
+  .ln2_hi = 0x1.62e42fefa3800p-1,
+  .ln2_lo = 0x1.ef35793c76730p-45,
+  .n_over_ln2 = 0x1.71547652b82fep0 * N_EXP,
+  .ln2_over_n_hi = 0x1.62e42fefc0000p-9,
+  .ln2_over_n_lo = -0x1.c610ca86c3899p-45,
+};
+
 /* Check if x is an integer.  */
 static inline svbool_t
 sv_isint (svbool_t pg, svfloat64_t x)
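Grouping all constants into one struct means the core loads them at small immediate offsets from a single base register (and lets pairs of them be fetched together, see the LD1RQ notes below), instead of materializing each one from a separate literal. The ptr_barrier (&data) call later in the patch is glibc's asm barrier that stops the compiler from undoing this. A minimal sketch of such a barrier, an assumption about its shape rather than glibc's actual definition:

    /* Hiding the pointer behind an empty asm forces the compiler to keep
       one base register for the whole table of constants.  */
    static inline const double *
    ptr_barrier_sketch (const double *p)
    {
      __asm__ ("" : "+r" (p));
      return p;
    }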
@@ -82,7 +106,7 @@ sv_isnotint (svbool_t pg, svfloat64_t x)
 static inline svbool_t
 sv_isodd (svbool_t pg, svfloat64_t x)
 {
-  svfloat64_t y = svmul_x (pg, x, 0.5);
+  svfloat64_t y = svmul_x (svptrue_b64 (), x, 0.5);
   return sv_isnotint (pg, y);
 }
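The svptrue_b64 () change here (and in several hunks below) exploits the fact that _x intrinsics leave inactive lanes undefined: when the predicate is statically all-true, the compiler can select unpredicated instruction forms and avoid MOVPRFX copies. A minimal sketch of the pattern; exact codegen is compiler-dependent:

    #include <arm_sve.h>

    svfloat64_t
    halve (svfloat64_t x)
    {
      /* Unpredicated: every lane is computed; callers only read the
         lanes their own predicate governs, so this is safe and can map
         to a plain FMUL without MOVPRFX.  */
      return svmul_x (svptrue_b64 (), x, 0.5);
    }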
@@ -121,7 +145,7 @@ zeroinfnan (uint64_t i)
 static inline svbool_t
 sv_zeroinfnan (svbool_t pg, svuint64_t i)
 {
-  return svcmpge (pg, svsub_x (pg, svmul_x (pg, i, 2), 1),
+  return svcmpge (pg, svsub_x (pg, svadd_x (pg, i, i), 1),
                   2 * asuint64 (INFINITY) - 1);
 }
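sv_zeroinfnan catches three classes in one unsigned compare; the patch only replaces the multiply by 2 with i + i, which maps to a vector ADD instead of a costlier integer MUL. A scalar model of the underlying trick, with asuint64 spelled out for self-containment:

    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    static inline uint64_t
    asuint64 (double x)
    {
      uint64_t u;
      memcpy (&u, &x, sizeof u);
      return u;
    }

    /* 2*i shifts out the sign bit; the unsigned wrap of 2*i - 1 sends
       i == 0 (i.e. +/-0.0) to UINT64_MAX, so a single compare catches
       zero, infinity and NaN together.  */
    static inline int
    zeroinfnan_model (uint64_t i)
    {
      return 2 * i - 1 >= 2 * asuint64 (INFINITY) - 1;
    }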
@@ -174,16 +198,17 @@ sv_call_specialcase (svfloat64_t x1, svuint64_t u1, svuint64_t u2,
    additional 15 bits precision.  IX is the bit representation of x, but
    normalized in the subnormal range using the sign bit for the exponent.  */
 static inline svfloat64_t
-sv_log_inline (svbool_t pg, svuint64_t ix, svfloat64_t *tail)
+sv_log_inline (svbool_t pg, svuint64_t ix, svfloat64_t *tail,
+               const struct data *d)
 {
   /* x = 2^k z; where z is in range [Off,2*Off) and exact.
      The range is split into N subintervals.
      The ith subinterval contains z and c is near its center.  */
-  svuint64_t tmp = svsub_x (pg, ix, Off);
+  svuint64_t tmp = svsub_x (pg, ix, d->off);
   svuint64_t i = svand_x (pg, svlsr_x (pg, tmp, 52 - V_POW_LOG_TABLE_BITS),
                           sv_u64 (N_LOG - 1));
   svint64_t k = svasr_x (pg, svreinterpret_s64 (tmp), 52);
-  svuint64_t iz = svsub_x (pg, ix, svand_x (pg, tmp, sv_u64 (0xfffULL << 52)));
+  svuint64_t iz = svsub_x (pg, ix, svlsl_x (pg, svreinterpret_u64 (k), 52));
   svfloat64_t z = svreinterpret_f64 (iz);
   svfloat64_t kd = svcvt_f64_x (pg, k);
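The new iz line is value-for-value identical to the old one: k already holds the top twelve bits of tmp (sign-extended), so shifting it back left reproduces tmp & (0xfff << 52), and the separate mask constant disappears. A scalar model of the equivalence:

    #include <stdint.h>

    static inline uint64_t
    iz_old (uint64_t ix, uint64_t off)
    {
      uint64_t tmp = ix - off;
      return ix - (tmp & (0xfffULL << 52));
    }

    static inline uint64_t
    iz_new (uint64_t ix, uint64_t off)
    {
      uint64_t tmp = ix - off;
      int64_t k = (int64_t) tmp >> 52;       /* exponent delta, reused.  */
      return ix - ((uint64_t) k << 52);      /* == iz_old (ix, off).  */
    }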
@@ -199,40 +224,85 @@ sv_log_inline (svbool_t pg, svuint64_t ix, svfloat64_t *tail)
      |z/c - 1| < 1/N, so r = z/c - 1 is exactly representible.  */
   svfloat64_t r = svmad_x (pg, z, invc, -1.0);
   /* k*Ln2 + log(c) + r.  */
-  svfloat64_t t1 = svmla_x (pg, logc, kd, __v_pow_log_data.ln2_hi);
+  svfloat64_t ln2_hilo = svld1rq_f64 (svptrue_b64 (), &d->ln2_hi);
+  svfloat64_t t1 = svmla_lane_f64 (logc, kd, ln2_hilo, 0);
   svfloat64_t t2 = svadd_x (pg, t1, r);
-  svfloat64_t lo1 = svmla_x (pg, logctail, kd, __v_pow_log_data.ln2_lo);
+  svfloat64_t lo1 = svmla_lane_f64 (logctail, kd, ln2_hilo, 1);
   svfloat64_t lo2 = svadd_x (pg, svsub_x (pg, t1, t2), r);
 
   /* Evaluation is optimized assuming superscalar pipelined execution.  */
-  svfloat64_t ar = svmul_x (pg, r, -0.5); /* A[0] = -0.5.  */
-  svfloat64_t ar2 = svmul_x (pg, r, ar);
-  svfloat64_t ar3 = svmul_x (pg, r, ar2);
+  svfloat64_t log_c02 = svld1rq_f64 (svptrue_b64 (), &d->log_c0);
+  svfloat64_t ar = svmul_lane_f64 (r, log_c02, 0);
+  svfloat64_t ar2 = svmul_x (svptrue_b64 (), r, ar);
+  svfloat64_t ar3 = svmul_x (svptrue_b64 (), r, ar2);
   /* k*Ln2 + log(c) + r + A[0]*r*r.  */
   svfloat64_t hi = svadd_x (pg, t2, ar2);
-  svfloat64_t lo3 = svmla_x (pg, svneg_x (pg, ar2), ar, r);
+  svfloat64_t lo3 = svmls_x (pg, ar2, ar, r);
   svfloat64_t lo4 = svadd_x (pg, svsub_x (pg, t2, hi), ar2);
   /* p = log1p(r) - r - A[0]*r*r.  */
   /* p = (ar3 * (A[1] + r * A[2] + ar2 * (A[3] + r * A[4] + ar2 * (A[5] + r *
      A[6])))).  */
-  svfloat64_t a56 = svmla_x (pg, sv_f64 (A[5]), r, A[6]);
-  svfloat64_t a34 = svmla_x (pg, sv_f64 (A[3]), r, A[4]);
-  svfloat64_t a12 = svmla_x (pg, sv_f64 (A[1]), r, A[2]);
+  svfloat64_t log_c46 = svld1rq_f64 (svptrue_b64 (), &d->log_c4);
+  svfloat64_t a56 = svmla_lane_f64 (sv_f64 (d->log_c5), r, log_c46, 1);
+  svfloat64_t a34 = svmla_lane_f64 (sv_f64 (d->log_c3), r, log_c46, 0);
+  svfloat64_t a12 = svmla_lane_f64 (sv_f64 (d->log_c1), r, log_c02, 1);
   svfloat64_t p = svmla_x (pg, a34, ar2, a56);
   p = svmla_x (pg, a12, ar2, p);
-  p = svmul_x (pg, ar3, p);
+  p = svmul_x (svptrue_b64 (), ar3, p);
   svfloat64_t lo = svadd_x (
-      pg, svadd_x (pg, svadd_x (pg, svadd_x (pg, lo1, lo2), lo3), lo4), p);
+      pg, svadd_x (pg, svsub_x (pg, svadd_x (pg, lo1, lo2), lo3), lo4), p);
   svfloat64_t y = svadd_x (pg, hi, lo);
   *tail = svadd_x (pg, svsub_x (pg, hi, y), lo);
   return y;
 }
 
+static inline svfloat64_t
+sv_exp_core (svbool_t pg, svfloat64_t x, svfloat64_t xtail,
+             svuint64_t sign_bias, svfloat64_t *tmp, svuint64_t *sbits,
+             svuint64_t *ki, const struct data *d)
+{
+  /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)].  */
+  /* x = ln2/N*k + r, with int k and r in [-ln2/2N, ln2/2N].  */
+  svfloat64_t n_over_ln2_and_c2 = svld1rq_f64 (svptrue_b64 (), &d->n_over_ln2);
+  svfloat64_t z = svmul_lane_f64 (x, n_over_ln2_and_c2, 0);
+  /* z - kd is in [-1, 1] in non-nearest rounding modes.  */
+  svfloat64_t kd = svrinta_x (pg, z);
+  *ki = svreinterpret_u64 (svcvt_s64_x (pg, kd));
+  svfloat64_t ln2_over_n_hilo
+      = svld1rq_f64 (svptrue_b64 (), &d->ln2_over_n_hi);
+  svfloat64_t r = x;
+  r = svmls_lane_f64 (r, kd, ln2_over_n_hilo, 0);
+  r = svmls_lane_f64 (r, kd, ln2_over_n_hilo, 1);
+  /* The code assumes 2^-200 < |xtail| < 2^-8/N.  */
+  r = svadd_x (pg, r, xtail);
+  /* 2^(k/N) ~= scale.  */
+  svuint64_t idx = svand_x (pg, *ki, N_EXP - 1);
+  svuint64_t top
+      = svlsl_x (pg, svadd_x (pg, *ki, sign_bias), 52 - V_POW_EXP_TABLE_BITS);
+  /* This is only a valid scale when -1023*N < k < 1024*N.  */
+  *sbits = svld1_gather_index (pg, __v_pow_exp_data.sbits, idx);
+  *sbits = svadd_x (pg, *sbits, top);
+  /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (exp(r) - 1).  */
+  svfloat64_t r2 = svmul_x (svptrue_b64 (), r, r);
+  *tmp = svmla_lane_f64 (sv_f64 (d->exp_c1), r, n_over_ln2_and_c2, 1);
+  *tmp = svmla_x (pg, sv_f64 (d->exp_c0), r, *tmp);
+  *tmp = svmla_x (pg, r, r2, *tmp);
+  svfloat64_t scale = svreinterpret_f64 (*sbits);
+  /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
+     is no spurious underflow here even without fma.  */
+  z = svmla_x (pg, scale, scale, *tmp);
+  return z;
+}
+
 /* Computes sign*exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|.
    The sign_bias argument is SignBias or 0 and sets the sign to -1 or 1.  */
 static inline svfloat64_t
 sv_exp_inline (svbool_t pg, svfloat64_t x, svfloat64_t xtail,
-               svuint64_t sign_bias)
+               svuint64_t sign_bias, const struct data *d)
 {
   /* 3 types of special cases: tiny (uflow and spurious uflow), huge (oflow)
      and other cases of large values of x (scale * (1 + TMP) oflow).  */
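These hunks are where the struct layout pays off: constants that are used together are stored as adjacent pairs (ln2_hi/ln2_lo, log_c0/log_c2, n_over_ln2/exp_c2, ...), so a single LD1RQD loads both and replicates them into every 128-bit quadword, and the indexed FMLA/FMUL forms then pick lane 0 or 1 without a predicate or a broadcast register. A standalone sketch of the pairing, with hypothetical constant values rather than the patch's:

    #include <arm_sve.h>

    static const struct { double c0, c1; } pair = { 0x1.8p-1, 0x1.4p-2 };

    /* One load feeds two multiply-accumulates; the lane index selects
       which double of the replicated 128-bit pair each FMLA uses.  */
    svfloat64_t
    two_fmlas (svfloat64_t acc0, svfloat64_t acc1, svfloat64_t r)
    {
      svfloat64_t c01 = svld1rq_f64 (svptrue_b64 (), &pair.c0);
      acc0 = svmla_lane_f64 (acc0, r, c01, 0); /* acc0 += r * c0.  */
      acc1 = svmla_lane_f64 (acc1, r, c01, 1); /* acc1 += r * c1.  */
      return svadd_x (svptrue_b64 (), acc0, acc1);
    }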
@@ -240,73 +310,46 @@ sv_exp_inline (svbool_t pg, svfloat64_t x, svfloat64_t xtail,
   /* |x| is large (|x| >= 512) or tiny (|x| <= 0x1p-54).  */
   svbool_t uoflow = svcmpge (pg, svsub_x (pg, abstop, SmallExp), ThresExp);
-  /* Conditions special, uflow and oflow are all expressed as uoflow &&
-     something, hence do not bother computing anything if no lane in uoflow is
-     true.  */
-  svbool_t special = svpfalse_b ();
-  svbool_t uflow = svpfalse_b ();
-  svbool_t oflow = svpfalse_b ();
+  svfloat64_t tmp;
+  svuint64_t sbits, ki;
   if (__glibc_unlikely (svptest_any (pg, uoflow)))
     {
+      svfloat64_t z
+          = sv_exp_core (pg, x, xtail, sign_bias, &tmp, &sbits, &ki, d);
       /* |x| is tiny (|x| <= 0x1p-54).  */
-      uflow = svcmpge (pg, svsub_x (pg, abstop, SmallExp), 0x80000000);
+      svbool_t uflow
+          = svcmpge (pg, svsub_x (pg, abstop, SmallExp), 0x80000000);
       uflow = svand_z (pg, uoflow, uflow);
       /* |x| is huge (|x| >= 1024).  */
-      oflow = svcmpge (pg, abstop, HugeExp);
+      svbool_t oflow = svcmpge (pg, abstop, HugeExp);
       oflow = svand_z (pg, uoflow, svbic_z (pg, oflow, uflow));
       /* For large |x| values (512 < |x| < 1024) scale * (1 + TMP) can overflow
         or underflow.  */
-      special = svbic_z (pg, uoflow, svorr_z (pg, uflow, oflow));
+      svbool_t special = svbic_z (pg, uoflow, svorr_z (pg, uflow, oflow));
+
+      /* Update result with special and large cases.  */
+      z = sv_call_specialcase (tmp, sbits, ki, z, special);
+
+      /* Handle underflow and overflow.  */
+      svbool_t x_is_neg = svcmplt (pg, x, 0);
+      svuint64_t sign_mask
+          = svlsl_x (pg, sign_bias, 52 - V_POW_EXP_TABLE_BITS);
+      svfloat64_t res_uoflow
+          = svsel (x_is_neg, sv_f64 (0.0), sv_f64 (INFINITY));
+      res_uoflow = svreinterpret_f64 (
+          svorr_x (pg, svreinterpret_u64 (res_uoflow), sign_mask));
+
+      /* Avoid spurious underflow for tiny x.  */
+      svfloat64_t res_spurious_uflow
+          = svreinterpret_f64 (svorr_x (pg, sign_mask, 0x3ff0000000000000));
+
+      z = svsel (oflow, res_uoflow, z);
+      z = svsel (uflow, res_spurious_uflow, z);
+      return z;
     }
 
-  /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)].  */
-  /* x = ln2/N*k + r, with int k and r in [-ln2/2N, ln2/2N].  */
-  svfloat64_t z = svmul_x (pg, x, __v_pow_exp_data.n_over_ln2);
-  /* z - kd is in [-1, 1] in non-nearest rounding modes.  */
-  svfloat64_t shift = sv_f64 (__v_pow_exp_data.shift);
-  svfloat64_t kd = svadd_x (pg, z, shift);
-  svuint64_t ki = svreinterpret_u64 (kd);
-  kd = svsub_x (pg, kd, shift);
-  svfloat64_t r = x;
-  r = svmls_x (pg, r, kd, __v_pow_exp_data.ln2_over_n_hi);
-  r = svmls_x (pg, r, kd, __v_pow_exp_data.ln2_over_n_lo);
-  /* The code assumes 2^-200 < |xtail| < 2^-8/N.  */
-  r = svadd_x (pg, r, xtail);
-  /* 2^(k/N) ~= scale.  */
-  svuint64_t idx = svand_x (pg, ki, N_EXP - 1);
-  svuint64_t top
-      = svlsl_x (pg, svadd_x (pg, ki, sign_bias), 52 - V_POW_EXP_TABLE_BITS);
-  /* This is only a valid scale when -1023*N < k < 1024*N.  */
-  svuint64_t sbits = svld1_gather_index (pg, __v_pow_exp_data.sbits, idx);
-  sbits = svadd_x (pg, sbits, top);
-  /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (exp(r) - 1).  */
-  svfloat64_t r2 = svmul_x (pg, r, r);
-  svfloat64_t tmp = svmla_x (pg, sv_f64 (C[1]), r, C[2]);
-  tmp = svmla_x (pg, sv_f64 (C[0]), r, tmp);
-  tmp = svmla_x (pg, r, r2, tmp);
-  svfloat64_t scale = svreinterpret_f64 (sbits);
-  /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
-     is no spurious underflow here even without fma.  */
-  z = svmla_x (pg, scale, scale, tmp);
-  /* Update result with special and large cases.  */
-  if (__glibc_unlikely (svptest_any (pg, special)))
-    z = sv_call_specialcase (tmp, sbits, ki, z, special);
-  /* Handle underflow and overflow.  */
-  svuint64_t sign_bit = svlsr_x (pg, svreinterpret_u64 (x), 63);
-  svbool_t x_is_neg = svcmpne (pg, sign_bit, 0);
-  svuint64_t sign_mask = svlsl_x (pg, sign_bias, 52 - V_POW_EXP_TABLE_BITS);
-  svfloat64_t res_uoflow = svsel (x_is_neg, sv_f64 (0.0), sv_f64 (INFINITY));
-  res_uoflow = svreinterpret_f64 (
-      svorr_x (pg, svreinterpret_u64 (res_uoflow), sign_mask));
-  z = svsel (oflow, res_uoflow, z);
-  /* Avoid spurious underflow for tiny x.  */
-  svfloat64_t res_spurious_uflow
-      = svreinterpret_f64 (svorr_x (pg, sign_mask, 0x3ff0000000000000));
-  z = svsel (uflow, res_spurious_uflow, z);
-  return z;
+  return sv_exp_core (pg, x, xtail, sign_bias, &tmp, &sbits, &ki, d);
 }
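Factoring the shared computation into sv_exp_core and returning early is what removes the spills: the old version computed the special-case predicates up front and kept them, together with tmp, sbits and ki, live across the whole polynomial evaluation. The shape of the transformation, as a scalar sketch in which core and the fix-up are placeholders rather than the real math:

    /* Hypothetical stand-in for sv_exp_core: computes the result and
       exposes the intermediate the slow path needs.  */
    static double
    core (double x, double *tmp)
    {
      *tmp = x * 0x1p-10; /* placeholder polynomial.  */
      return 1.0 + *tmp;
    }

    double
    exp_shape (double x, int any_special)
    {
      double tmp;
      if (__builtin_expect (any_special, 0))
        {
          double z = core (x, &tmp);
          /* ... fix up overflow/underflow cases using tmp ... */
          return z;
        }
      /* Fast path: no special-case state is live here, so nothing
         needs to be spilled around the branch.  */
      return core (x, &tmp);
    }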
static inline double static inline double
@ -341,47 +384,39 @@ pow_sc (double x, double y)
svfloat64_t SV_NAME_D2 (pow) (svfloat64_t x, svfloat64_t y, const svbool_t pg) svfloat64_t SV_NAME_D2 (pow) (svfloat64_t x, svfloat64_t y, const svbool_t pg)
{ {
const struct data *d = ptr_barrier (&data);
/* This preamble handles special case conditions used in the final scalar /* This preamble handles special case conditions used in the final scalar
fallbacks. It also updates ix and sign_bias, that are used in the core fallbacks. It also updates ix and sign_bias, that are used in the core
computation too, i.e., exp( y * log (x) ). */ computation too, i.e., exp( y * log (x) ). */
svuint64_t vix0 = svreinterpret_u64 (x); svuint64_t vix0 = svreinterpret_u64 (x);
svuint64_t viy0 = svreinterpret_u64 (y); svuint64_t viy0 = svreinterpret_u64 (y);
svuint64_t vtopx0 = svlsr_x (svptrue_b64 (), vix0, 52);
/* Negative x cases. */ /* Negative x cases. */
svuint64_t sign_bit = svlsr_m (pg, vix0, 63); svbool_t xisneg = svcmplt (pg, x, 0);
svbool_t xisneg = svcmpeq (pg, sign_bit, 1);
/* Set sign_bias and ix depending on sign of x and nature of y. */ /* Set sign_bias and ix depending on sign of x and nature of y. */
svbool_t yisnotint_xisneg = svpfalse_b (); svbool_t yint_or_xpos = pg;
svuint64_t sign_bias = sv_u64 (0); svuint64_t sign_bias = sv_u64 (0);
svuint64_t vix = vix0; svuint64_t vix = vix0;
svuint64_t vtopx1 = vtopx0;
if (__glibc_unlikely (svptest_any (pg, xisneg))) if (__glibc_unlikely (svptest_any (pg, xisneg)))
{ {
/* Determine nature of y. */ /* Determine nature of y. */
yisnotint_xisneg = sv_isnotint (xisneg, y); yint_or_xpos = sv_isint (xisneg, y);
svbool_t yisint_xisneg = sv_isint (xisneg, y);
svbool_t yisodd_xisneg = sv_isodd (xisneg, y); svbool_t yisodd_xisneg = sv_isodd (xisneg, y);
/* ix set to abs(ix) if y is integer. */ /* ix set to abs(ix) if y is integer. */
vix = svand_m (yisint_xisneg, vix0, 0x7fffffffffffffff); vix = svand_m (yint_or_xpos, vix0, 0x7fffffffffffffff);
vtopx1 = svand_m (yisint_xisneg, vtopx0, 0x7ff);
/* Set to SignBias if x is negative and y is odd. */ /* Set to SignBias if x is negative and y is odd. */
sign_bias = svsel (yisodd_xisneg, sv_u64 (SignBias), sv_u64 (0)); sign_bias = svsel (yisodd_xisneg, sv_u64 (SignBias), sv_u64 (0));
} }
/* Special cases of x or y: zero, inf and nan. */
svbool_t xspecial = sv_zeroinfnan (pg, vix0);
svbool_t yspecial = sv_zeroinfnan (pg, viy0);
svbool_t special = svorr_z (pg, xspecial, yspecial);
/* Small cases of x: |x| < 0x1p-126. */ /* Small cases of x: |x| < 0x1p-126. */
svuint64_t vabstopx0 = svand_x (pg, vtopx0, 0x7ff); svbool_t xsmall = svaclt (yint_or_xpos, x, SmallBoundX);
svbool_t xsmall = svcmplt (pg, vabstopx0, SmallPowX); if (__glibc_unlikely (svptest_any (yint_or_xpos, xsmall)))
if (__glibc_unlikely (svptest_any (pg, xsmall)))
{ {
/* Normalize subnormal x so exponent becomes negative. */ /* Normalize subnormal x so exponent becomes negative. */
svbool_t topx_is_null = svcmpeq (xsmall, vtopx1, 0); svuint64_t vtopx = svlsr_x (svptrue_b64 (), vix, 52);
svbool_t topx_is_null = svcmpeq (xsmall, vtopx, 0);
svuint64_t vix_norm = svreinterpret_u64 (svmul_m (xsmall, x, 0x1p52)); svuint64_t vix_norm = svreinterpret_u64 (svmul_m (xsmall, x, 0x1p52));
vix_norm = svand_m (xsmall, vix_norm, 0x7fffffffffffffff); vix_norm = svand_m (xsmall, vix_norm, 0x7fffffffffffffff);
@@ -391,20 +426,24 @@ svfloat64_t SV_NAME_D2 (pow) (svfloat64_t x, svfloat64_t y, const svbool_t pg)
 
   /* y_hi = log(ix, &y_lo).  */
   svfloat64_t vlo;
-  svfloat64_t vhi = sv_log_inline (pg, vix, &vlo);
+  svfloat64_t vhi = sv_log_inline (yint_or_xpos, vix, &vlo, d);
 
   /* z = exp(y_hi, y_lo, sign_bias).  */
-  svfloat64_t vehi = svmul_x (pg, y, vhi);
-  svfloat64_t velo = svmul_x (pg, y, vlo);
-  svfloat64_t vemi = svmls_x (pg, vehi, y, vhi);
-  velo = svsub_x (pg, velo, vemi);
-  svfloat64_t vz = sv_exp_inline (pg, vehi, velo, sign_bias);
+  svfloat64_t vehi = svmul_x (svptrue_b64 (), y, vhi);
+  svfloat64_t vemi = svmls_x (yint_or_xpos, vehi, y, vhi);
+  svfloat64_t velo = svnmls_x (yint_or_xpos, vemi, y, vlo);
+  svfloat64_t vz = sv_exp_inline (yint_or_xpos, vehi, velo, sign_bias, d);
 
   /* Cases of finite y and finite negative x.  */
-  vz = svsel (yisnotint_xisneg, sv_f64 (__builtin_nan ("")), vz);
+  vz = svsel (yint_or_xpos, vz, sv_f64 (__builtin_nan ("")));
+
+  /* Special cases of x or y: zero, inf and nan.  */
+  svbool_t xspecial = sv_zeroinfnan (svptrue_b64 (), vix0);
+  svbool_t yspecial = sv_zeroinfnan (svptrue_b64 (), viy0);
+  svbool_t special = svorr_z (svptrue_b64 (), xspecial, yspecial);
 
   /* Cases of zero/inf/nan x or y.  */
-  if (__glibc_unlikely (svptest_any (pg, special)))
+  if (__glibc_unlikely (svptest_any (svptrue_b64 (), special)))
     vz = sv_call2_f64 (pow_sc, x, y, vz, special);
 
   return vz;
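The rewritten product y * (vhi + vlo) keeps the same extended-precision structure but folds the old svmul_x + svsub_x pair into a single FNMLS. A scalar model with fma showing what vehi/vemi/velo compute; in the SVE code the error term is carried negated through FMLS and folded back in by FNMLS:

    #include <math.h>

    /* ehi + elo ~= y * (hi + lo): the first fma recovers the exact
       rounding error of y * hi, the second folds it into y * lo.  */
    static void
    mul_dd (double y, double hi, double lo, double *ehi, double *elo)
    {
      *ehi = y * hi;
      double emi = fma (y, hi, -*ehi); /* error of the first multiply.  */
      *elo = fma (y, lo, emi);
    }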