//===-- Single-precision sin function -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "src/math/sinf.h"
#include "sincosf_utils.h"
#include "src/__support/FPUtil/BasicOperations.h"
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/PolyEval.h"
#include "src/__support/FPUtil/except_value_utils.h"
#include "src/__support/FPUtil/multiply_add.h"
#include "src/__support/common.h"
#include <errno.h>
#if defined(LIBC_TARGET_HAS_FMA)
#include "range_reduction_fma.h"
// using namespace __llvm_libc::fma;
using __llvm_libc::fma::N_EXCEPTS;
using __llvm_libc::fma::SinfExcepts;
#else
#include "range_reduction.h"
// using namespace __llvm_libc::generic;
using __llvm_libc::generic::N_EXCEPTS;
using __llvm_libc::generic::SinfExcepts;
#endif
namespace __llvm_libc {
LLVM_LIBC_FUNCTION(float, sinf, (float x)) {
using FPBits = typename fputil::FPBits<float>;
FPBits xbits(x);
uint32_t x_u = xbits.uintval();
uint32_t x_abs = x_u & 0x7fff'ffffU;
double xd = static_cast<double>(x);
// Range reduction:
// For |x| > pi/16, we perform range reduction as follows:
// Find k and y such that:
// x = (k + y) * pi/16
// k is an integer
// |y| < 0.5
// For the small range (|x| < 2^46 when FMA instructions are available, 2^22
// otherwise), this is done by computing:
//   k = round(x * 16/pi)
//   y = x * 16/pi - k
// For the large range, we omit the leading parts of 16/pi whose exact
// products with x are integer multiples of 32, since
//   sin((k + y + 32*i) * pi/16) = sin(x + i * 2pi) = sin(x).
//
// When FMA instructions are not available, we store the digits of 16/pi in
// chunks of 28-bit precision.  Since x has at most 24 significand bits, each
// product x * SIXTEEN_OVER_PI_28[i] needs at most 24 + 28 = 52 bits and is
// therefore exact in double precision.
// When FMA instructions are available, we simply store the digits of 16/pi
// in chunks of doubles (53 bits of precision).
// So when multiplying by the largest values of single precision, the
// resulting output should be correct up to 2^(-208 + 128) ~ 2^-80. By the
// worst-case analysis of range reduction, |y| >= 2^-38, so this should give
// us more than 40 bits of accuracy. For the worst-case estimation of range
// reduction, see for instance:
//   Elementary Functions by J-M. Muller, Chapter 11,
//   Handbook of Floating-Point Arithmetic by J-M. Muller et al.,
//   Chapter 10.2.
//
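// A minimal sketch of the small-range reduction step (illustrative only; the
// actual implementation, including the multi-part splitting of 16/pi, lives
// in range_reduction_fma.h / range_reduction.h, and a round-to-nearest helper
// such as fputil::nearest_integer is assumed to be available):
//   constexpr double SIXTEEN_OVER_PI_D = 0x1.45f306dc9c883p+2; // 16/pi
//   double kd = fputil::nearest_integer(xd * SIXTEEN_OVER_PI_D);
//   double y = fputil::multiply_add(xd, SIXTEEN_OVER_PI_D, -kd); // |y| <= 0.5
//   int k = static_cast<int>(kd);
//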
// Once k and y are computed, we then deduce the answer by the sine of sum
// formula:
// sin(x) = sin((k + y)*pi/16)
// = sin(y*pi/16) * cos(k*pi/16) + cos(y*pi/16) * sin(k*pi/16)
// The values of sin(k*pi/16) and cos(k*pi/16) for k = 0..31 are precomputed
// and stored in an array of 32 doubles.  sin(y*pi/16) and cos(y*pi/16) are
// computed with degree-7 and degree-8 minimax polynomials, respectively,
// generated by Sollya.
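// An illustrative sketch of the k-table lookup (the table name
// SIN_K_PI_OVER_16 is hypothetical; the actual lookup is done inside
// sincosf_eval in sincosf_utils.h):
//   double sin_k = SIN_K_PI_OVER_16[k & 31];       // sin(k*pi/16), period 32
//   double cos_k = SIN_K_PI_OVER_16[(k + 8) & 31]; // cos(t) = sin(t + pi/2)
//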
// |x| <= pi/16
if (unlikely(x_abs <= 0x3e49'0fdbU)) {
// |x| < 0x1.d12ed2p-12f
if (unlikely(x_abs < 0x39e8'9769U)) {
if (unlikely(x_abs == 0U)) {
// For signed zeros.
return x;
}
// When |x| < 2^-12, the relative error of the approximation sin(x) ~ x
// is:
// |sin(x) - x| / |sin(x)| < |x^3| / (6|x|)
// = x^2 / 6
// < 2^-25
// < epsilon(1)/2.
// So the correctly rounded values of sin(x) are:
// = x - sign(x)*eps(x) if rounding mode = FE_TOWARDZERO,
// or (rounding mode = FE_UPWARD and x is
// negative),
// = x otherwise.
// To simplify the rounding decision and make it more efficient, we use
// fma(x, -2^-25, x) instead.
// An exhaustive test shows that this formula works correctly for all
// rounding modes up to |x| < 0x1.c555dep-11f.
// Note: to use the formula x - 2^-25*x to decide the correct rounding, we
// do need fma(x, -2^-25, x) to prevent underflow caused by -2^-25*x when
// |x| < 2^-125. For targets without FMA instructions, we simply use
// double for intermediate results as it is more efficient than using an
// emulated version of FMA.
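// For example, with x = 0x1.0p-13f and rounding toward zero, the exact
// sin(x) lies just below x, and fma(x, -0x1.0p-25f, x) = x*(1 - 2^-25)
// = 2^-13 - 2^-38 also rounds down to the float immediately below x
// (2^-13 - 2^-37), as required.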
#if defined(LIBC_TARGET_HAS_FMA)
return fputil::multiply_add(x, -0x1.0p-25f, x);
#else
return static_cast<float>(fputil::multiply_add(xd, -0x1.0p-25, xd));
#endif // LIBC_TARGET_HAS_FMA
}
// 0x1.d12ed2p-12f <= |x| <= pi/16.
double xsq = xd * xd;
// Degree-9 polynomial approximation:
// sin(x) ~ x + a_3 x^3 + a_5 x^5 + a_7 x^7 + a_9 x^9
// = x (1 + a_3 x^2 + ... + a_9 x^8)
// = x * P(x^2)
// generated by Sollya with the following commands:
// > display = hexadecimal;
// > Q = fpminimax(sin(x)/x, [|0, 2, 4, 6, 8|], [|1, D...|], [0, pi/16]);
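// fputil::polyeval evaluates the coefficients below with nested multiply-adds
// (Horner's scheme), so the call computes, with u = xsq = x^2:
//   P(u) = 1 + u*(-0x1.55555555554c6p-3 + u*(0x1.1111111085e65p-7 +
//          u*(-0x1.a019f70fb4d4fp-13 + u*0x1.718d179815e74p-19)))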
double result =
fputil::polyeval(xsq, 1.0, -0x1.55555555554c6p-3, 0x1.1111111085e65p-7,
-0x1.a019f70fb4d4fp-13, 0x1.718d179815e74p-19);
return xd * result;
}
using ExceptChecker = typename fputil::ExceptionChecker<float, N_EXCEPTS>;
{
float result;
if (ExceptChecker::check_odd_func(SinfExcepts, x_abs, xbits.get_sign(),
result))
return result;
}
if (unlikely(x_abs >= 0x7f80'0000U)) {
if (x_abs == 0x7f80'0000U) {
errno = EDOM;
fputil::set_except(FE_INVALID);
}
return x +
FPBits::build_nan(1 << (fputil::MantissaWidth<float>::VALUE - 1));
}
// Combine the results with the sine of sum formula:
// sin(x) = sin((k + y)*pi/16)
// = sin(y*pi/16) * cos(k*pi/16) + cos(y*pi/16) * sin(k*pi/16)
// = sin_y * cos_k + (1 + cosm1_y) * sin_k
// = sin_y * cos_k + (cosm1_y * sin_k + sin_k)
double sin_k, cos_k, sin_y, cosm1_y;
sincosf_eval(xd, x_abs, sin_k, cos_k, sin_y, cosm1_y);
return fputil::multiply_add(sin_y, cos_k,
fputil::multiply_add(cosm1_y, sin_k, sin_k));
}
} // namespace __llvm_libc