/*
 * Double-precision vector cos function.
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "mathlib.h"
#include "v_math.h"

static const struct data
{
  float64x2_t poly[7];
  float64x2_t range_val, shift, inv_pi, half_pi, pi_1, pi_2, pi_3;
} data = {
  /* Worst-case error is 3.3 ulp in [-pi/2, pi/2].  */
  .poly = { V2 (-0x1.555555555547bp-3), V2 (0x1.1111111108a4dp-7),
            V2 (-0x1.a01a019936f27p-13), V2 (0x1.71de37a97d93ep-19),
            V2 (-0x1.ae633919987c6p-26), V2 (0x1.60e277ae07cecp-33),
            V2 (-0x1.9e9540300a1p-41) },
  .inv_pi = V2 (0x1.45f306dc9c883p-2),
  .half_pi = V2 (0x1.921fb54442d18p+0),
  .pi_1 = V2 (0x1.921fb54442d18p+1),
  .pi_2 = V2 (0x1.1a62633145c06p-53),
  .pi_3 = V2 (0x1.c1cd129024e09p-106),
  .shift = V2 (0x1.8p52),
  .range_val = V2 (0x1p23)
};

#define C(i) d->poly[i]

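/* Apply the quadrant-parity sign to y, then defer any lanes flagged in cmp to
   the scalar cos routine via v_call_f64.  */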
static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t x, float64x2_t y, uint64x2_t odd, uint64x2_t cmp)
{
  y = vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
  return v_call_f64 (cos, x, y, cmp);
}

float64x2_t VPCS_ATTR V_NAME_D1 (cos) (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);
  float64x2_t n, r, r2, r3, r4, t1, t2, t3, y;
  uint64x2_t odd, cmp;

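  /* Detect lanes that need the scalar fallback.  With SIMD exceptions enabled,
     |x| is compared as an unsigned integer so that large, infinite and NaN
     inputs are all flagged without a floating-point comparison; otherwise a
     single absolute-value compare against range_val is used.  */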
#if WANT_SIMD_EXCEPT
  r = vabsq_f64 (x);
  cmp = vcgeq_u64 (vreinterpretq_u64_f64 (r),
                   vreinterpretq_u64_f64 (d->range_val));
  if (unlikely (v_any_u64 (cmp)))
    /* If fenv exceptions are to be triggered correctly, set any special lanes
       to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by
       special-case handler later.  */
    r = vbslq_f64 (cmp, v_f64 (1.0), r);
#else
  cmp = vcageq_f64 (x, d->range_val);
  r = x;
#endif

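  /* With k = rint((|x| + pi/2)/pi) and n = k - 0.5, the reduced argument
     |x| - n*pi lies in [-pi/2, pi/2] and cos(|x|) = (-1)^k * sin(|x| - n*pi);
     the parity of k supplies the sign via the odd mask.  */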
  /* n = rint((|x|+pi/2)/pi) - 0.5.  */
  n = vfmaq_f64 (d->shift, d->inv_pi, vaddq_f64 (r, d->half_pi));
  odd = vshlq_n_u64 (vreinterpretq_u64_f64 (n), 63);
  n = vsubq_f64 (n, d->shift);
  n = vsubq_f64 (n, v_f64 (0.5));
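  /* The 0x1.8p52 shift forces the FMA result to round to the nearest integer k
     and leaves k in the low mantissa bits: bit 0 (the parity of k), moved to
     bit 63, becomes the sign mask odd.  Subtracting the shift and then 0.5
     recovers n = k - 0.5 as a double.  */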

  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2).  */
  r = vfmsq_f64 (r, d->pi_1, n);
  r = vfmsq_f64 (r, d->pi_2, n);
  r = vfmsq_f64 (r, d->pi_3, n);
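  /* pi is split as pi_1 + pi_2 + pi_3 so that the three FMAs subtract n*pi in
     extended precision, preserving the low bits of the reduced argument.  */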

  /* sin(r) poly approx.  */
  r2 = vmulq_f64 (r, r);
  r3 = vmulq_f64 (r2, r);
  r4 = vmulq_f64 (r2, r2);

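  /* Evaluate P(r2) with pairwise (Estrin-style) terms in r2 combined by Horner
     steps in r4, then form sin(r) ~= r + r^3 * P(r2).  */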
  t1 = vfmaq_f64 (C (4), C (5), r2);
  t2 = vfmaq_f64 (C (2), C (3), r2);
  t3 = vfmaq_f64 (C (0), C (1), r2);

  y = vfmaq_f64 (t1, C (6), r4);
  y = vfmaq_f64 (t2, y, r4);
  y = vfmaq_f64 (t3, y, r4);
  y = vfmaq_f64 (r, y, r3);

  if (unlikely (v_any_u64 (cmp)))
    return special_case (x, y, odd, cmp);
  return vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
}