// wide/f64x2_.rs

use super::*;

pick! {
  if #[cfg(target_feature="sse2")] {
    #[derive(Default, Clone, Copy, PartialEq)]
    #[repr(C, align(16))]
    pub struct f64x2 { sse: m128d }
  } else if #[cfg(target_feature="simd128")] {
    use core::arch::wasm32::*;

    #[derive(Clone, Copy)]
    #[repr(transparent)]
    pub struct f64x2 { simd: v128 }

    impl Default for f64x2 {
      fn default() -> Self {
        Self::splat(0.0)
      }
    }

    impl PartialEq for f64x2 {
      fn eq(&self, other: &Self) -> bool {
        u64x2_all_true(f64x2_eq(self.simd, other.simd))
      }
    }
  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
    use core::arch::aarch64::*;
    #[repr(C)]
    #[derive(Copy, Clone)]
    pub struct f64x2 { neon : float64x2_t }

    impl Default for f64x2 {
      #[inline]
      #[must_use]
      fn default() -> Self {
        unsafe { Self { neon: vdupq_n_f64(0.0)} }
      }
    }

    impl PartialEq for f64x2 {
      #[inline]
      #[must_use]
      fn eq(&self, other: &Self) -> bool {
        unsafe {
          let e = vceqq_f64(self.neon, other.neon);
          vgetq_lane_u64(e, 0) == u64::MAX && vgetq_lane_u64(e, 1) == u64::MAX
        }
      }

    }
  } else {
    #[derive(Default, Clone, Copy, PartialEq)]
    #[repr(C, align(16))]
    pub struct f64x2 { arr: [f64;2] }
  }
}

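// Builds a `pub const` f64x2 with both lanes set to `$f`. The value goes
// through the `ConstUnionHack128bit` union so the same macro works no matter
// which backend field (`sse`, `simd`, `neon`, or `arr`) is active.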
macro_rules! const_f64_as_f64x2 {
  ($i:ident, $f:expr) => {
    pub const $i: f64x2 =
      unsafe { ConstUnionHack128bit { f64a2: [$f; 2] }.f64x2 };
  };
}

impl f64x2 {
  const_f64_as_f64x2!(ONE, 1.0);
  const_f64_as_f64x2!(ZERO, 0.0);
  const_f64_as_f64x2!(HALF, 0.5);
  const_f64_as_f64x2!(E, core::f64::consts::E);
  const_f64_as_f64x2!(FRAC_1_PI, core::f64::consts::FRAC_1_PI);
  const_f64_as_f64x2!(FRAC_2_PI, core::f64::consts::FRAC_2_PI);
  const_f64_as_f64x2!(FRAC_2_SQRT_PI, core::f64::consts::FRAC_2_SQRT_PI);
  const_f64_as_f64x2!(FRAC_1_SQRT_2, core::f64::consts::FRAC_1_SQRT_2);
  const_f64_as_f64x2!(FRAC_PI_2, core::f64::consts::FRAC_PI_2);
  const_f64_as_f64x2!(FRAC_PI_3, core::f64::consts::FRAC_PI_3);
  const_f64_as_f64x2!(FRAC_PI_4, core::f64::consts::FRAC_PI_4);
  const_f64_as_f64x2!(FRAC_PI_6, core::f64::consts::FRAC_PI_6);
  const_f64_as_f64x2!(FRAC_PI_8, core::f64::consts::FRAC_PI_8);
  const_f64_as_f64x2!(LN_2, core::f64::consts::LN_2);
  const_f64_as_f64x2!(LN_10, core::f64::consts::LN_10);
  const_f64_as_f64x2!(LOG2_E, core::f64::consts::LOG2_E);
  const_f64_as_f64x2!(LOG10_E, core::f64::consts::LOG10_E);
  const_f64_as_f64x2!(LOG10_2, core::f64::consts::LOG10_2);
  const_f64_as_f64x2!(LOG2_10, core::f64::consts::LOG2_10);
  const_f64_as_f64x2!(PI, core::f64::consts::PI);
  const_f64_as_f64x2!(SQRT_2, core::f64::consts::SQRT_2);
  const_f64_as_f64x2!(TAU, core::f64::consts::TAU);
}

unsafe impl Zeroable for f64x2 {}
unsafe impl Pod for f64x2 {}

impl Add for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: add_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_add(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vaddq_f64(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0] + rhs.arr[0],
          self.arr[1] + rhs.arr[1],
        ]}
      }
    }
  }
}

impl Sub for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: sub_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_sub(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vsubq_f64(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0] - rhs.arr[0],
          self.arr[1] - rhs.arr[1],
        ]}
      }
    }
  }
}

impl Mul for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn mul(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: mul_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_mul(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vmulq_f64(self.neon, rhs.neon) }}
      } else {
        Self { arr: [
          self.arr[0] * rhs.arr[0],
          self.arr[1] * rhs.arr[1],
        ]}
      }
    }
  }
}

impl Div for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn div(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: div_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_div(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vdivq_f64(self.neon, rhs.neon) }}
      } else {
        Self { arr: [
          self.arr[0] / rhs.arr[0],
          self.arr[1] / rhs.arr[1],
        ]}
      }
    }
  }
}

impl Add<f64> for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: f64) -> Self::Output {
    self.add(Self::splat(rhs))
  }
}

impl Sub<f64> for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: f64) -> Self::Output {
    self.sub(Self::splat(rhs))
  }
}

impl Mul<f64> for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn mul(self, rhs: f64) -> Self::Output {
    self.mul(Self::splat(rhs))
  }
}

impl Div<f64> for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn div(self, rhs: f64) -> Self::Output {
    self.div(Self::splat(rhs))
  }
}

impl Add<f64x2> for f64 {
  type Output = f64x2;
  #[inline]
  #[must_use]
  fn add(self, rhs: f64x2) -> Self::Output {
    f64x2::splat(self).add(rhs)
  }
}

impl Sub<f64x2> for f64 {
  type Output = f64x2;
  #[inline]
  #[must_use]
  fn sub(self, rhs: f64x2) -> Self::Output {
    f64x2::splat(self).sub(rhs)
  }
}

impl Mul<f64x2> for f64 {
  type Output = f64x2;
  #[inline]
  #[must_use]
  fn mul(self, rhs: f64x2) -> Self::Output {
    f64x2::splat(self).mul(rhs)
  }
}

impl Div<f64x2> for f64 {
  type Output = f64x2;
  #[inline]
  #[must_use]
  fn div(self, rhs: f64x2) -> Self::Output {
    f64x2::splat(self).div(rhs)
  }
}

impl BitAnd for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitand(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitand_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_and(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(self.neon), vreinterpretq_u64_f64(rhs.neon))) }}
      } else {
        Self { arr: [
          f64::from_bits(self.arr[0].to_bits() & rhs.arr[0].to_bits()),
          f64::from_bits(self.arr[1].to_bits() & rhs.arr[1].to_bits()),
        ]}
      }
    }
  }
}

impl BitOr for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitor_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_or(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(self.neon), vreinterpretq_u64_f64(rhs.neon))) }}
      } else {
        Self { arr: [
          f64::from_bits(self.arr[0].to_bits() | rhs.arr[0].to_bits()),
          f64::from_bits(self.arr[1].to_bits() | rhs.arr[1].to_bits()),
        ]}
      }
    }
  }
}

impl BitXor for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitxor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitxor_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_xor(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(self.neon), vreinterpretq_u64_f64(rhs.neon))) }}
      } else {
        Self { arr: [
          f64::from_bits(self.arr[0].to_bits() ^ rhs.arr[0].to_bits()),
          f64::from_bits(self.arr[1].to_bits() ^ rhs.arr[1].to_bits()),
        ]}
      }
    }
  }
}

impl CmpEq for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_eq(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: cmp_eq_mask_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_eq(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vreinterpretq_f64_u64(vceqq_f64(self.neon, rhs.neon)) }}
      } else {
        Self { arr: [
          if self.arr[0] == rhs.arr[0] { f64::from_bits(u64::MAX) } else { 0.0 },
          if self.arr[1] == rhs.arr[1] { f64::from_bits(u64::MAX) } else { 0.0 },
        ]}
      }
    }
  }
}

impl CmpGe for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_ge(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: cmp_ge_mask_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_ge(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vreinterpretq_f64_u64(vcgeq_f64(self.neon, rhs.neon)) }}
      } else {
        Self { arr: [
          if self.arr[0] >= rhs.arr[0] { f64::from_bits(u64::MAX) } else { 0.0 },
          if self.arr[1] >= rhs.arr[1] { f64::from_bits(u64::MAX) } else { 0.0 },
        ]}
      }
    }
  }
}

impl CmpGt for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_gt(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { sse: cmp_op_mask_m128d::<{cmp_op!(GreaterThanOrdered)}>(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="sse2")] {
        Self { sse: cmp_gt_mask_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_gt(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vreinterpretq_f64_u64(vcgtq_f64(self.neon, rhs.neon)) }}
      } else {
        Self { arr: [
          if self.arr[0] > rhs.arr[0] { f64::from_bits(u64::MAX) } else { 0.0 },
          if self.arr[1] > rhs.arr[1] { f64::from_bits(u64::MAX) } else { 0.0 },
        ]}
      }
    }
  }
}

impl CmpNe for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_ne(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: cmp_neq_mask_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_ne(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vreinterpretq_f64_u64(vceqq_f64(self.neon, rhs.neon)) }.not() }
      } else {
        Self { arr: [
          if self.arr[0] != rhs.arr[0] { f64::from_bits(u64::MAX) } else { 0.0 },
          if self.arr[1] != rhs.arr[1] { f64::from_bits(u64::MAX) } else { 0.0 },
        ]}
      }
    }
  }
}

impl CmpLe for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_le(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: cmp_le_mask_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_le(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vreinterpretq_f64_u64(vcleq_f64(self.neon, rhs.neon)) }}
      } else {
        Self { arr: [
          if self.arr[0] <= rhs.arr[0] { f64::from_bits(u64::MAX) } else { 0.0 },
          if self.arr[1] <= rhs.arr[1] { f64::from_bits(u64::MAX) } else { 0.0 },
        ]}
      }
    }
  }
}

impl CmpLt for f64x2 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_lt(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: cmp_lt_mask_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_lt(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vreinterpretq_f64_u64(vcltq_f64(self.neon, rhs.neon)) }}
      } else {
        Self { arr: [
          if self.arr[0] < rhs.arr[0] { f64::from_bits(u64::MAX) } else { 0.0 },
          if self.arr[1] < rhs.arr[1] { f64::from_bits(u64::MAX) } else { 0.0 },
        ]}
      }
    }
  }
}

impl f64x2 {
  #[inline]
  #[must_use]
  pub fn new(array: [f64; 2]) -> Self {
    Self::from(array)
  }
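  /// Lanewise blend: where a lane of `self` (treated as an all-ones or
  /// all-zeros bit mask) is set, the lane of `t` is taken, otherwise the lane
  /// of `f`.
  ///
  /// A minimal sketch, assuming the crate's items are imported via
  /// `use wide::*;` so the comparison traits (e.g. `CmpLt`) are in scope:
  ///
  /// ```
  /// use wide::*;
  /// let mask = f64x2::from([1.0, 4.0]).cmp_lt(f64x2::from([2.0, 3.0]));
  /// let picked = mask.blend(f64x2::from([10.0, 20.0]), f64x2::from([30.0, 40.0]));
  /// assert_eq!(picked.to_array(), [10.0, 40.0]);
  /// ```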
  #[inline]
  #[must_use]
  pub fn blend(self, t: Self, f: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.1")] {
        Self { sse: blend_varying_m128d(f.sse, t.sse, self.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
      } else {
        generic_bit_blend(self, t, f)
      }
    }
  }
  #[inline]
  #[must_use]
  pub fn abs(self) -> Self {
    pick! {
      if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_abs(self.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vabsq_f64(self.neon) }}
      } else {
        let non_sign_bits = f64x2::from(f64::from_bits(i64::MAX as u64));
        self & non_sign_bits
      }
    }
  }

  /// Calculates the lanewise maximum of both vectors. This is a faster
  /// implementation than `max`, but it doesn't specify any behavior if NaNs are
  /// involved.
  #[inline]
  #[must_use]
  pub fn fast_max(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: max_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self {
          simd: f64x2_pmax(self.simd, rhs.simd),
        }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vmaxq_f64(self.neon, rhs.neon) }}
      } else {
        Self { arr: [
          if self.arr[0] < rhs.arr[0] { rhs.arr[0] } else { self.arr[0] },
          if self.arr[1] < rhs.arr[1] { rhs.arr[1] } else { self.arr[1] },
        ]}
      }
    }
  }

  /// Calculates the lanewise maximum of both vectors. If a lane is NaN in one
  /// input, the value from the other input is chosen for that lane. Use
  /// `fast_max` for a faster implementation that doesn't handle NaNs.
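  ///
  /// A minimal sketch of the NaN behavior, assuming the crate is used as
  /// `wide::f64x2`:
  ///
  /// ```
  /// use wide::f64x2;
  /// let a = f64x2::from([1.0, f64::NAN]);
  /// let b = f64x2::from([2.0, 5.0]);
  /// assert_eq!(a.max(b).to_array(), [2.0, 5.0]);
  /// ```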
  #[inline]
  #[must_use]
  pub fn max(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        // max_m128d seems to do rhs < self ? self : rhs. So if there's any NaN
        // involved, it chooses rhs, so we need to specifically check rhs for
        // NaN.
        rhs.is_nan().blend(self, Self { sse: max_m128d(self.sse, rhs.sse) })
      } else if #[cfg(target_feature="simd128")] {
        // WASM has two max intrinsics:
        // - max: This propagates NaN, that's the opposite of what we need.
        // - pmax: This is defined as self < rhs ? rhs : self, which basically
        //   chooses self if either is NaN.
        //
        // pmax is what we want, but we need to specifically check self for NaN.
        Self {
          simd: v128_bitselect(
            rhs.simd,
            f64x2_pmax(self.simd, rhs.simd),
            f64x2_ne(self.simd, self.simd), // NaN check
          )
        }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vmaxnmq_f64(self.neon, rhs.neon) }}
      } else {
        Self { arr: [
          self.arr[0].max(rhs.arr[0]),
          self.arr[1].max(rhs.arr[1]),
        ]}
      }
    }
  }

  /// Calculates the lanewise minimum of both vectors. This is a faster
  /// implementation than `min`, but it doesn't specify any behavior if NaNs are
  /// involved.
  #[inline]
  #[must_use]
  pub fn fast_min(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: min_m128d(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self {
          simd: f64x2_pmin(self.simd, rhs.simd),
        }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vminq_f64(self.neon, rhs.neon) }}
      } else {
        Self { arr: [
          if self.arr[0] < rhs.arr[0] { self.arr[0] } else { rhs.arr[0] },
          if self.arr[1] < rhs.arr[1] { self.arr[1] } else { rhs.arr[1] },
        ]}
      }
    }
  }

  /// Calculates the lanewise minimum of both vectors. If a lane is NaN in one
  /// input, the value from the other input is chosen for that lane. Use
  /// `fast_min` for a faster implementation that doesn't handle NaNs.
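  ///
  /// A minimal sketch of the NaN behavior, assuming the crate is used as
  /// `wide::f64x2`:
  ///
  /// ```
  /// use wide::f64x2;
  /// let a = f64x2::from([1.0, f64::NAN]);
  /// let b = f64x2::from([2.0, 5.0]);
  /// assert_eq!(a.min(b).to_array(), [1.0, 5.0]);
  /// ```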
  #[inline]
  #[must_use]
  pub fn min(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        // min_m128d seems to do rhs < self ? rhs : self. So if there's any NaN
        // involved, it chooses rhs, so we need to specifically check rhs for
        // NaN.
        rhs.is_nan().blend(self, Self { sse: min_m128d(self.sse, rhs.sse) })
      } else if #[cfg(target_feature="simd128")] {
        // WASM has two min intrinsics:
        // - min: This propagates NaN, that's the opposite of what we need.
        // - pmin: This is defined as rhs < self ? rhs : self, which basically
        //   chooses self if either is NaN.
        //
        // pmin is what we want, but we need to specifically check self for NaN.
        Self {
          simd: v128_bitselect(
            rhs.simd,
            f64x2_pmin(self.simd, rhs.simd),
            f64x2_ne(self.simd, self.simd), // NaN check
          )
        }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vminnmq_f64(self.neon, rhs.neon) }}
      } else {
        Self { arr: [
          self.arr[0].min(rhs.arr[0]),
          self.arr[1].min(rhs.arr[1]),
        ]}
      }
    }
  }

  #[inline]
  #[must_use]
  pub fn is_nan(self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: cmp_unord_mask_m128d(self.sse, self.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_ne(self.simd, self.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vreinterpretq_f64_u64(vceqq_f64(self.neon, self.neon)) }.not() }
      } else {
        Self { arr: [
          if self.arr[0].is_nan() { f64::from_bits(u64::MAX) } else { 0.0 },
          if self.arr[1].is_nan() { f64::from_bits(u64::MAX) } else { 0.0 },
        ]}
      }
    }
  }
  #[inline]
  #[must_use]
  pub fn is_finite(self) -> Self {
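    // Shifting left by one drops the sign bit, so the exponent field lands in
    // the top 11 bits. A lane is non-finite (inf or NaN) exactly when that
    // field is all ones, i.e. when `shift_u & shifted_exp_mask` equals the
    // mask; negating that comparison yields the "is finite" mask.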
    let shifted_exp_mask = u64x2::from(0xFFE0000000000000);
    let u: u64x2 = cast(self);
    let shift_u = u << 1_u64;
    let out = !(shift_u & shifted_exp_mask).cmp_eq(shifted_exp_mask);
    cast(out)
  }
  #[inline]
  #[must_use]
  pub fn is_inf(self) -> Self {
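    // Same trick as `is_finite`, but an infinity also has an all-zero
    // mantissa, so after the shift the whole value must equal the mask
    // exactly.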
    let shifted_inf = u64x2::from(0xFFE0000000000000);
    let u: u64x2 = cast(self);
    let shift_u = u << 1_u64;
    let out = (shift_u).cmp_eq(shifted_inf);
    cast(out)
  }

  #[inline]
  #[must_use]
  pub fn round(self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.1")] {
        Self { sse: round_m128d::<{round_op!(Nearest)}>(self.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_nearest(self.simd) }
      } else {
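        // Fallback rounding trick: 2^52 is the smallest f64 magnitude whose
        // ULP is exactly 1.0, so adding a sign-matched 2^52 and subtracting it
        // again rounds to the nearest integer (ties to even) without
        // branching. Values with |x| >= 2^52 are already integers, so the
        // add/subtract is a no-op for them.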
        let sign_mask = f64x2::from(-0.0);
        let magic = f64x2::from(f64::from_bits(0x43300000_00000000));
        let sign = self & sign_mask;
        let signed_magic = magic | sign;
        self + signed_magic - signed_magic
      }
    }
  }
  #[inline]
  #[must_use]
  pub fn round_int(self) -> i64x2 {
    let rounded: [f64; 2] = cast(self.round());
    cast([rounded[0] as i64, rounded[1] as i64])
  }
  #[inline]
  #[must_use]
  pub fn mul_add(self, m: Self, a: Self) -> Self {
    pick! {
      if #[cfg(all(target_feature="fma"))] {
        Self { sse: fused_mul_add_m128d(self.sse, m.sse, a.sse) }
      } else {
        (self * m) + a
      }
    }
  }

  #[inline]
  #[must_use]
  pub fn mul_sub(self, m: Self, a: Self) -> Self {
    pick! {
      if #[cfg(all(target_feature="fma"))] {
        Self { sse: fused_mul_sub_m128d(self.sse, m.sse, a.sse) }
      } else {
        (self * m) - a
      }
    }
  }

  #[inline]
  #[must_use]
  pub fn mul_neg_add(self, m: Self, a: Self) -> Self {
    pick! {
      if #[cfg(all(target_feature="fma"))] {
        Self { sse: fused_mul_neg_add_m128d(self.sse, m.sse, a.sse) }
      } else {
        a - (self * m)
      }
    }
  }

  #[inline]
  #[must_use]
  pub fn mul_neg_sub(self, m: Self, a: Self) -> Self {
    pick! {
      if #[cfg(all(target_feature="fma"))] {
        Self { sse: fused_mul_neg_sub_m128d(self.sse, m.sse, a.sse) }
      } else {
        -(self * m) - a
      }
    }
  }

  #[inline]
  #[must_use]
  pub fn flip_signs(self, signs: Self) -> Self {
    self ^ (signs & Self::from(-0.0))
  }

  #[inline]
  #[must_use]
  pub fn copysign(self, sign: Self) -> Self {
    let magnitude_mask = Self::from(f64::from_bits(u64::MAX >> 1));
    (self & magnitude_mask) | (sign & Self::from(-0.0))
  }

  #[allow(non_upper_case_globals)]
  #[inline]
  pub fn asin_acos(self) -> (Self, Self) {
    // Based on the Agner Fog "vector class library":
    // https://github.com/vectorclass/version2/blob/master/vectormath_trig.h
    const_f64_as_f64x2!(R4asin, 2.967721961301243206100E-3);
    const_f64_as_f64x2!(R3asin, -5.634242780008963776856E-1);
    const_f64_as_f64x2!(R2asin, 6.968710824104713396794E0);
    const_f64_as_f64x2!(R1asin, -2.556901049652824852289E1);
    const_f64_as_f64x2!(R0asin, 2.853665548261061424989E1);

    const_f64_as_f64x2!(S3asin, -2.194779531642920639778E1);
    const_f64_as_f64x2!(S2asin, 1.470656354026814941758E2);
    const_f64_as_f64x2!(S1asin, -3.838770957603691357202E2);
    const_f64_as_f64x2!(S0asin, 3.424398657913078477438E2);

    const_f64_as_f64x2!(P5asin, 4.253011369004428248960E-3);
    const_f64_as_f64x2!(P4asin, -6.019598008014123785661E-1);
    const_f64_as_f64x2!(P3asin, 5.444622390564711410273E0);
    const_f64_as_f64x2!(P2asin, -1.626247967210700244449E1);
    const_f64_as_f64x2!(P1asin, 1.956261983317594739197E1);
    const_f64_as_f64x2!(P0asin, -8.198089802484824371615E0);

    const_f64_as_f64x2!(Q4asin, -1.474091372988853791896E1);
    const_f64_as_f64x2!(Q3asin, 7.049610280856842141659E1);
    const_f64_as_f64x2!(Q2asin, -1.471791292232726029859E2);
    const_f64_as_f64x2!(Q1asin, 1.395105614657485689735E2);
    const_f64_as_f64x2!(Q0asin, -4.918853881490881290097E1);

    let xa = self.abs();

    let big = xa.cmp_ge(f64x2::splat(0.625));

    let x1 = big.blend(f64x2::splat(1.0) - xa, xa * xa);

    let x2 = x1 * x1;
    let x3 = x2 * x1;
    let x4 = x2 * x2;
    let x5 = x4 * x1;

    let do_big = big.any();
    let do_small = !big.all();

    let mut rx = f64x2::default();
    let mut sx = f64x2::default();
    let mut px = f64x2::default();
    let mut qx = f64x2::default();

    if do_big {
      rx = x3.mul_add(R3asin, x2 * R2asin)
        + x4.mul_add(R4asin, x1.mul_add(R1asin, R0asin));
      sx =
        x3.mul_add(S3asin, x4) + x2.mul_add(S2asin, x1.mul_add(S1asin, S0asin));
    }
    if do_small {
      px = x3.mul_add(P3asin, P0asin)
        + x4.mul_add(P4asin, x1 * P1asin)
        + x5.mul_add(P5asin, x2 * P2asin);
      qx = x4.mul_add(Q4asin, x5)
        + x3.mul_add(Q3asin, x1 * Q1asin)
        + x2.mul_add(Q2asin, Q0asin);
    };

    let vx = big.blend(rx, px);
    let wx = big.blend(sx, qx);

    let y1 = vx / wx * x1;

    let mut z1 = f64x2::default();
    let mut z2 = f64x2::default();
    if do_big {
      let xb = (x1 + x1).sqrt();
      z1 = xb.mul_add(y1, xb);
    }

    if do_small {
      z2 = xa.mul_add(y1, xa);
    }

    // asin
    let z3 = f64x2::FRAC_PI_2 - z1;
    let asin = big.blend(z3, z2);
    let asin = asin.flip_signs(self);

    // acos
    let z3 = self.cmp_lt(f64x2::ZERO).blend(f64x2::PI - z1, z1);
    let z4 = f64x2::FRAC_PI_2 - z2.flip_signs(self);
    let acos = big.blend(z3, z4);

    (asin, acos)
  }

  #[allow(non_upper_case_globals)]
  #[inline]
  pub fn acos(self) -> Self {
    // Based on the Agner Fog "vector class library":
    // https://github.com/vectorclass/version2/blob/master/vectormath_trig.h
    const_f64_as_f64x2!(R4asin, 2.967721961301243206100E-3);
    const_f64_as_f64x2!(R3asin, -5.634242780008963776856E-1);
    const_f64_as_f64x2!(R2asin, 6.968710824104713396794E0);
    const_f64_as_f64x2!(R1asin, -2.556901049652824852289E1);
    const_f64_as_f64x2!(R0asin, 2.853665548261061424989E1);

    const_f64_as_f64x2!(S3asin, -2.194779531642920639778E1);
    const_f64_as_f64x2!(S2asin, 1.470656354026814941758E2);
    const_f64_as_f64x2!(S1asin, -3.838770957603691357202E2);
    const_f64_as_f64x2!(S0asin, 3.424398657913078477438E2);

    const_f64_as_f64x2!(P5asin, 4.253011369004428248960E-3);
    const_f64_as_f64x2!(P4asin, -6.019598008014123785661E-1);
    const_f64_as_f64x2!(P3asin, 5.444622390564711410273E0);
    const_f64_as_f64x2!(P2asin, -1.626247967210700244449E1);
    const_f64_as_f64x2!(P1asin, 1.956261983317594739197E1);
    const_f64_as_f64x2!(P0asin, -8.198089802484824371615E0);

    const_f64_as_f64x2!(Q4asin, -1.474091372988853791896E1);
    const_f64_as_f64x2!(Q3asin, 7.049610280856842141659E1);
    const_f64_as_f64x2!(Q2asin, -1.471791292232726029859E2);
    const_f64_as_f64x2!(Q1asin, 1.395105614657485689735E2);
    const_f64_as_f64x2!(Q0asin, -4.918853881490881290097E1);

    let xa = self.abs();

    let big = xa.cmp_ge(f64x2::splat(0.625));

    let x1 = big.blend(f64x2::splat(1.0) - xa, xa * xa);

    let x2 = x1 * x1;
    let x3 = x2 * x1;
    let x4 = x2 * x2;
    let x5 = x4 * x1;

    let do_big = big.any();
    let do_small = !big.all();

    let mut rx = f64x2::default();
    let mut sx = f64x2::default();
    let mut px = f64x2::default();
    let mut qx = f64x2::default();

    if do_big {
      rx = x3.mul_add(R3asin, x2 * R2asin)
        + x4.mul_add(R4asin, x1.mul_add(R1asin, R0asin));
      sx =
        x3.mul_add(S3asin, x4) + x2.mul_add(S2asin, x1.mul_add(S1asin, S0asin));
    }
    if do_small {
      px = x3.mul_add(P3asin, P0asin)
        + x4.mul_add(P4asin, x1 * P1asin)
        + x5.mul_add(P5asin, x2 * P2asin);
      qx = x4.mul_add(Q4asin, x5)
        + x3.mul_add(Q3asin, x1 * Q1asin)
        + x2.mul_add(Q2asin, Q0asin);
    };

    let vx = big.blend(rx, px);
    let wx = big.blend(sx, qx);

    let y1 = vx / wx * x1;

    let mut z1 = f64x2::default();
    let mut z2 = f64x2::default();
    if do_big {
      let xb = (x1 + x1).sqrt();
      z1 = xb.mul_add(y1, xb);
    }

    if do_small {
      z2 = xa.mul_add(y1, xa);
    }

    // acos
    let z3 = self.cmp_lt(f64x2::ZERO).blend(f64x2::PI - z1, z1);
    let z4 = f64x2::FRAC_PI_2 - z2.flip_signs(self);
    let acos = big.blend(z3, z4);

    acos
  }

  #[allow(non_upper_case_globals)]
  #[inline]
  pub fn asin(self) -> Self {
    // Based on the Agner Fog "vector class library":
    // https://github.com/vectorclass/version2/blob/master/vectormath_trig.h
    const_f64_as_f64x2!(R4asin, 2.967721961301243206100E-3);
    const_f64_as_f64x2!(R3asin, -5.634242780008963776856E-1);
    const_f64_as_f64x2!(R2asin, 6.968710824104713396794E0);
    const_f64_as_f64x2!(R1asin, -2.556901049652824852289E1);
    const_f64_as_f64x2!(R0asin, 2.853665548261061424989E1);

    const_f64_as_f64x2!(S3asin, -2.194779531642920639778E1);
    const_f64_as_f64x2!(S2asin, 1.470656354026814941758E2);
    const_f64_as_f64x2!(S1asin, -3.838770957603691357202E2);
    const_f64_as_f64x2!(S0asin, 3.424398657913078477438E2);

    const_f64_as_f64x2!(P5asin, 4.253011369004428248960E-3);
    const_f64_as_f64x2!(P4asin, -6.019598008014123785661E-1);
    const_f64_as_f64x2!(P3asin, 5.444622390564711410273E0);
    const_f64_as_f64x2!(P2asin, -1.626247967210700244449E1);
    const_f64_as_f64x2!(P1asin, 1.956261983317594739197E1);
    const_f64_as_f64x2!(P0asin, -8.198089802484824371615E0);

    const_f64_as_f64x2!(Q4asin, -1.474091372988853791896E1);
    const_f64_as_f64x2!(Q3asin, 7.049610280856842141659E1);
    const_f64_as_f64x2!(Q2asin, -1.471791292232726029859E2);
    const_f64_as_f64x2!(Q1asin, 1.395105614657485689735E2);
    const_f64_as_f64x2!(Q0asin, -4.918853881490881290097E1);

    let xa = self.abs();

    let big = xa.cmp_ge(f64x2::splat(0.625));

    let x1 = big.blend(f64x2::splat(1.0) - xa, xa * xa);

    let x2 = x1 * x1;
    let x3 = x2 * x1;
    let x4 = x2 * x2;
    let x5 = x4 * x1;

    let do_big = big.any();
    let do_small = !big.all();

    let mut rx = f64x2::default();
    let mut sx = f64x2::default();
    let mut px = f64x2::default();
    let mut qx = f64x2::default();

    if do_big {
      rx = x3.mul_add(R3asin, x2 * R2asin)
        + x4.mul_add(R4asin, x1.mul_add(R1asin, R0asin));
      sx =
        x3.mul_add(S3asin, x4) + x2.mul_add(S2asin, x1.mul_add(S1asin, S0asin));
    }
    if do_small {
      px = x3.mul_add(P3asin, P0asin)
        + x4.mul_add(P4asin, x1 * P1asin)
        + x5.mul_add(P5asin, x2 * P2asin);
      qx = x4.mul_add(Q4asin, x5)
        + x3.mul_add(Q3asin, x1 * Q1asin)
        + x2.mul_add(Q2asin, Q0asin);
    };

    let vx = big.blend(rx, px);
    let wx = big.blend(sx, qx);

    let y1 = vx / wx * x1;

    let mut z1 = f64x2::default();
    let mut z2 = f64x2::default();
    if do_big {
      let xb = (x1 + x1).sqrt();
      z1 = xb.mul_add(y1, xb);
    }

    if do_small {
      z2 = xa.mul_add(y1, xa);
    }

    // asin
    let z3 = f64x2::FRAC_PI_2 - z1;
    let asin = big.blend(z3, z2);
    let asin = asin.flip_signs(self);

    asin
  }

  #[allow(non_upper_case_globals)]
  #[inline]
  pub fn atan(self) -> Self {
    // Based on the Agner Fog "vector class library":
    // https://github.com/vectorclass/version2/blob/master/vectormath_trig.h
    const_f64_as_f64x2!(MORE_BITS, 6.123233995736765886130E-17);
    const_f64_as_f64x2!(MORE_BITS_O2, 6.123233995736765886130E-17 * 0.5);
    const_f64_as_f64x2!(T3PO8, core::f64::consts::SQRT_2 + 1.0);

    const_f64_as_f64x2!(P4atan, -8.750608600031904122785E-1);
    const_f64_as_f64x2!(P3atan, -1.615753718733365076637E1);
    const_f64_as_f64x2!(P2atan, -7.500855792314704667340E1);
    const_f64_as_f64x2!(P1atan, -1.228866684490136173410E2);
    const_f64_as_f64x2!(P0atan, -6.485021904942025371773E1);

    const_f64_as_f64x2!(Q4atan, 2.485846490142306297962E1);
    const_f64_as_f64x2!(Q3atan, 1.650270098316988542046E2);
    const_f64_as_f64x2!(Q2atan, 4.328810604912902668951E2);
    const_f64_as_f64x2!(Q1atan, 4.853903996359136964868E2);
    const_f64_as_f64x2!(Q0atan, 1.945506571482613964425E2);

    let t = self.abs();

    // small:  t < 0.66
    // medium: 0.66 <= t <= 2.4142 (1+sqrt(2))
    // big:    t > 2.4142
    let notbig = t.cmp_le(T3PO8);
    let notsmal = t.cmp_ge(Self::splat(0.66));

    let mut s = notbig.blend(Self::FRAC_PI_4, Self::FRAC_PI_2);
    s = notsmal & s;
    let mut fac = notbig.blend(MORE_BITS_O2, MORE_BITS);
    fac = notsmal & fac;

    // small:  z = t / 1.0;
    // medium: z = (t-1.0) / (t+1.0);
    // big:    z = -1.0 / t;
    let mut a = notbig & t;
    a = notsmal.blend(a - Self::ONE, a);
    let mut b = notbig & Self::ONE;
    b = notsmal.blend(b + t, b);
    let z = a / b;

    let zz = z * z;

    let px = polynomial_4!(zz, P0atan, P1atan, P2atan, P3atan, P4atan);
    let qx = polynomial_5n!(zz, Q0atan, Q1atan, Q2atan, Q3atan, Q4atan);

    let mut re = (px / qx).mul_add(z * zz, z);
    re += s + fac;

    // get sign bit
    re = (self.sign_bit()).blend(-re, re);

    re
  }

  #[allow(non_upper_case_globals)]
  #[inline]
  pub fn atan2(self, x: Self) -> Self {
    // Based on the Agner Fog "vector class library":
    // https://github.com/vectorclass/version2/blob/master/vectormath_trig.h
    const_f64_as_f64x2!(MORE_BITS, 6.123233995736765886130E-17);
    const_f64_as_f64x2!(MORE_BITS_O2, 6.123233995736765886130E-17 * 0.5);
    const_f64_as_f64x2!(T3PO8, core::f64::consts::SQRT_2 + 1.0);

    const_f64_as_f64x2!(P4atan, -8.750608600031904122785E-1);
    const_f64_as_f64x2!(P3atan, -1.615753718733365076637E1);
    const_f64_as_f64x2!(P2atan, -7.500855792314704667340E1);
    const_f64_as_f64x2!(P1atan, -1.228866684490136173410E2);
    const_f64_as_f64x2!(P0atan, -6.485021904942025371773E1);

    const_f64_as_f64x2!(Q4atan, 2.485846490142306297962E1);
    const_f64_as_f64x2!(Q3atan, 1.650270098316988542046E2);
    const_f64_as_f64x2!(Q2atan, 4.328810604912902668951E2);
    const_f64_as_f64x2!(Q1atan, 4.853903996359136964868E2);
    const_f64_as_f64x2!(Q0atan, 1.945506571482613964425E2);

    let y = self;

    // move in first octant
    let x1 = x.abs();
    let y1 = y.abs();
    let swapxy = y1.cmp_gt(x1);
    // swap x and y if y1 > x1
    let mut x2 = swapxy.blend(y1, x1);
    let mut y2 = swapxy.blend(x1, y1);

    // check for special case: x and y are both +/- INF
    let both_infinite = x.is_inf() & y.is_inf();
    if both_infinite.any() {
      let minus_one = -Self::ONE;
      x2 = both_infinite.blend(x2 & minus_one, x2);
      y2 = both_infinite.blend(y2 & minus_one, y2);
    }

    // x = y = 0 gives NAN here
    let t = y2 / x2;

    // small:  t < 0.66
    // medium: 0.66 <= t <= 2.4142 (1+sqrt(2))
    // big:    t > 2.4142
    let notbig = t.cmp_le(T3PO8);
    let notsmal = t.cmp_ge(Self::splat(0.66));

    let mut s = notbig.blend(Self::FRAC_PI_4, Self::FRAC_PI_2);
    s = notsmal & s;
    let mut fac = notbig.blend(MORE_BITS_O2, MORE_BITS);
    fac = notsmal & fac;

    // small:  z = t / 1.0;
    // medium: z = (t-1.0) / (t+1.0);
    // big:    z = -1.0 / t;
    let mut a = notbig & t;
    a = notsmal.blend(a - Self::ONE, a);
    let mut b = notbig & Self::ONE;
    b = notsmal.blend(b + t, b);
    let z = a / b;

    let zz = z * z;

    let px = polynomial_4!(zz, P0atan, P1atan, P2atan, P3atan, P4atan);
    let qx = polynomial_5n!(zz, Q0atan, Q1atan, Q2atan, Q3atan, Q4atan);

    let mut re = (px / qx).mul_add(z * zz, z);
    re += s + fac;

    // move back in place
    re = swapxy.blend(Self::FRAC_PI_2 - re, re);
    re = ((x | y).cmp_eq(Self::ZERO)).blend(Self::ZERO, re);
    re = (x.sign_bit()).blend(Self::PI - re, re);

    // get sign bit
    re = (y.sign_bit()).blend(-re, re);

    re
  }

  #[inline]
  #[must_use]
  #[allow(non_upper_case_globals)]
  pub fn sin_cos(self) -> (Self, Self) {
    // Based on the Agner Fog "vector class library":
    // https://github.com/vectorclass/version2/blob/master/vectormath_trig.h
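    //
    // The argument is reduced toward [-pi/4, pi/4] by subtracting the nearest
    // multiple of pi/2 in three pieces (DP1 + DP2 + DP3, a Cody-Waite style
    // split that keeps the reduction exact), then separate sin and cos
    // polynomials are evaluated and the quadrant index `q` swaps and
    // sign-flips the results.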

    const_f64_as_f64x2!(P0sin, -1.66666666666666307295E-1);
    const_f64_as_f64x2!(P1sin, 8.33333333332211858878E-3);
    const_f64_as_f64x2!(P2sin, -1.98412698295895385996E-4);
    const_f64_as_f64x2!(P3sin, 2.75573136213857245213E-6);
    const_f64_as_f64x2!(P4sin, -2.50507477628578072866E-8);
    const_f64_as_f64x2!(P5sin, 1.58962301576546568060E-10);

    const_f64_as_f64x2!(P0cos, 4.16666666666665929218E-2);
    const_f64_as_f64x2!(P1cos, -1.38888888888730564116E-3);
    const_f64_as_f64x2!(P2cos, 2.48015872888517045348E-5);
    const_f64_as_f64x2!(P3cos, -2.75573141792967388112E-7);
    const_f64_as_f64x2!(P4cos, 2.08757008419747316778E-9);
    const_f64_as_f64x2!(P5cos, -1.13585365213876817300E-11);

    const_f64_as_f64x2!(DP1, 7.853981554508209228515625E-1 * 2.);
    const_f64_as_f64x2!(DP2, 7.94662735614792836714E-9 * 2.);
    const_f64_as_f64x2!(DP3, 3.06161699786838294307E-17 * 2.);

    const_f64_as_f64x2!(TWO_OVER_PI, 2.0 / core::f64::consts::PI);

    let xa = self.abs();

    let y = (xa * TWO_OVER_PI).round();
    let q = y.round_int();

    let x = y.mul_neg_add(DP3, y.mul_neg_add(DP2, y.mul_neg_add(DP1, xa)));

    let x2 = x * x;
    let mut s = polynomial_5!(x2, P0sin, P1sin, P2sin, P3sin, P4sin, P5sin);
    let mut c = polynomial_5!(x2, P0cos, P1cos, P2cos, P3cos, P4cos, P5cos);
    s = (x * x2).mul_add(s, x);
    c =
      (x2 * x2).mul_add(c, x2.mul_neg_add(f64x2::from(0.5), f64x2::from(1.0)));

    let swap = !((q & i64x2::from(1)).cmp_eq(i64x2::from(0)));

    let mut overflow: f64x2 = cast(q.cmp_gt(i64x2::from(0x80000000000000)));
    overflow &= xa.is_finite();
    s = overflow.blend(f64x2::from(0.0), s);
    c = overflow.blend(f64x2::from(1.0), c);

    // calc sin
    let mut sin1 = cast::<_, f64x2>(swap).blend(c, s);
    let sign_sin: i64x2 = (q << 62) ^ cast::<_, i64x2>(self);
    sin1 = sin1.flip_signs(cast(sign_sin));

    // calc cos
    let mut cos1 = cast::<_, f64x2>(swap).blend(s, c);
    let sign_cos: i64x2 = ((q + i64x2::from(1)) & i64x2::from(2)) << 62;
    cos1 ^= cast::<_, f64x2>(sign_cos);

    (sin1, cos1)
  }
  #[inline]
  #[must_use]
  pub fn sin(self) -> Self {
    let (s, _) = self.sin_cos();
    s
  }
  #[inline]
  #[must_use]
  pub fn cos(self) -> Self {
    let (_, c) = self.sin_cos();
    c
  }
  #[inline]
  #[must_use]
  pub fn tan(self) -> Self {
    let (s, c) = self.sin_cos();
    s / c
  }
  #[inline]
  #[must_use]
  pub fn to_degrees(self) -> Self {
    const_f64_as_f64x2!(RAD_TO_DEG_RATIO, 180.0_f64 / core::f64::consts::PI);
    self * RAD_TO_DEG_RATIO
  }
  #[inline]
  #[must_use]
  pub fn to_radians(self) -> Self {
    const_f64_as_f64x2!(DEG_TO_RAD_RATIO, core::f64::consts::PI / 180.0_f64);
    self * DEG_TO_RAD_RATIO
  }
  #[inline]
  #[must_use]
  pub fn sqrt(self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: sqrt_m128d(self.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: f64x2_sqrt(self.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vsqrtq_f64(self.neon) }}
      } else if #[cfg(feature="std")] {
        Self { arr: [
          self.arr[0].sqrt(),
          self.arr[1].sqrt(),
        ]}
      } else {
        Self { arr: [
          software_sqrt(self.arr[0]),
          software_sqrt(self.arr[1]),
        ]}
      }
    }
  }
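  /// Collects the sign/mask bit of each lane into the low bits of an `i32`
  /// (bit 0 from lane 0, bit 1 from lane 1).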
  #[inline]
  #[must_use]
  pub fn move_mask(self) -> i32 {
    pick! {
      if #[cfg(target_feature="sse2")] {
        move_mask_m128d(self.sse)
      } else if #[cfg(target_feature="simd128")] {
        u64x2_bitmask(self.simd) as i32
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {
          let e = vreinterpretq_u64_f64(self.neon);
          (vgetq_lane_u64(e,0) >> 63 | ((vgetq_lane_u64(e,1) >> 62) & 0x2)) as i32
        }
      } else {
        (((self.arr[0].to_bits() as i64) < 0) as i32) << 0 |
        (((self.arr[1].to_bits() as i64) < 0) as i32) << 1
      }
    }
  }
  #[inline]
  #[must_use]
  pub fn any(self) -> bool {
    pick! {
      if #[cfg(target_feature="simd128")] {
        v128_any_true(self.simd)
      } else {
        self.move_mask() != 0
      }
    }
  }
  #[inline]
  #[must_use]
  pub fn all(self) -> bool {
    pick! {
      if #[cfg(target_feature="simd128")] {
        u64x2_all_true(self.simd)
      } else {
        // two lanes
        self.move_mask() == 0b11
      }
    }
  }
  #[inline]
  #[must_use]
  pub fn none(self) -> bool {
    !self.any()
  }

  #[inline]
  #[allow(non_upper_case_globals)]
  fn vm_pow2n(self) -> Self {
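    // Computes 2^n for integer-valued `self`: adding `bias + pow2_52` parks
    // `n + 1023` in the low mantissa bits of a value with a fixed exponent,
    // and the shift by 52 then moves that biased exponent into the exponent
    // field, leaving exactly a power of two.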
    const_f64_as_f64x2!(pow2_52, 4503599627370496.0);
    const_f64_as_f64x2!(bias, 1023.0);
    let a = self + (bias + pow2_52);
    let c = cast::<_, i64x2>(a) << 52;
    cast::<_, f64x2>(c)
  }

  /// Calculates `e^x` (the exponential function) lanewise for a packed f64x2.
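  ///
  /// A quick sanity check, assuming the crate is used as `wide::f64x2`:
  ///
  /// ```
  /// use wide::f64x2;
  /// let e = f64x2::from([0.0, 1.0]).exp().to_array();
  /// assert!((e[0] - 1.0).abs() < 1e-12);
  /// assert!((e[1] - core::f64::consts::E).abs() < 1e-12);
  /// ```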
  #[inline]
  #[must_use]
  #[allow(non_upper_case_globals)]
  pub fn exp(self) -> Self {
    const_f64_as_f64x2!(P2, 1.0 / 2.0);
    const_f64_as_f64x2!(P3, 1.0 / 6.0);
    const_f64_as_f64x2!(P4, 1. / 24.);
    const_f64_as_f64x2!(P5, 1. / 120.);
    const_f64_as_f64x2!(P6, 1. / 720.);
    const_f64_as_f64x2!(P7, 1. / 5040.);
    const_f64_as_f64x2!(P8, 1. / 40320.);
    const_f64_as_f64x2!(P9, 1. / 362880.);
    const_f64_as_f64x2!(P10, 1. / 3628800.);
    const_f64_as_f64x2!(P11, 1. / 39916800.);
    const_f64_as_f64x2!(P12, 1. / 479001600.);
    const_f64_as_f64x2!(P13, 1. / 6227020800.);
    const_f64_as_f64x2!(LN2D_HI, 0.693145751953125);
    const_f64_as_f64x2!(LN2D_LO, 1.42860682030941723212E-6);
    let max_x = f64x2::from(708.39);
    let r = (self * Self::LOG2_E).round();
    let x = r.mul_neg_add(LN2D_HI, self);
    let x = r.mul_neg_add(LN2D_LO, x);
    let z =
      polynomial_13!(x, P2, P3, P4, P5, P6, P7, P8, P9, P10, P11, P12, P13);
    let n2 = Self::vm_pow2n(r);
    let z = (z + Self::ONE) * n2;
    // check for overflow
    let in_range = self.abs().cmp_lt(max_x);
    let in_range = in_range & self.is_finite();
    in_range.blend(z, Self::ZERO)
  }

  #[inline]
  #[allow(non_upper_case_globals)]
  fn exponent(self) -> f64x2 {
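    // Extracts the unbiased exponent as an f64: shifting right by 52 isolates
    // the biased exponent, OR-ing in the bits of 2^52 turns it into the f64
    // value `2^52 + biased_exp`, and subtracting `pow2_52 + bias` leaves the
    // unbiased exponent.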
    const_f64_as_f64x2!(pow2_52, 4503599627370496.0);
    const_f64_as_f64x2!(bias, 1023.0);
    let a = cast::<_, u64x2>(self);
    let b = a >> 52;
    let c = b | cast::<_, u64x2>(pow2_52);
    let d = cast::<_, f64x2>(c);
    let e = d - (pow2_52 + bias);
    e
  }

  #[inline]
  #[allow(non_upper_case_globals)]
  fn fraction_2(self) -> Self {
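    // Keeps the mantissa but forces the exponent to 0x3FE, so each lane
    // becomes its significand scaled into [0.5, 1.0).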
    let t1 = cast::<_, u64x2>(self);
    let t2 = cast::<_, u64x2>(
      (t1 & u64x2::from(0x000FFFFFFFFFFFFF)) | u64x2::from(0x3FE0000000000000),
    );
    cast::<_, f64x2>(t2)
  }

  #[inline]
  fn is_zero_or_subnormal(self) -> Self {
    let t = cast::<_, i64x2>(self);
    let t = t & i64x2::splat(0x7FF0000000000000);
    i64x2::round_float(t.cmp_eq(i64x2::splat(0)))
  }

  #[inline]
  fn infinity() -> Self {
    cast::<_, f64x2>(i64x2::splat(0x7FF0000000000000))
  }

  #[inline]
  fn nan_log() -> Self {
    cast::<_, f64x2>(i64x2::splat(0x7FF8000000000000 | 0x101 << 29))
  }

  #[inline]
  fn nan_pow() -> Self {
    cast::<_, f64x2>(i64x2::splat(0x7FF8000000000000 | 0x101 << 29))
  }

  #[inline]
  fn sign_bit(self) -> Self {
    let t1 = cast::<_, i64x2>(self);
    let t2 = t1 >> 63;
    !cast::<_, f64x2>(t2).cmp_eq(f64x2::ZERO)
  }

  /// horizontal add of all the elements of the vector
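  ///
  /// A small usage sketch, assuming the crate is used as `wide::f64x2`:
  ///
  /// ```
  /// use wide::f64x2;
  /// assert_eq!(f64x2::from([1.5, 2.5]).reduce_add(), 4.0);
  /// ```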
  #[inline]
  #[must_use]
  pub fn reduce_add(self) -> f64 {
    pick! {
      if #[cfg(target_feature="ssse3")] {
        let a = add_horizontal_m128d(self.sse, self.sse);
        a.to_array()[0]
      } else if #[cfg(any(target_feature="sse2", target_feature="simd128"))] {
        let a: [f64;2] = cast(self);
        a.iter().sum()
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { vgetq_lane_f64(self.neon,0) + vgetq_lane_f64(self.neon,1) }
      } else {
        self.arr.iter().sum()
      }
    }
  }

  #[inline]
  #[must_use]
  #[allow(non_upper_case_globals)]
  pub fn ln(self) -> Self {
    const_f64_as_f64x2!(P0, 7.70838733755885391666E0);
    const_f64_as_f64x2!(P1, 1.79368678507819816313E1);
    const_f64_as_f64x2!(P2, 1.44989225341610930846E1);
    const_f64_as_f64x2!(P3, 4.70579119878881725854E0);
    const_f64_as_f64x2!(P4, 4.97494994976747001425E-1);
    const_f64_as_f64x2!(P5, 1.01875663804580931796E-4);

    const_f64_as_f64x2!(Q0, 2.31251620126765340583E1);
    const_f64_as_f64x2!(Q1, 7.11544750618563894466E1);
    const_f64_as_f64x2!(Q2, 8.29875266912776603211E1);
    const_f64_as_f64x2!(Q3, 4.52279145837532221105E1);
    const_f64_as_f64x2!(Q4, 1.12873587189167450590E1);
    const_f64_as_f64x2!(LN2F_HI, 0.693359375);
    const_f64_as_f64x2!(LN2F_LO, -2.12194440e-4);
    const_f64_as_f64x2!(VM_SQRT2, 1.414213562373095048801);
    const_f64_as_f64x2!(VM_SMALLEST_NORMAL, 1.17549435E-38);

    let x1 = self;
    let x = Self::fraction_2(x1);
    let e = Self::exponent(x1);
    let mask = x.cmp_gt(VM_SQRT2 * f64x2::HALF);
    let x = (!mask).blend(x + x, x);
    let fe = mask.blend(e + Self::ONE, e);
    let x = x - Self::ONE;
    let px = polynomial_5!(x, P0, P1, P2, P3, P4, P5);
    let x2 = x * x;
    let px = x2 * x * px;
    let qx = polynomial_5n!(x, Q0, Q1, Q2, Q3, Q4);
    let res = px / qx;
    let res = fe.mul_add(LN2F_LO, res);
    let res = res + x2.mul_neg_add(f64x2::HALF, x);
    let res = fe.mul_add(LN2F_HI, res);
    let overflow = !self.is_finite();
    let underflow = x1.cmp_lt(VM_SMALLEST_NORMAL);
    let mask = overflow | underflow;
    if !mask.any() {
      res
    } else {
      let is_zero = self.is_zero_or_subnormal();
      let res = underflow.blend(Self::nan_log(), res);
      let res = is_zero.blend(Self::infinity(), res);
      let res = overflow.blend(self, res);
      res
    }
  }

  #[inline]
  #[must_use]
  pub fn log2(self) -> Self {
    Self::ln(self) * Self::LOG2_E
  }
  #[inline]
  #[must_use]
  pub fn log10(self) -> Self {
    Self::ln(self) * Self::LOG10_E
  }

  #[inline]
  #[must_use]
  #[allow(non_upper_case_globals)]
  pub fn pow_f64x2(self, y: Self) -> Self {
    const_f64_as_f64x2!(ln2d_hi, 0.693145751953125);
    const_f64_as_f64x2!(ln2d_lo, 1.42860682030941723212E-6);
    const_f64_as_f64x2!(P0log, 2.0039553499201281259648E1);
    const_f64_as_f64x2!(P1log, 5.7112963590585538103336E1);
    const_f64_as_f64x2!(P2log, 6.0949667980987787057556E1);
    const_f64_as_f64x2!(P3log, 2.9911919328553073277375E1);
    const_f64_as_f64x2!(P4log, 6.5787325942061044846969E0);
    const_f64_as_f64x2!(P5log, 4.9854102823193375972212E-1);
    const_f64_as_f64x2!(P6log, 4.5270000862445199635215E-5);
    const_f64_as_f64x2!(Q0log, 6.0118660497603843919306E1);
    const_f64_as_f64x2!(Q1log, 2.1642788614495947685003E2);
    const_f64_as_f64x2!(Q2log, 3.0909872225312059774938E2);
    const_f64_as_f64x2!(Q3log, 2.2176239823732856465394E2);
    const_f64_as_f64x2!(Q4log, 8.3047565967967209469434E1);
    const_f64_as_f64x2!(Q5log, 1.5062909083469192043167E1);

    // Taylor expansion constants
    const_f64_as_f64x2!(p2, 1.0 / 2.0); // coefficients for Taylor expansion of exp
    const_f64_as_f64x2!(p3, 1.0 / 6.0);
    const_f64_as_f64x2!(p4, 1.0 / 24.0);
    const_f64_as_f64x2!(p5, 1.0 / 120.0);
    const_f64_as_f64x2!(p6, 1.0 / 720.0);
    const_f64_as_f64x2!(p7, 1.0 / 5040.0);
    const_f64_as_f64x2!(p8, 1.0 / 40320.0);
    const_f64_as_f64x2!(p9, 1.0 / 362880.0);
    const_f64_as_f64x2!(p10, 1.0 / 3628800.0);
    const_f64_as_f64x2!(p11, 1.0 / 39916800.0);
    const_f64_as_f64x2!(p12, 1.0 / 479001600.0);
    const_f64_as_f64x2!(p13, 1.0 / 6227020800.0);

    let x1 = self.abs();
    let x = x1.fraction_2();
    let mask = x.cmp_gt(f64x2::SQRT_2 * f64x2::HALF);
    let x = (!mask).blend(x + x, x);
    let x = x - f64x2::ONE;
    let x2 = x * x;
    let px = polynomial_6!(x, P0log, P1log, P2log, P3log, P4log, P5log, P6log);
    let px = px * x * x2;
    let qx = polynomial_6n!(x, Q0log, Q1log, Q2log, Q3log, Q4log, Q5log);
    let lg1 = px / qx;

    let ef = x1.exponent();
    let ef = mask.blend(ef + f64x2::ONE, ef);
    let e1 = (ef * y).round();
    let yr = ef.mul_sub(y, e1);

    let lg = f64x2::HALF.mul_neg_add(x2, x) + lg1;
    let x2err = (f64x2::HALF * x).mul_sub(x, f64x2::HALF * x2);
    let lg_err = f64x2::HALF.mul_add(x2, lg - x) - lg1;

    let e2 = (lg * y * f64x2::LOG2_E).round();
    let v = lg.mul_sub(y, e2 * ln2d_hi);
    let v = e2.mul_neg_add(ln2d_lo, v);
    let v = v - (lg_err + x2err).mul_sub(y, yr * f64x2::LN_2);

    let x = v;
    let e3 = (x * f64x2::LOG2_E).round();
    let x = e3.mul_neg_add(f64x2::LN_2, x);
    let z =
      polynomial_13m!(x, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13)
        + f64x2::ONE;
    let ee = e1 + e2 + e3;
    let ei = cast::<_, i64x2>(ee.round_int());
    let ej = cast::<_, i64x2>(ei + (cast::<_, i64x2>(z) >> 52));

    let overflow = cast::<_, f64x2>(!ej.cmp_lt(i64x2::splat(0x07FF)))
      | ee.cmp_gt(f64x2::splat(3000.0));
    let underflow = cast::<_, f64x2>(!ej.cmp_gt(i64x2::splat(0x000)))
      | ee.cmp_lt(f64x2::splat(-3000.0));

    // Add exponent by integer addition
    let z = cast::<_, f64x2>(cast::<_, i64x2>(z) + (ei << 52));

    // Check for overflow/underflow
    let z = if (overflow | underflow).any() {
      let z = underflow.blend(f64x2::ZERO, z);
      overflow.blend(Self::infinity(), z)
    } else {
      z
    };

    // Check for self == 0
    let x_zero = self.is_zero_or_subnormal();
    let z = x_zero.blend(
      y.cmp_lt(f64x2::ZERO).blend(
        Self::infinity(),
        y.cmp_eq(f64x2::ZERO).blend(f64x2::ONE, f64x2::ZERO),
      ),
      z,
    );

    let x_sign = self.sign_bit();
    let z = if x_sign.any() {
      // Is y an integer?
      let yi = y.cmp_eq(y.round());
      // Is y odd?
      let y_odd = cast::<_, i64x2>(y.round_int() << 63).round_float();

      let z1 =
        yi.blend(z | y_odd, self.cmp_eq(Self::ZERO).blend(z, Self::nan_pow()));
      x_sign.blend(z1, z)
    } else {
      z
    };

    let x_finite = self.is_finite();
    let y_finite = y.is_finite();
    let e_finite = ee.is_finite();

    if (x_finite & y_finite & (e_finite | x_zero)).all() {
      return z;
    }

    (self.is_nan() | y.is_nan()).blend(self + y, z)
  }

  #[inline]
  pub fn powf(self, y: f64) -> Self {
    Self::pow_f64x2(self, f64x2::splat(y))
  }

  #[inline]
  pub fn to_array(self) -> [f64; 2] {
    cast(self)
  }

  #[inline]
  pub fn as_array_ref(&self) -> &[f64; 2] {
    cast_ref(self)
  }

  #[inline]
  pub fn as_array_mut(&mut self) -> &mut [f64; 2] {
    cast_mut(self)
  }
}

impl Not for f64x2 {
  type Output = Self;
  #[inline]
  fn not(self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: self.sse.not() }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_not(self.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe {Self { neon: vreinterpretq_f64_u32(vmvnq_u32(vreinterpretq_u32_f64(self.neon))) }}
      } else {
        Self { arr: [
          f64::from_bits(!self.arr[0].to_bits()),
          f64::from_bits(!self.arr[1].to_bits()),
        ]}
      }
    }
  }
}