use super::*;

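// `pick!` selects the backing representation at compile time: with AVX the
// type wraps a single 256-bit register, otherwise it pairs two `f64x2`
// halves and each operation is applied to both halves.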
pick! {
  if #[cfg(target_feature="avx")] {
    #[derive(Default, Clone, Copy, PartialEq)]
    #[repr(C, align(32))]
    pub struct f64x4 { avx: m256d }
  } else {
    #[derive(Default, Clone, Copy, PartialEq)]
    #[repr(C, align(32))]
    pub struct f64x4 { a : f64x2, b : f64x2 }
  }
}

macro_rules! const_f64_as_f64x4 {
  ($i:ident, $f:expr) => {
    pub const $i: f64x4 =
      unsafe { ConstUnionHack256bit { f64a4: [$f; 4] }.f64x4 };
  };
}

impl f64x4 {
  const_f64_as_f64x4!(ONE, 1.0);
  const_f64_as_f64x4!(ZERO, 0.0);
  const_f64_as_f64x4!(HALF, 0.5);
  const_f64_as_f64x4!(E, core::f64::consts::E);
  const_f64_as_f64x4!(FRAC_1_PI, core::f64::consts::FRAC_1_PI);
  const_f64_as_f64x4!(FRAC_2_PI, core::f64::consts::FRAC_2_PI);
  const_f64_as_f64x4!(FRAC_2_SQRT_PI, core::f64::consts::FRAC_2_SQRT_PI);
  const_f64_as_f64x4!(FRAC_1_SQRT_2, core::f64::consts::FRAC_1_SQRT_2);
  const_f64_as_f64x4!(FRAC_PI_2, core::f64::consts::FRAC_PI_2);
  const_f64_as_f64x4!(FRAC_PI_3, core::f64::consts::FRAC_PI_3);
  const_f64_as_f64x4!(FRAC_PI_4, core::f64::consts::FRAC_PI_4);
  const_f64_as_f64x4!(FRAC_PI_6, core::f64::consts::FRAC_PI_6);
  const_f64_as_f64x4!(FRAC_PI_8, core::f64::consts::FRAC_PI_8);
  const_f64_as_f64x4!(LN_2, core::f64::consts::LN_2);
  const_f64_as_f64x4!(LN_10, core::f64::consts::LN_10);
  const_f64_as_f64x4!(LOG2_E, core::f64::consts::LOG2_E);
  const_f64_as_f64x4!(LOG10_E, core::f64::consts::LOG10_E);
  const_f64_as_f64x4!(LOG10_2, core::f64::consts::LOG10_2);
  const_f64_as_f64x4!(LOG2_10, core::f64::consts::LOG2_10);
  const_f64_as_f64x4!(PI, core::f64::consts::PI);
  const_f64_as_f64x4!(SQRT_2, core::f64::consts::SQRT_2);
  const_f64_as_f64x4!(TAU, core::f64::consts::TAU);
}

unsafe impl Zeroable for f64x4 {}
unsafe impl Pod for f64x4 {}

impl Add for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: add_m256d(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.add(rhs.a),
          b : self.b.add(rhs.b),
        }
      }
    }
  }
}

impl Sub for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: sub_m256d(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.sub(rhs.a),
          b : self.b.sub(rhs.b),
        }
      }
    }
  }
}

impl Mul for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn mul(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: mul_m256d(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.mul(rhs.a),
          b : self.b.mul(rhs.b),
        }
      }
    }
  }
}

impl Div for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn div(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: div_m256d(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.div(rhs.a),
          b : self.b.div(rhs.b),
        }
      }
    }
  }
}

impl Add<f64> for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: f64) -> Self::Output {
    self.add(Self::splat(rhs))
  }
}

impl Sub<f64> for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: f64) -> Self::Output {
    self.sub(Self::splat(rhs))
  }
}

impl Mul<f64> for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn mul(self, rhs: f64) -> Self::Output {
    self.mul(Self::splat(rhs))
  }
}

impl Div<f64> for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn div(self, rhs: f64) -> Self::Output {
    self.div(Self::splat(rhs))
  }
}

impl Add<f64x4> for f64 {
  type Output = f64x4;
  #[inline]
  #[must_use]
  fn add(self, rhs: f64x4) -> Self::Output {
    f64x4::splat(self).add(rhs)
  }
}

impl Sub<f64x4> for f64 {
  type Output = f64x4;
  #[inline]
  #[must_use]
  fn sub(self, rhs: f64x4) -> Self::Output {
    f64x4::splat(self).sub(rhs)
  }
}

impl Mul<f64x4> for f64 {
  type Output = f64x4;
  #[inline]
  #[must_use]
  fn mul(self, rhs: f64x4) -> Self::Output {
    f64x4::splat(self).mul(rhs)
  }
}

impl Div<f64x4> for f64 {
  type Output = f64x4;
  #[inline]
  #[must_use]
  fn div(self, rhs: f64x4) -> Self::Output {
    f64x4::splat(self).div(rhs)
  }
}

impl BitAnd for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitand(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: bitand_m256d(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.bitand(rhs.a),
          b : self.b.bitand(rhs.b),
        }
      }
    }
  }
}

impl BitOr for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: bitor_m256d(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.bitor(rhs.a),
          b : self.b.bitor(rhs.b),
        }
      }
    }
  }
}

impl BitXor for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitxor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: bitxor_m256d(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.bitxor(rhs.a),
          b : self.b.bitxor(rhs.b),
        }
      }
    }
  }
}

impl CmpEq for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_eq(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: cmp_op_mask_m256d::<{cmp_op!(EqualOrdered)}>(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.cmp_eq(rhs.a),
          b : self.b.cmp_eq(rhs.b),
        }
      }
    }
  }
}

impl CmpGe for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_ge(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: cmp_op_mask_m256d::<{cmp_op!(GreaterEqualOrdered)}>(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.cmp_ge(rhs.a),
          b : self.b.cmp_ge(rhs.b),
        }
      }
    }
  }
}

impl CmpGt for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_gt(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: cmp_op_mask_m256d::<{cmp_op!(GreaterThanOrdered)}>(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.cmp_gt(rhs.a),
          b : self.b.cmp_gt(rhs.b),
        }
      }
    }
  }
}

impl CmpNe for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_ne(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: cmp_op_mask_m256d::<{cmp_op!(NotEqualOrdered)}>(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.cmp_ne(rhs.a),
          b : self.b.cmp_ne(rhs.b),
        }
      }
    }
  }
}

impl CmpLe for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_le(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: cmp_op_mask_m256d::<{cmp_op!(LessEqualOrdered)}>(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.cmp_le(rhs.a),
          b : self.b.cmp_le(rhs.b),
        }
      }
    }
  }
}

impl CmpLt for f64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_lt(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: cmp_op_mask_m256d::<{cmp_op!(LessThanOrdered)}>(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.cmp_lt(rhs.a),
          b : self.b.cmp_lt(rhs.b),
        }
      }
    }
  }
}

impl f64x4 {
  #[inline]
  #[must_use]
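  /// Builds a vector from an array of four lanes.
  ///
  /// ```rust,ignore
  /// // sketch of intended usage, assuming the crate root re-exports f64x4:
  /// let v = f64x4::new([1.0, 2.0, 3.0, 4.0]);
  /// assert_eq!(v.to_array(), [1.0, 2.0, 3.0, 4.0]);
  /// ```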
  pub fn new(array: [f64; 4]) -> Self {
    Self::from(array)
  }
  #[inline]
  #[must_use]
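  /// Lanewise branchless select, using `self` as the mask: where a lane of
  /// `self` is all ones the lane of `t` is taken, otherwise the lane of `f`.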
  pub fn blend(self, t: Self, f: Self) -> Self {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: blend_varying_m256d(f.avx, t.avx, self.avx) }
      } else {
        Self {
          a : self.a.blend(t.a, f.a),
          b : self.b.blend(t.b, f.b),
        }
      }
    }
  }

  #[inline]
  #[must_use]
  pub fn abs(self) -> Self {
    pick! {
      if #[cfg(target_feature="avx")] {
        let non_sign_bits = f64x4::from(f64::from_bits(i64::MAX as u64));
        self & non_sign_bits
      } else {
        Self {
          a : self.a.abs(),
          b : self.b.abs(),
        }
      }
    }
  }

  #[inline]
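  /// Lanewise maximum of the two vectors. This is a faster implementation
  /// than `max`, but it doesn't specify any behavior if NaNs are involved.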
  #[must_use]
  pub fn fast_max(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: max_m256d(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.fast_max(rhs.a),
          b : self.b.fast_max(rhs.b),
        }
      }
    }
  }

  #[inline]
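  /// Lanewise maximum of the two vectors. If a lane of `rhs` is NaN, the
  /// lane from `self` is chosen instead, so NaNs in `rhs` don't propagate.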
  #[must_use]
  pub fn max(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="avx")] {
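        // max_m256d returns its second operand when either lane is NaN, so
        // explicitly keep `self` wherever `rhs` is NaN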
        rhs.is_nan().blend(self, Self { avx: max_m256d(self.avx, rhs.avx) })
      } else {
        Self {
          a : self.a.max(rhs.a),
          b : self.b.max(rhs.b),
        }
      }
    }
  }

  #[inline]
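  /// Lanewise minimum of the two vectors. This is a faster implementation
  /// than `min`, but it doesn't specify any behavior if NaNs are involved.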
  #[must_use]
  pub fn fast_min(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: min_m256d(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.fast_min(rhs.a),
          b : self.b.fast_min(rhs.b),
        }
      }
    }
  }

  #[inline]
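  /// Lanewise minimum of the two vectors. If a lane of `rhs` is NaN, the
  /// lane from `self` is chosen instead, so NaNs in `rhs` don't propagate.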
  #[must_use]
  pub fn min(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="avx")] {
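        // min_m256d returns its second operand when either lane is NaN, so
        // explicitly keep `self` wherever `rhs` is NaN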
        rhs.is_nan().blend(self, Self { avx: min_m256d(self.avx, rhs.avx) })
      } else {
        Self {
          a : self.a.min(rhs.a),
          b : self.b.min(rhs.b),
        }
      }
    }
  }

  #[inline]
  #[must_use]
  pub fn is_nan(self) -> Self {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: cmp_op_mask_m256d::<{cmp_op!(Unordered)}>(self.avx, self.avx) }
      } else {
        Self {
          a : self.a.is_nan(),
          b : self.b.is_nan(),
        }
      }
    }
  }

  #[inline]
  #[must_use]
  pub fn is_finite(self) -> Self {
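    // shift out the sign bit, then check whether the exponent field is all
    // ones (which would mean infinity or NaN)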
    let shifted_exp_mask = u64x4::from(0xFFE0000000000000);
    let u: u64x4 = cast(self);
    let shift_u = u << 1_u64;
    let out = !(shift_u & shifted_exp_mask).cmp_eq(shifted_exp_mask);
    cast(out)
  }

  #[inline]
  #[must_use]
  pub fn is_inf(self) -> Self {
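    // after shifting out the sign bit, an infinity matches the shifted
    // exponent mask exactly: all-ones exponent with a zero mantissa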
    let shifted_inf = u64x4::from(0xFFE0000000000000);
    let u: u64x4 = cast(self);
    let shift_u = u << 1_u64;
    let out = (shift_u).cmp_eq(shifted_inf);
    cast(out)
  }

  #[inline]
  #[must_use]
  pub fn round(self) -> Self {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: round_m256d::<{round_op!(Nearest)}>(self.avx) }
      } else {
        Self {
          a : self.a.round(),
          b : self.b.round(),
        }
      }
    }
  }

  #[inline]
  #[must_use]
  pub fn round_int(self) -> i64x4 {
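    // there's no packed f64 -> i64 conversion op used here; each rounded
    // lane goes through a plain scalar `as` cast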
    let rounded: [f64; 4] = cast(self.round());
    cast([
      rounded[0] as i64,
      rounded[1] as i64,
      rounded[2] as i64,
      rounded[3] as i64,
    ])
  }

  #[inline]
  #[must_use]
  pub fn mul_add(self, m: Self, a: Self) -> Self {
    pick! {
      if #[cfg(all(target_feature="avx",target_feature="fma"))] {
        Self { avx: fused_mul_add_m256d(self.avx, m.avx, a.avx) }
      } else if #[cfg(target_feature="avx")] {
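        // no FMA available: fall back to a separate 256-bit multiply and add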
        (self * m) + a
      } else {
        Self {
          a : self.a.mul_add(m.a, a.a),
          b : self.b.mul_add(m.b, a.b),
        }
      }
    }
  }

  #[inline]
  #[must_use]
  pub fn mul_sub(self, m: Self, a: Self) -> Self {
    pick! {
      if #[cfg(all(target_feature="avx",target_feature="fma"))] {
        Self { avx: fused_mul_sub_m256d(self.avx, m.avx, a.avx) }
      } else if #[cfg(target_feature="avx")] {
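        // no FMA available: separate 256-bit multiply and subtract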
        (self * m) - a
      } else {
        Self {
          a : self.a.mul_sub(m.a, a.a),
          b : self.b.mul_sub(m.b, a.b),
        }
      }
    }
  }

  #[inline]
  #[must_use]
  pub fn mul_neg_add(self, m: Self, a: Self) -> Self {
    pick! {
      if #[cfg(all(target_feature="avx",target_feature="fma"))] {
        Self { avx: fused_mul_neg_add_m256d(self.avx, m.avx, a.avx) }
      } else if #[cfg(target_feature="avx")] {
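        // no FMA available: compute a - (self * m) as two separate ops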
        a - (self * m)
      } else {
        Self {
          a : self.a.mul_neg_add(m.a, a.a),
          b : self.b.mul_neg_add(m.b, a.b),
        }
      }
    }
  }

  #[inline]
  #[must_use]
  pub fn mul_neg_sub(self, m: Self, a: Self) -> Self {
    pick! {
      if #[cfg(all(target_feature="avx",target_feature="fma"))] {
        Self { avx: fused_mul_neg_sub_m256d(self.avx, m.avx, a.avx) }
      } else if #[cfg(target_feature="avx")] {
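        // no FMA available: compute -(self * m) - a as separate ops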
        -(self * m) - a
      } else {
        Self {
          a : self.a.mul_neg_sub(m.a, a.a),
          b : self.b.mul_neg_sub(m.b, a.b),
        }
      }
    }
  }

  #[inline]
  #[must_use]
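  /// Flips the sign of each lane of `self` wherever the matching lane of
  /// `signs` has its sign bit set.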
  pub fn flip_signs(self, signs: Self) -> Self {
    self ^ (signs & Self::from(-0.0))
  }

  #[inline]
  #[must_use]
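  /// Returns a vector with the magnitude of `self` and the sign of `sign`,
  /// lanewise.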
  pub fn copysign(self, sign: Self) -> Self {
    let magnitude_mask = Self::from(f64::from_bits(u64::MAX >> 1));
    (self & magnitude_mask) | (sign & Self::from(-0.0))
  }

  #[allow(non_upper_case_globals)]
  #[inline]
  pub fn asin_acos(self) -> (Self, Self) {
    const_f64_as_f64x4!(R4asin, 2.967721961301243206100E-3);
    const_f64_as_f64x4!(R3asin, -5.634242780008963776856E-1);
    const_f64_as_f64x4!(R2asin, 6.968710824104713396794E0);
    const_f64_as_f64x4!(R1asin, -2.556901049652824852289E1);
    const_f64_as_f64x4!(R0asin, 2.853665548261061424989E1);

    const_f64_as_f64x4!(S3asin, -2.194779531642920639778E1);
    const_f64_as_f64x4!(S2asin, 1.470656354026814941758E2);
    const_f64_as_f64x4!(S1asin, -3.838770957603691357202E2);
    const_f64_as_f64x4!(S0asin, 3.424398657913078477438E2);

    const_f64_as_f64x4!(P5asin, 4.253011369004428248960E-3);
    const_f64_as_f64x4!(P4asin, -6.019598008014123785661E-1);
    const_f64_as_f64x4!(P3asin, 5.444622390564711410273E0);
    const_f64_as_f64x4!(P2asin, -1.626247967210700244449E1);
    const_f64_as_f64x4!(P1asin, 1.956261983317594739197E1);
    const_f64_as_f64x4!(P0asin, -8.198089802484824371615E0);

    const_f64_as_f64x4!(Q4asin, -1.474091372988853791896E1);
    const_f64_as_f64x4!(Q3asin, 7.049610280856842141659E1);
    const_f64_as_f64x4!(Q2asin, -1.471791292232726029859E2);
    const_f64_as_f64x4!(Q1asin, 1.395105614657485689735E2);
    const_f64_as_f64x4!(Q0asin, -4.918853881490881290097E1);

    let xa = self.abs();

    let big = xa.cmp_ge(f64x4::splat(0.625));

    let x1 = big.blend(f64x4::splat(1.0) - xa, xa * xa);

    let x2 = x1 * x1;
    let x3 = x2 * x1;
    let x4 = x2 * x2;
    let x5 = x4 * x1;

    let do_big = big.any();
    let do_small = !big.all();

    let mut rx = f64x4::default();
    let mut sx = f64x4::default();
    let mut px = f64x4::default();
    let mut qx = f64x4::default();

    if do_big {
      rx = x3.mul_add(R3asin, x2 * R2asin)
        + x4.mul_add(R4asin, x1.mul_add(R1asin, R0asin));
      sx =
        x3.mul_add(S3asin, x4) + x2.mul_add(S2asin, x1.mul_add(S1asin, S0asin));
    }

    if do_small {
      px = x3.mul_add(P3asin, P0asin)
        + x4.mul_add(P4asin, x1 * P1asin)
        + x5.mul_add(P5asin, x2 * P2asin);
      qx = x4.mul_add(Q4asin, x5)
        + x3.mul_add(Q3asin, x1 * Q1asin)
        + x2.mul_add(Q2asin, Q0asin);
    };

    let vx = big.blend(rx, px);
    let wx = big.blend(sx, qx);

    let y1 = vx / wx * x1;

    let mut z1 = f64x4::default();
    let mut z2 = f64x4::default();
    if do_big {
      let xb = (x1 + x1).sqrt();
      z1 = xb.mul_add(y1, xb);
    }

    if do_small {
      z2 = xa.mul_add(y1, xa);
    }

    let z3 = f64x4::FRAC_PI_2 - z1;
    let asin = big.blend(z3, z2);
    let asin = asin.flip_signs(self);

    let z3 = self.cmp_lt(f64x4::ZERO).blend(f64x4::PI - z1, z1);
    let z4 = f64x4::FRAC_PI_2 - z2.flip_signs(self);
    let acos = big.blend(z3, z4);

    (asin, acos)
  }

  #[allow(non_upper_case_globals)]
  #[inline]
  pub fn acos(self) -> Self {
    const_f64_as_f64x4!(R4asin, 2.967721961301243206100E-3);
    const_f64_as_f64x4!(R3asin, -5.634242780008963776856E-1);
    const_f64_as_f64x4!(R2asin, 6.968710824104713396794E0);
    const_f64_as_f64x4!(R1asin, -2.556901049652824852289E1);
    const_f64_as_f64x4!(R0asin, 2.853665548261061424989E1);

    const_f64_as_f64x4!(S3asin, -2.194779531642920639778E1);
    const_f64_as_f64x4!(S2asin, 1.470656354026814941758E2);
    const_f64_as_f64x4!(S1asin, -3.838770957603691357202E2);
    const_f64_as_f64x4!(S0asin, 3.424398657913078477438E2);

    const_f64_as_f64x4!(P5asin, 4.253011369004428248960E-3);
    const_f64_as_f64x4!(P4asin, -6.019598008014123785661E-1);
    const_f64_as_f64x4!(P3asin, 5.444622390564711410273E0);
    const_f64_as_f64x4!(P2asin, -1.626247967210700244449E1);
    const_f64_as_f64x4!(P1asin, 1.956261983317594739197E1);
    const_f64_as_f64x4!(P0asin, -8.198089802484824371615E0);

    const_f64_as_f64x4!(Q4asin, -1.474091372988853791896E1);
    const_f64_as_f64x4!(Q3asin, 7.049610280856842141659E1);
    const_f64_as_f64x4!(Q2asin, -1.471791292232726029859E2);
    const_f64_as_f64x4!(Q1asin, 1.395105614657485689735E2);
    const_f64_as_f64x4!(Q0asin, -4.918853881490881290097E1);

    let xa = self.abs();

    let big = xa.cmp_ge(f64x4::splat(0.625));

    let x1 = big.blend(f64x4::splat(1.0) - xa, xa * xa);

    let x2 = x1 * x1;
    let x3 = x2 * x1;
    let x4 = x2 * x2;
    let x5 = x4 * x1;

    let do_big = big.any();
    let do_small = !big.all();

    let mut rx = f64x4::default();
    let mut sx = f64x4::default();
    let mut px = f64x4::default();
    let mut qx = f64x4::default();

    if do_big {
      rx = x3.mul_add(R3asin, x2 * R2asin)
        + x4.mul_add(R4asin, x1.mul_add(R1asin, R0asin));
      sx =
        x3.mul_add(S3asin, x4) + x2.mul_add(S2asin, x1.mul_add(S1asin, S0asin));
    }
    if do_small {
      px = x3.mul_add(P3asin, P0asin)
        + x4.mul_add(P4asin, x1 * P1asin)
        + x5.mul_add(P5asin, x2 * P2asin);
      qx = x4.mul_add(Q4asin, x5)
        + x3.mul_add(Q3asin, x1 * Q1asin)
        + x2.mul_add(Q2asin, Q0asin);
    };

    let vx = big.blend(rx, px);
    let wx = big.blend(sx, qx);

    let y1 = vx / wx * x1;

    let mut z1 = f64x4::default();
    let mut z2 = f64x4::default();
    if do_big {
      let xb = (x1 + x1).sqrt();
      z1 = xb.mul_add(y1, xb);
    }

    if do_small {
      z2 = xa.mul_add(y1, xa);
    }

    let z3 = self.cmp_lt(f64x4::ZERO).blend(f64x4::PI - z1, z1);
    let z4 = f64x4::FRAC_PI_2 - z2.flip_signs(self);
    let acos = big.blend(z3, z4);

    acos
  }
  #[inline]
  #[must_use]
  #[allow(non_upper_case_globals)]
  pub fn asin(self) -> Self {
    const_f64_as_f64x4!(R4asin, 2.967721961301243206100E-3);
    const_f64_as_f64x4!(R3asin, -5.634242780008963776856E-1);
    const_f64_as_f64x4!(R2asin, 6.968710824104713396794E0);
    const_f64_as_f64x4!(R1asin, -2.556901049652824852289E1);
    const_f64_as_f64x4!(R0asin, 2.853665548261061424989E1);

    const_f64_as_f64x4!(S3asin, -2.194779531642920639778E1);
    const_f64_as_f64x4!(S2asin, 1.470656354026814941758E2);
    const_f64_as_f64x4!(S1asin, -3.838770957603691357202E2);
    const_f64_as_f64x4!(S0asin, 3.424398657913078477438E2);

    const_f64_as_f64x4!(P5asin, 4.253011369004428248960E-3);
    const_f64_as_f64x4!(P4asin, -6.019598008014123785661E-1);
    const_f64_as_f64x4!(P3asin, 5.444622390564711410273E0);
    const_f64_as_f64x4!(P2asin, -1.626247967210700244449E1);
    const_f64_as_f64x4!(P1asin, 1.956261983317594739197E1);
    const_f64_as_f64x4!(P0asin, -8.198089802484824371615E0);

    const_f64_as_f64x4!(Q4asin, -1.474091372988853791896E1);
    const_f64_as_f64x4!(Q3asin, 7.049610280856842141659E1);
    const_f64_as_f64x4!(Q2asin, -1.471791292232726029859E2);
    const_f64_as_f64x4!(Q1asin, 1.395105614657485689735E2);
    const_f64_as_f64x4!(Q0asin, -4.918853881490881290097E1);

    let xa = self.abs();

    let big = xa.cmp_ge(f64x4::splat(0.625));

    let x1 = big.blend(f64x4::splat(1.0) - xa, xa * xa);

    let x2 = x1 * x1;
    let x3 = x2 * x1;
    let x4 = x2 * x2;
    let x5 = x4 * x1;

    let do_big = big.any();
    let do_small = !big.all();

    let mut rx = f64x4::default();
    let mut sx = f64x4::default();
    let mut px = f64x4::default();
    let mut qx = f64x4::default();

    if do_big {
      rx = x3.mul_add(R3asin, x2 * R2asin)
        + x4.mul_add(R4asin, x1.mul_add(R1asin, R0asin));
      sx =
        x3.mul_add(S3asin, x4) + x2.mul_add(S2asin, x1.mul_add(S1asin, S0asin));
    }
    if do_small {
      px = x3.mul_add(P3asin, P0asin)
        + x4.mul_add(P4asin, x1 * P1asin)
        + x5.mul_add(P5asin, x2 * P2asin);
      qx = x4.mul_add(Q4asin, x5)
        + x3.mul_add(Q3asin, x1 * Q1asin)
        + x2.mul_add(Q2asin, Q0asin);
    };

    let vx = big.blend(rx, px);
    let wx = big.blend(sx, qx);

    let y1 = vx / wx * x1;

    let mut z1 = f64x4::default();
    let mut z2 = f64x4::default();
    if do_big {
      let xb = (x1 + x1).sqrt();
      z1 = xb.mul_add(y1, xb);
    }

    if do_small {
      z2 = xa.mul_add(y1, xa);
    }

    let z3 = f64x4::FRAC_PI_2 - z1;
    let asin = big.blend(z3, z2);
    let asin = asin.flip_signs(self);

    asin
  }

  #[allow(non_upper_case_globals)]
  #[inline]
  pub fn atan(self) -> Self {
    const_f64_as_f64x4!(MORE_BITS, 6.123233995736765886130E-17);
    const_f64_as_f64x4!(MORE_BITS_O2, 6.123233995736765886130E-17 * 0.5);
    const_f64_as_f64x4!(T3PO8, core::f64::consts::SQRT_2 + 1.0);

    const_f64_as_f64x4!(P4atan, -8.750608600031904122785E-1);
    const_f64_as_f64x4!(P3atan, -1.615753718733365076637E1);
    const_f64_as_f64x4!(P2atan, -7.500855792314704667340E1);
    const_f64_as_f64x4!(P1atan, -1.228866684490136173410E2);
    const_f64_as_f64x4!(P0atan, -6.485021904942025371773E1);

    const_f64_as_f64x4!(Q4atan, 2.485846490142306297962E1);
    const_f64_as_f64x4!(Q3atan, 1.650270098316988542046E2);
    const_f64_as_f64x4!(Q2atan, 4.328810604912902668951E2);
    const_f64_as_f64x4!(Q1atan, 4.853903996359136964868E2);
    const_f64_as_f64x4!(Q0atan, 1.945506571482613964425E2);

    let t = self.abs();

    let notbig = t.cmp_le(T3PO8);
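    // the argument reduction picks one of three ranges:
    // small:  z = t / 1.0;
    // medium: z = (t - 1.0) / (t + 1.0);
    // big:    z = -1.0 / t;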
    let notsmal = t.cmp_ge(Self::splat(0.66));

    let mut s = notbig.blend(Self::FRAC_PI_4, Self::FRAC_PI_2);
    s = notsmal & s;
    let mut fac = notbig.blend(MORE_BITS_O2, MORE_BITS);
    fac = notsmal & fac;

    let mut a = notbig & t;
    a = notsmal.blend(a - Self::ONE, a);
    let mut b = notbig & Self::ONE;
    b = notsmal.blend(b + t, b);
    let z = a / b;

    let zz = z * z;

    let px = polynomial_4!(zz, P0atan, P1atan, P2atan, P3atan, P4atan);
    let qx = polynomial_5n!(zz, Q0atan, Q1atan, Q2atan, Q3atan, Q4atan);

    let mut re = (px / qx).mul_add(z * zz, z);
    re += s + fac;

    re = (self.sign_bit()).blend(-re, re);

    re
  }

  #[allow(non_upper_case_globals)]
  #[inline]
  pub fn atan2(self, x: Self) -> Self {
    const_f64_as_f64x4!(MORE_BITS, 6.123233995736765886130E-17);
    const_f64_as_f64x4!(MORE_BITS_O2, 6.123233995736765886130E-17 * 0.5);
    const_f64_as_f64x4!(T3PO8, core::f64::consts::SQRT_2 + 1.0);

    const_f64_as_f64x4!(P4atan, -8.750608600031904122785E-1);
    const_f64_as_f64x4!(P3atan, -1.615753718733365076637E1);
    const_f64_as_f64x4!(P2atan, -7.500855792314704667340E1);
    const_f64_as_f64x4!(P1atan, -1.228866684490136173410E2);
    const_f64_as_f64x4!(P0atan, -6.485021904942025371773E1);

    const_f64_as_f64x4!(Q4atan, 2.485846490142306297962E1);
    const_f64_as_f64x4!(Q3atan, 1.650270098316988542046E2);
    const_f64_as_f64x4!(Q2atan, 4.328810604912902668951E2);
    const_f64_as_f64x4!(Q1atan, 4.853903996359136964868E2);
    const_f64_as_f64x4!(Q0atan, 1.945506571482613964425E2);

    let y = self;

    let x1 = x.abs();
    let y1 = y.abs();
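    // compare magnitudes and order the operands so the quotient below is at
    // most 1; swapxy records whether a swap happened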
    let swapxy = y1.cmp_gt(x1);
    let mut x2 = swapxy.blend(y1, x1);
    let mut y2 = swapxy.blend(x1, y1);

    let both_infinite = x.is_inf() & y.is_inf();
    if both_infinite.any() {
      let minus_one = -Self::ONE;
      x2 = both_infinite.blend(x2 & minus_one, x2);
      y2 = both_infinite.blend(y2 & minus_one, y2);
    }

    let t = y2 / x2;

    let notbig = t.cmp_le(T3PO8);
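    // the argument reduction picks one of three ranges:
    // small:  z = t / 1.0;
    // medium: z = (t - 1.0) / (t + 1.0);
    // big:    z = -1.0 / t;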
    let notsmal = t.cmp_ge(Self::splat(0.66));

    let mut s = notbig.blend(Self::FRAC_PI_4, Self::FRAC_PI_2);
    s = notsmal & s;
    let mut fac = notbig.blend(MORE_BITS_O2, MORE_BITS);
    fac = notsmal & fac;

    let mut a = notbig & t;
    a = notsmal.blend(a - Self::ONE, a);
    let mut b = notbig & Self::ONE;
    b = notsmal.blend(b + t, b);
    let z = a / b;

    let zz = z * z;

    let px = polynomial_4!(zz, P0atan, P1atan, P2atan, P3atan, P4atan);
    let qx = polynomial_5n!(zz, Q0atan, Q1atan, Q2atan, Q3atan, Q4atan);

    let mut re = (px / qx).mul_add(z * zz, z);
    re += s + fac;

    re = swapxy.blend(Self::FRAC_PI_2 - re, re);
    re = ((x | y).cmp_eq(Self::ZERO)).blend(Self::ZERO, re);
    re = (x.sign_bit()).blend(Self::PI - re, re);

    re = (y.sign_bit()).blend(-re, re);

    re
  }

  #[inline]
  #[must_use]
  #[allow(non_upper_case_globals)]
  pub fn sin_cos(self) -> (Self, Self) {
    const_f64_as_f64x4!(P0sin, -1.66666666666666307295E-1);
    const_f64_as_f64x4!(P1sin, 8.33333333332211858878E-3);
    const_f64_as_f64x4!(P2sin, -1.98412698295895385996E-4);
    const_f64_as_f64x4!(P3sin, 2.75573136213857245213E-6);
    const_f64_as_f64x4!(P4sin, -2.50507477628578072866E-8);
    const_f64_as_f64x4!(P5sin, 1.58962301576546568060E-10);

    const_f64_as_f64x4!(P0cos, 4.16666666666665929218E-2);
    const_f64_as_f64x4!(P1cos, -1.38888888888730564116E-3);
    const_f64_as_f64x4!(P2cos, 2.48015872888517045348E-5);
    const_f64_as_f64x4!(P3cos, -2.75573141792967388112E-7);
    const_f64_as_f64x4!(P4cos, 2.08757008419747316778E-9);
    const_f64_as_f64x4!(P5cos, -1.13585365213876817300E-11);

    const_f64_as_f64x4!(DP1, 7.853981554508209228515625E-1 * 2.);
    const_f64_as_f64x4!(DP2, 7.94662735614792836714E-9 * 2.);
    const_f64_as_f64x4!(DP3, 3.06161699786838294307E-17 * 2.);

    const_f64_as_f64x4!(TWO_OVER_PI, 2.0 / core::f64::consts::PI);

    let xa = self.abs();

    let y = (xa * TWO_OVER_PI).round();
    let q = y.round_int();

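    // subtract q * pi/2 from the argument, with pi/2 split across DP1, DP2,
    // and DP3 so the reduction keeps extra precision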
    let x = y.mul_neg_add(DP3, y.mul_neg_add(DP2, y.mul_neg_add(DP1, xa)));

    let x2 = x * x;
    let mut s = polynomial_5!(x2, P0sin, P1sin, P2sin, P3sin, P4sin, P5sin);
    let mut c = polynomial_5!(x2, P0cos, P1cos, P2cos, P3cos, P4cos, P5cos);
    s = (x * x2).mul_add(s, x);
    c =
      (x2 * x2).mul_add(c, x2.mul_neg_add(f64x4::from(0.5), f64x4::from(1.0)));

    let swap = !((q & i64x4::from(1)).cmp_eq(i64x4::from(0)));

    let mut overflow: f64x4 = cast(q.cmp_gt(i64x4::from(0x80000000000000)));
    overflow &= xa.is_finite();
    s = overflow.blend(f64x4::from(0.0), s);
    c = overflow.blend(f64x4::from(1.0), c);

    let mut sin1 = cast::<_, f64x4>(swap).blend(c, s);
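    // sin is negated when bit 1 of q is set (quadrants 2 and 3), xor'd with
    // the sign of the original argument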
    let sign_sin: i64x4 = (q << 62) ^ cast::<_, i64x4>(self);
    sin1 = sin1.flip_signs(cast(sign_sin));

    let mut cos1 = cast::<_, f64x4>(swap).blend(s, c);
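    // cos is negated when bit 1 of (q + 1) is set, i.e. for q % 4 == 1 or 2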
    let sign_cos: i64x4 = ((q + i64x4::from(1)) & i64x4::from(2)) << 62;
    cos1 ^= cast::<_, f64x4>(sign_cos);

    (sin1, cos1)
  }
  #[inline]
  #[must_use]
  pub fn sin(self) -> Self {
    let (s, _) = self.sin_cos();
    s
  }
  #[inline]
  #[must_use]
  pub fn cos(self) -> Self {
    let (_, c) = self.sin_cos();
    c
  }
  #[inline]
  #[must_use]
  pub fn tan(self) -> Self {
    let (s, c) = self.sin_cos();
    s / c
  }
  #[inline]
  #[must_use]
  pub fn to_degrees(self) -> Self {
    const_f64_as_f64x4!(RAD_TO_DEG_RATIO, 180.0_f64 / core::f64::consts::PI);
    self * RAD_TO_DEG_RATIO
  }
  #[inline]
  #[must_use]
  pub fn to_radians(self) -> Self {
    const_f64_as_f64x4!(DEG_TO_RAD_RATIO, core::f64::consts::PI / 180.0_f64);
    self * DEG_TO_RAD_RATIO
  }
  #[inline]
  #[must_use]
  pub fn sqrt(self) -> Self {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: sqrt_m256d(self.avx) }
      } else {
        Self {
          a : self.a.sqrt(),
          b : self.b.sqrt(),
        }
      }
    }
  }
  #[inline]
  #[must_use]
  pub fn move_mask(self) -> i32 {
    pick! {
      if #[cfg(target_feature="avx")] {
        move_mask_m256d(self.avx)
      } else {
        (self.b.move_mask() << 2) | self.a.move_mask()
      }
    }
  }
  #[inline]
  #[must_use]
  pub fn any(self) -> bool {
    pick! {
      if #[cfg(target_feature="avx")] {
        move_mask_m256d(self.avx) != 0
      } else {
        self.a.any() || self.b.any()
      }
    }
  }
  #[inline]
  #[must_use]
  pub fn all(self) -> bool {
    pick! {
      if #[cfg(target_feature="avx")] {
        move_mask_m256d(self.avx) == 0b1111
      } else {
        self.a.all() && self.b.all()
      }
    }
  }
  #[inline]
  #[must_use]
  pub fn none(self) -> bool {
    !self.any()
  }

  #[inline]
  #[allow(non_upper_case_globals)]
  fn vm_pow2n(self) -> Self {
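    // computes 2^n: adding n to (bias + 2^52) parks the integer in the low
    // mantissa bits, and shifting left by 52 moves it into the exponent field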
    const_f64_as_f64x4!(pow2_52, 4503599627370496.0);
    const_f64_as_f64x4!(bias, 1023.0);
    let a = self + (bias + pow2_52);
    let c = cast::<_, i64x4>(a) << 52;
    cast::<_, f64x4>(c)
  }

  #[inline]
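  /// Lanewise e^self: reduces with r = round(self * log2(e)), runs a
  /// degree-13 polynomial on the remainder, then scales the result by 2^r.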
1179 #[must_use]
1180 #[allow(non_upper_case_globals)]
1181 pub fn exp(self) -> Self {
1182 const_f64_as_f64x4!(P2, 1.0 / 2.0);
1183 const_f64_as_f64x4!(P3, 1.0 / 6.0);
1184 const_f64_as_f64x4!(P4, 1. / 24.);
1185 const_f64_as_f64x4!(P5, 1. / 120.);
1186 const_f64_as_f64x4!(P6, 1. / 720.);
1187 const_f64_as_f64x4!(P7, 1. / 5040.);
1188 const_f64_as_f64x4!(P8, 1. / 40320.);
1189 const_f64_as_f64x4!(P9, 1. / 362880.);
1190 const_f64_as_f64x4!(P10, 1. / 3628800.);
1191 const_f64_as_f64x4!(P11, 1. / 39916800.);
1192 const_f64_as_f64x4!(P12, 1. / 479001600.);
1193 const_f64_as_f64x4!(P13, 1. / 6227020800.);
1194 const_f64_as_f64x4!(LN2D_HI, 0.693145751953125);
1195 const_f64_as_f64x4!(LN2D_LO, 1.42860682030941723212E-6);
1196 let max_x = f64x4::from(708.39);
1197 let r = (self * Self::LOG2_E).round();
1198 let x = r.mul_neg_add(LN2D_HI, self);
1199 let x = r.mul_neg_add(LN2D_LO, x);
1200 let z =
1201 polynomial_13!(x, P2, P3, P4, P5, P6, P7, P8, P9, P10, P11, P12, P13);
1202 let n2 = Self::vm_pow2n(r);
1203 let z = (z + Self::ONE) * n2;
1204 let in_range = self.abs().cmp_lt(max_x);
1206 let in_range = in_range & self.is_finite();
1207 in_range.blend(z, Self::ZERO)
1208 }
1209
1210 #[inline]
1211 #[allow(non_upper_case_globals)]
1212 fn exponent(self) -> f64x4 {
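    // isolates the biased exponent field, overlays it onto the bit pattern
    // of 2^52 so it reads as a valid float, then subtracts (2^52 + bias)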
    const_f64_as_f64x4!(pow2_52, 4503599627370496.0);
    const_f64_as_f64x4!(bias, 1023.0);
    let a = cast::<_, u64x4>(self);
    let b = a >> 52;
    let c = b | cast::<_, u64x4>(pow2_52);
    let d = cast::<_, f64x4>(c);
    let e = d - (pow2_52 + bias);
    e
  }

  #[inline]
  #[allow(non_upper_case_globals)]
  fn fraction_2(self) -> Self {
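    // keeps the mantissa bits and forces the exponent to that of 0.5, so the
    // result lies in [0.5, 1.0)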
    let t1 = cast::<_, u64x4>(self);
    let t2 = cast::<_, u64x4>(
      (t1 & u64x4::from(0x000FFFFFFFFFFFFF)) | u64x4::from(0x3FE0000000000000),
    );
    cast::<_, f64x4>(t2)
  }
  #[inline]
  fn is_zero_or_subnormal(self) -> Self {
    let t = cast::<_, i64x4>(self);
    let t = t & i64x4::splat(0x7FF0000000000000);
    i64x4::round_float(t.cmp_eq(i64x4::splat(0)))
  }
  #[inline]
  fn infinity() -> Self {
    cast::<_, f64x4>(i64x4::splat(0x7FF0000000000000))
  }
  #[inline]
  fn nan_log() -> Self {
    cast::<_, f64x4>(i64x4::splat(0x7FF8000000000000 | 0x101 << 29))
  }
  #[inline]
  fn nan_pow() -> Self {
    cast::<_, f64x4>(i64x4::splat(0x7FF8000000000000 | 0x101 << 29))
  }
  #[inline]
  fn sign_bit(self) -> Self {
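    // an arithmetic right shift smears the sign bit across each lane, so
    // negative lanes become all ones, which then compare not-equal to zero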
    let t1 = cast::<_, i64x4>(self);
    let t2 = t1 >> 63;
    !cast::<_, f64x4>(t2).cmp_eq(f64x4::ZERO)
  }

  #[inline]
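  /// Horizontal add: returns the sum of all four lanes as a single f64.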
  pub fn reduce_add(self) -> f64 {
    pick! {
      if #[cfg(target_feature="avx")] {
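        // add the high 128-bit half onto the low half, then fold the two
        // remaining lanes together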
        let lo = cast_to_m128d_from_m256d(self.avx);
        let hi = extract_m128d_from_m256d::<1>(self.avx);
        let lo = add_m128d(lo, hi);
        let hi64 = unpack_high_m128d(lo, lo);
        let sum = add_m128d_s(lo, hi64);
        get_f64_from_m128d_s(sum)
      } else {
        self.a.reduce_add() + self.b.reduce_add()
      }
    }
  }

  #[inline]
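  /// Lanewise natural logarithm.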
  #[must_use]
  #[allow(non_upper_case_globals)]
  pub fn ln(self) -> Self {
    const_f64_as_f64x4!(HALF, 0.5);
    const_f64_as_f64x4!(P0, 7.70838733755885391666E0);
    const_f64_as_f64x4!(P1, 1.79368678507819816313E1);
    const_f64_as_f64x4!(P2, 1.44989225341610930846E1);
    const_f64_as_f64x4!(P3, 4.70579119878881725854E0);
    const_f64_as_f64x4!(P4, 4.97494994976747001425E-1);
    const_f64_as_f64x4!(P5, 1.01875663804580931796E-4);

    const_f64_as_f64x4!(Q0, 2.31251620126765340583E1);
    const_f64_as_f64x4!(Q1, 7.11544750618563894466E1);
    const_f64_as_f64x4!(Q2, 8.29875266912776603211E1);
    const_f64_as_f64x4!(Q3, 4.52279145837532221105E1);
    const_f64_as_f64x4!(Q4, 1.12873587189167450590E1);
    const_f64_as_f64x4!(LN2F_HI, 0.693359375);
    const_f64_as_f64x4!(LN2F_LO, -2.12194440e-4);
    const_f64_as_f64x4!(VM_SQRT2, 1.414213562373095048801);
    // smallest normal f64; the previous value here, 1.17549435E-38, was the
    // f32 smallest normal, which wrongly flagged tiny-but-normal f64 inputs
    // as underflow
    const_f64_as_f64x4!(VM_SMALLEST_NORMAL, 2.2250738585072014E-308);

    let x1 = self;
    let x = Self::fraction_2(x1);
    let e = Self::exponent(x1);
    let mask = x.cmp_gt(VM_SQRT2 * HALF);
    let x = (!mask).blend(x + x, x);
    let fe = mask.blend(e + Self::ONE, e);
    let x = x - Self::ONE;
    let px = polynomial_5!(x, P0, P1, P2, P3, P4, P5);
    let x2 = x * x;
    let px = x2 * x * px;
    let qx = polynomial_5n!(x, Q0, Q1, Q2, Q3, Q4);
    let res = px / qx;
    let res = fe.mul_add(LN2F_LO, res);
    let res = res + x2.mul_neg_add(HALF, x);
    let res = fe.mul_add(LN2F_HI, res);
    let overflow = !self.is_finite();
    let underflow = x1.cmp_lt(VM_SMALLEST_NORMAL);
    let mask = overflow | underflow;
    if !mask.any() {
      res
    } else {
      let is_zero = self.is_zero_or_subnormal();
      let res = underflow.blend(Self::nan_log(), res);
      let res = is_zero.blend(Self::infinity(), res);
      let res = overflow.blend(self, res);
      res
    }
  }

  #[inline]
  #[must_use]
  pub fn log2(self) -> Self {
    Self::ln(self) * Self::LOG2_E
  }
  #[inline]
  #[must_use]
  pub fn log10(self) -> Self {
    Self::ln(self) * Self::LOG10_E
  }

  #[inline]
  #[must_use]
  #[allow(non_upper_case_globals)]
  pub fn pow_f64x4(self, y: Self) -> Self {
    const_f64_as_f64x4!(ln2d_hi, 0.693145751953125);
    const_f64_as_f64x4!(ln2d_lo, 1.42860682030941723212E-6);
    const_f64_as_f64x4!(P0log, 2.0039553499201281259648E1);
    const_f64_as_f64x4!(P1log, 5.7112963590585538103336E1);
    const_f64_as_f64x4!(P2log, 6.0949667980987787057556E1);
    const_f64_as_f64x4!(P3log, 2.9911919328553073277375E1);
    const_f64_as_f64x4!(P4log, 6.5787325942061044846969E0);
    const_f64_as_f64x4!(P5log, 4.9854102823193375972212E-1);
    const_f64_as_f64x4!(P6log, 4.5270000862445199635215E-5);
    const_f64_as_f64x4!(Q0log, 6.0118660497603843919306E1);
    const_f64_as_f64x4!(Q1log, 2.1642788614495947685003E2);
    const_f64_as_f64x4!(Q2log, 3.0909872225312059774938E2);
    const_f64_as_f64x4!(Q3log, 2.2176239823732856465394E2);
    const_f64_as_f64x4!(Q4log, 8.3047565967967209469434E1);
    const_f64_as_f64x4!(Q5log, 1.5062909083469192043167E1);

    const_f64_as_f64x4!(p2, 1.0 / 2.0);
    const_f64_as_f64x4!(p3, 1.0 / 6.0);
    const_f64_as_f64x4!(p4, 1.0 / 24.0);
    const_f64_as_f64x4!(p5, 1.0 / 120.0);
    const_f64_as_f64x4!(p6, 1.0 / 720.0);
    const_f64_as_f64x4!(p7, 1.0 / 5040.0);
    const_f64_as_f64x4!(p8, 1.0 / 40320.0);
    const_f64_as_f64x4!(p9, 1.0 / 362880.0);
    const_f64_as_f64x4!(p10, 1.0 / 3628800.0);
    const_f64_as_f64x4!(p11, 1.0 / 39916800.0);
    const_f64_as_f64x4!(p12, 1.0 / 479001600.0);
    const_f64_as_f64x4!(p13, 1.0 / 6227020800.0);

    let x1 = self.abs();
    let x = x1.fraction_2();
    let mask = x.cmp_gt(f64x4::SQRT_2 * f64x4::HALF);
    let x = (!mask).blend(x + x, x);
    let x = x - f64x4::ONE;
    let x2 = x * x;
    let px = polynomial_6!(x, P0log, P1log, P2log, P3log, P4log, P5log, P6log);
    let px = px * x * x2;
    let qx = polynomial_6n!(x, Q0log, Q1log, Q2log, Q3log, Q4log, Q5log);
    let lg1 = px / qx;

    let ef = x1.exponent();
    let ef = mask.blend(ef + f64x4::ONE, ef);
    let e1 = (ef * y).round();
    let yr = ef.mul_sub(y, e1);

    let lg = f64x4::HALF.mul_neg_add(x2, x) + lg1;
    let x2err = (f64x4::HALF * x).mul_sub(x, f64x4::HALF * x2);
    let lg_err = f64x4::HALF.mul_add(x2, lg - x) - lg1;

    let e2 = (lg * y * f64x4::LOG2_E).round();
    let v = lg.mul_sub(y, e2 * ln2d_hi);
    let v = e2.mul_neg_add(ln2d_lo, v);
    let v = v - (lg_err + x2err).mul_sub(y, yr * f64x4::LN_2);

    let x = v;
    let e3 = (x * f64x4::LOG2_E).round();
    let x = e3.mul_neg_add(f64x4::LN_2, x);
    let z =
      polynomial_13m!(x, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13)
        + f64x4::ONE;
    let ee = e1 + e2 + e3;
    let ei = cast::<_, i64x4>(ee.round_int());
    let ej = cast::<_, i64x4>(ei + (cast::<_, i64x4>(z) >> 52));

    let overflow = cast::<_, f64x4>(!ej.cmp_lt(i64x4::splat(0x07FF)))
      | ee.cmp_gt(f64x4::splat(3000.0));
    let underflow = cast::<_, f64x4>(!ej.cmp_gt(i64x4::splat(0x000)))
      | ee.cmp_lt(f64x4::splat(-3000.0));

    let z = cast::<_, f64x4>(cast::<_, i64x4>(z) + (ei << 52));

    let z = if (overflow | underflow).any() {
      let z = underflow.blend(f64x4::ZERO, z);
      overflow.blend(Self::infinity(), z)
    } else {
      z
    };

    let x_zero = self.is_zero_or_subnormal();
    let z = x_zero.blend(
      y.cmp_lt(f64x4::ZERO).blend(
        Self::infinity(),
        y.cmp_eq(f64x4::ZERO).blend(f64x4::ONE, f64x4::ZERO),
      ),
      z,
    );

    let x_sign = self.sign_bit();

    let z = if x_sign.any() {
      let yi = y.cmp_eq(y.round());
      let y_odd = cast::<_, i64x4>(y.round_int() << 63).round_float();
      let z1 =
        yi.blend(z | y_odd, self.cmp_eq(Self::ZERO).blend(z, Self::nan_pow()));
      x_sign.blend(z1, z)
    } else {
      z
    };

    let x_finite = self.is_finite();
    let y_finite = y.is_finite();
    let e_finite = ee.is_finite();

    if (x_finite & y_finite & (e_finite | x_zero)).all() {
      return z;
    }

    (self.is_nan() | y.is_nan()).blend(self + y, z)
  }
  #[inline]
  pub fn powf(self, y: f64) -> Self {
    Self::pow_f64x4(self, f64x4::splat(y))
  }

  #[inline]
  pub fn to_array(self) -> [f64; 4] {
    cast(self)
  }

  #[inline]
  pub fn as_array_ref(&self) -> &[f64; 4] {
    cast_ref(self)
  }

  #[inline]
  pub fn as_array_mut(&mut self) -> &mut [f64; 4] {
    cast_mut(self)
  }
}

impl Not for f64x4 {
  type Output = Self;
  #[inline]
  fn not(self) -> Self {
    pick! {
      if #[cfg(target_feature="avx")] {
        Self { avx: self.avx.not() }
      } else {
        Self {
          a : self.a.not(),
          b : self.b.not(),
        }
      }
    }
  }
}