use super::*;

pick! {
  if #[cfg(target_feature="sse2")] {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(16))]
    pub struct u32x4 { sse: m128i }
  } else if #[cfg(target_feature="simd128")] {
    use core::arch::wasm32::*;

    #[derive(Clone, Copy)]
    #[repr(transparent)]
    pub struct u32x4 { simd: v128 }

    impl Default for u32x4 {
      fn default() -> Self {
        Self::splat(0)
      }
    }

    impl PartialEq for u32x4 {
      fn eq(&self, other: &Self) -> bool {
        u32x4_all_true(u32x4_eq(self.simd, other.simd))
      }
    }

    impl Eq for u32x4 { }
  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
    use core::arch::aarch64::*;
    #[repr(C)]
    #[derive(Copy, Clone)]
    pub struct u32x4 { neon: uint32x4_t }

    impl Default for u32x4 {
      #[inline]
      #[must_use]
      fn default() -> Self {
        Self::splat(0)
      }
    }

    impl PartialEq for u32x4 {
      #[inline]
      #[must_use]
      fn eq(&self, other: &Self) -> bool {
        // All four lanes are equal exactly when the lanewise compare mask has
        // every bit set, i.e. when its minimum lane is `u32::MAX`.
        unsafe { vminvq_u32(vceqq_u32(self.neon, other.neon)) == u32::MAX }
      }
    }

    impl Eq for u32x4 { }
  } else {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(16))]
    pub struct u32x4 { arr: [u32; 4] }
  }
}

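// `int_uint_consts!` is shared macro plumbing from the crate root; presumably
// it emits the usual associated constants (`ZERO`/`ONE`/`MAX`-style) plus the
// `const_u32_as_u32x4` const-constructor helper named in the invocation.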
int_uint_consts!(u32, 4, u32x4, u32x4, u32a4, const_u32_as_u32x4, 128);

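// Safety: every backing representation above is a plain 128-bit value with no
// padding and no invalid bit patterns, so all-zeroes is valid (`Zeroable`)
// and any byte pattern is valid (`Pod`).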
unsafe impl Zeroable for u32x4 {}
unsafe impl Pod for u32x4 {}

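// Each operator below dispatches at compile time via `pick!` to the best
// available backend (SSE, wasm `simd128`, AArch64 NEON), with a scalar
// per-lane fallback. The scalar paths use `wrapping_*` so all backends agree
// on overflow behavior.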
impl Add for u32x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: add_i32_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u32x4_add(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vaddq_u32(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].wrapping_add(rhs.arr[0]),
          self.arr[1].wrapping_add(rhs.arr[1]),
          self.arr[2].wrapping_add(rhs.arr[2]),
          self.arr[3].wrapping_add(rhs.arr[3]),
        ]}
      }
    }
  }
}

impl Sub for u32x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: sub_i32_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u32x4_sub(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vsubq_u32(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].wrapping_sub(rhs.arr[0]),
          self.arr[1].wrapping_sub(rhs.arr[1]),
          self.arr[2].wrapping_sub(rhs.arr[2]),
          self.arr[3].wrapping_sub(rhs.arr[3]),
        ]}
      }
    }
  }
}

impl Mul for u32x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn mul(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse4.1")] {
        // `mul_32_m128i` (`_mm_mullo_epi32`) requires SSE4.1; plain SSE2
        // targets take the scalar fallback below.
        Self { sse: mul_32_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u32x4_mul(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vmulq_u32(self.neon, rhs.neon) } }
      } else {
        let arr1: [u32; 4] = cast(self);
        let arr2: [u32; 4] = cast(rhs);
        cast([
          arr1[0].wrapping_mul(arr2[0]),
          arr1[1].wrapping_mul(arr2[1]),
          arr1[2].wrapping_mul(arr2[2]),
          arr1[3].wrapping_mul(arr2[3]),
        ])
      }
    }
  }
}

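// Mixed scalar/vector operators: splat the scalar to all four lanes and
// reuse the vector impls above.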
impl Add<u32> for u32x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: u32) -> Self::Output {
    self.add(Self::splat(rhs))
  }
}

impl Sub<u32> for u32x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: u32) -> Self::Output {
    self.sub(Self::splat(rhs))
  }
}

impl Mul<u32> for u32x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn mul(self, rhs: u32) -> Self::Output {
    self.mul(Self::splat(rhs))
  }
}

impl Add<u32x4> for u32 {
  type Output = u32x4;
  #[inline]
  #[must_use]
  fn add(self, rhs: u32x4) -> Self::Output {
    u32x4::splat(self).add(rhs)
  }
}

impl Sub<u32x4> for u32 {
  type Output = u32x4;
  #[inline]
  #[must_use]
  fn sub(self, rhs: u32x4) -> Self::Output {
    u32x4::splat(self).sub(rhs)
  }
}

impl Mul<u32x4> for u32 {
  type Output = u32x4;
  #[inline]
  #[must_use]
  fn mul(self, rhs: u32x4) -> Self::Output {
    u32x4::splat(self).mul(rhs)
  }
}

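// Lanewise bitwise AND/OR/XOR: lanes never interact, so each SIMD backend is
// a single full-width bit instruction and the scalar path needs no wrapping.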
impl BitAnd for u32x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitand(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitand_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_and(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vandq_u32(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].bitand(rhs.arr[0]),
          self.arr[1].bitand(rhs.arr[1]),
          self.arr[2].bitand(rhs.arr[2]),
          self.arr[3].bitand(rhs.arr[3]),
        ]}
      }
    }
  }
}

impl BitOr for u32x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitor_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_or(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vorrq_u32(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].bitor(rhs.arr[0]),
          self.arr[1].bitor(rhs.arr[1]),
          self.arr[2].bitor(rhs.arr[2]),
          self.arr[3].bitor(rhs.arr[3]),
        ]}
      }
    }
  }
}

impl BitXor for u32x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitxor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitxor_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_xor(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: veorq_u32(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].bitxor(rhs.arr[0]),
          self.arr[1].bitxor(rhs.arr[1]),
          self.arr[2].bitxor(rhs.arr[2]),
          self.arr[3].bitxor(rhs.arr[3]),
        ]}
      }
    }
  }
}

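// Shift-by-scalar: all four lanes shift by the same amount. The macros below
// stamp out `Shl`/`Shr` for every primitive integer type, so `v << 3u8` and
// `v >> 3i64` both work.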
macro_rules! impl_shl_t_for_u32x4 {
  ($($shift_type:ty),+ $(,)?) => {
    $(impl Shl<$shift_type> for u32x4 {
      type Output = Self;
      #[inline]
      #[must_use]
      fn shl(self, rhs: $shift_type) -> Self::Output {
        pick! {
          if #[cfg(target_feature="sse2")] {
            let shift = cast([rhs as u64, 0]);
            Self { sse: shl_all_u32_m128i(self.sse, shift) }
          } else if #[cfg(target_feature="simd128")] {
            Self { simd: u32x4_shl(self.simd, rhs as u32) }
          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
            unsafe { Self { neon: vshlq_u32(self.neon, vmovq_n_s32(rhs as i32)) } }
          } else {
            let u = rhs as u64;
            Self { arr: [
              self.arr[0] << u,
              self.arr[1] << u,
              self.arr[2] << u,
              self.arr[3] << u,
            ]}
          }
        }
      }
    })+
  };
}
impl_shl_t_for_u32x4!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);

macro_rules! impl_shr_t_for_u32x4 {
  ($($shift_type:ty),+ $(,)?) => {
    $(impl Shr<$shift_type> for u32x4 {
      type Output = Self;
      #[inline]
      #[must_use]
      fn shr(self, rhs: $shift_type) -> Self::Output {
        pick! {
          if #[cfg(target_feature="sse2")] {
            let shift = cast([rhs as u64, 0]);
            Self { sse: shr_all_u32_m128i(self.sse, shift) }
          } else if #[cfg(target_feature="simd128")] {
            Self { simd: u32x4_shr(self.simd, rhs as u32) }
          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
            // NEON has no right-shift-by-vector; shifting left by a negative
            // amount is the idiomatic equivalent.
            unsafe { Self { neon: vshlq_u32(self.neon, vmovq_n_s32(-(rhs as i32))) } }
          } else {
            let u = rhs as u64;
            Self { arr: [
              self.arr[0] >> u,
              self.arr[1] >> u,
              self.arr[2] >> u,
              self.arr[3] >> u,
            ]}
          }
        }
      }
    })+
  };
}
impl_shr_t_for_u32x4!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);

impl u32x4 {
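  /// Wraps a `[u32; 4]` in a `u32x4`, lane 0 first.
  ///
  /// A minimal doc-test sketch, assuming this is the `wide` crate so the type
  /// is reachable as `wide::u32x4`:
  ///
  /// ```
  /// # use wide::u32x4;
  /// let v = u32x4::new([1, 2, 3, 4]);
  /// assert_eq!(v.to_array(), [1, 2, 3, 4]);
  /// ```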
  #[inline]
  #[must_use]
  pub fn new(array: [u32; 4]) -> Self {
    Self::from(array)
  }
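  /// Lanewise `self == rhs`: a lane becomes all ones (`u32::MAX`) when equal
  /// and all zeros otherwise, which is the mask form that `blend` consumes.
  ///
  /// Illustrative doc-test, under the same `wide::u32x4` path assumption as
  /// `new`:
  ///
  /// ```
  /// # use wide::u32x4;
  /// let a = u32x4::new([1, 2, 3, 4]);
  /// let b = u32x4::new([1, 0, 3, 0]);
  /// assert_eq!(a.cmp_eq(b).to_array(), [u32::MAX, 0, u32::MAX, 0]);
  /// ```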
  #[inline]
  #[must_use]
  pub fn cmp_eq(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: cmp_eq_mask_i32_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u32x4_eq(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vceqq_u32(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          if self.arr[0] == rhs.arr[0] { u32::MAX } else { 0 },
          if self.arr[1] == rhs.arr[1] { u32::MAX } else { 0 },
          if self.arr[2] == rhs.arr[2] { u32::MAX } else { 0 },
          if self.arr[3] == rhs.arr[3] { u32::MAX } else { 0 },
        ]}
      }
    }
  }
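  /// Lanewise unsigned `self > rhs`, as an all-ones/all-zeros mask per lane.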
  #[inline]
  #[must_use]
  pub fn cmp_gt(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        // SSE2 only has a *signed* 32-bit compare; flipping the top bit of
        // both operands maps unsigned ordering onto signed ordering.
        let highbit = u32x4::splat(1 << 31);
        Self { sse: cmp_gt_mask_i32_m128i((self ^ highbit).sse, (rhs ^ highbit).sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u32x4_gt(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vcgtq_u32(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          if self.arr[0] > rhs.arr[0] { u32::MAX } else { 0 },
          if self.arr[1] > rhs.arr[1] { u32::MAX } else { 0 },
          if self.arr[2] > rhs.arr[2] { u32::MAX } else { 0 },
          if self.arr[3] > rhs.arr[3] { u32::MAX } else { 0 },
        ]}
      }
    }
  }
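  /// Lanewise unsigned `self < rhs`, as an all-ones/all-zeros mask per lane.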
  #[inline]
  #[must_use]
  pub fn cmp_lt(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        // Same signed/unsigned high-bit trick as `cmp_gt`.
        let highbit = u32x4::splat(1 << 31);
        Self { sse: cmp_lt_mask_i32_m128i((self ^ highbit).sse, (rhs ^ highbit).sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u32x4_lt(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vcltq_u32(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          if self.arr[0] < rhs.arr[0] { u32::MAX } else { 0 },
          if self.arr[1] < rhs.arr[1] { u32::MAX } else { 0 },
          if self.arr[2] < rhs.arr[2] { u32::MAX } else { 0 },
          if self.arr[3] < rhs.arr[3] { u32::MAX } else { 0 },
        ]}
      }
    }
  }
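  /// Treats `self` as a lane mask, taking bits from `t` where the mask is set
  /// and from `f` where it is clear. Intended for all-ones/all-zeros masks
  /// such as the `cmp_*` methods produce.
  ///
  /// Doc-test sketch, same `wide::u32x4` path assumption:
  ///
  /// ```
  /// # use wide::u32x4;
  /// let a = u32x4::new([1, 2, 3, 4]);
  /// let b = u32x4::new([1, 0, 3, 0]);
  /// // Equal lanes pick 100, unequal lanes pick 0.
  /// let picked = a.cmp_eq(b).blend(u32x4::splat(100), u32x4::splat(0));
  /// assert_eq!(picked.to_array(), [100, 0, 100, 0]);
  /// ```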
  #[inline]
  #[must_use]
  pub fn blend(self, t: Self, f: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.1")] {
        Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vbslq_u32(self.neon, t.neon, f.neon) } }
      } else {
        generic_bit_blend(self, t, f)
      }
    }
  }
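  /// Lanewise unsigned maximum (plain SSE2 without SSE4.1 takes the scalar
  /// path, since `max_u32_m128i` needs SSE4.1).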
  #[inline]
  #[must_use]
  pub fn max(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.1")] {
        Self { sse: max_u32_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u32x4_max(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vmaxq_u32(self.neon, rhs.neon) } }
      } else {
        let arr: [u32; 4] = cast(self);
        let rhs: [u32; 4] = cast(rhs);
        cast([
          arr[0].max(rhs[0]),
          arr[1].max(rhs[1]),
          arr[2].max(rhs[2]),
          arr[3].max(rhs[3]),
        ])
      }
    }
  }
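  /// Lanewise unsigned minimum (plain SSE2 without SSE4.1 takes the scalar
  /// path, since `min_u32_m128i` needs SSE4.1).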
  #[inline]
  #[must_use]
  pub fn min(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.1")] {
        Self { sse: min_u32_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u32x4_min(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vminq_u32(self.neon, rhs.neon) } }
      } else {
        let arr: [u32; 4] = cast(self);
        let rhs: [u32; 4] = cast(rhs);
        cast([
          arr[0].min(rhs[0]),
          arr[1].min(rhs[1]),
          arr[2].min(rhs[2]),
          arr[3].min(rhs[3]),
        ])
      }
    }
  }

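  /// Copies the four lanes out into a plain `[u32; 4]`.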
  #[inline]
  pub fn to_array(self) -> [u32; 4] {
    cast(self)
  }

  /// Views the lanes as a `&[u32; 4]` without copying.
  #[inline]
  pub fn as_array_ref(&self) -> &[u32; 4] {
    cast_ref(self)
  }

  /// Views the lanes as a `&mut [u32; 4]`, allowing in-place lane edits.
  #[inline]
  pub fn as_array_mut(&mut self) -> &mut [u32; 4] {
    cast_mut(self)
  }
}