diff --git a/coresimd/aarch64/neon.rs b/coresimd/aarch64/neon.rs index 3a3e9c612f..7fdd580335 100644 --- a/coresimd/aarch64/neon.rs +++ b/coresimd/aarch64/neon.rs @@ -1,9 +1,11 @@ //! ARMv8 ASIMD intrinsics +#![allow(non_camel_case_types)] + // FIXME: replace neon with asimd use coresimd::arm::*; -use coresimd::simd_llvm::simd_add; +use coresimd::simd_llvm::*; #[cfg(test)] use stdsimd_test::assert_instr; @@ -12,8 +14,57 @@ types! { pub struct float64x1_t(f64); // FIXME: check this! /// ARM-specific 128-bit wide vector of two packed `f64`. pub struct float64x2_t(f64, f64); + /// ARM-specific 64-bit wide vector of one packed `p64`. + pub struct poly64x1_t(i64); // FIXME: check this! + /// ARM-specific 64-bit wide vector of two packed `p64`. + pub struct poly64x2_t(i64, i64); // FIXME: check this! } +/// ARM-specific type containing two `int8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct int8x16x2_t(pub int8x16_t, pub int8x16_t); +/// ARM-specific type containing three `int8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct int8x16x3_t(pub int8x16_t, pub int8x16_t, pub int8x16_t); +/// ARM-specific type containing four `int8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct int8x16x4_t( + pub int8x16_t, + pub int8x16_t, + pub int8x16_t, + pub int8x16_t, +); + +/// ARM-specific type containing two `uint8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct uint8x16x2_t(pub uint8x16_t, pub uint8x16_t); +/// ARM-specific type containing three `uint8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct uint8x16x3_t(pub uint8x16_t, pub uint8x16_t, pub uint8x16_t); +/// ARM-specific type containing four `uint8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct uint8x16x4_t( + pub uint8x16_t, + pub uint8x16_t, + pub uint8x16_t, + pub uint8x16_t, +); + +/// ARM-specific type containing two `poly8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct poly8x16x2_t(pub poly8x16_t, pub poly8x16_t); +/// ARM-specific type containing three `poly8x16_t` vectors. 
+#[derive(Copy, Clone)] +pub struct poly8x16x3_t(pub poly8x16_t, pub poly8x16_t, pub poly8x16_t); +/// ARM-specific type containing four `poly8x16_t` vectors. +#[derive(Copy, Clone)] +pub struct poly8x16x4_t( + pub poly8x16_t, + pub poly8x16_t, + pub poly8x16_t, + pub poly8x16_t, +); + #[allow(improper_ctypes)] extern "C" { #[link_name = "llvm.aarch64.neon.smaxv.i8.v8i8"] @@ -115,6 +166,71 @@ extern "C" { fn vpmaxq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t; #[link_name = "llvm.aarch64.neon.fmaxp.v2f64"] fn vpmaxq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t; + + #[link_name = "llvm.aarch64.neon.tbl1.v8i8"] + fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t; + #[link_name = "llvm.aarch64.neon.tbl1.v16i8"] + fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t; + + #[link_name = "llvm.aarch64.neon.tbx1.v8i8"] + fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t; + #[link_name = "llvm.aarch64.neon.tbx1.v16i8"] + fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t; + + #[link_name = "llvm.aarch64.neon.tbl2.v8i8"] + fn vqtbl2(a0: int8x16_t, a1: int8x16_t, b: uint8x8_t) -> int8x8_t; + #[link_name = "llvm.aarch64.neon.tbl2.v16i8"] + fn vqtbl2q(a0: int8x16_t, a1: int8x16_t, b: uint8x16_t) -> int8x16_t; + + #[link_name = "llvm.aarch64.neon.tbx2.v8i8"] + fn vqtbx2( + a: int8x8_t, b0: int8x16_t, b1: int8x16_t, c: uint8x8_t, + ) -> int8x8_t; + #[link_name = "llvm.aarch64.neon.tbx2.v16i8"] + fn vqtbx2q( + a: int8x16_t, b0: int8x16_t, b1: int8x16_t, c: uint8x16_t, + ) -> int8x16_t; + + #[link_name = "llvm.aarch64.neon.tbl3.v8i8"] + fn vqtbl3( + a0: int8x16_t, a1: int8x16_t, a2: int8x16_t, b: uint8x8_t, + ) -> int8x8_t; + #[link_name = "llvm.aarch64.neon.tbl3.v16i8"] + fn vqtbl3q( + a0: int8x16_t, a1: int8x16_t, a2: int8x16_t, b: uint8x16_t, + ) -> int8x16_t; + + #[link_name = "llvm.aarch64.neon.tbx3.v8i8"] + fn vqtbx3( + a: int8x8_t, b0: int8x16_t, b1: int8x16_t, b2: int8x16_t, c: uint8x8_t, + ) -> int8x8_t; + #[link_name = 
"llvm.aarch64.neon.tbx3.v16i8"] + fn vqtbx3q( + a: int8x16_t, b0: int8x16_t, b1: int8x16_t, b2: int8x16_t, + c: uint8x16_t, + ) -> int8x16_t; + + #[link_name = "llvm.aarch64.neon.tbl4.v8i8"] + fn vqtbl4( + a0: int8x16_t, a1: int8x16_t, a2: int8x16_t, a3: int8x16_t, + b: uint8x8_t, + ) -> int8x8_t; + #[link_name = "llvm.aarch64.neon.tbl4.v16i8"] + fn vqtbl4q( + a0: int8x16_t, a1: int8x16_t, a2: int8x16_t, a3: int8x16_t, + b: uint8x16_t, + ) -> int8x16_t; + + #[link_name = "llvm.aarch64.neon.tbx4.v8i8"] + fn vqtbx4( + a: int8x8_t, b0: int8x16_t, b1: int8x16_t, b2: int8x16_t, + b3: int8x16_t, c: uint8x8_t, + ) -> int8x8_t; + #[link_name = "llvm.aarch64.neon.tbx4.v16i8"] + fn vqtbx4q( + a: int8x16_t, b0: int8x16_t, b1: int8x16_t, b2: int8x16_t, + b3: int8x16_t, c: uint8x16_t, + ) -> int8x16_t; } /// Vector add. @@ -517,6 +633,936 @@ pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { vpmaxq_f64_(a, b) } +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov))] +pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t { + simd_shuffle16( + low, + high, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + ) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov))] +pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t { + simd_shuffle8(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov))] +pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t { + simd_shuffle4(low, high, [0, 1, 2, 3]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov))] +pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t { + simd_shuffle2(low, high, [0, 1]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, 
assert_instr(mov))] +pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t { + simd_shuffle16( + low, + high, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + ) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov))] +pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t { + simd_shuffle8(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov))] +pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t { + simd_shuffle4(low, high, [0, 1, 2, 3]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov))] +pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t { + simd_shuffle2(low, high, [0, 1]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov))] +pub unsafe fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t { + simd_shuffle2(low, high, [0, 1]) +} + +/* FIXME: 16-bit float +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov))] +pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_t { + simd_shuffle8(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) +} +*/ + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov))] +pub unsafe fn vcombine_f32( + low: float32x2_t, high: float32x2_t, +) -> float32x4_t { + simd_shuffle4(low, high, [0, 1, 2, 3]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov))] +pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t { + simd_shuffle16( + low, + high, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + ) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, 
assert_instr(mov))] +pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t { + simd_shuffle8(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(mov))] +pub unsafe fn vcombine_f64( + low: float64x1_t, high: float64x1_t, +) -> float64x2_t { + simd_shuffle2(low, high, [0, 1]) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + vqtbl1_s8(vcombine_s8(a, ::mem::zeroed()), ::mem::transmute(b)) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + vqtbl1_u8(vcombine_u8(a, ::mem::zeroed()), b) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { + vqtbl1_p8(vcombine_p8(a, ::mem::zeroed()), b) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { + vqtbl1_s8(vcombine_s8(a.0, a.1), ::mem::transmute(b)) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + vqtbl1_u8(vcombine_u8(a.0, a.1), b) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + vqtbl1_p8(vcombine_p8(a.0, a.1), b) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { + vqtbl2_s8( + int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, ::mem::zeroed())), + 
::mem::transmute(b), + ) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + vqtbl2_u8( + uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, ::mem::zeroed())), + b, + ) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + vqtbl2_p8( + poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, ::mem::zeroed())), + b, + ) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { + vqtbl2_s8( + int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3)), + ::mem::transmute(b), + ) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + vqtbl2_u8( + uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)), + b, + ) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { + vqtbl2_p8( + poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)), + b, + ) +} + +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + use coresimd::simd::i8x8; + let r = vqtbx1_s8(a, vcombine_s8(b, ::mem::zeroed()), ::mem::transmute(c)); + let m: int8x8_t = simd_lt(c, ::mem::transmute(i8x8::splat(8))); + simd_select(m, r, a) +} + +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + use coresimd::simd::u8x8; + let r = 
vqtbx1_u8(a, vcombine_u8(b, ::mem::zeroed()), c); + let m: int8x8_t = simd_lt(c, ::mem::transmute(u8x8::splat(8))); + simd_select(m, r, a) +} + +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { + use coresimd::simd::u8x8; + let r = vqtbx1_p8(a, vcombine_p8(b, ::mem::zeroed()), c); + let m: int8x8_t = simd_lt(c, ::mem::transmute(u8x8::splat(8))); + simd_select(m, r, a) +} + +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { + vqtbx1_s8(a, vcombine_s8(b.0, b.1), ::mem::transmute(c)) +} + +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vtbx2_u8( + a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t, +) -> uint8x8_t { + vqtbx1_u8(a, vcombine_u8(b.0, b.1), c) +} + +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vtbx2_p8( + a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t, +) -> poly8x8_t { + vqtbx1_p8(a, vcombine_p8(b.0, b.1), c) +} + +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { + use coresimd::simd::i8x8; + let r = vqtbx2_s8( + a, + int8x16x2_t(vcombine_s8(b.0, b.1), vcombine_s8(b.2, ::mem::zeroed())), + ::mem::transmute(c), + ); + let m: int8x8_t = simd_lt(c, ::mem::transmute(i8x8::splat(24))); + simd_select(m, r, a) +} + +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vtbx3_u8( + a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t, +) -> uint8x8_t { + use coresimd::simd::u8x8; + let r = vqtbx2_u8( + a, + 
uint8x16x2_t(vcombine_u8(b.0, b.1), vcombine_u8(b.2, ::mem::zeroed())), + c, + ); + let m: int8x8_t = simd_lt(c, ::mem::transmute(u8x8::splat(24))); + simd_select(m, r, a) +} + +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vtbx3_p8( + a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t, +) -> poly8x8_t { + use coresimd::simd::u8x8; + let r = vqtbx2_p8( + a, + poly8x16x2_t(vcombine_p8(b.0, b.1), vcombine_p8(b.2, ::mem::zeroed())), + c, + ); + let m: int8x8_t = simd_lt(c, ::mem::transmute(u8x8::splat(24))); + simd_select(m, r, a) +} + +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { + vqtbx2_s8( + a, + int8x16x2_t(vcombine_s8(b.0, b.1), vcombine_s8(b.2, b.3)), + ::mem::transmute(c), + ) +} + +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vtbx4_u8( + a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t, +) -> uint8x8_t { + vqtbx2_u8( + a, + uint8x16x2_t(vcombine_u8(b.0, b.1), vcombine_u8(b.2, b.3)), + c, + ) +} + +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vtbx4_p8( + a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t, +) -> poly8x8_t { + vqtbx2_p8( + a, + poly8x16x2_t(vcombine_p8(b.0, b.1), vcombine_p8(b.2, b.3)), + c, + ) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl1_s8(t: int8x16_t, idx: uint8x8_t) -> int8x8_t { + vqtbl1(t, idx) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl1q_s8(t: int8x16_t, idx: uint8x16_t) -> int8x16_t { + vqtbl1q(t, idx) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, 
assert_instr(tbl))] +pub unsafe fn vqtbl1_u8(t: uint8x16_t, idx: uint8x8_t) -> uint8x8_t { + ::mem::transmute(vqtbl1(::mem::transmute(t), ::mem::transmute(idx))) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl1q_u8(t: uint8x16_t, idx: uint8x16_t) -> uint8x16_t { + ::mem::transmute(vqtbl1q(::mem::transmute(t), ::mem::transmute(idx))) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl1_p8(t: poly8x16_t, idx: uint8x8_t) -> poly8x8_t { + ::mem::transmute(vqtbl1(::mem::transmute(t), ::mem::transmute(idx))) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl1q_p8(t: poly8x16_t, idx: uint8x16_t) -> poly8x16_t { + ::mem::transmute(vqtbl1q(::mem::transmute(t), ::mem::transmute(idx))) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx1_s8( + a: int8x8_t, t: int8x16_t, idx: uint8x8_t, +) -> int8x8_t { + vqtbx1(a, t, idx) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx1q_s8( + a: int8x16_t, t: int8x16_t, idx: uint8x16_t, +) -> int8x16_t { + vqtbx1q(a, t, idx) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx1_u8( + a: uint8x8_t, t: uint8x16_t, idx: uint8x8_t, +) -> uint8x8_t { + ::mem::transmute(vqtbx1( + ::mem::transmute(a), + ::mem::transmute(t), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx1q_u8( + a: uint8x16_t, t: uint8x16_t, idx: uint8x16_t, +) -> uint8x16_t { + ::mem::transmute(vqtbx1q( + ::mem::transmute(a), + ::mem::transmute(t), + ::mem::transmute(idx), + )) +} 
+/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx1_p8( + a: poly8x8_t, t: poly8x16_t, idx: uint8x8_t, +) -> poly8x8_t { + ::mem::transmute(vqtbx1( + ::mem::transmute(a), + ::mem::transmute(t), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx1q_p8( + a: poly8x16_t, t: poly8x16_t, idx: uint8x16_t, +) -> poly8x16_t { + ::mem::transmute(vqtbx1q( + ::mem::transmute(a), + ::mem::transmute(t), + ::mem::transmute(idx), + )) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl2_s8(t: int8x16x2_t, idx: uint8x8_t) -> int8x8_t { + vqtbl2(t.0, t.1, idx) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl2q_s8(t: int8x16x2_t, idx: uint8x16_t) -> int8x16_t { + vqtbl2q(t.0, t.1, idx) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl2_u8(t: uint8x16x2_t, idx: uint8x8_t) -> uint8x8_t { + ::mem::transmute(vqtbl2( + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(idx), + )) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl2q_u8(t: uint8x16x2_t, idx: uint8x16_t) -> uint8x16_t { + ::mem::transmute(vqtbl2q( + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(idx), + )) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl2_p8(t: poly8x16x2_t, idx: uint8x8_t) -> poly8x8_t { + ::mem::transmute(vqtbl2( + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(idx), + )) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, 
assert_instr(tbl))] +pub unsafe fn vqtbl2q_p8(t: poly8x16x2_t, idx: uint8x16_t) -> poly8x16_t { + ::mem::transmute(vqtbl2q( + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx2_s8( + a: int8x8_t, t: int8x16x2_t, idx: uint8x8_t, +) -> int8x8_t { + vqtbx2(a, t.0, t.1, idx) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx2q_s8( + a: int8x16_t, t: int8x16x2_t, idx: uint8x16_t, +) -> int8x16_t { + vqtbx2q(a, t.0, t.1, idx) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx2_u8( + a: uint8x8_t, t: uint8x16x2_t, idx: uint8x8_t, +) -> uint8x8_t { + ::mem::transmute(vqtbx2( + ::mem::transmute(a), + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx2q_u8( + a: uint8x16_t, t: uint8x16x2_t, idx: uint8x16_t, +) -> uint8x16_t { + ::mem::transmute(vqtbx2q( + ::mem::transmute(a), + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx2_p8( + a: poly8x8_t, t: poly8x16x2_t, idx: uint8x8_t, +) -> poly8x8_t { + ::mem::transmute(vqtbx2( + ::mem::transmute(a), + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx2q_p8( + a: poly8x16_t, t: poly8x16x2_t, idx: uint8x16_t, +) -> poly8x16_t { + ::mem::transmute(vqtbx2q( + ::mem::transmute(a), + ::mem::transmute(t.0), + 
::mem::transmute(t.1), + ::mem::transmute(idx), + )) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl3_s8(t: int8x16x3_t, idx: uint8x8_t) -> int8x8_t { + vqtbl3(t.0, t.1, t.2, idx) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl3q_s8(t: int8x16x3_t, idx: uint8x16_t) -> int8x16_t { + vqtbl3q(t.0, t.1, t.2, idx) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl3_u8(t: uint8x16x3_t, idx: uint8x8_t) -> uint8x8_t { + ::mem::transmute(vqtbl3( + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(idx), + )) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl3q_u8(t: uint8x16x3_t, idx: uint8x16_t) -> uint8x16_t { + ::mem::transmute(vqtbl3q( + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(idx), + )) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl3_p8(t: poly8x16x3_t, idx: uint8x8_t) -> poly8x8_t { + ::mem::transmute(vqtbl3( + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(idx), + )) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl3q_p8(t: poly8x16x3_t, idx: uint8x16_t) -> poly8x16_t { + ::mem::transmute(vqtbl3q( + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx3_s8( + a: int8x8_t, t: int8x16x3_t, idx: uint8x8_t, +) -> int8x8_t { + vqtbx3(a, t.0, t.1, t.2, idx) +} +/// Extended table look-up 
+#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx3q_s8( + a: int8x16_t, t: int8x16x3_t, idx: uint8x16_t, +) -> int8x16_t { + vqtbx3q(a, t.0, t.1, t.2, idx) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx3_u8( + a: uint8x8_t, t: uint8x16x3_t, idx: uint8x8_t, +) -> uint8x8_t { + ::mem::transmute(vqtbx3( + ::mem::transmute(a), + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx3q_u8( + a: uint8x16_t, t: uint8x16x3_t, idx: uint8x16_t, +) -> uint8x16_t { + ::mem::transmute(vqtbx3q( + ::mem::transmute(a), + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx3_p8( + a: poly8x8_t, t: poly8x16x3_t, idx: uint8x8_t, +) -> poly8x8_t { + ::mem::transmute(vqtbx3( + ::mem::transmute(a), + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx3q_p8( + a: poly8x16_t, t: poly8x16x3_t, idx: uint8x16_t, +) -> poly8x16_t { + ::mem::transmute(vqtbx3q( + ::mem::transmute(a), + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(idx), + )) +} + +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl4_s8(t: int8x16x4_t, idx: uint8x8_t) -> int8x8_t { + vqtbl4(t.0, t.1, t.2, t.3, idx) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] 
+pub unsafe fn vqtbl4q_s8(t: int8x16x4_t, idx: uint8x16_t) -> int8x16_t { + vqtbl4q(t.0, t.1, t.2, t.3, idx) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl4_u8(t: uint8x16x4_t, idx: uint8x8_t) -> uint8x8_t { + ::mem::transmute(vqtbl4( + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(t.3), + ::mem::transmute(idx), + )) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl4q_u8(t: uint8x16x4_t, idx: uint8x16_t) -> uint8x16_t { + ::mem::transmute(vqtbl4q( + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(t.3), + ::mem::transmute(idx), + )) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl4_p8(t: poly8x16x4_t, idx: uint8x8_t) -> poly8x8_t { + ::mem::transmute(vqtbl4( + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(t.3), + ::mem::transmute(idx), + )) +} +/// Table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +pub unsafe fn vqtbl4q_p8(t: poly8x16x4_t, idx: uint8x16_t) -> poly8x16_t { + ::mem::transmute(vqtbl4q( + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(t.3), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx4_s8( + a: int8x8_t, t: int8x16x4_t, idx: uint8x8_t, +) -> int8x8_t { + vqtbx4(a, t.0, t.1, t.2, t.3, idx) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx4q_s8( + a: int8x16_t, t: int8x16x4_t, idx: uint8x16_t, +) -> int8x16_t { + vqtbx4q(a, t.0, t.1, t.2, t.3, idx) +} +/// Extended table look-up +#[inline] 
+#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx4_u8( + a: uint8x8_t, t: uint8x16x4_t, idx: uint8x8_t, +) -> uint8x8_t { + ::mem::transmute(vqtbx4( + ::mem::transmute(a), + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(t.3), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx4q_u8( + a: uint8x16_t, t: uint8x16x4_t, idx: uint8x16_t, +) -> uint8x16_t { + ::mem::transmute(vqtbx4q( + ::mem::transmute(a), + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(t.3), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx4_p8( + a: poly8x8_t, t: poly8x16x4_t, idx: uint8x8_t, +) -> poly8x8_t { + ::mem::transmute(vqtbx4( + ::mem::transmute(a), + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(t.3), + ::mem::transmute(idx), + )) +} +/// Extended table look-up +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +pub unsafe fn vqtbx4q_p8( + a: poly8x16_t, t: poly8x16x4_t, idx: uint8x16_t, +) -> poly8x16_t { + ::mem::transmute(vqtbx4q( + ::mem::transmute(a), + ::mem::transmute(t.0), + ::mem::transmute(t.1), + ::mem::transmute(t.2), + ::mem::transmute(t.3), + ::mem::transmute(idx), + )) +} + #[cfg(test)] mod tests { use coresimd::aarch64::*; @@ -977,4 +2023,46 @@ mod tests { )); assert_eq!(r, e); } + + macro_rules! 
test_vcombine { + ($test_id:ident => $fn_id:ident ([$($a:expr),*], [$($b:expr),*])) => { + #[allow(unused_assignments)] + #[simd_test(enable = "neon")] + unsafe fn $test_id() { + let a = [$($a),*]; + let b = [$($b),*]; + let e = [$($a),* $(, $b)*]; + let c = $fn_id(::mem::transmute(a), ::mem::transmute(b)); + let mut d = e; + d = ::mem::transmute(c); + assert_eq!(d, e); + } + } + } + + test_vcombine!(test_vcombine_s8 => vcombine_s8([3_i8, -4, 5, -6, 7, 8, 9, 10], [13_i8, -14, 15, -16, 17, 18, 19, 110])); + test_vcombine!(test_vcombine_u8 => vcombine_u8([3_u8, 4, 5, 6, 7, 8, 9, 10], [13_u8, 14, 15, 16, 17, 18, 19, 110])); + test_vcombine!(test_vcombine_p8 => vcombine_p8([3_u8, 4, 5, 6, 7, 8, 9, 10], [13_u8, 14, 15, 16, 17, 18, 19, 110])); + + test_vcombine!(test_vcombine_s16 => vcombine_s16([3_i16, -4, 5, -6], [13_i16, -14, 15, -16])); + test_vcombine!(test_vcombine_u16 => vcombine_u16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16])); + test_vcombine!(test_vcombine_p16 => vcombine_p16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16])); + // FIXME: 16-bit floats + // test_vcombine!(test_vcombine_f16 => vcombine_f16([3_f16, 4., 5., 6.], + // [13_f16, 14., 15., 16.])); + + test_vcombine!(test_vcombine_s32 => vcombine_s32([3_i32, -4], [13_i32, -14])); + test_vcombine!(test_vcombine_u32 => vcombine_u32([3_u32, 4], [13_u32, 14])); + // note: poly32x4 does not exist, and neither does vcombine_p32 + test_vcombine!(test_vcombine_f32 => vcombine_f32([3_f32, -4.], [13_f32, -14.])); + + test_vcombine!(test_vcombine_s64 => vcombine_s64([-3_i64], [13_i64])); + test_vcombine!(test_vcombine_u64 => vcombine_u64([3_u64], [13_u64])); + test_vcombine!(test_vcombine_p64 => vcombine_p64([3_u64], [13_u64])); + test_vcombine!(test_vcombine_f64 => vcombine_f64([-3_f64], [13_f64])); + } + +#[cfg(test)] +#[path = "../arm/table_lookup_tests.rs"] +mod table_lookup_tests; diff --git a/coresimd/arm/neon.rs b/coresimd/arm/neon.rs index 9ad9b1a03a..f823b25c05 100644 --- a/coresimd/arm/neon.rs +++ 
b/coresimd/arm/neon.rs @@ -65,6 +65,46 @@ types! { pub struct uint64x2_t(u64, u64); } +/// ARM-specific type containing two `int8x8_t` vectors. +#[derive(Copy, Clone)] +pub struct int8x8x2_t(pub int8x8_t, pub int8x8_t); +/// ARM-specific type containing three `int8x8_t` vectors. +#[derive(Copy, Clone)] +pub struct int8x8x3_t(pub int8x8_t, pub int8x8_t, pub int8x8_t); +/// ARM-specific type containing four `int8x8_t` vectors. +#[derive(Copy, Clone)] +pub struct int8x8x4_t(pub int8x8_t, pub int8x8_t, pub int8x8_t, pub int8x8_t); + +/// ARM-specific type containing two `uint8x8_t` vectors. +#[derive(Copy, Clone)] +pub struct uint8x8x2_t(pub uint8x8_t, pub uint8x8_t); +/// ARM-specific type containing three `uint8x8_t` vectors. +#[derive(Copy, Clone)] +pub struct uint8x8x3_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t); +/// ARM-specific type containing four `uint8x8_t` vectors. +#[derive(Copy, Clone)] +pub struct uint8x8x4_t( + pub uint8x8_t, + pub uint8x8_t, + pub uint8x8_t, + pub uint8x8_t, +); + +/// ARM-specific type containing two `poly8x8_t` vectors. +#[derive(Copy, Clone)] +pub struct poly8x8x2_t(pub poly8x8_t, pub poly8x8_t); +/// ARM-specific type containing three `poly8x8_t` vectors. +#[derive(Copy, Clone)] +pub struct poly8x8x3_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t); +/// ARM-specific type containing four `poly8x8_t` vectors. 
+#[derive(Copy, Clone)] +pub struct poly8x8x4_t( + pub poly8x8_t, + pub poly8x8_t, + pub poly8x8_t, + pub poly8x8_t, +); + #[allow(improper_ctypes)] extern "C" { #[cfg_attr( @@ -194,6 +234,35 @@ extern "C" { fn vpmaxf_v2f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; } +#[cfg(target_arch = "arm")] +#[allow(improper_ctypes)] +extern "C" { + #[link_name = "llvm.arm.neon.vtbl1"] + fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[link_name = "llvm.arm.neon.vtbl2"] + fn vtbl2(a0: int8x8_t, a1: int8x8_t, b: int8x8_t) -> int8x8_t; + #[link_name = "llvm.arm.neon.vtbl3"] + fn vtbl3(a0: int8x8_t, a1: int8x8_t, a2: int8x8_t, b: int8x8_t) -> int8x8_t; + #[link_name = "llvm.arm.neon.vtbl4"] + fn vtbl4( + a0: int8x8_t, a1: int8x8_t, a2: int8x8_t, a3: int8x8_t, b: int8x8_t, + ) -> int8x8_t; + + #[link_name = "llvm.arm.neon.vtbx1"] + fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + #[link_name = "llvm.arm.neon.vtbx2"] + fn vtbx2(a: int8x8_t, b0: int8x8_t, b1: int8x8_t, c: int8x8_t) -> int8x8_t; + #[link_name = "llvm.arm.neon.vtbx3"] + fn vtbx3( + a: int8x8_t, b0: int8x8_t, b1: int8x8_t, b2: int8x8_t, c: int8x8_t, + ) -> int8x8_t; + #[link_name = "llvm.arm.neon.vtbx4"] + fn vtbx4( + a: int8x8_t, b0: int8x8_t, b1: int8x8_t, b2: int8x8_t, b3: int8x8_t, + c: int8x8_t, + ) -> int8x8_t; +} + /// Vector add.
#[inline] #[target_feature(enable = "neon")] @@ -695,6 +764,308 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { vpmaxf_v2f32(a, b) } +/// Table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + vtbl1(a, b) +} + +/// Table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + ::mem::transmute(vtbl1(::mem::transmute(a), ::mem::transmute(b))) +} + +/// Table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + ::mem::transmute(vtbl1(::mem::transmute(a), ::mem::transmute(b))) +} + +/// Table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { + vtbl2(a.0, a.1, b) +} + +/// Table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + ::mem::transmute(vtbl2( + ::mem::transmute(a.0), + ::mem::transmute(a.1), + ::mem::transmute(b), + )) +} + +/// Table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + ::mem::transmute(vtbl2( + ::mem::transmute(a.0), + ::mem::transmute(a.1), + ::mem::transmute(b), + )) +} + +/// Table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { + 
vtbl3(a.0, a.1, a.2, b) +} + +/// Table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + ::mem::transmute(vtbl3( + ::mem::transmute(a.0), + ::mem::transmute(a.1), + ::mem::transmute(a.2), + ::mem::transmute(b), + )) +} + +/// Table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + ::mem::transmute(vtbl3( + ::mem::transmute(a.0), + ::mem::transmute(a.1), + ::mem::transmute(a.2), + ::mem::transmute(b), + )) +} + +/// Table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { + vtbl4(a.0, a.1, a.2, a.3, b) +} + +/// Table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + ::mem::transmute(vtbl4( + ::mem::transmute(a.0), + ::mem::transmute(a.1), + ::mem::transmute(a.2), + ::mem::transmute(a.3), + ::mem::transmute(b), + )) +} + +/// Table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbl))] +pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { + ::mem::transmute(vtbl4( + ::mem::transmute(a.0), + ::mem::transmute(a.1), + ::mem::transmute(a.2), + ::mem::transmute(a.3), + ::mem::transmute(b), + )) +} + +/// Extended table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + vtbx1(a, b, c) +} + +/// Extended table look-up +#[inline] +#[cfg(target_arch = "arm")] 
+#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + ::mem::transmute(vtbx1( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c), + )) +} + +/// Extended table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { + ::mem::transmute(vtbx1( + ::mem::transmute(a), + ::mem::transmute(b), + ::mem::transmute(c), + )) +} + +/// Extended table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { + vtbx2(a, b.0, b.1, c) +} + +/// Extended table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_u8( + a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t, +) -> uint8x8_t { + ::mem::transmute(vtbx2( + ::mem::transmute(a), + ::mem::transmute(b.0), + ::mem::transmute(b.1), + ::mem::transmute(c), + )) +} + +/// Extended table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx2_p8( + a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t, +) -> poly8x8_t { + ::mem::transmute(vtbx2( + ::mem::transmute(a), + ::mem::transmute(b.0), + ::mem::transmute(b.1), + ::mem::transmute(c), + )) +} + +/// Extended table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { + vtbx3(a, b.0, b.1, b.2, c) +} + +/// Extended table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbx))] +pub 
unsafe fn vtbx3_u8( + a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t, +) -> uint8x8_t { + ::mem::transmute(vtbx3( + ::mem::transmute(a), + ::mem::transmute(b.0), + ::mem::transmute(b.1), + ::mem::transmute(b.2), + ::mem::transmute(c), + )) +} + +/// Extended table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx3_p8( + a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t, +) -> poly8x8_t { + ::mem::transmute(vtbx3( + ::mem::transmute(a), + ::mem::transmute(b.0), + ::mem::transmute(b.1), + ::mem::transmute(b.2), + ::mem::transmute(c), + )) +} + +/// Extended table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { + vtbx4(a, b.0, b.1, b.2, b.3, c) +} + +/// Extended table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_u8( + a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t, +) -> uint8x8_t { + ::mem::transmute(vtbx4( + ::mem::transmute(a), + ::mem::transmute(b.0), + ::mem::transmute(b.1), + ::mem::transmute(b.2), + ::mem::transmute(b.3), + ::mem::transmute(c), + )) +} + +/// Extended table look-up +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vtbx))] +pub unsafe fn vtbx4_p8( + a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t, +) -> poly8x8_t { + ::mem::transmute(vtbx4( + ::mem::transmute(a), + ::mem::transmute(b.0), + ::mem::transmute(b.1), + ::mem::transmute(b.2), + ::mem::transmute(b.3), + ::mem::transmute(c), + )) +} + #[cfg(test)] mod tests { use coresimd::arm::*; @@ -1220,3 +1591,7 @@ mod tests { assert_eq!(r, e); } } + +#[cfg(test)] +#[path = "table_lookup_tests.rs"] +mod table_lookup_tests; diff --git a/coresimd/arm/table_lookup_tests.rs b/coresimd/arm/table_lookup_tests.rs new 
file mode 100644 index 0000000000..b153989e53 --- /dev/null +++ b/coresimd/arm/table_lookup_tests.rs @@ -0,0 +1,1042 @@ +//! Tests for ARM+v7+neon table lookup (vtbl, vtbx) intrinsics. +//! +//! These are included in `{arm, aarch64}::neon`. + +use super::*; + +#[cfg(target_arch = "aarch64")] +use coresimd::aarch64::*; + +#[cfg(target_arch = "arm")] +use coresimd::arm::*; + +use coresimd::simd::*; +use std::mem; +use stdsimd_test::simd_test; + +macro_rules! test_vtbl { + ($test_name:ident => $fn_id:ident: + - table[$table_t:ident]: [$($table_v:expr),*] | + $(- ctrl[$ctrl_t:ident]: [$($ctrl_v:expr),*] => [$($exp_v:expr),*])|* + ) => { + #[simd_test(enable = "neon")] + unsafe fn $test_name() { + // create table as array, and transmute it to + // arm's table type + let table: $table_t = ::mem::transmute([$($table_v),*]); + + // For each control vector, perform a table lookup and + // verify the result: + $( + { + let ctrl: $ctrl_t = ::mem::transmute([$($ctrl_v),*]); + let result = $fn_id(table, ::mem::transmute(ctrl)); + let result: $ctrl_t = ::mem::transmute(result); + let expected: $ctrl_t = ::mem::transmute([$($exp_v),*]); + assert_eq!(result, expected); + } + )* + } + } +} + +// ARM+v7+neon and AArch64+neon tests + +test_vtbl!( + test_vtbl1_s8 => vtbl1_s8: + - table[int8x8_t]: [0_i8, -11, 2, 3, 4, 5, 6, 7] | + - ctrl[i8x8]: [3_i8, 4, 1, 6, 0, 2, 7, 5] => [3_i8, 4, -11, 6, 0, 2, 7, 5] | + - ctrl[i8x8]: [3_i8, 8, 1, 9, 10, 2, 15, 5] => [3_i8, 0, -11, 0, 0, 2, 0, 5] +); + +test_vtbl!( + test_vtbl1_u8 => vtbl1_u8: + - table[uint8x8_t]: [0_u8, 1, 2, 3, 4, 5, 6, 7] | + - ctrl[u8x8]: [3_u8, 4, 1, 6, 0, 2, 7, 5] => [3_u8, 4, 1, 6, 0, 2, 7, 5] | + - ctrl[u8x8]: [3_u8, 8, 1, 9, 10, 2, 15, 5] => [3_u8, 0, 1, 0, 0, 2, 0, 5] +); + +test_vtbl!( + test_vtbl1_p8 => vtbl1_p8: + - table[poly8x8_t]: [0_u8, 1, 2, 3, 4, 5, 6, 7] | + - ctrl[u8x8]: [3_u8, 4, 1, 6, 0, 2, 7, 5] => [3_u8, 4, 1, 6, 0, 2, 7, 5] | + - ctrl[u8x8]: [3_u8, 8, 1, 9, 10, 2, 15, 5] => [3_u8, 0, 1, 0, 0, 2, 0, 5] +); 
+ +test_vtbl!( + test_vtbl2_s8 => vtbl2_s8: + - table[int8x8x2_t]: [ + 0_i8, -17, 34, 51, 68, 85, 102, 119, + -106, -93, -84, -117, -104, -116, -72, -121 + ] | + - ctrl[i8x8]: [127_i8, 15, 1, 14, 2, 13, 3, 12] => [0_i8, -121, -17, -72, 34, -116, 51, -104] | + - ctrl[i8x8]: [4_i8, 11, 16, 10, 6, 19, 7, 18] => [68_i8, -117, 0, -84, 102, 0, 119, 0] +); + +test_vtbl!( + test_vtbl2_u8 => vtbl2_u8: + - table[uint8x8x2_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 136, 153, 170, 187, 204, 221, 238, 255 + ] | + - ctrl[u8x8]: [127_u8, 15, 1, 14, 2, 13, 3, 12] => [0_u8, 255, 17, 238, 34, 221, 51, 204] | + - ctrl[u8x8]: [4_u8, 11, 16, 10, 6, 19, 7, 18] => [68_u8, 187, 0, 170, 102, 0, 119, 0] +); + +test_vtbl!( + test_vtbl2_p8 => vtbl2_p8: + - table[poly8x8x2_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 136, 153, 170, 187, 204, 221, 238, 255 + ] | + - ctrl[u8x8]: [127_u8, 15, 1, 14, 2, 13, 3, 12] => [0_u8, 255, 17, 238, 34, 221, 51, 204] | + - ctrl[u8x8]: [4_u8, 11, 16, 10, 6, 19, 7, 18] => [68_u8, 187, 0, 170, 102, 0, 119, 0] +); + +test_vtbl!( + test_vtbl3_s8 => vtbl3_s8: + - table[int8x8x3_t]: [ + 0_i8, -17, 34, 51, 68, 85, 102, 119, + -106, -93, -84, -117, -104, -116, -72, -121, + 0, 1, -2, 3, 4, -5, 6, 7 + ] | + - ctrl[i8x8]: [127_i8, 15, 1, 19, 2, 13, 21, 12] => [0_i8, -121, -17, 3, 34, -116, -5, -104] | + - ctrl[i8x8]: [4_i8, 11, 16, 10, 6, 27, 7, 18] => [68_i8, -117, 0, -84, 102, 0, 119, -2] +); + +test_vtbl!( + test_vtbl3_u8 => vtbl3_u8: + - table[uint8x8x3_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 136, 153, 170, 187, 204, 221, 238, 255, + 0, 1, 2, 3, 4, 5, 6, 7 + ] | + - ctrl[u8x8]: [127_u8, 15, 1, 19, 2, 13, 21, 12] => [0_u8, 255, 17, 3, 34, 221, 5, 204] | + - ctrl[u8x8]: [4_u8, 11, 16, 10, 6, 27, 7, 18] => [68_u8, 187, 0, 170, 102, 0, 119, 2] +); + +test_vtbl!( + test_vtbl3_p8 => vtbl3_p8: + - table[poly8x8x3_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 136, 153, 170, 187, 204, 221, 238, 255, + 0, 1, 2, 3, 4, 5, 6, 7 + ] | + - ctrl[u8x8]: [127_u8, 15, 1, 19, 
2, 13, 21, 12] => [0_u8, 255, 17, 3, 34, 221, 5, 204] | + - ctrl[u8x8]: [4_u8, 11, 16, 10, 6, 27, 7, 18] => [68_u8, 187, 0, 170, 102, 0, 119, 2] +); + +test_vtbl!( + test_vtbl4_s8 => vtbl4_s8: + - table[int8x8x4_t]: [ + 0_i8, -17, 34, 51, 68, 85, 102, 119, + -106, -93, -84, -117, -104, -116, -72, -121, + 0, 1, -2, 3, 4, -5, 6, 7, + 8, -9, 10, 11, 12, -13, 14, 15 + ] | + - ctrl[i8x8]: [127_i8, 15, 1, 19, 2, 13, 25, 12] => [0_i8, -121, -17, 3, 34, -116, -9, -104] | + - ctrl[i8x8]: [4_i8, 11, 32, 10, 6, 27, 7, 18] => [68_i8, -117, 0, -84, 102, 11, 119, -2] +); + +test_vtbl!( + test_vtbl4_u8 => vtbl4_u8: + - table[uint8x8x4_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 136, 153, 170, 187, 204, 221, 238, 255, + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15 + ] | + - ctrl[u8x8]: [127_u8, 15, 1, 19, 2, 13, 21, 12] => [0_u8, 255, 17, 3, 34, 221, 5, 204] | + - ctrl[u8x8]: [4_u8, 11, 16, 10, 6, 27, 7, 18] => [68_u8, 187, 0, 170, 102, 11, 119, 2] +); + +test_vtbl!( + test_vtbl4_p8 => vtbl4_p8: + - table[poly8x8x4_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 136, 153, 170, 187, 204, 221, 238, 255, + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15 + ] | + - ctrl[u8x8]: [127_u8, 15, 1, 19, 2, 13, 21, 12] => [0_u8, 255, 17, 3, 34, 221, 5, 204] | + - ctrl[u8x8]: [4_u8, 11, 16, 10, 6, 27, 7, 18] => [68_u8, 187, 0, 170, 102, 11, 119, 2] +); + +macro_rules! 
test_vtbx { + ($test_name:ident => $fn_id:ident: + - table[$table_t:ident]: [$($table_v:expr),*] | + - ext[$ext_t:ident]: [$($ext_v:expr),*] | + $(- ctrl[$ctrl_t:ident]: [$($ctrl_v:expr),*] => [$($exp_v:expr),*])|* + ) => { + #[simd_test(enable = "neon")] + unsafe fn $test_name() { + // create table as array, and transmute it to + // arm's table type + let table: $table_t = ::mem::transmute([$($table_v),*]); + let ext: $ext_t = ::mem::transmute([$($ext_v),*]); + + // For each control vector, perform a table lookup and + // verify the result: + $( + { + let ctrl: $ctrl_t = ::mem::transmute([$($ctrl_v),*]); + let result = $fn_id(ext, table, ::mem::transmute(ctrl)); + let result: $ctrl_t = ::mem::transmute(result); + let expected: $ctrl_t = ::mem::transmute([$($exp_v),*]); + assert_eq!(result, expected); + } + )* + } + } +} + +test_vtbx!( + test_vtbx1_s8 => vtbx1_s8: + - table[int8x8_t]: [0_i8, 1, 2, -3, 4, 5, 6, 7] | + - ext[int8x8_t]: [50_i8, 51, 52, 53, 54, 55, 56, 57] | + - ctrl[i8x8]: [3_u8, 4, 1, 6, 0, 2, 7, 5] => [-3_i8, 4, 1, 6, 0, 2, 7, 5] | + - ctrl[i8x8]: [3_u8, 8, 1, 9, 10, 2, 15, 5] => [-3_i8, 51, 1, 53, 54, 2, 56, 5] +); + +test_vtbx!( + test_vtbx1_u8 => vtbx1_u8: + - table[uint8x8_t]: [0_u8, 1, 2, 3, 4, 5, 6, 7] | + - ext[uint8x8_t]: [50_u8, 51, 52, 53, 54, 55, 56, 57] | + - ctrl[u8x8]: [3_u8, 4, 1, 6, 0, 2, 7, 5] => [3_u8, 4, 1, 6, 0, 2, 7, 5] | + - ctrl[u8x8]: [3_u8, 8, 1, 9, 10, 2, 15, 5] => [3_u8, 51, 1, 53, 54, 2, 56, 5] +); + +test_vtbx!( + test_vtbx1_p8 => vtbx1_p8: + - table[poly8x8_t]: [0_u8, 1, 2, 3, 4, 5, 6, 7] | + - ext[poly8x8_t]: [50_u8, 51, 52, 53, 54, 55, 56, 57] | + - ctrl[u8x8]: [3_u8, 4, 1, 6, 0, 2, 7, 5] => [3_u8, 4, 1, 6, 0, 2, 7, 5] | + - ctrl[u8x8]: [3_u8, 8, 1, 9, 10, 2, 15, 5] => [3_u8, 51, 1, 53, 54, 2, 56, 5] +); + +test_vtbx!( + test_vtbx2_s8 => vtbx2_s8: + - table[int8x8x2_t]: [0_i8, 1, 2, -3, 4, 5, 6, 7, 8, 9, -10, 11, 12, -13, 14, 15] | + - ext[int8x8_t]: [50_i8, 51, 52, 53, 54, 55, 56, 57] | + - ctrl[i8x8]: [3_u8, 4, 1, 
6, 10, 2, 7, 15] => [-3_i8, 4, 1, 6, -10, 2, 7, 15] | + - ctrl[i8x8]: [3_u8, 8, 1, 10, 17, 2, 15, 19] => [-3_i8, 8, 1, -10, 54, 2, 15, 57] +); + +test_vtbx!( + test_vtbx2_u8 => vtbx2_u8: + - table[uint8x8x2_t]: [0_i8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] | + - ext[uint8x8_t]: [50_i8, 51, 52, 53, 54, 55, 56, 57] | + - ctrl[u8x8]: [3_u8, 4, 1, 6, 10, 2, 7, 15] => [3_i8, 4, 1, 6, 10, 2, 7, 15] | + - ctrl[u8x8]: [3_u8, 8, 1, 10, 17, 2, 15, 19] => [3_i8, 8, 1, 10, 54, 2, 15, 57] +); + +test_vtbx!( + test_vtbx2_p8 => vtbx2_p8: + - table[poly8x8x2_t]: [0_i8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] | + - ext[poly8x8_t]: [50_i8, 51, 52, 53, 54, 55, 56, 57] | + - ctrl[u8x8]: [3_u8, 4, 1, 6, 10, 2, 7, 15] => [3_i8, 4, 1, 6, 10, 2, 7, 15] | + - ctrl[u8x8]: [3_u8, 8, 1, 10, 17, 2, 15, 19] => [3_i8, 8, 1, 10, 54, 2, 15, 57] +); + +test_vtbx!( + test_vtbx3_s8 => vtbx3_s8: + - table[int8x8x3_t]: [ + 0_i8, 1, 2, -3, 4, 5, 6, 7, + 8, 9, -10, 11, 12, -13, 14, 15, + 16, -17, 18, 19, 20, 21, 22, 23 ] | + - ext[int8x8_t]: [50_i8, 51, 52, 53, 54, 55, 56, 57] | + - ctrl[i8x8]: [3_u8, 4, 17, 22, 10, 2, 7, 15] => [-3_i8, 4, -17, 22, -10, 2, 7, 15] | + - ctrl[i8x8]: [3_u8, 8, 17, 10, 37, 2, 19, 29] => [-3_i8, 8, -17, -10, 54, 2, 19, 57] +); + +test_vtbx!( + test_vtbx3_u8 => vtbx3_u8: + - table[uint8x8x3_t]: [ + 0_i8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23 ] | + - ext[uint8x8_t]: [50_i8, 51, 52, 53, 54, 55, 56, 57] | + - ctrl[u8x8]: [3_u8, 4, 17, 22, 10, 2, 7, 15] => [3_i8, 4, 17, 22, 10, 2, 7, 15] | + - ctrl[u8x8]: [3_u8, 8, 17, 10, 37, 2, 19, 29] => [3_i8, 8, 17, 10, 54, 2, 19, 57] +); + +test_vtbx!( + test_vtbx3_p8 => vtbx3_p8: + - table[poly8x8x3_t]: [ + 0_i8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23 ] | + - ext[poly8x8_t]: [50_i8, 51, 52, 53, 54, 55, 56, 57] | + - ctrl[u8x8]: [3_u8, 4, 17, 22, 10, 2, 7, 15] => [3_i8, 4, 17, 22, 10, 2, 7, 15] | + - ctrl[u8x8]: [3_u8, 8, 17, 
10, 37, 2, 19, 29] => [3_i8, 8, 17, 10, 54, 2, 19, 57] +); + +test_vtbx!( + test_vtbx4_s8 => vtbx4_s8: + - table[int8x8x4_t]: [ + 0_i8, 1, 2, -3, 4, 5, 6, 7, + 8, 9, -10, 11, 12, -13, 14, 15, + 16, -17, 18, 19, 20, 21, 22, 23, + -24, 25, 26, -27, 28, -29, 30, 31] | + - ext[int8x8_t]: [50_i8, 51, 52, 53, 54, 55, 56, 57] | + - ctrl[i8x8]: [3_u8, 31, 17, 22, 10, 29, 7, 15] => [-3_i8, 31, -17, 22, -10, -29, 7, 15] | + - ctrl[i8x8]: [3_u8, 8, 17, 10, 37, 2, 19, 42] => [-3_i8, 8, -17, -10, 54, 2, 19, 57] +); + +test_vtbx!( + test_vtbx4_u8 => vtbx4_u8: + - table[uint8x8x4_t]: [ + 0_i8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31] | + - ext[uint8x8_t]: [50_i8, 51, 52, 53, 54, 55, 56, 57] | + - ctrl[u8x8]: [3_u8, 31, 17, 22, 10, 29, 7, 15] => [3_i8, 31, 17, 22, 10, 29, 7, 15] | + - ctrl[u8x8]: [3_u8, 8, 17, 10, 37, 2, 19, 42] => [3_i8, 8, 17, 10, 54, 2, 19, 57] +); + +test_vtbx!( + test_vtbx4_p8 => vtbx4_p8: + - table[poly8x8x4_t]: [ + 0_i8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31] | + - ext[poly8x8_t]: [50_i8, 51, 52, 53, 54, 55, 56, 57] | + - ctrl[u8x8]: [3_u8, 31, 17, 22, 10, 29, 7, 15] => [3_i8, 31, 17, 22, 10, 29, 7, 15] | + - ctrl[u8x8]: [3_u8, 8, 17, 10, 37, 2, 19, 42] => [3_i8, 8, 17, 10, 54, 2, 19, 57] +); + +// Aarch64 tests + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl1_s8 => vqtbl1_s8: + - table[int8x16_t]: [ + 0_i8, -17, 34, 51, 68, 85, 102, 119, + -106, -93, -84, -117, -104, -116, -72, -121 + ] | + - ctrl[i8x8]: [127_i8, 15, 1, 14, 2, 13, 3, 12] => [0_i8, -121, -17, -72, 34, -116, 51, -104] | + - ctrl[i8x8]: [4_i8, 11, 16, 10, 6, 19, 7, 18] => [68_i8, -117, 0, -84, 102, 0, 119, 0] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl1q_s8 => vqtbl1q_s8: + - table[int8x16_t]: [ + 0_i8, -17, 34, 51, 68, 85, 102, 119, + -106, -93, -84, -117, -104, -116, -72, -121 + ] | + - 
ctrl[i8x16]: [127_i8, 15, 1, 14, 2, 13, 3, 12, 4_i8, 11, 16, 10, 6, 19, 7, 18] + => [0_i8, -121, -17, -72, 34, -116, 51, -104, 68, -117, 0, -84, 102, 0, 119, 0] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl1_u8 => vqtbl1_u8: + - table[uint8x16_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 106, 93, 84, 117, 104, 116, 72, 121 + ] | + - ctrl[u8x8]: [127_u8, 15, 1, 14, 2, 13, 3, 12] => [0_u8, 121, 17, 72, 34, 116, 51, 104] | + - ctrl[u8x8]: [4_u8, 11, 16, 10, 6, 19, 7, 18] => [68_u8, 117, 0, 84, 102, 0, 119, 0] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl1q_u8 => vqtbl1q_u8: + - table[uint8x16_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 106, 93, 84, 117, 104, 116, 72, 121 + ] | + - ctrl[u8x16]: [127_u8, 15, 1, 14, 2, 13, 3, 12, 4_u8, 11, 16, 10, 6, 19, 7, 18] + => [0_u8, 121, 17, 72, 34, 116, 51, 104, 68, 117, 0, 84, 102, 0, 119, 0] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl1_p8 => vqtbl1_p8: + - table[poly8x16_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 106, 93, 84, 117, 104, 116, 72, 121 + ] | + - ctrl[u8x8]: [127_u8, 15, 1, 14, 2, 13, 3, 12] => [0_u8, 121, 17, 72, 34, 116, 51, 104] | + - ctrl[u8x8]: [4_u8, 11, 16, 10, 6, 19, 7, 18] => [68_u8, 117, 0, 84, 102, 0, 119, 0] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl1q_p8 => vqtbl1q_p8: + - table[poly8x16_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 106, 93, 84, 117, 104, 116, 72, 121 + ] | + - ctrl[u8x16]: [127_u8, 15, 1, 14, 2, 13, 3, 12, 4_u8, 11, 16, 10, 6, 19, 7, 18] + => [0_u8, 121, 17, 72, 34, 116, 51, 104, 68, 117, 0, 84, 102, 0, 119, 0] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl2_s8 => vqtbl2_s8: + - table[int8x16x2_t]: [ + 0_i8, -1, 2, -3, 4, -5, 6, -7, + 8, -9, 10, -11, 12, -13, 14, -15, + 16, -17, 18, -19, 20, -21, 22, -23, + 24, -25, 26, -27, 28, -29, 30, -31 + ] | + - ctrl[i8x8]: [80_i8, 15, 1, 24, 2, 13, 3, 29] => [0_i8, -15, -1, 24, 2, -13, -3, -29] | + - ctrl[i8x8]: [4_i8, 31, 32, 10, 6, 49, 7, 
18] => [4_i8, -31, 0, 10, 6, 0, -7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl2q_s8 => vqtbl2q_s8: + - table[int8x16x2_t]: [ + 0_i8, -1, 2, -3, 4, -5, 6, -7, + 8, -9, 10, -11, 12, -13, 14, -15, + 16, -17, 18, -19, 20, -21, 22, -23, + 24, -25, 26, -27, 28, -29, 30, -31 + ] | + - ctrl[i8x16]: [80_i8, 15, 1, 24, 2, 13, 3, 29, 4_i8, 31, 32, 10, 6, 49, 7, 18] + => [0_i8, -15, -1, 24, 2, -13, -3, -29, 4, -31, 0, 10, 6, 0, -7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl2_u8 => vqtbl2_u8: + - table[uint8x16x2_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 + ] | + - ctrl[u8x8]: [80_u8, 15, 1, 24, 2, 13, 3, 29] => [0_u8, 15, 1, 24, 2, 13, 3, 29] | + - ctrl[u8x8]: [4_u8, 31, 32, 10, 6, 49, 7, 18] => [4_u8, 31, 0, 10, 6, 0, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl2q_u8 => vqtbl2q_u8: + - table[uint8x16x2_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 + ] | + - ctrl[u8x16]: [80_u8, 15, 1, 24, 2, 13, 3, 29, 4_u8, 31, 32, 10, 6, 49, 7, 18] + => [0_u8, 15, 1, 24, 2, 13, 3, 29, 4, 31, 0, 10, 6, 0, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl2_p8 => vqtbl2_p8: + - table[poly8x16x2_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 + ] | + - ctrl[u8x8]: [80_u8, 15, 1, 24, 2, 13, 3, 29] => [0_u8, 15, 1, 24, 2, 13, 3, 29] | + - ctrl[u8x8]: [4_u8, 31, 32, 10, 6, 49, 7, 18] => [4_u8, 31, 0, 10, 6, 0, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl2q_p8 => vqtbl2q_p8: + - table[poly8x16x2_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 + ] | + - ctrl[u8x16]: [80_u8, 15, 1, 24, 2, 13, 3, 29, 4_u8, 31, 32, 10, 6, 49, 7, 18] 
+ => [0_u8, 15, 1, 24, 2, 13, 3, 29, 4, 31, 0, 10, 6, 0, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl3_s8 => vqtbl3_s8: + - table[int8x16x3_t]: [ + 0_i8, -1, 2, -3, 4, -5, 6, -7, + 8, -9, 10, -11, 12, -13, 14, -15, + 16, -17, 18, -19, 20, -21, 22, -23, + 24, -25, 26, -27, 28, -29, 30, -31, + 32, -33, 34, -35, 36, -37, 38, -39, + 40, -41, 42, -43, 44, -45, 46, -47 + ] | + - ctrl[i8x8]: [80_i8, 15, 1, 24, 2, 13, 3, 29] => [0_i8, -15, -1, 24, 2, -13, -3, -29] | + - ctrl[i8x8]: [4_i8, 32, 46, 51, 6, 49, 7, 18] => [4_i8, 32, 46, 0, 6, 0, -7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl3q_s8 => vqtbl3q_s8: + - table[int8x16x3_t]: [ + 0_i8, -1, 2, -3, 4, -5, 6, -7, + 8, -9, 10, -11, 12, -13, 14, -15, + 16, -17, 18, -19, 20, -21, 22, -23, + 24, -25, 26, -27, 28, -29, 30, -31, + 32, -33, 34, -35, 36, -37, 38, -39, + 40, -41, 42, -43, 44, -45, 46, -47 + ] | + - ctrl[i8x16]: [80_i8, 15, 1, 24, 2, 13, 3, 29, 4_i8, 32, 46, 51, 6, 49, 7, 18] + => [0_i8, -15, -1, 24, 2, -13, -3, -29, 4, 32, 46, 0, 6, 0, -7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl3_u8 => vqtbl3_u8: + - table[uint8x16x3_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47 + ] | + - ctrl[u8x8]: [80_u8, 15, 1, 24, 2, 13, 3, 29] => [0_u8, 15, 1, 24, 2, 13, 3, 29] | + - ctrl[u8x8]: [4_u8, 32, 46, 51, 6, 49, 7, 18] => [4_u8, 32, 46, 0, 6, 0, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl3q_u8 => vqtbl3q_u8: + - table[uint8x16x3_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47 + ] | + - ctrl[u8x16]: [80_u8, 15, 1, 24, 2, 13, 3, 29, 4_u8, 32, 46, 51, 6, 49, 7, 18] + => [0_u8, 15, 1, 24, 2, 13, 3, 29, 4, 32, 46, 0, 6, 0, 7, 
18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl3_p8 => vqtbl3_p8: + - table[poly8x16x3_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47 + ] | + - ctrl[u8x8]: [80_u8, 15, 1, 24, 2, 13, 3, 29] => [0_u8, 15, 1, 24, 2, 13, 3, 29] | + - ctrl[u8x8]: [4_u8, 32, 46, 51, 6, 49, 7, 18] => [4_u8, 32, 46, 0, 6, 0, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl3q_p8 => vqtbl3q_p8: + - table[poly8x16x3_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47 + ] | + - ctrl[u8x16]: [80_u8, 15, 1, 24, 2, 13, 3, 29, 4_u8, 32, 46, 51, 6, 49, 7, 18] + => [0_u8, 15, 1, 24, 2, 13, 3, 29, 4, 32, 46, 0, 6, 0, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl4_s8 => vqtbl4_s8: + - table[int8x16x4_t]: [ + 0_i8, -1, 2, -3, 4, -5, 6, -7, + 8, -9, 10, -11, 12, -13, 14, -15, + 16, -17, 18, -19, 20, -21, 22, -23, + 24, -25, 26, -27, 28, -29, 30, -31, + 32, -33, 34, -35, 36, -37, 38, -39, + 40, -41, 42, -43, 44, -45, 46, -47, + 48, -49, 50, -51, 52, -53, 54, -55, + 56, -57, 58, -59, 60, -61, 62, -63 + ] | + - ctrl[i8x8]: [80_i8, 15, 1, 24, 2, 13, 3, 29] => [0_i8, -15, -1, 24, 2, -13, -3, -29] | + - ctrl[i8x8]: [4_i8, 46, 64, 51, 6, 71, 7, 18] => [4_i8, 46, 0, -51, 6, 0, -7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl4q_s8 => vqtbl4q_s8: + - table[int8x16x4_t]: [ + 0_i8, -1, 2, -3, 4, -5, 6, -7, + 8, -9, 10, -11, 12, -13, 14, -15, + 16, -17, 18, -19, 20, -21, 22, -23, + 24, -25, 26, -27, 28, -29, 30, -31, + 32, -33, 34, -35, 36, -37, 38, -39, + 40, -41, 42, -43, 44, -45, 46, -47, + 48, -49, 50, -51, 52, -53, 54, -55, + 56, -57, 58, -59, 60, -61, 62, -63 + ] | + - ctrl[i8x16]: [80_i8, 15, 1, 24, 2, 13, 3, 29, 
4_i8, 46, 64, 51, 6, 71, 7, 18] + => [0_i8, -15, -1, 24, 2, -13, -3, -29, 4, 46, 0, -51, 6, 0, -7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl4_u8 => vqtbl4_u8: + - table[uint8x16x4_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63 + ] | + - ctrl[u8x8]: [80_u8, 15, 1, 24, 2, 13, 3, 29] => [0_u8, 15, 1, 24, 2, 13, 3, 29] | + - ctrl[u8x8]: [4_u8, 46, 64, 51, 6, 71, 7, 18] => [4_u8, 46, 0, 51, 6, 0, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl4q_u8 => vqtbl4q_u8: + - table[uint8x16x4_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63 + ] | + - ctrl[u8x16]: [80_u8, 15, 1, 24, 2, 13, 3, 29, 4_u8, 46, 64, 51, 6, 71, 7, 18] + => [0_u8, 15, 1, 24, 2, 13, 3, 29, 4, 46, 0, 51, 6, 0, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl4_p8 => vqtbl4_p8: + - table[poly8x16x4_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63 + ] | + - ctrl[u8x8]: [80_u8, 15, 1, 24, 2, 13, 3, 29] => [0_u8, 15, 1, 24, 2, 13, 3, 29] | + - ctrl[u8x8]: [4_u8, 46, 64, 51, 6, 71, 7, 18] => [4_u8, 46, 0, 51, 6, 0, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbl!( + test_vqtbl4q_p8 => vqtbl4q_p8: + - table[poly8x16x4_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 
39, + 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63 + ] | + - ctrl[u8x16]: [80_u8, 15, 1, 24, 2, 13, 3, 29, 4_u8, 46, 64, 51, 6, 71, 7, 18] + => [0_u8, 15, 1, 24, 2, 13, 3, 29, 4, 46, 0, 51, 6, 0, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx1_s8 => vqtbx1_s8: + - table[int8x16_t]: [ + 0_i8, -17, 34, 51, 68, 85, 102, 119, + -106, -93, -84, -117, -104, -116, -72, -121 + ] | + - ext[int8x8_t]: [100_i8, -101, 102, -103, 104, -105, 106, -107] | + - ctrl[i8x8]: [127_i8, 15, 1, 14, 2, 13, 3, 12] => [100_i8, -121, -17, -72, 34, -116, 51, -104] | + - ctrl[i8x8]: [4_i8, 11, 16, 10, 6, 19, 7, 18] => [68_i8, -117, 102, -84, 102, -105, 119, -107] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx1q_s8 => vqtbx1q_s8: + - table[int8x16_t]: [ + 0_i8, -17, 34, 51, 68, 85, 102, 119, + -106, -93, -84, -117, -104, -116, -72, -121 + ] | + - ext[int8x16_t]: [ + 100_i8, -101, 102, -103, 104, -105, 106, -107, + 108, -109, 110, -111, 112, -113, 114, -115 + ] | + - ctrl[i8x16]: [127_i8, 15, 1, 14, 2, 13, 3, 12, 4_i8, 11, 16, 10, 6, 19, 7, 18] + => [100_i8, -121, -17, -72, 34, -116, 51, -104, 68, -117, 110, -84, 102, -113, 119, -115] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx1_u8 => vqtbx1_u8: + - table[uint8x16_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 106, 93, 84, 117, 104, 116, 72, 121 + ] | + - ext[uint8x8_t]: [100_u8, 101, 102, 103, 104, 105, 106, 107] | + - ctrl[u8x8]: [127_u8, 15, 1, 14, 2, 13, 3, 12] => [100_u8, 121, 17, 72, 34, 116, 51, 104] | + - ctrl[u8x8]: [4_u8, 11, 16, 10, 6, 19, 7, 18] => [68_u8, 117, 102, 84, 102, 105, 119, 107] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx1q_u8 => vqtbx1q_u8: + - table[uint8x16_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 106, 93, 84, 117, 104, 116, 72, 121 + ] | + - ext[uint8x16_t]: [ + 100_u8, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115 + ] | + - ctrl[u8x16]: [127_u8, 
15, 1, 14, 2, 13, 3, 12, 4_u8, 11, 16, 10, 6, 19, 7, 18] + => [100_u8, 121, 17, 72, 34, 116, 51, 104, 68, 117, 110, 84, 102, 113, 119, 115] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx1_p8 => vqtbx1_p8: + - table[poly8x16_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 106, 93, 84, 117, 104, 116, 72, 121 + ] | + - ext[poly8x8_t]: [100_u8, 101, 102, 103, 104, 105, 106, 107] | + - ctrl[u8x8]: [127_u8, 15, 1, 14, 2, 13, 3, 12] => [100_u8, 121, 17, 72, 34, 116, 51, 104] | + - ctrl[u8x8]: [4_u8, 11, 16, 10, 6, 19, 7, 18] => [68_u8, 117, 102, 84, 102, 105, 119, 107] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx1q_p8 => vqtbx1q_p8: + - table[poly8x16_t]: [ + 0_u8, 17, 34, 51, 68, 85, 102, 119, + 106, 93, 84, 117, 104, 116, 72, 121 + ] | + - ext[poly8x16_t]: [ + 100_u8, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115 + ] | + - ctrl[u8x16]: [127_u8, 15, 1, 14, 2, 13, 3, 12, 4_u8, 11, 16, 10, 6, 19, 7, 18] + => [100_u8, 121, 17, 72, 34, 116, 51, 104, 68, 117, 110, 84, 102, 113, 119, 115] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx2_s8 => vqtbx2_s8: + - table[int8x16x2_t]: [ + 0_i8, -1, 2, -3, 4, -5, 6, -7, + 8, -9, 10, -11, 12, -13, 14, -15, + 16, -17, 18, -19, 20, -21, 22, -23, + 24, -25, 26, -27, 28, -29, 30, -31 + ] | + - ext[int8x8_t]: [100_i8, -101, 102, -103, 104, -105, 106, -107] | + - ctrl[i8x8]: [80_i8, 15, 1, 24, 2, 13, 3, 29] => [100_i8, -15, -1, 24, 2, -13, -3, -29] | + - ctrl[i8x8]: [4_i8, 31, 32, 10, 6, 49, 7, 18] => [4_i8, -31, 102, 10, 6, -105, -7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx2q_s8 => vqtbx2q_s8: + - table[int8x16x2_t]: [ + 0_i8, -1, 2, -3, 4, -5, 6, -7, + 8, -9, 10, -11, 12, -13, 14, -15, + 16, -17, 18, -19, 20, -21, 22, -23, + 24, -25, 26, -27, 28, -29, 30, -31 + ] | + - ext[int8x16_t]: [ + 100_i8, -101, 102, -103, 104, -105, 106, -107, + 108, -109, 110, -111, 112, -113, 114, -115 + ] | + - ctrl[i8x16]: [80_i8, 15, 1, 24, 2, 13, 3, 
29, 4_i8, 31, 32, 10, 6, 49, 7, 18] + => [100_i8, -15, -1, 24, 2, -13, -3, -29, 4, -31, 110, 10, 6, -113, -7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx2_u8 => vqtbx2_u8: + - table[uint8x16x2_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 + ] | + - ext[uint8x8_t]: [100_u8, 101, 102, 103, 104, 105, 106, 107] | + - ctrl[u8x8]: [80_u8, 15, 1, 24, 2, 13, 3, 29] => [100_u8, 15, 1, 24, 2, 13, 3, 29] | + - ctrl[u8x8]: [4_u8, 31, 32, 10, 6, 49, 7, 18] => [4_u8, 31, 102, 10, 6, 105, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx2q_u8 => vqtbx2q_u8: + - table[uint8x16x2_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 + ] | + - ext[uint8x16_t]: [ + 100_u8, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115 + ] | + - ctrl[u8x16]: [80_u8, 15, 1, 24, 2, 13, 3, 29, 4_u8, 31, 32, 10, 6, 49, 7, 18] + => [100_u8, 15, 1, 24, 2, 13, 3, 29, 4, 31, 110, 10, 6, 113, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx2_p8 => vqtbx2_p8: + - table[poly8x16x2_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 + ] | + - ext[poly8x8_t]: [100_u8, 101, 102, 103, 104, 105, 106, 107] | + - ctrl[u8x8]: [80_u8, 15, 1, 24, 2, 13, 3, 29] => [100_u8, 15, 1, 24, 2, 13, 3, 29] | + - ctrl[u8x8]: [4_u8, 31, 32, 10, 6, 49, 7, 18] => [4_u8, 31, 102, 10, 6, 105, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx2q_p8 => vqtbx2q_p8: + - table[poly8x16x2_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 + ] | + - ext[poly8x16_t]: [ + 100_u8, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115 + ] | + - ctrl[u8x16]: [80_u8, 15, 1, 24, 2, 
13, 3, 29, 4_u8, 31, 32, 10, 6, 49, 7, 18] + => [100_u8, 15, 1, 24, 2, 13, 3, 29, 4, 31, 110, 10, 6, 113, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx3_s8 => vqtbx3_s8: + - table[int8x16x3_t]: [ + 0_i8, -1, 2, -3, 4, -5, 6, -7, + 8, -9, 10, -11, 12, -13, 14, -15, + 16, -17, 18, -19, 20, -21, 22, -23, + 24, -25, 26, -27, 28, -29, 30, -31, + 32, -33, 34, -35, 36, -37, 38, -39, + 40, -41, 42, -43, 44, -45, 46, -47 + ] | + - ext[int8x8_t]: [100_i8, -101, 102, -103, 104, -105, 106, -107] | + - ctrl[i8x8]: [80_i8, 15, 1, 24, 2, 13, 3, 29] => [100_i8, -15, -1, 24, 2, -13, -3, -29] | + - ctrl[i8x8]: [4_i8, 32, 46, 51, 6, 49, 7, 18] => [4_i8, 32, 46, -103, 6, -105, -7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx3q_s8 => vqtbx3q_s8: + - table[int8x16x3_t]: [ + 0_i8, -1, 2, -3, 4, -5, 6, -7, + 8, -9, 10, -11, 12, -13, 14, -15, + 16, -17, 18, -19, 20, -21, 22, -23, + 24, -25, 26, -27, 28, -29, 30, -31, + 32, -33, 34, -35, 36, -37, 38, -39, + 40, -41, 42, -43, 44, -45, 46, -47 + ] | + - ext[int8x16_t]: [ + 100_i8, -101, 102, -103, 104, -105, 106, -107, + 108, -109, 110, -111, 112, -113, 114, -115 + ] | + - ctrl[i8x16]: [80_i8, 15, 1, 24, 2, 13, 3, 29, 4_i8, 32, 46, 51, 6, 49, 7, 18] + => [100_i8, -15, -1, 24, 2, -13, -3, -29, 4, 32, 46, -111, 6, -113, -7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx3_u8 => vqtbx3_u8: + - table[uint8x16x3_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47 + ] | + - ext[uint8x8_t]: [100_u8, 101, 102, 103, 104, 105, 106, 107] | + - ctrl[u8x8]: [80_u8, 15, 1, 24, 2, 13, 3, 29] => [100_u8, 15, 1, 24, 2, 13, 3, 29] | + - ctrl[u8x8]: [4_u8, 32, 46, 51, 6, 49, 7, 18] => [4_u8, 32, 46, 103, 6, 105, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx3q_u8 => vqtbx3q_u8: + - table[uint8x16x3_t]: [ + 0_u8, 1, 2, 
3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47 + ] | + - ext[uint8x16_t]: [ + 100_u8, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115 + ] | + - ctrl[u8x16]: [80_u8, 15, 1, 24, 2, 13, 3, 29, 4_u8, 32, 46, 51, 6, 49, 7, 18] + => [100_u8, 15, 1, 24, 2, 13, 3, 29, 4, 32, 46, 111, 6, 113, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx3_p8 => vqtbx3_p8: + - table[poly8x16x3_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47 + ] | + - ext[poly8x8_t]: [100_u8, 101, 102, 103, 104, 105, 106, 107] | + - ctrl[u8x8]: [80_u8, 15, 1, 24, 2, 13, 3, 29] => [100_u8, 15, 1, 24, 2, 13, 3, 29] | + - ctrl[u8x8]: [4_u8, 32, 46, 51, 6, 49, 7, 18] => [4_u8, 32, 46, 103, 6, 105, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx3q_p8 => vqtbx3q_p8: + - table[poly8x16x3_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47 + ] | + - ext[poly8x16_t]: [ + 100_u8, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115 + ] | + - ctrl[u8x16]: [80_u8, 15, 1, 24, 2, 13, 3, 29, 4_u8, 32, 46, 51, 6, 49, 7, 18] + => [100_u8, 15, 1, 24, 2, 13, 3, 29, 4, 32, 46, 111, 6, 113, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx4_s8 => vqtbx4_s8: + - table[int8x16x4_t]: [ + 0_i8, -1, 2, -3, 4, -5, 6, -7, + 8, -9, 10, -11, 12, -13, 14, -15, + 16, -17, 18, -19, 20, -21, 22, -23, + 24, -25, 26, -27, 28, -29, 30, -31, + 32, -33, 34, -35, 36, -37, 38, -39, + 40, -41, 42, -43, 44, -45, 46, -47, + 48, -49, 50, -51, 52, -53, 54, -55, + 56, -57, 58, -59, 60, -61, 62, -63 + ] | 
+ - ext[int8x8_t]: [100_i8, -101, 102, -103, 104, -105, 106, -107] | + - ctrl[i8x8]: [80_i8, 15, 1, 24, 2, 13, 3, 29] => [100_i8, -15, -1, 24, 2, -13, -3, -29] | + - ctrl[i8x8]: [4_i8, 46, 64, 51, 6, 71, 7, 18] => [4_i8, 46, 102, -51, 6, -105, -7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx4q_s8 => vqtbx4q_s8: + - table[int8x16x4_t]: [ + 0_i8, -1, 2, -3, 4, -5, 6, -7, + 8, -9, 10, -11, 12, -13, 14, -15, + 16, -17, 18, -19, 20, -21, 22, -23, + 24, -25, 26, -27, 28, -29, 30, -31, + 32, -33, 34, -35, 36, -37, 38, -39, + 40, -41, 42, -43, 44, -45, 46, -47, + 48, -49, 50, -51, 52, -53, 54, -55, + 56, -57, 58, -59, 60, -61, 62, -63 + ] | + - ext[int8x16_t]: [ + 100_i8, -101, 102, -103, 104, -105, 106, -107, + 108, -109, 110, -111, 112, -113, 114, -115 + ] | + - ctrl[i8x16]: [80_i8, 15, 1, 24, 2, 13, 3, 29, 4_i8, 46, 64, 51, 6, 71, 7, 18] + => [100_i8, -15, -1, 24, 2, -13, -3, -29, 4, 46, 110, -51, 6, -113, -7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx4_u8 => vqtbx4_u8: + - table[uint8x16x4_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63 + ] | + - ext[uint8x8_t]: [100_u8, 101, 102, 103, 104, 105, 106, 107] | + - ctrl[u8x8]: [80_u8, 15, 1, 24, 2, 13, 3, 29] => [100_u8, 15, 1, 24, 2, 13, 3, 29] | + - ctrl[u8x8]: [4_u8, 46, 64, 51, 6, 71, 7, 18] => [4_u8, 46, 102, 51, 6, 105, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx4q_u8 => vqtbx4q_u8: + - table[uint8x16x4_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63 + ] | + - ext[uint8x16_t]: [ + 100_u8, 101, 102, 103, 
104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115 + ] | + - ctrl[u8x16]: [80_u8, 15, 1, 24, 2, 13, 3, 29, 4_u8, 46, 64, 51, 6, 71, 7, 18] + => [100_u8, 15, 1, 24, 2, 13, 3, 29, 4, 46, 110, 51, 6, 113, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx4_p8 => vqtbx4_p8: + - table[poly8x16x4_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63 + ] | + - ext[poly8x8_t]: [100_u8, 101, 102, 103, 104, 105, 106, 107] | + - ctrl[u8x8]: [80_u8, 15, 1, 24, 2, 13, 3, 29] => [100_u8, 15, 1, 24, 2, 13, 3, 29] | + - ctrl[u8x8]: [4_u8, 46, 64, 51, 6, 71, 7, 18] => [4_u8, 46, 102, 51, 6, 105, 7, 18] +); + +#[cfg(target_arch = "aarch64")] +test_vtbx!( + test_vqtbx4q_p8 => vqtbx4q_p8: + - table[poly8x16x4_t]: [ + 0_u8, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63 + ] | + - ext[poly8x16_t]: [ + 100_u8, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115 + ] | + - ctrl[u8x16]: [80_u8, 15, 1, 24, 2, 13, 3, 29, 4_u8, 46, 64, 51, 6, 71, 7, 18] + => [100_u8, 15, 1, 24, 2, 13, 3, 29, 4, 46, 110, 51, 6, 113, 7, 18] +); diff --git a/crates/assert-instr-macro/src/lib.rs b/crates/assert-instr-macro/src/lib.rs index 76a14b37cb..25b5572ad8 100644 --- a/crates/assert-instr-macro/src/lib.rs +++ b/crates/assert-instr-macro/src/lib.rs @@ -94,8 +94,7 @@ pub fn assert_instr( .ident .to_string() .starts_with("target") - }) - .collect::>(); + }).collect::>(); let attrs = Append(&attrs); // Use an ABI on Windows that passes SIMD values in registers, like what