Add _mm_loadu_si64
pickfire committed Jul 11, 2020
1 parent 9faced9 commit 0490bcc
Showing 1 changed file with 26 additions and 0 deletions.
26 changes: 26 additions & 0 deletions crates/core_arch/src/x86/sse.rs
@@ -1251,6 +1251,25 @@ pub unsafe fn _mm_loadr_ps(p: *const f32) -> __m128 {
    simd_shuffle4(a, a, [3, 2, 1, 0])
}

/// Loads unaligned 64 bits of integer data from memory into a new vector. The
/// upper 64 bits of the returned vector are zeroed.
///
/// `mem_addr` does not need to be aligned on any particular boundary.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si64)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movups))]
#[stable(feature = "simd_x86", since = "1.46.0")]
pub unsafe fn _mm_loadu_si64(mem_addr: *const u8) -> __m128i {
    // Start from an all-zero vector so that the upper 64 bits of the result
    // are zero after the copy below.
    let mut dst = _mm_setzero_si128();
    // Copy the 64 bits at `mem_addr` into the low half of `dst`. A byte-wise
    // copy imposes no alignment requirement on `mem_addr`.
    ptr::copy_nonoverlapping(
        mem_addr,
        &mut dst as *mut __m128i as *mut u8,
        8, // == 64 bits == mem::size_of::<__m128i>() / 2
    );
    dst
}

/// Stores the upper half of `a` (64 bits) into memory.
///
/// This intrinsic corresponds to the `MOVHPS` instruction. The compiler may
@@ -3658,6 +3677,13 @@ mod tests {
        assert_eq_m128(r, e);
    }

#[simd_test(enable = "sse2")]
unsafe fn test_mm_loadu_si64() {
let a = _mm_set_epi64x(5, 0);
let r = _mm_loadu_si64(&a as *const _ as *const _);
assert_eq_m128i(a, r);
}

#[simd_test(enable = "sse")]
unsafe fn test_mm_storeh_pi() {
let mut vals = [0.0f32; 8];
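A minimal usage sketch for the new intrinsic (not part of this commit; the `main`, `bytes`, and `out` names are illustrative, and it assumes an x86_64 target, where SSE2 is baseline, on a toolchain in which `_mm_loadu_si64` has been stabilized):

use std::arch::x86_64::{__m128i, _mm_loadu_si64, _mm_storeu_si128};

fn main() {
    // Eight arbitrary bytes; the load has no alignment requirement, so a
    // plain byte array is fine.
    let bytes: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    // SAFETY: `bytes.as_ptr()` is valid for 8 bytes of reads, and SSE2 is
    // always available on x86_64.
    let v: __m128i = unsafe { _mm_loadu_si64(bytes.as_ptr()) };
    // Spill the vector back to memory to inspect it: the low 64 bits hold
    // the input bytes, the upper 64 bits are zero.
    let mut out = [0u8; 16];
    unsafe { _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, v) };
    assert_eq!(&out[..8], &bytes[..]);
    assert_eq!(&out[8..], &[0u8; 8][..]);
}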
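For comparison, the same load could plausibly be written as an unaligned scalar read widened with `_mm_set_epi64x`, which likewise zeroes the upper half. This is a sketch only, with a hypothetical name, not what the commit does; the byte-copy form above keeps the zero-fill of `dst` explicit:

use std::arch::x86_64::{__m128i, _mm_set_epi64x};
use std::ptr;

/// Hypothetical alternative; behaves like the `_mm_loadu_si64` added above.
unsafe fn loadu_si64_alt(mem_addr: *const u8) -> __m128i {
    // `read_unaligned` tolerates any alignment, like `copy_nonoverlapping`.
    let low = ptr::read_unaligned(mem_addr as *const i64);
    // `_mm_set_epi64x(high, low)` places `low` in the bottom 64 bits and
    // zeroes the top half here.
    _mm_set_epi64x(0, low)
}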
