diff --git a/compiler/rustc_codegen_ssa/src/glue.rs b/compiler/rustc_codegen_ssa/src/glue.rs
index 0f6e6032f9bff..c34f1dbf8569d 100644
--- a/compiler/rustc_codegen_ssa/src/glue.rs
+++ b/compiler/rustc_codegen_ssa/src/glue.rs
@@ -46,7 +46,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
                 // NOTE: ideally, we want the effects of both `unchecked_smul` and `unchecked_umul`
                 // (resulting in `mul nsw nuw` in LLVM IR), since we know that the multiplication
                 // cannot signed wrap, and that both operands are non-negative. But at the time of writing,
-                // `BuilderMethods` can't do this, and it doesn't seem to enable any further optimizations.
+                // the `LLVM-C` binding can't do this, and it doesn't seem to enable any further optimizations.
                 bx.unchecked_smul(info.unwrap(), bx.const_usize(unit.size.bytes())),
                 bx.const_usize(unit.align.abi.bytes()),
             )
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index 71a0d1825efec..540831c825335 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -834,7 +834,7 @@ mod impls {
                 #[inline]
                 fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
-                    let newlen = data.len() * mem::size_of::<$ty>();
+                    let newlen = mem::size_of_val(data);
                     let ptr = data.as_ptr() as *const u8;
                     // SAFETY: `ptr` is valid and aligned, as this macro is only used
                     // for numeric primitives which have no padding. The new slice only
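
Note (not part of the patch): the `hash_slice` change is behavior-preserving because, for a slice, `mem::size_of_val` returns exactly `len() * size_of::<T>()`, read from the fat pointer's length metadata; on slices this intrinsic is in fact lowered through the `size_and_align_of_dst` path touched in the first hunk. Below is a minimal standalone sketch of the equivalence; the `u32` element type and the sample data are illustrative choices, not from the patch.

    use std::mem;

    fn main() {
        let data: &[u32] = &[1, 2, 3, 4];

        // The expression the patch removes: element count times element size.
        let old_len = data.len() * mem::size_of::<u32>();

        // The expression the patch adds: the slice's total size in bytes,
        // taken from the fat pointer's length metadata. For slices this is
        // guaranteed to be the same product.
        let new_len = mem::size_of_val(data);

        assert_eq!(old_len, new_len); // 4 elements * 4 bytes = 16
    }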