From 6c9d7fbeedd31398f363185106da292c2cdccb7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Mi=C4=85sko?= Date: Sun, 14 Feb 2021 00:00:00 +0000 Subject: [PATCH 1/6] Add size assertions for interpreter data structures --- compiler/rustc_mir/src/interpret/operand.rs | 9 +++++++++ compiler/rustc_mir/src/interpret/place.rs | 15 +++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/compiler/rustc_mir/src/interpret/operand.rs b/compiler/rustc_mir/src/interpret/operand.rs index 88236458a213a..96fe22cab19e5 100644 --- a/compiler/rustc_mir/src/interpret/operand.rs +++ b/compiler/rustc_mir/src/interpret/operand.rs @@ -32,6 +32,9 @@ pub enum Immediate { ScalarPair(ScalarMaybeUninit, ScalarMaybeUninit), } +#[cfg(target_arch = "x86_64")] +rustc_data_structures::static_assert_size!(Immediate, 56); + impl From> for Immediate { #[inline(always)] fn from(val: ScalarMaybeUninit) -> Self { @@ -92,6 +95,9 @@ pub struct ImmTy<'tcx, Tag = ()> { pub layout: TyAndLayout<'tcx>, } +#[cfg(target_arch = "x86_64")] +rustc_data_structures::static_assert_size!(ImmTy<'_>, 72); + impl std::fmt::Display for ImmTy<'tcx, Tag> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { /// Helper function for printing a scalar to a FmtPrinter @@ -156,6 +162,9 @@ pub struct OpTy<'tcx, Tag = ()> { pub layout: TyAndLayout<'tcx>, } +#[cfg(target_arch = "x86_64")] +rustc_data_structures::static_assert_size!(OpTy<'_, ()>, 80); + impl<'tcx, Tag> std::ops::Deref for OpTy<'tcx, Tag> { type Target = Operand; #[inline(always)] diff --git a/compiler/rustc_mir/src/interpret/place.rs b/compiler/rustc_mir/src/interpret/place.rs index efde7fe6948c2..b79b3d92154b2 100644 --- a/compiler/rustc_mir/src/interpret/place.rs +++ b/compiler/rustc_mir/src/interpret/place.rs @@ -33,6 +33,9 @@ pub enum MemPlaceMeta { Poison, } +#[cfg(target_arch = "x86_64")] +rustc_data_structures::static_assert_size!(MemPlaceMeta, 24); + impl MemPlaceMeta { pub fn unwrap_meta(self) -> Scalar { match self { @@ -71,6 +74,9 @@ pub struct MemPlace { pub meta: MemPlaceMeta, } +#[cfg(target_arch = "x86_64")] +rustc_data_structures::static_assert_size!(MemPlace, 56); + #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)] pub enum Place { /// A place referring to a value allocated in the `Memory` system. @@ -81,12 +87,18 @@ pub enum Place { Local { frame: usize, local: mir::Local }, } +#[cfg(target_arch = "x86_64")] +rustc_data_structures::static_assert_size!(Place, 64); + #[derive(Copy, Clone, Debug)] pub struct PlaceTy<'tcx, Tag = ()> { place: Place, // Keep this private; it helps enforce invariants. 
pub layout: TyAndLayout<'tcx>, } +#[cfg(target_arch = "x86_64")] +rustc_data_structures::static_assert_size!(PlaceTy<'_>, 80); + impl<'tcx, Tag> std::ops::Deref for PlaceTy<'tcx, Tag> { type Target = Place; #[inline(always)] @@ -102,6 +114,9 @@ pub struct MPlaceTy<'tcx, Tag = ()> { pub layout: TyAndLayout<'tcx>, } +#[cfg(target_arch = "x86_64")] +rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 72); + impl<'tcx, Tag> std::ops::Deref for MPlaceTy<'tcx, Tag> { type Target = MemPlace; #[inline(always)] From e915cf45dc48e90653081f9d760a7f4f803ce428 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Mi=C4=85sko?= Date: Mon, 15 Feb 2021 00:00:00 +0000 Subject: [PATCH 2/6] Pass OpTy by reference not value --- .../rustc_mir/src/const_eval/eval_queries.rs | 6 +- compiler/rustc_mir/src/const_eval/machine.rs | 16 ++-- compiler/rustc_mir/src/const_eval/mod.rs | 12 +-- compiler/rustc_mir/src/interpret/cast.rs | 10 +-- .../rustc_mir/src/interpret/eval_context.rs | 2 +- compiler/rustc_mir/src/interpret/intern.rs | 8 +- .../rustc_mir/src/interpret/intrinsics.rs | 82 +++++++++---------- compiler/rustc_mir/src/interpret/machine.rs | 4 +- compiler/rustc_mir/src/interpret/operand.rs | 26 +++--- compiler/rustc_mir/src/interpret/operator.rs | 22 ++--- compiler/rustc_mir/src/interpret/place.rs | 32 ++++---- compiler/rustc_mir/src/interpret/step.rs | 26 +++--- .../rustc_mir/src/interpret/terminator.rs | 16 ++-- compiler/rustc_mir/src/interpret/validity.rs | 36 ++++---- compiler/rustc_mir/src/interpret/visitor.rs | 54 ++++++------ .../rustc_mir/src/transform/const_prop.rs | 34 ++++---- 16 files changed, 193 insertions(+), 193 deletions(-) diff --git a/compiler/rustc_mir/src/const_eval/eval_queries.rs b/compiler/rustc_mir/src/const_eval/eval_queries.rs index ed450c0c2a056..e573eeae00314 100644 --- a/compiler/rustc_mir/src/const_eval/eval_queries.rs +++ b/compiler/rustc_mir/src/const_eval/eval_queries.rs @@ -105,7 +105,7 @@ pub(super) fn mk_eval_cx<'mir, 'tcx>( /// type system. pub(super) fn op_to_const<'tcx>( ecx: &CompileTimeEvalContext<'_, 'tcx>, - op: OpTy<'tcx>, + op: &OpTy<'tcx>, ) -> ConstValue<'tcx> { // We do not have value optimizations for everything. // Only scalars and slices, since they are very common. @@ -201,7 +201,7 @@ fn turn_into_const_value<'tcx>( "the `eval_to_const_value_raw` query should not be used for statics, use `eval_to_allocation` instead" ); // Turn this into a proper constant. 
- op_to_const(&ecx, mplace.into()) + op_to_const(&ecx, &mplace.into()) } pub fn eval_to_const_value_raw_provider<'tcx>( @@ -348,7 +348,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>( Some(_) => CtfeValidationMode::Regular, // a `static` None => CtfeValidationMode::Const { inner, allow_static_ptrs: false }, }; - ecx.const_validate_operand(mplace.into(), path, &mut ref_tracking, mode)?; + ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?; inner = true; } }; diff --git a/compiler/rustc_mir/src/const_eval/machine.rs b/compiler/rustc_mir/src/const_eval/machine.rs index f6b950c08c78e..6282288b26e92 100644 --- a/compiler/rustc_mir/src/const_eval/machine.rs +++ b/compiler/rustc_mir/src/const_eval/machine.rs @@ -39,7 +39,7 @@ impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> { // &str assert!(args.len() == 1); - let msg_place = self.deref_operand(args[0])?; + let msg_place = self.deref_operand(&args[0])?; let msg = Symbol::intern(self.read_str(msg_place)?); let span = self.find_closest_untracked_caller_location(); let (file, line, col) = self.location_triple_for_span(span); @@ -284,8 +284,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, }; match intrinsic_name { sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => { - let a = ecx.read_immediate(args[0])?.to_scalar()?; - let b = ecx.read_immediate(args[1])?.to_scalar()?; + let a = ecx.read_immediate(&args[0])?.to_scalar()?; + let b = ecx.read_immediate(&args[1])?.to_scalar()?; let cmp = if intrinsic_name == sym::ptr_guaranteed_eq { ecx.guaranteed_eq(a, b) } else { @@ -294,8 +294,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, ecx.write_scalar(Scalar::from_bool(cmp), dest)?; } sym::const_allocate => { - let size = ecx.read_scalar(args[0])?.to_machine_usize(ecx)?; - let align = ecx.read_scalar(args[1])?.to_machine_usize(ecx)?; + let size = ecx.read_scalar(&args[0])?.to_machine_usize(ecx)?; + let align = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?; let align = match Align::from_bytes(align) { Ok(a) => a, @@ -330,7 +330,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, use rustc_middle::mir::AssertKind::*; // Convert `AssertKind` to `AssertKind`. 
let eval_to_int = - |op| ecx.read_immediate(ecx.eval_operand(op, None)?).map(|x| x.to_const_int()); + |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int()); let err = match msg { BoundsCheck { ref len, ref index } => { let len = eval_to_int(len)?; @@ -358,8 +358,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, fn binary_ptr_op( _ecx: &InterpCx<'mir, 'tcx, Self>, _bin_op: mir::BinOp, - _left: ImmTy<'tcx>, - _right: ImmTy<'tcx>, + _left: &ImmTy<'tcx>, + _right: &ImmTy<'tcx>, ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> { Err(ConstEvalErrKind::NeedsRfc("pointer arithmetic or comparison".to_string()).into()) } diff --git a/compiler/rustc_mir/src/const_eval/mod.rs b/compiler/rustc_mir/src/const_eval/mod.rs index 9dd2a8592a7e0..480489c9bc0b1 100644 --- a/compiler/rustc_mir/src/const_eval/mod.rs +++ b/compiler/rustc_mir/src/const_eval/mod.rs @@ -55,8 +55,8 @@ pub(crate) fn destructure_const<'tcx>( return mir::DestructuredConst { variant: None, fields: &[] }; } ty::Adt(def, _) => { - let variant = ecx.read_discriminant(op).unwrap().1; - let down = ecx.operand_downcast(op, variant).unwrap(); + let variant = ecx.read_discriminant(&op).unwrap().1; + let down = ecx.operand_downcast(&op, variant).unwrap(); (def.variants[variant].fields.len(), Some(variant), down) } ty::Tuple(substs) => (substs.len(), None, op), @@ -64,8 +64,8 @@ pub(crate) fn destructure_const<'tcx>( }; let fields_iter = (0..field_count).map(|i| { - let field_op = ecx.operand_field(down, i).unwrap(); - let val = op_to_const(&ecx, field_op); + let field_op = ecx.operand_field(&down, i).unwrap(); + let val = op_to_const(&ecx, &field_op); ty::Const::from_value(tcx, val, field_op.layout.ty) }); let fields = tcx.arena.alloc_from_iter(fields_iter); @@ -81,7 +81,7 @@ pub(crate) fn deref_const<'tcx>( trace!("deref_const: {:?}", val); let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false); let op = ecx.const_to_op(val, None).unwrap(); - let mplace = ecx.deref_operand(op).unwrap(); + let mplace = ecx.deref_operand(&op).unwrap(); if let Scalar::Ptr(ptr) = mplace.ptr { assert_eq!( ecx.memory.get_raw(ptr.alloc_id).unwrap().mutability, @@ -106,5 +106,5 @@ pub(crate) fn deref_const<'tcx>( }, }; - tcx.mk_const(ty::Const { val: ty::ConstKind::Value(op_to_const(&ecx, mplace.into())), ty }) + tcx.mk_const(ty::Const { val: ty::ConstKind::Value(op_to_const(&ecx, &mplace.into())), ty }) } diff --git a/compiler/rustc_mir/src/interpret/cast.rs b/compiler/rustc_mir/src/interpret/cast.rs index 128d8cff95e6b..257012ead6641 100644 --- a/compiler/rustc_mir/src/interpret/cast.rs +++ b/compiler/rustc_mir/src/interpret/cast.rs @@ -17,7 +17,7 @@ use super::{ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn cast( &mut self, - src: OpTy<'tcx, M::PointerTag>, + src: &OpTy<'tcx, M::PointerTag>, cast_kind: CastKind, cast_ty: Ty<'tcx>, dest: PlaceTy<'tcx, M::PointerTag>, @@ -259,7 +259,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { fn unsize_into_ptr( &mut self, - src: OpTy<'tcx, M::PointerTag>, + src: &OpTy<'tcx, M::PointerTag>, dest: PlaceTy<'tcx, M::PointerTag>, // The pointee types source_ty: Ty<'tcx>, @@ -300,7 +300,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { fn unsize_into( &mut self, - src: OpTy<'tcx, M::PointerTag>, + src: &OpTy<'tcx, M::PointerTag>, cast_ty: TyAndLayout<'tcx>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { @@ -340,9 +340,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> 
InterpCx<'mir, 'tcx, M> { let src_field = self.operand_field(src, i)?; let dst_field = self.place_field(dest, i)?; if src_field.layout.ty == cast_ty_field.ty { - self.copy_op(src_field, dst_field)?; + self.copy_op(&src_field, dst_field)?; } else { - self.unsize_into(src_field, cast_ty_field, dst_field)?; + self.unsize_into(&src_field, cast_ty_field, dst_field)?; } } Ok(()) diff --git a/compiler/rustc_mir/src/interpret/eval_context.rs b/compiler/rustc_mir/src/interpret/eval_context.rs index 7e9594dd6bfd7..7173e1eca5973 100644 --- a/compiler/rustc_mir/src/interpret/eval_context.rs +++ b/compiler/rustc_mir/src/interpret/eval_context.rs @@ -779,7 +779,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Copy the return value to the caller's stack frame. if let Some(return_place) = frame.return_place { let op = self.access_local(&frame, mir::RETURN_PLACE, None)?; - self.copy_op_transmute(op, return_place)?; + self.copy_op_transmute(&op, return_place)?; trace!("{:?}", self.dump_place(*return_place)); } else { throw_ub!(Unreachable); diff --git a/compiler/rustc_mir/src/interpret/intern.rs b/compiler/rustc_mir/src/interpret/intern.rs index 6904ea5b77d16..7f0b74cf6e5f2 100644 --- a/compiler/rustc_mir/src/interpret/intern.rs +++ b/compiler/rustc_mir/src/interpret/intern.rs @@ -167,7 +167,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory fn visit_aggregate( &mut self, - mplace: MPlaceTy<'tcx>, + mplace: &MPlaceTy<'tcx>, fields: impl Iterator>, ) -> InterpResult<'tcx> { // ZSTs cannot contain pointers, so we can skip them. @@ -191,13 +191,13 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory self.walk_aggregate(mplace, fields) } - fn visit_value(&mut self, mplace: MPlaceTy<'tcx>) -> InterpResult<'tcx> { + fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> { // Handle Reference types, as these are the only relocations supported by const eval. // Raw pointers (and boxes) are handled by the `leftover_relocations` logic. let tcx = self.ecx.tcx; let ty = mplace.layout.ty; if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() { - let value = self.ecx.read_immediate(mplace.into())?; + let value = self.ecx.read_immediate(&(*mplace).into())?; let mplace = self.ecx.ref_to_mplace(value)?; assert_eq!(mplace.layout.ty, referenced_ty); // Handle trait object vtables. @@ -338,7 +338,7 @@ where leftover_allocations, inside_unsafe_cell: false, } - .visit_value(mplace); + .visit_value(&mplace); // We deliberately *ignore* interpreter errors here. When there is a problem, the remaining // references are "leftover"-interned, and later validation will show a proper error // and point at the right part of the value causing the problem. diff --git a/compiler/rustc_mir/src/interpret/intrinsics.rs b/compiler/rustc_mir/src/interpret/intrinsics.rs index f4309c9cd9572..0252dd15888af 100644 --- a/compiler/rustc_mir/src/interpret/intrinsics.rs +++ b/compiler/rustc_mir/src/interpret/intrinsics.rs @@ -143,7 +143,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { sym::min_align_of_val | sym::size_of_val => { // Avoid `deref_operand` -- this is not a deref, the ptr does not have to be // dereferencable! - let place = self.ref_to_mplace(self.read_immediate(args[0])?)?; + let place = self.ref_to_mplace(self.read_immediate(&args[0])?)?; let (size, align) = self .size_and_align_of_mplace(place)? 
.ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?; @@ -177,7 +177,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?; let const_ = ty::Const { val: ty::ConstKind::Value(val), ty }; let val = self.const_to_op(&const_, None)?; - self.copy_op(val, dest)?; + self.copy_op(&val, dest)?; } sym::ctpop @@ -189,7 +189,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { | sym::bitreverse => { let ty = substs.type_at(0); let layout_of = self.layout_of(ty)?; - let val = self.read_scalar(args[0])?.check_init()?; + let val = self.read_scalar(&args[0])?.check_init()?; let bits = self.force_bits(val, layout_of.size)?; let kind = match layout_of.abi { Abi::Scalar(ref scalar) => scalar.value, @@ -212,22 +212,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { self.write_scalar(out_val, dest)?; } sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => { - let lhs = self.read_immediate(args[0])?; - let rhs = self.read_immediate(args[1])?; + let lhs = self.read_immediate(&args[0])?; + let rhs = self.read_immediate(&args[1])?; let bin_op = match intrinsic_name { sym::add_with_overflow => BinOp::Add, sym::sub_with_overflow => BinOp::Sub, sym::mul_with_overflow => BinOp::Mul, _ => bug!("Already checked for int ops"), }; - self.binop_with_overflow(bin_op, lhs, rhs, dest)?; + self.binop_with_overflow(bin_op, &lhs, &rhs, dest)?; } sym::saturating_add | sym::saturating_sub => { - let l = self.read_immediate(args[0])?; - let r = self.read_immediate(args[1])?; + let l = self.read_immediate(&args[0])?; + let r = self.read_immediate(&args[1])?; let is_add = intrinsic_name == sym::saturating_add; let (val, overflowed, _ty) = - self.overflowing_binary_op(if is_add { BinOp::Add } else { BinOp::Sub }, l, r)?; + self.overflowing_binary_op(if is_add { BinOp::Add } else { BinOp::Sub }, &l, &r)?; let val = if overflowed { let num_bits = l.layout.size.bits(); if l.layout.abi.is_signed() { @@ -269,8 +269,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { self.write_scalar(val, dest)?; } sym::discriminant_value => { - let place = self.deref_operand(args[0])?; - let discr_val = self.read_discriminant(place.into())?.0; + let place = self.deref_operand(&args[0])?; + let discr_val = self.read_discriminant(&place.into())?.0; self.write_scalar(discr_val, dest)?; } sym::unchecked_shl @@ -280,8 +280,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { | sym::unchecked_mul | sym::unchecked_div | sym::unchecked_rem => { - let l = self.read_immediate(args[0])?; - let r = self.read_immediate(args[1])?; + let l = self.read_immediate(&args[0])?; + let r = self.read_immediate(&args[1])?; let bin_op = match intrinsic_name { sym::unchecked_shl => BinOp::Shl, sym::unchecked_shr => BinOp::Shr, @@ -292,7 +292,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { sym::unchecked_rem => BinOp::Rem, _ => bug!("Already checked for int ops"), }; - let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, l, r)?; + let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?; if overflowed { let layout = self.layout_of(substs.type_at(0))?; let r_val = self.force_bits(r.to_scalar()?, layout.size)?; @@ -308,9 +308,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW)) // rotate_right: (X << 
((BW - S) % BW)) | (X >> (S % BW)) let layout = self.layout_of(substs.type_at(0))?; - let val = self.read_scalar(args[0])?.check_init()?; + let val = self.read_scalar(&args[0])?.check_init()?; let val_bits = self.force_bits(val, layout.size)?; - let raw_shift = self.read_scalar(args[1])?.check_init()?; + let raw_shift = self.read_scalar(&args[1])?.check_init()?; let raw_shift_bits = self.force_bits(raw_shift, layout.size)?; let width_bits = u128::from(layout.size.bits()); let shift_bits = raw_shift_bits % width_bits; @@ -327,15 +327,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { sym::copy | sym::copy_nonoverlapping => { let elem_ty = instance.substs.type_at(0); let elem_layout = self.layout_of(elem_ty)?; - let count = self.read_scalar(args[2])?.to_machine_usize(self)?; + let count = self.read_scalar(&args[2])?.to_machine_usize(self)?; let elem_align = elem_layout.align.abi; let size = elem_layout.size.checked_mul(count, self).ok_or_else(|| { err_ub_format!("overflow computing total size of `{}`", intrinsic_name) })?; - let src = self.read_scalar(args[0])?.check_init()?; + let src = self.read_scalar(&args[0])?.check_init()?; let src = self.memory.check_ptr_access(src, size, elem_align)?; - let dest = self.read_scalar(args[1])?.check_init()?; + let dest = self.read_scalar(&args[1])?.check_init()?; let dest = self.memory.check_ptr_access(dest, size, elem_align)?; if let (Some(src), Some(dest)) = (src, dest) { @@ -348,16 +348,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } } sym::offset => { - let ptr = self.read_scalar(args[0])?.check_init()?; - let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?; + let ptr = self.read_scalar(&args[0])?.check_init()?; + let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?; let pointee_ty = substs.type_at(0); let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?; self.write_scalar(offset_ptr, dest)?; } sym::arith_offset => { - let ptr = self.read_scalar(args[0])?.check_init()?; - let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?; + let ptr = self.read_scalar(&args[0])?.check_init()?; + let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?; let pointee_ty = substs.type_at(0); let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap(); @@ -366,8 +366,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { self.write_scalar(offset_ptr, dest)?; } sym::ptr_offset_from => { - let a = self.read_immediate(args[0])?.to_scalar()?; - let b = self.read_immediate(args[1])?.to_scalar()?; + let a = self.read_immediate(&args[0])?.to_scalar()?; + let b = self.read_immediate(&args[1])?.to_scalar()?; // Special case: if both scalars are *equal integers* // and not NULL, we pretend there is an allocation of size 0 right there, @@ -406,16 +406,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout); let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout); let (val, _overflowed, _ty) = - self.overflowing_binary_op(BinOp::Sub, a_offset, b_offset)?; + self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?; let pointee_layout = self.layout_of(substs.type_at(0))?; let val = ImmTy::from_scalar(val, isize_layout); let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout); - self.exact_div(val, size, dest)?; + self.exact_div(&val, &size, dest)?; } } sym::transmute => { - 
self.copy_op_transmute(args[0], dest)?; + self.copy_op_transmute(&args[0], dest)?; } sym::assert_inhabited => { let ty = instance.substs.type_at(0); @@ -434,9 +434,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } } sym::simd_insert => { - let index = u64::from(self.read_scalar(args[1])?.to_u32()?); - let elem = args[2]; - let input = args[0]; + let index = u64::from(self.read_scalar(&args[1])?.to_u32()?); + let elem = &args[2]; + let input = &args[0]; let (len, e_ty) = input.layout.ty.simd_size_and_type(*self.tcx); assert!( index < len, @@ -458,12 +458,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { for i in 0..len { let place = self.place_index(dest, i)?; - let value = if i == index { elem } else { self.operand_index(input, i)? }; - self.copy_op(value, place)?; + let value = if i == index { *elem } else { self.operand_index(input, i)? }; + self.copy_op(&value, place)?; } } sym::simd_extract => { - let index = u64::from(self.read_scalar(args[1])?.to_u32()?); + let index = u64::from(self.read_scalar(&args[1])?.to_u32()?); let (len, e_ty) = args[0].layout.ty.simd_size_and_type(*self.tcx); assert!( index < len, @@ -477,14 +477,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { "Return type `{}` must match vector element type `{}`", dest.layout.ty, e_ty ); - self.copy_op(self.operand_index(args[0], index)?, dest)?; + self.copy_op(&self.operand_index(&args[0], index)?, dest)?; } sym::likely | sym::unlikely => { // These just return their argument - self.copy_op(args[0], dest)?; + self.copy_op(&args[0], dest)?; } sym::assume => { - let cond = self.read_scalar(args[0])?.check_init()?.to_bool()?; + let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?; if !cond { throw_ub_format!("`assume` intrinsic called with `false`"); } @@ -499,14 +499,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn exact_div( &mut self, - a: ImmTy<'tcx, M::PointerTag>, - b: ImmTy<'tcx, M::PointerTag>, + a: &ImmTy<'tcx, M::PointerTag>, + b: &ImmTy<'tcx, M::PointerTag>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { // Performs an exact division, resulting in undefined behavior where // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`. // First, check x % y != 0 (or if that computation overflows). - let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, a, b)?; + let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?; if overflow || res.assert_bits(a.layout.size) != 0 { // Then, check if `b` is -1, which is the "MIN / -1" case. let minus1 = Scalar::from_int(-1, dest.layout.size); @@ -518,7 +518,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } } // `Rem` says this is all right, so we can let `Div` do its job. 
- self.binop_ignore_overflow(BinOp::Div, a, b, dest) + self.binop_ignore_overflow(BinOp::Div, &a, &b, dest) } /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its diff --git a/compiler/rustc_mir/src/interpret/machine.rs b/compiler/rustc_mir/src/interpret/machine.rs index 53ac62d435187..91f0587a34176 100644 --- a/compiler/rustc_mir/src/interpret/machine.rs +++ b/compiler/rustc_mir/src/interpret/machine.rs @@ -200,8 +200,8 @@ pub trait Machine<'mir, 'tcx>: Sized { fn binary_ptr_op( ecx: &InterpCx<'mir, 'tcx, Self>, bin_op: mir::BinOp, - left: ImmTy<'tcx, Self::PointerTag>, - right: ImmTy<'tcx, Self::PointerTag>, + left: &ImmTy<'tcx, Self::PointerTag>, + right: &ImmTy<'tcx, Self::PointerTag>, ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)>; /// Heap allocations via the `box` keyword. diff --git a/compiler/rustc_mir/src/interpret/operand.rs b/compiler/rustc_mir/src/interpret/operand.rs index 96fe22cab19e5..626f8915ecf08 100644 --- a/compiler/rustc_mir/src/interpret/operand.rs +++ b/compiler/rustc_mir/src/interpret/operand.rs @@ -231,7 +231,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { #[inline] pub fn force_op_ptr( &self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { match op.try_as_mplace(self) { Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()), @@ -304,7 +304,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// in a `Immediate`, not on which data is stored there currently. pub(crate) fn try_read_immediate( &self, - src: OpTy<'tcx, M::PointerTag>, + src: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, Result, MPlaceTy<'tcx, M::PointerTag>>> { Ok(match src.try_as_mplace(self) { Ok(mplace) => { @@ -322,7 +322,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { #[inline(always)] pub fn read_immediate( &self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> { if let Ok(imm) = self.try_read_immediate(op)? { Ok(imm) @@ -334,7 +334,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Read a scalar from a place pub fn read_scalar( &self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, ScalarMaybeUninit> { Ok(self.read_immediate(op)?.to_scalar_or_uninit()) } @@ -350,7 +350,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Projection functions pub fn operand_field( &self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, field: usize, ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { let base = match op.try_as_mplace(self) { @@ -388,7 +388,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn operand_index( &self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, index: u64, ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { if let Ok(index) = usize::try_from(index) { @@ -403,7 +403,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn operand_downcast( &self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, variant: VariantIdx, ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { // Downcasts only change the layout @@ -411,14 +411,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Ok(mplace) => self.mplace_downcast(mplace, variant)?.into(), Err(..) 
=> { let layout = op.layout.for_variant(self, variant); - OpTy { layout, ..op } + OpTy { layout, ..*op } } }) } pub fn operand_projection( &self, - base: OpTy<'tcx, M::PointerTag>, + base: &OpTy<'tcx, M::PointerTag>, proj_elem: mir::PlaceElem<'tcx>, ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { use rustc_middle::mir::ProjectionElem::*; @@ -489,7 +489,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let op = place .projection .iter() - .try_fold(base_op, |op, elem| self.operand_projection(op, elem))?; + .try_fold(base_op, |op, elem| self.operand_projection(&op, elem))?; trace!("eval_place_to_op: got {:?}", *op); // Sanity-check the type we ended up with. @@ -599,7 +599,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Read discriminant, return the runtime value as well as the variant index. pub fn read_discriminant( &self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, (Scalar, VariantIdx)> { trace!("read_discriminant_value {:#?}", op.layout); // Get type and layout of the discriminant. @@ -645,7 +645,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?; // Read tag and sanity-check `tag_layout`. - let tag_val = self.read_immediate(self.operand_field(op, tag_field)?)?; + let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?; assert_eq!(tag_layout.size, tag_val.layout.size); assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed()); let tag_val = tag_val.to_scalar()?; @@ -699,7 +699,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let tag_val = ImmTy::from_uint(tag_bits, tag_layout); let niche_start_val = ImmTy::from_uint(niche_start, tag_layout); let variant_index_relative_val = - self.binary_op(mir::BinOp::Sub, tag_val, niche_start_val)?; + self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?; let variant_index_relative = variant_index_relative_val .to_scalar()? 
.assert_bits(tag_val.layout.size); diff --git a/compiler/rustc_mir/src/interpret/operator.rs b/compiler/rustc_mir/src/interpret/operator.rs index f5081655015b4..7d2dcedda47a6 100644 --- a/compiler/rustc_mir/src/interpret/operator.rs +++ b/compiler/rustc_mir/src/interpret/operator.rs @@ -14,11 +14,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn binop_with_overflow( &mut self, op: mir::BinOp, - left: ImmTy<'tcx, M::PointerTag>, - right: ImmTy<'tcx, M::PointerTag>, + left: &ImmTy<'tcx, M::PointerTag>, + right: &ImmTy<'tcx, M::PointerTag>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { - let (val, overflowed, ty) = self.overflowing_binary_op(op, left, right)?; + let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?; debug_assert_eq!( self.tcx.intern_tup(&[ty, self.tcx.types.bool]), dest.layout.ty, @@ -34,8 +34,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn binop_ignore_overflow( &mut self, op: mir::BinOp, - left: ImmTy<'tcx, M::PointerTag>, - right: ImmTy<'tcx, M::PointerTag>, + left: &ImmTy<'tcx, M::PointerTag>, + right: &ImmTy<'tcx, M::PointerTag>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?; @@ -269,8 +269,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn overflowing_binary_op( &self, bin_op: mir::BinOp, - left: ImmTy<'tcx, M::PointerTag>, - right: ImmTy<'tcx, M::PointerTag>, + left: &ImmTy<'tcx, M::PointerTag>, + right: &ImmTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> { trace!( "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", @@ -347,8 +347,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn binary_op( &self, bin_op: mir::BinOp, - left: ImmTy<'tcx, M::PointerTag>, - right: ImmTy<'tcx, M::PointerTag>, + left: &ImmTy<'tcx, M::PointerTag>, + right: &ImmTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> { let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?; Ok(ImmTy::from_scalar(val, self.layout_of(ty)?)) @@ -359,7 +359,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn overflowing_unary_op( &self, un_op: mir::UnOp, - val: ImmTy<'tcx, M::PointerTag>, + val: &ImmTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> { use rustc_middle::mir::UnOp::*; @@ -409,7 +409,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn unary_op( &self, un_op: mir::UnOp, - val: ImmTy<'tcx, M::PointerTag>, + val: &ImmTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> { let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?; Ok(ImmTy::from_scalar(val, self.layout_of(ty)?)) diff --git a/compiler/rustc_mir/src/interpret/place.rs b/compiler/rustc_mir/src/interpret/place.rs index b79b3d92154b2..fa21ca56eba94 100644 --- a/compiler/rustc_mir/src/interpret/place.rs +++ b/compiler/rustc_mir/src/interpret/place.rs @@ -248,10 +248,10 @@ impl<'tcx, Tag: Debug + Copy> OpTy<'tcx, Tag> { /// Note: do not call `as_ref` on the resulting place. This function should only be used to /// read from the resulting mplace, not to get its address back. 
pub fn try_as_mplace( - self, + &self, cx: &impl HasDataLayout, ) -> Result, ImmTy<'tcx, Tag>> { - match *self { + match **self { Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }), Operand::Immediate(_) if self.layout.is_zst() => { Ok(MPlaceTy::dangling(self.layout, cx)) @@ -263,7 +263,7 @@ impl<'tcx, Tag: Debug + Copy> OpTy<'tcx, Tag> { #[inline(always)] /// Note: do not call `as_ref` on the resulting place. This function should only be used to /// read from the resulting mplace, not to get its address back. - pub fn assert_mem_place(self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> { + pub fn assert_mem_place(&self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> { self.try_as_mplace(cx).unwrap() } } @@ -331,7 +331,7 @@ where /// will always be a MemPlace. Lives in `place.rs` because it creates a place. pub fn deref_operand( &self, - src: OpTy<'tcx, M::PointerTag>, + src: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let val = self.read_immediate(src)?; trace!("deref to {} on {:?}", val.layout.ty, *val); @@ -551,12 +551,12 @@ where Ok(match proj_elem { Field(field, _) => self.mplace_field(base, field.index())?, Downcast(_, variant) => self.mplace_downcast(base, variant)?, - Deref => self.deref_operand(base.into())?, + Deref => self.deref_operand(&base.into())?, Index(local) => { let layout = self.layout_of(self.tcx.types.usize)?; let n = self.access_local(self.frame(), local, Some(layout))?; - let n = self.read_scalar(n)?; + let n = self.read_scalar(&n)?; let n = u64::try_from( self.force_bits(n.check_init()?, self.tcx.data_layout.pointer_size)?, ) @@ -637,7 +637,7 @@ where Ok(match proj_elem { Field(field, _) => self.place_field(base, field.index())?, Downcast(_, variant) => self.place_downcast(base, variant)?, - Deref => self.deref_operand(self.place_to_op(base)?)?.into(), + Deref => self.deref_operand(&self.place_to_op(base)?)?.into(), // For the other variants, we have to force an allocation. // This matches `operand_projection`. Subslice { .. } | ConstantIndex { .. } | Index(_) => { @@ -697,7 +697,7 @@ where if M::enforce_validity(self) { // Data got changed, better make sure it matches the type! - self.validate_operand(self.place_to_op(dest)?)?; + self.validate_operand(&self.place_to_op(dest)?)?; } Ok(()) @@ -714,7 +714,7 @@ where if M::enforce_validity(self) { // Data got changed, better make sure it matches the type! - self.validate_operand(dest.into())?; + self.validate_operand(&dest.into())?; } Ok(()) @@ -843,14 +843,14 @@ where #[inline(always)] pub fn copy_op( &mut self, - src: OpTy<'tcx, M::PointerTag>, + src: &OpTy<'tcx, M::PointerTag>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { self.copy_op_no_validate(src, dest)?; if M::enforce_validity(self) { // Data got changed, better make sure it matches the type! - self.validate_operand(self.place_to_op(dest)?)?; + self.validate_operand(&self.place_to_op(dest)?)?; } Ok(()) @@ -862,7 +862,7 @@ where /// right type. fn copy_op_no_validate( &mut self, - src: OpTy<'tcx, M::PointerTag>, + src: &OpTy<'tcx, M::PointerTag>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { // We do NOT compare the types for equality, because well-typed code can @@ -921,7 +921,7 @@ where /// have the same size. 
pub fn copy_op_transmute( &mut self, - src: OpTy<'tcx, M::PointerTag>, + src: &OpTy<'tcx, M::PointerTag>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) { @@ -964,7 +964,7 @@ where if M::enforce_validity(self) { // Data got changed, better make sure it matches the type! - self.validate_operand(dest.into())?; + self.validate_operand(&dest.into())?; } Ok(()) @@ -1118,8 +1118,8 @@ where ImmTy::from_uint(variant_index_relative, tag_layout); let tag_val = self.binary_op( mir::BinOp::Add, - variant_index_relative_val, - niche_start_val, + &variant_index_relative_val, + &niche_start_val, )?; // Write result. let niche_dest = self.place_field(dest, tag_field)?; diff --git a/compiler/rustc_mir/src/interpret/step.rs b/compiler/rustc_mir/src/interpret/step.rs index fbc72ad8adc96..b4a2bb809af54 100644 --- a/compiler/rustc_mir/src/interpret/step.rs +++ b/compiler/rustc_mir/src/interpret/step.rs @@ -162,29 +162,29 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Use(ref operand) => { // Avoid recomputing the layout let op = self.eval_operand(operand, Some(dest.layout))?; - self.copy_op(op, dest)?; + self.copy_op(&op, dest)?; } BinaryOp(bin_op, ref left, ref right) => { let layout = binop_left_homogeneous(bin_op).then_some(dest.layout); - let left = self.read_immediate(self.eval_operand(left, layout)?)?; + let left = self.read_immediate(&self.eval_operand(left, layout)?)?; let layout = binop_right_homogeneous(bin_op).then_some(left.layout); - let right = self.read_immediate(self.eval_operand(right, layout)?)?; - self.binop_ignore_overflow(bin_op, left, right, dest)?; + let right = self.read_immediate(&self.eval_operand(right, layout)?)?; + self.binop_ignore_overflow(bin_op, &left, &right, dest)?; } CheckedBinaryOp(bin_op, ref left, ref right) => { // Due to the extra boolean in the result, we can never reuse the `dest.layout`. - let left = self.read_immediate(self.eval_operand(left, None)?)?; + let left = self.read_immediate(&self.eval_operand(left, None)?)?; let layout = binop_right_homogeneous(bin_op).then_some(left.layout); - let right = self.read_immediate(self.eval_operand(right, layout)?)?; - self.binop_with_overflow(bin_op, left, right, dest)?; + let right = self.read_immediate(&self.eval_operand(right, layout)?)?; + self.binop_with_overflow(bin_op, &left, &right, dest)?; } UnaryOp(un_op, ref operand) => { // The operand always has the same type as the result. - let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?; - let val = self.unary_op(un_op, val)?; + let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?; + let val = self.unary_op(un_op, &val)?; assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op); self.write_immediate(*val, dest)?; } @@ -208,7 +208,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { if !op.layout.is_zst() { let field_index = active_field_index.unwrap_or(i); let field_dest = self.place_field(dest, field_index)?; - self.copy_op(op, field_dest)?; + self.copy_op(&op, field_dest)?; } } } @@ -221,7 +221,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { if let Some(first_ptr) = self.check_mplace_access(dest, None)? { // Write the first. 
let first = self.mplace_field(dest, 0)?; - self.copy_op(op, first.into())?; + self.copy_op(&op, first.into())?; if length > 1 { let elem_size = first.layout.size; @@ -278,12 +278,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Cast(cast_kind, ref operand, cast_ty) => { let src = self.eval_operand(operand, None)?; let cast_ty = self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty); - self.cast(src, cast_kind, cast_ty, dest)?; + self.cast(&src, cast_kind, cast_ty, dest)?; } Discriminant(place) => { let op = self.eval_place_to_op(place, None)?; - let discr_val = self.read_discriminant(op)?.0; + let discr_val = self.read_discriminant(&op)?.0; self.write_scalar(discr_val, dest)?; } } diff --git a/compiler/rustc_mir/src/interpret/terminator.rs b/compiler/rustc_mir/src/interpret/terminator.rs index 575667f9a9525..8c172a581a37c 100644 --- a/compiler/rustc_mir/src/interpret/terminator.rs +++ b/compiler/rustc_mir/src/interpret/terminator.rs @@ -25,7 +25,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Goto { target } => self.go_to_block(target), SwitchInt { ref discr, ref targets, switch_ty } => { - let discr = self.read_immediate(self.eval_operand(discr, None)?)?; + let discr = self.read_immediate(&self.eval_operand(discr, None)?)?; trace!("SwitchInt({:?})", *discr); assert_eq!(discr.layout.ty, switch_ty); @@ -38,8 +38,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let res = self .overflowing_binary_op( mir::BinOp::Eq, - discr, - ImmTy::from_uint(const_int, discr.layout), + &discr, + &ImmTy::from_uint(const_int, discr.layout), )? .0; if res.to_bool()? { @@ -58,7 +58,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let (fn_val, abi) = match *func.layout.ty.kind() { ty::FnPtr(sig) => { let caller_abi = sig.abi(); - let fn_ptr = self.read_scalar(func)?.check_init()?; + let fn_ptr = self.read_scalar(&func)?.check_init()?; let fn_val = self.memory.get_fn(fn_ptr)?; (fn_val, caller_abi) } @@ -101,7 +101,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Assert { ref cond, expected, ref msg, target, cleanup } => { let cond_val = - self.read_immediate(self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?; + self.read_immediate(&self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?; if expected == cond_val { self.go_to_block(target); } else { @@ -202,7 +202,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ) } // We allow some transmutes here - self.copy_op_transmute(caller_arg, callee_arg) + self.copy_op_transmute(&caller_arg, callee_arg) } /// Call this function -- pushing the stack frame and initializing the arguments. @@ -314,7 +314,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let caller_args: Cow<'_, [OpTy<'tcx, M::PointerTag>]> = if caller_abi == Abi::RustCall && !args.is_empty() { // Untuple - let (&untuple_arg, args) = args.split_last().unwrap(); + let (untuple_arg, args) = args.split_last().unwrap(); trace!("eval_fn_call: Will pass last argument by untupling"); Cow::from( args.iter() @@ -397,7 +397,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let receiver_place = match args[0].layout.ty.builtin_deref(true) { Some(_) => { // Built-in pointer. - self.deref_operand(args[0])? + self.deref_operand(&args[0])? } None => { // Unsized self. 
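The pattern in this patch is uniform: functions that previously took OpTy or ImmTy by value now take &OpTy / &ImmTy, and call sites borrow instead of copying. Given the sizes pinned by the first patch (OpTy at 80 bytes, ImmTy at 72 bytes on x86_64), each by-value call copied a fairly large Copy struct; passing a reference copies only a pointer. The sketch below illustrates the calling-convention change with a stand-in type; BigOperand is hypothetical and is not the rustc type.

// Minimal sketch of the by-value -> by-reference change, using a stand-in type.
// `BigOperand` is hypothetical; it only mimics the ~80-byte size asserted above.
#[derive(Copy, Clone)]
struct BigOperand {
    words: [u64; 10],
}

// Before: every call copies the whole 80-byte struct.
fn first_word_by_value(op: BigOperand) -> u64 {
    op.words[0]
}

// After: callers lend a reference, so only a pointer crosses the call boundary.
fn first_word_by_ref(op: &BigOperand) -> u64 {
    op.words[0]
}

fn main() {
    let op = BigOperand { words: [7; 10] };
    assert_eq!(first_word_by_value(op), first_word_by_ref(&op));
}

The types stay Copy, so the change is not about ownership; as the commit subject suggests, it is about no longer moving these structs around by value throughout the interpreter.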
diff --git a/compiler/rustc_mir/src/interpret/validity.rs b/compiler/rustc_mir/src/interpret/validity.rs index 64e7a4d9ca758..9c2ae1c7fe30e 100644 --- a/compiler/rustc_mir/src/interpret/validity.rs +++ b/compiler/rustc_mir/src/interpret/validity.rs @@ -375,7 +375,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' /// Check a reference or `Box`. fn check_safe_pointer( &mut self, - value: OpTy<'tcx, M::PointerTag>, + value: &OpTy<'tcx, M::PointerTag>, kind: &str, ) -> InterpResult<'tcx> { let value = try_validation!( @@ -491,7 +491,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' fn read_scalar( &self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, ScalarMaybeUninit> { Ok(try_validation!( self.ecx.read_scalar(op), @@ -504,7 +504,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' /// at that type. Return `true` if the type is indeed primitive. fn try_visit_primitive( &mut self, - value: OpTy<'tcx, M::PointerTag>, + value: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, bool> { // Go over all the primitive types let ty = value.layout.ty; @@ -552,7 +552,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' // actually enforce the strict rules for raw pointers (mostly because // that lets us re-use `ref_to_mplace`). let place = try_validation!( - self.ecx.read_immediate(value).and_then(|i| self.ecx.ref_to_mplace(i)), + self.ecx.read_immediate(value).and_then(|ref i| self.ecx.ref_to_mplace(i)), self.path, err_ub!(InvalidUninitBytes(None)) => { "uninitialized raw pointer" }, err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" }, @@ -631,7 +631,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' fn visit_scalar( &mut self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, scalar_layout: &Scalar, ) -> InterpResult<'tcx> { let value = self.read_scalar(op)?; @@ -705,7 +705,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> fn read_discriminant( &mut self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, VariantIdx> { self.with_elem(PathElem::EnumTag, move |this| { Ok(try_validation!( @@ -725,9 +725,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> #[inline] fn visit_field( &mut self, - old_op: OpTy<'tcx, M::PointerTag>, + old_op: &OpTy<'tcx, M::PointerTag>, field: usize, - new_op: OpTy<'tcx, M::PointerTag>, + new_op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { let elem = self.aggregate_field_path_elem(old_op.layout, field); self.with_elem(elem, move |this| this.visit_value(new_op)) @@ -736,9 +736,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> #[inline] fn visit_variant( &mut self, - old_op: OpTy<'tcx, M::PointerTag>, + old_op: &OpTy<'tcx, M::PointerTag>, variant_id: VariantIdx, - new_op: OpTy<'tcx, M::PointerTag>, + new_op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { let name = match old_op.layout.ty.kind() { ty::Adt(adt, _) => PathElem::Variant(adt.variants[variant_id].ident.name), @@ -752,14 +752,14 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> #[inline(always)] fn visit_union( &mut self, - _op: OpTy<'tcx, M::PointerTag>, + _op: &OpTy<'tcx, M::PointerTag>, _fields: NonZeroUsize, ) -> InterpResult<'tcx> { Ok(()) 
} #[inline] - fn visit_value(&mut self, op: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> { + fn visit_value(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> { trace!("visit_value: {:?}, {:?}", *op, op.layout); // Check primitive types -- the leafs of our recursive descend. @@ -816,7 +816,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> fn visit_aggregate( &mut self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, fields: impl Iterator>, ) -> InterpResult<'tcx> { match op.layout.ty.kind() { @@ -918,7 +918,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { fn validate_operand_internal( &self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, path: Vec, ref_tracking: Option<&mut RefTracking, Vec>>, ctfe_mode: Option, @@ -929,10 +929,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self }; // Try to cast to ptr *once* instead of all the time. - let op = self.force_op_ptr(op).unwrap_or(op); + let op = self.force_op_ptr(&op).unwrap_or(*op); // Run it. - match visitor.visit_value(op) { + match visitor.visit_value(&op) { Ok(()) => Ok(()), // Pass through validation failures. Err(err) if matches!(err.kind, err_ub!(ValidationFailure { .. })) => Err(err), @@ -960,7 +960,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { #[inline(always)] pub fn const_validate_operand( &self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, path: Vec, ref_tracking: &mut RefTracking, Vec>, ctfe_mode: CtfeValidationMode, @@ -972,7 +972,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// `op` is assumed to cover valid memory if it is an indirect operand. /// It will error if the bits at the destination do not match the ones described by the layout. #[inline(always)] - pub fn validate_operand(&self, op: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> { + pub fn validate_operand(&self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> { self.validate_operand_internal(op, vec![], None, None) } } diff --git a/compiler/rustc_mir/src/interpret/visitor.rs b/compiler/rustc_mir/src/interpret/visitor.rs index 097b9ae6ca1cd..7efed19362680 100644 --- a/compiler/rustc_mir/src/interpret/visitor.rs +++ b/compiler/rustc_mir/src/interpret/visitor.rs @@ -18,20 +18,20 @@ pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Copy { fn layout(&self) -> TyAndLayout<'tcx>; /// Makes this into an `OpTy`. - fn to_op(self, ecx: &InterpCx<'mir, 'tcx, M>) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>; + fn to_op(&self, ecx: &InterpCx<'mir, 'tcx, M>) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>; /// Creates this from an `MPlaceTy`. fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self; /// Projects to the given enum variant. fn project_downcast( - self, + &self, ecx: &InterpCx<'mir, 'tcx, M>, variant: VariantIdx, ) -> InterpResult<'tcx, Self>; /// Projects to the n-th field. 
- fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: usize) + fn project_field(&self, ecx: &InterpCx<'mir, 'tcx, M>, field: usize) -> InterpResult<'tcx, Self>; } @@ -45,10 +45,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tc #[inline(always)] fn to_op( - self, + &self, _ecx: &InterpCx<'mir, 'tcx, M>, ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { - Ok(self) + Ok(*self) } #[inline(always)] @@ -58,7 +58,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tc #[inline(always)] fn project_downcast( - self, + &self, ecx: &InterpCx<'mir, 'tcx, M>, variant: VariantIdx, ) -> InterpResult<'tcx, Self> { @@ -67,7 +67,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tc #[inline(always)] fn project_field( - self, + &self, ecx: &InterpCx<'mir, 'tcx, M>, field: usize, ) -> InterpResult<'tcx, Self> { @@ -85,10 +85,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> #[inline(always)] fn to_op( - self, + &self, _ecx: &InterpCx<'mir, 'tcx, M>, ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { - Ok(self.into()) + Ok((*self).into()) } #[inline(always)] @@ -98,20 +98,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> #[inline(always)] fn project_downcast( - self, + &self, ecx: &InterpCx<'mir, 'tcx, M>, variant: VariantIdx, ) -> InterpResult<'tcx, Self> { - ecx.mplace_downcast(self, variant) + ecx.mplace_downcast(*self, variant) } #[inline(always)] fn project_field( - self, + &self, ecx: &InterpCx<'mir, 'tcx, M>, field: usize, ) -> InterpResult<'tcx, Self> { - ecx.mplace_field(self, field) + ecx.mplace_field(*self, field) } } @@ -129,7 +129,7 @@ macro_rules! make_value_visitor { #[inline(always)] fn read_discriminant( &mut self, - op: OpTy<'tcx, M::PointerTag>, + op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, VariantIdx> { Ok(self.ecx().read_discriminant(op)?.1) } @@ -137,13 +137,13 @@ macro_rules! make_value_visitor { // Recursive actions, ready to be overloaded. /// Visits the given value, dispatching as appropriate to more specialized visitors. #[inline(always)] - fn visit_value(&mut self, v: Self::V) -> InterpResult<'tcx> + fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx> { self.walk_value(v) } /// Visits the given value as a union. No automatic recursion can happen here. #[inline(always)] - fn visit_union(&mut self, _v: Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx> + fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx> { Ok(()) } @@ -153,7 +153,7 @@ macro_rules! make_value_visitor { #[inline(always)] fn visit_aggregate( &mut self, - v: Self::V, + v: &Self::V, fields: impl Iterator>, ) -> InterpResult<'tcx> { self.walk_aggregate(v, fields) @@ -167,9 +167,9 @@ macro_rules! make_value_visitor { #[inline(always)] fn visit_field( &mut self, - _old_val: Self::V, + _old_val: &Self::V, _field: usize, - new_val: Self::V, + new_val: &Self::V, ) -> InterpResult<'tcx> { self.visit_value(new_val) } @@ -179,9 +179,9 @@ macro_rules! make_value_visitor { #[inline(always)] fn visit_variant( &mut self, - _old_val: Self::V, + _old_val: &Self::V, _variant: VariantIdx, - new_val: Self::V, + new_val: &Self::V, ) -> InterpResult<'tcx> { self.visit_value(new_val) } @@ -189,16 +189,16 @@ macro_rules! make_value_visitor { // Default recursors. Not meant to be overloaded. fn walk_aggregate( &mut self, - v: Self::V, + v: &Self::V, fields: impl Iterator>, ) -> InterpResult<'tcx> { // Now iterate over it. 
for (idx, field_val) in fields.enumerate() { - self.visit_field(v, idx, field_val?)?; + self.visit_field(v, idx, &field_val?)?; } Ok(()) } - fn walk_value(&mut self, v: Self::V) -> InterpResult<'tcx> + fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx> { trace!("walk_value: type: {}", v.layout().ty); @@ -211,7 +211,7 @@ macro_rules! make_value_visitor { let inner = self.ecx().unpack_dyn_trait(dest)?.1; trace!("walk_value: dyn object layout: {:#?}", inner.layout); // recurse with the inner type - return self.visit_field(v, 0, Value::from_mem_place(inner)); + return self.visit_field(&v, 0, &Value::from_mem_place(inner)); }, // Slices do not need special handling here: they have `Array` field // placement with length 0, so we enter the `Array` case below which @@ -254,11 +254,11 @@ macro_rules! make_value_visitor { // with *its* fields. Variants::Multiple { .. } => { let op = v.to_op(self.ecx())?; - let idx = self.read_discriminant(op)?; + let idx = self.read_discriminant(&op)?; let inner = v.project_downcast(self.ecx(), idx)?; trace!("walk_value: variant layout: {:#?}", inner.layout()); // recurse with the inner type - self.visit_variant(v, idx, inner) + self.visit_variant(v, idx, &inner) } // For single-variant layouts, we already did anything there is to do. Variants::Single { .. } => Ok(()) diff --git a/compiler/rustc_mir/src/transform/const_prop.rs b/compiler/rustc_mir/src/transform/const_prop.rs index fd5c2236902a2..90143c616c5e0 100644 --- a/compiler/rustc_mir/src/transform/const_prop.rs +++ b/compiler/rustc_mir/src/transform/const_prop.rs @@ -228,8 +228,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx> fn binary_ptr_op( _ecx: &InterpCx<'mir, 'tcx, Self>, _bin_op: BinOp, - _left: ImmTy<'tcx>, - _right: ImmTy<'tcx>, + _left: &ImmTy<'tcx>, + _right: &ImmTy<'tcx>, ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> { // We can't do this because aliasing of memory can differ between const eval and llvm throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp") @@ -426,7 +426,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { // Try to read the local as an immediate so that if it is representable as a scalar, we can // handle it as such, but otherwise, just return the value as is. - Some(match self.ecx.try_read_immediate(op) { + Some(match self.ecx.try_read_immediate(&op) { Ok(Ok(imm)) => imm.into(), _ => op, }) @@ -548,8 +548,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { source_info: SourceInfo, ) -> Option<()> { if let (val, true) = self.use_ecx(|this| { - let val = this.ecx.read_immediate(this.ecx.eval_operand(arg, None)?)?; - let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, val)?; + let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?; + let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, &val)?; Ok((val, overflow)) })? { // `AssertKind` only has an `OverflowNeg` variant, so make sure that is @@ -573,8 +573,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { right: &Operand<'tcx>, source_info: SourceInfo, ) -> Option<()> { - let r = self.use_ecx(|this| this.ecx.read_immediate(this.ecx.eval_operand(right, None)?)); - let l = self.use_ecx(|this| this.ecx.read_immediate(this.ecx.eval_operand(left, None)?)); + let r = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(right, None)?)); + let l = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(left, None)?)); // Check for exceeding shifts *even if* we cannot evaluate the LHS. 
if op == BinOp::Shr || op == BinOp::Shl { let r = r?; @@ -609,7 +609,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { } } - if let (Some(l), Some(r)) = (l, r) { + if let (Some(l), Some(r)) = (&l, &r) { // The remaining operators are handled through `overflowing_binary_op`. if self.use_ecx(|this| { let (_res, overflow, _ty) = this.ecx.overflowing_binary_op(op, l, r)?; @@ -630,7 +630,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { match *operand { Operand::Copy(l) | Operand::Move(l) => { if let Some(value) = self.get_const(l) { - if self.should_const_prop(value) { + if self.should_const_prop(&value) { // FIXME(felix91gr): this code only handles `Scalar` cases. // For now, we're not handling `ScalarPair` cases because // doing so here would require a lot of code duplication. @@ -745,7 +745,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { let r = this.ecx.eval_operand(right, None); let const_arg = match (l, r) { - (Ok(x), Err(_)) | (Err(_), Ok(x)) => this.ecx.read_immediate(x)?, + (Ok(ref x), Err(_)) | (Err(_), Ok(ref x)) => this.ecx.read_immediate(x)?, (Err(e), Err(_)) => return Err(e), (Ok(_), Ok(_)) => { this.ecx.eval_rvalue_into_place(rvalue, place)?; @@ -809,7 +809,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { fn replace_with_const( &mut self, rval: &mut Rvalue<'tcx>, - value: OpTy<'tcx>, + value: &OpTy<'tcx>, source_info: SourceInfo, ) { if let Rvalue::Use(Operand::Constant(c)) = rval { @@ -902,7 +902,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { } /// Returns `true` if and only if this `op` should be const-propagated into. - fn should_const_prop(&mut self, op: OpTy<'tcx>) -> bool { + fn should_const_prop(&mut self, op: &OpTy<'tcx>) -> bool { let mir_opt_level = self.tcx.sess.opts.debugging_opts.mir_opt_level; if mir_opt_level == 0 { @@ -913,7 +913,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { return false; } - match *op { + match **op { interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => { s.is_bits() } @@ -1094,7 +1094,7 @@ impl<'mir, 'tcx> MutVisitor<'tcx> for ConstPropagator<'mir, 'tcx> { // This will return None if the above `const_prop` invocation only "wrote" a // type whose creation requires no write. E.g. a generator whose initial state // consists solely of uninitialized memory (so it doesn't capture any locals). - if let Some(value) = self.get_const(place) { + if let Some(ref value) = self.get_const(place) { if self.should_const_prop(value) { trace!("replacing {:?} with {:?}", rval, value); self.replace_with_const(rval, value, source_info); @@ -1177,10 +1177,10 @@ impl<'mir, 'tcx> MutVisitor<'tcx> for ConstPropagator<'mir, 'tcx> { self.super_terminator(terminator, location); match &mut terminator.kind { TerminatorKind::Assert { expected, ref msg, ref mut cond, .. } => { - if let Some(value) = self.eval_operand(&cond, source_info) { + if let Some(ref value) = self.eval_operand(&cond, source_info) { trace!("assertion on {:?} should be {:?}", value, expected); let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected)); - let value_const = self.ecx.read_scalar(value).unwrap(); + let value_const = self.ecx.read_scalar(&value).unwrap(); if expected != value_const { enum DbgVal { Val(T), @@ -1199,7 +1199,7 @@ impl<'mir, 'tcx> MutVisitor<'tcx> for ConstPropagator<'mir, 'tcx> { // triggered the assert on the value of the rhs. 
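The switch to `match **op` above (with `op: &OpTy`) relies on the wrapper being `Copy` and implementing `Deref`: the first `*` removes the reference, the second goes through `Deref` to the inner operand. A small self-contained sketch of the idiom with hypothetical stand-in types, not the compiler's real ones:

    use std::ops::Deref;

    #[derive(Copy, Clone)]
    enum Op {
        Immediate(u64),
        Indirect(usize),
    }

    #[derive(Copy, Clone)]
    struct OpAndLayout {
        op: Op,
        size: u64,
    }

    impl Deref for OpAndLayout {
        type Target = Op;
        fn deref(&self) -> &Op {
            &self.op
        }
    }

    // `*op` is the wrapper, `**op` is the inner `Op`, copied out for the match.
    fn is_scalar(op: &OpAndLayout) -> bool {
        match **op {
            Op::Immediate(_) => true,
            Op::Indirect(_) => false,
        }
    }

    fn main() {
        let op = OpAndLayout { op: Op::Immediate(7), size: 8 };
        assert!(is_scalar(&op));
    }
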
match self.eval_operand(op, source_info) { Some(op) => { - DbgVal::Val(self.ecx.read_immediate(op).unwrap().to_const_int()) + DbgVal::Val(self.ecx.read_immediate(&op).unwrap().to_const_int()) } None => DbgVal::Underscore, } From fe0c46d07eba2fc385b6d31a883c177c91ac3e95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Mi=C4=85sko?= Date: Mon, 15 Feb 2021 00:00:00 +0000 Subject: [PATCH 3/6] Pass PlaceTy by reference not value --- .../rustc_mir/src/const_eval/eval_queries.rs | 2 +- compiler/rustc_mir/src/const_eval/machine.rs | 6 +-- compiler/rustc_mir/src/interpret/cast.rs | 10 ++--- .../rustc_mir/src/interpret/eval_context.rs | 8 ++-- .../rustc_mir/src/interpret/intrinsics.rs | 8 ++-- .../interpret/intrinsics/caller_location.rs | 6 +-- compiler/rustc_mir/src/interpret/machine.rs | 12 +++--- compiler/rustc_mir/src/interpret/operand.rs | 4 +- compiler/rustc_mir/src/interpret/operator.rs | 4 +- compiler/rustc_mir/src/interpret/place.rs | 36 ++++++++-------- compiler/rustc_mir/src/interpret/step.rs | 42 +++++++++---------- .../rustc_mir/src/interpret/terminator.rs | 22 ++++++---- .../rustc_mir/src/transform/const_prop.rs | 18 ++++---- 13 files changed, 91 insertions(+), 87 deletions(-) diff --git a/compiler/rustc_mir/src/const_eval/eval_queries.rs b/compiler/rustc_mir/src/const_eval/eval_queries.rs index e573eeae00314..9a2e659678d53 100644 --- a/compiler/rustc_mir/src/const_eval/eval_queries.rs +++ b/compiler/rustc_mir/src/const_eval/eval_queries.rs @@ -56,7 +56,7 @@ fn eval_body_using_ecx<'mir, 'tcx>( ecx.push_stack_frame( cid.instance, body, - Some(ret.into()), + Some(&ret.into()), StackPopCleanup::None { cleanup: false }, )?; diff --git a/compiler/rustc_mir/src/const_eval/machine.rs b/compiler/rustc_mir/src/const_eval/machine.rs index 6282288b26e92..70548c583d695 100644 --- a/compiler/rustc_mir/src/const_eval/machine.rs +++ b/compiler/rustc_mir/src/const_eval/machine.rs @@ -222,7 +222,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, instance: ty::Instance<'tcx>, _abi: Abi, args: &[OpTy<'tcx>], - _ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>, + _ret: Option<(&PlaceTy<'tcx>, mir::BasicBlock)>, _unwind: Option, // unwinding is not supported in consts ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> { debug!("find_mir_or_eval_fn: {:?}", instance); @@ -262,7 +262,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, ecx: &mut InterpCx<'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, args: &[OpTy<'tcx>], - ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>, + ret: Option<(&PlaceTy<'tcx>, mir::BasicBlock)>, _unwind: Option, ) -> InterpResult<'tcx> { // Shared intrinsics. 
@@ -366,7 +366,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, fn box_alloc( _ecx: &mut InterpCx<'mir, 'tcx, Self>, - _dest: PlaceTy<'tcx>, + _dest: &PlaceTy<'tcx>, ) -> InterpResult<'tcx> { Err(ConstEvalErrKind::NeedsRfc("heap allocations via `box` keyword".to_string()).into()) } diff --git a/compiler/rustc_mir/src/interpret/cast.rs b/compiler/rustc_mir/src/interpret/cast.rs index 257012ead6641..04c3fad3a13d7 100644 --- a/compiler/rustc_mir/src/interpret/cast.rs +++ b/compiler/rustc_mir/src/interpret/cast.rs @@ -20,7 +20,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { src: &OpTy<'tcx, M::PointerTag>, cast_kind: CastKind, cast_ty: Ty<'tcx>, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { use rustc_middle::mir::CastKind::*; // FIXME: In which cases should we trigger UB when the source is uninit? @@ -260,7 +260,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { fn unsize_into_ptr( &mut self, src: &OpTy<'tcx, M::PointerTag>, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, // The pointee types source_ty: Ty<'tcx>, cast_ty: Ty<'tcx>, @@ -302,7 +302,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { &mut self, src: &OpTy<'tcx, M::PointerTag>, cast_ty: TyAndLayout<'tcx>, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty); match (&src.layout.ty.kind(), &cast_ty.ty.kind()) { @@ -340,9 +340,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let src_field = self.operand_field(src, i)?; let dst_field = self.place_field(dest, i)?; if src_field.layout.ty == cast_ty_field.ty { - self.copy_op(&src_field, dst_field)?; + self.copy_op(&src_field, &dst_field)?; } else { - self.unsize_into(&src_field, cast_ty_field, dst_field)?; + self.unsize_into(&src_field, cast_ty_field, &dst_field)?; } } Ok(()) diff --git a/compiler/rustc_mir/src/interpret/eval_context.rs b/compiler/rustc_mir/src/interpret/eval_context.rs index 7173e1eca5973..3d3a4afb5aca3 100644 --- a/compiler/rustc_mir/src/interpret/eval_context.rs +++ b/compiler/rustc_mir/src/interpret/eval_context.rs @@ -654,7 +654,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { &mut self, instance: ty::Instance<'tcx>, body: &'mir mir::Body<'tcx>, - return_place: Option>, + return_place: Option<&PlaceTy<'tcx, M::PointerTag>>, return_to_block: StackPopCleanup, ) -> InterpResult<'tcx> { // first push a stack frame so we have access to the local substs @@ -662,7 +662,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { body, loc: Err(body.span), // Span used for errors caused during preamble. return_to_block, - return_place, + return_place: return_place.copied(), // empty local array, we fill it in below, after we are inside the stack frame and // all methods actually know about the frame locals: IndexVec::new(), @@ -777,10 +777,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { if !unwinding { // Copy the return value to the caller's stack frame. 
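The stack frame still stores its return place by value, so the borrowed `Option<&PlaceTy>` argument is turned back into an owned `Option` with `Option::copied`, which is available because the place type is `Copy`. A standalone sketch of that conversion under the same assumption, with simplified placeholder types:

    #[derive(Copy, Clone, Debug, PartialEq)]
    struct Dest(u32);

    struct Frame {
        return_place: Option<Dest>,
    }

    // Callers pass a borrowed destination; the frame keeps its own copy.
    fn push_frame(return_place: Option<&Dest>) -> Frame {
        Frame { return_place: return_place.copied() }
    }

    fn main() {
        let dest = Dest(3);
        let frame = push_frame(Some(&dest));
        assert_eq!(frame.return_place, Some(Dest(3)));
    }
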
- if let Some(return_place) = frame.return_place { + if let Some(ref return_place) = frame.return_place { let op = self.access_local(&frame, mir::RETURN_PLACE, None)?; self.copy_op_transmute(&op, return_place)?; - trace!("{:?}", self.dump_place(*return_place)); + trace!("{:?}", self.dump_place(**return_place)); } else { throw_ub!(Unreachable); } diff --git a/compiler/rustc_mir/src/interpret/intrinsics.rs b/compiler/rustc_mir/src/interpret/intrinsics.rs index 0252dd15888af..7c53fcbb9552e 100644 --- a/compiler/rustc_mir/src/interpret/intrinsics.rs +++ b/compiler/rustc_mir/src/interpret/intrinsics.rs @@ -115,7 +115,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { &mut self, instance: ty::Instance<'tcx>, args: &[OpTy<'tcx, M::PointerTag>], - ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>, + ret: Option<(&PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>, ) -> InterpResult<'tcx, bool> { let substs = instance.substs; let intrinsic_name = self.tcx.item_name(instance.def_id()); @@ -459,7 +459,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { for i in 0..len { let place = self.place_index(dest, i)?; let value = if i == index { *elem } else { self.operand_index(input, i)? }; - self.copy_op(&value, place)?; + self.copy_op(&value, &place)?; } } sym::simd_extract => { @@ -492,7 +492,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { _ => return Ok(false), } - trace!("{:?}", self.dump_place(*dest)); + trace!("{:?}", self.dump_place(**dest)); self.go_to_block(ret); Ok(true) } @@ -501,7 +501,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { &mut self, a: &ImmTy<'tcx, M::PointerTag>, b: &ImmTy<'tcx, M::PointerTag>, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { // Performs an exact division, resulting in undefined behavior where // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`. diff --git a/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs index 5c917f00d157b..72b07d7637243 100644 --- a/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs +++ b/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs @@ -92,11 +92,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let location = self.allocate(loc_layout, MemoryKind::CallerLocation); // Initialize fields. 
- self.write_immediate(file.to_ref(), self.mplace_field(location, 0).unwrap().into()) + self.write_immediate(file.to_ref(), &self.mplace_field(location, 0).unwrap().into()) .expect("writing to memory we just allocated cannot fail"); - self.write_scalar(line, self.mplace_field(location, 1).unwrap().into()) + self.write_scalar(line, &self.mplace_field(location, 1).unwrap().into()) .expect("writing to memory we just allocated cannot fail"); - self.write_scalar(col, self.mplace_field(location, 2).unwrap().into()) + self.write_scalar(col, &self.mplace_field(location, 2).unwrap().into()) .expect("writing to memory we just allocated cannot fail"); location diff --git a/compiler/rustc_mir/src/interpret/machine.rs b/compiler/rustc_mir/src/interpret/machine.rs index 91f0587a34176..65869f956397f 100644 --- a/compiler/rustc_mir/src/interpret/machine.rs +++ b/compiler/rustc_mir/src/interpret/machine.rs @@ -157,7 +157,7 @@ pub trait Machine<'mir, 'tcx>: Sized { instance: ty::Instance<'tcx>, abi: Abi, args: &[OpTy<'tcx, Self::PointerTag>], - ret: Option<(PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>, + ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>, unwind: Option, ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>>; @@ -168,7 +168,7 @@ pub trait Machine<'mir, 'tcx>: Sized { fn_val: Self::ExtraFnVal, abi: Abi, args: &[OpTy<'tcx, Self::PointerTag>], - ret: Option<(PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>, + ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>, unwind: Option, ) -> InterpResult<'tcx>; @@ -178,7 +178,7 @@ pub trait Machine<'mir, 'tcx>: Sized { ecx: &mut InterpCx<'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, args: &[OpTy<'tcx, Self::PointerTag>], - ret: Option<(PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>, + ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>, unwind: Option, ) -> InterpResult<'tcx>; @@ -207,7 +207,7 @@ pub trait Machine<'mir, 'tcx>: Sized { /// Heap allocations via the `box` keyword. fn box_alloc( ecx: &mut InterpCx<'mir, 'tcx, Self>, - dest: PlaceTy<'tcx, Self::PointerTag>, + dest: &PlaceTy<'tcx, Self::PointerTag>, ) -> InterpResult<'tcx>; /// Called to read the specified `local` from the `frame`. @@ -327,7 +327,7 @@ pub trait Machine<'mir, 'tcx>: Sized { fn retag( _ecx: &mut InterpCx<'mir, 'tcx, Self>, _kind: mir::RetagKind, - _place: PlaceTy<'tcx, Self::PointerTag>, + _place: &PlaceTy<'tcx, Self::PointerTag>, ) -> InterpResult<'tcx> { Ok(()) } @@ -420,7 +420,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { fn_val: !, _abi: Abi, _args: &[OpTy<$tcx>], - _ret: Option<(PlaceTy<$tcx>, mir::BasicBlock)>, + _ret: Option<(&PlaceTy<$tcx>, mir::BasicBlock)>, _unwind: Option, ) -> InterpResult<$tcx> { match fn_val {} diff --git a/compiler/rustc_mir/src/interpret/operand.rs b/compiler/rustc_mir/src/interpret/operand.rs index 626f8915ecf08..1a0d9ba1d3c56 100644 --- a/compiler/rustc_mir/src/interpret/operand.rs +++ b/compiler/rustc_mir/src/interpret/operand.rs @@ -462,9 +462,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { #[inline(always)] pub fn place_to_op( &self, - place: PlaceTy<'tcx, M::PointerTag>, + place: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { - let op = match *place { + let op = match **place { Place::Ptr(mplace) => Operand::Indirect(mplace), Place::Local { frame, local } => { *self.access_local(&self.stack()[frame], local, None)? 
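Passing `&self.mplace_field(location, 0).unwrap().into()` directly in argument position works because a temporary created inside a function argument lives until the end of the enclosing statement, so the borrow stays valid for the whole call. A reduced illustration of that rule with made-up types:

    #[derive(Debug)]
    struct Place(u64);

    impl From<u64> for Place {
        fn from(offset: u64) -> Self {
            Place(offset)
        }
    }

    fn write_scalar(value: u64, dest: &Place) {
        println!("write {} to {:?}", value, dest);
    }

    fn main() {
        // The `Place` produced by `.into()` is a temporary, but it is not dropped
        // until the end of this statement, so borrowing it for the call is fine.
        write_scalar(42, &7u64.into());
    }
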
diff --git a/compiler/rustc_mir/src/interpret/operator.rs b/compiler/rustc_mir/src/interpret/operator.rs index 7d2dcedda47a6..3737f8781c7ae 100644 --- a/compiler/rustc_mir/src/interpret/operator.rs +++ b/compiler/rustc_mir/src/interpret/operator.rs @@ -16,7 +16,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { op: mir::BinOp, left: &ImmTy<'tcx, M::PointerTag>, right: &ImmTy<'tcx, M::PointerTag>, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?; debug_assert_eq!( @@ -36,7 +36,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { op: mir::BinOp, left: &ImmTy<'tcx, M::PointerTag>, right: &ImmTy<'tcx, M::PointerTag>, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?; assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op); diff --git a/compiler/rustc_mir/src/interpret/place.rs b/compiler/rustc_mir/src/interpret/place.rs index fa21ca56eba94..5dbb49018a6c1 100644 --- a/compiler/rustc_mir/src/interpret/place.rs +++ b/compiler/rustc_mir/src/interpret/place.rs @@ -592,7 +592,7 @@ where /// into the field of a local `ScalarPair`, we have to first allocate it. pub fn place_field( &mut self, - base: PlaceTy<'tcx, M::PointerTag>, + base: &PlaceTy<'tcx, M::PointerTag>, field: usize, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { // FIXME: We could try to be smarter and avoid allocation for fields that span the @@ -603,7 +603,7 @@ where pub fn place_index( &mut self, - base: PlaceTy<'tcx, M::PointerTag>, + base: &PlaceTy<'tcx, M::PointerTag>, index: u64, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { let mplace = self.force_allocation(base)?; @@ -612,7 +612,7 @@ where pub fn place_downcast( &self, - base: PlaceTy<'tcx, M::PointerTag>, + base: &PlaceTy<'tcx, M::PointerTag>, variant: VariantIdx, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { // Downcast just changes the layout @@ -622,7 +622,7 @@ where } Place::Local { .. } => { let layout = base.layout.for_variant(self, variant); - PlaceTy { layout, ..base } + PlaceTy { layout, ..*base } } }) } @@ -630,7 +630,7 @@ where /// Projects into a place. pub fn place_projection( &mut self, - base: PlaceTy<'tcx, M::PointerTag>, + base: &PlaceTy<'tcx, M::PointerTag>, &proj_elem: &mir::ProjectionElem>, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { use rustc_middle::mir::ProjectionElem::*; @@ -660,7 +660,7 @@ where }; for elem in place.projection.iter() { - place_ty = self.place_projection(place_ty, &elem)? + place_ty = self.place_projection(&place_ty, &elem)? 
} trace!("{:?}", self.dump_place(place_ty.place)); @@ -681,7 +681,7 @@ where pub fn write_scalar( &mut self, val: impl Into>, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { self.write_immediate(Immediate::Scalar(val.into()), dest) } @@ -691,7 +691,7 @@ where pub fn write_immediate( &mut self, src: Immediate, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { self.write_immediate_no_validate(src, dest)?; @@ -726,7 +726,7 @@ where fn write_immediate_no_validate( &mut self, src: Immediate, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { if cfg!(debug_assertions) { // This is a very common path, avoid some checks in release mode @@ -844,7 +844,7 @@ where pub fn copy_op( &mut self, src: &OpTy<'tcx, M::PointerTag>, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { self.copy_op_no_validate(src, dest)?; @@ -863,7 +863,7 @@ where fn copy_op_no_validate( &mut self, src: &OpTy<'tcx, M::PointerTag>, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { // We do NOT compare the types for equality, because well-typed code can // actually "transmute" `&mut T` to `&T` in an assignment without a cast. @@ -922,7 +922,7 @@ where pub fn copy_op_transmute( &mut self, src: &OpTy<'tcx, M::PointerTag>, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) { // Fast path: Just use normal `copy_op` @@ -959,7 +959,7 @@ where let dest = self.force_allocation(dest)?; self.copy_op_no_validate( src, - PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout }), + &PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout }), )?; if M::enforce_validity(self) { @@ -980,7 +980,7 @@ where /// version. pub fn force_allocation_maybe_sized( &mut self, - place: PlaceTy<'tcx, M::PointerTag>, + place: &PlaceTy<'tcx, M::PointerTag>, meta: MemPlaceMeta, ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option)> { let (mplace, size) = match place.place { @@ -1025,7 +1025,7 @@ where #[inline(always)] pub fn force_allocation( &mut self, - place: PlaceTy<'tcx, M::PointerTag>, + place: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { Ok(self.force_allocation_maybe_sized(place, MemPlaceMeta::None)?.0) } @@ -1061,7 +1061,7 @@ where pub fn write_discriminant( &mut self, variant_index: VariantIdx, - dest: PlaceTy<'tcx, M::PointerTag>, + dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { // Layout computation excludes uninhabited variants from consideration // therefore there's no way to represent those variants in the given layout. @@ -1092,7 +1092,7 @@ where let tag_val = size.truncate(discr_val); let tag_dest = self.place_field(dest, tag_field)?; - self.write_scalar(Scalar::from_uint(tag_val, size), tag_dest)?; + self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?; } Variants::Multiple { tag_encoding: @@ -1123,7 +1123,7 @@ where )?; // Write result. 
let niche_dest = self.place_field(dest, tag_field)?; - self.write_immediate(*tag_val, niche_dest)?; + self.write_immediate(*tag_val, &niche_dest)?; } } } diff --git a/compiler/rustc_mir/src/interpret/step.rs b/compiler/rustc_mir/src/interpret/step.rs index b4a2bb809af54..bb58b9d2f2a9c 100644 --- a/compiler/rustc_mir/src/interpret/step.rs +++ b/compiler/rustc_mir/src/interpret/step.rs @@ -90,7 +90,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { SetDiscriminant { place, variant_index } => { let dest = self.eval_place(**place)?; - self.write_discriminant(*variant_index, dest)?; + self.write_discriminant(*variant_index, &dest)?; } // Mark locals as alive @@ -110,7 +110,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Stacked Borrows. Retag(kind, place) => { let dest = self.eval_place(**place)?; - M::retag(self, *kind, dest)?; + M::retag(self, *kind, &dest)?; } // Statements we do not track. @@ -156,13 +156,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ThreadLocalRef(did) => { let id = M::thread_local_static_alloc_id(self, did)?; let val = self.global_base_pointer(id.into())?; - self.write_scalar(val, dest)?; + self.write_scalar(val, &dest)?; } Use(ref operand) => { // Avoid recomputing the layout let op = self.eval_operand(operand, Some(dest.layout))?; - self.copy_op(&op, dest)?; + self.copy_op(&op, &dest)?; } BinaryOp(bin_op, ref left, ref right) => { @@ -170,7 +170,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let left = self.read_immediate(&self.eval_operand(left, layout)?)?; let layout = binop_right_homogeneous(bin_op).then_some(left.layout); let right = self.read_immediate(&self.eval_operand(right, layout)?)?; - self.binop_ignore_overflow(bin_op, &left, &right, dest)?; + self.binop_ignore_overflow(bin_op, &left, &right, &dest)?; } CheckedBinaryOp(bin_op, ref left, ref right) => { @@ -178,7 +178,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let left = self.read_immediate(&self.eval_operand(left, None)?)?; let layout = binop_right_homogeneous(bin_op).then_some(left.layout); let right = self.read_immediate(&self.eval_operand(right, layout)?)?; - self.binop_with_overflow(bin_op, &left, &right, dest)?; + self.binop_with_overflow(bin_op, &left, &right, &dest)?; } UnaryOp(un_op, ref operand) => { @@ -186,15 +186,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?; let val = self.unary_op(un_op, &val)?; assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op); - self.write_immediate(*val, dest)?; + self.write_immediate(*val, &dest)?; } Aggregate(ref kind, ref operands) => { let (dest, active_field_index) = match **kind { mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => { - self.write_discriminant(variant_index, dest)?; + self.write_discriminant(variant_index, &dest)?; if adt_def.is_enum() { - (self.place_downcast(dest, variant_index)?, active_field_index) + (self.place_downcast(&dest, variant_index)?, active_field_index) } else { (dest, active_field_index) } @@ -207,21 +207,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Ignore zero-sized fields. 
if !op.layout.is_zst() { let field_index = active_field_index.unwrap_or(i); - let field_dest = self.place_field(dest, field_index)?; - self.copy_op(&op, field_dest)?; + let field_dest = self.place_field(&dest, field_index)?; + self.copy_op(&op, &field_dest)?; } } } Repeat(ref operand, _) => { let op = self.eval_operand(operand, None)?; - let dest = self.force_allocation(dest)?; + let dest = self.force_allocation(&dest)?; let length = dest.len(self)?; if let Some(first_ptr) = self.check_mplace_access(dest, None)? { // Write the first. let first = self.mplace_field(dest, 0)?; - self.copy_op(&op, first.into())?; + self.copy_op(&op, &first.into())?; if length > 1 { let elem_size = first.layout.size; @@ -242,23 +242,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Len(place) => { // FIXME(CTFE): don't allow computing the length of arrays in const eval let src = self.eval_place(place)?; - let mplace = self.force_allocation(src)?; + let mplace = self.force_allocation(&src)?; let len = mplace.len(self)?; - self.write_scalar(Scalar::from_machine_usize(len, self), dest)?; + self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?; } AddressOf(_, place) | Ref(_, _, place) => { let src = self.eval_place(place)?; - let place = self.force_allocation(src)?; + let place = self.force_allocation(&src)?; if place.layout.size.bytes() > 0 { // definitely not a ZST assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`"); } - self.write_immediate(place.to_ref(), dest)?; + self.write_immediate(place.to_ref(), &dest)?; } NullaryOp(mir::NullOp::Box, _) => { - M::box_alloc(self, dest)?; + M::box_alloc(self, &dest)?; } NullaryOp(mir::NullOp::SizeOf, ty) => { @@ -272,19 +272,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ); throw_inval!(SizeOfUnsizedType(ty)); } - self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), dest)?; + self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), &dest)?; } Cast(cast_kind, ref operand, cast_ty) => { let src = self.eval_operand(operand, None)?; let cast_ty = self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty); - self.cast(&src, cast_kind, cast_ty, dest)?; + self.cast(&src, cast_kind, cast_ty, &dest)?; } Discriminant(place) => { let op = self.eval_place_to_op(place, None)?; let discr_val = self.read_discriminant(&op)?.0; - self.write_scalar(discr_val, dest)?; + self.write_scalar(discr_val, &dest)?; } } diff --git a/compiler/rustc_mir/src/interpret/terminator.rs b/compiler/rustc_mir/src/interpret/terminator.rs index 8c172a581a37c..db2766bb7e260 100644 --- a/compiler/rustc_mir/src/interpret/terminator.rs +++ b/compiler/rustc_mir/src/interpret/terminator.rs @@ -78,8 +78,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ), }; let args = self.eval_operands(args)?; + let dest_place; let ret = match destination { - Some((dest, ret)) => Some((self.eval_place(dest)?, ret)), + Some((dest, ret)) => { + dest_place = self.eval_place(dest)?; + Some((&dest_place, ret)) + }, None => None, }; self.eval_fn_call(fn_val, abi, &args[..], ret, *cleanup)?; @@ -96,7 +100,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { trace!("TerminatorKind::drop: {:?}, type {}", place, ty); let instance = Instance::resolve_drop_in_place(*self.tcx, ty); - self.drop_in_place(place, instance, target, unwind)?; + self.drop_in_place(&place, instance, target, unwind)?; } Assert { ref cond, expected, ref msg, target, cleanup } 
=> { @@ -180,7 +184,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { &mut self, rust_abi: bool, caller_arg: &mut impl Iterator>, - callee_arg: PlaceTy<'tcx, M::PointerTag>, + callee_arg: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { if rust_abi && callee_arg.layout.is_zst() { // Nothing to do. @@ -211,7 +215,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { fn_val: FnVal<'tcx, M::ExtraFnVal>, caller_abi: Abi, args: &[OpTy<'tcx, M::PointerTag>], - ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>, + ret: Option<(&PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>, unwind: Option, ) -> InterpResult<'tcx> { trace!("eval_fn_call: {:#?}", fn_val); @@ -344,12 +348,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { if Some(local) == body.spread_arg { // Must be a tuple for i in 0..dest.layout.fields.count() { - let dest = self.place_field(dest, i)?; - self.pass_argument(rust_abi, &mut caller_iter, dest)?; + let dest = self.place_field(&dest, i)?; + self.pass_argument(rust_abi, &mut caller_iter, &dest)?; } } else { // Normal argument - self.pass_argument(rust_abi, &mut caller_iter, dest)?; + self.pass_argument(rust_abi, &mut caller_iter, &dest)?; } } // Now we should have no more caller args @@ -426,7 +430,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { fn drop_in_place( &mut self, - place: PlaceTy<'tcx, M::PointerTag>, + place: &PlaceTy<'tcx, M::PointerTag>, instance: ty::Instance<'tcx>, target: mir::BasicBlock, unwind: Option, @@ -457,7 +461,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { FnVal::Instance(instance), Abi::Rust, &[arg.into()], - Some((dest.into(), target)), + Some((&dest.into(), target)), unwind, ) } diff --git a/compiler/rustc_mir/src/transform/const_prop.rs b/compiler/rustc_mir/src/transform/const_prop.rs index 90143c616c5e0..8c4ffd3e1b4ab 100644 --- a/compiler/rustc_mir/src/transform/const_prop.rs +++ b/compiler/rustc_mir/src/transform/const_prop.rs @@ -197,7 +197,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx> _instance: ty::Instance<'tcx>, _abi: Abi, _args: &[OpTy<'tcx>], - _ret: Option<(PlaceTy<'tcx>, BasicBlock)>, + _ret: Option<(&PlaceTy<'tcx>, BasicBlock)>, _unwind: Option, ) -> InterpResult<'tcx, Option<&'mir Body<'tcx>>> { Ok(None) @@ -207,7 +207,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx> _ecx: &mut InterpCx<'mir, 'tcx, Self>, _instance: ty::Instance<'tcx>, _args: &[OpTy<'tcx>], - _ret: Option<(PlaceTy<'tcx>, BasicBlock)>, + _ret: Option<(&PlaceTy<'tcx>, BasicBlock)>, _unwind: Option, ) -> InterpResult<'tcx> { throw_machine_stop_str!("calling intrinsics isn't supported in ConstProp") @@ -237,7 +237,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx> fn box_alloc( _ecx: &mut InterpCx<'mir, 'tcx, Self>, - _dest: PlaceTy<'tcx>, + _dest: &PlaceTy<'tcx>, ) -> InterpResult<'tcx> { throw_machine_stop_str!("can't const prop heap allocations") } @@ -392,12 +392,12 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { .filter(|ret_layout| { !ret_layout.is_zst() && ret_layout.size < Size::from_bytes(MAX_ALLOC_LIMIT) }) - .map(|ret_layout| ecx.allocate(ret_layout, MemoryKind::Stack)); + .map(|ret_layout| ecx.allocate(ret_layout, MemoryKind::Stack).into()); ecx.push_stack_frame( Instance::new(def_id, substs), dummy_body, - ret.map(Into::into), + ret.as_ref(), StackPopCleanup::None { cleanup: false }, ) 
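Two complementary tricks above produce the `Option<(&PlaceTy, _)>` that the machine hooks now expect: in the terminator path an uninitialized `let dest_place;` is declared before the `match` so the borrow created in the arm outlives it, and in const-prop the owned `Option` is kept in a binding and borrowed with `Option::as_ref`. A minimal standalone sketch of both patterns, using a placeholder type:

    #[derive(Copy, Clone, Debug)]
    struct Dest(u32);

    fn eval_call(ret: Option<&Dest>) {
        println!("return place: {:?}", ret);
    }

    fn main() {
        // Pattern 1: declare the owned slot first, initialize it inside the
        // match arm, then borrow it; the borrow outlives the arm.
        let destination = Some(Dest(1));
        let dest_place;
        let ret = match destination {
            Some(dest) => {
                dest_place = dest;
                Some(&dest_place)
            }
            None => None,
        };
        eval_call(ret);

        // Pattern 2: keep the owned Option alive in a binding and borrow
        // into it with `as_ref`.
        let owned: Option<Dest> = Some(Dest(2));
        eval_call(owned.as_ref());
    }
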
.expect("failed to push initial stack frame"); @@ -760,14 +760,14 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { match op { BinOp::BitAnd => { if arg_value == 0 { - this.ecx.write_immediate(*const_arg, dest)?; + this.ecx.write_immediate(*const_arg, &dest)?; } } BinOp::BitOr => { if arg_value == const_arg.layout.size.truncate(u128::MAX) || (const_arg.layout.ty.is_bool() && arg_value == 1) { - this.ecx.write_immediate(*const_arg, dest)?; + this.ecx.write_immediate(*const_arg, &dest)?; } } BinOp::Mul => { @@ -777,9 +777,9 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { const_arg.to_scalar()?.into(), Scalar::from_bool(false).into(), ); - this.ecx.write_immediate(val, dest)?; + this.ecx.write_immediate(val, &dest)?; } else { - this.ecx.write_immediate(*const_arg, dest)?; + this.ecx.write_immediate(*const_arg, &dest)?; } } } From 5888556efe102f855f273cd1a7b343fe08d8fc76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Mi=C4=85sko?= Date: Mon, 15 Feb 2021 00:00:00 +0000 Subject: [PATCH 4/6] Pass ImmTy by reference not value --- compiler/rustc_mir/src/interpret/cast.rs | 8 ++++---- compiler/rustc_mir/src/interpret/intern.rs | 2 +- compiler/rustc_mir/src/interpret/intrinsics.rs | 2 +- compiler/rustc_mir/src/interpret/place.rs | 6 +++--- compiler/rustc_mir/src/interpret/validity.rs | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/compiler/rustc_mir/src/interpret/cast.rs b/compiler/rustc_mir/src/interpret/cast.rs index 04c3fad3a13d7..2d9e6df0ab860 100644 --- a/compiler/rustc_mir/src/interpret/cast.rs +++ b/compiler/rustc_mir/src/interpret/cast.rs @@ -32,7 +32,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Misc => { let src = self.read_immediate(src)?; - let res = self.misc_cast(src, cast_ty)?; + let res = self.misc_cast(&src, cast_ty)?; self.write_immediate(res, dest)?; } @@ -107,7 +107,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { fn misc_cast( &self, - src: ImmTy<'tcx, M::PointerTag>, + src: &ImmTy<'tcx, M::PointerTag>, cast_ty: Ty<'tcx>, ) -> InterpResult<'tcx, Immediate> { use rustc_middle::ty::TyKind::*; @@ -158,13 +158,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let dest_layout = self.layout_of(cast_ty)?; if dest_layout.size == src.layout.size { // Thin or fat pointer that just hast the ptr kind of target type changed. - return Ok(*src); + return Ok(**src); } else { // Casting the metadata away from a fat ptr. assert_eq!(src.layout.size, 2 * self.memory.pointer_size()); assert_eq!(dest_layout.size, self.memory.pointer_size()); assert!(src.layout.ty.is_unsafe_ptr()); - return match *src { + return match **src { Immediate::ScalarPair(data, _) => Ok(data.into()), Immediate::Scalar(..) => span_bug!( self.cur_span(), diff --git a/compiler/rustc_mir/src/interpret/intern.rs b/compiler/rustc_mir/src/interpret/intern.rs index 7f0b74cf6e5f2..2eba2c4e5338b 100644 --- a/compiler/rustc_mir/src/interpret/intern.rs +++ b/compiler/rustc_mir/src/interpret/intern.rs @@ -198,7 +198,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory let ty = mplace.layout.ty; if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() { let value = self.ecx.read_immediate(&(*mplace).into())?; - let mplace = self.ecx.ref_to_mplace(value)?; + let mplace = self.ecx.ref_to_mplace(&value)?; assert_eq!(mplace.layout.ty, referenced_ty); // Handle trait object vtables. if let ty::Dynamic(..) 
= diff --git a/compiler/rustc_mir/src/interpret/intrinsics.rs b/compiler/rustc_mir/src/interpret/intrinsics.rs index 7c53fcbb9552e..ab1ab69c8d513 100644 --- a/compiler/rustc_mir/src/interpret/intrinsics.rs +++ b/compiler/rustc_mir/src/interpret/intrinsics.rs @@ -143,7 +143,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { sym::min_align_of_val | sym::size_of_val => { // Avoid `deref_operand` -- this is not a deref, the ptr does not have to be // dereferencable! - let place = self.ref_to_mplace(self.read_immediate(&args[0])?)?; + let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?; let (size, align) = self .size_and_align_of_mplace(place)? .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?; diff --git a/compiler/rustc_mir/src/interpret/place.rs b/compiler/rustc_mir/src/interpret/place.rs index 5dbb49018a6c1..f86a87c88f4d8 100644 --- a/compiler/rustc_mir/src/interpret/place.rs +++ b/compiler/rustc_mir/src/interpret/place.rs @@ -303,12 +303,12 @@ where /// Generally prefer `deref_operand`. pub fn ref_to_mplace( &self, - val: ImmTy<'tcx, M::PointerTag>, + val: &ImmTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let pointee_type = val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty; let layout = self.layout_of(pointee_type)?; - let (ptr, meta) = match *val { + let (ptr, meta) = match **val { Immediate::Scalar(ptr) => (ptr.check_init()?, MemPlaceMeta::None), Immediate::ScalarPair(ptr, meta) => { (ptr.check_init()?, MemPlaceMeta::Meta(meta.check_init()?)) @@ -335,7 +335,7 @@ where ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let val = self.read_immediate(src)?; trace!("deref to {} on {:?}", val.layout.ty, *val); - let place = self.ref_to_mplace(val)?; + let place = self.ref_to_mplace(&val)?; self.mplace_access_checked(place, None) } diff --git a/compiler/rustc_mir/src/interpret/validity.rs b/compiler/rustc_mir/src/interpret/validity.rs index 9c2ae1c7fe30e..766c7a299e8f4 100644 --- a/compiler/rustc_mir/src/interpret/validity.rs +++ b/compiler/rustc_mir/src/interpret/validity.rs @@ -386,7 +386,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' // Handle wide pointers. 
// Check metadata early, for better diagnostics let place = try_validation!( - self.ecx.ref_to_mplace(value), + self.ecx.ref_to_mplace(&value), self.path, err_ub!(InvalidUninitBytes(None)) => { "uninitialized {}", kind }, ); From d06a2a368df2d15cd4e9c3e1c3e8c86727307502 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Mi=C4=85sko?= Date: Mon, 15 Feb 2021 00:00:00 +0000 Subject: [PATCH 5/6] Pass MPlaceTy by reference not value --- .../rustc_mir/src/const_eval/eval_queries.rs | 8 +-- compiler/rustc_mir/src/const_eval/machine.rs | 2 +- compiler/rustc_mir/src/const_eval/mod.rs | 2 +- .../rustc_mir/src/interpret/eval_context.rs | 10 ++-- compiler/rustc_mir/src/interpret/intern.rs | 8 +-- .../rustc_mir/src/interpret/intrinsics.rs | 2 +- .../interpret/intrinsics/caller_location.rs | 6 +- compiler/rustc_mir/src/interpret/operand.rs | 23 ++++--- compiler/rustc_mir/src/interpret/place.rs | 60 +++++++++---------- compiler/rustc_mir/src/interpret/step.rs | 4 +- .../rustc_mir/src/interpret/terminator.rs | 2 +- compiler/rustc_mir/src/interpret/validity.rs | 2 +- compiler/rustc_mir/src/interpret/visitor.rs | 8 +-- 13 files changed, 72 insertions(+), 65 deletions(-) diff --git a/compiler/rustc_mir/src/const_eval/eval_queries.rs b/compiler/rustc_mir/src/const_eval/eval_queries.rs index 9a2e659678d53..e9a0742d493d2 100644 --- a/compiler/rustc_mir/src/const_eval/eval_queries.rs +++ b/compiler/rustc_mir/src/const_eval/eval_queries.rs @@ -72,7 +72,7 @@ fn eval_body_using_ecx<'mir, 'tcx>( None => InternKind::Constant, } }; - intern_const_alloc_recursive(ecx, intern_kind, ret)?; + intern_const_alloc_recursive(ecx, intern_kind, &ret)?; debug!("eval_body_using_ecx done: {:?}", *ret); Ok(ret) @@ -137,7 +137,7 @@ pub(super) fn op_to_const<'tcx>( op.try_as_mplace(ecx) }; - let to_const_value = |mplace: MPlaceTy<'_>| match mplace.ptr { + let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr { Scalar::Ptr(ptr) => { let alloc = ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory(); ConstValue::ByRef { alloc, offset: ptr.offset } @@ -155,12 +155,12 @@ pub(super) fn op_to_const<'tcx>( } }; match immediate { - Ok(mplace) => to_const_value(mplace), + Ok(ref mplace) => to_const_value(mplace), // see comment on `let try_as_immediate` above Err(imm) => match *imm { Immediate::Scalar(x) => match x { ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s), - ScalarMaybeUninit::Uninit => to_const_value(op.assert_mem_place(ecx)), + ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place(ecx)), }, Immediate::ScalarPair(a, b) => { let (data, start) = match a.check_init().unwrap() { diff --git a/compiler/rustc_mir/src/const_eval/machine.rs b/compiler/rustc_mir/src/const_eval/machine.rs index 70548c583d695..14b67fe119413 100644 --- a/compiler/rustc_mir/src/const_eval/machine.rs +++ b/compiler/rustc_mir/src/const_eval/machine.rs @@ -40,7 +40,7 @@ impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> { assert!(args.len() == 1); let msg_place = self.deref_operand(&args[0])?; - let msg = Symbol::intern(self.read_str(msg_place)?); + let msg = Symbol::intern(self.read_str(&msg_place)?); let span = self.find_closest_untracked_caller_location(); let (file, line, col) = self.location_triple_for_span(span); Err(ConstEvalErrKind::Panic { msg, file, line, col }.into()) diff --git a/compiler/rustc_mir/src/const_eval/mod.rs b/compiler/rustc_mir/src/const_eval/mod.rs index 480489c9bc0b1..a4e1cd2faa3c0 100644 --- a/compiler/rustc_mir/src/const_eval/mod.rs +++ b/compiler/rustc_mir/src/const_eval/mod.rs @@ -29,7 +29,7 
@@ pub(crate) fn const_caller_location( let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), false); let loc_place = ecx.alloc_caller_location(file, line, col); - if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, loc_place).is_err() { + if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() { bug!("intern_const_alloc_recursive should not error in this case") } ConstValue::Scalar(loc_place.ptr) diff --git a/compiler/rustc_mir/src/interpret/eval_context.rs b/compiler/rustc_mir/src/interpret/eval_context.rs index 3d3a4afb5aca3..c56f8a40678cf 100644 --- a/compiler/rustc_mir/src/interpret/eval_context.rs +++ b/compiler/rustc_mir/src/interpret/eval_context.rs @@ -548,8 +548,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// This can fail to provide an answer for extern types. pub(super) fn size_and_align_of( &self, - metadata: MemPlaceMeta, - layout: TyAndLayout<'tcx>, + metadata: &MemPlaceMeta, + layout: &TyAndLayout<'tcx>, ) -> InterpResult<'tcx, Option<(Size, Align)>> { if !layout.is_unsized() { return Ok(Some((layout.size, layout.align.abi))); @@ -577,7 +577,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // the last field). Can't have foreign types here, how would we // adjust alignment and size for them? let field = layout.field(self, layout.fields.count() - 1)?; - let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? { + let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, &field)? { Some(size_and_align) => size_and_align, None => { // A field with extern type. If this field is at offset 0, we behave @@ -645,9 +645,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { #[inline] pub fn size_and_align_of_mplace( &self, - mplace: MPlaceTy<'tcx, M::PointerTag>, + mplace: &MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, Option<(Size, Align)>> { - self.size_and_align_of(mplace.meta, mplace.layout) + self.size_and_align_of(&mplace.meta, &mplace.layout) } pub fn push_stack_frame( diff --git a/compiler/rustc_mir/src/interpret/intern.rs b/compiler/rustc_mir/src/interpret/intern.rs index 2eba2c4e5338b..42601ce2195e7 100644 --- a/compiler/rustc_mir/src/interpret/intern.rs +++ b/compiler/rustc_mir/src/interpret/intern.rs @@ -296,7 +296,7 @@ pub enum InternKind { pub fn intern_const_alloc_recursive>( ecx: &mut InterpCx<'mir, 'tcx, M>, intern_kind: InternKind, - ret: MPlaceTy<'tcx>, + ret: &MPlaceTy<'tcx>, ) -> Result<(), ErrorReported> where 'tcx: 'mir, @@ -328,7 +328,7 @@ where Some(ret.layout.ty), ); - ref_tracking.track((ret, base_intern_mode), || ()); + ref_tracking.track((*ret, base_intern_mode), || ()); while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() { let res = InternVisitor { @@ -435,11 +435,11 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>> layout: TyAndLayout<'tcx>, f: impl FnOnce( &mut InterpCx<'mir, 'tcx, M>, - MPlaceTy<'tcx, M::PointerTag>, + &MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, ()>, ) -> InterpResult<'tcx, &'tcx Allocation> { let dest = self.allocate(layout, MemoryKind::Stack); - f(self, dest)?; + f(self, &dest)?; let ptr = dest.ptr.assert_ptr(); assert_eq!(ptr.offset, Size::ZERO); let mut alloc = self.memory.alloc_map.remove(&ptr.alloc_id).unwrap().1; diff --git a/compiler/rustc_mir/src/interpret/intrinsics.rs b/compiler/rustc_mir/src/interpret/intrinsics.rs index ab1ab69c8d513..00f8a3d0ce26d 100644 --- 
a/compiler/rustc_mir/src/interpret/intrinsics.rs +++ b/compiler/rustc_mir/src/interpret/intrinsics.rs @@ -145,7 +145,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // dereferencable! let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?; let (size, align) = self - .size_and_align_of_mplace(place)? + .size_and_align_of_mplace(&place)? .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?; let result = match intrinsic_name { diff --git a/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs index 72b07d7637243..4dfdc08b875c0 100644 --- a/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs +++ b/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs @@ -92,11 +92,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let location = self.allocate(loc_layout, MemoryKind::CallerLocation); // Initialize fields. - self.write_immediate(file.to_ref(), &self.mplace_field(location, 0).unwrap().into()) + self.write_immediate(file.to_ref(), &self.mplace_field(&location, 0).unwrap().into()) .expect("writing to memory we just allocated cannot fail"); - self.write_scalar(line, &self.mplace_field(location, 1).unwrap().into()) + self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into()) .expect("writing to memory we just allocated cannot fail"); - self.write_scalar(col, &self.mplace_field(location, 2).unwrap().into()) + self.write_scalar(col, &self.mplace_field(&location, 2).unwrap().into()) .expect("writing to memory we just allocated cannot fail"); location diff --git a/compiler/rustc_mir/src/interpret/operand.rs b/compiler/rustc_mir/src/interpret/operand.rs index 1a0d9ba1d3c56..f85191f459fa9 100644 --- a/compiler/rustc_mir/src/interpret/operand.rs +++ b/compiler/rustc_mir/src/interpret/operand.rs @@ -180,6 +180,13 @@ impl<'tcx, Tag: Copy> From> for OpTy<'tcx, Tag> { } } +impl<'tcx, Tag: Copy> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> { + #[inline(always)] + fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self { + OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout } + } +} + impl<'tcx, Tag> From> for OpTy<'tcx, Tag> { #[inline(always)] fn from(val: ImmTy<'tcx, Tag>) -> Self { @@ -243,7 +250,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Returns `None` if the layout does not permit loading this as a value. fn try_read_immediate_from_mplace( &self, - mplace: MPlaceTy<'tcx, M::PointerTag>, + mplace: &MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, Option>> { if mplace.layout.is_unsized() { // Don't touch unsized @@ -307,11 +314,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { src: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, Result, MPlaceTy<'tcx, M::PointerTag>>> { Ok(match src.try_as_mplace(self) { - Ok(mplace) => { + Ok(ref mplace) => { if let Some(val) = self.try_read_immediate_from_mplace(mplace)? { Ok(val) } else { - Err(mplace) + Err(*mplace) } } Err(val) => Ok(val), @@ -340,7 +347,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } // Turn the wide MPlace into a string (must already be dereferenced!) 
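The new `From<&MPlaceTy> for OpTy` impl above lets callers convert a borrowed place with `.into()` instead of copying the whole place out of the borrow first. A standalone sketch of offering both by-value and by-reference conversions, with simplified placeholder types rather than the interpreter's own:

    #[derive(Copy, Clone, Debug)]
    struct MemPlace {
        ptr: usize,
    }

    #[derive(Copy, Clone, Debug)]
    struct MPlaceAndLayout {
        mplace: MemPlace,
        size: u64,
    }

    #[derive(Copy, Clone, Debug)]
    enum Op {
        Indirect(MemPlace),
    }

    #[derive(Copy, Clone, Debug)]
    struct OpAndLayout {
        op: Op,
        size: u64,
    }

    impl From<MPlaceAndLayout> for OpAndLayout {
        fn from(mplace: MPlaceAndLayout) -> Self {
            OpAndLayout { op: Op::Indirect(mplace.mplace), size: mplace.size }
        }
    }

    // The by-reference impl copies only the small fields it actually needs.
    impl From<&MPlaceAndLayout> for OpAndLayout {
        fn from(mplace: &MPlaceAndLayout) -> Self {
            OpAndLayout { op: Op::Indirect(mplace.mplace), size: mplace.size }
        }
    }

    fn main() {
        let place = MPlaceAndLayout { mplace: MemPlace { ptr: 0x10 }, size: 8 };
        let by_ref: OpAndLayout = (&place).into();
        let by_val: OpAndLayout = place.into();
        println!("{:?} / {:?}", by_ref, by_val);
    }
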
- pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> { + pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> { let len = mplace.len(self)?; let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?; let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?; @@ -354,7 +361,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { field: usize, ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { let base = match op.try_as_mplace(self) { - Ok(mplace) => { + Ok(ref mplace) => { // We can reuse the mplace field computation logic for indirect operands. let field = self.mplace_field(mplace, field)?; return Ok(field.into()); @@ -397,7 +404,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } else { // Indexing into a big array. This must be an mplace. let mplace = op.assert_mem_place(self); - Ok(self.mplace_index(mplace, index)?.into()) + Ok(self.mplace_index(&mplace, index)?.into()) } } @@ -408,7 +415,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { // Downcasts only change the layout Ok(match op.try_as_mplace(self) { - Ok(mplace) => self.mplace_downcast(mplace, variant)?.into(), + Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(), Err(..) => { let layout = op.layout.for_variant(self, variant); OpTy { layout, ..*op } @@ -430,7 +437,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // The rest should only occur as mplace, we do not use Immediates for types // allowing such operations. This matches place_projection forcing an allocation. let mplace = base.assert_mem_place(self); - self.mplace_projection(mplace, proj_elem)?.into() + self.mplace_projection(&mplace, proj_elem)?.into() } }) } diff --git a/compiler/rustc_mir/src/interpret/place.rs b/compiler/rustc_mir/src/interpret/place.rs index f86a87c88f4d8..392f739e84fd6 100644 --- a/compiler/rustc_mir/src/interpret/place.rs +++ b/compiler/rustc_mir/src/interpret/place.rs @@ -183,7 +183,7 @@ impl MemPlace { } } -impl<'tcx, Tag> MPlaceTy<'tcx, Tag> { +impl<'tcx, Tag: Copy> MPlaceTy<'tcx, Tag> { /// Produces a MemPlace that works for ZST but nothing else #[inline] pub fn dangling(layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self { @@ -195,13 +195,13 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> { /// Replace ptr tag, maintain vtable tag (if any) #[inline] - pub fn replace_tag(self, new_tag: Tag) -> Self { + pub fn replace_tag(&self, new_tag: Tag) -> Self { MPlaceTy { mplace: self.mplace.replace_tag(new_tag), layout: self.layout } } #[inline] pub fn offset( - self, + &self, offset: Size, meta: MemPlaceMeta, layout: TyAndLayout<'tcx>, @@ -216,7 +216,7 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> { } #[inline] - pub(super) fn len(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> { + pub(super) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> { if self.layout.is_unsized() { // We need to consult `meta` metadata match self.layout.ty.kind() { @@ -234,7 +234,7 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> { } #[inline] - pub(super) fn vtable(self) -> Scalar { + pub(super) fn vtable(&self) -> Scalar { match self.layout.ty.kind() { ty::Dynamic(..) 
=> self.mplace.meta.unwrap_meta(), _ => bug!("vtable not supported on type {:?}", self.layout.ty), @@ -348,7 +348,7 @@ where #[inline] pub(super) fn check_mplace_access( &self, - place: MPlaceTy<'tcx, M::PointerTag>, + place: &MPlaceTy<'tcx, M::PointerTag>, size: Option, ) -> InterpResult<'tcx, Option>> { let size = size.unwrap_or_else(|| { @@ -370,13 +370,13 @@ where force_align: Option, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let (size, align) = self - .size_and_align_of_mplace(place)? + .size_and_align_of_mplace(&place)? .unwrap_or((place.layout.size, place.layout.align.abi)); assert!(place.mplace.align <= align, "dynamic alignment less strict than static one?"); // Check (stricter) dynamic alignment, unless forced otherwise. place.mplace.align = force_align.unwrap_or(align); // When dereferencing a pointer, it must be non-NULL, aligned, and live. - if let Some(ptr) = self.check_mplace_access(place, Some(size))? { + if let Some(ptr) = self.check_mplace_access(&place, Some(size))? { place.mplace.ptr = ptr.into(); } Ok(place) @@ -401,7 +401,7 @@ where #[inline(always)] pub fn mplace_field( &self, - base: MPlaceTy<'tcx, M::PointerTag>, + base: &MPlaceTy<'tcx, M::PointerTag>, field: usize, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let offset = base.layout.fields.offset(field); @@ -412,7 +412,7 @@ where // Re-use parent metadata to determine dynamic field layout. // With custom DSTS, this *will* execute user-defined code, but the same // happens at run-time so that's okay. - let align = match self.size_and_align_of(base.meta, field_layout)? { + let align = match self.size_and_align_of(&base.meta, &field_layout)? { Some((_, align)) => align, None if offset == Size::ZERO => { // An extern type at offset 0, we fall back to its static alignment. @@ -442,7 +442,7 @@ where #[inline(always)] pub fn mplace_index( &self, - base: MPlaceTy<'tcx, M::PointerTag>, + base: &MPlaceTy<'tcx, M::PointerTag>, index: u64, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { // Not using the layout method because we want to compute on u64 @@ -472,8 +472,8 @@ where // same by repeatedly calling `mplace_array`. pub(super) fn mplace_array_fields( &self, - base: MPlaceTy<'tcx, Tag>, - ) -> InterpResult<'tcx, impl Iterator>> + 'tcx> + base: &'a MPlaceTy<'tcx, Tag>, + ) -> InterpResult<'tcx, impl Iterator>> + 'a> { let len = base.len(self)?; // also asserts that we have a type where this makes sense let stride = match base.layout.fields { @@ -488,7 +488,7 @@ where fn mplace_subslice( &self, - base: MPlaceTy<'tcx, M::PointerTag>, + base: &MPlaceTy<'tcx, M::PointerTag>, from: u64, to: u64, from_end: bool, @@ -533,18 +533,18 @@ where pub(super) fn mplace_downcast( &self, - base: MPlaceTy<'tcx, M::PointerTag>, + base: &MPlaceTy<'tcx, M::PointerTag>, variant: VariantIdx, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { // Downcasts only change the layout assert!(!base.meta.has_meta()); - Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base }) + Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..*base }) } /// Project into an mplace pub(super) fn mplace_projection( &self, - base: MPlaceTy<'tcx, M::PointerTag>, + base: &MPlaceTy<'tcx, M::PointerTag>, proj_elem: mir::PlaceElem<'tcx>, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { use rustc_middle::mir::ProjectionElem::*; @@ -598,7 +598,7 @@ where // FIXME: We could try to be smarter and avoid allocation for fields that span the // entire place. 
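Because `mplace_array_fields` now borrows its base place, the iterator it returns can only be promised for the borrow's lifetime `'a` rather than `'tcx`: the closure inside captures the reference, so the `impl Iterator` bound has to follow it. A reduced sketch of why the bound moves, with a made-up base type:

    struct Base {
        len: u64,
        stride: u64,
    }

    // The returned iterator captures `&'a Base`, so it is only valid for 'a.
    fn array_field_offsets<'a>(base: &'a Base) -> impl Iterator<Item = u64> + 'a {
        (0..base.len).map(move |i| i * base.stride)
    }

    fn main() {
        let base = Base { len: 4, stride: 8 };
        let offsets: Vec<u64> = array_field_offsets(&base).collect();
        assert_eq!(offsets, vec![0, 8, 16, 24]);
    }
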
let mplace = self.force_allocation(base)?; - Ok(self.mplace_field(mplace, field)?.into()) + Ok(self.mplace_field(&mplace, field)?.into()) } pub fn place_index( @@ -607,7 +607,7 @@ where index: u64, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { let mplace = self.force_allocation(base)?; - Ok(self.mplace_index(mplace, index)?.into()) + Ok(self.mplace_index(&mplace, index)?.into()) } pub fn place_downcast( @@ -618,7 +618,7 @@ where // Downcast just changes the layout Ok(match base.place { Place::Ptr(mplace) => { - self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into() + self.mplace_downcast(&MPlaceTy { mplace, layout: base.layout }, variant)?.into() } Place::Local { .. } => { let layout = base.layout.for_variant(self, variant); @@ -642,7 +642,7 @@ where // This matches `operand_projection`. Subslice { .. } | ConstantIndex { .. } | Index(_) => { let mplace = self.force_allocation(base)?; - self.mplace_projection(mplace, proj_elem)?.into() + self.mplace_projection(&mplace, proj_elem)?.into() } }) } @@ -708,7 +708,7 @@ where pub fn write_immediate_to_mplace( &mut self, src: Immediate, - dest: MPlaceTy<'tcx, M::PointerTag>, + dest: &MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { self.write_immediate_to_mplace_no_validate(src, dest)?; @@ -769,7 +769,7 @@ where let dest = MPlaceTy { mplace, layout: dest.layout }; // This is already in memory, write there. - self.write_immediate_to_mplace_no_validate(src, dest) + self.write_immediate_to_mplace_no_validate(src, &dest) } /// Write an immediate to memory. @@ -778,7 +778,7 @@ where fn write_immediate_to_mplace_no_validate( &mut self, value: Immediate, - dest: MPlaceTy<'tcx, M::PointerTag>, + dest: &MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { // Note that it is really important that the type here is the right one, and matches the // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here @@ -903,10 +903,10 @@ where assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances"); let src = self - .check_mplace_access(src, Some(size)) + .check_mplace_access(&src, Some(size)) .expect("places should be checked on creation"); let dest = self - .check_mplace_access(dest, Some(size)) + .check_mplace_access(&dest, Some(size)) .expect("places should be checked on creation"); let (src_ptr, dest_ptr) = match (src, dest) { (Some(src_ptr), Some(dest_ptr)) => (src_ptr, dest_ptr), @@ -996,7 +996,7 @@ where self.layout_of_local(&self.stack()[frame], local, None)?; // We also need to support unsized types, and hence cannot use `allocate`. let (size, align) = self - .size_and_align_of(meta, local_layout)? + .size_and_align_of(&meta, &local_layout)? .expect("Cannot allocate for non-dyn-sized type"); let ptr = self.memory.allocate(size, align, MemoryKind::Stack); let mplace = MemPlace { ptr: ptr.into(), align, meta }; @@ -1005,7 +1005,7 @@ where // We don't have to validate as we can assume the local // was already valid for its type. let mplace = MPlaceTy { mplace, layout: local_layout }; - self.write_immediate_to_mplace_no_validate(value, mplace)?; + self.write_immediate_to_mplace_no_validate(value, &mplace)?; } // Now we can call `access_mut` again, asserting it goes well, // and actually overwrite things. @@ -1146,7 +1146,7 @@ where /// Also return some more information so drop doesn't have to run the same code twice. 
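Several of the place hunks above build a new value through a dereferenced borrow with functional update syntax, e.g. `MPlaceTy { layout: ..., ..*base }` in the downcast helpers; this compiles because the underlying types are `Copy`, so the untouched fields are simply copied out of the borrow. A small self-contained sketch of the pattern with placeholder types:

    #[derive(Copy, Clone, Debug)]
    struct Layout {
        size: u64,
    }

    #[derive(Copy, Clone, Debug)]
    struct PlaceAndLayout {
        frame: usize,
        local: usize,
        layout: Layout,
    }

    // Keep everything from `*base` except the layout.
    fn with_layout(base: &PlaceAndLayout, layout: Layout) -> PlaceAndLayout {
        PlaceAndLayout { layout, ..*base }
    }

    fn main() {
        let base = PlaceAndLayout { frame: 0, local: 3, layout: Layout { size: 8 } };
        let downcast = with_layout(&base, Layout { size: 4 });
        assert_eq!(downcast.local, 3);
        assert_eq!(downcast.layout.size, 4);
    }
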
     pub(super) fn unpack_dyn_trait(
         &self,
-        mplace: MPlaceTy<'tcx, M::PointerTag>,
+        mplace: &MPlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
         let vtable = mplace.vtable(); // also sanity checks the type
         let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
@@ -1160,7 +1160,7 @@ where
             assert_eq!(align, layout.align.abi);
         }
-        let mplace = MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..*mplace }, layout };
+        let mplace = MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..**mplace }, layout };
         Ok((instance, mplace))
     }
 }
diff --git a/compiler/rustc_mir/src/interpret/step.rs b/compiler/rustc_mir/src/interpret/step.rs
index bb58b9d2f2a9c..64d7c8ef2c719 100644
--- a/compiler/rustc_mir/src/interpret/step.rs
+++ b/compiler/rustc_mir/src/interpret/step.rs
@@ -218,9 +218,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let dest = self.force_allocation(&dest)?;
                 let length = dest.len(self)?;
-                if let Some(first_ptr) = self.check_mplace_access(dest, None)? {
+                if let Some(first_ptr) = self.check_mplace_access(&dest, None)? {
                     // Write the first.
-                    let first = self.mplace_field(dest, 0)?;
+                    let first = self.mplace_field(&dest, 0)?;
                     self.copy_op(&op, &first.into())?;
                     if length > 1 {
diff --git a/compiler/rustc_mir/src/interpret/terminator.rs b/compiler/rustc_mir/src/interpret/terminator.rs
index db2766bb7e260..e4325d6d1e955 100644
--- a/compiler/rustc_mir/src/interpret/terminator.rs
+++ b/compiler/rustc_mir/src/interpret/terminator.rs
@@ -444,7 +444,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let (instance, place) = match place.layout.ty.kind() {
             ty::Dynamic(..) => {
                 // Dropping a trait object.
-                self.unpack_dyn_trait(place)?
+                self.unpack_dyn_trait(&place)?
             }
             _ => (instance, place),
         };
diff --git a/compiler/rustc_mir/src/interpret/validity.rs b/compiler/rustc_mir/src/interpret/validity.rs
index 766c7a299e8f4..f33859f3f8633 100644
--- a/compiler/rustc_mir/src/interpret/validity.rs
+++ b/compiler/rustc_mir/src/interpret/validity.rs
@@ -395,7 +395,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
             }
             // Make sure this is dereferenceable and all.
             let size_and_align = try_validation!(
-                self.ecx.size_and_align_of_mplace(place),
+                self.ecx.size_and_align_of_mplace(&place),
                 self.path,
                 err_ub!(InvalidMeta(msg)) => { "invalid {} metadata: {}", kind, msg },
             );
diff --git a/compiler/rustc_mir/src/interpret/visitor.rs b/compiler/rustc_mir/src/interpret/visitor.rs
index 7efed19362680..e05a1c0e0b2e3 100644
--- a/compiler/rustc_mir/src/interpret/visitor.rs
+++ b/compiler/rustc_mir/src/interpret/visitor.rs
@@ -102,7 +102,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
         ecx: &InterpCx<'mir, 'tcx, M>,
         variant: VariantIdx,
     ) -> InterpResult<'tcx, Self> {
-        ecx.mplace_downcast(*self, variant)
+        ecx.mplace_downcast(self, variant)
     }
     #[inline(always)]
@@ -111,7 +111,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
         ecx: &InterpCx<'mir, 'tcx, M>,
         field: usize,
     ) -> InterpResult<'tcx, Self> {
-        ecx.mplace_field(*self, field)
+        ecx.mplace_field(self, field)
     }
 }
@@ -208,7 +208,7 @@ macro_rules! make_value_visitor {
                 ty::Dynamic(..) => {
                     // immediate trait objects are not a thing
                     let dest = v.to_op(self.ecx())?.assert_mem_place(self.ecx());
-                    let inner = self.ecx().unpack_dyn_trait(dest)?.1;
+                    let inner = self.ecx().unpack_dyn_trait(&dest)?.1;
                     trace!("walk_value: dyn object layout: {:#?}", inner.layout);
                     // recurse with the inner type
                     return self.visit_field(&v, 0, &Value::from_mem_place(inner));
@@ -241,7 +241,7 @@ macro_rules! make_value_visitor {
                     // Now we can go over all the fields.
                     // This uses the *run-time length*, i.e., if we are a slice,
                     // the dynamic info from the metadata is used.
-                    let iter = self.ecx().mplace_array_fields(mplace)?
+                    let iter = self.ecx().mplace_array_fields(&mplace)?
                         .map(|f| f.and_then(|f| {
                             Ok(Value::from_mem_place(f))
                         }));

From f2da425bfff80bb0bf47921c873b5e89ed5597a9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomasz=20Mi=C4=85sko?=
Date: Mon, 15 Feb 2021 00:00:00 +0000
Subject: [PATCH 6/6] ./x.py fmt

---
 .../rustc_mir/src/interpret/eval_context.rs | 35 ++++++++++---------
 .../rustc_mir/src/interpret/intrinsics.rs   |  7 ++--
 .../rustc_mir/src/interpret/terminator.rs   |  2 +-
 compiler/rustc_mir/src/interpret/visitor.rs | 10 ++++--
 .../rustc_mir/src/transform/const_prop.rs   |  6 ++--
 5 files changed, 34 insertions(+), 26 deletions(-)

diff --git a/compiler/rustc_mir/src/interpret/eval_context.rs b/compiler/rustc_mir/src/interpret/eval_context.rs
index c56f8a40678cf..1ba87358b1c3d 100644
--- a/compiler/rustc_mir/src/interpret/eval_context.rs
+++ b/compiler/rustc_mir/src/interpret/eval_context.rs
@@ -577,24 +577,25 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // the last field). Can't have foreign types here, how would we
         // adjust alignment and size for them?
         let field = layout.field(self, layout.fields.count() - 1)?;
-        let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, &field)? {
-            Some(size_and_align) => size_and_align,
-            None => {
-                // A field with extern type. If this field is at offset 0, we behave
-                // like the underlying extern type.
-                // FIXME: Once we have made decisions for how to handle size and alignment
-                // of `extern type`, this should be adapted. It is just a temporary hack
-                // to get some code to work that probably ought to work.
-                if sized_size == Size::ZERO {
-                    return Ok(None);
-                } else {
-                    span_bug!(
-                        self.cur_span(),
-                        "Fields cannot be extern types, unless they are at offset 0"
-                    )
+        let (unsized_size, unsized_align) =
+            match self.size_and_align_of(metadata, &field)? {
+                Some(size_and_align) => size_and_align,
+                None => {
+                    // A field with extern type. If this field is at offset 0, we behave
+                    // like the underlying extern type.
+                    // FIXME: Once we have made decisions for how to handle size and alignment
+                    // of `extern type`, this should be adapted. It is just a temporary hack
+                    // to get some code to work that probably ought to work.
+ if sized_size == Size::ZERO { + return Ok(None); + } else { + span_bug!( + self.cur_span(), + "Fields cannot be extern types, unless they are at offset 0" + ) + } } - } - }; + }; // FIXME (#26403, #27023): We should be adding padding // to `sized_size` (to accommodate the `unsized_align` diff --git a/compiler/rustc_mir/src/interpret/intrinsics.rs b/compiler/rustc_mir/src/interpret/intrinsics.rs index 00f8a3d0ce26d..c4039f2f15e94 100644 --- a/compiler/rustc_mir/src/interpret/intrinsics.rs +++ b/compiler/rustc_mir/src/interpret/intrinsics.rs @@ -226,8 +226,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let l = self.read_immediate(&args[0])?; let r = self.read_immediate(&args[1])?; let is_add = intrinsic_name == sym::saturating_add; - let (val, overflowed, _ty) = - self.overflowing_binary_op(if is_add { BinOp::Add } else { BinOp::Sub }, &l, &r)?; + let (val, overflowed, _ty) = self.overflowing_binary_op( + if is_add { BinOp::Add } else { BinOp::Sub }, + &l, + &r, + )?; let val = if overflowed { let num_bits = l.layout.size.bits(); if l.layout.abi.is_signed() { diff --git a/compiler/rustc_mir/src/interpret/terminator.rs b/compiler/rustc_mir/src/interpret/terminator.rs index e4325d6d1e955..0807949a2d91b 100644 --- a/compiler/rustc_mir/src/interpret/terminator.rs +++ b/compiler/rustc_mir/src/interpret/terminator.rs @@ -83,7 +83,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Some((dest, ret)) => { dest_place = self.eval_place(dest)?; Some((&dest_place, ret)) - }, + } None => None, }; self.eval_fn_call(fn_val, abi, &args[..], ret, *cleanup)?; diff --git a/compiler/rustc_mir/src/interpret/visitor.rs b/compiler/rustc_mir/src/interpret/visitor.rs index e05a1c0e0b2e3..32edca6f3dff9 100644 --- a/compiler/rustc_mir/src/interpret/visitor.rs +++ b/compiler/rustc_mir/src/interpret/visitor.rs @@ -18,7 +18,8 @@ pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Copy { fn layout(&self) -> TyAndLayout<'tcx>; /// Makes this into an `OpTy`. - fn to_op(&self, ecx: &InterpCx<'mir, 'tcx, M>) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>; + fn to_op(&self, ecx: &InterpCx<'mir, 'tcx, M>) + -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>; /// Creates this from an `MPlaceTy`. fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self; @@ -31,8 +32,11 @@ pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Copy { ) -> InterpResult<'tcx, Self>; /// Projects to the n-th field. - fn project_field(&self, ecx: &InterpCx<'mir, 'tcx, M>, field: usize) - -> InterpResult<'tcx, Self>; + fn project_field( + &self, + ecx: &InterpCx<'mir, 'tcx, M>, + field: usize, + ) -> InterpResult<'tcx, Self>; } // Operands and memory-places are both values. diff --git a/compiler/rustc_mir/src/transform/const_prop.rs b/compiler/rustc_mir/src/transform/const_prop.rs index 8c4ffd3e1b4ab..8ef831d4f3b18 100644 --- a/compiler/rustc_mir/src/transform/const_prop.rs +++ b/compiler/rustc_mir/src/transform/const_prop.rs @@ -1198,9 +1198,9 @@ impl<'mir, 'tcx> MutVisitor<'tcx> for ConstPropagator<'mir, 'tcx> { // This can be `None` if the lhs wasn't const propagated and we just // triggered the assert on the value of the rhs. match self.eval_operand(op, source_info) { - Some(op) => { - DbgVal::Val(self.ecx.read_immediate(&op).unwrap().to_const_int()) - } + Some(op) => DbgVal::Val( + self.ecx.read_immediate(&op).unwrap().to_const_int(), + ), None => DbgVal::Underscore, } };
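
A short, self-contained sketch may help readers skimming the series. It is not part of the patches above; the names (`BigValue`, `sum`) are made up for illustration. Under those assumptions it shows the two ideas the series relies on: a compile-time size assertion in the spirit of `static_assert_size!`, and passing a large `Copy` value by reference so that a call moves a pointer instead of the whole struct.

use std::mem::size_of;

// Stand-in for a large interpreter value such as an operand or place descriptor.
#[derive(Copy, Clone)]
struct BigValue {
    words: [u64; 10], // 80 bytes, comparable to the sizes asserted above
}

// Poor man's `static_assert_size!`: compilation fails if the size drifts.
const _: [(); 80] = [(); size_of::<BigValue>()];

// Taking `&BigValue` means only a pointer crosses the call boundary.
fn sum(v: &BigValue) -> u64 {
    v.words.iter().sum()
}

fn main() {
    let v = BigValue { words: [1; 10] };
    println!("{}", sum(&v)); // no 80-byte copy is made for this call
}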