miri: use AllocId instead of u64. #47205

Merged 1 commit on Jan 6, 2018
src/librustc/mir/interpret/mod.rs: 2 changes (1 addition & 1 deletion)
@@ -145,7 +145,7 @@ impl<'tcx> MemoryPointer {
}


-#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)]
+#[derive(Copy, Clone, Default, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)]
pub struct AllocId(pub u64);

impl fmt::Display for AllocId {
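
For readers following the diff, the whole PR hinges on the newtype pattern: wrapping the raw `u64` in an `AllocId` struct keeps allocation identifiers distinct from other integers in the type system, and the added `Default` derive makes `AllocId(0)` available as a natural starting value for a counter. Below is a minimal, self-contained sketch of that pattern (stand-in code under those assumptions, not the compiler's actual definitions):

```rust
use std::collections::HashMap;
use std::fmt;

// A simplified stand-in for rustc's `AllocId`: a transparent wrapper
// around u64 that the type system keeps distinct from plain integers.
#[derive(Copy, Clone, Default, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)]
pub struct AllocId(pub u64);

impl fmt::Display for AllocId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

fn main() {
    // Maps keyed by AllocId cannot accidentally be indexed by a bare u64.
    let mut sizes: HashMap<AllocId, usize> = HashMap::new();
    sizes.insert(AllocId(1), 16);

    // `Default` yields AllocId(0), a natural initial counter value.
    let first: AllocId = AllocId::default();
    assert_eq!(first, AllocId(0));
    println!("next id after {} is {}", first, AllocId(first.0 + 1));
}
```
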
src/librustc/ty/context.rs: 32 changes (15 additions & 17 deletions)
@@ -895,31 +895,29 @@ pub struct InterpretInterner<'tcx> {
allocs: FxHashSet<&'tcx interpret::Allocation>,

/// Allows obtaining function instance handles via a unique identifier
-functions: FxHashMap<u64, Instance<'tcx>>,
+functions: FxHashMap<interpret::AllocId, Instance<'tcx>>,

/// Inverse map of `interpret_functions`.
/// Used so we don't allocate a new pointer every time we need one
-function_cache: FxHashMap<Instance<'tcx>, u64>,
+function_cache: FxHashMap<Instance<'tcx>, interpret::AllocId>,

/// Allows obtaining const allocs via a unique identifier
-alloc_by_id: FxHashMap<u64, &'tcx interpret::Allocation>,
+alloc_by_id: FxHashMap<interpret::AllocId, &'tcx interpret::Allocation>,

/// The AllocId to assign to the next new regular allocation.
/// Always incremented, never gets smaller.
-next_id: u64,
+next_id: interpret::AllocId,

/// Allows checking whether a constant already has an allocation
///
/// The pointers are to the beginning of an `alloc_by_id` allocation
-alloc_cache: FxHashMap<interpret::GlobalId<'tcx>, interpret::Pointer>,
+alloc_cache: FxHashMap<interpret::GlobalId<'tcx>, interpret::AllocId>,
Contributor:

The idea was that you could have globals which are located inside other globals in the future. So if a constant's value is obtained by indexing another constant, we'd just point to the offset in the array constant.

Member Author:

We do need miri allocations properly exposed for miri alloc -> LLVM global translation, or at least some guarantee that statics don't get optimized like that. FWIW, I think MemoryPointer would be a better fit, eventually.


/// A cache for basic byte allocations keyed by their contents. This is used to deduplicate
/// allocations for string and bytestring literals.
-literal_alloc_cache: FxHashMap<Vec<u8>, u64>,
+literal_alloc_cache: FxHashMap<Vec<u8>, interpret::AllocId>,
}

impl<'tcx> InterpretInterner<'tcx> {
-pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> u64 {
+pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> interpret::AllocId {
if let Some(&alloc_id) = self.function_cache.get(&instance) {
return alloc_id;
}
@@ -932,29 +930,29 @@ impl<'tcx> InterpretInterner<'tcx> {

pub fn get_fn(
&self,
-id: u64,
+id: interpret::AllocId,
) -> Option<Instance<'tcx>> {
self.functions.get(&id).cloned()
}

pub fn get_alloc(
&self,
-id: u64,
+id: interpret::AllocId,
) -> Option<&'tcx interpret::Allocation> {
self.alloc_by_id.get(&id).cloned()
}

pub fn get_cached(
&self,
global_id: interpret::GlobalId<'tcx>,
-) -> Option<interpret::Pointer> {
+) -> Option<interpret::AllocId> {
self.alloc_cache.get(&global_id).cloned()
}

pub fn cache(
&mut self,
global_id: interpret::GlobalId<'tcx>,
-ptr: interpret::Pointer,
+ptr: interpret::AllocId,
) {
if let Some(old) = self.alloc_cache.insert(global_id, ptr) {
bug!("tried to cache {:?}, but was already existing as {:#?}", global_id, old);
@@ -963,7 +961,7 @@ impl<'tcx> InterpretInterner<'tcx> {

pub fn intern_at_reserved(
&mut self,
-id: u64,
+id: interpret::AllocId,
alloc: &'tcx interpret::Allocation,
) {
if let Some(old) = self.alloc_by_id.insert(id, alloc) {
@@ -975,9 +973,9 @@ impl<'tcx> InterpretInterner<'tcx> {
/// yet have an allocation backing it.
pub fn reserve(
&mut self,
-) -> u64 {
+) -> interpret::AllocId {
let next = self.next_id;
-self.next_id = self.next_id
+self.next_id.0 = self.next_id.0
.checked_add(1)
.expect("You overflowed a u64 by incrementing by 1... \
You've just earned yourself a free drink if we ever meet. \
@@ -1069,7 +1067,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
}

/// Allocates a byte or string literal for `mir::interpret`
-pub fn allocate_cached(self, bytes: &[u8]) -> u64 {
+pub fn allocate_cached(self, bytes: &[u8]) -> interpret::AllocId {
// check whether we already allocated this literal or a constant with the same memory
if let Some(&alloc_id) = self.interpret_interner.borrow().literal_alloc_cache.get(bytes) {
return alloc_id;
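
To make the shape of the interner API after this change easier to follow, here is a heavily simplified, self-contained sketch of the reserve/intern/lookup cycle now keyed by `AllocId`. It is a sketch under stated assumptions, not the real `InterpretInterner`: it uses owned allocations and a plain `HashMap` instead of rustc's arena-interned `&'tcx Allocation` and `FxHashMap`, and omits the function and literal caches.

```rust
use std::collections::HashMap;

#[derive(Copy, Clone, Default, Eq, Hash, PartialEq, Debug)]
pub struct AllocId(pub u64);

#[derive(Debug)]
pub struct Allocation {
    pub bytes: Vec<u8>,
}

// A much-reduced stand-in for the interner: ids are handed out by `reserve`
// and allocations are registered under them with `intern_at_reserved`.
#[derive(Default)]
pub struct Interner {
    alloc_by_id: HashMap<AllocId, Allocation>,
    next_id: AllocId,
}

impl Interner {
    /// Reserve a fresh id that does not yet have a backing allocation.
    pub fn reserve(&mut self) -> AllocId {
        let next = self.next_id;
        self.next_id.0 = self.next_id.0.checked_add(1).expect("AllocId overflow");
        next
    }

    /// Attach an allocation to a previously reserved id, exactly once.
    pub fn intern_at_reserved(&mut self, id: AllocId, alloc: Allocation) {
        if self.alloc_by_id.insert(id, alloc).is_some() {
            panic!("tried to intern {:?} twice", id);
        }
    }

    pub fn get_alloc(&self, id: AllocId) -> Option<&Allocation> {
        self.alloc_by_id.get(&id)
    }
}

fn main() {
    let mut interner = Interner::default();
    let id = interner.reserve();
    interner.intern_at_reserved(id, Allocation { bytes: vec![0; 8] });
    assert!(interner.get_alloc(id).is_some());
}
```
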
src/librustc_mir/interpret/const_eval.rs: 8 changes (4 additions & 4 deletions)
@@ -12,7 +12,7 @@ use rustc_data_structures::indexed_vec::Idx;
use syntax::ast::Mutability;
use syntax::codemap::Span;

-use rustc::mir::interpret::{EvalResult, EvalError, EvalErrorKind, GlobalId, Value, Pointer, PrimVal};
+use rustc::mir::interpret::{EvalResult, EvalError, EvalErrorKind, GlobalId, Value, MemoryPointer, Pointer, PrimVal};
use super::{Place, EvalContext, StackPopCleanup, ValTy};

use rustc_const_math::ConstInt;
@@ -67,7 +67,7 @@ pub fn eval_body<'a, 'tcx>(
layout.align,
None,
)?;
-tcx.interpret_interner.borrow_mut().cache(cid, ptr.into());
+tcx.interpret_interner.borrow_mut().cache(cid, ptr.alloc_id);
let cleanup = StackPopCleanup::MarkStatic(Mutability::Immutable);
let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id()));
trace!("const_eval: pushing stack frame for global: {}", name);
@@ -81,8 +81,8 @@

while ecx.step()? {}
}
-let value = tcx.interpret_interner.borrow().get_cached(cid).expect("global not cached");
-Ok((value, instance_ty))
+let alloc = tcx.interpret_interner.borrow().get_cached(cid).expect("global not cached");
+Ok((MemoryPointer::new(alloc, 0).into(), instance_ty))
}

pub fn eval_body_as_integer<'a, 'tcx>(
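
The use-site pattern in `const_eval.rs` (and in `eval_context.rs` below) is that the cache now stores only the `AllocId`, and callers rebuild a pointer at offset 0 whenever they need a value. A small, self-contained sketch of that round trip, with stand-in types rather than rustc's real `MemoryPointer` and `Value`:

```rust
#[derive(Copy, Clone, Eq, Hash, PartialEq, Debug)]
pub struct AllocId(pub u64);

// Stand-ins reduced to what the const-eval change needs:
// a pointer is an allocation id plus an offset into that allocation.
#[derive(Copy, Clone, Debug)]
pub struct MemoryPointer {
    pub alloc_id: AllocId,
    pub offset: u64,
}

impl MemoryPointer {
    pub fn new(alloc_id: AllocId, offset: u64) -> Self {
        MemoryPointer { alloc_id, offset }
    }
}

#[derive(Debug)]
pub enum Value {
    ByRef(MemoryPointer),
}

fn main() {
    // What gets cached is just the id of the allocation...
    let cached: AllocId = AllocId(42);
    // ...and callers reconstruct a pointer to its start on demand.
    let value = Value::ByRef(MemoryPointer::new(cached, 0));
    println!("{:?}", value);
}
```
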
src/librustc_mir/interpret/eval_context.rs: 4 changes (2 additions & 2 deletions)
@@ -950,8 +950,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}

pub fn read_global_as_value(&self, gid: GlobalId, layout: TyLayout) -> Value {
-Value::ByRef(self.tcx.interpret_interner.borrow().get_cached(gid).expect("global not cached"),
-layout.align)
+let alloc = self.tcx.interpret_interner.borrow().get_cached(gid).expect("global not cached");
+Value::ByRef(MemoryPointer::new(alloc, 0).into(), layout.align)
}

pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> {
src/librustc_mir/interpret/machine.rs: 6 changes (3 additions & 3 deletions)
@@ -2,7 +2,7 @@
//! This separation exists to ensure that no fancy miri features like
//! interpreting common C functions leak into CTFE.

-use rustc::mir::interpret::{EvalResult, PrimVal, MemoryPointer, AccessKind};
+use rustc::mir::interpret::{AllocId, EvalResult, PrimVal, MemoryPointer, AccessKind};
use super::{EvalContext, Place, ValTy, Memory};

use rustc::mir;
@@ -89,12 +89,12 @@

fn add_lock<'a>(
_mem: &mut Memory<'a, 'tcx, Self>,
-_id: u64,
+_id: AllocId,
) {}

fn free_lock<'a>(
_mem: &mut Memory<'a, 'tcx, Self>,
-_id: u64,
+_id: AllocId,
_len: u64,
) -> EvalResult<'tcx> {
Ok(())
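
For the trait side, here is a toy sketch of how machine hooks keyed by the typed `AllocId` rather than a bare `u64` might look. The signatures and the `Memory` type are simplified stand-ins, not the real `Machine` trait or its error types:

```rust
use std::collections::HashMap;

#[derive(Copy, Clone, Eq, Hash, PartialEq, Debug)]
pub struct AllocId(pub u64);

// A toy stand-in for the interpreter's memory: locks are tracked per AllocId.
#[derive(Default)]
pub struct Memory {
    locks: HashMap<AllocId, usize>,
}

pub trait Machine {
    // Hooks receive the typed AllocId instead of a raw u64.
    fn add_lock(mem: &mut Memory, id: AllocId) {
        *mem.locks.entry(id).or_insert(0) += 1;
    }

    fn free_lock(mem: &mut Memory, id: AllocId, _len: u64) -> Result<(), String> {
        mem.locks.remove(&id);
        Ok(())
    }
}

// A machine that just uses the default hook implementations.
struct ConstEvalMachine;
impl Machine for ConstEvalMachine {}

fn main() {
    let mut mem = Memory::default();
    ConstEvalMachine::add_lock(&mut mem, AllocId(7));
    ConstEvalMachine::free_lock(&mut mem, AllocId(7), 1).unwrap();
}
```
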