// eval_context.rs
use std::cell::Cell;
use std::fmt::Write;
use std::mem;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
use rustc_index::vec::IndexVec;
use rustc_macros::HashStable;
use rustc_middle::ich::StableHashingContext;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{
sign_extend, truncate, AllocId, FrameInfo, GlobalId, InterpResult, Pointer, Scalar,
};
use rustc_middle::ty::layout::{self, TyAndLayout};
use rustc_middle::ty::{
self, fold::BottomUpFolder, query::TyCtxtAt, subst::SubstsRef, Ty, TyCtxt, TypeFoldable,
};
use rustc_span::{source_map::DUMMY_SP, Span};
use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size, TargetDataLayout};
use super::{
Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, OpTy, Operand, Place, PlaceTy,
ScalarMaybeUndef, StackPopJump,
};
use crate::util::storage::AlwaysLiveLocals;
pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
/// Stores the `Machine` instance.
pub machine: M,
    /// The type context (`TyCtxtAt`): a `TyCtxt` bundled with the span of the current evaluation.
pub tcx: TyCtxtAt<'tcx>,
/// Bounds in scope for polymorphic evaluations.
pub(crate) param_env: ty::ParamEnv<'tcx>,
/// The virtual memory system.
pub memory: Memory<'mir, 'tcx, M>,
/// The virtual call stack.
pub(crate) stack: Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>>,
/// A cache for deduplicating vtables
pub(super) vtables:
FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Pointer<M::PointerTag>>,
}
/// A stack frame.
#[derive(Clone)]
pub struct Frame<'mir, 'tcx, Tag = (), Extra = ()> {
////////////////////////////////////////////////////////////////////////////////
// Function and callsite information
////////////////////////////////////////////////////////////////////////////////
/// The MIR for the function called on this frame.
pub body: &'mir mir::Body<'tcx>,
/// The def_id and substs of the current function.
pub instance: ty::Instance<'tcx>,
/// Extra data for the machine.
pub extra: Extra,
////////////////////////////////////////////////////////////////////////////////
// Return place and locals
////////////////////////////////////////////////////////////////////////////////
/// Work to perform when returning from this function.
pub return_to_block: StackPopCleanup,
/// The location where the result of the current stack frame should be written to,
/// and its layout in the caller.
pub return_place: Option<PlaceTy<'tcx, Tag>>,
/// The list of locals for this stack frame, stored in order as
/// `[return_ptr, arguments..., variables..., temporaries...]`.
/// The locals are stored as `Option<Value>`s.
/// `None` represents a local that is currently dead, while a live local
/// can either directly contain `Scalar` or refer to some part of an `Allocation`.
pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>,
////////////////////////////////////////////////////////////////////////////////
// Current position within the function
////////////////////////////////////////////////////////////////////////////////
/// The block that is currently executed (or will be executed after the above call stacks
/// return).
/// If this is `None`, we are unwinding and this function doesn't need any clean-up.
/// Just continue the same as with `Resume`.
pub block: Option<mir::BasicBlock>,
/// The index of the currently evaluated statement.
pub stmt: usize,
}
#[derive(Clone, Eq, PartialEq, Debug, HashStable)] // Miri debug-prints these
pub enum StackPopCleanup {
    /// Jump to the next block in the caller, or raise UB if `ret` is `None`
    /// (that is, for a function that can never return normally). The layout of
    /// the return place was recorded in the `Frame` (see `Frame::return_place`),
    /// so the returned value can be validated at that layout.
    /// `ret` stores the block we jump to on a normal return, while `unwind`
    /// stores the block used for cleanup during unwinding.
Goto { ret: Option<mir::BasicBlock>, unwind: Option<mir::BasicBlock> },
    /// Just do nothing: used for `main` and for the `box_alloc` hook in Miri.
/// `cleanup` says whether locals are deallocated. Static computation
/// wants them leaked to intern what they need (and just throw away
/// the entire `ecx` when it is done).
None { cleanup: bool },
}
/// State of a local variable including a memoized layout
#[derive(Clone, PartialEq, Eq, HashStable)]
pub struct LocalState<'tcx, Tag = (), Id = AllocId> {
pub value: LocalValue<Tag, Id>,
    /// Don't modify once this is `Some`; it is only used to avoid computing the layout twice.
#[stable_hasher(ignore)]
pub layout: Cell<Option<TyAndLayout<'tcx>>>,
}
/// Current value of a local variable
#[derive(Copy, Clone, PartialEq, Eq, Debug, HashStable)] // Miri debug-prints these
pub enum LocalValue<Tag = (), Id = AllocId> {
/// This local is not currently alive, and cannot be used at all.
Dead,
    /// This local is alive but not yet initialized. It can be written to,
    /// but cannot be read from and cannot have its address taken. Locals get
    /// initialized on first write because for unsized locals, we do not know
    /// their size before that.
Uninitialized,
/// A normal, live local.
/// Mostly for convenience, we re-use the `Operand` type here.
/// This is an optimization over just always having a pointer here;
/// we can thus avoid doing an allocation when the local just stores
/// immediate values *and* never has its address taken.
Live(Operand<Tag, Id>),
}
impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
match self.value {
LocalValue::Dead => throw_ub!(DeadLocal),
LocalValue::Uninitialized => {
bug!("The type checker should prevent reading from a never-written local")
}
LocalValue::Live(val) => Ok(val),
}
}
/// Overwrite the local. If the local can be overwritten in place, return a reference
/// to do so; otherwise return the `MemPlace` to consult instead.
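    ///
    /// A standalone sketch of this calling convention with toy types (`Local`
    /// and `Place` below are illustrative, not interpreter API): the outer
    /// `Result` is the hard error, while the inner one distinguishes
    /// "overwrite in place" from "store through this memory place".
    ///
    /// ```
    /// #[derive(Debug, PartialEq, Clone, Copy)]
    /// struct Place { addr: usize }
    /// #[derive(Debug, PartialEq)]
    /// enum Local { Dead, Immediate(u64), InMemory(Place) }
    ///
    /// fn access_mut(l: &mut Local) -> Result<Result<&mut Local, Place>, &'static str> {
    ///     match *l {
    ///         Local::Dead => Err("dead local"),
    ///         // An in-memory local: hand back the place to store through.
    ///         Local::InMemory(p) => Ok(Err(p)),
    ///         // An immediate local: hand back the slot to overwrite in place.
    ///         ref mut slot @ Local::Immediate(_) => Ok(Ok(slot)),
    ///     }
    /// }
    ///
    /// let mut l = Local::Immediate(1);
    /// if let Ok(Ok(slot)) = access_mut(&mut l) {
    ///     *slot = Local::Immediate(42);
    /// }
    /// assert_eq!(l, Local::Immediate(42));
    /// ```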
pub fn access_mut(
&mut self,
) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
match self.value {
LocalValue::Dead => throw_ub!(DeadLocal),
LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
ref mut local @ LocalValue::Live(Operand::Immediate(_))
| ref mut local @ LocalValue::Uninitialized => Ok(Ok(local)),
}
}
}
impl<'mir, 'tcx, Tag, Extra> Frame<'mir, 'tcx, Tag, Extra> {
/// Return the `SourceInfo` of the current instruction.
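    ///
    /// The statement index addresses one past the last statement to mean the
    /// block's terminator, which is what the bounds check below relies on
    /// (sketch with plain slices):
    ///
    /// ```
    /// let statements = ["stmt0", "stmt1"];
    /// let terminator = "term";
    /// let current = |stmt: usize| {
    ///     if stmt < statements.len() { statements[stmt] } else { terminator }
    /// };
    /// assert_eq!(current(1), "stmt1");
    /// assert_eq!(current(2), "term"); // one past the end selects the terminator
    /// ```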
pub fn current_source_info(&self) -> Option<mir::SourceInfo> {
self.block.map(|block| {
let block = &self.body.basic_blocks()[block];
if self.stmt < block.statements.len() {
block.statements[self.stmt].source_info
} else {
block.terminator().source_info
}
})
}
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
#[inline]
fn data_layout(&self) -> &TargetDataLayout {
&self.tcx.data_layout
}
}
impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
where
M: Machine<'mir, 'tcx>,
{
#[inline]
fn tcx(&self) -> TyCtxt<'tcx> {
*self.tcx
}
}
impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
where
M: Machine<'mir, 'tcx>,
{
fn param_env(&self) -> ty::ParamEnv<'tcx> {
self.param_env
}
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> LayoutOf for InterpCx<'mir, 'tcx, M> {
type Ty = Ty<'tcx>;
type TyAndLayout = InterpResult<'tcx, TyAndLayout<'tcx>>;
#[inline]
fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
self.tcx
.layout_of(self.param_env.and(ty))
.map_err(|layout| err_inval!(Layout(layout)).into())
}
}
/// Test if it is valid for a MIR assignment to assign a `src`-typed value to a `dest`-typed place.
/// This test should be symmetric, as it is primarily about layout compatibility.
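///
/// A standalone sketch of the normalization performed below, with a toy type
/// instead of the compiler's `Ty` (illustrative only): every reference is
/// folded to an immutable one, so a `&mut T -> &T` assignment compares equal,
/// while genuinely different types still differ.
///
/// ```
/// #[derive(Clone, Debug, PartialEq)]
/// enum Ty { Int, Ref { mutable: bool, pointee: Box<Ty> } }
///
/// // Analogue of the `BottomUpFolder`'s `ty_op`: normalize all references
/// // to immutable, recursively.
/// fn normalize(ty: &Ty) -> Ty {
///     match ty {
///         Ty::Int => Ty::Int,
///         Ty::Ref { pointee, .. } => {
///             Ty::Ref { mutable: false, pointee: Box::new(normalize(pointee)) }
///         }
///     }
/// }
///
/// let shared = Ty::Ref { mutable: false, pointee: Box::new(Ty::Int) };
/// let unique = Ty::Ref { mutable: true, pointee: Box::new(Ty::Int) };
/// assert_eq!(normalize(&unique), normalize(&shared)); // `&mut T` -> `&T` is fine
/// assert_ne!(normalize(&Ty::Int), normalize(&shared));
/// ```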
pub(super) fn mir_assign_valid_types<'tcx>(
tcx: TyCtxt<'tcx>,
src: TyAndLayout<'tcx>,
dest: TyAndLayout<'tcx>,
) -> bool {
if src.ty == dest.ty {
// Equal types, all is good.
return true;
}
if src.layout != dest.layout {
// Layout differs, definitely not equal.
// We do this here because Miri would *do the wrong thing* if we allowed layout-changing
// assignments.
return false;
}
// Type-changing assignments can happen for (at least) two reasons:
// 1. `&mut T` -> `&T` gets optimized from a reborrow to a mere assignment.
// 2. Subtyping is used. While all normal lifetimes are erased, higher-ranked types
// with their late-bound lifetimes are still around and can lead to type differences.
// Normalize both of them away.
let normalize = |ty: Ty<'tcx>| {
ty.fold_with(&mut BottomUpFolder {
tcx,
// Normalize all references to immutable.
ty_op: |ty| match ty.kind {
ty::Ref(_, pointee, _) => tcx.mk_imm_ref(tcx.lifetimes.re_erased, pointee),
_ => ty,
},
// We just erase all late-bound lifetimes, but this is not fully correct (FIXME):
// lifetimes in invariant positions could matter (e.g. through associated types).
// We rely on the fact that layout was confirmed to be equal above.
lt_op: |_| tcx.lifetimes.re_erased,
// Leave consts unchanged.
ct_op: |ct| ct,
})
};
normalize(src.ty) == normalize(dest.ty)
}
/// Use the already known layout if given (but sanity check in debug mode),
/// or compute the layout.
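///
/// The essential shape as a standalone sketch (a generic helper, not
/// interpreter API): trust the caller-provided value when present, but
/// recompute and cross-check it in debug builds.
///
/// ```
/// fn from_known<T: PartialEq + std::fmt::Debug>(
///     known: Option<T>,
///     compute: impl FnOnce() -> T,
/// ) -> T {
///     match known {
///         None => compute(),
///         Some(known) => {
///             if cfg!(debug_assertions) {
///                 // Pay for the recomputation in debug builds to catch
///                 // callers passing a stale or mismatched value.
///                 assert_eq!(compute(), known);
///             }
///             known
///         }
///     }
/// }
/// assert_eq!(from_known(Some(7), || 7), 7);
/// assert_eq!(from_known(None, || 9), 9);
/// ```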
#[cfg_attr(not(debug_assertions), inline(always))]
pub(super) fn from_known_layout<'tcx>(
tcx: TyCtxtAt<'tcx>,
known_layout: Option<TyAndLayout<'tcx>>,
compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
match known_layout {
None => compute(),
Some(known_layout) => {
if cfg!(debug_assertions) {
let check_layout = compute()?;
if !mir_assign_valid_types(tcx.tcx, check_layout, known_layout) {
span_bug!(
tcx.span,
"expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
known_layout.ty,
check_layout.ty,
);
}
}
Ok(known_layout)
}
}
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn new(
tcx: TyCtxtAt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
machine: M,
memory_extra: M::MemoryExtra,
) -> Self {
InterpCx {
machine,
tcx,
param_env,
memory: Memory::new(tcx, memory_extra),
stack: Vec::new(),
vtables: FxHashMap::default(),
}
}
#[inline(always)]
pub fn set_span(&mut self, span: Span) {
self.tcx.span = span;
self.memory.tcx.span = span;
}
#[inline(always)]
pub fn force_ptr(
&self,
scalar: Scalar<M::PointerTag>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
self.memory.force_ptr(scalar)
}
#[inline(always)]
pub fn force_bits(
&self,
scalar: Scalar<M::PointerTag>,
size: Size,
) -> InterpResult<'tcx, u128> {
self.memory.force_bits(scalar, size)
}
/// Call this to turn untagged "global" pointers (obtained via `tcx`) into
/// the *canonical* machine pointer to the allocation. Must never be used
/// for any other pointers!
///
/// This represents a *direct* access to that memory, as opposed to access
/// through a pointer that was created by the program.
#[inline(always)]
pub fn tag_global_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
self.memory.tag_global_base_pointer(ptr)
}
#[inline(always)]
pub fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] {
&self.stack
}
#[inline(always)]
pub fn cur_frame(&self) -> usize {
assert!(!self.stack.is_empty());
self.stack.len() - 1
}
#[inline(always)]
pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
self.stack.last().expect("no call frames exist")
}
#[inline(always)]
pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
self.stack.last_mut().expect("no call frames exist")
}
#[inline(always)]
pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
self.frame().body
}
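    /// Sign-extend `value`, which has the width of `ty`, to the full 128 bits.
    ///
    /// A standalone sketch of what the two width helpers compute, for plain bit
    /// widths (not the actual `rustc_middle` implementations):
    ///
    /// ```
    /// fn sign_extend(value: u128, bits: u32) -> u128 {
    ///     let shift = 128 - bits; // assumes 0 < bits <= 128
    ///     // An arithmetic right shift copies the type-level sign bit upward.
    ///     (((value << shift) as i128) >> shift) as u128
    /// }
    /// fn truncate(value: u128, bits: u32) -> u128 {
    ///     let shift = 128 - bits;
    ///     (value << shift) >> shift // drop the high bits, refill with zeros
    /// }
    /// assert_eq!(sign_extend(0xFF, 8), u128::MAX); // -1i8 becomes -1i128
    /// assert_eq!(truncate(0x1FF, 8), 0xFF); // the ninth bit is dropped
    /// ```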
#[inline(always)]
pub fn sign_extend(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
assert!(ty.abi.is_signed());
sign_extend(value, ty.size)
}
#[inline(always)]
pub fn truncate(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
truncate(value, ty.size)
}
#[inline]
pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
ty.is_sized(self.tcx, self.param_env)
}
#[inline]
pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
ty.is_freeze(*self.tcx, self.param_env, DUMMY_SP)
}
pub fn load_mir(
&self,
instance: ty::InstanceDef<'tcx>,
promoted: Option<mir::Promoted>,
) -> InterpResult<'tcx, mir::ReadOnlyBodyAndCache<'tcx, 'tcx>> {
        // Do not continue if typeck errors occurred (this can only happen in the local crate).
let did = instance.def_id();
if did.is_local()
&& self.tcx.has_typeck_tables(did)
&& self.tcx.typeck_tables_of(did).tainted_by_errors
{
throw_inval!(TypeckError)
}
trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
if let Some(promoted) = promoted {
return Ok(self.tcx.promoted_mir(did)[promoted].unwrap_read_only());
}
match instance {
ty::InstanceDef::Item(def_id) => {
if self.tcx.is_mir_available(did) {
Ok(self.tcx.optimized_mir(did).unwrap_read_only())
} else {
throw_unsup!(NoMirFor(def_id))
}
}
_ => Ok(self.tcx.instance_mir(instance)),
}
}
/// Call this on things you got out of the MIR (so it is as generic as the current
/// stack frame), to bring it into the proper environment for this interpreter.
pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
&self,
value: T,
) -> T {
self.subst_from_frame_and_normalize_erasing_regions(self.frame(), value)
}
/// Call this on things you got out of the MIR (so it is as generic as the provided
/// stack frame), to bring it into the proper environment for this interpreter.
pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
&self,
frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
value: T,
) -> T {
if let Some(substs) = frame.instance.substs_for_mir_body() {
self.tcx.subst_and_normalize_erasing_regions(substs, self.param_env, &value)
} else {
self.tcx.normalize_erasing_regions(self.param_env, value)
}
}
/// The `substs` are assumed to already be in our interpreter "universe" (param_env).
pub(super) fn resolve(
&self,
def_id: DefId,
substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, ty::Instance<'tcx>> {
trace!("resolve: {:?}, {:#?}", def_id, substs);
trace!("param_env: {:#?}", self.param_env);
trace!("substs: {:#?}", substs);
ty::Instance::resolve(*self.tcx, self.param_env, def_id, substs)
.ok_or_else(|| err_inval!(TooGeneric).into())
}
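    /// Returns the layout of the given local, caching it in the frame's
    /// `LocalState` when possible.
    ///
    /// The cache can be filled through a shared reference because it is a
    /// `Cell<Option<TyAndLayout>>` (and `TyAndLayout` is `Copy`, as `Cell::get`
    /// requires). A standalone sketch of the idiom:
    ///
    /// ```
    /// use std::cell::Cell;
    /// let cache: Cell<Option<u64>> = Cell::new(None);
    /// let get_or_compute = |compute: &dyn Fn() -> u64| match cache.get() {
    ///     Some(v) => v,
    ///     None => {
    ///         let v = compute();
    ///         cache.set(Some(v)); // fill the cache through a shared borrow
    ///         v
    ///     }
    /// };
    /// assert_eq!(get_or_compute(&|| 42), 42);
    /// assert_eq!(get_or_compute(&|| unreachable!()), 42); // cache hit, no recompute
    /// ```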
pub fn layout_of_local(
&self,
frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
local: mir::Local,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
// `const_prop` runs into this with an invalid (empty) frame, so we
// have to support that case (mostly by skipping all caching).
match frame.locals.get(local).and_then(|state| state.layout.get()) {
None => {
let layout = from_known_layout(self.tcx, layout, || {
let local_ty = frame.body.local_decls[local].ty;
let local_ty =
self.subst_from_frame_and_normalize_erasing_regions(frame, local_ty);
self.layout_of(local_ty)
})?;
if let Some(state) = frame.locals.get(local) {
// Layouts of locals are requested a lot, so we cache them.
state.layout.set(Some(layout));
}
Ok(layout)
}
Some(layout) => Ok(layout),
}
}
/// Returns the actual dynamic size and alignment of the place at the given type.
/// Only the "meta" (metadata) part of the place matters.
/// This can fail to provide an answer for extern types.
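    ///
    /// For the `Adt`/`Tuple` case below, a worked instance for a hypothetical
    /// `struct S { header: u32, tail: [u16] }` with `len == 5` (plain
    /// arithmetic, illustrative values only):
    ///
    /// ```
    /// fn align_to(size: u64, align: u64) -> u64 {
    ///     // Round up to the next multiple of `align` (a power of two).
    ///     (size + align - 1) & !(align - 1)
    /// }
    /// let (sized_size, sized_align) = (4u64, 4u64); // the `u32` header
    /// let (elem_size, unsized_align) = (2u64, 2u64); // `u16` tail elements
    /// let len = 5u64; // from the fat pointer's metadata
    /// let size = sized_size + elem_size * len; // 4 + 10 = 14
    /// let align = sized_align.max(unsized_align); // max(4, 2) = 4
    /// assert_eq!(align_to(size, align), 16); // padded up to a multiple of 4
    /// ```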
pub(super) fn size_and_align_of(
&self,
metadata: MemPlaceMeta<M::PointerTag>,
layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, Option<(Size, Align)>> {
if !layout.is_unsized() {
return Ok(Some((layout.size, layout.align.abi)));
}
match layout.ty.kind {
ty::Adt(..) | ty::Tuple(..) => {
// First get the size of all statically known fields.
                // Don't use `type_of::sizing_type_of` because that expects `t` to be sized,
                // and it also rounds up to alignment, which we want to avoid,
                // as the unsized field's alignment could be smaller.
assert!(!layout.ty.is_simd());
assert!(layout.fields.count() > 0);
trace!("DST layout: {:?}", layout);
let sized_size = layout.fields.offset(layout.fields.count() - 1);
let sized_align = layout.align.abi;
trace!(
"DST {} statically sized prefix size: {:?} align: {:?}",
layout.ty,
sized_size,
sized_align
);
                // Recurse to get the size of the dynamically sized field (must be
                // the last field). We can't have foreign types here; how would we
                // adjust alignment and size for them?
let field = layout.field(self, layout.fields.count() - 1)?;
let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? {
Some(size_and_align) => size_and_align,
None => {
// A field with extern type. If this field is at offset 0, we behave
// like the underlying extern type.
// FIXME: Once we have made decisions for how to handle size and alignment
// of `extern type`, this should be adapted. It is just a temporary hack
// to get some code to work that probably ought to work.
if sized_size == Size::ZERO {
return Ok(None);
} else {
bug!("Fields cannot be extern types, unless they are at offset 0")
}
}
};
                // FIXME (#26403, #27023): We should be adding padding
                // to `sized_size` (to accommodate the `unsized_align`
                // required of the unsized field that follows) before
                // summing it with `unsized_size`. (Note that since #26403
                // is unfixed, we do not yet add the necessary padding
                // here. But this is where the add would go.)
// Return the sum of sizes and max of aligns.
let size = sized_size + unsized_size; // `Size` addition
                // Choose the max of the two known alignments (the combined value
                // must be aligned according to the more restrictive of the two).
let align = sized_align.max(unsized_align);
// Issue #27023: must add any necessary padding to `size`
// (to make it a multiple of `align`) before returning it.
let size = size.align_to(align);
// Check if this brought us over the size limit.
if size.bytes() >= self.tcx.data_layout().obj_size_bound() {
throw_ub!(InvalidMeta("total size is bigger than largest supported object"));
}
Ok(Some((size, align)))
}
ty::Dynamic(..) => {
let vtable = metadata.unwrap_meta();
// Read size and align from vtable (already checks size).
Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
}
ty::Slice(_) | ty::Str => {
let len = metadata.unwrap_meta().to_machine_usize(self)?;
let elem = layout.field(self, 0)?;
// Make sure the slice is not too big.
let size = elem.size.checked_mul(len, &*self.tcx).ok_or_else(|| {
err_ub!(InvalidMeta("slice is bigger than largest supported object"))
})?;
Ok(Some((size, elem.align.abi)))
}
ty::Foreign(_) => Ok(None),
_ => bug!("size_and_align_of::<{:?}> not supported", layout.ty),
}
}
#[inline]
pub fn size_and_align_of_mplace(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Option<(Size, Align)>> {
self.size_and_align_of(mplace.meta, mplace.layout)
}
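    /// Push a new stack frame for `instance`, initializing its locals, and
    /// enforce the recursion limit.
    ///
    /// The depth check happens *after* the push, so the error is raised with
    /// the offending frame already on the stack. A sketch with plain values
    /// (the names and the limit of 2 are illustrative):
    ///
    /// ```
    /// fn push(stack: &mut Vec<&'static str>, name: &'static str) -> Result<(), &'static str> {
    ///     let recursion_limit = 2; // rustc reads this from the session
    ///     stack.push(name);
    ///     if stack.len() > recursion_limit { Err("stack frame limit reached") } else { Ok(()) }
    /// }
    /// let mut stack = Vec::new();
    /// assert!(push(&mut stack, "main").is_ok());
    /// assert!(push(&mut stack, "f").is_ok());
    /// assert!(push(&mut stack, "g").is_err()); // the third frame exceeds the limit
    /// ```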
pub fn push_stack_frame(
&mut self,
instance: ty::Instance<'tcx>,
body: &'mir mir::Body<'tcx>,
return_place: Option<PlaceTy<'tcx, M::PointerTag>>,
return_to_block: StackPopCleanup,
) -> InterpResult<'tcx> {
if !self.stack.is_empty() {
info!("PAUSING({}) {}", self.cur_frame(), self.frame().instance);
}
::log_settings::settings().indentation += 1;
// first push a stack frame so we have access to the local substs
let extra = M::stack_push(self)?;
self.stack.push(Frame {
body,
block: Some(mir::START_BLOCK),
return_to_block,
return_place,
            // Empty local array; we fill it in below, after we are inside the
            // stack frame and all methods actually know about the frame.
locals: IndexVec::new(),
instance,
stmt: 0,
extra,
});
// don't allocate at all for trivial constants
if body.local_decls.len() > 1 {
// Locals are initially uninitialized.
let dummy = LocalState { value: LocalValue::Uninitialized, layout: Cell::new(None) };
let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
// Return place is handled specially by the `eval_place` functions, and the
// entry in `locals` should never be used. Make it dead, to be sure.
locals[mir::RETURN_PLACE].value = LocalValue::Dead;
            // Now mark as dead those locals that we do not want to initialize.
match self.tcx.def_kind(instance.def_id()) {
// statics and constants don't have `Storage*` statements, no need to look for them
//
// FIXME: The above is likely untrue. See
// <https://github.com/rust-lang/rust/pull/70004#issuecomment-602022110>. Is it
// okay to ignore `StorageDead`/`StorageLive` annotations during CTFE?
Some(DefKind::Static) | Some(DefKind::Const) | Some(DefKind::AssocConst) => {}
_ => {
// Mark locals that use `Storage*` annotations as dead on function entry.
let always_live = AlwaysLiveLocals::new(self.body());
for local in locals.indices() {
if !always_live.contains(local) {
locals[local].value = LocalValue::Dead;
}
}
}
}
// done
self.frame_mut().locals = locals;
}
info!("ENTERING({}) {}", self.cur_frame(), self.frame().instance);
if self.stack.len() > *self.tcx.sess.recursion_limit.get() {
throw_exhaust!(StackFrameLimitReached)
} else {
Ok(())
}
}
/// Jump to the given block.
#[inline]
pub fn go_to_block(&mut self, target: mir::BasicBlock) {
let frame = self.frame_mut();
frame.block = Some(target);
frame.stmt = 0;
}
/// *Return* to the given `target` basic block.
/// Do *not* use for unwinding! Use `unwind_to_block` instead.
///
/// If `target` is `None`, that indicates the function cannot return, so we raise UB.
pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
if let Some(target) = target {
self.go_to_block(target);
Ok(())
} else {
throw_ub!(Unreachable)
}
}
/// *Unwind* to the given `target` basic block.
/// Do *not* use for returning! Use `return_to_block` instead.
///
/// If `target` is `None`, that indicates the function does not need cleanup during
/// unwinding, and we will just keep propagating that upwards.
pub fn unwind_to_block(&mut self, target: Option<mir::BasicBlock>) {
let frame = self.frame_mut();
frame.block = target;
frame.stmt = 0;
}
/// Pops the current frame from the stack, deallocating the
/// memory for allocated locals.
///
/// If `unwinding` is `false`, then we are performing a normal return
/// from a function. In this case, we jump back into the frame of the caller,
/// and continue execution as normal.
///
/// If `unwinding` is `true`, then we are in the middle of a panic,
/// and need to unwind this frame. In this case, we jump to the
/// `cleanup` block for the function, which is responsible for running
/// `Drop` impls for any locals that have been initialized at this point.
/// The cleanup block ends with a special `Resume` terminator, which will
/// cause us to continue unwinding.
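    ///
    /// Which successor block is taken therefore depends only on the mode; for
    /// a `StackPopCleanup::Goto { ret, unwind }` frame (sketch with plain
    /// values standing in for basic blocks):
    ///
    /// ```
    /// let (ret, unwind) = (Some("bb3"), Some("bb7"));
    /// let next_block = |unwinding: bool| if unwinding { unwind } else { ret };
    /// assert_eq!(next_block(false), Some("bb3")); // normal return edge
    /// assert_eq!(next_block(true), Some("bb7")); // cleanup edge while unwinding
    /// ```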
pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
info!(
"LEAVING({}) {} (unwinding = {})",
self.cur_frame(),
self.frame().instance,
unwinding
);
// Sanity check `unwinding`.
assert_eq!(
unwinding,
match self.frame().block {
None => true,
Some(block) => self.body().basic_blocks()[block].is_cleanup,
}
);
::log_settings::settings().indentation -= 1;
let frame = self.stack.pop().expect("tried to pop a stack frame, but there were none");
// Now where do we jump next?
// Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
// In that case, we return early. We also avoid validation in that case,
// because this is CTFE and the final value will be thoroughly validated anyway.
let (cleanup, next_block) = match frame.return_to_block {
StackPopCleanup::Goto { ret, unwind } => {
(true, Some(if unwinding { unwind } else { ret }))
}
StackPopCleanup::None { cleanup, .. } => (cleanup, None),
};
if !cleanup {
assert!(self.stack.is_empty(), "only the topmost frame should ever be leaked");
assert!(next_block.is_none(), "tried to skip cleanup when we have a next block!");
assert!(!unwinding, "tried to skip cleanup during unwinding");
// Leak the locals, skip validation, skip machine hook.
return Ok(());
}
// Cleanup: deallocate all locals that are backed by an allocation.
for local in frame.locals {
self.deallocate_local(local.value)?;
}
if M::stack_pop(self, frame.extra, unwinding)? == StackPopJump::NoJump {
// The hook already did everything.
// We want to skip the `info!` below, hence early return.
return Ok(());
}
// Normal return.
if unwinding {
// Follow the unwind edge.
let unwind = next_block.expect("Encountered StackPopCleanup::None when unwinding!");
self.unwind_to_block(unwind);
} else {
// Follow the normal return edge.
// Validate the return value. Do this after deallocating so that we catch dangling
// references.
if let Some(return_place) = frame.return_place {
if M::enforce_validity(self) {
// Data got changed, better make sure it matches the type!
// It is still possible that the return place held invalid data while
// the function is running, but that's okay because nobody could have
// accessed that same data from the "outside" to observe any broken
// invariant -- that is, unless a function somehow has a ptr to
// its return place... but the way MIR is currently generated, the
// return place is always a local and then this cannot happen.
self.validate_operand(self.place_to_op(return_place)?)?;
}
} else {
// Uh, that shouldn't happen... the function did not intend to return
throw_ub!(Unreachable);
}
// Jump to new block -- *after* validation so that the spans make more sense.
if let Some(ret) = next_block {
self.return_to_block(ret)?;
}
}
if !self.stack.is_empty() {
info!(
"CONTINUING({}) {} (unwinding = {})",
self.cur_frame(),
self.frame().instance,
unwinding
);
}
Ok(())
}
    /// Mark a local's storage as live, killing the previous content and returning it.
/// Remember to deallocate that!
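    ///
    /// Both this and `storage_dead` use `mem::replace` to swap the new state in
    /// while handing the previous value back for cleanup. A sketch with a toy
    /// slot type (illustrative, not interpreter API):
    ///
    /// ```
    /// use std::mem;
    /// #[derive(Debug, PartialEq)]
    /// enum Slot { Dead, Uninitialized, Live(u64) }
    ///
    /// let mut slot = Slot::Live(7);
    /// // "StorageLive" always kills the current value, even if the local is
    /// // already live; the caller must clean up whatever comes back.
    /// assert_eq!(mem::replace(&mut slot, Slot::Uninitialized), Slot::Live(7));
    /// // "StorageDead" hands back the old state the same way.
    /// assert_eq!(mem::replace(&mut slot, Slot::Dead), Slot::Uninitialized);
    /// ```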
pub fn storage_live(
&mut self,
local: mir::Local,
) -> InterpResult<'tcx, LocalValue<M::PointerTag>> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
trace!("{:?} is now live", local);
let local_val = LocalValue::Uninitialized;
        // StorageLive *always* kills the value that's currently stored.
        // However, we do not error if the variable is already live;
        // see <https://github.com/rust-lang/rust/issues/42371>.
Ok(mem::replace(&mut self.frame_mut().locals[local].value, local_val))
}
/// Returns the old value of the local.
/// Remember to deallocate that!
pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue<M::PointerTag> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
trace!("{:?} is now dead", local);
mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead)
}
pub(super) fn deallocate_local(
&mut self,
local: LocalValue<M::PointerTag>,
) -> InterpResult<'tcx> {
// FIXME: should we tell the user that there was a local which was never written to?
if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
trace!("deallocating local");
            // All locals have a backing allocation, even if the allocation is
            // empty due to the local having a ZST type.
let ptr = ptr.assert_ptr();
if log_enabled!(::log::Level::Trace) {
self.memory.dump_alloc(ptr.alloc_id);
}
self.memory.deallocate_local(ptr)?;
};
Ok(())
}
pub(super) fn const_eval(
&self,
gid: GlobalId<'tcx>,
ty: Ty<'tcx>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
// For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
// and thus don't care about the parameter environment. While we could just use
// `self.param_env`, that would mean we invoke the query to evaluate the static
// with different parameter environments, thus causing the static to be evaluated
// multiple times.
let param_env = if self.tcx.is_static(gid.instance.def_id()) {
ty::ParamEnv::reveal_all()
} else {
self.param_env
};
let val = self.tcx.const_eval_global_id(param_env, gid, Some(self.tcx.span))?;
        // Even though `ecx.const_eval` is called from `eval_const_to_op`, we can never have a
        // recursion deeper than one level, because the `const_eval_global_id` call above is
        // guaranteed not to return `ConstValue::Unevaluated`, which is the only way that
        // `eval_const_to_op` will call `ecx.const_eval`.
let const_ = ty::Const { val: ty::ConstKind::Value(val), ty };
self.eval_const_to_op(&const_, None)
}
pub fn const_eval_raw(
&self,
gid: GlobalId<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
// For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
// and thus don't care about the parameter environment. While we could just use
// `self.param_env`, that would mean we invoke the query to evaluate the static
// with different parameter environments, thus causing the static to be evaluated
// multiple times.
let param_env = if self.tcx.is_static(gid.instance.def_id()) {
ty::ParamEnv::reveal_all()
} else {
self.param_env
};
// We use `const_eval_raw` here, and get an unvalidated result. That is okay:
// Our result will later be validated anyway, and there seems no good reason
// to have to fail early here. This is also more consistent with
// `Memory::get_static_alloc` which has to use `const_eval_raw` to avoid cycles.
let val = self.tcx.const_eval_raw(param_env.and(gid))?;
self.raw_const_to_mplace(val)
}
pub fn dump_place(&self, place: Place<M::PointerTag>) {
// Debug output
if !log_enabled!(::log::Level::Trace) {
return;
}
match place {
Place::Local { frame, local } => {
let mut allocs = Vec::new();
let mut msg = format!("{:?}", local);
if frame != self.cur_frame() {
write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap();
}
write!(msg, ":").unwrap();
match self.stack[frame].locals[local].value {
LocalValue::Dead => write!(msg, " is dead").unwrap(),
LocalValue::Uninitialized => write!(msg, " is uninitialized").unwrap(),
LocalValue::Live(Operand::Indirect(mplace)) => match mplace.ptr {
Scalar::Ptr(ptr) => {
write!(
msg,
" by align({}){} ref:",
mplace.align.bytes(),
match mplace.meta {
MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
}
)
.unwrap();
allocs.push(ptr.alloc_id);
}
ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
},
LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
write!(msg, " {:?}", val).unwrap();
if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val {
allocs.push(ptr.alloc_id);
}
}
LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 {
allocs.push(ptr.alloc_id);
}
if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val2 {
allocs.push(ptr.alloc_id);
}
}
}
trace!("{}", msg);
self.memory.dump_allocs(allocs);
}
Place::Ptr(mplace) => match mplace.ptr {
Scalar::Ptr(ptr) => {
trace!("by align({}) ref:", mplace.align.bytes());
self.memory.dump_alloc(ptr.alloc_id);
}
                ptr => trace!(" by integral ref: {:?}", ptr),
},
}
}
pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> {
let mut frames = Vec::new();
for frame in self.stack().iter().rev() {
let source_info = frame.current_source_info();
let lint_root = source_info.and_then(|source_info| {
match &frame.body.source_scopes[source_info.scope].local_data {
mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
mir::ClearCrossCrate::Clear => None,
}
});
let span = source_info.map_or(DUMMY_SP, |source_info| source_info.span);
frames.push(FrameInfo { span, instance: frame.instance, lint_root });
}
trace!("generate stacktrace: {:#?}", frames);
frames
}
}
impl<'ctx, 'mir, 'tcx, Tag, Extra> HashStable<StableHashingContext<'ctx>>
for Frame<'mir, 'tcx, Tag, Extra>
where
Extra: HashStable<StableHashingContext<'ctx>>,
Tag: HashStable<StableHashingContext<'ctx>>,
{
fn hash_stable(&self, hcx: &mut StableHashingContext<'ctx>, hasher: &mut StableHasher) {
self.body.hash_stable(hcx, hasher);
self.instance.hash_stable(hcx, hasher);
self.return_to_block.hash_stable(hcx, hasher);
self.return_place.as_ref().map(|r| &**r).hash_stable(hcx, hasher);
self.locals.hash_stable(hcx, hasher);
self.block.hash_stable(hcx, hasher);
self.stmt.hash_stable(hcx, hasher);
self.extra.hash_stable(hcx, hasher);
}
}