From fab2532ef949a08e16b621259a91d1ff37165665 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 25 Apr 2017 14:39:00 +0300 Subject: [PATCH 01/69] rustc_trans: move const & lvalue access helpers from adt. --- src/librustc_trans/adt.rs | 301 ++++++---------------------- src/librustc_trans/intrinsic.rs | 5 +- src/librustc_trans/mir/constant.rs | 28 ++- src/librustc_trans/mir/lvalue.rs | 225 ++++++++++++++------- src/librustc_trans/mir/rvalue.rs | 12 +- src/librustc_trans/mir/statement.rs | 9 +- 6 files changed, 251 insertions(+), 329 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index b06f8e4e67116..cdf66a0835df0 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -41,52 +41,15 @@ //! used unboxed and any field can have pointers (including mutable) //! taken to it, implementing them for Rust seems difficult. -use std; - -use llvm::{ValueRef, True, IntEQ, IntNE}; use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, LayoutTyper}; -use common::*; -use builder::Builder; -use base; + +use context::CrateContext; use machine; use monomorphize; use type_::Type; use type_of; -use mir::lvalue::Alignment; - -/// Given an enum, struct, closure, or tuple, extracts fields. -/// Treats closures as a struct with one variant. -/// `empty_if_no_variants` is a switch to deal with empty enums. -/// If true, `variant_index` is disregarded and an empty Vec returned in this case. 
-pub fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, - variant_index: usize, - empty_if_no_variants: bool) -> Vec> { - match t.sty { - ty::TyAdt(ref def, _) if def.variants.len() == 0 && empty_if_no_variants => { - Vec::default() - }, - ty::TyAdt(ref def, ref substs) => { - def.variants[variant_index].fields.iter().map(|f| { - monomorphize::field_ty(cx.tcx(), substs, f) - }).collect::>() - }, - ty::TyTuple(fields, _) => fields.to_vec(), - ty::TyClosure(def_id, substs) => { - if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);} - substs.upvar_tys(def_id, cx.tcx()).collect() - }, - ty::TyGenerator(def_id, substs, _) => { - if variant_index > 0 { bug!("{} is a generator, which only has one variant", t);} - substs.field_tys(def_id, cx.tcx()).map(|t| { - cx.tcx().fully_normalize_associated_types_in(&t) - }).collect() - }, - _ => bug!("{} is not a type that can have fields.", t) - } -} - /// LLVM-level types are a little complicated. /// /// C-like enums need to be actual ints, not wrapped in a struct, @@ -119,8 +82,8 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, (nndiscr, nonnull, nonnull.packed), _ => unreachable!() }; - let fields = compute_fields(cx, t, nonnull_variant_index as usize, true); - llty.set_struct_body(&struct_llfields(cx, &fields, nonnull_variant), + llty.set_struct_body(&struct_llfields(cx, t, nonnull_variant_index as usize, + nonnull_variant, None), packed) }, _ => bug!("This function cannot handle {} with layout {:#?}", t, l) @@ -148,10 +111,9 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. 
} => { - let fields = compute_fields(cx, t, nndiscr as usize, false); match name { None => { - Type::struct_(cx, &struct_llfields(cx, &fields, nonnull), + Type::struct_(cx, &struct_llfields(cx, t, nndiscr as usize, nonnull, None), nonnull.packed) } Some(name) => { @@ -160,17 +122,12 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } layout::Univariant { ref variant, .. } => { - // Note that this case also handles empty enums. - // Thus the true as the final parameter here. - let fields = compute_fields(cx, t, 0, true); match name { None => { - let fields = struct_llfields(cx, &fields, &variant); - Type::struct_(cx, &fields, variant.packed) + Type::struct_(cx, &struct_llfields(cx, t, 0, &variant, None), + variant.packed) } Some(name) => { - // Hypothesis: named_struct's can never need a - // drop flag. (... needs validation.) Type::named_struct(cx, name) } } @@ -205,7 +162,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let size = size.bytes(); let align = align.abi(); let primitive_align = primitive_align.abi(); - assert!(align <= std::u32::MAX as u64); + assert!(align <= ::std::u32::MAX as u64); let discr_ty = Type::from_integer(cx, discr); let discr_size = discr.size().bytes(); let padded_discr_size = roundup(discr_size, align as u32); @@ -246,35 +203,63 @@ fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type { } } - -// Double index to account for padding (FieldPath already uses `Struct::memory_index`) -fn struct_llfields_path(discrfield: &layout::FieldPath) -> Vec { - discrfield.iter().map(|&i| (i as usize) << 1).collect::>() -} - - // Lookup `Struct::memory_index` and double it to account for padding pub fn struct_llfields_index(variant: &layout::Struct, index: usize) -> usize { (variant.memory_index[index] as usize) << 1 } - -pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, field_tys: &Vec>, - variant: &layout::Struct) -> Vec { +pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + t: Ty<'tcx>, + 
variant_index: usize, + variant: &layout::Struct, + discr: Option>) -> Vec { + let field_count = match t.sty { + ty::TyAdt(ref def, _) if def.variants.len() == 0 => return vec![], + ty::TyAdt(ref def, _) => { + discr.is_some() as usize + def.variants[variant_index].fields.len() + }, + ty::TyTuple(fields, _) => fields.len(), + ty::TyClosure(def_id, substs) => { + if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);} + substs.upvar_tys(def_id, cx.tcx()).count() + }, + ty::TyGenerator(def_id, substs, _) => { + if variant_index > 0 { bug!("{} is a generator, which only has one variant", t);} + substs.field_tys(def_id, cx.tcx()).count() + }, + _ => bug!("{} is not a type that can have fields.", t) + }; debug!("struct_llfields: variant: {:?}", variant); let mut first_field = true; let mut min_offset = 0; - let mut result: Vec = Vec::with_capacity(field_tys.len() * 2); + let mut result: Vec = Vec::with_capacity(field_count * 2); let field_iter = variant.field_index_by_increasing_offset().map(|i| { - (i, field_tys[i as usize], variant.offsets[i as usize].bytes()) }); + (i, match t.sty { + ty::TyAdt(..) 
if i == 0 && discr.is_some() => discr.unwrap(), + ty::TyAdt(ref def, ref substs) => { + monomorphize::field_ty(cx.tcx(), substs, + &def.variants[variant_index].fields[i as usize - discr.is_some() as usize]) + }, + ty::TyTuple(fields, _) => fields[i as usize], + ty::TyClosure(def_id, substs) => { + substs.upvar_tys(def_id, cx.tcx()).nth(i).unwrap() + }, + ty::TyGenerator(def_id, substs, _) => { + let ty = substs.field_tys(def_id, cx.tcx()).nth(i).unwrap(); + cx.tcx().normalize_associated_type(&ty) + }, + _ => bug!() + }, variant.offsets[i as usize].bytes()) + }); for (index, ty, target_offset) in field_iter { + assert!(target_offset >= min_offset); + let padding_bytes = target_offset - min_offset; if first_field { debug!("struct_llfields: {} ty: {} min_offset: {} target_offset: {}", index, ty, min_offset, target_offset); + assert_eq!(padding_bytes, 0); first_field = false; } else { - assert!(target_offset >= min_offset); - let padding_bytes = if variant.packed { 0 } else { target_offset - min_offset }; result.push(Type::array(&Type::i8(cx), padding_bytes)); debug!("struct_llfields: {} ty: {} pad_bytes: {} min_offset: {} target_offset: {}", index, ty, padding_bytes, min_offset, target_offset); @@ -282,10 +267,18 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, field_tys: &Vec 0 { if variant.stride().bytes() < min_offset { bug!("variant: {:?} stride: {} min_offset: {}", variant, variant.stride().bytes(), min_offset); @@ -294,7 +287,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, field_tys: &Vec(l: &layout::Layout) -> bool { } } -/// Obtain the actual discriminant of a value. -pub fn trans_get_discr<'a, 'tcx>( - bcx: &Builder<'a, 'tcx>, - t: Ty<'tcx>, - scrutinee: ValueRef, - alignment: Alignment, - cast_to: Option, - range_assert: bool -) -> ValueRef { - debug!("trans_get_discr t: {:?}", t); - let l = bcx.ccx.layout_of(t); - - let val = match *l { - layout::CEnum { discr, min, max, .. 
} => { - load_discr(bcx, discr, scrutinee, alignment, min, max, range_assert) - } - layout::General { discr, ref variants, .. } => { - let ptr = bcx.struct_gep(scrutinee, 0); - load_discr(bcx, discr, ptr, alignment, - 0, variants.len() as u64 - 1, - range_assert) - } - layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0), - layout::RawNullablePointer { nndiscr, .. } => { - let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; - let discr = bcx.load(scrutinee, alignment.to_align()); - bcx.icmp(cmp, discr, C_null(val_ty(discr))) - } - layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { - struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee, alignment) - }, - _ => bug!("{} is not an enum", t) - }; - match cast_to { - None => val, - Some(llty) => bcx.intcast(val, llty, is_discr_signed(&l)) - } -} - -fn struct_wrapped_nullable_bitdiscr( - bcx: &Builder, - nndiscr: u64, - discrfield: &layout::FieldPath, - scrutinee: ValueRef, - alignment: Alignment, -) -> ValueRef { - let path = struct_llfields_path(discrfield); - let llptrptr = bcx.gepi(scrutinee, &path); - let llptr = bcx.load(llptrptr, alignment.to_align()); - let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; - bcx.icmp(cmp, llptr, C_null(val_ty(llptr))) -} - -/// Helper for cases where the discriminant is simply loaded. -fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef, - alignment: Alignment, min: u64, max: u64, - range_assert: bool) - -> ValueRef { - let llty = Type::from_integer(bcx.ccx, ity); - assert_eq!(val_ty(ptr), llty.ptr_to()); - let bits = ity.size().bits(); - assert!(bits <= 64); - let bits = bits as usize; - let mask = !0u64 >> (64 - bits); - // For a (max) discr of -1, max will be `-1 as usize`, which overflows. - // However, that is fine here (it would still represent the full range), - if max.wrapping_add(1) & mask == min & mask || !range_assert { - // i.e., if the range is everything. 
The lo==hi case would be - // rejected by the LLVM verifier (it would mean either an - // empty set, which is impossible, or the entire range of the - // type, which is pointless). - bcx.load(ptr, alignment.to_align()) - } else { - // llvm::ConstantRange can deal with ranges that wrap around, - // so an overflow on (max + 1) is fine. - bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True, - alignment.to_align()) - } -} - -/// Set the discriminant for a new value of the given case of the given -/// representation. -pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: u64) { - let l = bcx.ccx.layout_of(t); - match *l { - layout::CEnum{ discr, min, max, .. } => { - assert_discr_in_range(min, max, to); - bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64), - val, None); - } - layout::General{ discr, .. } => { - bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64), - bcx.struct_gep(val, 0), None); - } - layout::Univariant { .. } - | layout::UntaggedUnion { .. } - | layout::Vector { .. } => { - assert_eq!(to, 0); - } - layout::RawNullablePointer { nndiscr, .. } => { - if to != nndiscr { - let llptrty = val_ty(val).element_type(); - bcx.store(C_null(llptrty), val, None); - } - } - layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => { - if to != nndiscr { - if target_sets_discr_via_memset(bcx) { - // Issue #34427: As workaround for LLVM bug on - // ARM, use memset of 0 on whole struct rather - // than storing null to single target field. 
- let llptr = bcx.pointercast(val, Type::i8(bcx.ccx).ptr_to()); - let fill_byte = C_u8(bcx.ccx, 0); - let size = C_usize(bcx.ccx, nonnull.stride().bytes()); - let align = C_i32(bcx.ccx, nonnull.align.abi() as i32); - base::call_memset(bcx, llptr, fill_byte, size, align, false); - } else { - let path = struct_llfields_path(discrfield); - let llptrptr = bcx.gepi(val, &path); - let llptrty = val_ty(llptrptr).element_type(); - bcx.store(C_null(llptrty), llptrptr, None); - } - } - } - _ => bug!("Cannot handle {} represented as {:#?}", t, l) - } -} - -fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool { - bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" -} - pub fn assert_discr_in_range(min: D, max: D, discr: D) { if min <= max { assert!(min <= discr && discr <= max) @@ -453,45 +314,3 @@ pub fn assert_discr_in_range(min: D, max: D, discr: D) { // FIXME this utility routine should be somewhere more general #[inline] fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } - -/// Extract a field of a constant value, as appropriate for its -/// representation. -/// -/// (Not to be confused with `common::const_get_elt`, which operates on -/// raw LLVM-level structs and arrays.) -pub fn const_get_field<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, - val: ValueRef, - ix: usize) -> ValueRef { - let l = ccx.layout_of(t); - match *l { - layout::CEnum { .. } => bug!("element access in C-like enum const"), - layout::Univariant { ref variant, .. } => { - const_struct_field(val, variant.memory_index[ix] as usize) - } - layout::Vector { .. } => const_struct_field(val, ix), - layout::UntaggedUnion { .. } => const_struct_field(val, 0), - _ => bug!("{} does not have fields.", t) - } -} - -/// Extract field of struct-like const, skipping our alignment padding. -fn const_struct_field(val: ValueRef, ix: usize) -> ValueRef { - // Get the ix-th non-undef element of the struct. 
- let mut real_ix = 0; // actual position in the struct - let mut ix = ix; // logical index relative to real_ix - let mut field; - loop { - loop { - field = const_get_elt(val, &[real_ix]); - if !is_undef(field) { - break; - } - real_ix = real_ix + 1; - } - if ix == 0 { - return field; - } - ix = ix - 1; - real_ix = real_ix + 1; - } -} diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 2f1a95038eae5..daeb0dd680ff0 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -15,7 +15,6 @@ use libc; use llvm; use llvm::{ValueRef}; use abi::{Abi, FnType}; -use adt; use mir::lvalue::{LvalueRef, Alignment}; use base::*; use common::*; @@ -379,10 +378,10 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "discriminant_value" => { let val_ty = substs.type_at(0); + let adt_val = LvalueRef::new_sized_ty(llargs[0], val_ty, Alignment::AbiAligned); match val_ty.sty { ty::TyAdt(adt, ..) if adt.is_enum() => { - adt::trans_get_discr(bcx, val_ty, llargs[0], Alignment::AbiAligned, - Some(llret_ty), true) + adt_val.trans_get_discr(bcx, ret_ty) } _ => C_null(llret_ty) } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 6573e507bd325..67fdc1e640a95 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -462,8 +462,32 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } } mir::ProjectionElem::Field(ref field, _) => { - let llprojected = adt::const_get_field(self.ccx, tr_base.ty, base.llval, - field.index()); + // Extract field of struct-like const, skipping our alignment padding. + let mut ix = field.index(); + let layout = self.ccx.layout_of(tr_base.ty); + if let layout::Univariant { ref variant, .. } = *layout { + ix = variant.memory_index[ix] as usize; + } + + // Get the ix-th non-undef element of the struct. 
+ let mut real_ix = 0; // actual position in the struct + let mut ix = ix; // logical index relative to real_ix + let mut llprojected; + loop { + loop { + llprojected = const_get_elt(base.llval, &[real_ix]); + if !is_undef(llprojected) { + break; + } + real_ix = real_ix + 1; + } + if ix == 0 { + break; + } + ix = ix - 1; + real_ix = real_ix + 1; + } + let llextra = if !has_metadata { ptr::null_mut() } else { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index d939acaccd99c..5faaef6ebff42 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -8,15 +8,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::ValueRef; +use llvm::{self, ValueRef}; use rustc::ty::{self, Ty, TypeFoldable}; use rustc::ty::layout::{self, LayoutTyper}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; use adt; +use base; use builder::Builder; -use common::{self, CrateContext, C_usize}; +use common::{self, CrateContext, C_usize, C_u8, C_i32, C_int, C_null, val_ty}; use consts; use machine; use type_of; @@ -70,6 +71,10 @@ impl Alignment { } } +fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool { + bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" +} + #[derive(Copy, Clone, Debug)] pub struct LvalueRef<'tcx> { /// Pointer to the contents of the lvalue @@ -121,23 +126,56 @@ impl<'a, 'tcx> LvalueRef<'tcx> { !self.llextra.is_null() } - fn struct_field_ptr( - self, - bcx: &Builder<'a, 'tcx>, - st: &layout::Struct, - fields: &Vec>, - ix: usize, - needs_cast: bool - ) -> (ValueRef, Alignment) { - let fty = fields[ix]; + /// Access a field, at a point when the value's case is known. 
+ pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> (ValueRef, Alignment) { let ccx = bcx.ccx; + let mut l = ccx.layout_of(self.ty.to_ty(bcx.tcx())); + match self.ty { + LvalueTy::Ty { .. } => {} + LvalueTy::Downcast { variant_index, .. } => { + l = l.for_variant(variant_index) + } + } + let fty = l.field(ccx, ix).ty; + let mut ix = ix; + let st = match *l { + layout::Vector { .. } => { + return (bcx.struct_gep(self.llval, ix), self.alignment); + } + layout::UntaggedUnion { ref variants } => { + let ty = type_of::in_memory_type_of(ccx, fty); + return (bcx.pointercast(self.llval, ty.ptr_to()), + self.alignment | Alignment::from_packed(variants.packed)); + } + layout::RawNullablePointer { nndiscr, .. } | + layout::StructWrappedNullablePointer { nndiscr, .. } + if l.variant_index.unwrap() as u64 != nndiscr => { + // The unit-like case might have a nonzero number of unit-like fields. + // (e.d., Result of Either with (), as one side.) + let ty = type_of::type_of(ccx, fty); + assert_eq!(machine::llsize_of_alloc(ccx, ty), 0); + return (bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed); + } + layout::RawNullablePointer { .. } => { + let ty = type_of::type_of(ccx, fty); + return (bcx.pointercast(self.llval, ty.ptr_to()), self.alignment); + } + layout::Univariant { ref variant, .. } => variant, + layout::StructWrappedNullablePointer { ref nonnull, .. } => nonnull, + layout::General { ref variants, .. } => { + ix += 1; + &variants[l.variant_index.unwrap()] + } + _ => bug!("element access in type without elements: {} represented as {:#?}", l.ty, l) + }; let alignment = self.alignment | Alignment::from_packed(st.packed); - let llfields = adt::struct_llfields(ccx, fields, st); - let ptr_val = if needs_cast { - let real_ty = Type::struct_(ccx, &llfields[..], st.packed); - bcx.pointercast(self.llval, real_ty.ptr_to()) + let ptr_val = if let layout::General { discr, .. 
} = *l { + let variant_ty = Type::struct_(ccx, + &adt::struct_llfields(ccx, l.ty, l.variant_index.unwrap(), st, + Some(discr.to_ty(&bcx.tcx(), false))), st.packed); + bcx.pointercast(self.llval, variant_ty.ptr_to()) } else { self.llval }; @@ -147,7 +185,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // * Packed struct - There is no alignment padding // * Field is sized - pointer is properly aligned already if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || - bcx.ccx.shared().type_is_sized(fty) + ccx.shared().type_is_sized(fty) { return (bcx.struct_gep( ptr_val, adt::struct_llfields_index(st, ix)), alignment); @@ -189,7 +227,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let offset = st.offsets[ix].bytes(); - let unaligned_offset = C_usize(bcx.ccx, offset); + let unaligned_offset = C_usize(ccx, offset); // Get the alignment of the field let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); @@ -200,77 +238,130 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = bcx.sub(align, C_usize(bcx.ccx, 1)); + let align_sub_1 = bcx.sub(align, C_usize(ccx, 1u64)); let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), bcx.neg(align)); debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); // Cast and adjust pointer - let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx)); + let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(ccx)); let byte_ptr = bcx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty); + let ll_fty = type_of::in_memory_type_of(ccx, fty); debug!("struct_field_ptr: Field type is {:?}", ll_fty); (bcx.pointercast(byte_ptr, ll_fty.ptr_to()), alignment) } - /// Access a field, at a point when the value's case is known. - pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> (ValueRef, Alignment) { - let discr = match self.ty { - LvalueTy::Ty { .. 
} => 0, - LvalueTy::Downcast { variant_index, .. } => variant_index, - }; - let t = self.ty.to_ty(bcx.tcx()); - let l = bcx.ccx.layout_of(t); - // Note: if this ever needs to generate conditionals (e.g., if we - // decide to do some kind of cdr-coding-like non-unique repr - // someday), it will need to return a possibly-new bcx as well. - match *l { - layout::Univariant { ref variant, .. } => { - assert_eq!(discr, 0); - self.struct_field_ptr(bcx, &variant, - &adt::compute_fields(bcx.ccx, t, 0, false), ix, false) + // Double index to account for padding (FieldPath already uses `Struct::memory_index`) + fn gepi_struct_llfields_path(self, bcx: &Builder, discrfield: &layout::FieldPath) -> ValueRef { + let path = discrfield.iter().map(|&i| (i as usize) << 1).collect::>(); + bcx.gepi(self.llval, &path) + } + + /// Helper for cases where the discriminant is simply loaded. + fn load_discr(self, bcx: &Builder, ity: layout::Integer, ptr: ValueRef, + min: u64, max: u64) -> ValueRef { + let llty = Type::from_integer(bcx.ccx, ity); + assert_eq!(val_ty(ptr), llty.ptr_to()); + let bits = ity.size().bits(); + assert!(bits <= 64); + let bits = bits as usize; + let mask = !0u64 >> (64 - bits); + // For a (max) discr of -1, max will be `-1 as usize`, which overflows. + // However, that is fine here (it would still represent the full range), + if max.wrapping_add(1) & mask == min & mask { + // i.e., if the range is everything. The lo==hi case would be + // rejected by the LLVM verifier (it would mean either an + // empty set, which is impossible, or the entire range of the + // type, which is pointless). + bcx.load(ptr, self.alignment.to_align()) + } else { + // llvm::ConstantRange can deal with ranges that wrap around, + // so an overflow on (max + 1) is fine. + bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ llvm::True, + self.alignment.to_align()) + } + } + + /// Obtain the actual discriminant of a value. 
+ pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef { + let l = bcx.ccx.layout_of(self.ty.to_ty(bcx.tcx())); + + let val = match *l { + layout::CEnum { discr, min, max, .. } => { + self.load_discr(bcx, discr, self.llval, min, max) } - layout::Vector { count, .. } => { - assert_eq!(discr, 0); - assert!((ix as u64) < count); - (bcx.struct_gep(self.llval, ix), self.alignment) + layout::General { discr, ref variants, .. } => { + let ptr = bcx.struct_gep(self.llval, 0); + self.load_discr(bcx, discr, ptr, 0, variants.len() as u64 - 1) } - layout::General { discr: d, ref variants, .. } => { - let mut fields = adt::compute_fields(bcx.ccx, t, discr, false); - fields.insert(0, d.to_ty(&bcx.tcx(), false)); - self.struct_field_ptr(bcx, &variants[discr], &fields, ix + 1, true) + layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0), + layout::RawNullablePointer { nndiscr, .. } => { + let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; + let discr = bcx.load(self.llval, self.alignment.to_align()); + bcx.icmp(cmp, discr, C_null(val_ty(discr))) } - layout::UntaggedUnion { ref variants } => { - let fields = adt::compute_fields(bcx.ccx, t, 0, false); - let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]); - (bcx.pointercast(self.llval, ty.ptr_to()), - self.alignment | Alignment::from_packed(variants.packed)) + layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { + let llptrptr = self.gepi_struct_llfields_path(bcx, discrfield); + let llptr = bcx.load(llptrptr, self.alignment.to_align()); + let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; + bcx.icmp(cmp, llptr, C_null(val_ty(llptr))) + }, + _ => bug!("{} is not an enum", l.ty) + }; + let cast_to = type_of::immediate_type_of(bcx.ccx, cast_to); + bcx.intcast(val, cast_to, adt::is_discr_signed(&l)) + } + + /// Set the discriminant for a new value of the given case of the given + /// representation. 
+ pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) { + let l = bcx.ccx.layout_of(self.ty.to_ty(bcx.tcx())); + let to = l.ty.ty_adt_def().unwrap() + .discriminant_for_variant(bcx.tcx(), variant_index) + .to_u128_unchecked() as u64; + match *l { + layout::CEnum { discr, min, max, .. } => { + adt::assert_discr_in_range(min, max, to); + bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64), + self.llval, self.alignment.to_align()); } - layout::RawNullablePointer { nndiscr, .. } | - layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => { - let nullfields = adt::compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false); - // The unit-like case might have a nonzero number of unit-like fields. - // (e.d., Result of Either with (), as one side.) - let ty = type_of::type_of(bcx.ccx, nullfields[ix]); - assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0); - (bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed) + layout::General { discr, .. } => { + bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64), + bcx.struct_gep(self.llval, 0), self.alignment.to_align()); + } + layout::Univariant { .. } + | layout::UntaggedUnion { .. } + | layout::Vector { .. } => { + assert_eq!(to, 0); } layout::RawNullablePointer { nndiscr, .. } => { - let nnty = adt::compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; - assert_eq!(ix, 0); - assert_eq!(discr as u64, nndiscr); - let ty = type_of::type_of(bcx.ccx, nnty); - (bcx.pointercast(self.llval, ty.ptr_to()), self.alignment) + if to != nndiscr { + let llptrty = val_ty(self.llval).element_type(); + bcx.store(C_null(llptrty), self.llval, self.alignment.to_align()); + } } - layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - assert_eq!(discr as u64, nndiscr); - self.struct_field_ptr(bcx, &nonnull, - &adt::compute_fields(bcx.ccx, t, discr, false), ix, false) + layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. 
} => { + if to != nndiscr { + if target_sets_discr_via_memset(bcx) { + // Issue #34427: As workaround for LLVM bug on + // ARM, use memset of 0 on whole struct rather + // than storing null to single target field. + let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to()); + let fill_byte = C_u8(bcx.ccx, 0); + let size = C_usize(bcx.ccx, nonnull.stride().bytes()); + let align = C_i32(bcx.ccx, nonnull.align.abi() as i32); + base::call_memset(bcx, llptr, fill_byte, size, align, false); + } else { + let llptrptr = self.gepi_struct_llfields_path(bcx, discrfield); + let llptrty = val_ty(llptrptr).element_type(); + bcx.store(C_null(llptrty), llptrptr, self.alignment.to_align()); + } + } } - _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) + _ => bug!("Cannot handle {} represented as {:#?}", l.ty, l) } } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 7e187a85867cb..bc263fd60a25c 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -139,10 +139,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Aggregate(ref kind, ref operands) => { match **kind { mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => { - let discr = adt_def.discriminant_for_variant(bcx.tcx(), variant_index) - .to_u128_unchecked() as u64; - let dest_ty = dest.ty.to_ty(bcx.tcx()); - adt::trans_set_discr(&bcx, dest_ty, dest.llval, discr); + dest.trans_set_discr(&bcx, variant_index); for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. 
@@ -451,12 +448,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::Rvalue::Discriminant(ref lvalue) => { - let discr_lvalue = self.trans_lvalue(&bcx, lvalue); - let enum_ty = discr_lvalue.ty.to_ty(bcx.tcx()); let discr_ty = rvalue.ty(&*self.mir, bcx.tcx()); - let discr_type = type_of::immediate_type_of(bcx.ccx, discr_ty); - let discr = adt::trans_get_discr(&bcx, enum_ty, discr_lvalue.llval, - discr_lvalue.alignment, Some(discr_type), true); + let discr = self.trans_lvalue(&bcx, lvalue) + .trans_get_discr(&bcx, discr_ty); (bcx, OperandRef { val: OperandValue::Immediate(discr), ty: discr_ty diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index bbf661ae9a735..6e9b1f36c2cab 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -17,7 +17,6 @@ use builder::Builder; use super::MirContext; use super::LocalRef; -use super::super::adt; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_statement(&mut self, @@ -59,12 +58,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => { - let ty = self.monomorphized_lvalue_ty(lvalue); - let lvalue_transed = self.trans_lvalue(&bcx, lvalue); - adt::trans_set_discr(&bcx, - ty, - lvalue_transed.llval, - variant_index as u64); + self.trans_lvalue(&bcx, lvalue) + .trans_set_discr(&bcx, variant_index); bcx } mir::StatementKind::StorageLive(local) => { From 9deea47c9605f77d3d595744753704bfd74c0dc9 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sat, 18 Nov 2017 20:24:54 +0200 Subject: [PATCH 02/69] rustc_mir: always downcast enums, even if univariant. 
--- src/librustc/middle/mem_categorization.rs | 4 ++-- src/librustc/ty/mod.rs | 5 ----- src/librustc_const_eval/_match.rs | 6 +++--- src/librustc_const_eval/pattern.rs | 4 ++-- src/librustc_mir/build/matches/simplify.rs | 21 +++++++++------------ src/librustc_mir/build/matches/test.rs | 4 ++-- src/librustc_mir/hair/cx/mod.rs | 4 ---- src/librustc_mir/transform/deaggregator.rs | 4 ++-- src/librustc_mir/transform/type_check.rs | 2 +- src/librustc_mir/util/elaborate_drops.rs | 2 +- src/test/run-pass/enum-univariant-repr.rs | 13 +++++++++++++ 11 files changed, 35 insertions(+), 34 deletions(-) diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs index 2c6bcc654a532..c89d67d4aab86 100644 --- a/src/librustc/middle/mem_categorization.rs +++ b/src/librustc/middle/mem_categorization.rs @@ -210,7 +210,7 @@ impl<'tcx> cmt_<'tcx> { adt_def.variant_with_id(variant_did) } _ => { - assert!(adt_def.is_univariant()); + assert_eq!(adt_def.variants.len(), 1); &adt_def.variants[0] } }; @@ -1096,7 +1096,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { -> cmt<'tcx> { // univariant enums do not need downcasts let base_did = self.tcx.parent_def_id(variant_did).unwrap(); - if !self.tcx.adt_def(base_did).is_univariant() { + if self.tcx.adt_def(base_did).variants.len() != 1 { let base_ty = base_cmt.ty; let ret = Rc::new(cmt_ { id: node.id(), diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index a584f2ce1919a..dac200efb39e3 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -1674,11 +1674,6 @@ impl<'a, 'gcx, 'tcx> AdtDef { self.variants.iter().flat_map(|v| v.fields.iter()) } - #[inline] - pub fn is_univariant(&self) -> bool { - self.variants.len() == 1 - } - pub fn is_payloadfree(&self) -> bool { !self.variants.is_empty() && self.variants.iter().all(|v| v.fields.is_empty()) diff --git a/src/librustc_const_eval/_match.rs b/src/librustc_const_eval/_match.rs index 6ebe3c679667f..33d9bfa6e6b9c 100644 
--- a/src/librustc_const_eval/_match.rs +++ b/src/librustc_const_eval/_match.rs @@ -255,7 +255,7 @@ impl<'tcx> Constructor<'tcx> { match self { &Variant(vid) => adt.variant_index_with_id(vid), &Single => { - assert_eq!(adt.variants.len(), 1); + assert!(!adt.is_enum()); 0 } _ => bug!("bad constructor {:?} for adt {:?}", self, adt) @@ -356,7 +356,7 @@ impl<'tcx> Witness<'tcx> { }).collect(); if let ty::TyAdt(adt, substs) = ty.sty { - if adt.variants.len() > 1 { + if adt.is_enum() { PatternKind::Variant { adt_def: adt, substs, @@ -444,7 +444,7 @@ fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, (0..pcx.max_slice_length+1).map(|length| Slice(length)).collect() } } - ty::TyAdt(def, substs) if def.is_enum() && def.variants.len() != 1 => { + ty::TyAdt(def, substs) if def.is_enum() => { def.variants.iter() .filter(|v| !cx.is_variant_uninhabited(v, substs)) .map(|v| Variant(v.did)) diff --git a/src/librustc_const_eval/pattern.rs b/src/librustc_const_eval/pattern.rs index d7a16e9d2fc75..cfbb9623f7dc9 100644 --- a/src/librustc_const_eval/pattern.rs +++ b/src/librustc_const_eval/pattern.rs @@ -150,7 +150,7 @@ impl<'tcx> fmt::Display for Pattern<'tcx> { Some(&adt_def.variants[variant_index]) } _ => if let ty::TyAdt(adt, _) = self.ty.sty { - if adt.is_univariant() { + if !adt.is_enum() { Some(&adt.variants[0]) } else { None @@ -598,7 +598,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) 
=> { let enum_id = self.tcx.parent_def_id(variant_id).unwrap(); let adt_def = self.tcx.adt_def(enum_id); - if adt_def.variants.len() > 1 { + if adt_def.is_enum() { let substs = match ty.sty { ty::TyAdt(_, substs) | ty::TyFnDef(_, substs) => substs, diff --git a/src/librustc_mir/build/matches/simplify.rs b/src/librustc_mir/build/matches/simplify.rs index 9b3f16f1ab432..a7599f19244c2 100644 --- a/src/librustc_mir/build/matches/simplify.rs +++ b/src/librustc_mir/build/matches/simplify.rs @@ -98,19 +98,16 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } PatternKind::Variant { adt_def, substs, variant_index, ref subpatterns } => { - if self.hir.tcx().sess.features.borrow().never_type { - let irrefutable = adt_def.variants.iter().enumerate().all(|(i, v)| { - i == variant_index || { - self.hir.tcx().is_variant_uninhabited_from_all_modules(v, substs) - } - }); - if irrefutable { - let lvalue = match_pair.lvalue.downcast(adt_def, variant_index); - candidate.match_pairs.extend(self.field_match_pairs(lvalue, subpatterns)); - Ok(()) - } else { - Err(match_pair) + let irrefutable = adt_def.variants.iter().enumerate().all(|(i, v)| { + i == variant_index || { + self.hir.tcx().sess.features.borrow().never_type && + self.hir.tcx().is_variant_uninhabited_from_all_modules(v, substs) } + }); + if irrefutable { + let lvalue = match_pair.lvalue.downcast(adt_def, variant_index); + candidate.match_pairs.extend(self.field_match_pairs(lvalue, subpatterns)); + Ok(()) } else { Err(match_pair) } diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index 1cf35af3a9e1b..02a7bc83f6ee8 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -39,7 +39,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { span: match_pair.pattern.span, kind: TestKind::Switch { adt_def: adt_def.clone(), - variants: BitVector::new(self.hir.num_variants(adt_def)), + variants: BitVector::new(adt_def.variants.len()), }, } } @@ -184,7 
+184,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { match test.kind { TestKind::Switch { adt_def, ref variants } => { // Variants is a BitVec of indexes into adt_def.variants. - let num_enum_variants = self.hir.num_variants(adt_def); + let num_enum_variants = adt_def.variants.len(); let used_variants = variants.count(); let mut otherwise_block = None; let mut target_blocks = Vec::with_capacity(num_enum_variants); diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index 50264238aacb2..b1f4b849b8928 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -213,10 +213,6 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { bug!("found no method `{}` in `{:?}`", method_name, trait_def_id); } - pub fn num_variants(&mut self, adt_def: &ty::AdtDef) -> usize { - adt_def.variants.len() - } - pub fn all_fields(&mut self, adt_def: &ty::AdtDef, variant_index: usize) -> Vec { (0..adt_def.variants[variant_index].fields.len()) .map(Field::new) diff --git a/src/librustc_mir/transform/deaggregator.rs b/src/librustc_mir/transform/deaggregator.rs index 61b4716c56409..e2ecd4839fb48 100644 --- a/src/librustc_mir/transform/deaggregator.rs +++ b/src/librustc_mir/transform/deaggregator.rs @@ -67,7 +67,7 @@ impl MirPass for Deaggregator { let ty = variant_def.fields[i].ty(tcx, substs); let rhs = Rvalue::Use(op.clone()); - let lhs_cast = if adt_def.variants.len() > 1 { + let lhs_cast = if adt_def.is_enum() { Lvalue::Projection(Box::new(LvalueProjection { base: lhs.clone(), elem: ProjectionElem::Downcast(adt_def, variant), @@ -89,7 +89,7 @@ impl MirPass for Deaggregator { } // if the aggregate was an enum, we need to set the discriminant - if adt_def.variants.len() > 1 { + if adt_def.is_enum() { let set_discriminant = Statement { kind: StatementKind::SetDiscriminant { lvalue: lhs.clone(), diff --git a/src/librustc_mir/transform/type_check.rs b/src/librustc_mir/transform/type_check.rs index 837c3d42fe837..30f3a0f3186ca 100644 --- 
a/src/librustc_mir/transform/type_check.rs +++ b/src/librustc_mir/transform/type_check.rs @@ -344,7 +344,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { variant_index, } => (&adt_def.variants[variant_index], substs), LvalueTy::Ty { ty } => match ty.sty { - ty::TyAdt(adt_def, substs) if adt_def.is_univariant() => { + ty::TyAdt(adt_def, substs) if !adt_def.is_enum() => { (&adt_def.variants[0], substs) } ty::TyClosure(def_id, substs) => { diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index 3b9772079adb9..1852712a08375 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -384,7 +384,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> substs: &'tcx Substs<'tcx>) -> (BasicBlock, Unwind) { let (succ, unwind) = self.drop_ladder_bottom(); - if adt.variants.len() == 1 { + if !adt.is_enum() { let fields = self.move_paths_for_fields( self.lvalue, self.path, diff --git a/src/test/run-pass/enum-univariant-repr.rs b/src/test/run-pass/enum-univariant-repr.rs index ef4cc60bf0da1..17d614b54969c 100644 --- a/src/test/run-pass/enum-univariant-repr.rs +++ b/src/test/run-pass/enum-univariant-repr.rs @@ -22,6 +22,11 @@ enum UnivariantWithoutDescr { Y } +#[repr(u8)] +enum UnivariantWithData { + Z(u8), +} + pub fn main() { { assert_eq!(4, mem::size_of::()); @@ -44,4 +49,12 @@ pub fn main() { // check it has the same memory layout as u16 assert_eq!(&[descr, descr, descr], ints); } + + { + assert_eq!(2, mem::size_of::()); + + match UnivariantWithData::Z(4) { + UnivariantWithData::Z(x) => assert_eq!(x, 4), + } + } } From f44b0991879f5e379573d3f2fa1d702c923729f9 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Thu, 1 Jun 2017 21:50:53 +0300 Subject: [PATCH 03/69] rustc_trans: avoid working with sizes/offsets and alignments as integers. 
--- src/librustc/ty/layout.rs | 142 +++++---- src/librustc_llvm/ffi.rs | 23 +- src/librustc_trans/abi.rs | 138 +++++---- src/librustc_trans/adt.rs | 98 +++--- src/librustc_trans/base.rs | 51 +--- src/librustc_trans/builder.rs | 71 +++-- src/librustc_trans/cabi_aarch64.rs | 8 +- src/librustc_trans/cabi_arm.rs | 8 +- src/librustc_trans/cabi_asmjs.rs | 2 +- src/librustc_trans/cabi_mips.rs | 29 +- src/librustc_trans/cabi_mips64.rs | 29 +- src/librustc_trans/cabi_powerpc.rs | 28 +- src/librustc_trans/cabi_powerpc64.rs | 8 +- src/librustc_trans/cabi_s390x.rs | 12 +- src/librustc_trans/cabi_sparc.rs | 31 +- src/librustc_trans/cabi_sparc64.rs | 8 +- src/librustc_trans/cabi_x86.rs | 12 +- src/librustc_trans/cabi_x86_64.rs | 38 +-- src/librustc_trans/cabi_x86_win64.rs | 8 +- src/librustc_trans/common.rs | 10 +- src/librustc_trans/consts.rs | 19 +- src/librustc_trans/debuginfo/metadata.rs | 364 +++++++++-------------- src/librustc_trans/debuginfo/mod.rs | 2 +- src/librustc_trans/debuginfo/utils.rs | 13 - src/librustc_trans/glue.rs | 14 +- src/librustc_trans/intrinsic.rs | 189 ++++++------ src/librustc_trans/lib.rs | 1 - src/librustc_trans/machine.rs | 79 ----- src/librustc_trans/meth.rs | 9 +- src/librustc_trans/mir/block.rs | 112 ++++--- src/librustc_trans/mir/constant.rs | 137 ++++----- src/librustc_trans/mir/lvalue.rs | 44 +-- src/librustc_trans/mir/mod.rs | 11 +- src/librustc_trans/mir/operand.rs | 13 +- src/librustc_trans/mir/rvalue.rs | 33 +- src/librustc_trans/mir/statement.rs | 22 +- src/librustc_trans/type_of.rs | 26 +- src/rustllvm/RustWrapper.cpp | 7 +- 38 files changed, 863 insertions(+), 986 deletions(-) delete mode 100644 src/librustc_trans/machine.rs diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 491fa2a240cce..d83f7e661baa3 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -24,7 +24,7 @@ use std::fmt; use std::i64; use std::iter; use std::mem; -use std::ops::Deref; +use std::ops::{Deref, Add, Sub, Mul, 
AddAssign}; use ich::StableHashingContext; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, @@ -203,6 +203,18 @@ impl TargetDataLayout { bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits) } } + + pub fn vector_align(&self, vec_size: Size) -> Align { + for &(size, align) in &self.vector_align { + if size == vec_size { + return align; + } + } + // Default to natural alignment, which is what LLVM does. + // That is, use the size, rounded up to a power of 2. + let align = vec_size.bytes().next_power_of_two(); + Align::from_bytes(align, align).unwrap() + } } pub trait HasDataLayout: Copy { @@ -236,7 +248,8 @@ pub struct Size { impl Size { pub fn from_bits(bits: u64) -> Size { - Size::from_bytes((bits + 7) / 8) + // Avoid potential overflow from `bits + 7`. + Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8) } pub fn from_bytes(bytes: u64) -> Size { @@ -261,6 +274,11 @@ impl Size { Size::from_bytes((self.bytes() + mask) & !mask) } + pub fn is_abi_aligned(self, align: Align) -> bool { + let mask = align.abi() - 1; + self.bytes() & mask == 0 + } + pub fn checked_add(self, offset: Size, cx: C) -> Option { let dl = cx.data_layout(); @@ -278,8 +296,6 @@ impl Size { pub fn checked_mul(self, count: u64, cx: C) -> Option { let dl = cx.data_layout(); - // Each Size is less than dl.obj_size_bound(), so the sum is - // also less than 1 << 62 (and therefore can't overflow). match self.bytes().checked_mul(count) { Some(bytes) if bytes < dl.obj_size_bound() => { Some(Size::from_bytes(bytes)) @@ -289,6 +305,46 @@ impl Size { } } +// Panicking addition, subtraction and multiplication for convenience. +// Avoid during layout computation, return `LayoutError` instead. + +impl Add for Size { + type Output = Size; + fn add(self, other: Size) -> Size { + // Each Size is less than 1 << 61, so the sum is + // less than 1 << 62 (and therefore can't overflow). 
+ Size::from_bytes(self.bytes() + other.bytes()) + } +} + +impl Sub for Size { + type Output = Size; + fn sub(self, other: Size) -> Size { + // Each Size is less than 1 << 61, so an underflow + // would result in a value larger than 1 << 61, + // which Size::from_bytes will catch for us. + Size::from_bytes(self.bytes() - other.bytes()) + } +} + +impl Mul for Size { + type Output = Size; + fn mul(self, count: u64) -> Size { + match self.bytes().checked_mul(count) { + Some(bytes) => Size::from_bytes(bytes), + None => { + bug!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count) + } + } + } +} + +impl AddAssign for Size { + fn add_assign(&mut self, other: Size) { + *self = *self + other; + } +} + /// Alignment of a type in bytes, both ABI-mandated and preferred. /// Each field is a power of two, giving the alignment a maximum /// value of 2^(2^8 - 1), which is limited by LLVM to a i32, with @@ -301,7 +357,8 @@ pub struct Align { impl Align { pub fn from_bits(abi: u64, pref: u64) -> Result { - Align::from_bytes((abi + 7) / 8, (pref + 7) / 8) + Align::from_bytes(Size::from_bits(abi).bytes(), + Size::from_bits(pref).bytes()) } pub fn from_bytes(abi: u64, pref: u64) -> Result { @@ -340,6 +397,14 @@ impl Align { 1 << self.pref } + pub fn abi_bits(self) -> u64 { + self.abi() * 8 + } + + pub fn pref_bits(self) -> u64 { + self.pref() * 8 + } + pub fn min(self, other: Align) -> Align { Align { abi: cmp::min(self.abi, other.abi), @@ -366,7 +431,7 @@ pub enum Integer { I128, } -impl Integer { +impl<'a, 'tcx> Integer { pub fn size(&self) -> Size { match *self { I1 => Size::from_bits(1), @@ -391,8 +456,7 @@ impl Integer { } } - pub fn to_ty<'a, 'tcx>(&self, tcx: &TyCtxt<'a, 'tcx, 'tcx>, - signed: bool) -> Ty<'tcx> { + pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> { match (*self, signed) { (I1, false) => tcx.types.u8, (I8, false) => tcx.types.u8, @@ -467,12 +531,12 @@ impl Integer { /// signed discriminant range and #[repr] attribute. 
/// N.B.: u64 values above i64::MAX will be treated as signed, but /// that shouldn't affect anything, other than maybe debuginfo. - fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - repr: &ReprOptions, - min: i64, - max: i64) - -> (Integer, bool) { + fn repr_discr(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>, + repr: &ReprOptions, + min: i64, + max: i64) + -> (Integer, bool) { // Theoretically, negative values could be larger in unsigned representation // than the unsigned representation of the signed minimum. However, if there // are any negative values, the only valid unsigned representation is u64 @@ -898,16 +962,6 @@ impl<'a, 'tcx> Struct { } Ok(None) } - - pub fn over_align(&self) -> Option { - let align = self.align.abi(); - let primitive_align = self.primitive_align.abi(); - if align > primitive_align { - Some(align as u32) - } else { - None - } - } } /// An untagged union. @@ -981,16 +1035,6 @@ impl<'a, 'tcx> Union { pub fn stride(&self) -> Size { self.min_size.abi_align(self.align) } - - pub fn over_align(&self) -> Option { - let align = self.align.abi(); - let primitive_align = self.primitive_align.abi(); - if align > primitive_align { - Some(align as u32) - } else { - None - } - } } /// The first half of a fat pointer. @@ -1607,9 +1651,8 @@ impl<'a, 'tcx> Layout { FatPointer { metadata, .. } => { // Effectively a (ptr, meta) tuple. - Pointer.size(dl).abi_align(metadata.align(dl)) - .checked_add(metadata.size(dl), dl).unwrap() - .abi_align(self.align(dl)) + (Pointer.size(dl).abi_align(metadata.align(dl)) + + metadata.size(dl)).abi_align(self.align(dl)) } CEnum { discr, .. } => Int(discr).size(dl), @@ -1638,15 +1681,7 @@ impl<'a, 'tcx> Layout { None => bug!("Layout::align({:?}): {} * {} overflowed", self, elem_size.bytes(), count) }; - for &(size, align) in &dl.vector_align { - if size == vec_size { - return align; - } - } - // Default to natural alignment, which is what LLVM does. 
- // That is, use the size, rounded up to a power of 2. - let align = vec_size.bytes().next_power_of_two(); - Align::from_bytes(align, align).unwrap() + dl.vector_align(vec_size) } FatPointer { metadata, .. } => { @@ -1666,7 +1701,7 @@ impl<'a, 'tcx> Layout { } /// Returns alignment before repr alignment is applied - pub fn primitive_align(&self, dl: &TargetDataLayout) -> Align { + pub fn primitive_align(&self, cx: C) -> Align { match *self { Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align, Univariant { ref variant, .. } | @@ -1674,18 +1709,7 @@ impl<'a, 'tcx> Layout { variant.primitive_align }, - _ => self.align(dl) - } - } - - /// Returns repr alignment if it is greater than the primitive alignment. - pub fn over_align(&self, dl: &TargetDataLayout) -> Option { - let align = self.align(dl); - let primitive_align = self.primitive_align(dl); - if align.abi() > primitive_align.abi() { - Some(align.abi() as u32) - } else { - None + _ => self.align(cx.data_layout()) } } diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index 24c3963fbc4b3..48f8094f98d87 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -1205,15 +1205,13 @@ extern "C" { pub fn LLVMRustBuildAtomicLoad(B: BuilderRef, PointerVal: ValueRef, Name: *const c_char, - Order: AtomicOrdering, - Alignment: c_uint) + Order: AtomicOrdering) -> ValueRef; pub fn LLVMRustBuildAtomicStore(B: BuilderRef, Val: ValueRef, Ptr: ValueRef, - Order: AtomicOrdering, - Alignment: c_uint) + Order: AtomicOrdering) -> ValueRef; pub fn LLVMRustBuildAtomicCmpXchg(B: BuilderRef, @@ -1247,23 +1245,6 @@ extern "C" { /// Creates target data from a target layout string. pub fn LLVMCreateTargetData(StringRep: *const c_char) -> TargetDataRef; - /// Number of bytes clobbered when doing a Store to *T. - pub fn LLVMSizeOfTypeInBits(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong; - - /// Distance between successive elements in an array of T. Includes ABI padding. 
- pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong; - - /// Returns the preferred alignment of a type. - pub fn LLVMPreferredAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint; - /// Returns the minimum alignment of a type. - pub fn LLVMABIAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint; - - /// Computes the byte offset of the indexed struct element for a - /// target. - pub fn LLVMOffsetOfElement(TD: TargetDataRef, - StructTy: TypeRef, - Element: c_uint) - -> c_ulonglong; /// Disposes target data. pub fn LLVMDisposeTargetData(TD: TargetDataRef); diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 6df40c34ec54a..ffbc4f82bca9e 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -30,17 +30,16 @@ use cabi_sparc64; use cabi_nvptx; use cabi_nvptx64; use cabi_hexagon; -use machine::llalign_of_min; use type_::Type; use type_of; use rustc::hir; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Layout, LayoutTyper, TyLayout, Size}; +use rustc::ty::layout::{self, Align, Layout, Size, TyLayout}; +use rustc::ty::layout::{HasDataLayout, LayoutTyper}; use rustc_back::PanicStrategy; use libc::c_uint; -use std::cmp; use std::iter; pub use syntax::abi::Abi; @@ -108,8 +107,8 @@ impl ArgAttributes { self } - pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self { - self.dereferenceable_bytes = bytes; + pub fn set_dereferenceable(&mut self, size: Size) -> &mut Self { + self.dereferenceable_bytes = size.bytes(); self } @@ -174,7 +173,32 @@ impl Reg { } impl Reg { - fn llvm_type(&self, ccx: &CrateContext) -> Type { + pub fn align(&self, ccx: &CrateContext) -> Align { + let dl = ccx.data_layout(); + match self.kind { + RegKind::Integer => { + match self.size.bits() { + 1 => dl.i1_align, + 2...8 => dl.i8_align, + 9...16 => dl.i16_align, + 17...32 => dl.i32_align, + 33...64 => dl.i64_align, + 65...128 => dl.i128_align, + _ => bug!("unsupported integer: {:?}", self) + } + } + RegKind::Float 
=> { + match self.size.bits() { + 32 => dl.f32_align, + 64 => dl.f64_align, + _ => bug!("unsupported float: {:?}", self) + } + } + RegKind::Vector => dl.vector_align(self.size) + } + } + + pub fn llvm_type(&self, ccx: &CrateContext) -> Type { match self.kind { RegKind::Integer => Type::ix(ccx, self.size.bits()), RegKind::Float => { @@ -193,7 +217,7 @@ impl Reg { /// An argument passed entirely registers with the /// same kind (e.g. HFA / HVA on PPC64 and AArch64). -#[derive(Copy, Clone)] +#[derive(Clone, Copy, Debug)] pub struct Uniform { pub unit: Reg, @@ -216,7 +240,11 @@ impl From for Uniform { } impl Uniform { - fn llvm_type(&self, ccx: &CrateContext) -> Type { + pub fn align(&self, ccx: &CrateContext) -> Align { + self.unit.align(ccx) + } + + pub fn llvm_type(&self, ccx: &CrateContext) -> Type { let llunit = self.unit.llvm_type(ccx); if self.total <= self.unit.size { @@ -328,11 +356,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { } // Keep track of the offset (without padding). - let size = field.size(ccx); - match unaligned_offset.checked_add(size, ccx) { - Some(offset) => unaligned_offset = offset, - None => return None - } + unaligned_offset += field.size(ccx); } // There needs to be no padding. 
@@ -387,6 +411,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { } } +#[derive(Clone, Copy, Debug)] pub enum CastTarget { Uniform(Uniform), Pair(Reg, Reg) @@ -405,7 +430,28 @@ impl From for CastTarget { } impl CastTarget { - fn llvm_type(&self, ccx: &CrateContext) -> Type { + pub fn size(&self, ccx: &CrateContext) -> Size { + match *self { + CastTarget::Uniform(u) => u.total, + CastTarget::Pair(a, b) => { + (a.size.abi_align(a.align(ccx)) + b.size) + .abi_align(self.align(ccx)) + } + } + } + + pub fn align(&self, ccx: &CrateContext) -> Align { + match *self { + CastTarget::Uniform(u) => u.align(ccx), + CastTarget::Pair(a, b) => { + ccx.data_layout().aggregate_align + .max(a.align(ccx)) + .max(b.align(ccx)) + } + } + } + + pub fn llvm_type(&self, ccx: &CrateContext) -> Type { match *self { CastTarget::Uniform(u) => u.llvm_type(ccx), CastTarget::Pair(a, b) => { @@ -426,11 +472,11 @@ impl CastTarget { pub struct ArgType<'tcx> { kind: ArgKind, pub layout: TyLayout<'tcx>, - /// Coerced LLVM Type - pub cast: Option, - /// Dummy argument, which is emitted before the real argument - pub pad: Option, - /// LLVM attributes of argument + /// Cast target, either a single uniform or a pair of registers. + pub cast: Option, + /// Dummy argument, which is emitted before the real argument. + pub pad: Option, + /// Attributes of argument. pub attrs: ArgAttributes } @@ -451,14 +497,12 @@ impl<'a, 'tcx> ArgType<'tcx> { // Wipe old attributes, likely not valid through indirection. self.attrs = ArgAttributes::default(); - let llarg_sz = self.layout.size(ccx).bytes(); - // For non-immediate arguments the callee gets its own copy of // the value on the stack, so there are no aliases. 
It's also // program-invisible so can't possibly capture self.attrs.set(ArgAttribute::NoAlias) .set(ArgAttribute::NoCapture) - .set_dereferenceable(llarg_sz); + .set_dereferenceable(self.layout.size(ccx)); self.kind = ArgKind::Indirect; } @@ -500,12 +544,12 @@ impl<'a, 'tcx> ArgType<'tcx> { } } - pub fn cast_to>(&mut self, ccx: &CrateContext, target: T) { - self.cast = Some(target.into().llvm_type(ccx)); + pub fn cast_to>(&mut self, target: T) { + self.cast = Some(target.into()); } - pub fn pad_with(&mut self, ccx: &CrateContext, reg: Reg) { - self.pad = Some(reg.llvm_type(ccx)); + pub fn pad_with(&mut self, reg: Reg) { + self.pad = Some(reg); } pub fn is_indirect(&self) -> bool { @@ -533,16 +577,14 @@ impl<'a, 'tcx> ArgType<'tcx> { let ccx = bcx.ccx; if self.is_indirect() { let llsz = C_usize(ccx, self.layout.size(ccx).bytes()); - let llalign = self.layout.align(ccx).abi(); - base::call_memcpy(bcx, dst, val, llsz, llalign as u32); + base::call_memcpy(bcx, dst, val, llsz, self.layout.align(ccx)); } else if let Some(ty) = self.cast { // FIXME(eddyb): Figure out when the simpler Store is safe, clang // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_dst = bcx.pointercast(dst, ty.ptr_to()); - let llalign = self.layout.align(ccx).abi(); - bcx.store(val, cast_dst, Some(llalign as u32)); + let cast_dst = bcx.pointercast(dst, ty.llvm_type(ccx).ptr_to()); + bcx.store(val, cast_dst, Some(self.layout.align(ccx))); } else { // The actual return type is a struct, but the ABI // adaptation code has cast it into some scalar type. The @@ -559,8 +601,9 @@ impl<'a, 'tcx> ArgType<'tcx> { // bitcasting to the struct type yields invalid cast errors. // We instead thus allocate some scratch space... 
- let llscratch = bcx.alloca(ty, "abi_cast", None); - base::Lifetime::Start.call(bcx, llscratch); + let llscratch = bcx.alloca(ty.llvm_type(ccx), "abi_cast", None); + let scratch_size = ty.size(ccx); + bcx.lifetime_start(llscratch, scratch_size); // ...where we first store the value... bcx.store(val, llscratch, None); @@ -570,10 +613,9 @@ impl<'a, 'tcx> ArgType<'tcx> { bcx.pointercast(dst, Type::i8p(ccx)), bcx.pointercast(llscratch, Type::i8p(ccx)), C_usize(ccx, self.layout.size(ccx).bytes()), - cmp::min(self.layout.align(ccx).abi() as u32, - llalign_of_min(ccx, ty))); + self.layout.align(ccx).min(ty.align(ccx))); - base::Lifetime::End.call(bcx, llscratch); + bcx.lifetime_end(llscratch, scratch_size); } } else { if self.layout.ty == ccx.tcx().types.bool { @@ -840,7 +882,7 @@ impl<'a, 'tcx> FnType<'tcx> { // Replace newtypes with their inner-most type. if unit.size == size { // Needs a cast as we've unpacked a newtype. - arg.cast_to(ccx, unit); + arg.cast_to(unit); return; } @@ -850,7 +892,7 @@ impl<'a, 'tcx> FnType<'tcx> { // FIXME(eddyb) This should be using Uniform instead of a pair, // but the resulting [2 x float/double] breaks emscripten. // See https://github.com/kripken/emscripten-fastcomp/issues/178. - arg.cast_to(ccx, CastTarget::Pair(unit, unit)); + arg.cast_to(CastTarget::Pair(unit, unit)); return; } } @@ -862,7 +904,7 @@ impl<'a, 'tcx> FnType<'tcx> { // We want to pass small aggregates as immediates, but using // a LLVM aggregate type for this leads to bad optimizations, // so we pick an appropriately sized integer type instead. 
- arg.cast_to(ccx, Reg { + arg.cast_to(Reg { kind: RegKind::Integer, size }); @@ -931,10 +973,10 @@ impl<'a, 'tcx> FnType<'tcx> { } else if self.ret.is_indirect() { llargument_tys.push(self.ret.memory_ty(ccx).ptr_to()); Type::void(ccx) + } else if let Some(cast) = self.ret.cast { + cast.llvm_type(ccx) } else { - self.ret.cast.unwrap_or_else(|| { - type_of::immediate_type_of(ccx, self.ret.layout.ty) - }) + type_of::immediate_type_of(ccx, self.ret.layout.ty) }; for arg in &self.args { @@ -943,15 +985,15 @@ impl<'a, 'tcx> FnType<'tcx> { } // add padding if let Some(ty) = arg.pad { - llargument_tys.push(ty); + llargument_tys.push(ty.llvm_type(ccx)); } let llarg_ty = if arg.is_indirect() { arg.memory_ty(ccx).ptr_to() + } else if let Some(cast) = arg.cast { + cast.llvm_type(ccx) } else { - arg.cast.unwrap_or_else(|| { - type_of::immediate_type_of(ccx, arg.layout.ty) - }) + type_of::immediate_type_of(ccx, arg.layout.ty) }; llargument_tys.push(llarg_ty); @@ -998,7 +1040,3 @@ impl<'a, 'tcx> FnType<'tcx> { } } } - -pub fn align_up_to(off: u64, a: u64) -> u64 { - (off + a - 1) / a * a -} diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index cdf66a0835df0..b5b90753553f3 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -42,10 +42,9 @@ //! taken to it, implementing them for Rust seems difficult. use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::{self, Align, HasDataLayout, LayoutTyper, Size}; use context::CrateContext; -use machine; use monomorphize; use type_::Type; use type_of; @@ -134,9 +133,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } layout::UntaggedUnion { ref variants, .. }=> { // Use alignment-sized ints to fill all the union storage. 
- let size = variants.stride().bytes(); - let align = variants.align.abi(); - let fill = union_fill(cx, size, align); + let fill = union_fill(cx, variants.stride(), variants.align); match name { None => { Type::struct_(cx, &[fill], variants.packed) @@ -159,22 +156,18 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // So we start with the discriminant, pad it up to the alignment with // more of its own type, then use alignment-sized ints to get the rest // of the size. - let size = size.bytes(); - let align = align.abi(); - let primitive_align = primitive_align.abi(); - assert!(align <= ::std::u32::MAX as u64); let discr_ty = Type::from_integer(cx, discr); let discr_size = discr.size().bytes(); - let padded_discr_size = roundup(discr_size, align as u32); - let variant_part_size = size-padded_discr_size; - let variant_fill = union_fill(cx, variant_part_size, primitive_align); + let padded_discr_size = discr.size().abi_align(align); + let variant_part_size = size - padded_discr_size; - assert_eq!(machine::llalign_of_min(cx, variant_fill), primitive_align as u32); - assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly - let fields: Vec = - [discr_ty, - Type::array(&discr_ty, (padded_discr_size - discr_size)/discr_size), - variant_fill].iter().cloned().collect(); + // Ensure discr_ty can fill pad evenly + assert_eq!(padded_discr_size.bytes() % discr_size, 0); + let fields = [ + discr_ty, + Type::array(&discr_ty, padded_discr_size.bytes() / discr_size - 1), + union_fill(cx, variant_part_size, primitive_align) + ]; match name { None => { Type::struct_(cx, &fields, false) @@ -190,17 +183,19 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } -fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type { - assert_eq!(size%align, 0); - assert_eq!(align.count_ones(), 1, "Alignment must be a power fof 2. 
Got {}", align); - let align_units = size/align; - let layout_align = layout::Align::from_bytes(align, align).unwrap(); - if let Some(ity) = layout::Integer::for_abi_align(cx, layout_align) { - Type::array(&Type::from_integer(cx, ity), align_units) +fn union_fill(cx: &CrateContext, size: Size, align: Align) -> Type { + let abi_align = align.abi(); + let elem_ty = if let Some(ity) = layout::Integer::for_abi_align(cx, align) { + Type::from_integer(cx, ity) } else { - Type::array(&Type::vector(&Type::i32(cx), align/4), - align_units) - } + let vec_align = cx.data_layout().vector_align(Size::from_bytes(abi_align)); + assert_eq!(vec_align.abi(), abi_align); + Type::vector(&Type::i32(cx), abi_align / 4) + }; + + let size = size.bytes(); + assert_eq!(size % abi_align, 0); + Type::array(&elem_ty, size / abi_align) } // Lookup `Struct::memory_index` and double it to account for padding @@ -231,7 +226,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }; debug!("struct_llfields: variant: {:?}", variant); let mut first_field = true; - let mut min_offset = 0; + let mut offset = Size::from_bytes(0); let mut result: Vec = Vec::with_capacity(field_count * 2); let field_iter = variant.field_index_by_increasing_offset().map(|i| { (i, match t.sty { @@ -249,48 +244,47 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, cx.tcx().normalize_associated_type(&ty) }, _ => bug!() - }, variant.offsets[i as usize].bytes()) + }, variant.offsets[i as usize]) }); for (index, ty, target_offset) in field_iter { - assert!(target_offset >= min_offset); - let padding_bytes = target_offset - min_offset; + debug!("struct_llfields: {} ty: {} offset: {:?} target_offset: {:?}", + index, ty, offset, target_offset); + assert!(target_offset >= offset); + let padding = target_offset - offset; if first_field { - debug!("struct_llfields: {} ty: {} min_offset: {} target_offset: {}", - index, ty, min_offset, target_offset); - assert_eq!(padding_bytes, 0); + assert_eq!(padding.bytes(), 
0); first_field = false; } else { - result.push(Type::array(&Type::i8(cx), padding_bytes)); - debug!("struct_llfields: {} ty: {} pad_bytes: {} min_offset: {} target_offset: {}", - index, ty, padding_bytes, min_offset, target_offset); + result.push(Type::array(&Type::i8(cx), padding.bytes())); + debug!(" padding before: {:?}", padding); } let llty = type_of::in_memory_type_of(cx, ty); result.push(llty); let layout = cx.layout_of(ty); if variant.packed { - assert_eq!(padding_bytes, 0); + assert_eq!(padding.bytes(), 0); } else { let field_align = layout.align(cx); assert!(field_align.abi() <= variant.align.abi(), "non-packed type has field with larger align ({}): {:#?}", field_align.abi(), variant); } - let target_size = layout.size(&cx.tcx().data_layout).bytes(); - min_offset = target_offset + target_size; + let target_size = layout.size(&cx.tcx().data_layout); + offset = target_offset + target_size; } if variant.sized && field_count > 0 { - if variant.stride().bytes() < min_offset { - bug!("variant: {:?} stride: {} min_offset: {}", variant, variant.stride().bytes(), - min_offset); + if offset > variant.stride() { + bug!("variant: {:?} stride: {:?} offset: {:?}", + variant, variant.stride(), offset); } - let padding_bytes = variant.stride().bytes() - min_offset; - debug!("struct_llfields: pad_bytes: {} min_offset: {} min_size: {} stride: {}\n", - padding_bytes, min_offset, variant.min_size.bytes(), variant.stride().bytes()); - result.push(Type::array(&Type::i8(cx), padding_bytes)); + let padding = variant.stride() - offset; + debug!("struct_llfields: pad_bytes: {:?} offset: {:?} min_size: {:?} stride: {:?}", + padding, offset, variant.min_size, variant.stride()); + result.push(Type::array(&Type::i8(cx), padding.bytes())); assert!(result.len() == (field_count * 2)); } else { - debug!("struct_llfields: min_offset: {} min_size: {} stride: {}\n", - min_offset, variant.min_size.bytes(), variant.stride().bytes()); + debug!("struct_llfields: offset: {:?} min_size: {:?} 
stride: {:?}", + offset, variant.min_size, variant.stride()); } result @@ -310,7 +304,3 @@ pub fn assert_discr_in_range(min: D, max: D, discr: D) { assert!(min <= discr || discr <= max) } } - -// FIXME this utility routine should be somewhere more general -#[inline] -fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 69bcd0aa50b99..98ad6a54bd1f5 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -40,6 +40,7 @@ use rustc::middle::lang_items::StartFnLangItem; use rustc::middle::trans::{Linkage, Visibility, Stats}; use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes}; use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::Align; use rustc::ty::maps::Providers; use rustc::dep_graph::{DepNode, DepKind, DepConstructor}; use rustc::middle::cstore::{self, LinkMeta, LinkagePreference}; @@ -55,7 +56,7 @@ use builder::Builder; use callee; use common::{C_bool, C_bytes_in_context, C_i32, C_usize}; use collector::{self, TransItemCollectionMode}; -use common::{C_struct_in_context, C_u64, C_undef, C_array}; +use common::{C_struct_in_context, C_undef, C_array}; use common::CrateContext; use common::{type_is_zero_size, val_ty}; use common; @@ -63,7 +64,6 @@ use consts; use context::{self, LocalCrateContext, SharedCrateContext}; use debuginfo; use declare; -use machine; use meth; use mir; use monomorphize::{self, Instance}; @@ -489,42 +489,11 @@ pub fn to_immediate(bcx: &Builder, val: ValueRef, ty: Ty) -> ValueRef { } } -pub enum Lifetime { Start, End } - -impl Lifetime { - // If LLVM lifetime intrinsic support is enabled (i.e. optimizations - // on), and `ptr` is nonzero-sized, then extracts the size of `ptr` - // and the intrinsic for `lt` and passes them to `emit`, which is in - // charge of generating code to call the passed intrinsic on whatever - // block of generated code is targeted for the intrinsic. 
- // - // If LLVM lifetime intrinsic support is disabled (i.e. optimizations - // off) or `ptr` is zero-sized, then no-op (does not call `emit`). - pub fn call(self, b: &Builder, ptr: ValueRef) { - if b.ccx.sess().opts.optimize == config::OptLevel::No { - return; - } - - let size = machine::llsize_of_alloc(b.ccx, val_ty(ptr).element_type()); - if size == 0 { - return; - } - - let lifetime_intrinsic = b.ccx.get_intrinsic(match self { - Lifetime::Start => "llvm.lifetime.start", - Lifetime::End => "llvm.lifetime.end" - }); - - let ptr = b.pointercast(ptr, Type::i8p(b.ccx)); - b.call(lifetime_intrinsic, &[C_u64(b.ccx, size), ptr], None); - } -} - -pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>, - dst: ValueRef, - src: ValueRef, - n_bytes: ValueRef, - align: u32) { +pub fn call_memcpy(b: &Builder, + dst: ValueRef, + src: ValueRef, + n_bytes: ValueRef, + align: Align) { let ccx = b.ccx; let ptr_width = &ccx.sess().target.target.target_pointer_width; let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); @@ -532,7 +501,7 @@ pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>, let src_ptr = b.pointercast(src, Type::i8p(ccx)); let dst_ptr = b.pointercast(dst, Type::i8p(ccx)); let size = b.intcast(n_bytes, ccx.isize_ty(), false); - let align = C_i32(ccx, align as i32); + let align = C_i32(ccx, align.abi() as i32); let volatile = C_bool(ccx, false); b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } @@ -542,11 +511,11 @@ pub fn memcpy_ty<'a, 'tcx>( dst: ValueRef, src: ValueRef, t: Ty<'tcx>, - align: Option, + align: Option, ) { let ccx = bcx.ccx; - let size = ccx.size_of(t); + let size = ccx.size_of(t).bytes(); if size == 0 { return; } diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index b366d5579c3d1..c8d8984122fdd 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -15,12 +15,12 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{Opcode, IntPredicate, 
RealPredicate, False, OperandBundleDef}; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef}; use common::*; -use machine::llalign_of_pref; use type_::Type; use value::Value; use libc::{c_uint, c_char}; use rustc::ty::TyCtxt; -use rustc::session::Session; +use rustc::ty::layout::{Align, Size}; +use rustc::session::{config, Session}; use std::borrow::Cow; use std::ffi::CString; @@ -487,7 +487,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn alloca(&self, ty: Type, name: &str, align: Option) -> ValueRef { + pub fn alloca(&self, ty: Type, name: &str, align: Option) -> ValueRef { let builder = Builder::with_ccx(self.ccx); builder.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) @@ -495,7 +495,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { builder.dynamic_alloca(ty, name, align) } - pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Option) -> ValueRef { + pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Option) -> ValueRef { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -506,7 +506,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { name.as_ptr()) }; if let Some(align) = align { - llvm::LLVMSetAlignment(alloca, align as c_uint); + llvm::LLVMSetAlignment(alloca, align.abi() as c_uint); } alloca } @@ -519,12 +519,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn load(&self, ptr: ValueRef, align: Option) -> ValueRef { + pub fn load(&self, ptr: ValueRef, align: Option) -> ValueRef { self.count_insn("load"); unsafe { let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); if let Some(align) = align { - llvm::LLVMSetAlignment(load, align as c_uint); + llvm::LLVMSetAlignment(load, align.abi() as c_uint); } load } @@ -539,20 +539,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering) -> ValueRef { + pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering, align: Align) -> ValueRef { self.count_insn("load.atomic"); unsafe { - let ty = 
Type::from_ref(llvm::LLVMTypeOf(ptr)); - let align = llalign_of_pref(self.ccx, ty.element_type()); - llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order, - align as c_uint) + let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order); + llvm::LLVMSetAlignment(load, align.abi() as c_uint); + load } } pub fn load_range_assert(&self, ptr: ValueRef, lo: u64, hi: u64, signed: llvm::Bool, - align: Option) -> ValueRef { + align: Option) -> ValueRef { let value = self.load(ptr, align); unsafe { @@ -571,7 +570,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { value } - pub fn load_nonnull(&self, ptr: ValueRef, align: Option) -> ValueRef { + pub fn load_nonnull(&self, ptr: ValueRef, align: Option) -> ValueRef { let value = self.load(ptr, align); unsafe { llvm::LLVMSetMetadata(value, llvm::MD_nonnull as c_uint, @@ -581,7 +580,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { value } - pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option) -> ValueRef { + pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option) -> ValueRef { debug!("Store {:?} -> {:?}", Value(val), Value(ptr)); assert!(!self.llbuilder.is_null()); self.count_insn("store"); @@ -589,7 +588,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { unsafe { let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); if let Some(align) = align { - llvm::LLVMSetAlignment(store, align as c_uint); + llvm::LLVMSetAlignment(store, align.abi() as c_uint); } store } @@ -607,14 +606,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { + pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, + order: AtomicOrdering, align: Align) { debug!("Store {:?} -> {:?}", Value(val), Value(ptr)); self.count_insn("store.atomic"); let ptr = self.check_store(val, ptr); unsafe { - let ty = Type::from_ref(llvm::LLVMTypeOf(ptr)); - let align = llalign_of_pref(self.ccx, ty.element_type()); - llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, 
order, align as c_uint); + let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order); + llvm::LLVMSetAlignment(store, align.abi() as c_uint); } } @@ -1233,4 +1232,36 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } return Cow::Owned(casted_args); } + + pub fn lifetime_start(&self, ptr: ValueRef, size: Size) { + self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); + } + + pub fn lifetime_end(&self, ptr: ValueRef, size: Size) { + self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); + } + + /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations + /// on) and `size` is nonzero, emits a call to the named LLVM lifetime + /// intrinsic (`llvm.lifetime.start` or `llvm.lifetime.end`) covering + /// `size` bytes at `ptr`, which is targeted at the current insertion + /// block. + /// + /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations + /// off) or `size` is zero, this is a no-op. 
+ fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: ValueRef, size: Size) { + if self.ccx.sess().opts.optimize == config::OptLevel::No { + return; + } + + let size = size.bytes(); + if size == 0 { + return; + } + + let lifetime_intrinsic = self.ccx.get_intrinsic(intrinsic); + + let ptr = self.pointercast(ptr, Type::i8p(self.ccx)); + self.call(lifetime_intrinsic, &[C_u64(self.ccx, size), ptr], None); + } } diff --git a/src/librustc_trans/cabi_aarch64.rs b/src/librustc_trans/cabi_aarch64.rs index bf842e6358f87..b021a06072595 100644 --- a/src/librustc_trans/cabi_aarch64.rs +++ b/src/librustc_trans/cabi_aarch64.rs @@ -44,7 +44,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc return; } if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) { - ret.cast_to(ccx, uniform); + ret.cast_to(uniform); return; } let size = ret.layout.size(ccx); @@ -60,7 +60,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc Reg::i64() }; - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); @@ -75,7 +75,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc return; } if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) { - arg.cast_to(ccx, uniform); + arg.cast_to(uniform); return; } let size = arg.layout.size(ccx); @@ -91,7 +91,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc Reg::i64() }; - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit, total: size }); diff --git a/src/librustc_trans/cabi_arm.rs b/src/librustc_trans/cabi_arm.rs index 635741b4d1ac5..370a950617a1b 100644 --- a/src/librustc_trans/cabi_arm.rs +++ b/src/librustc_trans/cabi_arm.rs @@ -47,7 +47,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc if vfp { if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) { - ret.cast_to(ccx, uniform); + ret.cast_to(uniform); return; } } @@ -62,7 +62,7 @@ fn classify_ret_ty<'a, 
'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } else { Reg::i32() }; - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); @@ -79,14 +79,14 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc if vfp { if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) { - arg.cast_to(ccx, uniform); + arg.cast_to(uniform); return; } } let align = arg.layout.align(ccx).abi(); let total = arg.layout.size(ccx); - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: if align <= 4 { Reg::i32() } else { Reg::i64() }, total }); diff --git a/src/librustc_trans/cabi_asmjs.rs b/src/librustc_trans/cabi_asmjs.rs index 6fcd3ed581d27..047caa431c545 100644 --- a/src/librustc_trans/cabi_asmjs.rs +++ b/src/librustc_trans/cabi_asmjs.rs @@ -21,7 +21,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc if let Some(unit) = ret.layout.homogeneous_aggregate(ccx) { let size = ret.layout.size(ccx); if unit.size == size { - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); diff --git a/src/librustc_trans/cabi_mips.rs b/src/librustc_trans/cabi_mips.rs index b7b60859d4a04..baab70367419a 100644 --- a/src/librustc_trans/cabi_mips.rs +++ b/src/librustc_trans/cabi_mips.rs @@ -8,45 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::cmp; -use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform}; +use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +use rustc::ty::layout::Size; + +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ret: &mut ArgType<'tcx>, + offset: &mut Size) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { ret.make_indirect(ccx); + *offset += ccx.tcx().data_layout.pointer_size; } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { + let dl = &ccx.tcx().data_layout; let size = arg.layout.size(ccx); - let mut align = arg.layout.align(ccx).abi(); - align = cmp::min(cmp::max(align, 4), 8); + let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if ((align - 1) & *offset) > 0 { - arg.pad_with(ccx, Reg::i32()); + if !offset.is_abi_aligned(align) { + arg.pad_with(Reg::i32()); } } else { arg.extend_integer_width_to(32); } - *offset = align_up_to(*offset, align); - *offset += align_up_to(size.bytes(), align); + *offset = offset.abi_align(align) + size.abi_align(align); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, &mut offset); } - let mut offset = if fty.ret.is_indirect() { 4 } else { 0 }; for arg in &mut fty.args { if arg.is_ignore() { continue; } classify_arg_ty(ccx, arg, &mut offset); diff --git a/src/librustc_trans/cabi_mips64.rs b/src/librustc_trans/cabi_mips64.rs index dff75e628de10..1cb63e72fb9be 100644 --- a/src/librustc_trans/cabi_mips64.rs +++ b/src/librustc_trans/cabi_mips64.rs @@ 
-8,45 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::cmp; -use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform}; +use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +use rustc::ty::layout::Size; + +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ret: &mut ArgType<'tcx>, + offset: &mut Size) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(64); } else { ret.make_indirect(ccx); + *offset += ccx.tcx().data_layout.pointer_size; } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { + let dl = &ccx.tcx().data_layout; let size = arg.layout.size(ccx); - let mut align = arg.layout.align(ccx).abi(); - align = cmp::min(cmp::max(align, 4), 8); + let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i64(), total: size }); - if ((align - 1) & *offset) > 0 { - arg.pad_with(ccx, Reg::i64()); + if !offset.is_abi_aligned(align) { + arg.pad_with(Reg::i64()); } } else { arg.extend_integer_width_to(64); } - *offset = align_up_to(*offset, align); - *offset += align_up_to(size.bytes(), align); + *offset = offset.abi_align(align) + size.abi_align(align); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, &mut offset); } - let mut offset = if fty.ret.is_indirect() { 8 } else { 0 }; for arg in &mut fty.args { if arg.is_ignore() { continue; } classify_arg_ty(ccx, arg, &mut offset); diff --git a/src/librustc_trans/cabi_powerpc.rs b/src/librustc_trans/cabi_powerpc.rs index 
f951ac76391f6..df320fb00abe2 100644 --- a/src/librustc_trans/cabi_powerpc.rs +++ b/src/librustc_trans/cabi_powerpc.rs @@ -8,46 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use abi::{align_up_to, FnType, ArgType, LayoutExt, Reg, Uniform}; +use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -use std::cmp; +use rustc::ty::layout::Size; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ret: &mut ArgType<'tcx>, + offset: &mut Size) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { ret.make_indirect(ccx); + *offset += ccx.tcx().data_layout.pointer_size; } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { + let dl = &ccx.tcx().data_layout; let size = arg.layout.size(ccx); - let mut align = arg.layout.align(ccx).abi(); - align = cmp::min(cmp::max(align, 4), 8); + let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if ((align - 1) & *offset) > 0 { - arg.pad_with(ccx, Reg::i32()); + if !offset.is_abi_aligned(align) { + arg.pad_with(Reg::i32()); } } else { arg.extend_integer_width_to(32); } - *offset = align_up_to(*offset, align); - *offset += align_up_to(size.bytes(), align); + *offset = offset.abi_align(align) + size.abi_align(align); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, &mut offset); } - let mut offset = if fty.ret.is_indirect() { 4 } else { 0 }; for arg in &mut fty.args { if arg.is_ignore() { continue; } 
classify_arg_ty(ccx, arg, &mut offset); diff --git a/src/librustc_trans/cabi_powerpc64.rs b/src/librustc_trans/cabi_powerpc64.rs index fb5472eb6ae1f..9a9d6f8d0ac46 100644 --- a/src/librustc_trans/cabi_powerpc64.rs +++ b/src/librustc_trans/cabi_powerpc64.rs @@ -67,7 +67,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } if let Some(uniform) = is_homogeneous_aggregate(ccx, ret, abi) { - ret.cast_to(ccx, uniform); + ret.cast_to(uniform); return; } @@ -84,7 +84,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc Reg::i64() }; - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); @@ -101,7 +101,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc } if let Some(uniform) = is_homogeneous_aggregate(ccx, arg, abi) { - arg.cast_to(ccx, uniform); + arg.cast_to(uniform); return; } @@ -124,7 +124,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc }, }; - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit, total }); diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs index fedebea3f4c99..ffe2940a0284f 100644 --- a/src/librustc_trans/cabi_s390x.rs +++ b/src/librustc_trans/cabi_s390x.rs @@ -49,16 +49,16 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc if is_single_fp_element(ccx, arg.layout) { match size.bytes() { - 4 => arg.cast_to(ccx, Reg::f32()), - 8 => arg.cast_to(ccx, Reg::f64()), + 4 => arg.cast_to(Reg::f32()), + 8 => arg.cast_to(Reg::f64()), _ => arg.make_indirect(ccx) } } else { match size.bytes() { - 1 => arg.cast_to(ccx, Reg::i8()), - 2 => arg.cast_to(ccx, Reg::i16()), - 4 => arg.cast_to(ccx, Reg::i32()), - 8 => arg.cast_to(ccx, Reg::i64()), + 1 => arg.cast_to(Reg::i8()), + 2 => arg.cast_to(Reg::i16()), + 4 => arg.cast_to(Reg::i32()), + 8 => arg.cast_to(Reg::i64()), _ => arg.make_indirect(ccx) } } diff --git a/src/librustc_trans/cabi_sparc.rs 
b/src/librustc_trans/cabi_sparc.rs index c17901e1adebc..baab70367419a 100644 --- a/src/librustc_trans/cabi_sparc.rs +++ b/src/librustc_trans/cabi_sparc.rs @@ -8,45 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::cmp; -use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform}; +use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +use rustc::ty::layout::Size; + +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ret: &mut ArgType<'tcx>, + offset: &mut Size) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { ret.make_indirect(ccx); + *offset += ccx.tcx().data_layout.pointer_size; } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { + let dl = &ccx.tcx().data_layout; let size = arg.layout.size(ccx); - let mut align = arg.layout.align(ccx).abi(); - align = cmp::min(cmp::max(align, 4), 8); + let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if ((align - 1) & *offset) > 0 { - arg.pad_with(ccx, Reg::i32()); + if !offset.is_abi_aligned(align) { + arg.pad_with(Reg::i32()); } } else { - arg.extend_integer_width_to(32) + arg.extend_integer_width_to(32); } - *offset = align_up_to(*offset, align); - *offset += align_up_to(size.bytes(), align); + *offset = offset.abi_align(align) + size.abi_align(align); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, &mut offset); } - let mut offset = if fty.ret.is_indirect() { 4 } else { 0 }; 
for arg in &mut fty.args { if arg.is_ignore() { continue; } classify_arg_ty(ccx, arg, &mut offset); diff --git a/src/librustc_trans/cabi_sparc64.rs b/src/librustc_trans/cabi_sparc64.rs index 8383007550e1e..788fba9dc2628 100644 --- a/src/librustc_trans/cabi_sparc64.rs +++ b/src/librustc_trans/cabi_sparc64.rs @@ -47,7 +47,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) { - ret.cast_to(ccx, uniform); + ret.cast_to(uniform); return; } let size = ret.layout.size(ccx); @@ -63,7 +63,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc Reg::i64() }; - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); @@ -81,12 +81,12 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc } if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) { - arg.cast_to(ccx, uniform); + arg.cast_to(uniform); return; } let total = arg.layout.size(ccx); - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i64(), total }); diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index 49634d6e78ce9..b34337ae5f69f 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -56,16 +56,16 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // float aggregates directly in a floating-point register. 
if !t.options.is_like_msvc && is_single_fp_element(ccx, fty.ret.layout) { match size.bytes() { - 4 => fty.ret.cast_to(ccx, Reg::f32()), - 8 => fty.ret.cast_to(ccx, Reg::f64()), + 4 => fty.ret.cast_to(Reg::f32()), + 8 => fty.ret.cast_to(Reg::f64()), _ => fty.ret.make_indirect(ccx) } } else { match size.bytes() { - 1 => fty.ret.cast_to(ccx, Reg::i8()), - 2 => fty.ret.cast_to(ccx, Reg::i16()), - 4 => fty.ret.cast_to(ccx, Reg::i32()), - 8 => fty.ret.cast_to(ccx, Reg::i64()), + 1 => fty.ret.cast_to(Reg::i8()), + 2 => fty.ret.cast_to(Reg::i16()), + 4 => fty.ret.cast_to(Reg::i32()), + 8 => fty.ret.cast_to(Reg::i64()), _ => fty.ret.make_indirect(ccx) } } diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index a814f458e12aa..6670d084d6c57 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -34,9 +34,9 @@ const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64; fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) -> Result<[Class; MAX_EIGHTBYTES], Memory> { fn unify(cls: &mut [Class], - off: u64, + off: Size, c: Class) { - let i = (off / 8) as usize; + let i = (off.bytes() / 8) as usize; let to_write = match (cls[i], c) { (Class::None, _) => c, (_, Class::None) => return, @@ -55,9 +55,9 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>, cls: &mut [Class], - off: u64) + off: Size) -> Result<(), Memory> { - if off % layout.align(ccx).abi() != 0 { + if !off.is_abi_aligned(layout.align(ccx)) { if layout.size(ccx).bytes() > 0 { return Err(Memory); } @@ -85,25 +85,25 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) // everything after the first one is the upper // half of a register. 
- let eltsz = element.size(ccx).bytes(); + let eltsz = element.size(ccx); for i in 1..count { - unify(cls, off + i * eltsz, Class::SseUp); + unify(cls, off + eltsz * i, Class::SseUp); } } Layout::Array { count, .. } => { if count > 0 { let elt = layout.field(ccx, 0); - let eltsz = elt.size(ccx).bytes(); + let eltsz = elt.size(ccx); for i in 0..count { - classify(ccx, elt, cls, off + i * eltsz)?; + classify(ccx, elt, cls, off + eltsz * i)?; } } } Layout::Univariant { ref variant, .. } => { for i in 0..layout.field_count() { - let field_off = off + variant.offsets[i].bytes(); + let field_off = off + variant.offsets[i]; classify(ccx, layout.field(ccx, i), cls, field_off)?; } } @@ -128,7 +128,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) } let mut cls = [Class::None; MAX_EIGHTBYTES]; - classify(ccx, arg.layout, &mut cls, 0)?; + classify(ccx, arg.layout, &mut cls, Size::from_bytes(0))?; if n > 2 { if cls[0] != Class::Sse { return Err(Memory); @@ -153,7 +153,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) Ok(cls) } -fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option { +fn reg_component(cls: &[Class], i: &mut usize, size: Size) -> Option { if *i >= cls.len() { return None; } @@ -162,7 +162,7 @@ fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option { Class::None => None, Class::Int => { *i += 1; - Some(match size { + Some(match size.bytes() { 1 => Reg::i8(), 2 => Reg::i16(), 3 | @@ -174,14 +174,14 @@ fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option { let vec_len = 1 + cls[*i+1..].iter().take_while(|&&c| c == Class::SseUp).count(); *i += vec_len; Some(if vec_len == 1 { - match size { + match size.bytes() { 4 => Reg::f32(), _ => Reg::f64() } } else { Reg { kind: RegKind::Vector, - size: Size::from_bytes(vec_len as u64 * 8) + size: Size::from_bytes(8) * (vec_len as u64) } }) } @@ -189,17 +189,17 @@ fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> 
Option { } } -fn cast_target(cls: &[Class], size: u64) -> CastTarget { +fn cast_target(cls: &[Class], size: Size) -> CastTarget { let mut i = 0; let lo = reg_component(cls, &mut i, size).unwrap(); - let offset = i as u64 * 8; + let offset = Size::from_bytes(8) * (i as u64); let target = if size <= offset { CastTarget::from(lo) } else { let hi = reg_component(cls, &mut i, size - offset).unwrap(); CastTarget::Pair(lo, hi) }; - assert_eq!(reg_component(cls, &mut i, 0), None); + assert_eq!(reg_component(cls, &mut i, Size::from_bytes(0)), None); target } @@ -242,8 +242,8 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType sse_regs -= needed_sse; if arg.layout.is_aggregate() { - let size = arg.layout.size(ccx).bytes(); - arg.cast_to(ccx, cast_target(cls.as_ref().unwrap(), size)) + let size = arg.layout.size(ccx); + arg.cast_to(cast_target(cls.as_ref().unwrap(), size)) } else { arg.extend_integer_width_to(32); } diff --git a/src/librustc_trans/cabi_x86_win64.rs b/src/librustc_trans/cabi_x86_win64.rs index 39e728d4e4f9b..1d391da5993fe 100644 --- a/src/librustc_trans/cabi_x86_win64.rs +++ b/src/librustc_trans/cabi_x86_win64.rs @@ -20,10 +20,10 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType let size = a.layout.size(ccx); if a.layout.is_aggregate() { match size.bits() { - 8 => a.cast_to(ccx, Reg::i8()), - 16 => a.cast_to(ccx, Reg::i16()), - 32 => a.cast_to(ccx, Reg::i32()), - 64 => a.cast_to(ccx, Reg::i64()), + 8 => a.cast_to(Reg::i8()), + 16 => a.cast_to(Reg::i16()), + 32 => a.cast_to(Reg::i32()), + 64 => a.cast_to(Reg::i64()), _ => a.make_indirect(ccx) }; } else { diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index e3856cabcf910..e3ee8f7c75a8a 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -22,13 +22,12 @@ use base; use builder::Builder; use consts; use declare; -use machine; use monomorphize; use type_::Type; use value::Value; use rustc::traits; 
use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{Layout, LayoutTyper}; +use rustc::ty::layout::{HasDataLayout, Layout, LayoutTyper}; use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::hir; @@ -252,10 +251,6 @@ pub fn C_big_integral(t: Type, u: u128) -> ValueRef { } } -pub fn C_nil(ccx: &CrateContext) -> ValueRef { - C_struct(ccx, &[], false) -} - pub fn C_bool(ccx: &CrateContext, val: bool) -> ValueRef { C_uint(Type::i1(ccx), val as u64) } @@ -273,8 +268,7 @@ pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef { } pub fn C_usize(ccx: &CrateContext, i: u64) -> ValueRef { - let bit_size = machine::llbitsize_of_real(ccx, ccx.isize_ty()); - + let bit_size = ccx.data_layout().pointer_size.bits(); if bit_size < 64 { // make sure it doesn't overflow assert!(i < (1< ValueRef { fn set_global_alignment(ccx: &CrateContext, gv: ValueRef, - mut align: machine::llalign) { + mut align: Align) { // The target may require greater alignment for globals than the type does. // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, // which can force it to be smaller. Rust doesn't support this yet. 
if let Some(min) = ccx.sess().target.target.options.min_global_align { match ty::layout::Align::from_bits(min, min) { - Ok(min) => align = cmp::max(align, min.abi() as machine::llalign), + Ok(min) => align = align.max(min), Err(err) => { ccx.sess().err(&format!("invalid minimum global alignment: {}", err)); } } } unsafe { - llvm::LLVMSetAlignment(gv, align); + llvm::LLVMSetAlignment(gv, align.abi() as u32); } } pub fn addr_of_mut(ccx: &CrateContext, cv: ValueRef, - align: machine::llalign, + align: Align, kind: &str) -> ValueRef { unsafe { @@ -82,15 +82,16 @@ pub fn addr_of_mut(ccx: &CrateContext, pub fn addr_of(ccx: &CrateContext, cv: ValueRef, - align: machine::llalign, + align: Align, kind: &str) -> ValueRef { if let Some(&gv) = ccx.const_globals().borrow().get(&cv) { unsafe { // Upgrade the alignment in cases where the same constant is used with different // alignment requirements - if align > llvm::LLVMGetAlignment(gv) { - llvm::LLVMSetAlignment(gv, align); + let llalign = align.abi() as u32; + if llalign > llvm::LLVMGetAlignment(gv) { + llvm::LLVMSetAlignment(gv, llalign); } } return gv; diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index a68390eab7fd2..d2e2e1bbdee47 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -9,11 +9,10 @@ // except according to those terms. 
use self::RecursiveTypeDescription::*; -use self::MemberOffset::*; use self::MemberDescriptionFactory::*; use self::EnumDiscriminantInfo::*; -use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of, +use super::utils::{debug_context, DIB, span_start, get_namespace_for_item, create_DIArray, is_node_local_to_unit}; use super::namespace::mangled_name_of_item; use super::type_names::compute_debuginfo_type_name; @@ -30,13 +29,11 @@ use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE}; use rustc::ty::fold::TypeVisitor; use rustc::ty::subst::Substs; use rustc::ty::util::TypeIdHasher; -use rustc::hir; use rustc::ich::Fingerprint; -use {type_of, machine, monomorphize}; +use monomorphize; use common::{self, CrateContext}; -use type_::Type; use rustc::ty::{self, AdtKind, Ty}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::{self, Align, LayoutTyper, Size}; use rustc::session::{Session, config}; use rustc::util::nodemap::FxHashMap; use rustc::util::common::path2cstr; @@ -184,7 +181,6 @@ enum RecursiveTypeDescription<'tcx> { unfinished_type: Ty<'tcx>, unique_type_id: UniqueTypeId, metadata_stub: DICompositeType, - llvm_type: Type, member_description_factory: MemberDescriptionFactory<'tcx>, }, FinalMetadata(DICompositeType) @@ -195,7 +191,6 @@ fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>( unfinished_type: Ty<'tcx>, unique_type_id: UniqueTypeId, metadata_stub: DICompositeType, - llvm_type: Type, member_description_factory: MemberDescriptionFactory<'tcx>) -> RecursiveTypeDescription<'tcx> { @@ -208,7 +203,6 @@ fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>( unfinished_type, unique_type_id, metadata_stub, - llvm_type, member_description_factory, } } @@ -224,9 +218,7 @@ impl<'tcx> RecursiveTypeDescription<'tcx> { unfinished_type, unique_type_id, metadata_stub, - llvm_type, ref member_description_factory, - .. 
} => { // Make sure that we have a forward declaration of the type in // the TypeMap so that recursive references are possible. This @@ -251,7 +243,6 @@ impl<'tcx> RecursiveTypeDescription<'tcx> { // ... and attach them to the stub to complete it. set_members_of_composite_type(cx, metadata_stub, - llvm_type, &member_descriptions[..]); return MetadataCreationResult::new(metadata_stub, true); } @@ -274,20 +265,21 @@ macro_rules! return_if_metadata_created_in_meantime { fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, unique_type_id: UniqueTypeId, + array_or_slice_type: Ty<'tcx>, element_type: Ty<'tcx>, - len: Option, span: Span) -> MetadataCreationResult { let element_type_metadata = type_metadata(cx, element_type, span); return_if_metadata_created_in_meantime!(cx, unique_type_id); - let element_llvm_type = type_of::type_of(cx, element_type); - let (element_type_size, element_type_align) = size_and_align_of(cx, element_llvm_type); + let (size, align) = cx.size_and_align_of(array_or_slice_type); - let (array_size_in_bytes, upper_bound) = match len { - Some(len) => (element_type_size * len, len as c_longlong), - None => (0, -1) + let upper_bound = match array_or_slice_type.sty { + ty::TyArray(_, len) => { + len.val.to_const_int().unwrap().to_u64().unwrap() as c_longlong + } + _ => -1 }; let subrange = unsafe { @@ -298,8 +290,8 @@ fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let metadata = unsafe { llvm::LLVMRustDIBuilderCreateArrayType( DIB(cx), - bytes_to_bits(array_size_in_bytes), - bytes_to_bits(element_type_align), + size.bits(), + align.abi_bits() as u32, element_type_metadata, subscripts) }; @@ -308,66 +300,52 @@ fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } fn vec_slice_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - vec_type: Ty<'tcx>, + slice_ptr_type: Ty<'tcx>, element_type: Ty<'tcx>, unique_type_id: UniqueTypeId, span: Span) -> MetadataCreationResult { - let data_ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut { - ty: 
element_type, - mutbl: hir::MutImmutable - }); + let data_ptr_type = cx.tcx().mk_imm_ptr(element_type); - let element_type_metadata = type_metadata(cx, data_ptr_type, span); + let data_ptr_metadata = type_metadata(cx, data_ptr_type, span); return_if_metadata_created_in_meantime!(cx, unique_type_id); - let slice_llvm_type = type_of::type_of(cx, vec_type); - let slice_type_name = compute_debuginfo_type_name(cx, vec_type, true); + let slice_type_name = compute_debuginfo_type_name(cx, slice_ptr_type, true); + + let (pointer_size, pointer_align) = cx.size_and_align_of(data_ptr_type); + let (usize_size, usize_align) = cx.size_and_align_of(cx.tcx().types.usize); - let member_llvm_types = slice_llvm_type.field_types(); - assert!(slice_layout_is_correct(cx, - &member_llvm_types[..], - element_type)); let member_descriptions = [ MemberDescription { name: "data_ptr".to_string(), - llvm_type: member_llvm_types[0], - type_metadata: element_type_metadata, - offset: ComputedMemberOffset, + type_metadata: data_ptr_metadata, + offset: Size::from_bytes(0), + size: pointer_size, + align: pointer_align, flags: DIFlags::FlagZero, }, MemberDescription { name: "length".to_string(), - llvm_type: member_llvm_types[1], type_metadata: type_metadata(cx, cx.tcx().types.usize, span), - offset: ComputedMemberOffset, + offset: pointer_size, + size: usize_size, + align: usize_align, flags: DIFlags::FlagZero, }, ]; - assert!(member_descriptions.len() == member_llvm_types.len()); - let file_metadata = unknown_file_metadata(cx); let metadata = composite_type_metadata(cx, - slice_llvm_type, + slice_ptr_type, &slice_type_name[..], unique_type_id, &member_descriptions, NO_SCOPE_METADATA, file_metadata, span); - return MetadataCreationResult::new(metadata, false); - - fn slice_layout_is_correct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - member_llvm_types: &[Type], - element_type: Ty<'tcx>) - -> bool { - member_llvm_types.len() == 2 && - member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() && 
- member_llvm_types[1] == cx.isize_ty() - } + MetadataCreationResult::new(metadata, false) } fn subroutine_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, @@ -436,38 +414,38 @@ fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let trait_type_name = compute_debuginfo_type_name(cx, trait_object_type, false); - let trait_llvm_type = type_of::type_of(cx, trait_object_type); let file_metadata = unknown_file_metadata(cx); - - let ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut { - ty: cx.tcx().types.u8, - mutbl: hir::MutImmutable - }); - let ptr_type_metadata = type_metadata(cx, ptr_type, syntax_pos::DUMMY_SP); - let llvm_type = type_of::type_of(cx, ptr_type); + let layout = cx.layout_of(cx.tcx().mk_mut_ptr(trait_type)); assert_eq!(abi::FAT_PTR_ADDR, 0); assert_eq!(abi::FAT_PTR_EXTRA, 1); + + let data_ptr_field = layout.field(cx, 0); + let vtable_field = layout.field(cx, 1); let member_descriptions = [ MemberDescription { name: "pointer".to_string(), - llvm_type: llvm_type, - type_metadata: ptr_type_metadata, - offset: ComputedMemberOffset, + type_metadata: type_metadata(cx, + cx.tcx().mk_mut_ptr(cx.tcx().types.u8), + syntax_pos::DUMMY_SP), + offset: layout.field_offset(cx, 0), + size: data_ptr_field.size(cx), + align: data_ptr_field.align(cx), flags: DIFlags::FlagArtificial, }, MemberDescription { name: "vtable".to_string(), - llvm_type: llvm_type, - type_metadata: ptr_type_metadata, - offset: ComputedMemberOffset, + type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP), + offset: layout.field_offset(cx, 1), + size: vtable_field.size(cx), + align: vtable_field.align(cx), flags: DIFlags::FlagArtificial, }, ]; composite_type_metadata(cx, - trait_llvm_type, + trait_object_type, &trait_type_name[..], unique_type_id, &member_descriptions, @@ -556,15 +534,12 @@ pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty::TyTuple(ref elements, _) if elements.is_empty() => { MetadataCreationResult::new(basic_type_metadata(cx, t), false) } - 
ty::TyArray(typ, len) => { - let len = len.val.to_const_int().unwrap().to_u64().unwrap(); - fixed_vec_metadata(cx, unique_type_id, typ, Some(len), usage_site_span) - } + ty::TyArray(typ, _) | ty::TySlice(typ) => { - fixed_vec_metadata(cx, unique_type_id, typ, None, usage_site_span) + fixed_vec_metadata(cx, unique_type_id, t, typ, usage_site_span) } ty::TyStr => { - fixed_vec_metadata(cx, unique_type_id, cx.tcx().types.i8, None, usage_site_span) + fixed_vec_metadata(cx, unique_type_id, t, cx.tcx().types.i8, usage_site_span) } ty::TyDynamic(..) => { MetadataCreationResult::new( @@ -770,15 +745,14 @@ fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, _ => bug!("debuginfo::basic_type_metadata - t is invalid type") }; - let llvm_type = type_of::type_of(cx, t); - let (size, align) = size_and_align_of(cx, llvm_type); + let (size, align) = cx.size_and_align_of(t); let name = CString::new(name).unwrap(); let ty_metadata = unsafe { llvm::LLVMRustDIBuilderCreateBasicType( DIB(cx), name.as_ptr(), - bytes_to_bits(size), - bytes_to_bits(align), + size.bits(), + align.abi_bits() as u32, encoding) }; @@ -790,29 +764,25 @@ fn foreign_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, unique_type_id: UniqueTypeId) -> DIType { debug!("foreign_type_metadata: {:?}", t); - let llvm_type = type_of::type_of(cx, t); - let name = compute_debuginfo_type_name(cx, t, false); - create_struct_stub(cx, llvm_type, &name, unique_type_id, NO_SCOPE_METADATA) + create_struct_stub(cx, t, &name, unique_type_id, NO_SCOPE_METADATA) } fn pointer_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, pointer_type: Ty<'tcx>, pointee_type_metadata: DIType) -> DIType { - let pointer_llvm_type = type_of::type_of(cx, pointer_type); - let (pointer_size, pointer_align) = size_and_align_of(cx, pointer_llvm_type); + let (pointer_size, pointer_align) = cx.size_and_align_of(pointer_type); let name = compute_debuginfo_type_name(cx, pointer_type, false); let name = CString::new(name).unwrap(); - let 
ptr_metadata = unsafe { + unsafe { llvm::LLVMRustDIBuilderCreatePointerType( DIB(cx), pointee_type_metadata, - bytes_to_bits(pointer_size), - bytes_to_bits(pointer_align), + pointer_size.bits(), + pointer_align.abi_bits() as u32, name.as_ptr()) - }; - return ptr_metadata; + } } pub fn compile_unit_metadata(scc: &SharedCrateContext, @@ -907,21 +877,15 @@ impl MetadataCreationResult { } } -#[derive(Debug)] -enum MemberOffset { - FixedMemberOffset { bytes: usize }, - // For ComputedMemberOffset, the offset is read from the llvm type definition. - ComputedMemberOffset -} - // Description of a type member, which can either be a regular field (as in // structs or tuples) or an enum variant. #[derive(Debug)] struct MemberDescription { name: String, - llvm_type: Type, type_metadata: DIType, - offset: MemberOffset, + offset: Size, + size: Size, + align: Align, flags: DIFlags, } @@ -998,13 +962,13 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { }; let fty = monomorphize::field_ty(cx.tcx(), self.substs, f); - let offset = FixedMemberOffset { bytes: offsets[i].bytes() as usize}; - + let (size, align) = cx.size_and_align_of(fty); MemberDescription { name, - llvm_type: type_of::in_memory_type_of(cx, fty), type_metadata: type_metadata(cx, fty, self.span), - offset, + offset: offsets[i], + size, + align, flags: DIFlags::FlagZero, } }).collect() @@ -1018,7 +982,6 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span: Span) -> RecursiveTypeDescription<'tcx> { let struct_name = compute_debuginfo_type_name(cx, struct_type, false); - let struct_llvm_type = type_of::in_memory_type_of(cx, struct_type); let (struct_def_id, variant, substs) = match struct_type.sty { ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs), @@ -1028,7 +991,7 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let containing_scope = get_namespace_for_item(cx, struct_def_id); let struct_metadata_stub = create_struct_stub(cx, - struct_llvm_type, + struct_type, 
&struct_name, unique_type_id, containing_scope); @@ -1038,7 +1001,6 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, struct_type, unique_type_id, struct_metadata_stub, - struct_llvm_type, StructMDF(StructMemberDescriptionFactory { ty: struct_type, variant, @@ -1069,15 +1031,14 @@ impl<'tcx> TupleMemberDescriptionFactory<'tcx> { bug!("{} is not a tuple", self.ty); }; - self.component_types - .iter() - .enumerate() - .map(|(i, &component_type)| { + self.component_types.iter().enumerate().map(|(i, &component_type)| { + let (size, align) = cx.size_and_align_of(component_type); MemberDescription { name: format!("__{}", i), - llvm_type: type_of::type_of(cx, component_type), type_metadata: type_metadata(cx, component_type, self.span), - offset: FixedMemberOffset { bytes: offsets[i].bytes() as usize }, + offset: offsets[i], + size, + align, flags: DIFlags::FlagZero, } }).collect() @@ -1091,18 +1052,16 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span: Span) -> RecursiveTypeDescription<'tcx> { let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false); - let tuple_llvm_type = type_of::type_of(cx, tuple_type); create_and_register_recursive_type_forward_declaration( cx, tuple_type, unique_type_id, create_struct_stub(cx, - tuple_llvm_type, + tuple_type, &tuple_name[..], unique_type_id, NO_SCOPE_METADATA), - tuple_llvm_type, TupleMDF(TupleMemberDescriptionFactory { ty: tuple_type, component_types: component_types.to_vec(), @@ -1126,11 +1085,13 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> { -> Vec { self.variant.fields.iter().map(|field| { let fty = monomorphize::field_ty(cx.tcx(), self.substs, field); + let (size, align) = cx.size_and_align_of(fty); MemberDescription { name: field.name.to_string(), - llvm_type: type_of::type_of(cx, fty), type_metadata: type_metadata(cx, fty, self.span), - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size, + align, flags: DIFlags::FlagZero, } }).collect() @@ 
-1143,7 +1104,6 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span: Span) -> RecursiveTypeDescription<'tcx> { let union_name = compute_debuginfo_type_name(cx, union_type, false); - let union_llvm_type = type_of::in_memory_type_of(cx, union_type); let (union_def_id, variant, substs) = match union_type.sty { ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs), @@ -1153,7 +1113,7 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let containing_scope = get_namespace_for_item(cx, union_def_id); let union_metadata_stub = create_union_stub(cx, - union_llvm_type, + union_type, &union_name, unique_type_id, containing_scope); @@ -1163,7 +1123,6 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, union_type, unique_type_id, union_metadata_stub, - union_llvm_type, UnionMDF(UnionMemberDescriptionFactory { variant, substs, @@ -1206,9 +1165,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { .iter() .enumerate() .map(|(i, struct_def)| { - let (variant_type_metadata, - variant_llvm_type, - member_desc_factory) = + let (variant_type_metadata, member_desc_factory) = describe_enum_variant(cx, self.enum_type, struct_def, @@ -1222,13 +1179,13 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { set_members_of_composite_type(cx, variant_type_metadata, - variant_llvm_type, &member_descriptions); MemberDescription { name: "".to_string(), - llvm_type: variant_llvm_type, type_metadata: variant_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size: struct_def.stride(), + align: struct_def.align, flags: DIFlags::FlagZero } }).collect() @@ -1239,9 +1196,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { if adt.variants.is_empty() { vec![] } else { - let (variant_type_metadata, - variant_llvm_type, - member_description_factory) = + let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, self.enum_type, variant, @@ -1255,14 +1210,14 @@ impl<'tcx> 
EnumMemberDescriptionFactory<'tcx> { set_members_of_composite_type(cx, variant_type_metadata, - variant_llvm_type, &member_descriptions[..]); vec![ MemberDescription { name: "".to_string(), - llvm_type: variant_llvm_type, type_metadata: variant_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size: variant.stride(), + align: variant.align, flags: DIFlags::FlagZero } ] @@ -1278,15 +1233,10 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { let non_null_variant_name = non_null_variant.name.as_str(); // The llvm type and metadata of the pointer - let nnty = monomorphize::field_ty(cx.tcx(), &substs, &non_null_variant.fields[0] ); - let non_null_llvm_type = type_of::type_of(cx, nnty); + let nnty = monomorphize::field_ty(cx.tcx(), &substs, &non_null_variant.fields[0]); + let (size, align) = cx.size_and_align_of(nnty); let non_null_type_metadata = type_metadata(cx, nnty, self.span); - // The type of the artificial struct wrapping the pointer - let artificial_struct_llvm_type = Type::struct_(cx, - &[non_null_llvm_type], - false); - // For the metadata of the wrapper struct, we need to create a // MemberDescription of the struct's single field. 
let sole_struct_member_description = MemberDescription { @@ -1297,9 +1247,10 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } CtorKind::Const => bug!() }, - llvm_type: non_null_llvm_type, type_metadata: non_null_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size, + align, flags: DIFlags::FlagZero }; @@ -1313,7 +1264,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { // Now we can create the metadata of the artificial struct let artificial_struct_metadata = composite_type_metadata(cx, - artificial_struct_llvm_type, + nnty, &non_null_variant_name, unique_type_id, &[sole_struct_member_description], @@ -1334,9 +1285,10 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { vec![ MemberDescription { name: union_member_name, - llvm_type: artificial_struct_llvm_type, type_metadata: artificial_struct_metadata, - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size, + align, flags: DIFlags::FlagZero } ] @@ -1345,7 +1297,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { nndiscr, ref discrfield_source, ..} => { // Create a description of the non-null variant - let (variant_type_metadata, variant_llvm_type, member_description_factory) = + let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, self.enum_type, struct_def, @@ -1359,7 +1311,6 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { set_members_of_composite_type(cx, variant_type_metadata, - variant_llvm_type, &variant_member_descriptions[..]); // Encode the information about the null variant in the union @@ -1378,9 +1329,10 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { vec![ MemberDescription { name: union_member_name, - llvm_type: variant_llvm_type, type_metadata: variant_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size: struct_def.stride(), + align: struct_def.align, flags: DIFlags::FlagZero } ] @@ -1404,14 +1356,16 @@ impl<'tcx> VariantMemberDescriptionFactory<'tcx> 
{ fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { self.args.iter().enumerate().map(|(i, &(ref name, ty))| { + let (size, align) = cx.size_and_align_of(ty); MemberDescription { name: name.to_string(), - llvm_type: type_of::type_of(cx, ty), type_metadata: match self.discriminant_type_metadata { Some(metadata) if i == 0 => metadata, _ => type_metadata(cx, ty, self.span) }, - offset: FixedMemberOffset { bytes: self.offsets[i].bytes() as usize }, + offset: self.offsets[i], + size, + align, flags: DIFlags::FlagZero } }).collect() @@ -1436,7 +1390,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, discriminant_info: EnumDiscriminantInfo, containing_scope: DIScope, span: Span) - -> (DICompositeType, Type, MemberDescriptionFactory<'tcx>) { + -> (DICompositeType, MemberDescriptionFactory<'tcx>) { let substs = match enum_type.sty { ty::TyAdt(def, s) if def.adt_kind() == AdtKind::Enum => s, ref t @ _ => bug!("{:#?} is not an enum", t) @@ -1456,17 +1410,9 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }).collect::>(); if let Some((discr, signed)) = maybe_discr_and_signed { - field_tys.insert(0, discr.to_ty(&cx.tcx(), signed)); + field_tys.insert(0, discr.to_ty(cx.tcx(), signed)); } - - let variant_llvm_type = - Type::struct_(cx, &field_tys - .iter() - .map(|t| type_of::type_of(cx, t)) - .collect::>() - , - struct_def.packed); // Could do some consistency checks here: size, align, field count, discr type let variant_name = variant.name.as_str(); @@ -1478,7 +1424,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, &variant_name); let metadata_stub = create_struct_stub(cx, - variant_llvm_type, + enum_type, &variant_name, unique_type_id, containing_scope); @@ -1526,7 +1472,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span, }); - (metadata_stub, variant_llvm_type, member_description_factory) + (metadata_stub, member_description_factory) } fn prepare_enum_metadata<'a, 
'tcx>(cx: &CrateContext<'a, 'tcx>, @@ -1570,12 +1516,11 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, match cached_discriminant_type_metadata { Some(discriminant_type_metadata) => discriminant_type_metadata, None => { - let discriminant_llvm_type = Type::from_integer(cx, inttype); let (discriminant_size, discriminant_align) = - size_and_align_of(cx, discriminant_llvm_type); + (inttype.size(), inttype.align(cx)); let discriminant_base_type_metadata = type_metadata(cx, - inttype.to_ty(&cx.tcx(), signed), + inttype.to_ty(cx.tcx(), signed), syntax_pos::DUMMY_SP); let discriminant_name = get_enum_discriminant_name(cx, enum_def_id); @@ -1587,8 +1532,8 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, name.as_ptr(), file_metadata, UNKNOWN_LINE_NUMBER, - bytes_to_bits(discriminant_size), - bytes_to_bits(discriminant_align), + discriminant_size.bits(), + discriminant_align.abi_bits() as u32, create_DIArray(DIB(cx), &enumerators_metadata), discriminant_base_type_metadata) }; @@ -1615,8 +1560,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ref l @ _ => bug!("Not an enum layout: {:#?}", l) }; - let enum_llvm_type = type_of::type_of(cx, enum_type); - let (enum_type_size, enum_type_align) = size_and_align_of(cx, enum_llvm_type); + let (enum_type_size, enum_type_align) = cx.size_and_align_of(enum_type); let enum_name = CString::new(enum_name).unwrap(); let unique_type_id_str = CString::new( @@ -1629,8 +1573,8 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_name.as_ptr(), file_metadata, UNKNOWN_LINE_NUMBER, - bytes_to_bits(enum_type_size), - bytes_to_bits(enum_type_align), + enum_type_size.bits(), + enum_type_align.abi_bits() as u32, DIFlags::FlagZero, ptr::null_mut(), 0, // RuntimeLang @@ -1642,7 +1586,6 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_type, unique_type_id, enum_metadata, - enum_llvm_type, EnumMDF(EnumMemberDescriptionFactory { enum_type, type_rep: 
type_rep.layout, @@ -1664,28 +1607,27 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, /// results in a LLVM struct. /// /// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums. -fn composite_type_metadata(cx: &CrateContext, - composite_llvm_type: Type, - composite_type_name: &str, - composite_type_unique_id: UniqueTypeId, - member_descriptions: &[MemberDescription], - containing_scope: DIScope, - - // Ignore source location information as long as it - // can't be reconstructed for non-local crates. - _file_metadata: DIFile, - _definition_span: Span) - -> DICompositeType { +fn composite_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + composite_type: Ty<'tcx>, + composite_type_name: &str, + composite_type_unique_id: UniqueTypeId, + member_descriptions: &[MemberDescription], + containing_scope: DIScope, + + // Ignore source location information as long as it + // can't be reconstructed for non-local crates. + _file_metadata: DIFile, + _definition_span: Span) + -> DICompositeType { // Create the (empty) struct metadata node ... let composite_type_metadata = create_struct_stub(cx, - composite_llvm_type, + composite_type, composite_type_name, composite_type_unique_id, containing_scope); // ... and immediately create and add the member descriptions. 
set_members_of_composite_type(cx, composite_type_metadata, - composite_llvm_type, member_descriptions); return composite_type_metadata; @@ -1693,7 +1635,6 @@ fn composite_type_metadata(cx: &CrateContext, fn set_members_of_composite_type(cx: &CrateContext, composite_type_metadata: DICompositeType, - composite_llvm_type: Type, member_descriptions: &[MemberDescription]) { // In some rare cases LLVM metadata uniquing would lead to an existing type // description being used instead of a new one created in @@ -1714,14 +1655,7 @@ fn set_members_of_composite_type(cx: &CrateContext, let member_metadata: Vec = member_descriptions .iter() - .enumerate() - .map(|(i, member_description)| { - let (member_size, member_align) = size_and_align_of(cx, member_description.llvm_type); - let member_offset = match member_description.offset { - FixedMemberOffset { bytes } => bytes as u64, - ComputedMemberOffset => machine::llelement_offset(cx, composite_llvm_type, i) - }; - + .map(|member_description| { let member_name = member_description.name.as_bytes(); let member_name = CString::new(member_name).unwrap(); unsafe { @@ -1731,9 +1665,9 @@ fn set_members_of_composite_type(cx: &CrateContext, member_name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - bytes_to_bits(member_size), - bytes_to_bits(member_align), - bytes_to_bits(member_offset), + member_description.size.bits(), + member_description.align.abi_bits() as u32, + member_description.offset.bits(), member_description.flags, member_description.type_metadata) } @@ -1750,13 +1684,13 @@ fn set_members_of_composite_type(cx: &CrateContext, // A convenience wrapper around LLVMRustDIBuilderCreateStructType(). Does not do // any caching, does not add any fields to the struct. This can be done later // with set_members_of_composite_type(). 
-fn create_struct_stub(cx: &CrateContext, - struct_llvm_type: Type, - struct_type_name: &str, - unique_type_id: UniqueTypeId, - containing_scope: DIScope) - -> DICompositeType { - let (struct_size, struct_align) = size_and_align_of(cx, struct_llvm_type); +fn create_struct_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + struct_type: Ty<'tcx>, + struct_type_name: &str, + unique_type_id: UniqueTypeId, + containing_scope: DIScope) + -> DICompositeType { + let (struct_size, struct_align) = cx.size_and_align_of(struct_type); let name = CString::new(struct_type_name).unwrap(); let unique_type_id = CString::new( @@ -1774,8 +1708,8 @@ fn create_struct_stub(cx: &CrateContext, name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - bytes_to_bits(struct_size), - bytes_to_bits(struct_align), + struct_size.bits(), + struct_align.abi_bits() as u32, DIFlags::FlagZero, ptr::null_mut(), empty_array, @@ -1787,13 +1721,13 @@ fn create_struct_stub(cx: &CrateContext, return metadata_stub; } -fn create_union_stub(cx: &CrateContext, - union_llvm_type: Type, - union_type_name: &str, - unique_type_id: UniqueTypeId, - containing_scope: DIScope) - -> DICompositeType { - let (union_size, union_align) = size_and_align_of(cx, union_llvm_type); +fn create_union_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + union_type: Ty<'tcx>, + union_type_name: &str, + unique_type_id: UniqueTypeId, + containing_scope: DIScope) + -> DICompositeType { + let (union_size, union_align) = cx.size_and_align_of(union_type); let name = CString::new(union_type_name).unwrap(); let unique_type_id = CString::new( @@ -1811,8 +1745,8 @@ fn create_union_stub(cx: &CrateContext, name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - bytes_to_bits(union_size), - bytes_to_bits(union_align), + union_size.bits(), + union_align.abi_bits() as u32, DIFlags::FlagZero, empty_array, 0, // RuntimeLang @@ -1867,7 +1801,7 @@ pub fn create_global_var_metadata(cx: &CrateContext, is_local_to_unit, global, ptr::null_mut(), - 
global_align, + global_align.abi() as u32, ); } } @@ -1899,8 +1833,6 @@ pub fn create_vtable_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } let type_metadata = type_metadata(cx, ty, syntax_pos::DUMMY_SP); - let llvm_vtable_type = Type::vtable_ptr(cx).element_type(); - let (struct_size, struct_align) = size_and_align_of(cx, llvm_vtable_type); unsafe { // LLVMRustDIBuilderCreateStructType() wants an empty array. A null @@ -1919,8 +1851,8 @@ pub fn create_vtable_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - bytes_to_bits(struct_size), - bytes_to_bits(struct_align), + Size::from_bytes(0).bits(), + cx.tcx().data_layout.pointer_align.abi_bits() as u32, DIFlags::FlagArtificial, ptr::null_mut(), empty_array, diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 15b299674eea3..1ca12771dd448 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -499,7 +499,7 @@ pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, cx.sess().opts.optimize != config::OptLevel::No, DIFlags::FlagZero, argument_index, - align, + align.abi() as u32, ) }; source_loc::set_debug_location(bcx, diff --git a/src/librustc_trans/debuginfo/utils.rs b/src/librustc_trans/debuginfo/utils.rs index ad4fdfca7261f..95427d9b3cd4e 100644 --- a/src/librustc_trans/debuginfo/utils.rs +++ b/src/librustc_trans/debuginfo/utils.rs @@ -18,15 +18,11 @@ use rustc::ty::DefIdTree; use llvm; use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray}; -use machine; use common::{CrateContext}; -use type_::Type; use syntax_pos::{self, Span}; use syntax::ast; -use std::ops; - pub fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool { // The is_local_to_unit flag indicates whether a function is local to the @@ -53,15 +49,6 @@ pub fn span_start(cx: &CrateContext, span: Span) -> syntax_pos::Loc { cx.sess().codemap().lookup_char_pos(span.lo()) } -pub fn 
size_and_align_of(cx: &CrateContext, llvm_type: Type) -> (u64, u32) { - (machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type)) -} - -pub fn bytes_to_bits(bytes: T) -> T - where T: ops::Mul + From { - bytes * 8u8.into() -} - #[inline] pub fn debug_context<'a, 'tcx>(cx: &'a CrateContext<'a, 'tcx>) -> &'a CrateDebugContext<'tcx> { diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 453b98a1d74f7..597d8c587e921 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -29,12 +29,11 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf debug!("calculate size of DST: {}; with lost info: {:?}", t, Value(info)); if bcx.ccx.shared().type_is_sized(t) { - let size = bcx.ccx.size_of(t); - let align = bcx.ccx.align_of(t); - debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}", + let (size, align) = bcx.ccx.size_and_align_of(t); + debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", t, Value(info), size, align); - let size = C_usize(bcx.ccx, size); - let align = C_usize(bcx.ccx, align as u64); + let size = C_usize(bcx.ccx, size.bytes()); + let align = C_usize(bcx.ccx, align.abi()); return (size, align); } assert!(!info.is_null()); @@ -122,8 +121,9 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf let unit = t.sequence_element_type(bcx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. 
- (bcx.mul(info, C_usize(bcx.ccx, bcx.ccx.size_of(unit))), - C_usize(bcx.ccx, bcx.ccx.align_of(unit) as u64)) + let (size, align) = bcx.ccx.size_and_align_of(unit); + (bcx.mul(info, C_usize(bcx.ccx, size.bytes())), + C_usize(bcx.ccx, align.abi())) } _ => bug!("Unexpected unsized type, found {}", t) } diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index daeb0dd680ff0..c66a8ae2fcc4e 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -21,9 +21,9 @@ use common::*; use declare; use glue; use type_of; -use machine; use type_::Type; use rustc::ty::{self, Ty}; +use rustc::ty::layout::HasDataLayout; use rustc::hir; use syntax::ast; use syntax::symbol::Symbol; @@ -125,7 +125,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } "try" => { try_intrinsic(bcx, ccx, llargs[0], llargs[1], llargs[2], llresult); - C_nil(ccx) + return; } "breakpoint" => { let llfn = ccx.get_intrinsic(&("llvm.debugtrap")); @@ -133,42 +133,39 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } "size_of" => { let tp_ty = substs.type_at(0); - let lltp_ty = type_of::type_of(ccx, tp_ty); - C_usize(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) + C_usize(ccx, ccx.size_of(tp_ty).bytes()) } "size_of_val" => { let tp_ty = substs.type_at(0); if bcx.ccx.shared().type_is_sized(tp_ty) { - let lltp_ty = type_of::type_of(ccx, tp_ty); - C_usize(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) + C_usize(ccx, ccx.size_of(tp_ty).bytes()) } else if bcx.ccx.shared().type_has_metadata(tp_ty) { let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llsize } else { - C_usize(ccx, 0u64) + C_usize(ccx, 0) } } "min_align_of" => { let tp_ty = substs.type_at(0); - C_usize(ccx, ccx.align_of(tp_ty) as u64) + C_usize(ccx, ccx.align_of(tp_ty).abi()) } "min_align_of_val" => { let tp_ty = substs.type_at(0); if bcx.ccx.shared().type_is_sized(tp_ty) { - C_usize(ccx, ccx.align_of(tp_ty) as u64) + C_usize(ccx, 
ccx.align_of(tp_ty).abi()) } else if bcx.ccx.shared().type_has_metadata(tp_ty) { let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llalign } else { - C_usize(ccx, 1u64) + C_usize(ccx, 1) } } "pref_align_of" => { let tp_ty = substs.type_at(0); - let lltp_ty = type_of::type_of(ccx, tp_ty); - C_usize(ccx, machine::llalign_of_pref(ccx, lltp_ty) as u64) + C_usize(ccx, ccx.align_of(tp_ty).pref()) } "type_name" => { let tp_ty = substs.type_at(0); @@ -187,11 +184,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // large quantities of `mov [byte ptr foo],0` in the generated code.) memset_intrinsic(bcx, false, ty, llresult, C_u8(ccx, 0), C_usize(ccx, 1)); } - C_nil(ccx) + return; } // Effectively no-ops "uninit" => { - C_nil(ccx) + return; } "needs_drop" => { let tp_ty = substs.type_at(0); @@ -232,11 +229,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let tp_ty = substs.type_at(0); let mut ptr = llargs[0]; if let Some(ty) = fn_ty.ret.cast { - ptr = bcx.pointercast(ptr, ty.ptr_to()); + ptr = bcx.pointercast(ptr, ty.llvm_type(ccx).ptr_to()); } let load = bcx.volatile_load(ptr); unsafe { - llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty)); + llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty).abi() as u32); } to_immediate(bcx, load, tp_ty) }, @@ -249,19 +246,18 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let val = if fn_ty.args[1].is_indirect() { bcx.load(llargs[1], None) } else { - if !type_is_zero_size(ccx, tp_ty) { - from_immediate(bcx, llargs[1]) - } else { - C_nil(ccx) + if type_is_zero_size(ccx, tp_ty) { + return; } + from_immediate(bcx, llargs[1]) }; let ptr = bcx.pointercast(llargs[0], val_ty(val).ptr_to()); let store = bcx.volatile_store(val, ptr); unsafe { - llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty)); + llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty).abi() as u32); } } - C_nil(ccx) + return; }, "prefetch_read_data" | "prefetch_write_data" | "prefetch_read_instruction" | 
"prefetch_write_instruction" => { @@ -279,8 +275,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" => { - let sty = &arg_tys[0].sty; - match int_type_width_signed(sty, ccx) { + let ty = arg_tys[0]; + match int_type_width_signed(ty, ccx) { Some((width, signed)) => match name { "ctlz" | "cttz" => { @@ -317,7 +313,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, bcx.store(result, bcx.struct_gep(llresult, 0), None); bcx.store(overflow, bcx.struct_gep(llresult, 1), None); - C_nil(bcx.ccx) + return; }, "overflowing_add" => bcx.add(llargs[0], llargs[1]), "overflowing_sub" => bcx.sub(llargs[0], llargs[1]), @@ -347,8 +343,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, span_invalid_monomorphization_error( tcx.sess, span, &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); - C_nil(ccx) + expected basic integer type, found `{}`", name, ty)); + return; } } @@ -370,7 +366,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, tcx.sess, span, &format!("invalid monomorphization of `{}` intrinsic: \ expected basic float type, found `{}`", name, sty)); - C_nil(ccx) + return; } } @@ -399,11 +395,14 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, bcx.select(is_zero, zero, bcx.sub(offset, llargs[1])) } name if name.starts_with("simd_") => { - generic_simd_intrinsic(bcx, name, - callee_ty, - &llargs, - ret_ty, llret_ty, - span) + match generic_simd_intrinsic(bcx, name, + callee_ty, + &llargs, + ret_ty, llret_ty, + span) { + Ok(llval) => llval, + Err(()) => return + } } // This requires that atomic intrinsics follow a specific naming pattern: // "atomic_[_]", and no ordering means SeqCst @@ -437,16 +436,16 @@ pub fn trans_intrinsic_call<'a, 
'tcx>(bcx: &Builder<'a, 'tcx>, _ => ccx.sess().fatal("Atomic intrinsic not in correct format"), }; - let invalid_monomorphization = |sty| { + let invalid_monomorphization = |ty| { span_invalid_monomorphization_error(tcx.sess, span, &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); + expected basic integer type, found `{}`", name, ty)); }; match split[1] { "cxchg" | "cxchgweak" => { - let sty = &substs.type_at(0).sty; - if int_type_width_signed(sty, ccx).is_some() { + let ty = substs.type_at(0); + if int_type_width_signed(ty, ccx).is_some() { let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; let val = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order, failorder, weak); @@ -454,40 +453,41 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx)); bcx.store(result, bcx.struct_gep(llresult, 0), None); bcx.store(success, bcx.struct_gep(llresult, 1), None); + return; } else { - invalid_monomorphization(sty); + return invalid_monomorphization(ty); } - C_nil(ccx) } "load" => { - let sty = &substs.type_at(0).sty; - if int_type_width_signed(sty, ccx).is_some() { - bcx.atomic_load(llargs[0], order) + let ty = substs.type_at(0); + if int_type_width_signed(ty, ccx).is_some() { + let align = ccx.align_of(ty); + bcx.atomic_load(llargs[0], order, align) } else { - invalid_monomorphization(sty); - C_nil(ccx) + return invalid_monomorphization(ty); } } "store" => { - let sty = &substs.type_at(0).sty; - if int_type_width_signed(sty, ccx).is_some() { - bcx.atomic_store(llargs[1], llargs[0], order); + let ty = substs.type_at(0); + if int_type_width_signed(ty, ccx).is_some() { + let align = ccx.align_of(ty); + bcx.atomic_store(llargs[1], llargs[0], order, align); + return; } else { - invalid_monomorphization(sty); + return invalid_monomorphization(ty); } - C_nil(ccx) } "fence" => { bcx.atomic_fence(order, 
llvm::SynchronizationScope::CrossThread); - C_nil(ccx) + return; } "singlethreadfence" => { bcx.atomic_fence(order, llvm::SynchronizationScope::SingleThread); - C_nil(ccx) + return; } // These are all AtomicRMW ops @@ -507,12 +507,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, _ => ccx.sess().fatal("unknown atomic operation") }; - let sty = &substs.type_at(0).sty; - if int_type_width_signed(sty, ccx).is_some() { + let ty = substs.type_at(0); + if int_type_width_signed(ty, ccx).is_some() { bcx.atomic_rmw(atom_op, llargs[0], llargs[1], order) } else { - invalid_monomorphization(sty); - C_nil(ccx) + return invalid_monomorphization(ty); } } } @@ -662,16 +661,16 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let (dest, align) = lval.trans_field_ptr(bcx, i); bcx.store(val, dest, align.to_align()); } - C_nil(ccx) + return; } _ => val, } } }; - if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 { + if !fn_ty.ret.is_ignore() { if let Some(ty) = fn_ty.ret.cast { - let ptr = bcx.pointercast(llresult, ty.ptr_to()); + let ptr = bcx.pointercast(llresult, ty.llvm_type(ccx).ptr_to()); bcx.store(llval, ptr, Some(ccx.align_of(ret_ty))); } else { store_ty(bcx, llval, llresult, Alignment::AbiAligned, ret_ty); @@ -682,16 +681,15 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, allow_overlap: bool, volatile: bool, - tp_ty: Ty<'tcx>, + ty: Ty<'tcx>, dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef { let ccx = bcx.ccx; - let lltp_ty = type_of::type_of(ccx, tp_ty); - let align = C_i32(ccx, ccx.align_of(tp_ty) as i32); - let size = machine::llsize_of(ccx, lltp_ty); - let int_size = machine::llbitsize_of_real(ccx, ccx.isize_ty()); + let (size, align) = ccx.size_and_align_of(ty); + let size = C_usize(ccx, size.bytes()); + let align = C_i32(ccx, align.abi() as i32); let operation = if allow_overlap { "memmove" @@ -699,7 +697,8 @@ fn 
copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "memcpy" }; - let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size); + let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, + ccx.data_layout().pointer_size.bits()); let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx)); let src_ptr = bcx.pointercast(src, Type::i8p(ccx)); @@ -723,9 +722,9 @@ fn memset_intrinsic<'a, 'tcx>( count: ValueRef ) -> ValueRef { let ccx = bcx.ccx; - let align = C_i32(ccx, ccx.align_of(ty) as i32); - let lltp_ty = type_of::type_of(ccx, ty); - let size = machine::llsize_of(ccx, lltp_ty); + let (size, align) = ccx.size_and_align_of(ty); + let size = C_usize(ccx, size.bytes()); + let align = C_i32(ccx, align.abi() as i32); let dst = bcx.pointercast(dst, Type::i8p(ccx)); call_memset(bcx, dst, val, bcx.mul(size, count), align, volatile) } @@ -975,7 +974,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( ret_ty: Ty<'tcx>, llret_ty: Type, span: Span -) -> ValueRef { +) -> Result { // macros for error handling: macro_rules! 
emit_error { ($msg: tt) => { @@ -993,7 +992,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( ($cond: expr, $($fmt: tt)*) => { if !$cond { emit_error!($($fmt)*); - return C_nil(bcx.ccx) + return Err(()); } } } @@ -1039,12 +1038,12 @@ fn generic_simd_intrinsic<'a, 'tcx>( ret_ty, ret_ty.simd_type(tcx)); - return compare_simd_types(bcx, - llargs[0], - llargs[1], - in_elem, - llret_ty, - cmp_op) + return Ok(compare_simd_types(bcx, + llargs[0], + llargs[1], + in_elem, + llret_ty, + cmp_op)) } if name.starts_with("simd_shuffle") { @@ -1090,23 +1089,23 @@ fn generic_simd_intrinsic<'a, 'tcx>( .collect(); let indices = match indices { Some(i) => i, - None => return C_null(llret_ty) + None => return Ok(C_null(llret_ty)) }; - return bcx.shuffle_vector(llargs[0], llargs[1], C_vector(&indices)) + return Ok(bcx.shuffle_vector(llargs[0], llargs[1], C_vector(&indices))) } if name == "simd_insert" { require!(in_elem == arg_tys[2], "expected inserted type `{}` (element of input `{}`), found `{}`", in_elem, in_ty, arg_tys[2]); - return bcx.insert_element(llargs[0], llargs[2], llargs[1]) + return Ok(bcx.insert_element(llargs[0], llargs[2], llargs[1])) } if name == "simd_extract" { require!(ret_ty == in_elem, "expected return type `{}` (element of input `{}`), found `{}`", in_elem, in_ty, ret_ty); - return bcx.extract_element(llargs[0], llargs[1]) + return Ok(bcx.extract_element(llargs[0], llargs[1])) } if name == "simd_cast" { @@ -1120,7 +1119,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( // casting cares about nominal type, not just structural type let out_elem = ret_ty.simd_type(tcx); - if in_elem == out_elem { return llargs[0]; } + if in_elem == out_elem { return Ok(llargs[0]); } enum Style { Float, Int(/* is signed? 
*/ bool), Unsupported } @@ -1141,7 +1140,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( match (in_style, out_style) { (Style::Int(in_is_signed), Style::Int(_)) => { - return match in_width.cmp(&out_width) { + return Ok(match in_width.cmp(&out_width) { Ordering::Greater => bcx.trunc(llargs[0], llret_ty), Ordering::Equal => llargs[0], Ordering::Less => if in_is_signed { @@ -1149,28 +1148,28 @@ fn generic_simd_intrinsic<'a, 'tcx>( } else { bcx.zext(llargs[0], llret_ty) } - } + }) } (Style::Int(in_is_signed), Style::Float) => { - return if in_is_signed { + return Ok(if in_is_signed { bcx.sitofp(llargs[0], llret_ty) } else { bcx.uitofp(llargs[0], llret_ty) - } + }) } (Style::Float, Style::Int(out_is_signed)) => { - return if out_is_signed { + return Ok(if out_is_signed { bcx.fptosi(llargs[0], llret_ty) } else { bcx.fptoui(llargs[0], llret_ty) - } + }) } (Style::Float, Style::Float) => { - return match in_width.cmp(&out_width) { + return Ok(match in_width.cmp(&out_width) { Ordering::Greater => bcx.fptrunc(llargs[0], llret_ty), Ordering::Equal => llargs[0], Ordering::Less => bcx.fpext(llargs[0], llret_ty) - } + }) } _ => {/* Unsupported. Fallthrough. */} } @@ -1186,7 +1185,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( match in_elem.sty { $( $(ty::$p(_))|* => { - return bcx.$call(llargs[0], llargs[1]) + return Ok(bcx.$call(llargs[0], llargs[1])) } )* _ => {}, @@ -1213,15 +1212,13 @@ fn generic_simd_intrinsic<'a, 'tcx>( span_bug!(span, "unknown SIMD intrinsic"); } -// Returns the width of an int TypeVariant, and if it's signed or not +// Returns the width of an int Ty, and if it's signed or not // Returns None if the type is not an integer // FIXME: there’s multiple of this functions, investigate using some of the already existing // stuffs. 
-fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext) - -> Option<(u64, bool)> { - use rustc::ty::{TyInt, TyUint}; - match *sty { - TyInt(t) => Some((match t { +fn int_type_width_signed(ty: Ty, ccx: &CrateContext) -> Option<(u64, bool)> { + match ty.sty { + ty::TyInt(t) => Some((match t { ast::IntTy::Is => { match &ccx.tcx().sess.target.target.target_pointer_width[..] { "16" => 16, @@ -1236,7 +1233,7 @@ fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext) ast::IntTy::I64 => 64, ast::IntTy::I128 => 128, }, true)), - TyUint(t) => Some((match t { + ty::TyUint(t) => Some((match t { ast::UintTy::Us => { match &ccx.tcx().sess.target.target.target_pointer_width[..] { "16" => 16, diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 96e11d366423a..73e03dc069145 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -136,7 +136,6 @@ mod declare; mod glue; mod intrinsic; mod llvm_util; -mod machine; mod metadata; mod meth; mod mir; diff --git a/src/librustc_trans/machine.rs b/src/librustc_trans/machine.rs deleted file mode 100644 index bc383abc7e0ec..0000000000000 --- a/src/librustc_trans/machine.rs +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Information concerning the machine representation of various types. 
- -#![allow(non_camel_case_types)] - -use llvm::{self, ValueRef}; -use common::*; - -use type_::Type; - -pub type llbits = u64; -pub type llsize = u64; -pub type llalign = u32; - -// ______________________________________________________________________ -// compute sizeof / alignof - -// Returns the number of bytes between successive elements of type T in an -// array of T. This is the "ABI" size. It includes any ABI-mandated padding. -pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize { - unsafe { - return llvm::LLVMABISizeOfType(cx.td(), ty.to_ref()); - } -} - -/// Returns the "real" size of the type in bits. -pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits { - unsafe { - llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref()) - } -} - -/// Returns the size of the type as an LLVM constant integer value. -pub fn llsize_of(cx: &CrateContext, ty: Type) -> ValueRef { - // Once upon a time, this called LLVMSizeOf, which does a - // getelementptr(1) on a null pointer and casts to an int, in - // order to obtain the type size as a value without requiring the - // target data layout. But we have the target data layout, so - // there's no need for that contrivance. The instruction - // selection DAG generator would flatten that GEP(1) node into a - // constant of the type's alloc size, so let's save it some work. - return C_usize(cx, llsize_of_alloc(cx, ty)); -} - -// Returns the preferred alignment of the given type for the current target. -// The preferred alignment may be larger than the alignment used when -// packing the type into structs. This will be used for things like -// allocations inside a stack frame, which LLVM has a free hand in. -pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> llalign { - unsafe { - return llvm::LLVMPreferredAlignmentOfType(cx.td(), ty.to_ref()); - } -} - -// Returns the minimum alignment of a type required by the platform. 
-// This is the alignment that will be used for struct fields, arrays, -// and similar ABI-mandated things. -pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> llalign { - unsafe { - return llvm::LLVMABIAlignmentOfType(cx.td(), ty.to_ref()); - } -} - -pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: usize) -> u64 { - unsafe { - return llvm::LLVMOffsetOfElement(cx.td(), - struct_ty.to_ref(), - element as u32); - } -} diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index e7c5a36838c2f..a2e7eb2258fc1 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -13,11 +13,11 @@ use callee; use common::*; use builder::Builder; use consts; -use machine; use monomorphize; use type_::Type; use value::Value; use rustc::ty::{self, Ty}; +use rustc::ty::layout::HasDataLayout; use debuginfo; #[derive(Copy, Clone, Debug)] @@ -79,10 +79,11 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // Not in the cache. Build it. let nullptr = C_null(Type::nil(ccx).ptr_to()); + let (size, align) = ccx.size_and_align_of(ty); let mut components: Vec<_> = [ callee::get_fn(ccx, monomorphize::resolve_drop_in_place(ccx.tcx(), ty)), - C_usize(ccx, ccx.size_of(ty)), - C_usize(ccx, ccx.align_of(ty) as u64) + C_usize(ccx, size.bytes()), + C_usize(ccx, align.abi()) ].iter().cloned().collect(); if let Some(trait_ref) = trait_ref { @@ -97,7 +98,7 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } let vtable_const = C_struct(ccx, &components, false); - let align = machine::llalign_of_pref(ccx, val_ty(vtable_const)); + let align = ccx.data_layout().pointer_align; let vtable = consts::addr_of(ccx, vtable_const, align, "vtable"); debuginfo::create_vtable_metadata(ccx, ty, vtable); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index bd26c961bb28b..abd86a5cb01eb 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -17,12 +17,11 @@ use rustc::traits; use 
rustc::mir; use abi::{Abi, FnType, ArgType}; use adt; -use base::{self, Lifetime}; +use base; use callee; use builder::Builder; use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; -use machine::llalign_of_min; use meth; use monomorphize; use type_of; @@ -31,8 +30,6 @@ use type_::Type; use syntax::symbol::Symbol; use syntax_pos::Pos; -use std::cmp; - use super::{MirContext, LocalRef}; use super::constant::Const; use super::lvalue::{Alignment, LvalueRef}; @@ -120,7 +117,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn_ty: FnType<'tcx>, fn_ptr: ValueRef, llargs: &[ValueRef], - destination: Option<(ReturnDest, Ty<'tcx>, mir::BasicBlock)>, + destination: Option<(ReturnDest<'tcx>, Ty<'tcx>, mir::BasicBlock)>, cleanup: Option | { if let Some(cleanup) = cleanup { @@ -175,14 +172,23 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let Some(cleanup_pad) = cleanup_pad { bcx.cleanup_ret(cleanup_pad, None); } else { - let ps = self.get_personality_slot(&bcx); - let lp = bcx.load(ps, None); - Lifetime::End.call(&bcx, ps); + let slot = self.get_personality_slot(&bcx); + + let (lp0ptr, align) = slot.trans_field_ptr(&bcx, 0); + let lp0 = bcx.load(lp0ptr, align.to_align()); + + let (lp1ptr, align) = slot.trans_field_ptr(&bcx, 1); + let lp1 = bcx.load(lp1ptr, align.to_align()); + + slot.storage_dead(&bcx); + if !bcx.sess().target.target.options.custom_unwind_resume { + let mut lp = C_undef(self.landing_pad_type()); + lp = bcx.insert_value(lp, lp0, 0); + lp = bcx.insert_value(lp, lp1, 1); bcx.resume(lp); } else { - let exc_ptr = bcx.extract_value(lp, 0); - bcx.call(bcx.ccx.eh_unwind_resume(), &[exc_ptr], cleanup_bundle); + bcx.call(bcx.ccx.eh_unwind_resume(), &[lp0], cleanup_bundle); bcx.unreachable(); } } @@ -245,8 +251,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } }; let load = bcx.load( - bcx.pointercast(llslot, cast_ty.ptr_to()), - Some(ret.layout.align(bcx.ccx).abi() as u32)); + bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()), + 
Some(ret.layout.align(bcx.ccx))); load } else { let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER)); @@ -336,6 +342,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let filename = C_str_slice(bcx.ccx, filename); let line = C_u32(bcx.ccx, loc.line as u32); let col = C_u32(bcx.ccx, loc.col.to_usize() as u32 + 1); + let align = tcx.data_layout.aggregate_align + .max(tcx.data_layout.i32_align) + .max(tcx.data_layout.pointer_align); // Put together the arguments to the panic entry point. let (lang_item, args, const_err) = match *msg { @@ -351,7 +360,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { })); let file_line_col = C_struct(bcx.ccx, &[filename, line, col], false); - let align = llalign_of_min(bcx.ccx, common::val_ty(file_line_col)); let file_line_col = consts::addr_of(bcx.ccx, file_line_col, align, @@ -366,7 +374,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let msg_file_line_col = C_struct(bcx.ccx, &[msg_str, filename, line, col], false); - let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line_col)); let msg_file_line_col = consts::addr_of(bcx.ccx, msg_file_line_col, align, @@ -387,7 +394,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let msg_file_line_col = C_struct(bcx.ccx, &[msg_str, filename, line, col], false); - let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line_col)); let msg_file_line_col = consts::addr_of(bcx.ccx, msg_file_line_col, align, @@ -552,7 +558,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { ReturnDest::Nothing => { (C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to()), &llargs[..]) } - ReturnDest::IndirectOperand(dst, _) | + ReturnDest::IndirectOperand(dst, _) => (dst.llval, &llargs[..]), ReturnDest::Store(dst) => (dst, &llargs[..]), ReturnDest::DirectOperand(_) => bug!("Cannot use direct operand with an intrinsic call") @@ -566,7 +572,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let ReturnDest::IndirectOperand(dst, _) = ret_dest { // Make a fake operand for store_return let op = OperandRef { - val: Ref(dst, 
Alignment::AbiAligned), + val: Ref(dst.llval, Alignment::AbiAligned), ty: sig.output(), }; self.store_return(&bcx, ret_dest, &fn_ty.ret, op); @@ -633,7 +639,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { - llargs.push(C_undef(ty)); + llargs.push(C_undef(ty.llvm_type(bcx.ccx))); } if arg.is_ignore() { @@ -651,13 +657,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { (op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false) } } - Ref(llval, Alignment::Packed) if arg.is_indirect() => { + Ref(llval, align @ Alignment::Packed) if arg.is_indirect() => { // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't // have scary latent bugs around. let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None); - base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1)); + base::memcpy_ty(bcx, llscratch, llval, op.ty, align.to_align()); (llscratch, Alignment::AbiAligned, true) } Ref(llval, align) => (llval, align, true) @@ -670,8 +676,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None); llval = bcx.trunc(llval, Type::i1(bcx.ccx)); } else if let Some(ty) = arg.cast { - llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()), - align.min_with(arg.layout.align(bcx.ccx).abi() as u32)); + llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()), + align.min_with(Some(arg.layout.align(bcx.ccx)))); } else { llval = bcx.load(llval, align.to_align()); } @@ -759,14 +765,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } - fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> ValueRef { + fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> LvalueRef<'tcx> { let ccx = bcx.ccx; - if let Some(slot) = self.llpersonalityslot { + if let Some(slot) = self.personality_slot { slot } else { - let llretty = Type::struct_(ccx, &[Type::i8p(ccx), 
Type::i32(ccx)], false); - let slot = bcx.alloca(llretty, "personalityslot", None); - self.llpersonalityslot = Some(slot); + let ty = ccx.tcx().intern_tup(&[ + ccx.tcx().mk_mut_ptr(ccx.tcx().types.u8), + ccx.tcx().types.i32 + ], false); + let slot = LvalueRef::alloca(bcx, ty, "personalityslot"); + self.personality_slot = Some(slot); slot } } @@ -794,16 +803,26 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let ccx = bcx.ccx; let llpersonality = self.ccx.eh_personality(); - let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.llfn); - bcx.set_cleanup(llretval); + let llretty = self.landing_pad_type(); + let lp = bcx.landing_pad(llretty, llpersonality, 1, self.llfn); + bcx.set_cleanup(lp); + let slot = self.get_personality_slot(&bcx); - Lifetime::Start.call(&bcx, slot); - bcx.store(llretval, slot, None); + slot.storage_live(&bcx); + self.store_operand(&bcx, slot.llval, None, OperandRef { + val: Pair(bcx.extract_value(lp, 0), bcx.extract_value(lp, 1)), + ty: slot.ty.to_ty(ccx.tcx()) + }); + bcx.br(target_bb); bcx.llbb() } + fn landing_pad_type(&self) -> Type { + let ccx = self.ccx; + Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false) + } + fn unreachable_block(&mut self) -> BasicBlockRef { self.unreachable_block.unwrap_or_else(|| { let bl = self.new_block("unreachable"); @@ -825,7 +844,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>, dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, - llargs: &mut Vec, is_intrinsic: bool) -> ReturnDest { + llargs: &mut Vec, is_intrinsic: bool) + -> ReturnDest<'tcx> { // If the return is ignored, we can just return a do-nothing ReturnDest if fn_ret_ty.is_ignore() { return ReturnDest::Nothing; @@ -841,14 +861,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Odd, but possible, case, we have an operand temporary, // but the calling convention has an indirect return. 
let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret"); + tmp.storage_live(bcx); llargs.push(tmp.llval); - ReturnDest::IndirectOperand(tmp.llval, index) + ReturnDest::IndirectOperand(tmp, index) } else if is_intrinsic { // Currently, intrinsics always need a location to store // the result. so we create a temporary alloca for the // result let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret"); - ReturnDest::IndirectOperand(tmp.llval, index) + tmp.storage_live(bcx); + ReturnDest::IndirectOperand(tmp, index) } else { ReturnDest::DirectOperand(index) }; @@ -891,8 +913,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let lvalue_ty = self.monomorphized_lvalue_ty(dst); assert!(!lvalue_ty.has_erasable_regions()); let lvalue = LvalueRef::alloca(bcx, lvalue_ty, "transmute_temp"); + lvalue.storage_live(bcx); self.trans_transmute_into(bcx, src, &lvalue); let op = self.trans_load(bcx, lvalue.llval, lvalue.alignment, lvalue_ty); + lvalue.storage_dead(bcx); self.locals[index] = LocalRef::Operand(Some(op)); } LocalRef::Operand(Some(_)) => { @@ -915,15 +939,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); let in_type = val.ty; let out_type = dst.ty.to_ty(bcx.tcx()); - let llalign = cmp::min(bcx.ccx.align_of(in_type), bcx.ccx.align_of(out_type)); - self.store_operand(bcx, cast_ptr, Some(llalign), val); + let align = bcx.ccx.align_of(in_type).min(bcx.ccx.align_of(out_type)); + self.store_operand(bcx, cast_ptr, Some(align), val); } // Stores the return value of a function call into it's final location. 
fn store_return(&mut self, bcx: &Builder<'a, 'tcx>, - dest: ReturnDest, + dest: ReturnDest<'tcx>, ret_ty: &ArgType<'tcx>, op: OperandRef<'tcx>) { use self::ReturnDest::*; @@ -932,15 +956,19 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { Nothing => (), Store(dst) => ret_ty.store(bcx, op.immediate(), dst), IndirectOperand(tmp, index) => { - let op = self.trans_load(bcx, tmp, Alignment::AbiAligned, op.ty); + let op = self.trans_load(bcx, tmp.llval, Alignment::AbiAligned, op.ty); + tmp.storage_dead(bcx); self.locals[index] = LocalRef::Operand(Some(op)); } DirectOperand(index) => { // If there is a cast, we have to store and reload. let op = if ret_ty.cast.is_some() { let tmp = LvalueRef::alloca(bcx, op.ty, "tmp_ret"); + tmp.storage_live(bcx); ret_ty.store(bcx, op.immediate(), tmp.llval); - self.trans_load(bcx, tmp.llval, tmp.alignment, op.ty) + let op = self.trans_load(bcx, tmp.llval, tmp.alignment, op.ty); + tmp.storage_dead(bcx); + op } else { op.unpack_if_pair(bcx) }; @@ -950,13 +978,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } -enum ReturnDest { +enum ReturnDest<'tcx> { // Do nothing, the return value is indirect or ignored Nothing, // Store the return value to the pointer Store(ValueRef), // Stores an indirect return value to an operand local lvalue - IndirectOperand(ValueRef, mir::Local), + IndirectOperand(LvalueRef<'tcx>, mir::Local), // Stores a direct return value to an operand local lvalue DirectOperand(mir::Local) } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 67fdc1e640a95..57c131a106b3b 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -18,12 +18,12 @@ use rustc::traits; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::{self, LayoutTyper, Size}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::subst::{Kind, Substs, Subst}; use rustc_apfloat::{ieee, Float, 
Status}; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use {adt, base, machine}; +use {adt, base}; use abi::{self, Abi}; use callee; use builder::Builder; @@ -100,9 +100,11 @@ impl<'tcx> Const<'tcx> { ConstVal::Bool(v) => C_bool(ccx, v), ConstVal::Integral(ref i) => return Const::from_constint(ccx, i), ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()), - ConstVal::ByteStr(v) => consts::addr_of(ccx, C_bytes(ccx, v.data), 1, "byte_str"), + ConstVal::ByteStr(v) => { + consts::addr_of(ccx, C_bytes(ccx, v.data), ccx.align_of(ty), "byte_str") + } ConstVal::Char(c) => C_uint(Type::char(ccx), c as u64), - ConstVal::Function(..) => C_null(type_of::type_of(ccx, ty)), + ConstVal::Function(..) => C_null(llty), ConstVal::Variant(_) | ConstVal::Aggregate(..) | ConstVal::Unevaluated(..) => { @@ -368,12 +370,12 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { match &tcx.item_name(def_id)[..] { "size_of" => { let llval = C_usize(self.ccx, - self.ccx.size_of(substs.type_at(0))); + self.ccx.size_of(substs.type_at(0)).bytes()); Ok(Const::new(llval, tcx.types.usize)) } "min_align_of" => { let llval = C_usize(self.ccx, - self.ccx.align_of(substs.type_at(0)) as u64); + self.ccx.align_of(substs.type_at(0)).abi()); Ok(Const::new(llval, tcx.types.usize)) } _ => span_bug!(span, "{:?} in constant", terminator.kind) @@ -590,7 +592,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { self.const_array(dest_ty, &fields) } - mir::Rvalue::Aggregate(ref kind, ref operands) => { + mir::Rvalue::Aggregate(box mir::AggregateKind::Array(_), ref operands) => { // Make sure to evaluate all operands to // report as many errors as we possibly can. let mut fields = Vec::with_capacity(operands.len()); @@ -603,17 +605,23 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } failure?; - match **kind { - mir::AggregateKind::Array(_) => { - self.const_array(dest_ty, &fields) - } - mir::AggregateKind::Adt(..) | - mir::AggregateKind::Closure(..) | - mir::AggregateKind::Generator(..) 
| - mir::AggregateKind::Tuple => { - Const::new(trans_const(self.ccx, dest_ty, kind, &fields), dest_ty) + self.const_array(dest_ty, &fields) + } + + mir::Rvalue::Aggregate(ref kind, ref operands) => { + // Make sure to evaluate all operands to + // report as many errors as we possibly can. + let mut fields = Vec::with_capacity(operands.len()); + let mut failure = Ok(()); + for operand in operands { + match self.const_operand(operand, span) { + Ok(val) => fields.push(val), + Err(err) => if failure.is_ok() { failure = Err(err); } } } + failure?; + + trans_const_adt(self.ccx, dest_ty, kind, &fields) } mir::Rvalue::Cast(ref kind, ref source, cast_ty) => { @@ -780,7 +788,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let align = if self.ccx.shared().type_is_sized(ty) { self.ccx.align_of(ty) } else { - self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign + self.ccx.tcx().data_layout.pointer_align }; if bk == mir::BorrowKind::Mut { consts::addr_of_mut(self.ccx, llval, align, "ref_mut") @@ -860,7 +868,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { assert!(self.ccx.shared().type_is_sized(ty)); - let llval = C_usize(self.ccx, self.ccx.size_of(ty)); + let llval = C_usize(self.ccx, self.ccx.size_of(ty).bytes()); Const::new(llval, tcx.types.usize) } @@ -1099,12 +1107,12 @@ pub fn trans_static_initializer<'a, 'tcx>( /// Currently the returned value has the same size as the type, but /// this could be changed in the future to avoid allocating unnecessary /// space after values of shorter-than-maximum cases. 
-fn trans_const<'a, 'tcx>( +fn trans_const_adt<'a, 'tcx>( ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, kind: &mir::AggregateKind, - vals: &[ValueRef] -) -> ValueRef { + vals: &[Const<'tcx>] +) -> Const<'tcx> { let l = ccx.layout_of(t); let variant_index = match *kind { mir::AggregateKind::Adt(_, index, _, _) => index, @@ -1121,112 +1129,97 @@ fn trans_const<'a, 'tcx>( }; assert_eq!(vals.len(), 0); adt::assert_discr_in_range(min, max, discr); - C_int(Type::from_integer(ccx, d), discr as i64) + Const::new(C_int(Type::from_integer(ccx, d), discr as i64), t) } layout::General { discr: d, ref variants, .. } => { let variant = &variants[variant_index]; let lldiscr = C_int(Type::from_integer(ccx, d), variant_index as i64); - let mut vals_with_discr = vec![lldiscr]; + let mut vals_with_discr = vec![ + Const::new(lldiscr, d.to_ty(ccx.tcx(), false)) + ]; vals_with_discr.extend_from_slice(vals); - let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]); - let needed_padding = l.size(ccx).bytes() - variant.stride().bytes(); - if needed_padding > 0 { - contents.push(padding(ccx, needed_padding)); - } - C_struct(ccx, &contents[..], false) + build_const_struct(ccx, l, &variant, &vals_with_discr) } layout::UntaggedUnion { ref variants, .. }=> { assert_eq!(variant_index, 0); - let contents = build_const_union(ccx, variants, vals[0]); - C_struct(ccx, &contents, variants.packed) + let mut contents = vec![vals[0].llval]; + + let offset = ccx.size_of(vals[0].ty); + let size = variants.stride(); + if offset != size { + contents.push(padding(ccx, size - offset)); + } + + Const::new(C_struct(ccx, &contents, variants.packed), t) } layout::Univariant { ref variant, .. } => { assert_eq!(variant_index, 0); - let contents = build_const_struct(ccx, &variant, vals); - C_struct(ccx, &contents[..], variant.packed) + build_const_struct(ccx, l, &variant, vals) } layout::Vector { .. 
} => { - C_vector(vals) + Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::>()), t) } layout::RawNullablePointer { nndiscr, .. } => { if variant_index as u64 == nndiscr { assert_eq!(vals.len(), 1); - vals[0] + Const::new(vals[0].llval, t) } else { - C_null(type_of::type_of(ccx, t)) + Const::new(C_null(type_of::type_of(ccx, t)), t) } } layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { if variant_index as u64 == nndiscr { - C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false) + build_const_struct(ccx, l, &nonnull, vals) } else { // Always use null even if it's not the `discrfield`th // field; see #8506. - C_null(type_of::type_of(ccx, t)) + Const::new(C_null(type_of::type_of(ccx, t)), t) } } - _ => bug!("trans_const: cannot handle type {} repreented as {:#?}", t, l) + _ => bug!("trans_const_adt: cannot handle type {} repreented as {:#?}", t, l) } } /// Building structs is a little complicated, because we might need to /// insert padding if a field's value is less aligned than its type. /// -/// Continuing the example from `trans_const`, a value of type `(u32, +/// Continuing the example from `trans_const_adt`, a value of type `(u32, /// E)` should have the `E` at offset 8, but if that field's /// initializer is 4-byte aligned then simply translating the tuple as /// a two-element struct will locate it at offset 4, and accesses to it /// will read the wrong memory. 
fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + layout: layout::TyLayout<'tcx>, st: &layout::Struct, - vals: &[ValueRef]) - -> Vec { + vals: &[Const<'tcx>]) + -> Const<'tcx> { assert_eq!(vals.len(), st.offsets.len()); - if vals.len() == 0 { - return Vec::new(); - } - // offset of current value - let mut offset = 0; + let mut offset = Size::from_bytes(0); let mut cfields = Vec::new(); cfields.reserve(st.offsets.len()*2); let parts = st.field_index_by_increasing_offset().map(|i| { - (&vals[i], st.offsets[i].bytes()) + (vals[i], st.offsets[i]) }); - for (&val, target_offset) in parts { + for (val, target_offset) in parts { if offset < target_offset { cfields.push(padding(ccx, target_offset - offset)); - offset = target_offset; } - assert!(!is_undef(val)); - cfields.push(val); - offset += machine::llsize_of_alloc(ccx, val_ty(val)); - } - - if offset < st.stride().bytes() { - cfields.push(padding(ccx, st.stride().bytes() - offset)); + assert!(!is_undef(val.llval)); + cfields.push(val.llval); + offset = target_offset + ccx.size_of(val.ty); } - cfields -} - -fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - un: &layout::Union, - field_val: ValueRef) - -> Vec { - let mut cfields = vec![field_val]; - - let offset = machine::llsize_of_alloc(ccx, val_ty(field_val)); - let size = un.stride().bytes(); - if offset != size { + let size = layout.size(ccx); + if offset < size { cfields.push(padding(ccx, size - offset)); } - cfields + Const::new(C_struct(ccx, &cfields, st.packed), layout.ty) } -fn padding(ccx: &CrateContext, size: u64) -> ValueRef { - C_undef(Type::array(&Type::i8(ccx), size)) +fn padding(ccx: &CrateContext, size: Size) -> ValueRef { + C_undef(Type::array(&Type::i8(ccx), size.bytes())) } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 5faaef6ebff42..376d42c71adfe 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -10,7 +10,7 @@ use llvm::{self, ValueRef}; use 
rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::{self, Align, LayoutTyper}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; @@ -19,7 +19,6 @@ use base; use builder::Builder; use common::{self, CrateContext, C_usize, C_u8, C_i32, C_int, C_null, val_ty}; use consts; -use machine; use type_of; use type_::Type; use value::Value; @@ -56,18 +55,15 @@ impl Alignment { } } - pub fn to_align(self) -> Option { + pub fn to_align(self) -> Option { match self { - Alignment::Packed => Some(1), + Alignment::Packed => Some(Align::from_bytes(1, 1).unwrap()), Alignment::AbiAligned => None, } } - pub fn min_with(self, align: u32) -> Option { - match self { - Alignment::Packed => Some(1), - Alignment::AbiAligned => Some(align), - } + pub fn min_with(self, align: Option) -> Option { + self.to_align().or(align) } } @@ -153,7 +149,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // The unit-like case might have a nonzero number of unit-like fields. // (e.d., Result of Either with (), as one side.) let ty = type_of::type_of(ccx, fty); - assert_eq!(machine::llsize_of_alloc(ccx, ty), 0); + assert_eq!(ccx.size_of(fty).bytes(), 0); return (bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed); } layout::RawNullablePointer { .. } => { @@ -174,7 +170,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let ptr_val = if let layout::General { discr, .. 
} = *l { let variant_ty = Type::struct_(ccx, &adt::struct_llfields(ccx, l.ty, l.variant_index.unwrap(), st, - Some(discr.to_ty(&bcx.tcx(), false))), st.packed); + Some(discr.to_ty(bcx.tcx(), false))), st.packed); bcx.pointercast(self.llval, variant_ty.ptr_to()) } else { self.llval @@ -374,6 +370,14 @@ impl<'a, 'tcx> LvalueRef<'tcx> { bcx.inbounds_gep(self.llval, &[zero, llindex]) } } + + pub fn storage_live(&self, bcx: &Builder<'a, 'tcx>) { + bcx.lifetime_start(self.llval, bcx.ccx.size_of(self.ty.to_ty(bcx.tcx()))); + } + + pub fn storage_dead(&self, bcx: &Builder<'a, 'tcx>) { + bcx.lifetime_end(self.llval, bcx.ccx.size_of(self.ty.to_ty(bcx.tcx()))); + } } impl<'a, 'tcx> MirContext<'a, 'tcx> { @@ -432,7 +436,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::ProjectionElem::Index(index) => { let index = &mir::Operand::Consume(mir::Lvalue::Local(index)); let index = self.trans_operand(bcx, index); - let llindex = self.prepare_index(bcx, index.immediate()); + let llindex = index.immediate(); ((tr_base.project_index(bcx, llindex), align), ptr::null_mut()) } mir::ProjectionElem::ConstantIndex { offset, @@ -487,22 +491,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { result } - /// Adjust the bitwidth of an index since LLVM is less forgiving - /// than we are. - /// - /// nmatsakis: is this still necessary? Not sure. 
- fn prepare_index(&mut self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef { - let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex)); - let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.isize_ty()); - if index_size < int_size { - bcx.zext(llindex, bcx.ccx.isize_ty()) - } else if index_size > int_size { - bcx.trunc(llindex, bcx.ccx.isize_ty()) - } else { - llindex - } - } - pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> { let tcx = self.ccx.tcx(); let lvalue_ty = lvalue.ty(self.mir, tcx); diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 59da80035fd36..1cb13c973f9b8 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -61,7 +61,7 @@ pub struct MirContext<'a, 'tcx:'a> { /// don't really care about it very much. Anyway, this value /// contains an alloca into which the personality is stored and /// then later loaded when generating the DIVERGE_BLOCK. - llpersonalityslot: Option, + personality_slot: Option>, /// A `Block` for each MIR `BasicBlock` blocks: IndexVec, @@ -177,9 +177,8 @@ enum LocalRef<'tcx> { Operand(Option>), } -impl<'tcx> LocalRef<'tcx> { - fn new_operand<'a>(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> LocalRef<'tcx> { +impl<'a, 'tcx> LocalRef<'tcx> { + fn new_operand(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> LocalRef<'tcx> { if common::type_is_zero_size(ccx, ty) { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but @@ -232,7 +231,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( llfn, fn_ty, ccx, - llpersonalityslot: None, + personality_slot: None, blocks: block_bcxs, unreachable_block: None, cleanup_kinds, @@ -470,7 +469,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let val = if common::type_is_fat_ptr(bcx.ccx, arg_ty) { let meta = &mircx.fn_ty.args[idx]; idx += 1; - assert_eq!((meta.cast, meta.pad), (None, None)); + assert!(meta.cast.is_none() && 
meta.pad.is_none()); let llmeta = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 9ce1749190ba1..47350d0712544 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -10,7 +10,7 @@ use llvm::ValueRef; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{Layout, LayoutTyper}; +use rustc::ty::layout::{Align, Layout, LayoutTyper}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; @@ -310,7 +310,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn store_operand(&mut self, bcx: &Builder<'a, 'tcx>, lldest: ValueRef, - align: Option, + align: Option, operand: OperandRef<'tcx>) { debug!("store_operand: operand={:?}, align={:?}", operand, align); // Avoid generating stores of zero-sized values, because the only way to have a zero-sized @@ -319,10 +319,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return; } match operand.val { - OperandValue::Ref(r, Alignment::Packed) => - base::memcpy_ty(bcx, lldest, r, operand.ty, Some(1)), - OperandValue::Ref(r, Alignment::AbiAligned) => - base::memcpy_ty(bcx, lldest, r, operand.ty, align), + OperandValue::Ref(r, source_align) => + base::memcpy_ty(bcx, lldest, r, operand.ty, + source_align.min_with(align)), OperandValue::Immediate(s) => { bcx.store(base::from_immediate(bcx, s), lldest, align); } @@ -331,7 +330,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { Layout::Univariant { ref variant, .. 
} => { (adt::struct_llfields_index(variant, 0), adt::struct_llfields_index(variant, 1), - if variant.packed { Some(1) } else { None }) + if variant.packed { Some(variant.align) } else { None }) } _ => (0, 1, align) }; diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index bc263fd60a25c..7e4b723575031 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -22,10 +22,10 @@ use std::{u128, i128}; use base; use builder::Builder; use callee; -use common::{self, val_ty, C_bool, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral}; +use common::{self, val_ty}; +use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral}; use consts; use adt; -use machine; use monomorphize; use type_::Type; use type_of; @@ -104,33 +104,31 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } let tr_elem = self.trans_operand(&bcx, elem); - let size = count.as_u64(); - let size = C_usize(bcx.ccx, size); + let count = count.as_u64(); + let count = C_usize(bcx.ccx, count); let base = base::get_dataptr(&bcx, dest.llval); let align = dest.alignment.to_align(); if let OperandValue::Immediate(v) = tr_elem.val { + let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); + let align = C_i32(bcx.ccx, align.abi() as i32); + let size = C_usize(bcx.ccx, bcx.ccx.size_of(dest_ty).bytes()); + // Use llvm.memset.p0i8.* to initialize all zero arrays if common::is_const_integral(v) && common::const_to_uint(v) == 0 { - let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); - let align = C_i32(bcx.ccx, align as i32); - let ty = type_of::type_of(bcx.ccx, dest_ty); - let size = machine::llsize_of(bcx.ccx, ty); - let fill = C_uint(Type::i8(bcx.ccx), 0); + let fill = C_u8(bcx.ccx, 0); base::call_memset(&bcx, base, fill, size, align, false); return bcx; } // Use llvm.memset.p0i8.* to initialize byte arrays if common::val_ty(v) == Type::i8(bcx.ccx) { - let align = align.unwrap_or_else(|| 
bcx.ccx.align_of(tr_elem.ty)); - let align = C_i32(bcx.ccx, align as i32); base::call_memset(&bcx, base, v, size, align, false); return bcx; } } - tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| { + tvec::slice_for_each(&bcx, base, tr_elem.ty, count, |bcx, llslot, loop_bb| { self.store_operand(bcx, llslot, align, tr_elem); bcx.br(loop_bb); }) @@ -459,7 +457,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { assert!(bcx.ccx.shared().type_is_sized(ty)); - let val = C_usize(bcx.ccx, bcx.ccx.size_of(ty)); + let val = C_usize(bcx.ccx, bcx.ccx.size_of(ty).bytes()); let tcx = bcx.tcx(); (bcx, OperandRef { val: OperandValue::Immediate(val), @@ -469,12 +467,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); - let llty = type_of::type_of(bcx.ccx, content_ty); - let llsize = machine::llsize_of(bcx.ccx, llty); - let align = bcx.ccx.align_of(content_ty); - let llalign = C_usize(bcx.ccx, align as u64); - let llty_ptr = llty.ptr_to(); + let (size, align) = bcx.ccx.size_and_align_of(content_ty); + let llsize = C_usize(bcx.ccx, size.bytes()); + let llalign = C_usize(bcx.ccx, align.abi()); let box_ty = bcx.tcx().mk_box(content_ty); + let llty_ptr = type_of::type_of(bcx.ccx, box_ty); // Allocate space: let def_id = match bcx.tcx().lang_items().require(ExchangeMallocFnLangItem) { diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 6e9b1f36c2cab..2559b21c46b06 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -10,7 +10,6 @@ use rustc::mir; -use base; use asm; use common; use builder::Builder; @@ -63,10 +62,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx } mir::StatementKind::StorageLive(local) => { - self.trans_storage_liveness(bcx, local, base::Lifetime::Start) + if let LocalRef::Lvalue(tr_lval) = self.locals[local] { + 
tr_lval.storage_live(&bcx); + } + bcx } mir::StatementKind::StorageDead(local) => { - self.trans_storage_liveness(bcx, local, base::Lifetime::End) + if let LocalRef::Lvalue(tr_lval) = self.locals[local] { + tr_lval.storage_dead(&bcx); + } + bcx } mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { let outputs = outputs.iter().map(|output| { @@ -86,15 +91,4 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::StatementKind::Nop => bcx, } } - - fn trans_storage_liveness(&self, - bcx: Builder<'a, 'tcx>, - index: mir::Local, - intrinsic: base::Lifetime) - -> Builder<'a, 'tcx> { - if let LocalRef::Lvalue(tr_lval) = self.locals[index] { - intrinsic.call(&bcx, tr_lval.llval); - } - bcx - } } diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index cac09a81361f0..f74aec07087c7 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -11,9 +11,8 @@ use abi::FnType; use adt; use common::*; -use machine; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::LayoutTyper; +use rustc::ty::layout::{Align, LayoutTyper, Size}; use trans_item::DefPathBasedNames; use type_::Type; @@ -212,19 +211,26 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> } impl<'a, 'tcx> CrateContext<'a, 'tcx> { - pub fn align_of(&self, ty: Ty<'tcx>) -> machine::llalign { - self.layout_of(ty).align(self).abi() as machine::llalign + pub fn align_of(&self, ty: Ty<'tcx>) -> Align { + self.layout_of(ty).align(self) } - pub fn size_of(&self, ty: Ty<'tcx>) -> machine::llsize { - self.layout_of(ty).size(self).bytes() as machine::llsize + pub fn size_of(&self, ty: Ty<'tcx>) -> Size { + self.layout_of(ty).size(self) } - pub fn over_align_of(&self, t: Ty<'tcx>) - -> Option { + pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) { + let layout = self.layout_of(ty); + (layout.size(self), layout.align(self)) + } + + /// Returns alignment if it is different than the primitive alignment. 
+ pub fn over_align_of(&self, t: Ty<'tcx>) -> Option { let layout = self.layout_of(t); - if let Some(align) = layout.over_align(&self.tcx().data_layout) { - Some(align as machine::llalign) + let align = layout.align(self); + let primitive_align = layout.primitive_align(self); + if align != primitive_align { + Some(align) } else { None } diff --git a/src/rustllvm/RustWrapper.cpp b/src/rustllvm/RustWrapper.cpp index 20ea8d7030220..c8d974febf23e 100644 --- a/src/rustllvm/RustWrapper.cpp +++ b/src/rustllvm/RustWrapper.cpp @@ -257,21 +257,18 @@ extern "C" void LLVMRustSetHasUnsafeAlgebra(LLVMValueRef V) { extern "C" LLVMValueRef LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMValueRef Source, const char *Name, - LLVMAtomicOrdering Order, unsigned Alignment) { + LLVMAtomicOrdering Order) { LoadInst *LI = new LoadInst(unwrap(Source), 0); LI->setAtomic(fromRust(Order)); - LI->setAlignment(Alignment); return wrap(unwrap(B)->Insert(LI, Name)); } extern "C" LLVMValueRef LLVMRustBuildAtomicStore(LLVMBuilderRef B, LLVMValueRef V, LLVMValueRef Target, - LLVMAtomicOrdering Order, - unsigned Alignment) { + LLVMAtomicOrdering Order) { StoreInst *SI = new StoreInst(unwrap(V), unwrap(Target)); SI->setAtomic(fromRust(Order)); - SI->setAlignment(Alignment); return wrap(unwrap(B)->Insert(SI)); } From 386d59dc893375e4386d4f52b79b6c65a593be00 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 18 Jun 2017 16:59:51 +0300 Subject: [PATCH 04/69] rustc_trans: use a predictable layout for constant ADTs. 
--- src/librustc_llvm/ffi.rs | 3 - src/librustc_trans/adt.rs | 9 ++- src/librustc_trans/builder.rs | 2 - src/librustc_trans/common.rs | 21 ++---- src/librustc_trans/intrinsic.rs | 3 +- src/librustc_trans/mir/constant.rs | 104 ++++++++++++----------------- src/librustc_trans/mir/lvalue.rs | 4 +- src/librustc_trans/mir/operand.rs | 14 ++-- src/test/codegen/consts.rs | 4 +- src/test/codegen/link_section.rs | 4 +- 10 files changed, 68 insertions(+), 100 deletions(-) diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index 48f8094f98d87..d800129b0c07f 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -611,10 +611,7 @@ extern "C" { pub fn LLVMConstNull(Ty: TypeRef) -> ValueRef; pub fn LLVMConstICmp(Pred: IntPredicate, V1: ValueRef, V2: ValueRef) -> ValueRef; pub fn LLVMConstFCmp(Pred: RealPredicate, V1: ValueRef, V2: ValueRef) -> ValueRef; - // only for isize/vector pub fn LLVMGetUndef(Ty: TypeRef) -> ValueRef; - pub fn LLVMIsNull(Val: ValueRef) -> Bool; - pub fn LLVMIsUndef(Val: ValueRef) -> Bool; // Operations on metadata pub fn LLVMMDStringInContext(C: ContextRef, Str: *const c_char, SLen: c_uint) -> ValueRef; diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index b5b90753553f3..9d693e098cff0 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -198,9 +198,14 @@ fn union_fill(cx: &CrateContext, size: Size, align: Align) -> Type { Type::array(&elem_ty, size / abi_align) } -// Lookup `Struct::memory_index` and double it to account for padding +/// Double an index to account for padding. +pub fn memory_index_to_gep(index: usize) -> usize { + index * 2 +} + +/// Lookup `Struct::memory_index`, double it to account for padding. 
pub fn struct_llfields_index(variant: &layout::Struct, index: usize) -> usize { - (variant.memory_index[index] as usize) << 1 + memory_index_to_gep(variant.memory_index[index] as usize) } pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index c8d8984122fdd..2b632ba6f2603 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -1150,14 +1150,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { pub fn add_case(&self, s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) { unsafe { - if llvm::LLVMIsUndef(s) == llvm::True { return; } llvm::LLVMAddCase(s, on_val, dest) } } pub fn add_incoming_to_phi(&self, phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { unsafe { - if llvm::LLVMIsUndef(phi) == llvm::True { return; } llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index e3ee8f7c75a8a..659ce0f7f9f43 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -356,13 +356,13 @@ pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef { } } -pub fn const_get_elt(v: ValueRef, us: &[c_uint]) - -> ValueRef { +pub fn const_get_elt(v: ValueRef, i: usize) -> ValueRef { unsafe { + let us = &[i as c_uint]; let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); - debug!("const_get_elt(v={:?}, us={:?}, r={:?})", - Value(v), us, Value(r)); + debug!("const_get_elt(v={:?}, i={}, r={:?})", + Value(v), i, Value(r)); r } @@ -402,19 +402,6 @@ pub fn const_to_opt_u128(v: ValueRef, sign_ext: bool) -> Option { } } -pub fn is_undef(val: ValueRef) -> bool { - unsafe { - llvm::LLVMIsUndef(val) != False - } -} - -#[allow(dead_code)] // potentially useful -pub fn is_null(val: ValueRef) -> bool { - unsafe { - llvm::LLVMIsNull(val) != False - } -} - pub fn langcall(tcx: TyCtxt, span: Option, msg: &str, diff --git a/src/librustc_trans/intrinsic.rs 
b/src/librustc_trans/intrinsic.rs index c66a8ae2fcc4e..711854e28a899 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -11,7 +11,6 @@ #![allow(non_upper_case_globals)] use intrinsics::{self, Intrinsic}; -use libc; use llvm; use llvm::{ValueRef}; use abi::{Abi, FnType}; @@ -1072,7 +1071,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( let indices: Option> = (0..n) .map(|i| { let arg_idx = i; - let val = const_get_elt(vector, &[i as libc::c_uint]); + let val = const_get_elt(vector, i); match const_to_opt_u128(val, true) { None => { emit_error!("shuffle index #{} is not a constant", arg_idx); diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 57c131a106b3b..fa0558faad257 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -29,7 +29,7 @@ use callee; use builder::Builder; use common::{self, CrateContext, const_get_elt, val_ty}; use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u64}; -use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, is_undef}; +use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector}; use common::const_to_opt_u128; use consts; use type_of; @@ -55,7 +55,7 @@ pub struct Const<'tcx> { pub ty: Ty<'tcx> } -impl<'tcx> Const<'tcx> { +impl<'a, 'tcx> Const<'tcx> { pub fn new(llval: ValueRef, ty: Ty<'tcx>) -> Const<'tcx> { Const { llval, @@ -63,8 +63,7 @@ impl<'tcx> Const<'tcx> { } } - pub fn from_constint<'a>(ccx: &CrateContext<'a, 'tcx>, ci: &ConstInt) - -> Const<'tcx> { + pub fn from_constint(ccx: &CrateContext<'a, 'tcx>, ci: &ConstInt) -> Const<'tcx> { let tcx = ccx.tcx(); let (llval, ty) = match *ci { I8(v) => (C_int(Type::i8(ccx), v as i64), tcx.types.i8), @@ -84,10 +83,10 @@ impl<'tcx> Const<'tcx> { } /// Translate ConstVal into a LLVM constant value. 
- pub fn from_constval<'a>(ccx: &CrateContext<'a, 'tcx>, - cv: &ConstVal, - ty: Ty<'tcx>) - -> Const<'tcx> { + pub fn from_constval(ccx: &CrateContext<'a, 'tcx>, + cv: &ConstVal, + ty: Ty<'tcx>) + -> Const<'tcx> { let llty = type_of::type_of(ccx, ty); let val = match *cv { ConstVal::Float(v) => { @@ -104,7 +103,7 @@ impl<'tcx> Const<'tcx> { consts::addr_of(ccx, C_bytes(ccx, v.data), ccx.align_of(ty), "byte_str") } ConstVal::Char(c) => C_uint(Type::char(ccx), c as u64), - ConstVal::Function(..) => C_null(llty), + ConstVal::Function(..) => C_undef(llty), ConstVal::Variant(_) | ConstVal::Aggregate(..) | ConstVal::Unevaluated(..) => { @@ -117,15 +116,25 @@ impl<'tcx> Const<'tcx> { Const::new(val, ty) } - fn get_pair(&self) -> (ValueRef, ValueRef) { - (const_get_elt(self.llval, &[0]), - const_get_elt(self.llval, &[1])) + fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef { + let layout = ccx.layout_of(self.ty); + let ix = if let layout::Univariant { ref variant, .. } = *layout { + adt::struct_llfields_index(variant, i) + } else { + i + }; + + const_get_elt(self.llval, ix) + } + + fn get_pair(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) { + (self.get_field(ccx, 0), self.get_field(ccx, 1)) } - fn get_fat_ptr(&self) -> (ValueRef, ValueRef) { + fn get_fat_ptr(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) { assert_eq!(abi::FAT_PTR_ADDR, 0); assert_eq!(abi::FAT_PTR_EXTRA, 1); - self.get_pair() + self.get_pair(ccx) } fn as_lvalue(&self) -> ConstLvalue<'tcx> { @@ -136,12 +145,12 @@ impl<'tcx> Const<'tcx> { } } - pub fn to_operand<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> { + pub fn to_operand(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> { let llty = type_of::immediate_type_of(ccx, self.ty); let llvalty = val_ty(self.llval); let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) { - let (a, b) = self.get_pair(); + let (a, b) = self.get_pair(ccx); OperandValue::Pair(a, b) } else if 
llty == llvalty && common::type_is_immediate(ccx, self.ty) { // If the types match, we can use the value directly. @@ -438,7 +447,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let (base, extra) = if !has_metadata { (base.llval, ptr::null_mut()) } else { - base.get_fat_ptr() + base.get_fat_ptr(self.ccx) }; if self.ccx.statics().borrow().contains_key(&base) { (Base::Static(base), extra) @@ -464,32 +473,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } } mir::ProjectionElem::Field(ref field, _) => { - // Extract field of struct-like const, skipping our alignment padding. - let mut ix = field.index(); - let layout = self.ccx.layout_of(tr_base.ty); - if let layout::Univariant { ref variant, .. } = *layout { - ix = variant.memory_index[ix] as usize; - } - - // Get the ix-th non-undef element of the struct. - let mut real_ix = 0; // actual position in the struct - let mut ix = ix; // logical index relative to real_ix - let mut llprojected; - loop { - loop { - llprojected = const_get_elt(base.llval, &[real_ix]); - if !is_undef(llprojected) { - break; - } - real_ix = real_ix + 1; - } - if ix == 0 { - break; - } - ix = ix - 1; - real_ix = real_ix + 1; - } - + let llprojected = base.get_field(self.ccx, field.index()); let llextra = if !has_metadata { ptr::null_mut() } else { @@ -510,7 +494,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { // Produce an undef instead of a LLVM assertion on OOB. let len = common::const_to_uint(tr_base.len(self.ccx)); let llelem = if iv < len as u128 { - const_get_elt(base.llval, &[iv as u32]) + const_get_elt(base.llval, iv as usize) } else { C_undef(type_of::type_of(self.ccx, projected_ty)) }; @@ -680,7 +664,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { // to use a different vtable. In that case, we want to // load out the original data pointer so we can repackage // it. 
- let (base, extra) = operand.get_fat_ptr(); + let (base, extra) = operand.get_fat_ptr(self.ccx); (base, Some(extra)) } else { (operand.llval, None) @@ -755,7 +739,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let ll_cast_ty = type_of::immediate_type_of(self.ccx, cast_ty); let ll_from_ty = type_of::immediate_type_of(self.ccx, operand.ty); if common::type_is_fat_ptr(self.ccx, operand.ty) { - let (data_ptr, meta_ptr) = operand.get_fat_ptr(); + let (data_ptr, meta_ptr) = operand.get_fat_ptr(self.ccx); if common::type_is_fat_ptr(self.ccx, cast_ty) { let ll_cft = ll_cast_ty.field_types(); let ll_fft = ll_from_ty.field_types(); @@ -833,8 +817,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { match const_scalar_checked_binop(tcx, op, lhs, rhs, ty) { Some((llval, of)) => { - let llof = C_bool(self.ccx, of); - Const::new(C_struct(self.ccx, &[llval, llof], false), binop_ty) + trans_const_adt(self.ccx, binop_ty, &mir::AggregateKind::Tuple, &[ + Const::new(llval, val_ty), + Const::new(C_bool(self.ccx, of), tcx.types.bool) + ]) } None => { span_bug!(span, "{:?} got non-integer operands: {:?} and {:?}", @@ -1142,13 +1128,10 @@ fn trans_const_adt<'a, 'tcx>( } layout::UntaggedUnion { ref variants, .. 
}=> { assert_eq!(variant_index, 0); - let mut contents = vec![vals[0].llval]; - - let offset = ccx.size_of(vals[0].ty); - let size = variants.stride(); - if offset != size { - contents.push(padding(ccx, size - offset)); - } + let contents = [ + vals[0].llval, + padding(ccx, variants.stride() - ccx.size_of(vals[0].ty)) + ]; Const::new(C_struct(ccx, &contents, variants.packed), t) } @@ -1203,19 +1186,20 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let parts = st.field_index_by_increasing_offset().map(|i| { (vals[i], st.offsets[i]) }); + let mut first_field = true; for (val, target_offset) in parts { - if offset < target_offset { + if first_field { + first_field = false; + assert_eq!(target_offset.bytes(), 0); + } else { cfields.push(padding(ccx, target_offset - offset)); } - assert!(!is_undef(val.llval)); cfields.push(val.llval); offset = target_offset + ccx.size_of(val.ty); } let size = layout.size(ccx); - if offset < size { - cfields.push(padding(ccx, size - offset)); - } + cfields.push(padding(ccx, size - offset)); Const::new(C_struct(ccx, &cfields, st.packed), layout.ty) } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 376d42c71adfe..a0cab01b007d2 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -252,7 +252,9 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // Double index to account for padding (FieldPath already uses `Struct::memory_index`) fn gepi_struct_llfields_path(self, bcx: &Builder, discrfield: &layout::FieldPath) -> ValueRef { - let path = discrfield.iter().map(|&i| (i as usize) << 1).collect::>(); + let path = discrfield.iter().map(|&i| { + adt::memory_index_to_gep(i as usize) + }).collect::>(); bcx.gepi(self.llval, &path) } diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 47350d0712544..60f585f4bd927 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -17,7 +17,7 @@ use 
rustc_data_structures::indexed_vec::Idx; use adt; use base; -use common::{self, CrateContext, C_null}; +use common::{self, CrateContext, C_undef}; use builder::Builder; use value::Value; use type_of; @@ -93,9 +93,9 @@ impl<'a, 'tcx> OperandRef<'tcx> { (0, 1) }; let fields = llty.field_types(); - OperandValue::Pair(C_null(fields[ix0]), C_null(fields[ix1])) + OperandValue::Pair(C_undef(fields[ix0]), C_undef(fields[ix1])) } else { - OperandValue::Immediate(C_null(llty)) + OperandValue::Immediate(C_undef(llty)) }; OperandRef { val, @@ -134,14 +134,10 @@ impl<'a, 'tcx> OperandRef<'tcx> { if let OperandValue::Pair(a, b) = self.val { // Reconstruct the immediate aggregate. let llty = type_of::type_of(bcx.ccx, self.ty); - let mut llpair = common::C_undef(llty); + let mut llpair = C_undef(llty); let elems = [a, b]; for i in 0..2 { - let mut elem = elems[i]; - // Extend boolean i1's to i8. - if common::val_ty(elem) == Type::i1(bcx.ccx) { - elem = bcx.zext(elem, Type::i8(bcx.ccx)); - } + let elem = base::from_immediate(bcx, elems[i]); let layout = bcx.ccx.layout_of(self.ty); let i = if let Layout::Univariant { ref variant, .. 
} = *layout { adt::struct_llfields_index(variant, i) diff --git a/src/test/codegen/consts.rs b/src/test/codegen/consts.rs index 33b4221b73338..a75b8f3992d07 100644 --- a/src/test/codegen/consts.rs +++ b/src/test/codegen/consts.rs @@ -54,7 +54,7 @@ pub fn inline_enum_const() -> E { #[no_mangle] pub fn low_align_const() -> E { // Check that low_align_const and high_align_const use the same constant -// CHECK: load {{.*}} bitcast ({ i16, i16, [4 x i8] }** [[LOW_HIGH_REF]] +// CHECK: load {{.*}} bitcast ({ i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]] *&E::A(0) } @@ -62,6 +62,6 @@ pub fn low_align_const() -> E { #[no_mangle] pub fn high_align_const() -> E { // Check that low_align_const and high_align_const use the same constant -// CHECK: load {{.*}} bitcast ({ i16, i16, [4 x i8] }** [[LOW_HIGH_REF]] +// CHECK: load {{.*}} bitcast ({ i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]] *&E::A(0) } diff --git a/src/test/codegen/link_section.rs b/src/test/codegen/link_section.rs index 98214dc5c6f3d..1879002e7f3d7 100644 --- a/src/test/codegen/link_section.rs +++ b/src/test/codegen/link_section.rs @@ -22,12 +22,12 @@ pub enum E { B(f32) } -// CHECK: @VAR2 = constant {{.*}} { i32 0, i32 666 }, section ".test_two" +// CHECK: @VAR2 = constant {{.*}}, section ".test_two" #[no_mangle] #[link_section = ".test_two"] pub static VAR2: E = E::A(666); -// CHECK: @VAR3 = constant {{.*}} { i32 1, float 1.000000e+00 }, section ".test_three" +// CHECK: @VAR3 = constant {{.*}}, section ".test_three" #[no_mangle] #[link_section = ".test_three"] pub static VAR3: E = E::B(1.); From b8671bef977723bd5983fc3130910d2e5a8b6cd9 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Wed, 14 Jun 2017 12:27:43 +0300 Subject: [PATCH 05/69] rustc_trans: remove obsolete Type methods. 
--- src/librustc_llvm/ffi.rs | 1 - src/librustc_trans/context.rs | 11 ++++------- src/librustc_trans/type_.rs | 10 ---------- 3 files changed, 4 insertions(+), 18 deletions(-) diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index d800129b0c07f..4d02274eadd4a 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -585,7 +585,6 @@ extern "C" { pub fn LLVMVectorType(ElementType: TypeRef, ElementCount: c_uint) -> TypeRef; pub fn LLVMGetElementType(Ty: TypeRef) -> TypeRef; - pub fn LLVMGetArrayLength(ArrayTy: TypeRef) -> c_uint; pub fn LLVMGetVectorSize(VectorTy: TypeRef) -> c_uint; // Operations on other types diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index cb71ef104d3d9..5e12be5a22c9a 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -101,7 +101,6 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> { lltypes: RefCell, Type>>, isize_ty: Type, - opaque_vec_type: Type, str_slice_type: Type, dbg_cx: Option>, @@ -378,7 +377,6 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { used_statics: RefCell::new(Vec::new()), lltypes: RefCell::new(FxHashMap()), isize_ty: Type::from_ref(ptr::null_mut()), - opaque_vec_type: Type::from_ref(ptr::null_mut()), str_slice_type: Type::from_ref(ptr::null_mut()), dbg_cx, eh_personality: Cell::new(None), @@ -389,24 +387,23 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { placeholder: PhantomData, }; - let (isize_ty, opaque_vec_type, str_slice_ty, mut local_ccx) = { + let (isize_ty, str_slice_ty, mut local_ccx) = { // Do a little dance to create a dummy CrateContext, so we can // create some things in the LLVM module of this codegen unit let mut local_ccxs = vec![local_ccx]; - let (isize_ty, opaque_vec_type, str_slice_ty) = { + let (isize_ty, str_slice_ty) = { let dummy_ccx = LocalCrateContext::dummy_ccx(shared, local_ccxs.as_mut_slice()); let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice"); str_slice_ty.set_struct_body(&[Type::i8p(&dummy_ccx), 
Type::isize(&dummy_ccx)], false); - (Type::isize(&dummy_ccx), Type::opaque_vec(&dummy_ccx), str_slice_ty) + (Type::isize(&dummy_ccx), str_slice_ty) }; - (isize_ty, opaque_vec_type, str_slice_ty, local_ccxs.pop().unwrap()) + (isize_ty, str_slice_ty, local_ccxs.pop().unwrap()) }; local_ccx.isize_ty = isize_ty; - local_ccx.opaque_vec_type = opaque_vec_type; local_ccx.str_slice_type = str_slice_ty; local_ccx diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index ffb303688aaf6..098c5b2d957ab 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -214,16 +214,6 @@ impl Type { ty!(llvm::LLVMVectorType(ty.to_ref(), len as c_uint)) } - pub fn vec(ccx: &CrateContext, ty: &Type) -> Type { - Type::struct_(ccx, - &[Type::array(ty, 0), Type::isize(ccx)], - false) - } - - pub fn opaque_vec(ccx: &CrateContext) -> Type { - Type::vec(ccx, &Type::i8(ccx)) - } - pub fn vtable_ptr(ccx: &CrateContext) -> Type { Type::func(&[Type::i8p(ccx)], &Type::void(ccx)).ptr_to().ptr_to() } From 260c41b4b808fd9995f23ce1eb7d820f49254c85 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 18 Jun 2017 17:42:03 +0300 Subject: [PATCH 06/69] rustc_trans: do not introspect LLVM aggregate field types. 
--- src/librustc_llvm/ffi.rs | 2 -- src/librustc_trans/mir/constant.rs | 15 ++++++--------- src/librustc_trans/mir/operand.rs | 19 ++----------------- src/librustc_trans/mir/rvalue.rs | 15 ++++++--------- src/librustc_trans/type_.rs | 13 ------------- 5 files changed, 14 insertions(+), 50 deletions(-) diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index 4d02274eadd4a..0f96a22f897ae 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -575,8 +575,6 @@ extern "C" { ElementCount: c_uint, Packed: Bool) -> TypeRef; - pub fn LLVMCountStructElementTypes(StructTy: TypeRef) -> c_uint; - pub fn LLVMGetStructElementTypes(StructTy: TypeRef, Dest: *mut TypeRef); pub fn LLVMIsPackedStruct(StructTy: TypeRef) -> Bool; // Operations on array, pointer, and vector types (sequence types) diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index fa0558faad257..d4289363ecf38 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -736,20 +736,17 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } } mir::CastKind::Misc => { // Casts from a fat-ptr. 
- let ll_cast_ty = type_of::immediate_type_of(self.ccx, cast_ty); - let ll_from_ty = type_of::immediate_type_of(self.ccx, operand.ty); if common::type_is_fat_ptr(self.ccx, operand.ty) { - let (data_ptr, meta_ptr) = operand.get_fat_ptr(self.ccx); + let (data_ptr, meta) = operand.get_fat_ptr(self.ccx); if common::type_is_fat_ptr(self.ccx, cast_ty) { - let ll_cft = ll_cast_ty.field_types(); - let ll_fft = ll_from_ty.field_types(); - let data_cast = consts::ptrcast(data_ptr, ll_cft[0]); - assert_eq!(ll_cft[1].kind(), ll_fft[1].kind()); - C_struct(self.ccx, &[data_cast, meta_ptr], false) + let llcast_ty = type_of::fat_ptr_base_ty(self.ccx, cast_ty); + let data_cast = consts::ptrcast(data_ptr, llcast_ty); + C_struct(self.ccx, &[data_cast, meta], false) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - consts::ptrcast(data_ptr, ll_cast_ty) + let llcast_ty = type_of::immediate_type_of(self.ccx, cast_ty); + consts::ptrcast(data_ptr, llcast_ty) } } else { bug!("Unexpected non-fat-pointer operand") diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 60f585f4bd927..f4285c2ad0ad0 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -27,6 +27,7 @@ use std::fmt; use std::ptr; use super::{MirContext, LocalRef}; +use super::constant::Const; use super::lvalue::{Alignment, LvalueRef}; /// The representation of a Rust value. The enum variant is in fact @@ -84,23 +85,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { ty: Ty<'tcx>) -> OperandRef<'tcx> { assert!(common::type_is_zero_size(ccx, ty)); let llty = type_of::type_of(ccx, ty); - let val = if common::type_is_imm_pair(ccx, ty) { - let layout = ccx.layout_of(ty); - let (ix0, ix1) = if let Layout::Univariant { ref variant, .. 
} = *layout { - (adt::struct_llfields_index(variant, 0), - adt::struct_llfields_index(variant, 1)) - } else { - (0, 1) - }; - let fields = llty.field_types(); - OperandValue::Pair(C_undef(fields[ix0]), C_undef(fields[ix1])) - } else { - OperandValue::Immediate(C_undef(llty)) - }; - OperandRef { - val, - ty, - } + Const::new(C_undef(llty), ty).to_operand(ccx) } /// Asserts that this operand refers to a scalar and returns diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 7e4b723575031..d975ed8cda482 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -263,19 +263,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => { - let ll_cast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty); - let ll_from_ty = type_of::immediate_type_of(bcx.ccx, operand.ty); - if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val { + if let OperandValue::Pair(data_ptr, meta) = operand.val { if common::type_is_fat_ptr(bcx.ccx, cast_ty) { - let ll_cft = ll_cast_ty.field_types(); - let ll_fft = ll_from_ty.field_types(); - let data_cast = bcx.pointercast(data_ptr, ll_cft[0]); - assert_eq!(ll_cft[1].kind(), ll_fft[1].kind()); - OperandValue::Pair(data_cast, meta_ptr) + let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty); + let data_cast = bcx.pointercast(data_ptr, llcast_ty); + OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. 
- let llval = bcx.pointercast(data_ptr, ll_cast_ty); + let llcast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty); + let llval = bcx.pointercast(data_ptr, llcast_ty); OperandValue::Immediate(llval) } } else { diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index 098c5b2d957ab..bb8f3f23108ec 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -249,19 +249,6 @@ impl Type { } } - pub fn field_types(&self) -> Vec { - unsafe { - let n_elts = llvm::LLVMCountStructElementTypes(self.to_ref()) as usize; - if n_elts == 0 { - return Vec::new(); - } - let mut elts = vec![Type { rf: ptr::null_mut() }; n_elts]; - llvm::LLVMGetStructElementTypes(self.to_ref(), - elts.as_mut_ptr() as *mut TypeRef); - elts - } - } - pub fn func_params(&self) -> Vec { unsafe { let n_args = llvm::LLVMCountParamTypes(self.to_ref()) as usize; From 5b1fdaeb80b14bc0b7d7c172019b7cd792bb9abb Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 25 Jun 2017 12:41:24 +0300 Subject: [PATCH 07/69] rustc_trans: use more of the trans::mir and ty::layout APIs throughout. 
--- src/librustc/ty/layout.rs | 62 +++- src/librustc_trans/abi.rs | 17 +- src/librustc_trans/adt.rs | 106 +++---- src/librustc_trans/asm.rs | 13 +- src/librustc_trans/base.rs | 169 +++-------- src/librustc_trans/builder.rs | 28 +- src/librustc_trans/common.rs | 65 +---- src/librustc_trans/debuginfo/metadata.rs | 90 +++--- src/librustc_trans/glue.rs | 37 +-- src/librustc_trans/intrinsic.rs | 59 ++-- src/librustc_trans/lib.rs | 1 - src/librustc_trans/meth.rs | 8 +- src/librustc_trans/mir/block.rs | 116 ++++---- src/librustc_trans/mir/constant.rs | 13 +- src/librustc_trans/mir/lvalue.rs | 352 +++++++++++++++-------- src/librustc_trans/mir/mod.rs | 46 +-- src/librustc_trans/mir/operand.rs | 162 +++-------- src/librustc_trans/mir/rvalue.rs | 125 ++++---- src/librustc_trans/tvec.rs | 53 ---- src/librustc_trans/type_of.rs | 46 ++- src/librustc_trans_utils/monomorphize.rs | 11 +- src/test/codegen/slice-init.rs | 12 +- 22 files changed, 706 insertions(+), 885 deletions(-) delete mode 100644 src/librustc_trans/tvec.rs diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index d83f7e661baa3..d51c25ba6d389 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1700,6 +1700,10 @@ impl<'a, 'tcx> Layout { } } + pub fn size_and_align(&self, cx: C) -> (Size, Align) { + (self.size(cx), self.align(cx)) + } + /// Returns alignment before repr alignment is applied pub fn primitive_align(&self, cx: C) -> Align { match *self { @@ -2201,6 +2205,11 @@ impl<'a, 'tcx> LayoutTyper<'tcx> for LayoutCx<'a, 'tcx> { impl<'a, 'tcx> TyLayout<'tcx> { pub fn for_variant(&self, variant_index: usize) -> Self { + let is_enum = match self.ty.sty { + ty::TyAdt(def, _) => def.is_enum(), + _ => false + }; + assert!(is_enum); TyLayout { variant_index: Some(variant_index), ..*self @@ -2214,13 +2223,26 @@ impl<'a, 'tcx> TyLayout<'tcx> { pub fn field_count(&self) -> usize { // Handle enum/union through the type rather than Layout. 
if let ty::TyAdt(def, _) = self.ty.sty { - let v = self.variant_index.unwrap_or(0); - if def.variants.is_empty() { - assert_eq!(v, 0); - return 0; + let v = if def.is_enum() { + if def.variants.is_empty() { + return 0; + } + match self.variant_index { + None => match *self.layout { + // Discriminant field for enums (where applicable). + General { .. } => return 1, + _ if def.variants.len() > 1 => return 0, + + // Enums with one variant behave like structs. + _ => 0 + }, + Some(v) => v + } } else { - return def.variants[v].fields.len(); - } + 0 + }; + + return def.variants[v].fields.len(); } match *self.layout { @@ -2248,7 +2270,7 @@ impl<'a, 'tcx> TyLayout<'tcx> { } } - pub fn field_type>(&self, cx: C, i: usize) -> Ty<'tcx> { + fn field_type_unnormalized>(&self, cx: C, i: usize) -> Ty<'tcx> { let tcx = cx.tcx(); let ptr_field_type = |pointee: Ty<'tcx>| { @@ -2314,7 +2336,25 @@ impl<'a, 'tcx> TyLayout<'tcx> { // ADTs. ty::TyAdt(def, substs) => { - def.variants[self.variant_index.unwrap_or(0)].fields[i].ty(tcx, substs) + let v = if def.is_enum() { + match self.variant_index { + None => match *self.layout { + // Discriminant field for enums (where applicable). + General { discr, .. } => { + return [discr.to_ty(tcx, false)][i]; + } + _ if def.variants.len() > 1 => return [][i], + + // Enums with one variant behave like structs. + _ => 0 + }, + Some(v) => v + } + } else { + 0 + }; + + def.variants[v].fields[i].ty(tcx, substs) } ty::TyProjection(_) | ty::TyAnon(..) 
| ty::TyParam(_) | @@ -2324,11 +2364,15 @@ impl<'a, 'tcx> TyLayout<'tcx> { } } + pub fn field_type>(&self, cx: C, i: usize) -> Ty<'tcx> { + cx.normalize_projections(self.field_type_unnormalized(cx, i)) + } + pub fn field>(&self, cx: C, i: usize) -> C::TyLayout { - cx.layout_of(cx.normalize_projections(self.field_type(cx, i))) + cx.layout_of(self.field_type(cx, i)) } } diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index ffbc4f82bca9e..04041488016c5 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -30,6 +30,7 @@ use cabi_sparc64; use cabi_nvptx; use cabi_nvptx64; use cabi_hexagon; +use mir::lvalue::LvalueRef; use type_::Type; use type_of; @@ -570,20 +571,20 @@ impl<'a, 'tcx> ArgType<'tcx> { /// lvalue for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. - pub fn store(&self, bcx: &Builder<'a, 'tcx>, mut val: ValueRef, dst: ValueRef) { + pub fn store(&self, bcx: &Builder<'a, 'tcx>, mut val: ValueRef, dst: LvalueRef<'tcx>) { if self.is_ignore() { return; } let ccx = bcx.ccx; if self.is_indirect() { let llsz = C_usize(ccx, self.layout.size(ccx).bytes()); - base::call_memcpy(bcx, dst, val, llsz, self.layout.align(ccx)); + base::call_memcpy(bcx, dst.llval, val, llsz, self.layout.align(ccx)); } else if let Some(ty) = self.cast { // FIXME(eddyb): Figure out when the simpler Store is safe, clang // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_dst = bcx.pointercast(dst, ty.llvm_type(ccx).ptr_to()); + let cast_dst = bcx.pointercast(dst.llval, ty.llvm_type(ccx).ptr_to()); bcx.store(val, cast_dst, Some(self.layout.align(ccx))); } else { // The actual return type is a struct, but the ABI @@ -610,7 +611,7 @@ impl<'a, 'tcx> ArgType<'tcx> { // ...and then memcpy it to the intended destination. 
base::call_memcpy(bcx, - bcx.pointercast(dst, Type::i8p(ccx)), + bcx.pointercast(dst.llval, Type::i8p(ccx)), bcx.pointercast(llscratch, Type::i8p(ccx)), C_usize(ccx, self.layout.size(ccx).bytes()), self.layout.align(ccx).min(ty.align(ccx))); @@ -618,14 +619,12 @@ impl<'a, 'tcx> ArgType<'tcx> { bcx.lifetime_end(llscratch, scratch_size); } } else { - if self.layout.ty == ccx.tcx().types.bool { - val = bcx.zext(val, Type::i8(ccx)); - } - bcx.store(val, dst, None); + val = base::from_immediate(bcx, val); + bcx.store(val, dst.llval, None); } } - pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: ValueRef) { + pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: LvalueRef<'tcx>) { if self.pad.is_some() { *idx += 1; } diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 9d693e098cff0..2383b37286510 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -42,10 +42,9 @@ //! taken to it, implementing them for Rust seems difficult. use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Align, HasDataLayout, LayoutTyper, Size}; +use rustc::ty::layout::{self, Align, HasDataLayout, LayoutTyper, Size, TyLayout}; use context::CrateContext; -use monomorphize; use type_::Type; use type_of; @@ -75,15 +74,25 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, | layout::UntaggedUnion { .. } | layout::RawNullablePointer { .. } => { } layout::Univariant { ..} | layout::StructWrappedNullablePointer { .. } => { - let (nonnull_variant_index, nonnull_variant, packed) = match *l { - layout::Univariant { ref variant, .. } => (0, variant, variant.packed), + let (variant_layout, variant) = match *l { + layout::Univariant { ref variant, .. } => { + let is_enum = if let ty::TyAdt(def, _) = t.sty { + def.is_enum() + } else { + false + }; + if is_enum { + (l.for_variant(0), variant) + } else { + (l, variant) + } + } layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. 
} => - (nndiscr, nonnull, nonnull.packed), + (l.for_variant(nndiscr as usize), nonnull), _ => unreachable!() }; - llty.set_struct_body(&struct_llfields(cx, t, nonnull_variant_index as usize, - nonnull_variant, None), - packed) + llty.set_struct_body(&struct_llfields(cx, variant_layout, variant, None), + variant.packed) }, _ => bug!("This function cannot handle {} with layout {:#?}", t, l) } @@ -97,22 +106,18 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, match *l { layout::CEnum { discr, .. } => Type::from_integer(cx, discr), layout::RawNullablePointer { nndiscr, .. } => { - let (def, substs) = match t.sty { - ty::TyAdt(d, s) => (d, s), - _ => bug!("{} is not an ADT", t) - }; - let nnty = monomorphize::field_ty(cx.tcx(), substs, - &def.variants[nndiscr as usize].fields[0]); - if let layout::Scalar { value: layout::Pointer, .. } = *cx.layout_of(nnty) { + let nnfield = l.for_variant(nndiscr as usize).field(cx, 0); + if let layout::Scalar { value: layout::Pointer, .. } = *nnfield { Type::i8p(cx) } else { - type_of::type_of(cx, nnty) + type_of::type_of(cx, nnfield.ty) } } layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => { match name { None => { - Type::struct_(cx, &struct_llfields(cx, t, nndiscr as usize, nonnull, None), + Type::struct_(cx, &struct_llfields(cx, l.for_variant(nndiscr as usize), + nonnull, None), nonnull.packed) } Some(name) => { @@ -123,7 +128,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, layout::Univariant { ref variant, .. } => { match name { None => { - Type::struct_(cx, &struct_llfields(cx, t, 0, &variant, None), + Type::struct_(cx, &struct_llfields(cx, l, &variant, None), variant.packed) } Some(name) => { @@ -199,61 +204,30 @@ fn union_fill(cx: &CrateContext, size: Size, align: Align) -> Type { } /// Double an index to account for padding. 
-pub fn memory_index_to_gep(index: usize) -> usize { +pub fn memory_index_to_gep(index: u64) -> u64 { index * 2 } -/// Lookup `Struct::memory_index`, double it to account for padding. -pub fn struct_llfields_index(variant: &layout::Struct, index: usize) -> usize { - memory_index_to_gep(variant.memory_index[index] as usize) -} - pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, - variant_index: usize, + layout: TyLayout<'tcx>, variant: &layout::Struct, discr: Option>) -> Vec { - let field_count = match t.sty { - ty::TyAdt(ref def, _) if def.variants.len() == 0 => return vec![], - ty::TyAdt(ref def, _) => { - discr.is_some() as usize + def.variants[variant_index].fields.len() - }, - ty::TyTuple(fields, _) => fields.len(), - ty::TyClosure(def_id, substs) => { - if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);} - substs.upvar_tys(def_id, cx.tcx()).count() - }, - ty::TyGenerator(def_id, substs, _) => { - if variant_index > 0 { bug!("{} is a generator, which only has one variant", t);} - substs.field_tys(def_id, cx.tcx()).count() - }, - _ => bug!("{} is not a type that can have fields.", t) - }; + let field_count = (discr.is_some() as usize) + layout.field_count(); debug!("struct_llfields: variant: {:?}", variant); let mut first_field = true; let mut offset = Size::from_bytes(0); let mut result: Vec = Vec::with_capacity(field_count * 2); let field_iter = variant.field_index_by_increasing_offset().map(|i| { - (i, match t.sty { - ty::TyAdt(..) 
if i == 0 && discr.is_some() => discr.unwrap(), - ty::TyAdt(ref def, ref substs) => { - monomorphize::field_ty(cx.tcx(), substs, - &def.variants[variant_index].fields[i as usize - discr.is_some() as usize]) - }, - ty::TyTuple(fields, _) => fields[i as usize], - ty::TyClosure(def_id, substs) => { - substs.upvar_tys(def_id, cx.tcx()).nth(i).unwrap() - }, - ty::TyGenerator(def_id, substs, _) => { - let ty = substs.field_tys(def_id, cx.tcx()).nth(i).unwrap(); - cx.tcx().normalize_associated_type(&ty) - }, - _ => bug!() - }, variant.offsets[i as usize]) + let ty = if i == 0 && discr.is_some() { + cx.layout_of(discr.unwrap()) + } else { + layout.field(cx, i - discr.is_some() as usize) + }; + (i, ty, variant.offsets[i as usize]) }); - for (index, ty, target_offset) in field_iter { - debug!("struct_llfields: {} ty: {} offset: {:?} target_offset: {:?}", - index, ty, offset, target_offset); + for (index, field, target_offset) in field_iter { + debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}", + index, field, offset, target_offset); assert!(target_offset >= offset); let padding = target_offset - offset; if first_field { @@ -263,19 +237,19 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, result.push(Type::array(&Type::i8(cx), padding.bytes())); debug!(" padding before: {:?}", padding); } - let llty = type_of::in_memory_type_of(cx, ty); + let llty = type_of::in_memory_type_of(cx, field.ty); result.push(llty); - let layout = cx.layout_of(ty); + if variant.packed { assert_eq!(padding.bytes(), 0); } else { - let field_align = layout.align(cx); + let field_align = field.align(cx); assert!(field_align.abi() <= variant.align.abi(), "non-packed type has field with larger align ({}): {:#?}", field_align.abi(), variant); } - let target_size = layout.size(&cx.tcx().data_layout); - offset = target_offset + target_size; + + offset = target_offset + field.size(cx); } if variant.sized && field_count > 0 { if offset > variant.stride() { diff --git 
a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index 92cbd004206e7..e6199df62d345 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -11,7 +11,6 @@ //! # Translation of inline assembly. use llvm::{self, ValueRef}; -use base; use common::*; use type_of; use type_::Type; @@ -19,8 +18,9 @@ use builder::Builder; use rustc::hir; use rustc::ty::Ty; +use rustc::ty::layout::Align; -use mir::lvalue::Alignment; +use mir::lvalue::{LvalueRef, Alignment}; use std::ffi::CString; use syntax::ast::AsmDialect; @@ -40,16 +40,17 @@ pub fn trans_inline_asm<'a, 'tcx>( let mut indirect_outputs = vec![]; for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() { let val = if out.is_rw || out.is_indirect { - Some(base::load_ty(bcx, val, Alignment::Packed, ty)) + Some(LvalueRef::new_sized(val, ty, + Alignment::Packed(Align::from_bytes(1, 1).unwrap())).load(bcx)) } else { None }; if out.is_rw { - inputs.push(val.unwrap()); + inputs.push(val.unwrap().immediate()); ext_constraints.push(i.to_string()); } if out.is_indirect { - indirect_outputs.push(val.unwrap()); + indirect_outputs.push(val.unwrap().immediate()); } else { output_types.push(type_of::type_of(bcx.ccx, ty)); } @@ -107,7 +108,7 @@ pub fn trans_inline_asm<'a, 'tcx>( // Again, based on how many outputs we have let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); for (i, (_, &(val, _))) in outputs.enumerate() { - let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) }; + let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i as u64) }; bcx.store(v, val, None); } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 98ad6a54bd1f5..91f7bf39f1a6b 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -48,7 +48,6 @@ use rustc::util::common::{time, print_time_passes_entry}; use rustc::session::config::{self, NoDebugInfo}; use rustc::session::Session; use rustc_incremental; -use abi; use allocator; 
use mir::lvalue::LvalueRef; use attributes; @@ -56,7 +55,7 @@ use builder::Builder; use callee; use common::{C_bool, C_bytes_in_context, C_i32, C_usize}; use collector::{self, TransItemCollectionMode}; -use common::{C_struct_in_context, C_undef, C_array}; +use common::{C_struct_in_context, C_array}; use common::CrateContext; use common::{type_is_zero_size, val_ty}; use common; @@ -66,14 +65,13 @@ use debuginfo; use declare; use meth; use mir; -use monomorphize::{self, Instance}; +use monomorphize::Instance; use partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt}; use symbol_names_test; use time_graph; use trans_item::{TransItem, BaseTransItemExt, TransItemExt, DefPathBasedNames}; use type_::Type; use type_of; -use value::Value; use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet, DefIdSet}; use CrateInfo; @@ -90,7 +88,7 @@ use syntax::attr; use rustc::hir; use syntax::ast; -use mir::lvalue::Alignment; +use mir::operand::{OperandRef, OperandValue}; pub use rustc_trans_utils::{find_exported_symbols, check_for_rustc_errors_attr}; pub use rustc_trans_utils::trans_item::linkage_by_name; @@ -125,14 +123,6 @@ impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> { } } -pub fn get_meta(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef { - bcx.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA) -} - -pub fn get_dataptr(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef { - bcx.struct_gep(fat_ptr, abi::FAT_PTR_ADDR) -} - pub fn bin_op_to_icmp_predicate(op: hir::BinOp_, signed: bool) -> llvm::IntPredicate { @@ -257,25 +247,29 @@ pub fn unsize_thin_ptr<'a, 'tcx>( /// Coerce `src`, which is a reference to a value of type `src_ty`, /// to a value of type `dst_ty` and store the result in `dst` pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, - src: &LvalueRef<'tcx>, - dst: &LvalueRef<'tcx>) { + src: LvalueRef<'tcx>, + dst: LvalueRef<'tcx>) { let src_ty = src.ty.to_ty(bcx.tcx()); let dst_ty = dst.ty.to_ty(bcx.tcx()); let coerce_ptr = || { - let (base, info) = if 
common::type_is_fat_ptr(bcx.ccx, src_ty) { - // fat-ptr to fat-ptr unsize preserves the vtable - // i.e. &'a fmt::Debug+Send => &'a fmt::Debug - // So we need to pointercast the base to ensure - // the types match up. - let (base, info) = load_fat_ptr(bcx, src.llval, src.alignment, src_ty); - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty); - let base = bcx.pointercast(base, llcast_ty); - (base, info) - } else { - let base = load_ty(bcx, src.llval, src.alignment, src_ty); - unsize_thin_ptr(bcx, base, src_ty, dst_ty) + let (base, info) = match src.load(bcx).val { + OperandValue::Pair(base, info) => { + // fat-ptr to fat-ptr unsize preserves the vtable + // i.e. &'a fmt::Debug+Send => &'a fmt::Debug + // So we need to pointercast the base to ensure + // the types match up. + let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty); + (bcx.pointercast(base, llcast_ty), info) + } + OperandValue::Immediate(base) => { + unsize_thin_ptr(bcx, base, src_ty, dst_ty) + } + OperandValue::Ref(..) 
=> bug!() }; - store_fat_ptr(bcx, base, info, dst.llval, dst.alignment, dst_ty); + OperandRef { + val: OperandValue::Pair(base, info), + ty: dst_ty + }.store(bcx, dst); }; match (&src_ty.sty, &dst_ty.sty) { (&ty::TyRef(..), &ty::TyRef(..)) | @@ -287,32 +281,25 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, coerce_ptr() } - (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => { + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => { assert_eq!(def_a, def_b); - let src_fields = def_a.variants[0].fields.iter().map(|f| { - monomorphize::field_ty(bcx.tcx(), substs_a, f) - }); - let dst_fields = def_b.variants[0].fields.iter().map(|f| { - monomorphize::field_ty(bcx.tcx(), substs_b, f) - }); + for i in 0..def_a.variants[0].fields.len() { + let src_f = src.project_field(bcx, i); + let dst_f = dst.project_field(bcx, i); + + let src_f_ty = src_f.ty.to_ty(bcx.tcx()); + let dst_f_ty = dst_f.ty.to_ty(bcx.tcx()); - let iter = src_fields.zip(dst_fields).enumerate(); - for (i, (src_fty, dst_fty)) in iter { - if type_is_zero_size(bcx.ccx, dst_fty) { + if type_is_zero_size(bcx.ccx, dst_f_ty) { continue; } - let (src_f, src_f_align) = src.trans_field_ptr(bcx, i); - let (dst_f, dst_f_align) = dst.trans_field_ptr(bcx, i); - if src_fty == dst_fty { - memcpy_ty(bcx, dst_f, src_f, src_fty, None); + if src_f_ty == dst_f_ty { + memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f_ty, + (src_f.alignment | dst_f.alignment).non_abi()); } else { - coerce_unsized_into( - bcx, - &LvalueRef::new_sized_ty(src_f, src_fty, src_f_align), - &LvalueRef::new_sized_ty(dst_f, dst_fty, dst_f_align) - ); + coerce_unsized_into(bcx, src_f, dst_f); } } } @@ -385,94 +372,6 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) { b.call(assume_intrinsic, &[val], None); } -/// Helper for loading values from memory. Does the necessary conversion if the in-memory type -/// differs from the type used for SSA values. 
Also handles various special cases where the type -/// gives us better information about what we are loading. -pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, - alignment: Alignment, t: Ty<'tcx>) -> ValueRef { - let ccx = b.ccx; - if type_is_zero_size(ccx, t) { - return C_undef(type_of::type_of(ccx, t)); - } - - unsafe { - let global = llvm::LLVMIsAGlobalVariable(ptr); - if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True { - let val = llvm::LLVMGetInitializer(global); - if !val.is_null() { - if t.is_bool() { - return llvm::LLVMConstTrunc(val, Type::i1(ccx).to_ref()); - } - return val; - } - } - } - - if t.is_bool() { - b.trunc(b.load_range_assert(ptr, 0, 2, llvm::False, alignment.to_align()), - Type::i1(ccx)) - } else if t.is_char() { - // a char is a Unicode codepoint, and so takes values from 0 - // to 0x10FFFF inclusive only. - b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False, alignment.to_align()) - } else if (t.is_region_ptr() || t.is_box() || t.is_fn()) - && !common::type_is_fat_ptr(ccx, t) - { - b.load_nonnull(ptr, alignment.to_align()) - } else { - b.load(ptr, alignment.to_align()) - } -} - -/// Helper for storing values in memory. Does the necessary conversion if the in-memory type -/// differs from the type used for SSA values. 
-pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef, - dst_align: Alignment, t: Ty<'tcx>) { - debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v)); - - if common::type_is_fat_ptr(cx.ccx, t) { - let lladdr = cx.extract_value(v, abi::FAT_PTR_ADDR); - let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA); - store_fat_ptr(cx, lladdr, llextra, dst, dst_align, t); - } else { - cx.store(from_immediate(cx, v), dst, dst_align.to_align()); - } -} - -pub fn store_fat_ptr<'a, 'tcx>(cx: &Builder<'a, 'tcx>, - data: ValueRef, - extra: ValueRef, - dst: ValueRef, - dst_align: Alignment, - _ty: Ty<'tcx>) { - // FIXME: emit metadata - cx.store(data, get_dataptr(cx, dst), dst_align.to_align()); - cx.store(extra, get_meta(cx, dst), dst_align.to_align()); -} - -pub fn load_fat_ptr<'a, 'tcx>( - b: &Builder<'a, 'tcx>, src: ValueRef, alignment: Alignment, t: Ty<'tcx> -) -> (ValueRef, ValueRef) { - let ptr = get_dataptr(b, src); - let ptr = if t.is_region_ptr() || t.is_box() { - b.load_nonnull(ptr, alignment.to_align()) - } else { - b.load(ptr, alignment.to_align()) - }; - - let meta = get_meta(b, src); - let meta_ty = val_ty(meta); - // If the 'meta' field is a pointer, it's a vtable, so use load_nonnull - // instead - let meta = if meta_ty.element_type().kind() == llvm::TypeKind::Pointer { - b.load_nonnull(meta, None) - } else { - b.load(meta, None) - }; - - (ptr, meta) -} - pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef { if val_ty(val) == Type::i1(bcx.ccx) { bcx.zext(val, Type::i8(bcx.ccx)) diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 2b632ba6f2603..6ad12a13eca7a 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -625,25 +625,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - // Simple wrapper around GEP that takes an array of ints and wraps them - // in C_i32() - #[inline] - pub fn gepi(&self, base: ValueRef, ixs: &[usize]) -> ValueRef { - // Small vector optimization. 
This should catch 100% of the cases that - // we care about. - if ixs.len() < 16 { - let mut small_vec = [ C_i32(self.ccx, 0); 16 ]; - for (small_vec_e, &ix) in small_vec.iter_mut().zip(ixs) { - *small_vec_e = C_i32(self.ccx, ix as i32); - } - self.inbounds_gep(base, &small_vec[..ixs.len()]) - } else { - let v = ixs.iter().map(|i| C_i32(self.ccx, *i as i32)).collect::>(); - self.count_insn("gepi"); - self.inbounds_gep(base, &v) - } - } - pub fn inbounds_gep(&self, ptr: ValueRef, indices: &[ValueRef]) -> ValueRef { self.count_insn("inboundsgep"); unsafe { @@ -652,8 +633,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn struct_gep(&self, ptr: ValueRef, idx: usize) -> ValueRef { + pub fn struct_gep(&self, ptr: ValueRef, idx: u64) -> ValueRef { self.count_insn("structgep"); + assert_eq!(idx as c_uint as u64, idx); unsafe { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) } @@ -959,16 +941,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn extract_value(&self, agg_val: ValueRef, idx: usize) -> ValueRef { + pub fn extract_value(&self, agg_val: ValueRef, idx: u64) -> ValueRef { self.count_insn("extractvalue"); + assert_eq!(idx as c_uint as u64, idx); unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname()) } } pub fn insert_value(&self, agg_val: ValueRef, elt: ValueRef, - idx: usize) -> ValueRef { + idx: u64) -> ValueRef { self.count_insn("insertvalue"); + assert_eq!(idx as c_uint as u64, idx); unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, noname()) diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 659ce0f7f9f43..109c111efa95f 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -22,7 +22,6 @@ use base; use builder::Builder; use consts; use declare; -use monomorphize; use type_::Type; use value::Value; use rustc::traits; @@ -68,53 +67,11 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - } } 
-/// Returns Some([a, b]) if the type has a pair of fields with types a and b. -pub fn type_pair_fields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - -> Option<[Ty<'tcx>; 2]> { - match ty.sty { - ty::TyAdt(adt, substs) => { - assert_eq!(adt.variants.len(), 1); - let fields = &adt.variants[0].fields; - if fields.len() != 2 { - return None; - } - Some([monomorphize::field_ty(ccx.tcx(), substs, &fields[0]), - monomorphize::field_ty(ccx.tcx(), substs, &fields[1])]) - } - ty::TyClosure(def_id, substs) => { - let mut tys = substs.upvar_tys(def_id, ccx.tcx()); - tys.next().and_then(|first_ty| tys.next().and_then(|second_ty| { - if tys.next().is_some() { - None - } else { - Some([first_ty, second_ty]) - } - })) - } - ty::TyGenerator(def_id, substs, _) => { - let mut tys = substs.field_tys(def_id, ccx.tcx()); - tys.next().and_then(|first_ty| tys.next().and_then(|second_ty| { - if tys.next().is_some() { - None - } else { - Some([first_ty, second_ty]) - } - })) - } - ty::TyTuple(tys, _) => { - if tys.len() != 2 { - return None; - } - Some([tys[0], tys[1]]) - } - _ => None - } -} - /// Returns true if the type is represented as a pair of immediates. pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - match *ccx.layout_of(ty) { + let layout = ccx.layout_of(ty); + match *layout { Layout::FatPointer { .. } => true, Layout::Univariant { ref variant, .. } => { // There must be only 2 fields. @@ -122,12 +79,9 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) return false; } - match type_pair_fields(ccx, ty) { - Some([a, b]) => { - type_is_immediate(ccx, a) && type_is_immediate(ccx, b) - } - None => false - } + // The two fields must be both immediates. 
+ type_is_immediate(ccx, layout.field_type(ccx, 0)) && + type_is_immediate(ccx, layout.field_type(ccx, 1)) } _ => false } @@ -356,13 +310,14 @@ pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef { } } -pub fn const_get_elt(v: ValueRef, i: usize) -> ValueRef { +pub fn const_get_elt(v: ValueRef, idx: u64) -> ValueRef { unsafe { - let us = &[i as c_uint]; + assert_eq!(idx as c_uint as u64, idx); + let us = &[idx as c_uint]; let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); - debug!("const_get_elt(v={:?}, i={}, r={:?})", - Value(v), i, Value(r)); + debug!("const_get_elt(v={:?}, idx={}, r={:?})", + Value(v), idx, Value(r)); r } diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index d2e2e1bbdee47..2869ddb6e220e 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -30,10 +30,9 @@ use rustc::ty::fold::TypeVisitor; use rustc::ty::subst::Substs; use rustc::ty::util::TypeIdHasher; use rustc::ich::Fingerprint; -use monomorphize; use common::{self, CrateContext}; use rustc::ty::{self, AdtKind, Ty}; -use rustc::ty::layout::{self, Align, LayoutTyper, Size}; +use rustc::ty::layout::{self, Align, LayoutTyper, Size, TyLayout}; use rustc::session::{Session, config}; use rustc::util::nodemap::FxHashMap; use rustc::util::common::path2cstr; @@ -932,7 +931,6 @@ impl<'tcx> MemberDescriptionFactory<'tcx> { struct StructMemberDescriptionFactory<'tcx> { ty: Ty<'tcx>, variant: &'tcx ty::VariantDef, - substs: &'tcx Substs<'tcx>, span: Span, } @@ -960,12 +958,11 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { } else { f.name.to_string() }; - let fty = monomorphize::field_ty(cx.tcx(), self.substs, f); - - let (size, align) = cx.size_and_align_of(fty); + let field = layout.field(cx, i); + let (size, align) = field.size_and_align(cx); MemberDescription { name, - type_metadata: type_metadata(cx, fty, self.span), + type_metadata: type_metadata(cx, 
field.ty, self.span), offset: offsets[i], size, align, @@ -983,8 +980,8 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, -> RecursiveTypeDescription<'tcx> { let struct_name = compute_debuginfo_type_name(cx, struct_type, false); - let (struct_def_id, variant, substs) = match struct_type.sty { - ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs), + let (struct_def_id, variant) = match struct_type.sty { + ty::TyAdt(def, _) => (def.did, def.struct_variant()), _ => bug!("prepare_struct_metadata on a non-ADT") }; @@ -1004,7 +1001,6 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, StructMDF(StructMemberDescriptionFactory { ty: struct_type, variant, - substs, span, }) ) @@ -1075,20 +1071,20 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, //=----------------------------------------------------------------------------- struct UnionMemberDescriptionFactory<'tcx> { + layout: TyLayout<'tcx>, variant: &'tcx ty::VariantDef, - substs: &'tcx Substs<'tcx>, span: Span, } impl<'tcx> UnionMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { - self.variant.fields.iter().map(|field| { - let fty = monomorphize::field_ty(cx.tcx(), self.substs, field); - let (size, align) = cx.size_and_align_of(fty); + self.variant.fields.iter().enumerate().map(|(i, f)| { + let field = self.layout.field(cx, i); + let (size, align) = field.size_and_align(cx); MemberDescription { - name: field.name.to_string(), - type_metadata: type_metadata(cx, fty, self.span), + name: f.name.to_string(), + type_metadata: type_metadata(cx, field.ty, self.span), offset: Size::from_bytes(0), size, align, @@ -1105,8 +1101,8 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, -> RecursiveTypeDescription<'tcx> { let union_name = compute_debuginfo_type_name(cx, union_type, false); - let (union_def_id, variant, substs) = match union_type.sty { - ty::TyAdt(def, substs) => (def.did, 
def.struct_variant(), substs), + let (union_def_id, variant) = match union_type.sty { + ty::TyAdt(def, _) => (def.did, def.struct_variant()), _ => bug!("prepare_union_metadata on a non-ADT") }; @@ -1124,8 +1120,8 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, unique_type_id, union_metadata_stub, UnionMDF(UnionMemberDescriptionFactory { + layout: cx.layout_of(union_type), variant, - substs, span, }) ) @@ -1142,7 +1138,7 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // offset of zero bytes). struct EnumMemberDescriptionFactory<'tcx> { enum_type: Ty<'tcx>, - type_rep: &'tcx layout::Layout, + type_rep: TyLayout<'tcx>, discriminant_type_metadata: Option, containing_scope: DIScope, file_metadata: DIFile, @@ -1153,11 +1149,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { let adt = &self.enum_type.ty_adt_def().unwrap(); - let substs = match self.enum_type.sty { - ty::TyAdt(def, ref s) if def.adt_kind() == AdtKind::Enum => s, - _ => bug!("{} is not an enum", self.enum_type) - }; - match *self.type_rep { + match *self.type_rep.layout { layout::General { ref variants, .. } => { let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata .expect("")); @@ -1169,6 +1161,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { describe_enum_variant(cx, self.enum_type, struct_def, + i, &adt.variants[i], discriminant_info, self.containing_scope, @@ -1200,6 +1193,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { describe_enum_variant(cx, self.enum_type, variant, + 0, &adt.variants[0], NoDiscriminant, self.containing_scope, @@ -1223,19 +1217,19 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { ] } } - layout::RawNullablePointer { nndiscr: non_null_variant_index, .. } => { + layout::RawNullablePointer { nndiscr, .. } => { // As far as debuginfo is concerned, the pointer this enum // represents is still wrapped in a struct. 
This is to make the // DWARF representation of enums uniform. // First create a description of the artificial wrapper struct: - let non_null_variant = &adt.variants[non_null_variant_index as usize]; + let non_null_variant = &adt.variants[nndiscr as usize]; let non_null_variant_name = non_null_variant.name.as_str(); // The llvm type and metadata of the pointer - let nnty = monomorphize::field_ty(cx.tcx(), &substs, &non_null_variant.fields[0]); - let (size, align) = cx.size_and_align_of(nnty); - let non_null_type_metadata = type_metadata(cx, nnty, self.span); + let nnfield = self.type_rep.for_variant(nndiscr as usize).field(cx, 0); + let (size, align) = nnfield.size_and_align(cx); + let non_null_type_metadata = type_metadata(cx, nnfield.ty, self.span); // For the metadata of the wrapper struct, we need to create a // MemberDescription of the struct's single field. @@ -1264,7 +1258,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { // Now we can create the metadata of the artificial struct let artificial_struct_metadata = composite_type_metadata(cx, - nnty, + nnfield.ty, &non_null_variant_name, unique_type_id, &[sole_struct_member_description], @@ -1274,8 +1268,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { // Encode the information about the null variant in the union // member's name. 
- let null_variant_index = (1 - non_null_variant_index) as usize; - let null_variant_name = adt.variants[null_variant_index].name; + let null_variant_name = adt.variants[(1 - nndiscr) as usize].name; let union_member_name = format!("RUST$ENCODED$ENUM${}${}", 0, null_variant_name); @@ -1301,6 +1294,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { describe_enum_variant(cx, self.enum_type, struct_def, + nndiscr as usize, &adt.variants[nndiscr as usize], OptimizedDiscriminant, self.containing_scope, @@ -1386,31 +1380,25 @@ enum EnumDiscriminantInfo { fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_type: Ty<'tcx>, struct_def: &'tcx layout::Struct, + variant_index: usize, variant: &'tcx ty::VariantDef, discriminant_info: EnumDiscriminantInfo, containing_scope: DIScope, span: Span) -> (DICompositeType, MemberDescriptionFactory<'tcx>) { - let substs = match enum_type.sty { - ty::TyAdt(def, s) if def.adt_kind() == AdtKind::Enum => s, - ref t @ _ => bug!("{:#?} is not an enum", t) - }; - - let maybe_discr_and_signed: Option<(layout::Integer, bool)> = match *cx.layout_of(enum_type) { - layout::CEnum {discr, ..} => Some((discr, true)), - layout::General{discr, ..} => Some((discr, false)), - layout::Univariant { .. } - | layout::RawNullablePointer { .. } - | layout::StructWrappedNullablePointer { .. } => None, - ref l @ _ => bug!("This should be unreachable. Type is {:#?} layout is {:#?}", enum_type, l) + let layout = cx.layout_of(enum_type); + let maybe_discr = match *layout { + layout::General { .. 
} => Some(layout.field_type(cx, 0)), + _ => None, }; - let mut field_tys = variant.fields.iter().map(|f| { - monomorphize::field_ty(cx.tcx(), &substs, f) + let layout = layout.for_variant(variant_index); + let mut field_tys = (0..layout.field_count()).map(|i| { + layout.field_type(cx, i) }).collect::>(); - if let Some((discr, signed)) = maybe_discr_and_signed { - field_tys.insert(0, discr.to_ty(cx.tcx(), signed)); + if let Some(discr) = maybe_discr { + field_tys.insert(0, discr); } // Could do some consistency checks here: size, align, field count, discr type @@ -1560,7 +1548,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ref l @ _ => bug!("Not an enum layout: {:#?}", l) }; - let (enum_type_size, enum_type_align) = cx.size_and_align_of(enum_type); + let (enum_type_size, enum_type_align) = type_rep.size_and_align(cx); let enum_name = CString::new(enum_name).unwrap(); let unique_type_id_str = CString::new( @@ -1588,7 +1576,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_metadata, EnumMDF(EnumMemberDescriptionFactory { enum_type, - type_rep: type_rep.layout, + type_rep, discriminant_type_metadata, containing_scope, file_metadata, diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 597d8c587e921..9152a1febdf73 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -19,7 +19,6 @@ use common::*; use llvm::{ValueRef}; use llvm; use meth; -use monomorphize; use rustc::ty::layout::LayoutTyper; use rustc::ty::{self, Ty}; use value::Value; @@ -38,7 +37,19 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf } assert!(!info.is_null()); match t.sty { - ty::TyAdt(..) | ty::TyTuple(..) => { + ty::TyDynamic(..) 
=> { + // load size/align from vtable + (meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info)) + } + ty::TySlice(_) | ty::TyStr => { + let unit = t.sequence_element_type(bcx.tcx()); + // The info in this case is the length of the str, so the size is that + // times the unit size. + let (size, align) = bcx.ccx.size_and_align_of(unit); + (bcx.mul(info, C_usize(bcx.ccx, size.bytes())), + C_usize(bcx.ccx, align.abi())) + } + _ => { let ccx = bcx.ccx; // First get the size of all statically known fields. // Don't use size_of because it also rounds up to alignment, which we @@ -63,14 +74,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf // Recurse to get the size of the dynamically sized field (must be // the last field). - let field_ty = match t.sty { - ty::TyAdt(def, substs) => { - let last_field = def.struct_variant().fields.last().unwrap(); - monomorphize::field_ty(bcx.tcx(), substs, last_field) - }, - ty::TyTuple(tys, _) => tys.last().unwrap(), - _ => unreachable!(), - }; + let field_ty = layout.field_type(ccx, layout.field_count() - 1); let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info); // FIXME (#26403, #27023): We should be adding padding @@ -113,18 +117,5 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf (size, align) } - ty::TyDynamic(..) => { - // load size/align from vtable - (meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info)) - } - ty::TySlice(_) | ty::TyStr => { - let unit = t.sequence_element_type(bcx.tcx()); - // The info in this case is the length of the str, so the size is that - // times the unit size. 
- let (size, align) = bcx.ccx.size_and_align_of(unit); - (bcx.mul(info, C_usize(bcx.ccx, size.bytes())), - C_usize(bcx.ccx, align.abi())) - } - _ => bug!("Unexpected unsized type, found {}", t) } } diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 711854e28a899..2f0e86b8cac52 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -13,8 +13,9 @@ use intrinsics::{self, Intrinsic}; use llvm; use llvm::{ValueRef}; -use abi::{Abi, FnType}; +use abi::{self, Abi, FnType}; use mir::lvalue::{LvalueRef, Alignment}; +use mir::operand::{OperandRef, OperandValue}; use base::*; use common::*; use declare; @@ -105,6 +106,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let name = &*tcx.item_name(def_id); let llret_ty = type_of::type_of(ccx, ret_ty); + let result = LvalueRef::new_sized(llresult, ret_ty, Alignment::AbiAligned); let simple = get_simple_intrinsic(ccx, name); let llval = match name { @@ -238,9 +240,10 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, }, "volatile_store" => { let tp_ty = substs.type_at(0); + let dst = LvalueRef::new_sized(llargs[0], tp_ty, Alignment::AbiAligned); if type_is_fat_ptr(bcx.ccx, tp_ty) { - bcx.volatile_store(llargs[1], get_dataptr(bcx, llargs[0])); - bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0])); + bcx.volatile_store(llargs[1], dst.project_field(bcx, abi::FAT_PTR_ADDR).llval); + bcx.volatile_store(llargs[2], dst.project_field(bcx, abi::FAT_PTR_EXTRA).llval); } else { let val = if fn_ty.args[1].is_indirect() { bcx.load(llargs[1], None) @@ -250,7 +253,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } from_immediate(bcx, llargs[1]) }; - let ptr = bcx.pointercast(llargs[0], val_ty(val).ptr_to()); + let ptr = bcx.pointercast(dst.llval, val_ty(val).ptr_to()); let store = bcx.volatile_store(val, ptr); unsafe { llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty).abi() as u32); @@ -306,11 +309,14 @@ pub fn 
trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let llfn = bcx.ccx.get_intrinsic(&intrinsic); // Convert `i1` to a `bool`, and write it to the out parameter - let val = bcx.call(llfn, &[llargs[0], llargs[1]], None); - let result = bcx.extract_value(val, 0); - let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(ccx)); - bcx.store(result, bcx.struct_gep(llresult, 0), None); - bcx.store(overflow, bcx.struct_gep(llresult, 1), None); + let pair = bcx.call(llfn, &[llargs[0], llargs[1]], None); + let val = bcx.extract_value(pair, 0); + let overflow = bcx.zext(bcx.extract_value(pair, 1), Type::bool(ccx)); + + let dest = result.project_field(bcx, 0); + bcx.store(val, dest.llval, dest.alignment.non_abi()); + let dest = result.project_field(bcx, 1); + bcx.store(overflow, dest.llval, dest.alignment.non_abi()); return; }, @@ -373,7 +379,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "discriminant_value" => { let val_ty = substs.type_at(0); - let adt_val = LvalueRef::new_sized_ty(llargs[0], val_ty, Alignment::AbiAligned); + let adt_val = LvalueRef::new_sized(llargs[0], val_ty, Alignment::AbiAligned); match val_ty.sty { ty::TyAdt(adt, ..) 
if adt.is_enum() => { adt_val.trans_get_discr(bcx, ret_ty) @@ -446,12 +452,15 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let ty = substs.type_at(0); if int_type_width_signed(ty, ccx).is_some() { let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; - let val = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order, + let pair = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order, failorder, weak); - let result = bcx.extract_value(val, 0); - let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx)); - bcx.store(result, bcx.struct_gep(llresult, 0), None); - bcx.store(success, bcx.struct_gep(llresult, 1), None); + let val = bcx.extract_value(pair, 0); + let success = bcx.zext(bcx.extract_value(pair, 1), Type::bool(bcx.ccx)); + + let dest = result.project_field(bcx, 0); + bcx.store(val, dest.llval, dest.alignment.non_abi()); + let dest = result.project_field(bcx, 1); + bcx.store(success, dest.llval, dest.alignment.non_abi()); return; } else { return invalid_monomorphization(ty); @@ -589,10 +598,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // destructors, and the contents are SIMD // etc. 
assert!(!bcx.ccx.shared().type_needs_drop(arg_type)); - let arg = LvalueRef::new_sized_ty(llarg, arg_type, Alignment::AbiAligned); + let arg = LvalueRef::new_sized(llarg, arg_type, Alignment::AbiAligned); (0..contents.len()).map(|i| { - let (ptr, align) = arg.trans_field_ptr(bcx, i); - bcx.load(ptr, align.to_align()) + arg.project_field(bcx, i).load(bcx).immediate() }).collect() } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { @@ -654,11 +662,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, assert!(!flatten); for i in 0..elems.len() { - let val = bcx.extract_value(val, i); - let lval = LvalueRef::new_sized_ty(llresult, ret_ty, - Alignment::AbiAligned); - let (dest, align) = lval.trans_field_ptr(bcx, i); - bcx.store(val, dest, align.to_align()); + let dest = result.project_field(bcx, i); + let val = bcx.extract_value(val, i as u64); + bcx.store(val, dest.llval, dest.alignment.non_abi()); } return; } @@ -672,7 +678,10 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let ptr = bcx.pointercast(llresult, ty.llvm_type(ccx).ptr_to()); bcx.store(llval, ptr, Some(ccx.align_of(ret_ty))); } else { - store_ty(bcx, llval, llresult, Alignment::AbiAligned, ret_ty); + OperandRef { + val: OperandValue::Immediate(llval), + ty: ret_ty + }.unpack_if_pair(bcx).store(bcx, result); } } } @@ -1071,7 +1080,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( let indices: Option> = (0..n) .map(|i| { let arg_idx = i; - let val = const_get_elt(vector, i); + let val = const_get_elt(vector, i as u64); match const_to_opt_u128(val, true) { None => { emit_error!("shuffle index #{} is not a constant", arg_idx); diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 73e03dc069145..2776125bd8288 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -143,7 +143,6 @@ mod partitioning; mod symbol_names_test; mod time_graph; mod trans_item; -mod tvec; mod type_; mod type_of; mod value; diff --git a/src/librustc_trans/meth.rs 
b/src/librustc_trans/meth.rs index a2e7eb2258fc1..2289adb01ea6b 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -21,7 +21,7 @@ use rustc::ty::layout::HasDataLayout; use debuginfo; #[derive(Copy, Clone, Debug)] -pub struct VirtualIndex(usize); +pub struct VirtualIndex(u64); pub const DESTRUCTOR: VirtualIndex = VirtualIndex(0); pub const SIZE: VirtualIndex = VirtualIndex(1); @@ -29,14 +29,14 @@ pub const ALIGN: VirtualIndex = VirtualIndex(2); impl<'a, 'tcx> VirtualIndex { pub fn from_index(index: usize) -> Self { - VirtualIndex(index + 3) + VirtualIndex(index as u64 + 3) } pub fn get_fn(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef { // Load the data pointer from the object. debug!("get_fn({:?}, {:?})", Value(llvtable), self); - let ptr = bcx.load_nonnull(bcx.gepi(llvtable, &[self.0]), None); + let ptr = bcx.load_nonnull(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None); // Vtable loads are invariant bcx.set_invariant_load(ptr); ptr @@ -47,7 +47,7 @@ impl<'a, 'tcx> VirtualIndex { debug!("get_int({:?}, {:?})", Value(llvtable), self); let llvtable = bcx.pointercast(llvtable, Type::isize(bcx.ccx).ptr_to()); - let ptr = bcx.load(bcx.gepi(llvtable, &[self.0]), None); + let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None); // Vtable loads are invariant bcx.set_invariant_load(ptr); ptr diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index abd86a5cb01eb..cedc78cf9460d 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -12,11 +12,10 @@ use llvm::{self, ValueRef, BasicBlockRef}; use rustc::middle::lang_items; use rustc::middle::const_val::{ConstEvalErr, ConstInt, ErrKind}; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::LayoutTyper; use rustc::traits; use rustc::mir; use abi::{Abi, FnType, ArgType}; -use adt; use base; use callee; use builder::Builder; @@ -24,7 
+23,7 @@ use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; use meth; use monomorphize; -use type_of; +use type_of::{self, LayoutLlvmExt}; use type_::Type; use syntax::symbol::Symbol; @@ -173,13 +172,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx.cleanup_ret(cleanup_pad, None); } else { let slot = self.get_personality_slot(&bcx); - - let (lp0ptr, align) = slot.trans_field_ptr(&bcx, 0); - let lp0 = bcx.load(lp0ptr, align.to_align()); - - let (lp1ptr, align) = slot.trans_field_ptr(&bcx, 1); - let lp1 = bcx.load(lp1ptr, align.to_align()); - + let lp0 = slot.project_field(&bcx, 0).load(&bcx).immediate(); + let lp1 = slot.project_field(&bcx, 1).load(&bcx).immediate(); slot.storage_dead(&bcx); if !bcx.sess().target.target.options.custom_unwind_resume { @@ -240,9 +234,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let llslot = match op.val { Immediate(_) | Pair(..) => { - let llscratch = bcx.alloca(ret.memory_ty(bcx.ccx), "ret", None); - self.store_operand(&bcx, llscratch, None, op); - llscratch + let scratch = LvalueRef::alloca(&bcx, ret.layout.ty, "ret"); + op.store(&bcx, scratch); + scratch.llval } Ref(llval, align) => { assert_eq!(align, Alignment::AbiAligned, @@ -257,7 +251,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } else { let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER)); if let Ref(llval, align) = op.val { - base::load_ty(&bcx, llval, align, op.ty) + bcx.load(llval, align.non_abi()) } else { op.pack_if_pair(&bcx).immediate() } @@ -558,8 +552,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { ReturnDest::Nothing => { (C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to()), &llargs[..]) } - ReturnDest::IndirectOperand(dst, _) => (dst.llval, &llargs[..]), - ReturnDest::Store(dst) => (dst, &llargs[..]), + ReturnDest::IndirectOperand(dst, _) | + ReturnDest::Store(dst) => (dst.llval, &llargs[..]), ReturnDest::DirectOperand(_) => bug!("Cannot use direct operand with an intrinsic call") }; @@ -650,21 +644,21 @@ impl<'a, 'tcx> 
MirContext<'a, 'tcx> { let (mut llval, align, by_ref) = match op.val { Immediate(_) | Pair(..) => { if arg.is_indirect() || arg.cast.is_some() { - let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None); - self.store_operand(bcx, llscratch, None, op); - (llscratch, Alignment::AbiAligned, true) + let scratch = LvalueRef::alloca(bcx, arg.layout.ty, "arg"); + op.store(bcx, scratch); + (scratch.llval, Alignment::AbiAligned, true) } else { (op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false) } } - Ref(llval, align @ Alignment::Packed) if arg.is_indirect() => { + Ref(llval, align @ Alignment::Packed(_)) if arg.is_indirect() => { // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't // have scary latent bugs around. - let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None); - base::memcpy_ty(bcx, llscratch, llval, op.ty, align.to_align()); - (llscratch, Alignment::AbiAligned, true) + let scratch = LvalueRef::alloca(bcx, arg.layout.ty, "arg"); + base::memcpy_ty(bcx, scratch.llval, llval, op.ty, align.non_abi()); + (scratch.llval, Alignment::AbiAligned, true) } Ref(llval, align) => (llval, align, true) }; @@ -672,14 +666,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. if arg.layout.ty == bcx.tcx().types.bool { - // We store bools as i8 so we need to truncate to i1. llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None); - llval = bcx.trunc(llval, Type::i1(bcx.ccx)); + // We store bools as i8 so we need to truncate to i1. 
+ llval = base::to_immediate(bcx, llval, arg.layout.ty); } else if let Some(ty) = arg.cast { llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()), - align.min_with(Some(arg.layout.align(bcx.ccx)))); + (align | Alignment::Packed(arg.layout.align(bcx.ccx))) + .non_abi()); } else { - llval = bcx.load(llval, align.to_align()); + llval = bcx.load(llval, align.non_abi()); } } @@ -705,38 +700,28 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Handle both by-ref and immediate tuples. match tuple.val { Ref(llval, align) => { + let tuple_ptr = LvalueRef::new_sized(llval, tuple.ty, align); for (n, &ty) in arg_types.iter().enumerate() { - let ptr = LvalueRef::new_sized_ty(llval, tuple.ty, align); - let (ptr, align) = ptr.trans_field_ptr(bcx, n); - let val = if common::type_is_fat_ptr(bcx.ccx, ty) { - let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, align, ty); - Pair(lldata, llextra) + let field_ptr = tuple_ptr.project_field(bcx, n); + let op = if common::type_is_fat_ptr(bcx.ccx, ty) { + field_ptr.load(bcx) } else { // trans_argument will load this if it needs to - Ref(ptr, align) - }; - let op = OperandRef { - val, - ty, + OperandRef { + val: Ref(field_ptr.llval, field_ptr.alignment), + ty + } }; self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); } } Immediate(llval) => { - let l = bcx.ccx.layout_of(tuple.ty); - let v = if let layout::Univariant { ref variant, .. 
} = *l { - variant - } else { - bug!("Not a tuple."); - }; + let layout = bcx.ccx.layout_of(tuple.ty); for (n, &ty) in arg_types.iter().enumerate() { - let mut elem = bcx.extract_value( - llval, adt::struct_llfields_index(v, n)); + let mut elem = bcx.extract_value(llval, layout.llvm_field_index(n)); // Truncate bools to i1, if needed - if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) { - elem = bcx.trunc(elem, Type::i1(bcx.ccx)); - } + elem = base::to_immediate(bcx, elem, ty); // If the tuple is immediate, the elements are as well let op = OperandRef { val: Immediate(elem), @@ -748,11 +733,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { Pair(a, b) => { let elems = [a, b]; for (n, &ty) in arg_types.iter().enumerate() { - let mut elem = elems[n]; - // Truncate bools to i1, if needed - if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) { - elem = bcx.trunc(elem, Type::i1(bcx.ccx)); - } + let elem = base::to_immediate(bcx, elems[n], ty); // Pair is always made up of immediates let op = OperandRef { val: Immediate(elem), @@ -809,10 +790,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let slot = self.get_personality_slot(&bcx); slot.storage_live(&bcx); - self.store_operand(&bcx, slot.llval, None, OperandRef { + OperandRef { val: Pair(bcx.extract_value(lp, 0), bcx.extract_value(lp, 1)), ty: slot.ty.to_ty(ccx.tcx()) - }); + }.store(&bcx, slot); bcx.br(target_bb); bcx.llbb() @@ -888,7 +869,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { llargs.push(dest.llval); ReturnDest::Nothing }, - Alignment::Packed => { + Alignment::Packed(_) => { // Currently, MIR code generation does not create calls // that store directly to fields of packed structs (in // fact, the calls it creates write only to temps), @@ -899,7 +880,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } } else { - ReturnDest::Store(dest.llval) + ReturnDest::Store(dest) } } @@ -908,14 +889,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { dst: &mir::Lvalue<'tcx>) { if let mir::Lvalue::Local(index) = *dst { match 
self.locals[index] { - LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, &lvalue), + LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, lvalue), LocalRef::Operand(None) => { let lvalue_ty = self.monomorphized_lvalue_ty(dst); assert!(!lvalue_ty.has_erasable_regions()); let lvalue = LvalueRef::alloca(bcx, lvalue_ty, "transmute_temp"); lvalue.storage_live(bcx); - self.trans_transmute_into(bcx, src, &lvalue); - let op = self.trans_load(bcx, lvalue.llval, lvalue.alignment, lvalue_ty); + self.trans_transmute_into(bcx, src, lvalue); + let op = lvalue.load(bcx); lvalue.storage_dead(bcx); self.locals[index] = LocalRef::Operand(Some(op)); } @@ -927,20 +908,21 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } else { let dst = self.trans_lvalue(bcx, dst); - self.trans_transmute_into(bcx, src, &dst); + self.trans_transmute_into(bcx, src, dst); } } fn trans_transmute_into(&mut self, bcx: &Builder<'a, 'tcx>, src: &mir::Operand<'tcx>, - dst: &LvalueRef<'tcx>) { + dst: LvalueRef<'tcx>) { let val = self.trans_operand(bcx, src); let llty = type_of::type_of(bcx.ccx, val.ty); let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); let in_type = val.ty; let out_type = dst.ty.to_ty(bcx.tcx()); let align = bcx.ccx.align_of(in_type).min(bcx.ccx.align_of(out_type)); - self.store_operand(bcx, cast_ptr, Some(align), val); + val.store(bcx, + LvalueRef::new_sized(cast_ptr, val.ty, Alignment::Packed(align))); } @@ -956,7 +938,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { Nothing => (), Store(dst) => ret_ty.store(bcx, op.immediate(), dst), IndirectOperand(tmp, index) => { - let op = self.trans_load(bcx, tmp.llval, Alignment::AbiAligned, op.ty); + let op = tmp.load(bcx); tmp.storage_dead(bcx); self.locals[index] = LocalRef::Operand(Some(op)); } @@ -965,8 +947,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let op = if ret_ty.cast.is_some() { let tmp = LvalueRef::alloca(bcx, op.ty, "tmp_ret"); tmp.storage_live(bcx); - ret_ty.store(bcx, op.immediate(), tmp.llval); - let op = 
self.trans_load(bcx, tmp.llval, tmp.alignment, op.ty); + ret_ty.store(bcx, op.immediate(), tmp); + let op = tmp.load(bcx); tmp.storage_dead(bcx); op } else { @@ -982,7 +964,7 @@ enum ReturnDest<'tcx> { // Do nothing, the return value is indirect or ignored Nothing, // Store the return value to the pointer - Store(ValueRef), + Store(LvalueRef<'tcx>), // Stores an indirect return value to an operand local lvalue IndirectOperand(LvalueRef<'tcx>, mir::Local), // Stores a direct return value to an operand local lvalue diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index d4289363ecf38..c2bc0684c6278 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -32,7 +32,7 @@ use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector}; use common::const_to_opt_u128; use consts; -use type_of; +use type_of::{self, LayoutLlvmExt}; use type_::Type; use value::Value; @@ -117,14 +117,7 @@ impl<'a, 'tcx> Const<'tcx> { } fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef { - let layout = ccx.layout_of(self.ty); - let ix = if let layout::Univariant { ref variant, .. } = *layout { - adt::struct_llfields_index(variant, i) - } else { - i - }; - - const_get_elt(self.llval, ix) + const_get_elt(self.llval, ccx.layout_of(self.ty).llvm_field_index(i)) } fn get_pair(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) { @@ -494,7 +487,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { // Produce an undef instead of a LLVM assertion on OOB. 
let len = common::const_to_uint(tr_base.len(self.ccx)); let llelem = if iv < len as u128 { - const_get_elt(base.llval, iv as usize) + const_get_elt(base.llval, iv as u64) } else { C_undef(type_of::type_of(self.ccx, projected_ty)) }; diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index a0cab01b007d2..168f2e8c05679 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -10,28 +10,31 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, Align, LayoutTyper}; +use rustc::ty::layout::{self, Align, Layout, LayoutTyper}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; +use abi; use adt; use base; use builder::Builder; -use common::{self, CrateContext, C_usize, C_u8, C_i32, C_int, C_null, val_ty}; +use common::{self, CrateContext, C_usize, C_u8, C_u32, C_int, C_null, val_ty}; use consts; -use type_of; +use type_of::{self, LayoutLlvmExt}; use type_::Type; use value::Value; use glue; +use std::iter; use std::ptr; use std::ops; use super::{MirContext, LocalRef}; +use super::operand::{OperandRef, OperandValue}; #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum Alignment { - Packed, + Packed(Align), AbiAligned, } @@ -40,31 +43,41 @@ impl ops::BitOr for Alignment { fn bitor(self, rhs: Self) -> Self { match (self, rhs) { - (Alignment::Packed, _) => Alignment::Packed, - (Alignment::AbiAligned, a) => a, + (Alignment::Packed(a), Alignment::Packed(b)) => { + Alignment::Packed(a.min(b)) + } + (Alignment::Packed(x), _) | (_, Alignment::Packed(x)) => { + Alignment::Packed(x) + } + (Alignment::AbiAligned, Alignment::AbiAligned) => { + Alignment::AbiAligned + } } } } -impl Alignment { - pub fn from_packed(packed: bool) -> Self { +impl<'a> From<&'a Layout> for Alignment { + fn from(layout: &Layout) -> Self { + let (packed, align) = match *layout { + Layout::UntaggedUnion { ref variants } => (variants.packed, variants.align), + 
Layout::Univariant { ref variant, .. } => (variant.packed, variant.align), + _ => return Alignment::AbiAligned + }; if packed { - Alignment::Packed + Alignment::Packed(align) } else { Alignment::AbiAligned } } +} - pub fn to_align(self) -> Option { +impl Alignment { + pub fn non_abi(self) -> Option { match self { - Alignment::Packed => Some(Align::from_bytes(1, 1).unwrap()), + Alignment::Packed(x) => Some(x), Alignment::AbiAligned => None, } } - - pub fn min_with(self, align: Option) -> Option { - self.to_align().or(align) - } } fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool { @@ -87,13 +100,8 @@ pub struct LvalueRef<'tcx> { } impl<'a, 'tcx> LvalueRef<'tcx> { - pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>, - alignment: Alignment) -> LvalueRef<'tcx> { - LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty, alignment: alignment } - } - - pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> { - LvalueRef::new_sized(llval, LvalueTy::from_ty(ty), alignment) + pub fn new_sized(llval: ValueRef, ty: Ty<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> { + LvalueRef { llval, llextra: ptr::null_mut(), ty: LvalueTy::from_ty(ty), alignment } } pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> { @@ -101,7 +109,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let tmp = bcx.alloca( type_of::type_of(bcx.ccx, ty), name, bcx.ccx.over_align_of(ty)); assert!(!ty.has_param_types()); - Self::new_sized_ty(tmp, ty, Alignment::AbiAligned) + Self::new_sized(tmp, ty, Alignment::AbiAligned) } pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { @@ -122,8 +130,74 @@ impl<'a, 'tcx> LvalueRef<'tcx> { !self.llextra.is_null() } + pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { + debug!("LvalueRef::load: {:?}", self); + + assert!(!self.has_extra()); + + let ty = self.ty.to_ty(bcx.tcx()); + + if common::type_is_zero_size(bcx.ccx, ty) { + return 
OperandRef::new_zst(bcx.ccx, ty); + } + + let val = if common::type_is_fat_ptr(bcx.ccx, ty) { + let data = self.project_field(bcx, abi::FAT_PTR_ADDR); + let lldata = if ty.is_region_ptr() || ty.is_box() { + bcx.load_nonnull(data.llval, data.alignment.non_abi()) + } else { + bcx.load(data.llval, data.alignment.non_abi()) + }; + + let extra = self.project_field(bcx, abi::FAT_PTR_EXTRA); + let meta_ty = val_ty(extra.llval); + // If the 'extra' field is a pointer, it's a vtable, so use load_nonnull + // instead + let llextra = if meta_ty.element_type().kind() == llvm::TypeKind::Pointer { + bcx.load_nonnull(extra.llval, extra.alignment.non_abi()) + } else { + bcx.load(extra.llval, extra.alignment.non_abi()) + }; + + OperandValue::Pair(lldata, llextra) + } else if common::type_is_imm_pair(bcx.ccx, ty) { + OperandValue::Pair( + self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(), + self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate()) + } else if common::type_is_immediate(bcx.ccx, ty) { + let mut const_llval = ptr::null_mut(); + unsafe { + let global = llvm::LLVMIsAGlobalVariable(self.llval); + if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True { + const_llval = llvm::LLVMGetInitializer(global); + } + } + + let llval = if !const_llval.is_null() { + const_llval + } else if ty.is_bool() { + bcx.load_range_assert(self.llval, 0, 2, llvm::False, + self.alignment.non_abi()) + } else if ty.is_char() { + // a char is a Unicode codepoint, and so takes values from 0 + // to 0x10FFFF inclusive only. 
+ bcx.load_range_assert(self.llval, 0, 0x10FFFF + 1, llvm::False, + self.alignment.non_abi()) + } else if ty.is_region_ptr() || ty.is_box() || ty.is_fn() { + bcx.load_nonnull(self.llval, self.alignment.non_abi()) + } else { + bcx.load(self.llval, self.alignment.non_abi()) + }; + OperandValue::Immediate(base::to_immediate(bcx, llval, ty)) + } else { + OperandValue::Ref(self.llval, self.alignment) + }; + + OperandRef { val, ty } + } + /// Access a field, at a point when the value's case is known. - pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> (ValueRef, Alignment) { + pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx> { let ccx = bcx.ccx; let mut l = ccx.layout_of(self.ty.to_ty(bcx.tcx())); match self.ty { @@ -132,16 +206,16 @@ impl<'a, 'tcx> LvalueRef<'tcx> { l = l.for_variant(variant_index) } } - let fty = l.field(ccx, ix).ty; - let mut ix = ix; - let st = match *l { - layout::Vector { .. } => { - return (bcx.struct_gep(self.llval, ix), self.alignment); - } - layout::UntaggedUnion { ref variants } => { + let fty = l.field_type(ccx, ix); + + let alignment = self.alignment | Alignment::from(&*l); + + // Handle all the non-aggregate cases first. + match *l { + layout::UntaggedUnion { .. } => { let ty = type_of::in_memory_type_of(ccx, fty); - return (bcx.pointercast(self.llval, ty.ptr_to()), - self.alignment | Alignment::from_packed(variants.packed)); + return LvalueRef::new_sized( + bcx.pointercast(self.llval, ty.ptr_to()), fty, alignment); } layout::RawNullablePointer { nndiscr, .. } | layout::StructWrappedNullablePointer { nndiscr, .. } @@ -150,58 +224,66 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // (e.d., Result of Either with (), as one side.) 
let ty = type_of::type_of(ccx, fty); assert_eq!(ccx.size_of(fty).bytes(), 0); - return (bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed); + return LvalueRef::new_sized( + bcx.pointercast(self.llval, ty.ptr_to()), fty, + Alignment::Packed(Align::from_bytes(1, 1).unwrap())); } layout::RawNullablePointer { .. } => { let ty = type_of::type_of(ccx, fty); - return (bcx.pointercast(self.llval, ty.ptr_to()), self.alignment); + return LvalueRef::new_sized( + bcx.pointercast(self.llval, ty.ptr_to()), fty, alignment); } - layout::Univariant { ref variant, .. } => variant, - layout::StructWrappedNullablePointer { ref nonnull, .. } => nonnull, - layout::General { ref variants, .. } => { + _ => {} + } + + // Adjust the index to account for enum discriminants in variants. + let mut ix = ix; + if let layout::General { .. } = *l { + if l.variant_index.is_some() { ix += 1; - &variants[l.variant_index.unwrap()] } - _ => bug!("element access in type without elements: {} represented as {:#?}", l.ty, l) - }; + } - let alignment = self.alignment | Alignment::from_packed(st.packed); + let simple = || { + LvalueRef { + llval: bcx.struct_gep(self.llval, l.llvm_field_index(ix)), + llextra: if !ccx.shared().type_has_metadata(fty) { + ptr::null_mut() + } else { + self.llextra + }, + ty: LvalueTy::from_ty(fty), + alignment, + } + }; - let ptr_val = if let layout::General { discr, .. } = *l { - let variant_ty = Type::struct_(ccx, - &adt::struct_llfields(ccx, l.ty, l.variant_index.unwrap(), st, - Some(discr.to_ty(bcx.tcx(), false))), st.packed); - bcx.pointercast(self.llval, variant_ty.ptr_to()) - } else { - self.llval + // Check whether the variant being used is packed, if applicable. + let is_packed = match (&*l, l.variant_index) { + (&layout::Univariant { ref variant, .. }, _) => variant.packed, + (&layout::StructWrappedNullablePointer { ref nonnull, .. }, _) => nonnull.packed, + (&layout::General { ref variants, .. 
}, Some(v)) => variants[v].packed, + _ => return simple() }; // Simple case - we can just GEP the field - // * First field - Always aligned properly // * Packed struct - There is no alignment padding // * Field is sized - pointer is properly aligned already - if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || - ccx.shared().type_is_sized(fty) - { - return (bcx.struct_gep( - ptr_val, adt::struct_llfields_index(st, ix)), alignment); + if is_packed || ccx.shared().type_is_sized(fty) { + return simple(); } // If the type of the last field is [T], str or a foreign type, then we don't need to do // any adjusments match fty.sty { - ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => { - return (bcx.struct_gep( - ptr_val, adt::struct_llfields_index(st, ix)), alignment); - } + ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => return simple(), _ => () } // There's no metadata available, log the case and just do the GEP. if !self.has_extra() { debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment", - ix, Value(ptr_val)); - return (bcx.struct_gep(ptr_val, adt::struct_llfields_index(st, ix)), alignment); + ix, Value(self.llval)); + return simple(); } // We need to get the pointer manually now. 
@@ -222,7 +304,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let meta = self.llextra; - let offset = st.offsets[ix].bytes(); + let offset = l.field_offset(ccx, ix).bytes(); let unaligned_offset = C_usize(ccx, offset); // Get the alignment of the field @@ -241,21 +323,29 @@ impl<'a, 'tcx> LvalueRef<'tcx> { debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); // Cast and adjust pointer - let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(ccx)); + let byte_ptr = bcx.pointercast(self.llval, Type::i8p(ccx)); let byte_ptr = bcx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected let ll_fty = type_of::in_memory_type_of(ccx, fty); debug!("struct_field_ptr: Field type is {:?}", ll_fty); - (bcx.pointercast(byte_ptr, ll_fty.ptr_to()), alignment) + + LvalueRef { + llval: bcx.pointercast(byte_ptr, ll_fty.ptr_to()), + llextra: self.llextra, + ty: LvalueTy::from_ty(fty), + alignment, + } } // Double index to account for padding (FieldPath already uses `Struct::memory_index`) fn gepi_struct_llfields_path(self, bcx: &Builder, discrfield: &layout::FieldPath) -> ValueRef { - let path = discrfield.iter().map(|&i| { - adt::memory_index_to_gep(i as usize) - }).collect::>(); - bcx.gepi(self.llval, &path) + let path = iter::once(C_u32(bcx.ccx, 0)).chain(discrfield[1..].iter().map(|&i| { + let i = adt::memory_index_to_gep(i as u64); + assert_eq!(i as u32 as u64, i); + C_u32(bcx.ccx, i as u32) + })).collect::>(); + bcx.inbounds_gep(self.llval, &path) } /// Helper for cases where the discriminant is simply loaded. @@ -274,12 +364,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // rejected by the LLVM verifier (it would mean either an // empty set, which is impossible, or the entire range of the // type, which is pointless). - bcx.load(ptr, self.alignment.to_align()) + bcx.load(ptr, self.alignment.non_abi()) } else { // llvm::ConstantRange can deal with ranges that wrap around, // so an overflow on (max + 1) is fine. 
bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ llvm::True, - self.alignment.to_align()) + self.alignment.non_abi()) } } @@ -292,18 +382,18 @@ impl<'a, 'tcx> LvalueRef<'tcx> { self.load_discr(bcx, discr, self.llval, min, max) } layout::General { discr, ref variants, .. } => { - let ptr = bcx.struct_gep(self.llval, 0); - self.load_discr(bcx, discr, ptr, 0, variants.len() as u64 - 1) + let ptr = self.project_field(bcx, 0); + self.load_discr(bcx, discr, ptr.llval, 0, variants.len() as u64 - 1) } layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0), layout::RawNullablePointer { nndiscr, .. } => { let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; - let discr = bcx.load(self.llval, self.alignment.to_align()); + let discr = bcx.load(self.llval, self.alignment.non_abi()); bcx.icmp(cmp, discr, C_null(val_ty(discr))) } layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { let llptrptr = self.gepi_struct_llfields_path(bcx, discrfield); - let llptr = bcx.load(llptrptr, self.alignment.to_align()); + let llptr = bcx.load(llptrptr, self.alignment.non_abi()); let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; bcx.icmp(cmp, llptr, C_null(val_ty(llptr))) }, @@ -324,11 +414,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> { layout::CEnum { discr, min, max, .. } => { adt::assert_discr_in_range(min, max, to); bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64), - self.llval, self.alignment.to_align()); + self.llval, self.alignment.non_abi()); } layout::General { discr, .. } => { + let ptr = self.project_field(bcx, 0); bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64), - bcx.struct_gep(self.llval, 0), self.alignment.to_align()); + ptr.llval, ptr.alignment.non_abi()); } layout::Univariant { .. } | layout::UntaggedUnion { .. } @@ -338,7 +429,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { layout::RawNullablePointer { nndiscr, .. 
} => { if to != nndiscr { let llptrty = val_ty(self.llval).element_type(); - bcx.store(C_null(llptrty), self.llval, self.alignment.to_align()); + bcx.store(C_null(llptrty), self.llval, self.alignment.non_abi()); } } layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => { @@ -350,12 +441,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to()); let fill_byte = C_u8(bcx.ccx, 0); let size = C_usize(bcx.ccx, nonnull.stride().bytes()); - let align = C_i32(bcx.ccx, nonnull.align.abi() as i32); + let align = C_u32(bcx.ccx, nonnull.align.abi() as u32); base::call_memset(bcx, llptr, fill_byte, size, align, false); } else { let llptrptr = self.gepi_struct_llfields_path(bcx, discrfield); let llptrty = val_ty(llptrptr).element_type(); - bcx.store(C_null(llptrty), llptrptr, self.alignment.to_align()); + bcx.store(C_null(llptrty), llptrptr, self.alignment.non_abi()); } } } @@ -363,13 +454,47 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } } - pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef { - if let ty::TySlice(_) = self.ty.to_ty(bcx.tcx()).sty { - // Slices already point to the array element type. - bcx.inbounds_gep(self.llval, &[llindex]) + pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) + -> LvalueRef<'tcx> { + let ty = self.ty.to_ty(bcx.tcx()); + let (ptr, elem_ty) = match ty.sty { + ty::TySlice(ty) => { + // Slices already point to the array element type. 
+ (bcx.inbounds_gep(self.llval, &[llindex]), ty) + } + ty::TyArray(ty, _) => { + let zero = common::C_usize(bcx.ccx, 0); + (bcx.inbounds_gep(self.llval, &[zero, llindex]), ty) + } + _ => bug!("unexpected type `{}` in LvalueRef::project_index", ty) + }; + LvalueRef::new_sized(ptr, elem_ty, self.alignment) + } + + pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) + -> LvalueRef<'tcx> { + let ty = self.ty.to_ty(bcx.tcx()); + if let ty::TyAdt(adt_def, substs) = ty.sty { + let mut downcast = *self; + downcast.ty = LvalueTy::Downcast { + adt_def, + substs, + variant_index, + }; + + // If this is an enum, cast to the appropriate variant struct type. + let layout = bcx.ccx.layout_of(ty).for_variant(variant_index); + if let layout::General { discr, ref variants, .. } = *layout { + let st = &variants[variant_index]; + let variant_ty = Type::struct_(bcx.ccx, + &adt::struct_llfields(bcx.ccx, layout, st, + Some(discr.to_ty(bcx.tcx(), false))), st.packed); + downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); + } + + downcast } else { - let zero = common::C_usize(bcx.ccx, 0); - bcx.inbounds_gep(self.llval, &[zero, llindex]) + bug!("unexpected type `{}` in LvalueRef::project_downcast", ty) } } @@ -407,7 +532,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Lvalue::Local(_) => bug!(), // handled above mir::Lvalue::Static(box mir::Static { def_id, ty }) => { LvalueRef::new_sized(consts::get_static(ccx, def_id), - LvalueTy::from_ty(self.monomorphize(&ty)), + self.monomorphize(&ty), Alignment::AbiAligned) }, mir::Lvalue::Projection(box mir::Projection { @@ -419,33 +544,23 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::Lvalue::Projection(ref projection) => { let tr_base = self.trans_lvalue(bcx, &projection.base); - let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem); - let projected_ty = self.monomorphize(&projected_ty); - let align = tr_base.alignment; - let ((llprojected, align), llextra) = match projection.elem { + match 
projection.elem { mir::ProjectionElem::Deref => bug!(), mir::ProjectionElem::Field(ref field, _) => { - let has_metadata = self.ccx.shared() - .type_has_metadata(projected_ty.to_ty(tcx)); - let llextra = if !has_metadata { - ptr::null_mut() - } else { - tr_base.llextra - }; - (tr_base.trans_field_ptr(bcx, field.index()), llextra) + tr_base.project_field(bcx, field.index()) } mir::ProjectionElem::Index(index) => { let index = &mir::Operand::Consume(mir::Lvalue::Local(index)); let index = self.trans_operand(bcx, index); let llindex = index.immediate(); - ((tr_base.project_index(bcx, llindex), align), ptr::null_mut()) + tr_base.project_index(bcx, llindex) } mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { let lloffset = C_usize(bcx.ccx, offset as u64); - ((tr_base.project_index(bcx, lloffset), align), ptr::null_mut()) + tr_base.project_index(bcx, lloffset) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, @@ -453,39 +568,34 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let lloffset = C_usize(bcx.ccx, offset as u64); let lllen = tr_base.len(bcx.ccx); let llindex = bcx.sub(lllen, lloffset); - ((tr_base.project_index(bcx, llindex), align), ptr::null_mut()) + tr_base.project_index(bcx, llindex) } mir::ProjectionElem::Subslice { from, to } => { - let llbase = tr_base.project_index(bcx, C_usize(bcx.ccx, from as u64)); + let mut subslice = tr_base.project_index(bcx, + C_usize(bcx.ccx, from as u64)); + subslice.ty = tr_base.ty.projection_ty(tcx, &projection.elem); + subslice.ty = self.monomorphize(&subslice.ty); - let base_ty = tr_base.ty.to_ty(bcx.tcx()); - match base_ty.sty { + match subslice.ty.to_ty(tcx).sty { ty::TyArray(..) => { // must cast the lvalue pointer type to the new // array type (*[%_; new_len]). 
- let base_ty = self.monomorphized_lvalue_ty(lvalue); - let llbasety = type_of::type_of(bcx.ccx, base_ty).ptr_to(); - let llbase = bcx.pointercast(llbase, llbasety); - ((llbase, align), ptr::null_mut()) + subslice.llval = bcx.pointercast(subslice.llval, + type_of::type_of(bcx.ccx, subslice.ty.to_ty(tcx)).ptr_to()) } ty::TySlice(..) => { - assert!(tr_base.llextra != ptr::null_mut()); - let lllen = bcx.sub(tr_base.llextra, - C_usize(bcx.ccx, (from as u64)+(to as u64))); - ((llbase, align), lllen) + assert!(tr_base.has_extra()); + subslice.llextra = bcx.sub(tr_base.llextra, + C_usize(bcx.ccx, (from as u64) + (to as u64))); } - _ => bug!("unexpected type {:?} in Subslice", base_ty) + _ => bug!("unexpected type {:?} in Subslice", subslice.ty) } + + subslice } - mir::ProjectionElem::Downcast(..) => { - ((tr_base.llval, align), tr_base.llextra) + mir::ProjectionElem::Downcast(_, v) => { + tr_base.project_downcast(bcx, v) } - }; - LvalueRef { - llval: llprojected, - llextra, - ty: projected_ty, - alignment: align, } } }; diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 1cb13c973f9b8..c82a9317a021e 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -14,7 +14,6 @@ use llvm::debuginfo::DIScope; use rustc::ty::{self, Ty, TypeFoldable}; use rustc::ty::layout::{self, LayoutTyper}; use rustc::mir::{self, Mir}; -use rustc::mir::tcx::LvalueTy; use rustc::ty::subst::Substs; use rustc::infer::TransNormalize; use rustc::session::config::FullDebugInfo; @@ -23,7 +22,7 @@ use builder::Builder; use common::{self, CrateContext, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::Instance; -use abi::{ArgAttribute, FnType}; +use abi::{self, ArgAttribute, FnType}; use type_of; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; @@ -281,8 +280,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() { debug!("alloc: 
{:?} (return pointer) -> lvalue", local); let llretptr = llvm::get_param(llfn, 0); - LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty), - Alignment::AbiAligned)) + LocalRef::Lvalue(LvalueRef::new_sized(llretptr, ty, Alignment::AbiAligned)) } else if lvalue_locals.contains(local.index()) { debug!("alloc: {:?} -> lvalue", local); assert!(!ty.has_erasable_regions()); @@ -404,7 +402,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let lvalue = LvalueRef::alloca(bcx, arg_ty, &name); for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { - let (dst, _) = lvalue.trans_field_ptr(bcx, i); + let dst = lvalue.project_field(bcx, i); let arg = &mircx.fn_ty.args[idx]; idx += 1; if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) { @@ -412,8 +410,10 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // they are the two sub-fields of a single aggregate field. let meta = &mircx.fn_ty.args[idx]; idx += 1; - arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, dst)); - meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, dst)); + arg.store_fn_arg(bcx, &mut llarg_idx, + dst.project_field(bcx, abi::FAT_PTR_ADDR)); + meta.store_fn_arg(bcx, &mut llarg_idx, + dst.project_field(bcx, abi::FAT_PTR_EXTRA)); } else { arg.store_fn_arg(bcx, &mut llarg_idx, dst); } @@ -441,7 +441,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let arg = &mircx.fn_ty.args[idx]; idx += 1; - let llval = if arg.is_indirect() { + let lvalue = if arg.is_indirect() { // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up // FIXME: lifetimes @@ -451,7 +451,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); bcx.set_value_name(llarg, &name); llarg_idx += 1; - llarg + LvalueRef::new_sized(llarg, arg_ty, Alignment::AbiAligned) } else if !lvalue_locals.contains(local.index()) && arg.cast.is_none() && arg_scope.is_none() { if 
arg.is_ignore() { @@ -502,21 +502,21 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, }; return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); } else { - let lltemp = LvalueRef::alloca(bcx, arg_ty, &name); + let tmp = LvalueRef::alloca(bcx, arg_ty, &name); if common::type_is_fat_ptr(bcx.ccx, arg_ty) { // we pass fat pointers as two words, but we want to // represent them internally as a pointer to two words, // so make an alloca to store them in. let meta = &mircx.fn_ty.args[idx]; idx += 1; - arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, lltemp.llval)); - meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, lltemp.llval)); + arg.store_fn_arg(bcx, &mut llarg_idx, tmp.project_field(bcx, abi::FAT_PTR_ADDR)); + meta.store_fn_arg(bcx, &mut llarg_idx, tmp.project_field(bcx, abi::FAT_PTR_EXTRA)); } else { // otherwise, arg is passed by value, so make a // temporary and store it there - arg.store_fn_arg(bcx, &mut llarg_idx, lltemp.llval); + arg.store_fn_arg(bcx, &mut llarg_idx, tmp); } - lltemp.llval + tmp }; arg_scope.map(|scope| { // Is this a regular argument? @@ -527,11 +527,11 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let variable_access = if arg.is_indirect() && !arg.attrs.contains(ArgAttribute::ByVal) { VariableAccess::IndirectVariable { - alloca: llval, + alloca: lvalue.llval, address_operations: &deref_op, } } else { - VariableAccess::DirectVariable { alloca: llval } + VariableAccess::DirectVariable { alloca: lvalue.llval } }; declare_local( @@ -567,11 +567,12 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // doesn't actually strip the offset when splitting the closure // environment into its components so it ends up out of bounds. 
let env_ptr = if !env_ref { - let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr", None); - bcx.store(llval, alloc, None); - alloc + let alloc_ty = tcx.mk_mut_ptr(arg_ty); + let alloc = LvalueRef::alloca(bcx, alloc_ty, "__debuginfo_env_ptr"); + bcx.store(lvalue.llval, alloc.llval, None); + alloc.llval } else { - llval + lvalue.llval }; let layout = bcx.ccx.layout_of(closure_ty); @@ -619,8 +620,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ); } }); - LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty), - Alignment::AbiAligned)) + LocalRef::Lvalue(lvalue) }).collect() } @@ -628,6 +628,6 @@ mod analyze; mod block; mod constant; pub mod lvalue; -mod operand; +pub mod operand; mod rvalue; mod statement; diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index f4285c2ad0ad0..85bd99fed1465 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -10,18 +10,16 @@ use llvm::ValueRef; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{Align, Layout, LayoutTyper}; +use rustc::ty::layout::LayoutTyper; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; -use adt; use base; use common::{self, CrateContext, C_undef}; use builder::Builder; use value::Value; -use type_of; -use type_::Type; +use type_of::{self, LayoutLlvmExt}; use std::fmt; use std::ptr; @@ -49,8 +47,8 @@ pub enum OperandValue { /// /// NOTE: unless you know a value's type exactly, you should not /// generate LLVM opcodes acting on it and instead act via methods, -/// to avoid nasty edge cases. In particular, using `Builder.store` -/// directly is sure to cause problems -- use `MirContext.store_operand` +/// to avoid nasty edge cases. In particular, using `Builder::store` +/// directly is sure to cause problems -- use `OperandRef::store` /// instead. 
#[derive(Copy, Clone)] pub struct OperandRef<'tcx> { @@ -121,15 +119,10 @@ impl<'a, 'tcx> OperandRef<'tcx> { let llty = type_of::type_of(bcx.ccx, self.ty); let mut llpair = C_undef(llty); let elems = [a, b]; + let layout = bcx.ccx.layout_of(self.ty); for i in 0..2 { let elem = base::from_immediate(bcx, elems[i]); - let layout = bcx.ccx.layout_of(self.ty); - let i = if let Layout::Univariant { ref variant, .. } = *layout { - adt::struct_llfields_index(variant, i) - } else { - i - }; - llpair = bcx.insert_value(llpair, elem, i); + llpair = bcx.insert_value(llpair, elem, layout.llvm_field_index(i)); } self.val = OperandValue::Immediate(llpair); } @@ -145,72 +138,51 @@ impl<'a, 'tcx> OperandRef<'tcx> { debug!("Operand::unpack_if_pair: unpacking {:?}", self); let layout = bcx.ccx.layout_of(self.ty); - let (ix0, ix1) = if let Layout::Univariant { ref variant, .. } = *layout { - (adt::struct_llfields_index(variant, 0), - adt::struct_llfields_index(variant, 1)) - } else { - (0, 1) - }; - let mut a = bcx.extract_value(llval, ix0); - let mut b = bcx.extract_value(llval, ix1); + let a = bcx.extract_value(llval, layout.llvm_field_index(0)); + let a = base::to_immediate(bcx, a, layout.field_type(bcx.ccx, 0)); - let pair_fields = common::type_pair_fields(bcx.ccx, self.ty); - if let Some([a_ty, b_ty]) = pair_fields { - if a_ty.is_bool() { - a = bcx.trunc(a, Type::i1(bcx.ccx)); - } - if b_ty.is_bool() { - b = bcx.trunc(b, Type::i1(bcx.ccx)); - } - } + let b = bcx.extract_value(llval, layout.llvm_field_index(1)); + let b = base::to_immediate(bcx, b, layout.field_type(bcx.ccx, 1)); self.val = OperandValue::Pair(a, b); } } self } -} - -impl<'a, 'tcx> MirContext<'a, 'tcx> { - pub fn trans_load(&mut self, - bcx: &Builder<'a, 'tcx>, - llval: ValueRef, - align: Alignment, - ty: Ty<'tcx>) - -> OperandRef<'tcx> - { - debug!("trans_load: {:?} @ {:?}", Value(llval), ty); - - let val = if common::type_is_fat_ptr(bcx.ccx, ty) { - let (lldata, llextra) = base::load_fat_ptr(bcx, llval, align, 
ty); - OperandValue::Pair(lldata, llextra) - } else if common::type_is_imm_pair(bcx.ccx, ty) { - let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(ty) { - Layout::Univariant { ref variant, .. } => { - (adt::struct_llfields_index(variant, 0), - adt::struct_llfields_index(variant, 1), - Alignment::from_packed(variant.packed) | align) - }, - _ => (0, 1, align) - }; - let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx, ty).unwrap(); - let a_ptr = bcx.struct_gep(llval, ix0); - let b_ptr = bcx.struct_gep(llval, ix1); - - OperandValue::Pair( - base::load_ty(bcx, a_ptr, f_align, a_ty), - base::load_ty(bcx, b_ptr, f_align, b_ty) - ) - } else if common::type_is_immediate(bcx.ccx, ty) { - OperandValue::Immediate(base::load_ty(bcx, llval, align, ty)) - } else { - OperandValue::Ref(llval, align) - }; - OperandRef { val: val, ty: ty } + pub fn store(self, bcx: &Builder<'a, 'tcx>, dest: LvalueRef<'tcx>) { + debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest); + // Avoid generating stores of zero-sized values, because the only way to have a zero-sized + // value is through `undef`, and store itself is useless. + if common::type_is_zero_size(bcx.ccx, self.ty) { + return; + } + match self.val { + OperandValue::Ref(r, source_align) => + base::memcpy_ty(bcx, dest.llval, r, self.ty, + (source_align | dest.alignment).non_abi()), + OperandValue::Immediate(s) => { + bcx.store(base::from_immediate(bcx, s), dest.llval, dest.alignment.non_abi()); + } + OperandValue::Pair(a, b) => { + // See comment above about zero-sized values. 
+ let dest_a = dest.project_field(bcx, 0); + if !common::type_is_zero_size(bcx.ccx, dest_a.ty.to_ty(bcx.tcx())) { + let a = base::from_immediate(bcx, a); + bcx.store(a, dest_a.llval, dest_a.alignment.non_abi()); + } + let dest_b = dest.project_field(bcx, 1); + if !common::type_is_zero_size(bcx.ccx, dest_b.ty.to_ty(bcx.tcx())) { + let b = base::from_immediate(bcx, b); + bcx.store(b, dest_b.llval, dest_b.alignment.non_abi()); + } + } + } } +} +impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_consume(&mut self, bcx: &Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>) @@ -258,9 +230,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // for most lvalues, to consume them we just load them // out from their home - let tr_lvalue = self.trans_lvalue(bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.tcx()); - self.trans_load(bcx, tr_lvalue.llval, tr_lvalue.alignment, ty) + self.trans_lvalue(bcx, lvalue).load(bcx) } pub fn trans_operand(&mut self, @@ -280,59 +250,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let operand = val.to_operand(bcx.ccx); if let OperandValue::Ref(ptr, align) = operand.val { // If this is a OperandValue::Ref to an immediate constant, load it. - self.trans_load(bcx, ptr, align, operand.ty) + LvalueRef::new_sized(ptr, operand.ty, align).load(bcx) } else { operand } } } } - - pub fn store_operand(&mut self, - bcx: &Builder<'a, 'tcx>, - lldest: ValueRef, - align: Option, - operand: OperandRef<'tcx>) { - debug!("store_operand: operand={:?}, align={:?}", operand, align); - // Avoid generating stores of zero-sized values, because the only way to have a zero-sized - // value is through `undef`, and store itself is useless. 
- if common::type_is_zero_size(bcx.ccx, operand.ty) { - return; - } - match operand.val { - OperandValue::Ref(r, source_align) => - base::memcpy_ty(bcx, lldest, r, operand.ty, - source_align.min_with(align)), - OperandValue::Immediate(s) => { - bcx.store(base::from_immediate(bcx, s), lldest, align); - } - OperandValue::Pair(a, b) => { - let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(operand.ty) { - Layout::Univariant { ref variant, .. } => { - (adt::struct_llfields_index(variant, 0), - adt::struct_llfields_index(variant, 1), - if variant.packed { Some(variant.align) } else { None }) - } - _ => (0, 1, align) - }; - - let a = base::from_immediate(bcx, a); - let b = base::from_immediate(bcx, b); - - // See comment above about zero-sized values. - let (a_zst, b_zst) = common::type_pair_fields(bcx.ccx, operand.ty) - .map_or((false, false), |[a_ty, b_ty]| { - (common::type_is_zero_size(bcx.ccx, a_ty), - common::type_is_zero_size(bcx.ccx, b_ty)) - }); - - if !a_zst { - bcx.store(a, bcx.struct_gep(lldest, ix0), f_align); - } - if !b_zst { - bcx.store(b, bcx.struct_gep(lldest, ix1), f_align); - } - } - } - } } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index d975ed8cda482..4f4fbcd62371e 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -12,7 +12,6 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::layout::{Layout, LayoutTyper}; -use rustc::mir::tcx::LvalueTy; use rustc::mir; use rustc::middle::lang_items::ExchangeMallocFnLangItem; use rustc_apfloat::{ieee, Float, Status, Round}; @@ -25,11 +24,9 @@ use callee; use common::{self, val_ty}; use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral}; use consts; -use adt; use monomorphize; use type_::Type; use type_of; -use tvec; use value::Value; use super::{MirContext, LocalRef}; @@ -52,7 +49,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let tr_operand = 
self.trans_operand(&bcx, operand); // FIXME: consider not copying constants through stack. (fixable by translating // constants into OperandValue::Ref, why don’t we do that yet if we don’t?) - self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), tr_operand); + tr_operand.store(&bcx, dest); bcx } @@ -63,7 +60,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); - self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp); + temp.store(&bcx, dest); return bcx; } @@ -73,9 +70,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // so the (generic) MIR may not be able to expand it. let operand = self.trans_operand(&bcx, source); let operand = operand.pack_if_pair(&bcx); - let llref = match operand.val { + match operand.val { OperandValue::Pair(..) => bug!(), - OperandValue::Immediate(llval) => { + OperandValue::Immediate(_) => { // unsize from an immediate structure. We don't // really need a temporary alloca here, but // avoiding it would require us to have @@ -84,101 +81,93 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // important enough for it. 
debug!("trans_rvalue: creating ugly alloca"); let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp"); - base::store_ty(&bcx, llval, scratch.llval, scratch.alignment, operand.ty); - scratch + scratch.storage_live(&bcx); + operand.store(&bcx, scratch); + base::coerce_unsized_into(&bcx, scratch, dest); + scratch.storage_dead(&bcx); } OperandValue::Ref(llref, align) => { - LvalueRef::new_sized_ty(llref, operand.ty, align) + let source = LvalueRef::new_sized(llref, operand.ty, align); + base::coerce_unsized_into(&bcx, source, dest); } - }; - base::coerce_unsized_into(&bcx, &llref, &dest); + } bcx } mir::Rvalue::Repeat(ref elem, count) => { - let dest_ty = dest.ty.to_ty(bcx.tcx()); + let tr_elem = self.trans_operand(&bcx, elem); - // No need to inizialize memory of a zero-sized slice + // Do not generate the loop for zero-sized elements or empty arrays. + let dest_ty = dest.ty.to_ty(bcx.tcx()); if common::type_is_zero_size(bcx.ccx, dest_ty) { return bcx; } - let tr_elem = self.trans_operand(&bcx, elem); - let count = count.as_u64(); - let count = C_usize(bcx.ccx, count); - let base = base::get_dataptr(&bcx, dest.llval); - let align = dest.alignment.to_align(); + let start = dest.project_index(&bcx, C_usize(bcx.ccx, 0)).llval; if let OperandValue::Immediate(v) = tr_elem.val { - let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); + let align = dest.alignment.non_abi() + .unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); let align = C_i32(bcx.ccx, align.abi() as i32); let size = C_usize(bcx.ccx, bcx.ccx.size_of(dest_ty).bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays if common::is_const_integral(v) && common::const_to_uint(v) == 0 { let fill = C_u8(bcx.ccx, 0); - base::call_memset(&bcx, base, fill, size, align, false); + base::call_memset(&bcx, start, fill, size, align, false); return bcx; } // Use llvm.memset.p0i8.* to initialize byte arrays if common::val_ty(v) == Type::i8(bcx.ccx) { - base::call_memset(&bcx, base, v, size, 
align, false); + base::call_memset(&bcx, start, v, size, align, false); return bcx; } } - tvec::slice_for_each(&bcx, base, tr_elem.ty, count, |bcx, llslot, loop_bb| { - self.store_operand(bcx, llslot, align, tr_elem); - bcx.br(loop_bb); - }) + let count = count.as_u64(); + let count = C_usize(bcx.ccx, count); + let end = dest.project_index(&bcx, count).llval; + + let header_bcx = bcx.build_sibling_block("repeat_loop_header"); + let body_bcx = bcx.build_sibling_block("repeat_loop_body"); + let next_bcx = bcx.build_sibling_block("repeat_loop_next"); + + bcx.br(header_bcx.llbb()); + let current = header_bcx.phi(common::val_ty(start), &[start], &[bcx.llbb()]); + + let keep_going = header_bcx.icmp(llvm::IntNE, current, end); + header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb()); + + tr_elem.store(&body_bcx, + LvalueRef::new_sized(current, tr_elem.ty, dest.alignment)); + + let next = body_bcx.inbounds_gep(current, &[C_usize(bcx.ccx, 1)]); + body_bcx.br(header_bcx.llbb()); + header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb()); + + next_bcx } mir::Rvalue::Aggregate(ref kind, ref operands) => { - match **kind { - mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => { + let (dest, active_field_index) = match **kind { + mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { dest.trans_set_discr(&bcx, variant_index); - for (i, operand) in operands.iter().enumerate() { - let op = self.trans_operand(&bcx, operand); - // Do not generate stores and GEPis for zero-sized fields. 
- if !common::type_is_zero_size(bcx.ccx, op.ty) { - let mut val = LvalueRef::new_sized( - dest.llval, dest.ty, dest.alignment); - let field_index = active_field_index.unwrap_or(i); - val.ty = LvalueTy::Downcast { - adt_def, - substs: self.monomorphize(&substs), - variant_index, - }; - let (lldest_i, align) = val.trans_field_ptr(&bcx, field_index); - self.store_operand(&bcx, lldest_i, align.to_align(), op); - } - } - }, - _ => { - // If this is a tuple or closure, we need to translate GEP indices. - let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx())); - let get_memory_index = |i| { - if let Layout::Univariant { ref variant, .. } = *layout { - adt::struct_llfields_index(variant, i) - } else { - i - } - }; - let alignment = dest.alignment; - for (i, operand) in operands.iter().enumerate() { - let op = self.trans_operand(&bcx, operand); - // Do not generate stores and GEPis for zero-sized fields. - if !common::type_is_zero_size(bcx.ccx, op.ty) { - // Note: perhaps this should be StructGep, but - // note that in some cases the values here will - // not be structs but arrays. - let i = get_memory_index(i); - let dest = bcx.gepi(dest.llval, &[0, i]); - self.store_operand(&bcx, dest, alignment.to_align(), op); - } + if adt_def.is_enum() { + (dest.project_downcast(&bcx, variant_index), active_field_index) + } else { + (dest, active_field_index) } } + _ => (dest, None) + }; + for (i, operand) in operands.iter().enumerate() { + let op = self.trans_operand(&bcx, operand); + // Do not generate stores and GEPis for zero-sized fields. 
+ if !common::type_is_zero_size(bcx.ccx, op.ty) { + let field_index = active_field_index.unwrap_or(i); + op.store(&bcx, dest.project_field(&bcx, field_index)); + } } bcx } @@ -186,7 +175,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { _ => { assert!(self.rvalue_creates_operand(rvalue)); let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); - self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp); + temp.store(&bcx, dest); bcx } } diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs deleted file mode 100644 index da4a4e55a67f4..0000000000000 --- a/src/librustc_trans/tvec.rs +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use llvm; -use builder::Builder; -use llvm::{BasicBlockRef, ValueRef}; -use common::*; -use rustc::ty::Ty; - -pub fn slice_for_each<'a, 'tcx, F>( - bcx: &Builder<'a, 'tcx>, - data_ptr: ValueRef, - unit_ty: Ty<'tcx>, - len: ValueRef, - f: F -) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef, BasicBlockRef) { - // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) - let zst = type_is_zero_size(bcx.ccx, unit_ty); - let add = |bcx: &Builder, a, b| if zst { - bcx.add(a, b) - } else { - bcx.inbounds_gep(a, &[b]) - }; - - let body_bcx = bcx.build_sibling_block("slice_loop_body"); - let header_bcx = bcx.build_sibling_block("slice_loop_header"); - let next_bcx = bcx.build_sibling_block("slice_loop_next"); - - let start = if zst { - C_usize(bcx.ccx, 1) - } else { - data_ptr - }; - let end = add(&bcx, start, len); - - bcx.br(header_bcx.llbb()); - let current = header_bcx.phi(val_ty(start), &[start], &[bcx.llbb()]); - - let keep_going = header_bcx.icmp(llvm::IntNE, current, end); - header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb()); - - let next = add(&body_bcx, current, C_usize(bcx.ccx, 1)); - f(&body_bcx, if zst { data_ptr } else { current }, header_bcx.llbb()); - header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb()); - next_bcx -} diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index f74aec07087c7..1187ef1cd2f22 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -12,7 +12,7 @@ use abi::FnType; use adt; use common::*; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{Align, LayoutTyper, Size}; +use rustc::ty::layout::{Align, Layout, LayoutTyper, Size, TyLayout}; use trans_item::DefPathBasedNames; use type_::Type; @@ -237,6 +237,50 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { } } +pub trait LayoutLlvmExt { + fn llvm_field_index(&self, index: usize) -> u64; +} + +impl<'tcx> LayoutLlvmExt for TyLayout<'tcx> { + fn 
llvm_field_index(&self, index: usize) -> u64 { + match **self { + Layout::Scalar { .. } | + Layout::CEnum { .. } | + Layout::UntaggedUnion { .. } | + Layout::RawNullablePointer { .. } => { + bug!("TyLayout::llvm_field_index({:?}): not applicable", self) + } + + Layout::Vector { .. } | + Layout::Array { .. } | + Layout::FatPointer { .. } => { + index as u64 + } + + Layout::Univariant { ref variant, .. } => { + adt::memory_index_to_gep(variant.memory_index[index] as u64) + } + + Layout::General { ref variants, .. } => { + if let Some(v) = self.variant_index { + adt::memory_index_to_gep(variants[v].memory_index[index] as u64) + } else { + assert_eq!(index, 0); + index as u64 + } + } + + Layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => { + if self.variant_index == Some(nndiscr as usize) { + adt::memory_index_to_gep(nonnull.memory_index[index] as u64) + } else { + bug!("TyLayout::llvm_field_index({:?}): not applicable", self) + } + } + } + } +} + fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> String { let mut name = String::with_capacity(32); let printer = DefPathBasedNames::new(cx.tcx(), true, true); diff --git a/src/librustc_trans_utils/monomorphize.rs b/src/librustc_trans_utils/monomorphize.rs index ab61dacf010ae..eee5c1d9ef238 100644 --- a/src/librustc_trans_utils/monomorphize.rs +++ b/src/librustc_trans_utils/monomorphize.rs @@ -12,7 +12,7 @@ use rustc::hir::def_id::DefId; use rustc::middle::lang_items::DropInPlaceFnLangItem; use rustc::traits; use rustc::ty::adjustment::CustomCoerceUnsized; -use rustc::ty::subst::{Kind, Subst, Substs}; +use rustc::ty::subst::{Kind, Subst}; use rustc::ty::{self, Ty, TyCtxt}; pub use rustc::ty::Instance; @@ -125,12 +125,3 @@ pub fn custom_coerce_unsize_info<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -/// Returns the normalized type of a struct field -pub fn field_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_substs: &Substs<'tcx>, - f: &'tcx ty::FieldDef) - -> Ty<'tcx> -{ - 
tcx.fully_normalize_associated_types_in(&f.ty(tcx, param_substs)) -} - diff --git a/src/test/codegen/slice-init.rs b/src/test/codegen/slice-init.rs index 569d937c812cb..915db493fc2a4 100644 --- a/src/test/codegen/slice-init.rs +++ b/src/test/codegen/slice-init.rs @@ -15,7 +15,7 @@ // CHECK-LABEL: @zero_sized_elem #[no_mangle] pub fn zero_sized_elem() { - // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: br label %repeat_loop_header{{.*}} // CHECK-NOT: call void @llvm.memset.p0i8 let x = [(); 4]; drop(&x); @@ -24,7 +24,7 @@ pub fn zero_sized_elem() { // CHECK-LABEL: @zero_len_array #[no_mangle] pub fn zero_len_array() { - // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: br label %repeat_loop_header{{.*}} // CHECK-NOT: call void @llvm.memset.p0i8 let x = [4; 0]; drop(&x); @@ -34,7 +34,7 @@ pub fn zero_len_array() { #[no_mangle] pub fn byte_array() { // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 7, i[[WIDTH]] 4 - // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: br label %repeat_loop_header{{.*}} let x = [7u8; 4]; drop(&x); } @@ -50,7 +50,7 @@ enum Init { #[no_mangle] pub fn byte_enum_array() { // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 {{.*}}, i[[WIDTH]] 4 - // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: br label %repeat_loop_header{{.*}} let x = [Init::Memset; 4]; drop(&x); } @@ -59,7 +59,7 @@ pub fn byte_enum_array() { #[no_mangle] pub fn zeroed_integer_array() { // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 0, i[[WIDTH]] 16 - // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: br label %repeat_loop_header{{.*}} let x = [0u32; 4]; drop(&x); } @@ -67,7 +67,7 @@ pub fn zeroed_integer_array() { // CHECK-LABEL: @nonzero_integer_array #[no_mangle] pub fn nonzero_integer_array() { - // CHECK: br label %slice_loop_header{{.*}} + // CHECK: br label %repeat_loop_header{{.*}} // CHECK-NOT: call void @llvm.memset.p0i8 let x = 
[0x1a_2b_3c_4d_u32; 4]; drop(&x); From 84b5a3d84d6cc81423821cb3cbcf6cca3985b601 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Mon, 26 Jun 2017 14:57:50 +0300 Subject: [PATCH 08/69] rustc_trans: remove the in_memory_type_of distinction. --- src/librustc_trans/abi.rs | 7 ++- src/librustc_trans/adt.rs | 5 +- src/librustc_trans/asm.rs | 3 +- src/librustc_trans/base.rs | 4 +- src/librustc_trans/callee.rs | 3 +- src/librustc_trans/consts.rs | 9 ++-- src/librustc_trans/intrinsic.rs | 3 +- src/librustc_trans/mir/block.rs | 4 +- src/librustc_trans/mir/constant.rs | 20 ++++---- src/librustc_trans/mir/lvalue.rs | 16 +++--- src/librustc_trans/mir/mod.rs | 2 +- src/librustc_trans/mir/operand.rs | 6 +-- src/librustc_trans/mir/rvalue.rs | 8 +-- src/librustc_trans/trans_item.rs | 3 +- src/librustc_trans/type_of.rs | 82 +++++++++++++----------------- src/test/run-pass/issue-30276.rs | 14 ----- 16 files changed, 77 insertions(+), 112 deletions(-) delete mode 100644 src/test/run-pass/issue-30276.rs diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 04041488016c5..f4f37cdef514d 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -32,7 +32,6 @@ use cabi_nvptx64; use cabi_hexagon; use mir::lvalue::LvalueRef; use type_::Type; -use type_of; use rustc::hir; use rustc::ty::{self, Ty}; @@ -564,7 +563,7 @@ impl<'a, 'tcx> ArgType<'tcx> { /// Get the LLVM type for an lvalue of the original Rust type of /// this argument/return, i.e. the result of `type_of::type_of`. 
pub fn memory_ty(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { - type_of::type_of(ccx, self.layout.ty) + ccx.llvm_type_of(self.layout.ty) } /// Store a direct/indirect value described by this ArgType into a @@ -975,7 +974,7 @@ impl<'a, 'tcx> FnType<'tcx> { } else if let Some(cast) = self.ret.cast { cast.llvm_type(ccx) } else { - type_of::immediate_type_of(ccx, self.ret.layout.ty) + ccx.immediate_llvm_type_of(self.ret.layout.ty) }; for arg in &self.args { @@ -992,7 +991,7 @@ impl<'a, 'tcx> FnType<'tcx> { } else if let Some(cast) = arg.cast { cast.llvm_type(ccx) } else { - type_of::immediate_type_of(ccx, arg.layout.ty) + ccx.immediate_llvm_type_of(arg.layout.ty) }; llargument_tys.push(llarg_ty); diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 2383b37286510..c2988cd3da3f2 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -46,7 +46,6 @@ use rustc::ty::layout::{self, Align, HasDataLayout, LayoutTyper, Size, TyLayout} use context::CrateContext; use type_::Type; -use type_of; /// LLVM-level types are a little complicated. /// @@ -110,7 +109,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, if let layout::Scalar { value: layout::Pointer, .. } = *nnfield { Type::i8p(cx) } else { - type_of::type_of(cx, nnfield.ty) + cx.llvm_type_of(nnfield.ty) } } layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. 
} => { @@ -237,7 +236,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, result.push(Type::array(&Type::i8(cx), padding.bytes())); debug!(" padding before: {:?}", padding); } - let llty = type_of::in_memory_type_of(cx, field.ty); + let llty = cx.llvm_type_of(field.ty); result.push(llty); if variant.packed { diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index e6199df62d345..f4fbde2535fcb 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -12,7 +12,6 @@ use llvm::{self, ValueRef}; use common::*; -use type_of; use type_::Type; use builder::Builder; @@ -52,7 +51,7 @@ pub fn trans_inline_asm<'a, 'tcx>( if out.is_indirect { indirect_outputs.push(val.unwrap().immediate()); } else { - output_types.push(type_of::type_of(bcx.ccx, ty)); + output_types.push(bcx.ccx.llvm_type_of(ty)); } } if !indirect_outputs.is_empty() { diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 91f7bf39f1a6b..a987fa4a40e55 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -231,13 +231,13 @@ pub fn unsize_thin_ptr<'a, 'tcx>( (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. 
})) => { assert!(bcx.ccx.shared().type_is_sized(a)); - let ptr_ty = type_of::in_memory_type_of(bcx.ccx, b).ptr_to(); + let ptr_ty = bcx.ccx.llvm_type_of(b).ptr_to(); (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None)) } (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => { let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); assert!(bcx.ccx.shared().type_is_sized(a)); - let ptr_ty = type_of::in_memory_type_of(bcx.ccx, b).ptr_to(); + let ptr_ty = bcx.ccx.llvm_type_of(b).ptr_to(); (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None)) } _ => bug!("unsize_thin_ptr: called on bad types"), diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index b515c9420bf36..bb271a574a5e9 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -25,7 +25,6 @@ use rustc::ty::{self, TypeFoldable}; use rustc::traits; use rustc::ty::subst::Substs; use rustc_back::PanicStrategy; -use type_of; /// Translates a reference to a fn/method item, monomorphizing and /// inlining as it goes. @@ -56,7 +55,7 @@ pub fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // Create a fn pointer with the substituted signature. 
let fn_ptr_ty = tcx.mk_fn_ptr(common::ty_fn_sig(ccx, fn_ty)); - let llptrty = type_of::type_of(ccx, fn_ptr_ty); + let llptrty = ccx.llvm_type_of(fn_ptr_ty); let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) { // This is subtle and surprising, but sometimes we have to bitcast diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs index 83ecbbed76b9a..339405ab1baf0 100644 --- a/src/librustc_trans/consts.rs +++ b/src/librustc_trans/consts.rs @@ -21,7 +21,6 @@ use common::{self, CrateContext, val_ty}; use declare; use monomorphize::Instance; use type_::Type; -use type_of; use rustc::ty; use rustc::ty::layout::Align; @@ -113,7 +112,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { let ty = common::instance_ty(ccx.tcx(), &instance); let g = if let Some(id) = ccx.tcx().hir.as_local_node_id(def_id) { - let llty = type_of::type_of(ccx, ty); + let llty = ccx.llvm_type_of(ty); let (g, attrs) = match ccx.tcx().hir.get(id) { hir_map::NodeItem(&hir::Item { ref attrs, span, node: hir::ItemStatic(..), .. @@ -158,7 +157,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { } }; let llty2 = match ty.sty { - ty::TyRawPtr(ref mt) => type_of::type_of(ccx, mt.ty), + ty::TyRawPtr(ref mt) => ccx.llvm_type_of(mt.ty), _ => { ccx.sess().span_fatal(span, "must have type `*const T` or `*mut T`"); } @@ -207,7 +206,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? // FIXME(nagisa): investigate whether it can be changed into define_global - let g = declare::declare_global(ccx, &sym, type_of::type_of(ccx, ty)); + let g = declare::declare_global(ccx, &sym, ccx.llvm_type_of(ty)); // Thread-local statics in some other crate need to *always* be linked // against in a thread-local fashion, so we need to be sure to apply the // thread-local attribute locally if it was present remotely. 
If we @@ -267,7 +266,7 @@ pub fn trans_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let instance = Instance::mono(ccx.tcx(), def_id); let ty = common::instance_ty(ccx.tcx(), &instance); - let llty = type_of::type_of(ccx, ty); + let llty = ccx.llvm_type_of(ty); let g = if val_llty == llty { g } else { diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 2f0e86b8cac52..dbb8ef261f83d 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -20,7 +20,6 @@ use base::*; use common::*; use declare; use glue; -use type_of; use type_::Type; use rustc::ty::{self, Ty}; use rustc::ty::layout::HasDataLayout; @@ -105,7 +104,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let ret_ty = sig.output(); let name = &*tcx.item_name(def_id); - let llret_ty = type_of::type_of(ccx, ret_ty); + let llret_ty = ccx.llvm_type_of(ret_ty); let result = LvalueRef::new_sized(llresult, ret_ty, Alignment::AbiAligned); let simple = get_simple_intrinsic(ccx, name); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index cedc78cf9460d..380ed5266e9a5 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -23,7 +23,7 @@ use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; use meth; use monomorphize; -use type_of::{self, LayoutLlvmExt}; +use type_of::LayoutLlvmExt; use type_::Type; use syntax::symbol::Symbol; @@ -916,7 +916,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) { let val = self.trans_operand(bcx, src); - let llty = type_of::type_of(bcx.ccx, val.ty); + let llty = bcx.ccx.llvm_type_of(val.ty); let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); let in_type = val.ty; let out_type = dst.ty.to_ty(bcx.tcx()); diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index c2bc0684c6278..e253701903f22 100644 --- a/src/librustc_trans/mir/constant.rs +++ 
b/src/librustc_trans/mir/constant.rs @@ -87,7 +87,7 @@ impl<'a, 'tcx> Const<'tcx> { cv: &ConstVal, ty: Ty<'tcx>) -> Const<'tcx> { - let llty = type_of::type_of(ccx, ty); + let llty = ccx.llvm_type_of(ty); let val = match *cv { ConstVal::Float(v) => { let bits = match v.ty { @@ -139,7 +139,7 @@ impl<'a, 'tcx> Const<'tcx> { } pub fn to_operand(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> { - let llty = type_of::immediate_type_of(ccx, self.ty); + let llty = ccx.immediate_llvm_type_of(self.ty); let llvalty = val_ty(self.llval); let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) { @@ -489,7 +489,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let llelem = if iv < len as u128 { const_get_elt(base.llval, iv as u64) } else { - C_undef(type_of::type_of(self.ccx, projected_ty)) + C_undef(self.ccx.llvm_type_of(projected_ty)) }; (Base::Value(llelem), ptr::null_mut()) @@ -543,7 +543,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let elem_ty = array_ty.builtin_index().unwrap_or_else(|| { bug!("bad array type {:?}", array_ty) }); - let llunitty = type_of::type_of(self.ccx, elem_ty); + let llunitty = self.ccx.llvm_type_of(elem_ty); // If the array contains enums, an LLVM array won't work. 
let val = if fields.iter().all(|&f| val_ty(f) == llunitty) { C_array(llunitty, fields) @@ -665,7 +665,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let unsized_ty = cast_ty.builtin_deref(true, ty::NoPreference) .expect("consts: unsizing got non-pointer target type").ty; - let ptr_ty = type_of::in_memory_type_of(self.ccx, unsized_ty).ptr_to(); + let ptr_ty = self.ccx.llvm_type_of(unsized_ty).ptr_to(); let base = consts::ptrcast(base, ptr_ty); let info = base::unsized_info(self.ccx, pointee_ty, unsized_ty, old_info); @@ -683,7 +683,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { debug_assert!(common::type_is_immediate(self.ccx, cast_ty)); let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); - let ll_t_out = type_of::immediate_type_of(self.ccx, cast_ty); + let ll_t_out = self.ccx.immediate_llvm_type_of(cast_ty); let llval = operand.llval; let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in { let l = self.ccx.layout_of(operand.ty); @@ -738,7 +738,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - let llcast_ty = type_of::immediate_type_of(self.ccx, cast_ty); + let llcast_ty = self.ccx.immediate_llvm_type_of(cast_ty); consts::ptrcast(data_ptr, llcast_ty) } } else { @@ -1045,7 +1045,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let result = result.unwrap_or_else(|_| { // We've errored, so we don't have to produce working code. - let llty = type_of::type_of(bcx.ccx, ty); + let llty = bcx.ccx.llvm_type_of(ty); Const::new(C_undef(llty), ty) }); @@ -1137,7 +1137,7 @@ fn trans_const_adt<'a, 'tcx>( assert_eq!(vals.len(), 1); Const::new(vals[0].llval, t) } else { - Const::new(C_null(type_of::type_of(ccx, t)), t) + Const::new(C_null(ccx.llvm_type_of(t)), t) } } layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. 
} => { @@ -1146,7 +1146,7 @@ fn trans_const_adt<'a, 'tcx>( } else { // Always use null even if it's not the `discrfield`th // field; see #8506. - Const::new(C_null(type_of::type_of(ccx, t)), t) + Const::new(C_null(ccx.llvm_type_of(t)), t) } } _ => bug!("trans_const_adt: cannot handle type {} repreented as {:#?}", t, l) diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 168f2e8c05679..77d99427da30f 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -20,7 +20,7 @@ use base; use builder::Builder; use common::{self, CrateContext, C_usize, C_u8, C_u32, C_int, C_null, val_ty}; use consts; -use type_of::{self, LayoutLlvmExt}; +use type_of::LayoutLlvmExt; use type_::Type; use value::Value; use glue; @@ -107,7 +107,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> { debug!("alloca({:?}: {:?})", name, ty); let tmp = bcx.alloca( - type_of::type_of(bcx.ccx, ty), name, bcx.ccx.over_align_of(ty)); + bcx.ccx.llvm_type_of(ty), name, bcx.ccx.over_align_of(ty)); assert!(!ty.has_param_types()); Self::new_sized(tmp, ty, Alignment::AbiAligned) } @@ -213,7 +213,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // Handle all the non-aggregate cases first. match *l { layout::UntaggedUnion { .. } => { - let ty = type_of::in_memory_type_of(ccx, fty); + let ty = ccx.llvm_type_of(fty); return LvalueRef::new_sized( bcx.pointercast(self.llval, ty.ptr_to()), fty, alignment); } @@ -222,14 +222,14 @@ impl<'a, 'tcx> LvalueRef<'tcx> { if l.variant_index.unwrap() as u64 != nndiscr => { // The unit-like case might have a nonzero number of unit-like fields. // (e.d., Result of Either with (), as one side.) 
- let ty = type_of::type_of(ccx, fty); + let ty = ccx.llvm_type_of(fty); assert_eq!(ccx.size_of(fty).bytes(), 0); return LvalueRef::new_sized( bcx.pointercast(self.llval, ty.ptr_to()), fty, Alignment::Packed(Align::from_bytes(1, 1).unwrap())); } layout::RawNullablePointer { .. } => { - let ty = type_of::type_of(ccx, fty); + let ty = ccx.llvm_type_of(fty); return LvalueRef::new_sized( bcx.pointercast(self.llval, ty.ptr_to()), fty, alignment); } @@ -327,7 +327,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let byte_ptr = bcx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = type_of::in_memory_type_of(ccx, fty); + let ll_fty = ccx.llvm_type_of(fty); debug!("struct_field_ptr: Field type is {:?}", ll_fty); LvalueRef { @@ -399,7 +399,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { }, _ => bug!("{} is not an enum", l.ty) }; - let cast_to = type_of::immediate_type_of(bcx.ccx, cast_to); + let cast_to = bcx.ccx.immediate_llvm_type_of(cast_to); bcx.intcast(val, cast_to, adt::is_discr_signed(&l)) } @@ -581,7 +581,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // must cast the lvalue pointer type to the new // array type (*[%_; new_len]). subslice.llval = bcx.pointercast(subslice.llval, - type_of::type_of(bcx.ccx, subslice.ty.to_ty(tcx)).ptr_to()) + bcx.ccx.llvm_type_of(subslice.ty.to_ty(tcx)).ptr_to()) } ty::TySlice(..) 
=> { assert!(tr_base.has_extra()); diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index c82a9317a021e..a03408390f95e 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -483,7 +483,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ty::TyAdt(def, _) if def.is_box() => arg_ty.boxed_ty(), _ => bug!() }; - let data_llty = type_of::in_memory_type_of(bcx.ccx, pointee); + let data_llty = bcx.ccx.llvm_type_of(pointee); let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee); let llarg = bcx.pointercast(llarg, data_llty.ptr_to()); diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 85bd99fed1465..b65a6453e6178 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -19,7 +19,7 @@ use base; use common::{self, CrateContext, C_undef}; use builder::Builder; use value::Value; -use type_of::{self, LayoutLlvmExt}; +use type_of::LayoutLlvmExt; use std::fmt; use std::ptr; @@ -82,7 +82,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { pub fn new_zst(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> OperandRef<'tcx> { assert!(common::type_is_zero_size(ccx, ty)); - let llty = type_of::type_of(ccx, ty); + let llty = ccx.llvm_type_of(ty); Const::new(C_undef(llty), ty).to_operand(ccx) } @@ -116,7 +116,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Pair(a, b) = self.val { // Reconstruct the immediate aggregate. 
- let llty = type_of::type_of(bcx.ccx, self.ty); + let llty = bcx.ccx.llvm_type_of(self.ty); let mut llpair = C_undef(llty); let elems = [a, b]; let layout = bcx.ccx.layout_of(self.ty); diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 4f4fbcd62371e..592181df85297 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -260,7 +260,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - let llcast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty); + let llcast_ty = bcx.ccx.immediate_llvm_type_of(cast_ty); let llval = bcx.pointercast(data_ptr, llcast_ty); OperandValue::Immediate(llval) } @@ -272,8 +272,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty)); let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); - let ll_t_in = type_of::immediate_type_of(bcx.ccx, operand.ty); - let ll_t_out = type_of::immediate_type_of(bcx.ccx, cast_ty); + let ll_t_in = bcx.ccx.immediate_llvm_type_of(operand.ty); + let ll_t_out = bcx.ccx.immediate_llvm_type_of(cast_ty); let llval = operand.immediate(); let l = bcx.ccx.layout_of(operand.ty); let signed = if let Layout::CEnum { signed, min, max, .. 
} = *l { @@ -457,7 +457,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let llsize = C_usize(bcx.ccx, size.bytes()); let llalign = C_usize(bcx.ccx, align.abi()); let box_ty = bcx.tcx().mk_box(content_ty); - let llty_ptr = type_of::type_of(bcx.ccx, box_ty); + let llty_ptr = bcx.ccx.llvm_type_of(box_ty); // Allocate space: let def_id = match bcx.tcx().lang_items().require(ExchangeMallocFnLangItem) { diff --git a/src/librustc_trans/trans_item.rs b/src/librustc_trans/trans_item.rs index fb68be293a79e..a452ed21aef49 100644 --- a/src/librustc_trans/trans_item.rs +++ b/src/librustc_trans/trans_item.rs @@ -30,7 +30,6 @@ use syntax::ast; use syntax::attr; use syntax_pos::Span; use syntax_pos::symbol::Symbol; -use type_of; use std::fmt; pub use rustc::middle::trans::TransItem; @@ -173,7 +172,7 @@ fn predefine_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let def_id = ccx.tcx().hir.local_def_id(node_id); let instance = Instance::mono(ccx.tcx(), def_id); let ty = common::instance_ty(ccx.tcx(), &instance); - let llty = type_of::type_of(ccx, ty); + let llty = ccx.llvm_type_of(ty); let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| { ccx.sess().span_fatal(ccx.tcx().hir.span(node_id), diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 1187ef1cd2f22..e3f6485d46245 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -22,10 +22,10 @@ pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> match ty.sty { ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) | ty::TyRawPtr(ty::TypeAndMut { ty: t, .. 
}) if ccx.shared().type_has_metadata(t) => { - in_memory_type_of(ccx, t).ptr_to() + ccx.llvm_type_of(t).ptr_to() } ty::TyAdt(def, _) if def.is_box() => { - in_memory_type_of(ccx, ty.boxed_ty()).ptr_to() + ccx.llvm_type_of(ty.boxed_ty()).ptr_to() } _ => bug!("expected fat ptr ty but got {:?}", ty) } @@ -43,44 +43,7 @@ pub fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> } } -pub fn immediate_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { - if t.is_bool() { - Type::i1(cx) - } else { - type_of(cx, t) - } -} - -/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. -/// This is the right LLVM type for an alloca containing a value of that type, -/// and the pointee of an Lvalue Datum (which is always a LLVM pointer). -/// For unsized types, the returned type is a fat pointer, thus the resulting -/// LLVM type for a `Trait` Lvalue is `{ i8*, void(i8*)** }*`, which is a double -/// indirection to the actual data, unlike a `i8` Lvalue, which is just `i8*`. -/// This is needed due to the treatment of immediate values, as a fat pointer -/// is too large for it to be placed in SSA value (by our rules). -/// For the raw type without far pointer indirection, see `in_memory_type_of`. -pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { - let ty = if cx.shared().type_has_metadata(ty) { - cx.tcx().mk_imm_ptr(ty) - } else { - ty - }; - in_memory_type_of(cx, ty) -} - -/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. -/// This is the right LLVM type for a field/array element of that type, -/// and is the same as `type_of` for all Sized types. -/// Unsized types, however, are represented by a "minimal unit", e.g. -/// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this -/// is useful for indexing slices, as `&[T]`'s data pointer is `T*`. 
-/// If the type is an unsized struct, the regular layout is generated, -/// with the inner-most trailing unsized field using the "minimal unit" -/// of that field's type - this is useful for taking the address of -/// that field and ensuring the struct has the right alignment. -/// For the LLVM type of a value as a whole, see `type_of`. -pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { +fn compute_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { // Check the cache. if let Some(&llty) = cx.lltypes().borrow().get(&t) { return llty; @@ -98,7 +61,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> let t_norm = cx.tcx().erase_regions(&t); if t != t_norm { - let llty = in_memory_type_of(cx, t_norm); + let llty = cx.llvm_type_of(t_norm); debug!("--> normalized {:?} to {:?} llty={:?}", t, t_norm, llty); cx.lltypes().borrow_mut().insert(t, llty); return llty; @@ -111,12 +74,12 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> // unsized). cx.str_slice_type() } else { - let ptr_ty = in_memory_type_of(cx, ty).ptr_to(); + let ptr_ty = cx.llvm_type_of(ty).ptr_to(); let info_ty = unsized_info_ty(cx, ty); Type::struct_(cx, &[ptr_ty, info_ty], false) } } else { - in_memory_type_of(cx, ty).ptr_to() + cx.llvm_type_of(ty).ptr_to() } }; @@ -147,7 +110,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> } ty::TyArray(ty, size) => { - let llty = in_memory_type_of(cx, ty); + let llty = cx.llvm_type_of(ty); let size = size.val.to_const_int().unwrap().to_u64().unwrap(); Type::array(&llty, size) } @@ -156,7 +119,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> // traits have the type of u8. This is so that the data pointer inside // fat pointers is of the right type (e.g. for array accesses), even // when taking the address of an unsized field in a struct. 
- ty::TySlice(ty) => in_memory_type_of(cx, ty), + ty::TySlice(ty) => cx.llvm_type_of(ty), ty::TyStr | ty::TyDynamic(..) | ty::TyForeign(..) => Type::i8(cx), ty::TyFnDef(..) => Type::nil(cx), @@ -175,7 +138,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> a non-machine element type `{}`", t, e)) } - let llet = in_memory_type_of(cx, e); + let llet = cx.llvm_type_of(e); let n = t.simd_size(cx.tcx()) as u64; Type::vector(&llet, n) } @@ -225,8 +188,8 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { } /// Returns alignment if it is different than the primitive alignment. - pub fn over_align_of(&self, t: Ty<'tcx>) -> Option { - let layout = self.layout_of(t); + pub fn over_align_of(&self, ty: Ty<'tcx>) -> Option { + let layout = self.layout_of(ty); let align = layout.align(self); let primitive_align = layout.primitive_align(self); if align != primitive_align { @@ -235,6 +198,29 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { None } } + + /// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. + /// The pointee type of the pointer in `LvalueRef` is always this type. + /// For sized types, it is also the right LLVM type for an `alloca` + /// containing a value of that type, and most immediates (except `bool`). + /// Unsized types, however, are represented by a "minimal unit", e.g. + /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this + /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`. + /// If the type is an unsized struct, the regular layout is generated, + /// with the inner-most trailing unsized field using the "minimal unit" + /// of that field's type - this is useful for taking the address of + /// that field and ensuring the struct has the right alignment. 
+ pub fn llvm_type_of(&self, ty: Ty<'tcx>) -> Type { + compute_llvm_type(self, ty) + } + + pub fn immediate_llvm_type_of(&self, ty: Ty<'tcx>) -> Type { + if ty.is_bool() { + Type::i1(self) + } else { + self.llvm_type_of(ty) + } + } } pub trait LayoutLlvmExt { diff --git a/src/test/run-pass/issue-30276.rs b/src/test/run-pass/issue-30276.rs deleted file mode 100644 index 5dd0cd8ba5313..0000000000000 --- a/src/test/run-pass/issue-30276.rs +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -struct Test([i32]); -fn main() { - let _x: fn(_) -> Test = Test; -} From 0a1fcc32a65c87646fe1613ea00c9447f04a646b Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Mon, 26 Jun 2017 18:33:50 +0300 Subject: [PATCH 09/69] rustc_trans: use *[T; 0] for slice data pointers instead of *T. --- src/librustc_trans/common.rs | 3 ++- src/librustc_trans/context.rs | 4 +++- src/librustc_trans/mir/lvalue.rs | 26 ++++++++------------------ src/librustc_trans/type_of.rs | 14 ++++++++------ src/test/codegen/adjustments.rs | 7 ++++--- src/test/codegen/function-arguments.rs | 2 +- src/test/codegen/refs.rs | 6 +++--- 7 files changed, 29 insertions(+), 33 deletions(-) diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 109c111efa95f..749c5393e43e9 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -265,7 +265,8 @@ pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> Va // you will be kicked off fast isel. See issue #4352 for an example of this. 
pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef { let len = s.len(); - let cs = consts::ptrcast(C_cstr(cx, s, false), Type::i8p(cx)); + let cs = consts::ptrcast(C_cstr(cx, s, false), + cx.llvm_type_of(cx.tcx().mk_str()).ptr_to()); C_named_struct(cx.str_slice_type(), &[cs, C_usize(cx, len as u64)]) } diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 5e12be5a22c9a..4e003edac3c6b 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -395,7 +395,9 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { let dummy_ccx = LocalCrateContext::dummy_ccx(shared, local_ccxs.as_mut_slice()); let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice"); - str_slice_ty.set_struct_body(&[Type::i8p(&dummy_ccx), + + let llptrty = dummy_ccx.llvm_type_of(shared.tcx.mk_str()).ptr_to(); + str_slice_ty.set_struct_body(&[llptrty, Type::isize(&dummy_ccx)], false); (Type::isize(&dummy_ccx), str_slice_ty) diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 77d99427da30f..8bd4142f2c1a4 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -456,18 +456,8 @@ impl<'a, 'tcx> LvalueRef<'tcx> { pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> LvalueRef<'tcx> { - let ty = self.ty.to_ty(bcx.tcx()); - let (ptr, elem_ty) = match ty.sty { - ty::TySlice(ty) => { - // Slices already point to the array element type. 
- (bcx.inbounds_gep(self.llval, &[llindex]), ty) - } - ty::TyArray(ty, _) => { - let zero = common::C_usize(bcx.ccx, 0); - (bcx.inbounds_gep(self.llval, &[zero, llindex]), ty) - } - _ => bug!("unexpected type `{}` in LvalueRef::project_index", ty) - }; + let ptr = bcx.inbounds_gep(self.llval, &[common::C_usize(bcx.ccx, 0), llindex]); + let elem_ty = self.ty.to_ty(bcx.tcx()).builtin_index().unwrap(); LvalueRef::new_sized(ptr, elem_ty, self.alignment) } @@ -577,12 +567,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { subslice.ty = self.monomorphize(&subslice.ty); match subslice.ty.to_ty(tcx).sty { - ty::TyArray(..) => { - // must cast the lvalue pointer type to the new - // array type (*[%_; new_len]). - subslice.llval = bcx.pointercast(subslice.llval, - bcx.ccx.llvm_type_of(subslice.ty.to_ty(tcx)).ptr_to()) - } + ty::TyArray(..) => {} ty::TySlice(..) => { assert!(tr_base.has_extra()); subslice.llextra = bcx.sub(tr_base.llextra, @@ -591,6 +576,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { _ => bug!("unexpected type {:?} in Subslice", subslice.ty) } + // Cast the lvalue pointer type to the new + // array or slice type (*[%_; new_len]). + subslice.llval = bcx.pointercast(subslice.llval, + bcx.ccx.llvm_type_of(subslice.ty.to_ty(tcx)).ptr_to()); + subslice } mir::ProjectionElem::Downcast(_, v) => { diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index e3f6485d46245..d130595763447 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -115,12 +115,14 @@ fn compute_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type Type::array(&llty, size) } - // Unsized slice types (and str) have the type of their element, and - // traits have the type of u8. This is so that the data pointer inside - // fat pointers is of the right type (e.g. for array accesses), even - // when taking the address of an unsized field in a struct. - ty::TySlice(ty) => cx.llvm_type_of(ty), - ty::TyStr | ty::TyDynamic(..) | ty::TyForeign(..) 
=> Type::i8(cx), + ty::TySlice(ty) => { + Type::array(&cx.llvm_type_of(ty), 0) + } + ty::TyStr => { + Type::array(&Type::i8(cx), 0) + } + ty::TyDynamic(..) | + ty::TyForeign(..) => adt::type_of(cx, t), ty::TyFnDef(..) => Type::nil(cx), ty::TyFnPtr(sig) => { diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs index 342a4f0d085c4..56f9b98b48294 100644 --- a/src/test/codegen/adjustments.rs +++ b/src/test/codegen/adjustments.rs @@ -23,9 +23,10 @@ pub fn helper(_: usize) { pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] { // We used to generate an extra alloca and memcpy for the block's trailing expression value, so // check that we copy directly to the return value slot -// CHECK: %0 = insertvalue { i8*, [[USIZE]] } undef, i8* %x.ptr, 0 -// CHECK: %1 = insertvalue { i8*, [[USIZE]] } %0, [[USIZE]] %x.meta, 1 -// CHECK: ret { i8*, [[USIZE]] } %1 +// CHECK: %x.ptr = bitcast i8* %0 to [0 x i8]* +// CHECK: %1 = insertvalue { [0 x i8]*, [[USIZE]] } undef, [0 x i8]* %x.ptr, 0 +// CHECK: %2 = insertvalue { [0 x i8]*, [[USIZE]] } %1, [[USIZE]] %x.meta, 1 +// CHECK: ret { [0 x i8]*, [[USIZE]] } %2 { x } } diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index 29e2840c8817e..0bacb81624120 100644 --- a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -132,7 +132,7 @@ pub fn trait_borrow(_: &Drop) { pub fn trait_box(_: Box) { } -// CHECK: { i16*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta) +// CHECK: { [0 x i16]*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta) #[no_mangle] pub fn return_slice(x: &[u16]) -> &[u16] { x diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs index 4b713e28b0525..d191bedee5d99 100644 --- a/src/test/codegen/refs.rs +++ b/src/test/codegen/refs.rs @@ -23,9 +23,9 @@ pub fn helper(_: usize) { pub fn ref_dst(s: &[u8]) { // We used to generate an extra alloca and 
memcpy to ref the dst, so check that we copy // directly to the alloca for "x" -// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 0 -// CHECK: store i8* %s.ptr, i8** [[X0]] -// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 1 +// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 0 +// CHECK: store [0 x i8]* %s.ptr, [0 x i8]** [[X0]] +// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 1 // CHECK: store [[USIZE]] %s.meta, [[USIZE]]* [[X1]] let x = &*s; From 8afa3a01e61906459a25d305176137e14ba3f835 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 25 Jun 2017 12:42:55 +0300 Subject: [PATCH 10/69] rustc_trans: always insert alignment padding, even before the first field. --- src/librustc_trans/adt.rs | 19 +++++++------------ src/librustc_trans/common.rs | 25 ++++++++++++++++++++++++- src/librustc_trans/context.rs | 12 +++++++----- src/librustc_trans/mir/constant.rs | 18 +++++------------- src/librustc_trans/type_of.rs | 15 ++++++++++++--- src/test/codegen/adjustments.rs | 7 ++++--- src/test/codegen/consts.rs | 4 ++-- src/test/codegen/function-arguments.rs | 3 ++- src/test/codegen/refs.rs | 5 +++-- 9 files changed, 66 insertions(+), 42 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index c2988cd3da3f2..c1242f5713971 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -202,9 +202,9 @@ fn union_fill(cx: &CrateContext, size: Size, align: Align) -> Type { Type::array(&elem_ty, size / abi_align) } -/// Double an index to account for padding. +/// Double an index and add 1 to account for padding. 
pub fn memory_index_to_gep(index: u64) -> u64 { - index * 2 + 1 + index * 2 } pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, @@ -213,9 +213,8 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, discr: Option>) -> Vec { let field_count = (discr.is_some() as usize) + layout.field_count(); debug!("struct_llfields: variant: {:?}", variant); - let mut first_field = true; let mut offset = Size::from_bytes(0); - let mut result: Vec = Vec::with_capacity(field_count * 2); + let mut result: Vec = Vec::with_capacity(1 + field_count * 2); let field_iter = variant.field_index_by_increasing_offset().map(|i| { let ty = if i == 0 && discr.is_some() { cx.layout_of(discr.unwrap()) @@ -229,13 +228,9 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, index, field, offset, target_offset); assert!(target_offset >= offset); let padding = target_offset - offset; - if first_field { - assert_eq!(padding.bytes(), 0); - first_field = false; - } else { - result.push(Type::array(&Type::i8(cx), padding.bytes())); - debug!(" padding before: {:?}", padding); - } + result.push(Type::array(&Type::i8(cx), padding.bytes())); + debug!(" padding before: {:?}", padding); + let llty = cx.llvm_type_of(field.ty); result.push(llty); @@ -259,7 +254,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, debug!("struct_llfields: pad_bytes: {:?} offset: {:?} min_size: {:?} stride: {:?}", padding, offset, variant.min_size, variant.stride()); result.push(Type::array(&Type::i8(cx), padding.bytes())); - assert!(result.len() == (field_count * 2)); + assert!(result.len() == 1 + field_count * 2); } else { debug!("struct_llfields: offset: {:?} min_size: {:?} stride: {:?}", offset, variant.min_size, variant.stride()); diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 749c5393e43e9..2e010ccee4824 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -18,6 +18,7 @@ use llvm::{True, False, Bool, OperandBundleDef}; 
use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; use rustc::middle::lang_items::LangItem; +use abi; use base; use builder::Builder; use consts; @@ -267,7 +268,29 @@ pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef { let len = s.len(); let cs = consts::ptrcast(C_cstr(cx, s, false), cx.llvm_type_of(cx.tcx().mk_str()).ptr_to()); - C_named_struct(cx.str_slice_type(), &[cs, C_usize(cx, len as u64)]) + let empty = C_array(Type::i8(cx), &[]); + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + C_named_struct(cx.str_slice_type(), &[ + empty, + cs, + empty, + C_usize(cx, len as u64), + empty + ]) +} + +pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef { + let empty = C_array(Type::i8(cx), &[]); + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + C_struct(cx, &[ + empty, + ptr, + empty, + meta, + empty + ], false) } pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef { diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 4e003edac3c6b..ac5f437228671 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -395,11 +395,13 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { let dummy_ccx = LocalCrateContext::dummy_ccx(shared, local_ccxs.as_mut_slice()); let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice"); - - let llptrty = dummy_ccx.llvm_type_of(shared.tcx.mk_str()).ptr_to(); - str_slice_ty.set_struct_body(&[llptrty, - Type::isize(&dummy_ccx)], - false); + str_slice_ty.set_struct_body(&[ + Type::array(&Type::i8(&dummy_ccx), 0), + dummy_ccx.llvm_type_of(shared.tcx.mk_str()).ptr_to(), + Type::array(&Type::i8(&dummy_ccx), 0), + Type::isize(&dummy_ccx), + Type::array(&Type::i8(&dummy_ccx), 0) + ], false); (Type::isize(&dummy_ccx), str_slice_ty) }; (isize_ty, str_slice_ty, local_ccxs.pop().unwrap()) diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs 
index e253701903f22..1b3559a50e305 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -29,7 +29,7 @@ use callee; use builder::Builder; use common::{self, CrateContext, const_get_elt, val_ty}; use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u64}; -use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector}; +use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr}; use common::const_to_opt_u128; use consts; use type_of::{self, LayoutLlvmExt}; @@ -675,9 +675,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { .insert(base, operand.llval); assert!(prev_const.is_none() || prev_const == Some(operand.llval)); } - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - C_struct(self.ccx, &[base, info], false) + C_fat_ptr(self.ccx, base, info) } mir::CastKind::Misc if common::type_is_immediate(self.ccx, operand.ty) => { debug_assert!(common::type_is_immediate(self.ccx, cast_ty)); @@ -734,7 +732,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { if common::type_is_fat_ptr(self.ccx, cast_ty) { let llcast_ty = type_of::fat_ptr_base_ty(self.ccx, cast_ty); let data_cast = consts::ptrcast(data_ptr, llcast_ty); - C_struct(self.ccx, &[data_cast, meta], false) + C_fat_ptr(self.ccx, data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. 
@@ -777,7 +775,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let ptr = if self.ccx.shared().type_is_sized(ty) { base } else { - C_struct(self.ccx, &[base, tr_lvalue.llextra], false) + C_fat_ptr(self.ccx, base, tr_lvalue.llextra) }; Const::new(ptr, ref_ty) } @@ -1176,14 +1174,8 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let parts = st.field_index_by_increasing_offset().map(|i| { (vals[i], st.offsets[i]) }); - let mut first_field = true; for (val, target_offset) in parts { - if first_field { - first_field = false; - assert_eq!(target_offset.bytes(), 0); - } else { - cfields.push(padding(ccx, target_offset - offset)); - } + cfields.push(padding(ccx, target_offset - offset)); cfields.push(val.llval); offset = target_offset + ccx.size_of(val.ty); } diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index d130595763447..f86bc17d20aac 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -76,7 +76,13 @@ fn compute_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type } else { let ptr_ty = cx.llvm_type_of(ty).ptr_to(); let info_ty = unsized_info_ty(cx, ty); - Type::struct_(cx, &[ptr_ty, info_ty], false) + Type::struct_(cx, &[ + Type::array(&Type::i8(cx), 0), + ptr_ty, + Type::array(&Type::i8(cx), 0), + info_ty, + Type::array(&Type::i8(cx), 0) + ], false) } } else { cx.llvm_type_of(ty).ptr_to() @@ -240,11 +246,14 @@ impl<'tcx> LayoutLlvmExt for TyLayout<'tcx> { } Layout::Vector { .. } | - Layout::Array { .. } | - Layout::FatPointer { .. } => { + Layout::Array { .. } => { index as u64 } + Layout::FatPointer { .. } => { + adt::memory_index_to_gep(index as u64) + } + Layout::Univariant { ref variant, .. 
} => { adt::memory_index_to_gep(variant.memory_index[index] as u64) } diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs index 56f9b98b48294..8a680f1c9d698 100644 --- a/src/test/codegen/adjustments.rs +++ b/src/test/codegen/adjustments.rs @@ -9,6 +9,7 @@ // except according to those terms. // compile-flags: -C no-prepopulate-passes +// ignore-tidy-linelength #![crate_type = "lib"] @@ -24,9 +25,9 @@ pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] { // We used to generate an extra alloca and memcpy for the block's trailing expression value, so // check that we copy directly to the return value slot // CHECK: %x.ptr = bitcast i8* %0 to [0 x i8]* -// CHECK: %1 = insertvalue { [0 x i8]*, [[USIZE]] } undef, [0 x i8]* %x.ptr, 0 -// CHECK: %2 = insertvalue { [0 x i8]*, [[USIZE]] } %1, [[USIZE]] %x.meta, 1 -// CHECK: ret { [0 x i8]*, [[USIZE]] } %2 +// CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.ptr, 1 +// CHECK: %2 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1, [[USIZE]] %x.meta, 3 +// CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %2 { x } } diff --git a/src/test/codegen/consts.rs b/src/test/codegen/consts.rs index a75b8f3992d07..705488b7757cc 100644 --- a/src/test/codegen/consts.rs +++ b/src/test/codegen/consts.rs @@ -54,7 +54,7 @@ pub fn inline_enum_const() -> E { #[no_mangle] pub fn low_align_const() -> E { // Check that low_align_const and high_align_const use the same constant -// CHECK: load {{.*}} bitcast ({ i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]] +// CHECK: load {{.*}} bitcast ({ [0 x i8], i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]] *&E::A(0) } @@ -62,6 +62,6 @@ pub fn low_align_const() -> E { #[no_mangle] pub fn high_align_const() -> E { // Check that low_align_const and high_align_const use the same constant -// CHECK: load {{.*}} bitcast ({ i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]] +// CHECK: load {{.*}} 
bitcast ({ [0 x i8], i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]] *&E::A(0) } diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index 0bacb81624120..5d073670d865c 100644 --- a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -9,6 +9,7 @@ // except according to those terms. // compile-flags: -C no-prepopulate-passes +// ignore-tidy-linelength #![crate_type = "lib"] #![feature(custom_attribute)] @@ -132,7 +133,7 @@ pub fn trait_borrow(_: &Drop) { pub fn trait_box(_: Box) { } -// CHECK: { [0 x i16]*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta) +// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta) #[no_mangle] pub fn return_slice(x: &[u16]) -> &[u16] { x diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs index d191bedee5d99..ad799247f598b 100644 --- a/src/test/codegen/refs.rs +++ b/src/test/codegen/refs.rs @@ -9,6 +9,7 @@ // except according to those terms. 
// compile-flags: -C no-prepopulate-passes +// ignore-tidy-linelength #![crate_type = "lib"] @@ -23,9 +24,9 @@ pub fn helper(_: usize) { pub fn ref_dst(s: &[u8]) { // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy // directly to the alloca for "x" -// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 0 +// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 1 // CHECK: store [0 x i8]* %s.ptr, [0 x i8]** [[X0]] -// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 1 +// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 3 // CHECK: store [[USIZE]] %s.meta, [[USIZE]]* [[X1]] let x = &*s; From 44eef7c9ac6d2944131c4216136a2c39e0c5da30 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 10 Sep 2017 17:15:29 +0300 Subject: [PATCH 11/69] rustc: do not inject discriminant fields into Layout::General's variants. --- src/librustc/ty/layout.rs | 82 ++++++++----------- src/librustc_trans/adt.rs | 26 ++---- src/librustc_trans/debuginfo/metadata.rs | 10 ++- src/librustc_trans/mir/constant.rs | 21 +++-- src/librustc_trans/mir/lvalue.rs | 13 +-- src/test/codegen/consts.rs | 4 +- .../run-pass/enum-discrim-manual-sizing.rs | 3 + 7 files changed, 70 insertions(+), 89 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index d51c25ba6d389..4fea7ee082c00 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -651,14 +651,14 @@ pub struct Struct { } /// Info required to optimize struct layout. -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)] +#[derive(Copy, Clone, Debug)] enum StructKind { /// A tuple, closure, or univariant which cannot be coerced to unsized. AlwaysSizedUnivariant, /// A univariant, the last field of which may be coerced to unsized. 
MaybeUnsizedUnivariant, /// A univariant, but part of an enum. - EnumVariant, + EnumVariant(Integer), } impl<'a, 'tcx> Struct { @@ -692,30 +692,27 @@ impl<'a, 'tcx> Struct { // Neither do 1-member and 2-member structs. // In addition, code in trans assume that 2-element structs can become pairs. // It's easier to just short-circuit here. - let can_optimize = (fields.len() > 2 || StructKind::EnumVariant == kind) - && (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty(); - - let (optimize, sort_ascending) = match kind { - StructKind::AlwaysSizedUnivariant => (can_optimize, false), - StructKind::MaybeUnsizedUnivariant => (can_optimize, false), - StructKind::EnumVariant => { - assert!(fields.len() >= 1, "Enum variants must have discriminants."); - (can_optimize && fields[0].size(dl).bytes() == 1, true) + let (mut optimize, sort_ascending) = match kind { + StructKind::AlwaysSizedUnivariant | + StructKind::MaybeUnsizedUnivariant => (fields.len() > 2, false), + StructKind::EnumVariant(discr) => { + (discr.size().bytes() == 1, true) } }; + optimize &= (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty(); + ret.offsets = vec![Size::from_bytes(0); fields.len()]; let mut inverse_memory_index: Vec = (0..fields.len() as u32).collect(); if optimize { - let start = if let StructKind::EnumVariant = kind { 1 } else { 0 }; let end = if let StructKind::MaybeUnsizedUnivariant = kind { fields.len() - 1 } else { fields.len() }; - if end > start { - let optimizing = &mut inverse_memory_index[start..end]; + if end > 0 { + let optimizing = &mut inverse_memory_index[..end]; if sort_ascending { optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi()); } else { @@ -734,13 +731,17 @@ impl<'a, 'tcx> Struct { // field 5 with offset 0 puts 0 in offsets[5]. // At the bottom of this function, we use inverse_memory_index to produce memory_index. 
- if let StructKind::EnumVariant = kind { - assert_eq!(inverse_memory_index[0], 0, - "Enum variant discriminants must have the lowest offset."); - } - let mut offset = Size::from_bytes(0); + if let StructKind::EnumVariant(discr) = kind { + offset = discr.size(); + if !ret.packed { + let align = discr.align(dl); + ret.align = ret.align.max(align); + ret.primitive_align = ret.primitive_align.max(align); + } + } + for i in inverse_memory_index.iter() { let field = fields[*i as usize]; if !ret.sized { @@ -1112,8 +1113,9 @@ pub enum Layout { variants: Union, }, - /// General-case enums: for each case there is a struct, and they - /// all start with a field for the discriminant. + /// General-case enums: for each case there is a struct, and they all have + /// all space reserved for the discriminant, and their first field starts + /// at a non-0 offset, after where the discriminant would go. General { discr: Integer, variants: Vec, @@ -1495,21 +1497,17 @@ impl<'a, 'tcx> Layout { // We're interested in the smallest alignment, so start large. let mut start_align = Align::from_bytes(256, 256).unwrap(); - // Create the set of structs that represent each variant - // Use the minimum integer type we figured out above - let discr = Scalar { value: Int(min_ity), non_zero: false }; + // Create the set of structs that represent each variant. let mut variants = variants.into_iter().map(|fields| { - let mut fields = fields.into_iter().map(|field| { + let fields = fields.into_iter().map(|field| { field.layout(tcx, param_env) }).collect::, _>>()?; - fields.insert(0, &discr); let st = Struct::new(dl, &fields, - &def.repr, StructKind::EnumVariant, ty)?; + &def.repr, StructKind::EnumVariant(min_ity), ty)?; // Find the first field we can't move later // to make room for a larger discriminant. - // It is important to skip the first field. 
- for i in st.field_index_by_increasing_offset().skip(1) { + for i in st.field_index_by_increasing_offset() { let field = fields[i]; let field_align = field.align(dl); if field.size(dl).bytes() != 0 || field_align.abi() != 1 { @@ -1569,9 +1567,8 @@ impl<'a, 'tcx> Layout { let new_ity_size = Int(ity).size(dl); for variant in &mut variants { for i in variant.offsets.iter_mut() { - // The first field is the discrimminant, at offset 0. - // These aren't in order, and we need to skip it. - if *i <= old_ity_size && *i > Size::from_bytes(0) { + if *i <= old_ity_size { + assert_eq!(*i, old_ity_size); *i = new_ity_size; } } @@ -1759,7 +1756,7 @@ impl<'a, 'tcx> Layout { General { ref variants, .. } => { let v = variant_index.expect("variant index required"); - variants[v].offsets[i + 1] + variants[v].offsets[i] } StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => { @@ -1857,21 +1854,12 @@ impl<'a, 'tcx> Layout { } }; - enum Fields<'a> { - WithDiscrim(&'a Struct), - NoDiscrim(&'a Struct), - } - let build_variant_info = |n: Option, flds: &[(ast::Name, Ty<'tcx>)], - layout: Fields| { - let (s, field_offsets) = match layout { - Fields::WithDiscrim(s) => (s, &s.offsets[1..]), - Fields::NoDiscrim(s) => (s, &s.offsets[0..]), - }; + s: &Struct| { let field_info: Vec<_> = flds.iter() - .zip(field_offsets.iter()) + .zip(&s.offsets) .map(|(&field_name_ty, offset)| build_field_info(field_name_ty, offset)) .collect(); @@ -1904,7 +1892,7 @@ impl<'a, 'tcx> Layout { None, vec![build_variant_info(Some(variant_def.name), &fields, - Fields::NoDiscrim(variant_layout))]); + variant_layout)]); } Layout::RawNullablePointer { nndiscr, value } => { debug!("print-type-size t: `{:?}` adt raw nullable nndiscr {} is {:?}", @@ -1931,7 +1919,7 @@ impl<'a, 'tcx> Layout { None, vec![build_variant_info(Some(variant_def.name), &fields, - Fields::NoDiscrim(variant_layout))]); + variant_layout)]); } else { // (This case arises for *empty* enums; so give it // zero variants.) 
@@ -1953,7 +1941,7 @@ impl<'a, 'tcx> Layout { .collect(); build_variant_info(Some(variant_def.name), &fields, - Fields::WithDiscrim(variant_layout)) + variant_layout) }) .collect(); record(adt_kind.into(), Some(discr.size()), variant_infos); diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index c1242f5713971..9c492ca52d2a3 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -90,8 +90,7 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, (l.for_variant(nndiscr as usize), nonnull), _ => unreachable!() }; - llty.set_struct_body(&struct_llfields(cx, variant_layout, variant, None), - variant.packed) + llty.set_struct_body(&struct_llfields(cx, variant_layout, variant), variant.packed) }, _ => bug!("This function cannot handle {} with layout {:#?}", t, l) } @@ -116,7 +115,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, match name { None => { Type::struct_(cx, &struct_llfields(cx, l.for_variant(nndiscr as usize), - nonnull, None), + nonnull), nonnull.packed) } Some(name) => { @@ -127,7 +126,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, layout::Univariant { ref variant, .. 
} => { match name { None => { - Type::struct_(cx, &struct_llfields(cx, l, &variant, None), + Type::struct_(cx, &struct_llfields(cx, l, &variant), variant.packed) } Some(name) => { @@ -209,23 +208,16 @@ pub fn memory_index_to_gep(index: u64) -> u64 { pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>, - variant: &layout::Struct, - discr: Option>) -> Vec { - let field_count = (discr.is_some() as usize) + layout.field_count(); + variant: &layout::Struct) -> Vec { + let field_count = layout.field_count(); debug!("struct_llfields: variant: {:?}", variant); let mut offset = Size::from_bytes(0); let mut result: Vec = Vec::with_capacity(1 + field_count * 2); - let field_iter = variant.field_index_by_increasing_offset().map(|i| { - let ty = if i == 0 && discr.is_some() { - cx.layout_of(discr.unwrap()) - } else { - layout.field(cx, i - discr.is_some() as usize) - }; - (i, ty, variant.offsets[i as usize]) - }); - for (index, field, target_offset) in field_iter { + for i in variant.field_index_by_increasing_offset() { + let field = layout.field(cx, i); + let target_offset = variant.offsets[i as usize]; debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}", - index, field, offset, target_offset); + i, field, offset, target_offset); assert!(target_offset >= offset); let padding = target_offset - offset; result.push(Type::array(&Type::i8(cx), padding.bytes())); diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 2869ddb6e220e..879a74678579f 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1340,7 +1340,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { // Creates MemberDescriptions for the fields of a single enum variant. struct VariantMemberDescriptionFactory<'tcx> { // Cloned from the layout::Struct describing the variant. 
- offsets: &'tcx [layout::Size], + offsets: Vec, args: Vec<(String, Ty<'tcx>)>, discriminant_type_metadata: Option, span: Span, @@ -1436,8 +1436,12 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }; // If this is not a univariant enum, there is also the discriminant field. + let mut offsets = struct_def.offsets.clone(); match discriminant_info { - RegularDiscriminant(_) => arg_names.insert(0, "RUST$ENUM$DISR".to_string()), + RegularDiscriminant(_) => { + arg_names.insert(0, "RUST$ENUM$DISR".to_string()); + offsets.insert(0, Size::from_bytes(0)); + } _ => { /* do nothing */ } }; @@ -1449,7 +1453,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let member_description_factory = VariantMDF(VariantMemberDescriptionFactory { - offsets: &struct_def.offsets[..], + offsets, args, discriminant_type_metadata: match discriminant_info { RegularDiscriminant(discriminant_type_metadata) => { diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 1b3559a50e305..ab8749f56110e 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1108,11 +1108,8 @@ fn trans_const_adt<'a, 'tcx>( layout::General { discr: d, ref variants, .. } => { let variant = &variants[variant_index]; let lldiscr = C_int(Type::from_integer(ccx, d), variant_index as i64); - let mut vals_with_discr = vec![ - Const::new(lldiscr, d.to_ty(ccx.tcx(), false)) - ]; - vals_with_discr.extend_from_slice(vals); - build_const_struct(ccx, l, &variant, &vals_with_discr) + build_const_struct(ccx, l, &variant, vals, + Some(Const::new(lldiscr, d.to_ty(ccx.tcx(), false)))) } layout::UntaggedUnion { ref variants, .. }=> { assert_eq!(variant_index, 0); @@ -1125,7 +1122,7 @@ fn trans_const_adt<'a, 'tcx>( } layout::Univariant { ref variant, .. } => { assert_eq!(variant_index, 0); - build_const_struct(ccx, l, &variant, vals) + build_const_struct(ccx, l, &variant, vals, None) } layout::Vector { .. 
} => { Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::>()), t) @@ -1140,7 +1137,7 @@ fn trans_const_adt<'a, 'tcx>( } layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { if variant_index as u64 == nndiscr { - build_const_struct(ccx, l, &nonnull, vals) + build_const_struct(ccx, l, &nonnull, vals, None) } else { // Always use null even if it's not the `discrfield`th // field; see #8506. @@ -1162,14 +1159,20 @@ fn trans_const_adt<'a, 'tcx>( fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, layout: layout::TyLayout<'tcx>, st: &layout::Struct, - vals: &[Const<'tcx>]) + vals: &[Const<'tcx>], + discr: Option>) -> Const<'tcx> { assert_eq!(vals.len(), st.offsets.len()); // offset of current value let mut offset = Size::from_bytes(0); let mut cfields = Vec::new(); - cfields.reserve(st.offsets.len()*2); + cfields.reserve(discr.is_some() as usize + 1 + st.offsets.len() * 2); + + if let Some(discr) = discr { + cfields.push(discr.llval); + offset = ccx.size_of(discr.ty); + } let parts = st.field_index_by_increasing_offset().map(|i| { (vals[i], st.offsets[i]) diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 8bd4142f2c1a4..c11596b01403e 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -236,14 +236,6 @@ impl<'a, 'tcx> LvalueRef<'tcx> { _ => {} } - // Adjust the index to account for enum discriminants in variants. - let mut ix = ix; - if let layout::General { .. } = *l { - if l.variant_index.is_some() { - ix += 1; - } - } - let simple = || { LvalueRef { llval: bcx.struct_gep(self.llval, l.llvm_field_index(ix)), @@ -474,11 +466,10 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // If this is an enum, cast to the appropriate variant struct type. let layout = bcx.ccx.layout_of(ty).for_variant(variant_index); - if let layout::General { discr, ref variants, .. } = *layout { + if let layout::General { ref variants, .. 
} = *layout { let st = &variants[variant_index]; let variant_ty = Type::struct_(bcx.ccx, - &adt::struct_llfields(bcx.ccx, layout, st, - Some(discr.to_ty(bcx.tcx(), false))), st.packed); + &adt::struct_llfields(bcx.ccx, layout, st), st.packed); downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); } diff --git a/src/test/codegen/consts.rs b/src/test/codegen/consts.rs index 705488b7757cc..a75b8f3992d07 100644 --- a/src/test/codegen/consts.rs +++ b/src/test/codegen/consts.rs @@ -54,7 +54,7 @@ pub fn inline_enum_const() -> E { #[no_mangle] pub fn low_align_const() -> E { // Check that low_align_const and high_align_const use the same constant -// CHECK: load {{.*}} bitcast ({ [0 x i8], i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]] +// CHECK: load {{.*}} bitcast ({ i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]] *&E::A(0) } @@ -62,6 +62,6 @@ pub fn low_align_const() -> E { #[no_mangle] pub fn high_align_const() -> E { // Check that low_align_const and high_align_const use the same constant -// CHECK: load {{.*}} bitcast ({ [0 x i8], i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]] +// CHECK: load {{.*}} bitcast ({ i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]] *&E::A(0) } diff --git a/src/test/run-pass/enum-discrim-manual-sizing.rs b/src/test/run-pass/enum-discrim-manual-sizing.rs index 3bbc107e0b99e..8557c065dc69c 100644 --- a/src/test/run-pass/enum-discrim-manual-sizing.rs +++ b/src/test/run-pass/enum-discrim-manual-sizing.rs @@ -108,6 +108,9 @@ pub fn main() { let array_expected_size = round_up(28, align_of::>()); assert_eq!(size_of::>(), array_expected_size); assert_eq!(size_of::>(), 32); + + assert_eq!(align_of::(), align_of::()); + assert_eq!(align_of::>(), align_of::()); } // Rounds x up to the next multiple of a From 50a3fd0097f0dcd3661387ebb4dee8062b8caab4 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 10 Sep 2017 19:34:11 +0300 Subject: [PATCH 12/69] rustc: remove useless 0 prefix from 
Layout::StructWrappedNullablePointer's discrfield. --- src/librustc/ty/layout.rs | 3 --- src/librustc_trans/debuginfo/metadata.rs | 1 - src/librustc_trans/mir/lvalue.rs | 2 +- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 4fea7ee082c00..a7c707de4815a 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1145,7 +1145,6 @@ pub enum Layout { StructWrappedNullablePointer { nndiscr: u64, nonnull: Struct, - /// N.B. There is a 0 at the start, for LLVM GEP through a pointer. discrfield: FieldPath, /// Like discrfield, but in source order. For debuginfo. discrfield_source: FieldPath @@ -1472,9 +1471,7 @@ impl<'a, 'tcx> Layout { let mut i = *path.last().unwrap(); i = st.memory_index[i as usize]; *path.last_mut().unwrap() = i; - path.push(0); // For GEP through a pointer. path.reverse(); - path_source.push(0); path_source.reverse(); return success(StructWrappedNullablePointer { diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 879a74678579f..82c975c751ff2 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1312,7 +1312,6 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { let null_variant_index = (1 - nndiscr) as usize; let null_variant_name = adt.variants[null_variant_index].name; let discrfield_source = discrfield_source.iter() - .skip(1) .map(|x| x.to_string()) .collect::>().join("$"); let union_member_name = format!("RUST$ENCODED$ENUM${}${}", diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index c11596b01403e..7cf35c4405c8e 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -332,7 +332,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // Double index to account for padding (FieldPath already uses `Struct::memory_index`) fn gepi_struct_llfields_path(self, bcx: &Builder, discrfield: &layout::FieldPath) -> ValueRef { - 
let path = iter::once(C_u32(bcx.ccx, 0)).chain(discrfield[1..].iter().map(|&i| { + let path = iter::once(C_u32(bcx.ccx, 0)).chain(discrfield.iter().map(|&i| { let i = adt::memory_index_to_gep(i as u64); assert_eq!(i as u32 as u64, i); C_u32(bcx.ccx, i as u32) From bc8e1f7efa9196f31d234ebc1c90a85e4e37874c Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 10 Sep 2017 23:53:57 +0300 Subject: [PATCH 13/69] rustc: use an offset instead of a field path in Layout::StructWrappedNullablePointer. --- src/librustc/ty/layout.rs | 157 ++++++++++--------- src/librustc_trans/abi.rs | 2 +- src/librustc_trans/cabi_x86_64.rs | 2 +- src/librustc_trans/mir/lvalue.rs | 78 +++++---- src/librustc_trans/type_.rs | 10 ++ src/test/ui/print_type_sizes/nullable.stdout | 1 - 6 files changed, 137 insertions(+), 113 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index a7c707de4815a..c16092666f7b1 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -841,35 +841,47 @@ impl<'a, 'tcx> Struct { }) } - /// Find the path leading to a non-zero leaf field, starting from + /// Find the offset of a non-zero leaf field, starting from /// the given type and recursing through aggregates. - /// The tuple is `(path, source_path)`, - /// where `path` is in memory order and `source_path` in source order. + /// The tuple is `(offset, primitive, source_path)`. // FIXME(eddyb) track value ranges and traverse already optimized enums. fn non_zero_field_in_type(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) - -> Result, LayoutError<'tcx>> { - match (ty.layout(tcx, param_env)?, &ty.sty) { - (&Scalar { non_zero: true, .. }, _) | - (&CEnum { non_zero: true, .. }, _) => Ok(Some((vec![], vec![]))), + -> Result, LayoutError<'tcx>> { + let layout = ty.layout(tcx, param_env)?; + match (layout, &ty.sty) { + (&Scalar { non_zero: true, value, .. 
}, _) => { + Ok(Some((Size::from_bytes(0), value, vec![]))) + } + (&CEnum { non_zero: true, discr, .. }, _) => { + Ok(Some((Size::from_bytes(0), Int(discr), vec![]))) + } + (&FatPointer { non_zero: true, .. }, _) => { - Ok(Some((vec![FAT_PTR_ADDR as u32], vec![FAT_PTR_ADDR as u32]))) + Ok(Some((layout.field_offset(tcx, FAT_PTR_ADDR, None), + Pointer, + vec![FAT_PTR_ADDR as u32]))) } // Is this the NonZero lang item wrapping a pointer or integer type? (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => { let fields = &def.struct_variant().fields; assert_eq!(fields.len(), 1); - match *fields[0].ty(tcx, substs).layout(tcx, param_env)? { + let field = fields[0].ty(tcx, substs).layout(tcx, param_env)?; + match *field { // FIXME(eddyb) also allow floating-point types here. - Scalar { value: Int(_), non_zero: false } | - Scalar { value: Pointer, non_zero: false } => { - Ok(Some((vec![0], vec![0]))) + Scalar { value: value @ Int(_), non_zero: false } | + Scalar { value: value @ Pointer, non_zero: false } => { + Ok(Some((layout.field_offset(tcx, 0, None), + value, + vec![0]))) } FatPointer { non_zero: false, .. } => { - let tmp = vec![FAT_PTR_ADDR as u32, 0]; - Ok(Some((tmp.clone(), tmp))) + Ok(Some((layout.field_offset(tcx, 0, None) + + field.field_offset(tcx, FAT_PTR_ADDR, None), + Pointer, + vec![FAT_PTR_ADDR as u32, 0]))) } _ => Ok(None) } @@ -878,31 +890,31 @@ impl<'a, 'tcx> Struct { // Perhaps one of the fields of this struct is non-zero // let's recurse and find out (&Univariant { ref variant, .. }, &ty::TyAdt(def, substs)) if def.is_struct() => { - Struct::non_zero_field_paths( + Struct::non_zero_field( tcx, param_env, def.struct_variant().fields.iter().map(|field| { field.ty(tcx, substs) }), - Some(&variant.memory_index[..])) + &variant.offsets) } // Perhaps one of the upvars of this closure is non-zero (&Univariant { ref variant, .. 
}, &ty::TyClosure(def, substs)) => { let upvar_tys = substs.upvar_tys(def, tcx); - Struct::non_zero_field_paths( + Struct::non_zero_field( tcx, param_env, upvar_tys, - Some(&variant.memory_index[..])) + &variant.offsets) } // Can we use one of the fields in this tuple? (&Univariant { ref variant, .. }, &ty::TyTuple(tys, _)) => { - Struct::non_zero_field_paths( + Struct::non_zero_field( tcx, param_env, tys.iter().cloned(), - Some(&variant.memory_index[..])) + &variant.offsets) } // Is this a fixed-size array of something non-zero @@ -915,11 +927,11 @@ impl<'a, 'tcx> Struct { } } if count.val.to_const_int().unwrap().to_u64().unwrap() != 0 { - Struct::non_zero_field_paths( + Struct::non_zero_field( tcx, param_env, Some(ety).into_iter(), - None) + &[Size::from_bytes(0)]) } else { Ok(None) } @@ -938,27 +950,20 @@ impl<'a, 'tcx> Struct { } } - /// Find the path leading to a non-zero leaf field, starting from + /// Find the offset of a non-zero leaf field, starting from /// the given set of fields and recursing through aggregates. - /// Returns Some((path, source_path)) on success. - /// `path` is translated to memory order. `source_path` is not. - fn non_zero_field_paths(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - fields: I, - permutation: Option<&[u32]>) - -> Result, LayoutError<'tcx>> + /// Returns Some((offset, primitive, source_path)) on success. 
+ fn non_zero_field(tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + fields: I, + offsets: &[Size]) + -> Result, LayoutError<'tcx>> where I: Iterator> { for (i, ty) in fields.enumerate() { let r = Struct::non_zero_field_in_type(tcx, param_env, ty)?; - if let Some((mut path, mut source_path)) = r { + if let Some((offset, primitive, mut source_path)) = r { source_path.push(i as u32); - let index = if let Some(p) = permutation { - p[i] as usize - } else { - i - }; - path.push(index as u32); - return Ok(Some((path, source_path))); + return Ok(Some((offsets[i] + offset, primitive, source_path))); } } Ok(None) @@ -1135,18 +1140,19 @@ pub enum Layout { /// identity function. RawNullablePointer { nndiscr: u64, - value: Primitive + discr: Primitive }, /// Two cases distinguished by a nullable pointer: the case with discriminant - /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th - /// field is known to be nonnull due to its type; if that field is null, then + /// `nndiscr` is represented by the struct `nonnull`, where the field at the + /// `discr_offset` offset is known to be nonnull due to its type; if that field is null, then /// it represents the other case, which is known to be zero sized. StructWrappedNullablePointer { nndiscr: u64, nonnull: Struct, - discrfield: FieldPath, - /// Like discrfield, but in source order. For debuginfo. + discr: Primitive, + discr_offset: Size, + /// Like discr_offset, but the source field path. For debuginfo. discrfield_source: FieldPath } } @@ -1440,44 +1446,36 @@ impl<'a, 'tcx> Layout { if !Struct::would_be_zero_sized(dl, other_fields)? 
{ continue; } - let paths = Struct::non_zero_field_paths(tcx, - param_env, - variants[discr].iter().cloned(), - None)?; - let (mut path, mut path_source) = if let Some(p) = paths { p } + + let st = Struct::new(dl, + &variants[discr].iter().map(|ty| ty.layout(tcx, param_env)) + .collect::, _>>()?, + &def.repr, StructKind::AlwaysSizedUnivariant, ty)?; + + let field = Struct::non_zero_field(tcx, + param_env, + variants[discr].iter().cloned(), + &st.offsets)?; + let (offset, primitive, mut path_source) = if let Some(f) = field { f } else { continue }; // FIXME(eddyb) should take advantage of a newtype. - if path == &[0] && variants[discr].len() == 1 { - let value = match *variants[discr][0].layout(tcx, param_env)? { - Scalar { value, .. } => value, - CEnum { discr, .. } => Int(discr), - _ => bug!("Layout::compute: `{}`'s non-zero \ - `{}` field not scalar?!", - ty, variants[discr][0]) - }; + if offset.bytes() == 0 && primitive.size(dl) == st.stride() && + variants[discr].len() == 1 { return success(RawNullablePointer { nndiscr: discr as u64, - value, + discr: primitive, }); } - let st = Struct::new(dl, - &variants[discr].iter().map(|ty| ty.layout(tcx, param_env)) - .collect::, _>>()?, - &def.repr, StructKind::AlwaysSizedUnivariant, ty)?; - - // We have to fix the last element of path here. - let mut i = *path.last().unwrap(); - i = st.memory_index[i as usize]; - *path.last_mut().unwrap() = i; - path.reverse(); + // We have to fix the source path here. path_source.reverse(); return success(StructWrappedNullablePointer { nndiscr: discr as u64, nonnull: st, - discrfield: path, + discr: primitive, + discr_offset: offset, discrfield_source: path_source }); } @@ -1621,7 +1619,7 @@ impl<'a, 'tcx> Layout { let dl = cx.data_layout(); match *self { - Scalar { value, .. } | RawNullablePointer { value, .. } => { + Scalar { value, .. } | RawNullablePointer { discr: value, .. 
} => { value.size(dl) } @@ -1664,7 +1662,7 @@ impl<'a, 'tcx> Layout { let dl = cx.data_layout(); match *self { - Scalar { value, .. } | RawNullablePointer { value, .. } => { + Scalar { value, .. } | RawNullablePointer { discr: value, .. } => { value.align(dl) } @@ -1876,7 +1874,8 @@ impl<'a, 'tcx> Layout { match *layout { Layout::StructWrappedNullablePointer { nonnull: ref variant_layout, nndiscr, - discrfield: _, + discr: _, + discr_offset: _, discrfield_source: _ } => { debug!("print-type-size t: `{:?}` adt struct-wrapped nullable nndiscr {} is {:?}", ty, nndiscr, variant_layout); @@ -1891,12 +1890,12 @@ impl<'a, 'tcx> Layout { &fields, variant_layout)]); } - Layout::RawNullablePointer { nndiscr, value } => { + Layout::RawNullablePointer { nndiscr, discr } => { debug!("print-type-size t: `{:?}` adt raw nullable nndiscr {} is {:?}", - ty, nndiscr, value); + ty, nndiscr, discr); let variant_def = &adt_def.variants[nndiscr as usize]; record(adt_kind.into(), None, - vec![build_primitive_info(variant_def.name, &value)]); + vec![build_primitive_info(variant_def.name, &discr)]); } Layout::Univariant { variant: ref variant_layout, non_zero: _ } => { let variant_names = || { @@ -2410,19 +2409,21 @@ impl<'gcx> HashStable> for Layout align.hash_stable(hcx, hasher); primitive_align.hash_stable(hcx, hasher); } - RawNullablePointer { nndiscr, ref value } => { + RawNullablePointer { nndiscr, ref discr } => { nndiscr.hash_stable(hcx, hasher); - value.hash_stable(hcx, hasher); + discr.hash_stable(hcx, hasher); } StructWrappedNullablePointer { nndiscr, ref nonnull, - ref discrfield, + ref discr, + discr_offset, ref discrfield_source } => { nndiscr.hash_stable(hcx, hasher); nonnull.hash_stable(hcx, hasher); - discrfield.hash_stable(hcx, hasher); + discr.hash_stable(hcx, hasher); + discr_offset.hash_stable(hcx, hasher); discrfield_source.hash_stable(hcx, hasher); } } diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index f4f37cdef514d..329da2c36a2ad 100644 --- 
a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -295,7 +295,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { match *self.layout { // The primitives for this algorithm. Layout::Scalar { value, .. } | - Layout::RawNullablePointer { value, .. } => { + Layout::RawNullablePointer { discr: value, .. } => { let kind = match value { layout::Int(_) | layout::Pointer => RegKind::Integer, diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 6670d084d6c57..dcbb2de9c4ded 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -66,7 +66,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) match *layout { Layout::Scalar { value, .. } | - Layout::RawNullablePointer { value, .. } => { + Layout::RawNullablePointer { discr: value, .. } => { let reg = match value { layout::Int(_) | layout::Pointer => Class::Int, diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 7cf35c4405c8e..7dcb5d219b794 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -10,7 +10,7 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, Align, Layout, LayoutTyper}; +use rustc::ty::layout::{self, Align, Layout, LayoutTyper, Size}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; @@ -25,7 +25,6 @@ use type_::Type; use value::Value; use glue; -use std::iter; use std::ptr; use std::ops; @@ -330,14 +329,26 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } } - // Double index to account for padding (FieldPath already uses `Struct::memory_index`) - fn gepi_struct_llfields_path(self, bcx: &Builder, discrfield: &layout::FieldPath) -> ValueRef { - let path = iter::once(C_u32(bcx.ccx, 0)).chain(discrfield.iter().map(|&i| { - let i = adt::memory_index_to_gep(i as u64); - assert_eq!(i as u32 as u64, i); - C_u32(bcx.ccx, i as u32) - })).collect::>(); - 
bcx.inbounds_gep(self.llval, &path) + // Return a pointer to the discriminant, given its type and offset. + fn gepi_discr_at_offset(self, bcx: &Builder, + discr: ty::layout::Primitive, + offset: Size) + -> (ValueRef, Alignment) { + let size = discr.size(bcx.ccx); + let ptr_ty = Type::from_primitive(bcx.ccx, discr).ptr_to(); + + // If the discriminant is not on a multiple of the primitive's size, + // we need to go through i8*. Also assume the worst alignment. + if offset.bytes() % size.bytes() != 0 { + let byte_ptr = bcx.pointercast(self.llval, Type::i8p(bcx.ccx)); + let byte_ptr = bcx.inbounds_gep(byte_ptr, &[C_usize(bcx.ccx, offset.bytes())]); + let byte_align = Alignment::Packed(Align::from_bytes(1, 1).unwrap()); + return (bcx.pointercast(byte_ptr, ptr_ty), byte_align); + } + + let discr_ptr = bcx.pointercast(self.llval, ptr_ty); + (bcx.inbounds_gep(discr_ptr, &[C_usize(bcx.ccx, offset.bytes() / size.bytes())]), + self.alignment) } /// Helper for cases where the discriminant is simply loaded. @@ -378,16 +389,16 @@ impl<'a, 'tcx> LvalueRef<'tcx> { self.load_discr(bcx, discr, ptr.llval, 0, variants.len() as u64 - 1) } layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0), - layout::RawNullablePointer { nndiscr, .. } => { - let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; - let discr = bcx.load(self.llval, self.alignment.non_abi()); - bcx.icmp(cmp, discr, C_null(val_ty(discr))) - } - layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { - let llptrptr = self.gepi_struct_llfields_path(bcx, discrfield); - let llptr = bcx.load(llptrptr, self.alignment.non_abi()); + layout::RawNullablePointer { nndiscr, discr } | + layout::StructWrappedNullablePointer { nndiscr, discr, .. } => { + let discr_offset = match *l { + layout::StructWrappedNullablePointer { discr_offset, .. 
} => discr_offset, + _ => Size::from_bytes(0), + }; + let (lldiscrptr, alignment) = self.gepi_discr_at_offset(bcx, discr, discr_offset); + let lldiscr = bcx.load(lldiscrptr, alignment.non_abi()); let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; - bcx.icmp(cmp, llptr, C_null(val_ty(llptr))) + bcx.icmp(cmp, lldiscr, C_null(Type::from_primitive(bcx.ccx, discr))) }, _ => bug!("{} is not an enum", l.ty) }; @@ -418,27 +429,30 @@ impl<'a, 'tcx> LvalueRef<'tcx> { | layout::Vector { .. } => { assert_eq!(to, 0); } - layout::RawNullablePointer { nndiscr, .. } => { + layout::RawNullablePointer { nndiscr, discr, .. } | + layout::StructWrappedNullablePointer { nndiscr, discr, .. } => { if to != nndiscr { - let llptrty = val_ty(self.llval).element_type(); - bcx.store(C_null(llptrty), self.llval, self.alignment.non_abi()); - } - } - layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => { - if to != nndiscr { - if target_sets_discr_via_memset(bcx) { + let (use_memset, discr_offset) = match *l { + layout::StructWrappedNullablePointer { discr_offset, .. } => { + (target_sets_discr_via_memset(bcx), discr_offset) + } + _ => (false, Size::from_bytes(0)), + }; + if use_memset { // Issue #34427: As workaround for LLVM bug on // ARM, use memset of 0 on whole struct rather // than storing null to single target field. 
let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to()); let fill_byte = C_u8(bcx.ccx, 0); - let size = C_usize(bcx.ccx, nonnull.stride().bytes()); - let align = C_u32(bcx.ccx, nonnull.align.abi() as u32); + let (size, align) = l.size_and_align(bcx.ccx); + let size = C_usize(bcx.ccx, size.bytes()); + let align = C_u32(bcx.ccx, align.abi() as u32); base::call_memset(bcx, llptr, fill_byte, size, align, false); } else { - let llptrptr = self.gepi_struct_llfields_path(bcx, discrfield); - let llptrty = val_ty(llptrptr).element_type(); - bcx.store(C_null(llptrty), llptrptr, self.alignment.non_abi()); + let (lldiscrptr, alignment) = + self.gepi_discr_at_offset(bcx, discr, discr_offset); + bcx.store(C_null(Type::from_primitive(bcx.ccx, discr)), + lldiscrptr, alignment.non_abi()); } } } diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index bb8f3f23108ec..2359aa811fa75 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -287,4 +287,14 @@ impl Type { I128 => Type::i128(cx), } } + + pub fn from_primitive(cx: &CrateContext, p: layout::Primitive) -> Type { + use rustc::ty::layout::Primitive::*; + match p { + Int(i) => Type::from_integer(cx, i), + F32 => Type::f32(cx), + F64 => Type::f64(cx), + Pointer => Type::i8p(cx), + } + } } diff --git a/src/test/ui/print_type_sizes/nullable.stdout b/src/test/ui/print_type_sizes/nullable.stdout index 830678f174f88..c9cdde78a4d1b 100644 --- a/src/test/ui/print_type_sizes/nullable.stdout +++ b/src/test/ui/print_type_sizes/nullable.stdout @@ -19,6 +19,5 @@ print-type-size field `.pre`: 1 bytes print-type-size end padding: 1 bytes print-type-size type: `MyOption>`: 4 bytes, alignment: 4 bytes print-type-size variant `Some`: 4 bytes -print-type-size field `.0`: 4 bytes print-type-size type: `core::nonzero::NonZero`: 4 bytes, alignment: 4 bytes print-type-size field `.0`: 4 bytes From aa811d728a2957c9a7a79c03a6e6e73b9372997e Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Mon, 
11 Sep 2017 22:31:16 +0300 Subject: [PATCH 14/69] rustc: remove source field path from Layout::StructWrappedNullablePointer. --- src/librustc/ty/layout.rs | 76 +++++++----------------- src/librustc_trans/debuginfo/metadata.rs | 46 ++++++++++---- 2 files changed, 55 insertions(+), 67 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index c16092666f7b1..319d4789efa92 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -619,10 +619,6 @@ impl Primitive { } } -/// Path through fields of nested structures. -// FIXME(eddyb) use small vector optimization for the common case. -pub type FieldPath = Vec; - /// A structure, a product type in ADT terms. #[derive(PartialEq, Eq, Hash, Debug)] pub struct Struct { @@ -848,20 +844,19 @@ impl<'a, 'tcx> Struct { fn non_zero_field_in_type(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) - -> Result, LayoutError<'tcx>> { + -> Result, LayoutError<'tcx>> { let layout = ty.layout(tcx, param_env)?; match (layout, &ty.sty) { (&Scalar { non_zero: true, value, .. }, _) => { - Ok(Some((Size::from_bytes(0), value, vec![]))) + Ok(Some((Size::from_bytes(0), value))) } (&CEnum { non_zero: true, discr, .. }, _) => { - Ok(Some((Size::from_bytes(0), Int(discr), vec![]))) + Ok(Some((Size::from_bytes(0), Int(discr)))) } (&FatPointer { non_zero: true, .. }, _) => { Ok(Some((layout.field_offset(tcx, FAT_PTR_ADDR, None), - Pointer, - vec![FAT_PTR_ADDR as u32]))) + Pointer))) } // Is this the NonZero lang item wrapping a pointer or integer type? @@ -873,15 +868,12 @@ impl<'a, 'tcx> Struct { // FIXME(eddyb) also allow floating-point types here. Scalar { value: value @ Int(_), non_zero: false } | Scalar { value: value @ Pointer, non_zero: false } => { - Ok(Some((layout.field_offset(tcx, 0, None), - value, - vec![0]))) + Ok(Some((layout.field_offset(tcx, 0, None), value))) } FatPointer { non_zero: false, .. 
} => { Ok(Some((layout.field_offset(tcx, 0, None) + field.field_offset(tcx, FAT_PTR_ADDR, None), - Pointer, - vec![FAT_PTR_ADDR as u32, 0]))) + Pointer))) } _ => Ok(None) } @@ -890,31 +882,22 @@ impl<'a, 'tcx> Struct { // Perhaps one of the fields of this struct is non-zero // let's recurse and find out (&Univariant { ref variant, .. }, &ty::TyAdt(def, substs)) if def.is_struct() => { - Struct::non_zero_field( + variant.non_zero_field( tcx, param_env, def.struct_variant().fields.iter().map(|field| { field.ty(tcx, substs) - }), - &variant.offsets) + })) } // Perhaps one of the upvars of this closure is non-zero (&Univariant { ref variant, .. }, &ty::TyClosure(def, substs)) => { let upvar_tys = substs.upvar_tys(def, tcx); - Struct::non_zero_field( - tcx, - param_env, - upvar_tys, - &variant.offsets) + variant.non_zero_field(tcx, param_env, upvar_tys) } // Can we use one of the fields in this tuple? (&Univariant { ref variant, .. }, &ty::TyTuple(tys, _)) => { - Struct::non_zero_field( - tcx, - param_env, - tys.iter().cloned(), - &variant.offsets) + variant.non_zero_field(tcx, param_env, tys.iter().cloned()) } // Is this a fixed-size array of something non-zero @@ -927,11 +910,7 @@ impl<'a, 'tcx> Struct { } } if count.val.to_const_int().unwrap().to_u64().unwrap() != 0 { - Struct::non_zero_field( - tcx, - param_env, - Some(ety).into_iter(), - &[Size::from_bytes(0)]) + Struct::non_zero_field_in_type(tcx, param_env, ety) } else { Ok(None) } @@ -953,17 +932,15 @@ impl<'a, 'tcx> Struct { /// Find the offset of a non-zero leaf field, starting from /// the given set of fields and recursing through aggregates. /// Returns Some((offset, primitive, source_path)) on success. 
- fn non_zero_field(tcx: TyCtxt<'a, 'tcx, 'tcx>, + fn non_zero_field(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - fields: I, - offsets: &[Size]) - -> Result, LayoutError<'tcx>> + fields: I) + -> Result, LayoutError<'tcx>> where I: Iterator> { - for (i, ty) in fields.enumerate() { + for (ty, &field_offset) in fields.zip(&self.offsets) { let r = Struct::non_zero_field_in_type(tcx, param_env, ty)?; - if let Some((offset, primitive, mut source_path)) = r { - source_path.push(i as u32); - return Ok(Some((offsets[i] + offset, primitive, source_path))); + if let Some((offset, primitive)) = r { + return Ok(Some((field_offset + offset, primitive))); } } Ok(None) @@ -1152,8 +1129,6 @@ pub enum Layout { nonnull: Struct, discr: Primitive, discr_offset: Size, - /// Like discr_offset, but the source field path. For debuginfo. - discrfield_source: FieldPath } } @@ -1452,11 +1427,9 @@ impl<'a, 'tcx> Layout { .collect::, _>>()?, &def.repr, StructKind::AlwaysSizedUnivariant, ty)?; - let field = Struct::non_zero_field(tcx, - param_env, - variants[discr].iter().cloned(), - &st.offsets)?; - let (offset, primitive, mut path_source) = if let Some(f) = field { f } + let field = st.non_zero_field(tcx, param_env, + variants[discr].iter().cloned())?; + let (offset, primitive) = if let Some(f) = field { f } else { continue }; // FIXME(eddyb) should take advantage of a newtype. @@ -1468,15 +1441,11 @@ impl<'a, 'tcx> Layout { }); } - // We have to fix the source path here. 
- path_source.reverse(); - return success(StructWrappedNullablePointer { nndiscr: discr as u64, nonnull: st, discr: primitive, discr_offset: offset, - discrfield_source: path_source }); } } @@ -1875,8 +1844,7 @@ impl<'a, 'tcx> Layout { Layout::StructWrappedNullablePointer { nonnull: ref variant_layout, nndiscr, discr: _, - discr_offset: _, - discrfield_source: _ } => { + discr_offset: _ } => { debug!("print-type-size t: `{:?}` adt struct-wrapped nullable nndiscr {} is {:?}", ty, nndiscr, variant_layout); let variant_def = &adt_def.variants[nndiscr as usize]; @@ -2418,13 +2386,11 @@ impl<'gcx> HashStable> for Layout ref nonnull, ref discr, discr_offset, - ref discrfield_source } => { nndiscr.hash_stable(hcx, hasher); nonnull.hash_stable(hcx, hasher); discr.hash_stable(hcx, hasher); discr_offset.hash_stable(hcx, hasher); - discrfield_source.hash_stable(hcx, hasher); } } } diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 82c975c751ff2..98402b5e8c5f7 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -39,6 +39,7 @@ use rustc::util::common::path2cstr; use libc::{c_uint, c_longlong}; use std::ffi::CString; +use std::fmt::Write; use std::ptr; use std::path::Path; use syntax::ast; @@ -1286,9 +1287,12 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } ] }, - layout::StructWrappedNullablePointer { nonnull: ref struct_def, - nndiscr, - ref discrfield_source, ..} => { + layout::StructWrappedNullablePointer { + nonnull: ref struct_def, + nndiscr, + discr, + discr_offset + } => { // Create a description of the non-null variant let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, @@ -1309,19 +1313,37 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { // Encode the information about the null variant in the union // member's name. 
- let null_variant_index = (1 - nndiscr) as usize; - let null_variant_name = adt.variants[null_variant_index].name; - let discrfield_source = discrfield_source.iter() - .map(|x| x.to_string()) - .collect::>().join("$"); - let union_member_name = format!("RUST$ENCODED$ENUM${}${}", - discrfield_source, - null_variant_name); + let mut name = String::from("RUST$ENCODED$ENUM$"); + // HACK(eddyb) the debuggers should just handle offset+size + // of discriminant instead of us having to recover its path. + fn compute_field_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + name: &mut String, + layout: TyLayout<'tcx>, + offset: Size, + size: Size) { + for i in 0..layout.field_count() { + let field_offset = layout.field_offset(ccx, i); + if field_offset > offset { + continue; + } + let inner_offset = offset - field_offset; + let field = layout.field(ccx, i); + if inner_offset + size <= field.size(ccx) { + write!(name, "{}$", i).unwrap(); + compute_field_path(ccx, name, field, inner_offset, size); + } + } + } + compute_field_path(cx, &mut name, + self.type_rep, + discr_offset, + discr.size(cx)); + name.push_str(&adt.variants[(1 - nndiscr) as usize].name.as_str()); // Create the (singleton) list of descriptions of union members. vec![ MemberDescription { - name: union_member_name, + name, type_metadata: variant_type_metadata, offset: Size::from_bytes(0), size: struct_def.stride(), From 8864668d536071dee35a472b459586d733105444 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Wed, 13 Sep 2017 00:33:56 +0300 Subject: [PATCH 15/69] rustc: re-complicate the TyLayout API and use better names. 
--- src/librustc/ty/layout.rs | 111 +++++++++++------------ src/librustc_trans/abi.rs | 10 +- src/librustc_trans/adt.rs | 4 +- src/librustc_trans/cabi_s390x.rs | 4 +- src/librustc_trans/cabi_x86.rs | 4 +- src/librustc_trans/cabi_x86_64.rs | 4 +- src/librustc_trans/common.rs | 6 +- src/librustc_trans/context.rs | 40 ++++---- src/librustc_trans/debuginfo/metadata.rs | 12 +-- src/librustc_trans/debuginfo/mod.rs | 2 +- src/librustc_trans/glue.rs | 4 +- src/librustc_trans/mir/block.rs | 2 +- src/librustc_trans/mir/constant.rs | 4 +- src/librustc_trans/mir/lvalue.rs | 4 +- src/librustc_trans/mir/mod.rs | 2 +- src/librustc_trans/mir/operand.rs | 6 +- src/librustc_trans/mir/rvalue.rs | 2 +- src/librustc_trans/type_of.rs | 8 +- 18 files changed, 109 insertions(+), 120 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 319d4789efa92..3150014564881 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -227,12 +227,6 @@ impl<'a> HasDataLayout for &'a TargetDataLayout { } } -impl<'a, 'tcx> HasDataLayout for TyCtxt<'a, 'tcx, 'tcx> { - fn data_layout(&self) -> &TargetDataLayout { - &self.data_layout - } -} - /// Endianness of the target, which must match cfg(target-endian). #[derive(Copy, Clone)] pub enum Endian { @@ -2089,80 +2083,85 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { } } -/// A pair of a type and its layout. Implements various -/// type traversal APIs (e.g. recursing into fields). +/// The details of the layout of a type, alongside the type itself. +/// Provides various type traversal APIs (e.g. recursing into fields). +/// +/// Note that the details are NOT guaranteed to always be identical +/// to those obtained from `layout_of(ty)`, as we need to produce +/// layouts for which Rust types do not exist, such as enum variants +/// or synthetic fields of enums (i.e. discriminants) and fat pointers. 
#[derive(Copy, Clone, Debug)] -pub struct TyLayout<'tcx> { +pub struct FullLayout<'tcx> { pub ty: Ty<'tcx>, - pub layout: &'tcx Layout, pub variant_index: Option, + pub layout: &'tcx Layout, } -impl<'tcx> Deref for TyLayout<'tcx> { +impl<'tcx> Deref for FullLayout<'tcx> { type Target = Layout; fn deref(&self) -> &Layout { self.layout } } -pub trait LayoutTyper<'tcx>: HasDataLayout { - type TyLayout; - +pub trait HasTyCtxt<'tcx>: HasDataLayout { fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>; - fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout; - fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx>; } -/// Combines a tcx with the parameter environment so that you can -/// compute layout operations. -#[derive(Copy, Clone)] -pub struct LayoutCx<'a, 'tcx: 'a> { - tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, +impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> { + fn data_layout(&self) -> &TargetDataLayout { + &self.data_layout + } } -impl<'a, 'tcx> LayoutCx<'a, 'tcx> { - pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self { - LayoutCx { tcx, param_env } +impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> { + self.global_tcx() } } -impl<'a, 'tcx> HasDataLayout for LayoutCx<'a, 'tcx> { +impl<'a, 'gcx, 'tcx, T: Copy> HasDataLayout for (TyCtxt<'a, 'gcx, 'tcx>, T) { fn data_layout(&self) -> &TargetDataLayout { - &self.tcx.data_layout + self.0.data_layout() } } -impl<'a, 'tcx> LayoutTyper<'tcx> for LayoutCx<'a, 'tcx> { - type TyLayout = Result, LayoutError<'tcx>>; - - fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { - self.tcx +impl<'a, 'gcx, 'tcx, T: Copy> HasTyCtxt<'gcx> for (TyCtxt<'a, 'gcx, 'tcx>, T) { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> { + self.0.tcx() } +} + +pub trait LayoutOf { + type FullLayout; + + fn layout_of(self, ty: T) -> Self::FullLayout; +} + +impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx>) { + 
type FullLayout = Result, LayoutError<'tcx>>; - fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { - let ty = self.normalize_projections(ty); + fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { + let (tcx, param_env) = self; - Ok(TyLayout { + let ty = tcx.normalize_associated_type_in_env(&ty, param_env); + + Ok(FullLayout { ty, - layout: ty.layout(self.tcx, self.param_env)?, - variant_index: None + variant_index: None, + layout: ty.layout(tcx, param_env)?, }) } - - fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> { - self.tcx.normalize_associated_type_in_env(&ty, self.param_env) - } } -impl<'a, 'tcx> TyLayout<'tcx> { +impl<'a, 'tcx> FullLayout<'tcx> { pub fn for_variant(&self, variant_index: usize) -> Self { let is_enum = match self.ty.sty { ty::TyAdt(def, _) => def.is_enum(), _ => false }; assert!(is_enum); - TyLayout { + FullLayout { variant_index: Some(variant_index), ..*self } @@ -2199,7 +2198,7 @@ impl<'a, 'tcx> TyLayout<'tcx> { match *self.layout { Scalar { .. } => { - bug!("TyLayout::field_count({:?}): not applicable", self) + bug!("FullLayout::field_count({:?}): not applicable", self) } // Handled above (the TyAdt case). @@ -2222,9 +2221,7 @@ impl<'a, 'tcx> TyLayout<'tcx> { } } - fn field_type_unnormalized>(&self, cx: C, i: usize) -> Ty<'tcx> { - let tcx = cx.tcx(); - + fn field_type_unnormalized(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, i: usize) -> Ty<'tcx> { let ptr_field_type = |pointee: Ty<'tcx>| { assert!(i < 2); let slice = |element: Ty<'tcx>| { @@ -2238,7 +2235,7 @@ impl<'a, 'tcx> TyLayout<'tcx> { ty::TySlice(element) => slice(element), ty::TyStr => slice(tcx.types.u8), ty::TyDynamic(..) => tcx.mk_mut_ptr(tcx.mk_nil()), - _ => bug!("TyLayout::field_type({:?}): not applicable", self) + _ => bug!("FullLayout::field_type({:?}): not applicable", self) } }; @@ -2253,7 +2250,7 @@ impl<'a, 'tcx> TyLayout<'tcx> { ty::TyFnDef(..) | ty::TyDynamic(..) | ty::TyForeign(..) 
=> { - bug!("TyLayout::field_type({:?}): not applicable", self) + bug!("FullLayout::field_type({:?}): not applicable", self) } // Potentially-fat pointers. @@ -2311,20 +2308,16 @@ impl<'a, 'tcx> TyLayout<'tcx> { ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) | ty::TyInfer(_) | ty::TyError => { - bug!("TyLayout::field_type: unexpected type `{}`", self.ty) + bug!("FullLayout::field_type: unexpected type `{}`", self.ty) } } } - pub fn field_type>(&self, cx: C, i: usize) -> Ty<'tcx> { - cx.normalize_projections(self.field_type_unnormalized(cx, i)) - } - - pub fn field>(&self, - cx: C, - i: usize) - -> C::TyLayout { - cx.layout_of(self.field_type(cx, i)) + pub fn field> + HasTyCtxt<'tcx>>(&self, + cx: C, + i: usize) + -> C::FullLayout { + cx.layout_of(self.field_type_unnormalized(cx.tcx(), i)) } } diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 329da2c36a2ad..211c88b4b2294 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -35,8 +35,8 @@ use type_::Type; use rustc::hir; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Align, Layout, Size, TyLayout}; -use rustc::ty::layout::{HasDataLayout, LayoutTyper}; +use rustc::ty::layout::{self, Align, Layout, Size, FullLayout}; +use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc_back::PanicStrategy; use libc::c_uint; @@ -274,7 +274,7 @@ pub trait LayoutExt<'tcx> { fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option; } -impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { +impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { fn is_aggregate(&self) -> bool { match *self.layout { Layout::Scalar { .. } | @@ -471,7 +471,7 @@ impl CastTarget { #[derive(Clone, Copy, Debug)] pub struct ArgType<'tcx> { kind: ArgKind, - pub layout: TyLayout<'tcx>, + pub layout: FullLayout<'tcx>, /// Cast target, either a single uniform or a pair of registers. pub cast: Option, /// Dummy argument, which is emitted before the real argument. 
@@ -481,7 +481,7 @@ pub struct ArgType<'tcx> { } impl<'a, 'tcx> ArgType<'tcx> { - fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> { + fn new(layout: FullLayout<'tcx>) -> ArgType<'tcx> { ArgType { kind: ArgKind::Direct, layout, diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 9c492ca52d2a3..62888556f1498 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -42,7 +42,7 @@ //! taken to it, implementing them for Rust seems difficult. use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Align, HasDataLayout, LayoutTyper, Size, TyLayout}; +use rustc::ty::layout::{self, Align, HasDataLayout, LayoutOf, Size, FullLayout}; use context::CrateContext; use type_::Type; @@ -207,7 +207,7 @@ pub fn memory_index_to_gep(index: u64) -> u64 { } pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - layout: TyLayout<'tcx>, + layout: FullLayout<'tcx>, variant: &layout::Struct) -> Vec { let field_count = layout.field_count(); debug!("struct_llfields: variant: {:?}", variant); diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs index ffe2940a0284f..225919c6a3afd 100644 --- a/src/librustc_trans/cabi_s390x.rs +++ b/src/librustc_trans/cabi_s390x.rs @@ -14,7 +14,7 @@ use abi::{FnType, ArgType, LayoutExt, Reg}; use context::CrateContext; -use rustc::ty::layout::{self, Layout, TyLayout}; +use rustc::ty::layout::{self, Layout, FullLayout}; fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { if !ret.layout.is_aggregate() && ret.layout.size(ccx).bits() <= 64 { @@ -25,7 +25,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - layout: TyLayout<'tcx>) -> bool { + layout: FullLayout<'tcx>) -> bool { match *layout { Layout::Scalar { value: layout::F32, .. } | Layout::Scalar { value: layout::F64, .. 
} => true, diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index b34337ae5f69f..0f9f9b87b5cf8 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -11,7 +11,7 @@ use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind}; use common::CrateContext; -use rustc::ty::layout::{self, Layout, TyLayout}; +use rustc::ty::layout::{self, Layout, FullLayout}; #[derive(PartialEq)] pub enum Flavor { @@ -20,7 +20,7 @@ pub enum Flavor { } fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - layout: TyLayout<'tcx>) -> bool { + layout: FullLayout<'tcx>) -> bool { match *layout { Layout::Scalar { value: layout::F32, .. } | Layout::Scalar { value: layout::F64, .. } => true, diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index dcbb2de9c4ded..34c795316ba14 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -14,7 +14,7 @@ use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind}; use context::CrateContext; -use rustc::ty::layout::{self, Layout, TyLayout, Size}; +use rustc::ty::layout::{self, Layout, FullLayout, Size}; #[derive(Clone, Copy, PartialEq, Debug)] enum Class { @@ -53,7 +53,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) } fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - layout: TyLayout<'tcx>, + layout: FullLayout<'tcx>, cls: &mut [Class], off: Size) -> Result<(), Memory> { diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 2e010ccee4824..e259e19ac93a2 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -27,7 +27,7 @@ use type_::Type; use value::Value; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{HasDataLayout, Layout, LayoutTyper}; +use rustc::ty::layout::{HasDataLayout, Layout, LayoutOf}; use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::hir; @@ -81,8 +81,8 @@ pub fn 
type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) } // The two fields must be both immediates. - type_is_immediate(ccx, layout.field_type(ccx, 0)) && - type_is_immediate(ccx, layout.field_type(ccx, 1)) + type_is_immediate(ccx, layout.field(ccx, 0).ty) && + type_is_immediate(ccx, layout.field(ccx, 1).ty) } _ => false } diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index ac5f437228671..647cc54effe9d 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -29,7 +29,7 @@ use rustc::middle::trans::Stats; use rustc_data_structures::stable_hasher::StableHashingContextProvider; use rustc::session::config::{self, NoDebugInfo}; use rustc::session::Session; -use rustc::ty::layout::{LayoutCx, LayoutError, LayoutTyper, TyLayout}; +use rustc::ty::layout::{LayoutError, LayoutOf, FullLayout}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::util::nodemap::FxHashMap; use rustc_trans_utils; @@ -648,48 +648,44 @@ impl<'a, 'tcx> ty::layout::HasDataLayout for &'a SharedCrateContext<'a, 'tcx> { } } +impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a SharedCrateContext<'a, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { + self.tcx + } +} + impl<'a, 'tcx> ty::layout::HasDataLayout for &'a CrateContext<'a, 'tcx> { fn data_layout(&self) -> &ty::layout::TargetDataLayout { &self.shared.tcx.data_layout } } -impl<'a, 'tcx> LayoutTyper<'tcx> for &'a SharedCrateContext<'a, 'tcx> { - type TyLayout = TyLayout<'tcx>; - +impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CrateContext<'a, 'tcx> { fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { - self.tcx + self.shared.tcx } +} - fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { - let param_env = ty::ParamEnv::empty(traits::Reveal::All); - LayoutCx::new(self.tcx, param_env) +impl<'a, 'tcx> LayoutOf> for &'a SharedCrateContext<'a, 'tcx> { + type FullLayout = FullLayout<'tcx>; + + fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { + (self.tcx, 
ty::ParamEnv::empty(traits::Reveal::All)) .layout_of(ty) .unwrap_or_else(|e| match e { LayoutError::SizeOverflow(_) => self.sess().fatal(&e.to_string()), _ => bug!("failed to get layout for `{}`: {}", ty, e) }) } - - fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> { - self.tcx().fully_normalize_associated_types_in(&ty) - } } -impl<'a, 'tcx> LayoutTyper<'tcx> for &'a CrateContext<'a, 'tcx> { - type TyLayout = TyLayout<'tcx>; +impl<'a, 'tcx> LayoutOf> for &'a CrateContext<'a, 'tcx> { + type FullLayout = FullLayout<'tcx>; - fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { - self.shared.tcx - } - fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { self.shared.layout_of(ty) } - - fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> { - self.shared.normalize_projections(ty) - } } /// Declare any llvm intrinsics that you might need diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 98402b5e8c5f7..703456ab41bd2 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -32,7 +32,7 @@ use rustc::ty::util::TypeIdHasher; use rustc::ich::Fingerprint; use common::{self, CrateContext}; use rustc::ty::{self, AdtKind, Ty}; -use rustc::ty::layout::{self, Align, LayoutTyper, Size, TyLayout}; +use rustc::ty::layout::{self, Align, LayoutOf, Size, FullLayout}; use rustc::session::{Session, config}; use rustc::util::nodemap::FxHashMap; use rustc::util::common::path2cstr; @@ -1072,7 +1072,7 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, //=----------------------------------------------------------------------------- struct UnionMemberDescriptionFactory<'tcx> { - layout: TyLayout<'tcx>, + layout: FullLayout<'tcx>, variant: &'tcx ty::VariantDef, span: Span, } @@ -1139,7 +1139,7 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // offset of zero bytes). 
struct EnumMemberDescriptionFactory<'tcx> { enum_type: Ty<'tcx>, - type_rep: TyLayout<'tcx>, + type_rep: FullLayout<'tcx>, discriminant_type_metadata: Option, containing_scope: DIScope, file_metadata: DIFile, @@ -1318,7 +1318,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { // of discriminant instead of us having to recover its path. fn compute_field_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &mut String, - layout: TyLayout<'tcx>, + layout: FullLayout<'tcx>, offset: Size, size: Size) { for i in 0..layout.field_count() { @@ -1409,13 +1409,13 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, -> (DICompositeType, MemberDescriptionFactory<'tcx>) { let layout = cx.layout_of(enum_type); let maybe_discr = match *layout { - layout::General { .. } => Some(layout.field_type(cx, 0)), + layout::General { .. } => Some(layout.field(cx, 0).ty), _ => None, }; let layout = layout.for_variant(variant_index); let mut field_tys = (0..layout.field_count()).map(|i| { - layout.field_type(cx, i) + layout.field(cx, i).ty }).collect::>(); if let Some(discr) = maybe_discr { diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 1ca12771dd448..53c5c82b8e3f1 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -43,7 +43,7 @@ use std::ptr; use syntax_pos::{self, Span, Pos}; use syntax::ast; use syntax::symbol::Symbol; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::{self, LayoutOf}; pub mod gdb; mod utils; diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 9152a1febdf73..8f4a983feb8b6 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -19,7 +19,7 @@ use common::*; use llvm::{ValueRef}; use llvm; use meth; -use rustc::ty::layout::LayoutTyper; +use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; use value::Value; @@ -74,7 +74,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf // 
Recurse to get the size of the dynamically sized field (must be // the last field). - let field_ty = layout.field_type(ccx, layout.field_count() - 1); + let field_ty = layout.field(ccx, layout.field_count() - 1).ty; let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info); // FIXME (#26403, #27023): We should be adding padding diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 380ed5266e9a5..cf5d43e2f2b6f 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -12,7 +12,7 @@ use llvm::{self, ValueRef, BasicBlockRef}; use rustc::middle::lang_items; use rustc::middle::const_val::{ConstEvalErr, ConstInt, ErrKind}; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::LayoutTyper; +use rustc::ty::layout::LayoutOf; use rustc::traits; use rustc::mir; use abi::{Abi, FnType, ArgType}; diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index ab8749f56110e..b52d0da6580c4 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -18,7 +18,7 @@ use rustc::traits; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; -use rustc::ty::layout::{self, LayoutTyper, Size}; +use rustc::ty::layout::{self, LayoutOf, Size}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::subst::{Kind, Substs, Subst}; use rustc_apfloat::{ieee, Float, Status}; @@ -1157,7 +1157,7 @@ fn trans_const_adt<'a, 'tcx>( /// a two-element struct will locate it at offset 4, and accesses to it /// will read the wrong memory. 
fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - layout: layout::TyLayout<'tcx>, + layout: layout::FullLayout<'tcx>, st: &layout::Struct, vals: &[Const<'tcx>], discr: Option>) diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 7dcb5d219b794..0b9be3e49d1e7 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -10,7 +10,7 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, Align, Layout, LayoutTyper, Size}; +use rustc::ty::layout::{self, Align, Layout, LayoutOf, Size}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; @@ -205,7 +205,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { l = l.for_variant(variant_index) } } - let fty = l.field_type(ccx, ix); + let fty = l.field(ccx, ix).ty; let alignment = self.alignment | Alignment::from(&*l); diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index a03408390f95e..f365de123cd11 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -12,7 +12,7 @@ use libc::c_uint; use llvm::{self, ValueRef, BasicBlockRef}; use llvm::debuginfo::DIScope; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::{self, LayoutOf}; use rustc::mir::{self, Mir}; use rustc::ty::subst::Substs; use rustc::infer::TransNormalize; diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index b65a6453e6178..f43115b84d46a 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -10,7 +10,7 @@ use llvm::ValueRef; use rustc::ty::{self, Ty}; -use rustc::ty::layout::LayoutTyper; +use rustc::ty::layout::LayoutOf; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; @@ -140,10 +140,10 @@ impl<'a, 'tcx> OperandRef<'tcx> { let layout = bcx.ccx.layout_of(self.ty); let a = bcx.extract_value(llval, 
layout.llvm_field_index(0)); - let a = base::to_immediate(bcx, a, layout.field_type(bcx.ccx, 0)); + let a = base::to_immediate(bcx, a, layout.field(bcx.ccx, 0).ty); let b = bcx.extract_value(llval, layout.llvm_field_index(1)); - let b = base::to_immediate(bcx, b, layout.field_type(bcx.ccx, 1)); + let b = base::to_immediate(bcx, b, layout.field(bcx.ccx, 1).ty); self.val = OperandValue::Pair(a, b); } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 592181df85297..e83d4c586e7bc 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -11,7 +11,7 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; -use rustc::ty::layout::{Layout, LayoutTyper}; +use rustc::ty::layout::{Layout, LayoutOf}; use rustc::mir; use rustc::middle::lang_items::ExchangeMallocFnLangItem; use rustc_apfloat::{ieee, Float, Status, Round}; diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index f86bc17d20aac..feb2b0569319a 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -12,7 +12,7 @@ use abi::FnType; use adt; use common::*; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{Align, Layout, LayoutTyper, Size, TyLayout}; +use rustc::ty::layout::{Align, Layout, LayoutOf, Size, FullLayout}; use trans_item::DefPathBasedNames; use type_::Type; @@ -235,14 +235,14 @@ pub trait LayoutLlvmExt { fn llvm_field_index(&self, index: usize) -> u64; } -impl<'tcx> LayoutLlvmExt for TyLayout<'tcx> { +impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { fn llvm_field_index(&self, index: usize) -> u64 { match **self { Layout::Scalar { .. } | Layout::CEnum { .. } | Layout::UntaggedUnion { .. } | Layout::RawNullablePointer { .. } => { - bug!("TyLayout::llvm_field_index({:?}): not applicable", self) + bug!("FullLayout::llvm_field_index({:?}): not applicable", self) } Layout::Vector { .. 
} | @@ -271,7 +271,7 @@ impl<'tcx> LayoutLlvmExt for TyLayout<'tcx> { if self.variant_index == Some(nndiscr as usize) { adt::memory_index_to_gep(nonnull.memory_index[index] as u64) } else { - bug!("TyLayout::llvm_field_index({:?}): not applicable", self) + bug!("FullLayout::llvm_field_index({:?}): not applicable", self) } } } From 8c4d5af52b7fba20d9bf3dd901ddf4f30fd743c9 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Wed, 13 Sep 2017 02:19:11 +0300 Subject: [PATCH 16/69] rustc: remove Ty::layout and move everything to layout_of. --- src/librustc/lint/context.rs | 11 +- src/librustc/ty/layout.rs | 321 +++++++++---------- src/librustc/ty/mod.rs | 3 +- src/librustc/ty/util.rs | 46 --- src/librustc_lint/types.rs | 5 +- src/librustc_mir/transform/inline.rs | 5 +- src/librustc_trans/abi.rs | 2 +- src/librustc_trans/adt.rs | 12 +- src/librustc_trans/cabi_x86_64.rs | 2 +- src/librustc_trans/common.rs | 2 +- src/librustc_trans/debuginfo/metadata.rs | 6 +- src/librustc_trans/glue.rs | 2 +- src/librustc_trans/mir/constant.rs | 8 +- src/librustc_trans/mir/lvalue.rs | 6 +- src/librustc_trans/mir/mod.rs | 2 +- src/librustc_trans/type_of.rs | 2 +- src/test/{ui => compile-fail}/issue-26548.rs | 5 +- src/test/ui/issue-26548.stderr | 9 - src/tools/toolstate.toml | 2 +- 19 files changed, 201 insertions(+), 250 deletions(-) rename src/test/{ui => compile-fail}/issue-26548.rs (70%) delete mode 100644 src/test/ui/issue-26548.stderr diff --git a/src/librustc/lint/context.rs b/src/librustc/lint/context.rs index 601e0316d4af9..a080f968da44e 100644 --- a/src/librustc/lint/context.rs +++ b/src/librustc/lint/context.rs @@ -34,7 +34,8 @@ use middle::privacy::AccessLevels; use rustc_serialize::{Decoder, Decodable, Encoder, Encodable}; use session::{config, early_error, Session}; use traits::Reveal; -use ty::{self, TyCtxt}; +use ty::{self, TyCtxt, Ty}; +use ty::layout::{FullLayout, LayoutError, LayoutOf}; use util::nodemap::FxHashMap; use std::default::Default as StdDefault; @@ 
-626,6 +627,14 @@ impl<'a, 'tcx> LateContext<'a, 'tcx> { } } +impl<'a, 'tcx> LayoutOf> for &'a LateContext<'a, 'tcx> { + type FullLayout = Result, LayoutError<'tcx>>; + + fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { + (self.tcx, self.param_env.reveal_all()).layout_of(ty) + } +} + impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> { /// Because lints are scoped lexically, we want to walk nested /// items in the context of the outer item, so enable diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 3150014564881..cb7021760274b 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -653,7 +653,7 @@ enum StructKind { impl<'a, 'tcx> Struct { fn new(dl: &TargetDataLayout, - fields: &Vec<&'a Layout>, + fields: &[FullLayout], repr: &ReprOptions, kind: StructKind, scapegoat: Ty<'tcx>) @@ -793,19 +793,6 @@ impl<'a, 'tcx> Struct { self.min_size.abi_align(self.align) } - /// Determine whether a structure would be zero-sized, given its fields. - fn would_be_zero_sized(dl: &TargetDataLayout, fields: I) - -> Result> - where I: Iterator>> { - for field in fields { - let field = field?; - if field.is_unsized() || field.size(dl).bytes() > 0 { - return Ok(false); - } - } - Ok(true) - } - /// Get indices of the tys that made this struct by increasing offset. #[inline] pub fn field_index_by_increasing_offset<'b>(&'b self) -> impl iter::Iterator+'b { @@ -837,10 +824,10 @@ impl<'a, 'tcx> Struct { // FIXME(eddyb) track value ranges and traverse already optimized enums. fn non_zero_field_in_type(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - ty: Ty<'tcx>) + layout: FullLayout<'tcx>) -> Result, LayoutError<'tcx>> { - let layout = ty.layout(tcx, param_env)?; - match (layout, &ty.sty) { + let cx = (tcx, param_env); + match (layout.layout, &layout.ty.sty) { (&Scalar { non_zero: true, value, .. 
}, _) => { Ok(Some((Size::from_bytes(0), value))) } @@ -849,49 +836,33 @@ impl<'a, 'tcx> Struct { } (&FatPointer { non_zero: true, .. }, _) => { - Ok(Some((layout.field_offset(tcx, FAT_PTR_ADDR, None), - Pointer))) + Ok(Some((layout.field_offset(tcx, FAT_PTR_ADDR), Pointer))) } // Is this the NonZero lang item wrapping a pointer or integer type? - (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => { - let fields = &def.struct_variant().fields; - assert_eq!(fields.len(), 1); - let field = fields[0].ty(tcx, substs).layout(tcx, param_env)?; + (_, &ty::TyAdt(def, _)) if Some(def.did) == tcx.lang_items().non_zero() => { + let field = layout.field(cx, 0)?; match *field { // FIXME(eddyb) also allow floating-point types here. Scalar { value: value @ Int(_), non_zero: false } | Scalar { value: value @ Pointer, non_zero: false } => { - Ok(Some((layout.field_offset(tcx, 0, None), value))) + Ok(Some((layout.field_offset(tcx, 0), value))) } FatPointer { non_zero: false, .. } => { - Ok(Some((layout.field_offset(tcx, 0, None) + - field.field_offset(tcx, FAT_PTR_ADDR, None), + Ok(Some((layout.field_offset(tcx, 0) + + field.field_offset(tcx, FAT_PTR_ADDR), Pointer))) } _ => Ok(None) } } - // Perhaps one of the fields of this struct is non-zero - // let's recurse and find out - (&Univariant { ref variant, .. }, &ty::TyAdt(def, substs)) if def.is_struct() => { + // Perhaps one of the fields is non-zero, let's recurse and find out. + (&Univariant(ref variant), _) => { variant.non_zero_field( tcx, param_env, - def.struct_variant().fields.iter().map(|field| { - field.ty(tcx, substs) - })) - } - - // Perhaps one of the upvars of this closure is non-zero - (&Univariant { ref variant, .. }, &ty::TyClosure(def, substs)) => { - let upvar_tys = substs.upvar_tys(def, tcx); - variant.non_zero_field(tcx, param_env, upvar_tys) - } - // Can we use one of the fields in this tuple? - (&Univariant { ref variant, .. 
}, &ty::TyTuple(tys, _)) => { - variant.non_zero_field(tcx, param_env, tys.iter().cloned()) + (0..layout.field_count()).map(|i| layout.field(cx, i))) } // Is this a fixed-size array of something non-zero @@ -900,22 +871,18 @@ impl<'a, 'tcx> Struct { if count.has_projections() { count = tcx.normalize_associated_type_in_env(&count, param_env); if count.has_projections() { - return Err(LayoutError::Unknown(ty)); + return Err(LayoutError::Unknown(layout.ty)); } } if count.val.to_const_int().unwrap().to_u64().unwrap() != 0 { - Struct::non_zero_field_in_type(tcx, param_env, ety) + Struct::non_zero_field_in_type(tcx, param_env, cx.layout_of(ety)?) } else { Ok(None) } } (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => { - let normalized = tcx.normalize_associated_type_in_env(&ty, param_env); - if ty == normalized { - return Ok(None); - } - return Struct::non_zero_field_in_type(tcx, param_env, normalized); + bug!("Struct::non_zero_field_in_type: {:?} not normalized", layout); } // Anything else is not a non-zero type. @@ -930,9 +897,9 @@ impl<'a, 'tcx> Struct { param_env: ty::ParamEnv<'tcx>, fields: I) -> Result, LayoutError<'tcx>> - where I: Iterator> { - for (ty, &field_offset) in fields.zip(&self.offsets) { - let r = Struct::non_zero_field_in_type(tcx, param_env, ty)?; + where I: Iterator, LayoutError<'tcx>>> { + for (field, &field_offset) in fields.zip(&self.offsets) { + let r = Struct::non_zero_field_in_type(tcx, param_env, field?)?; if let Some((offset, primitive)) = r { return Ok(Some((field_offset + offset, primitive))); } @@ -981,7 +948,7 @@ impl<'a, 'tcx> Union { } } - /// Extend the Struct with more fields. + /// Extend the Union with more fields. fn extend(&mut self, dl: &TargetDataLayout, fields: I, scapegoat: Ty<'tcx>) @@ -1077,17 +1044,10 @@ pub enum Layout { }, /// Single-case enums, and structs/tuples. - Univariant { - variant: Struct, - /// If true, the structure is NonZero. - // FIXME(eddyb) use a newtype Layout kind for this. 
- non_zero: bool - }, + Univariant(Struct), /// Untagged unions. - UntaggedUnion { - variants: Union, - }, + UntaggedUnion(Union), /// General-case enums: for each case there is a struct, and they all have /// all space reserved for the discriminant, and their first field starts @@ -1145,31 +1105,59 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { } } +fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) + -> Result<&'tcx Layout, LayoutError<'tcx>> +{ + let (param_env, ty) = query.into_parts(); + + let rec_limit = tcx.sess.recursion_limit.get(); + let depth = tcx.layout_depth.get(); + if depth > rec_limit { + tcx.sess.fatal( + &format!("overflow representing the type `{}`", ty)); + } + + tcx.layout_depth.set(depth+1); + let layout = Layout::compute_uncached(tcx, param_env, ty); + tcx.layout_depth.set(depth); + + layout +} + +pub fn provide(providers: &mut ty::maps::Providers) { + *providers = ty::maps::Providers { + layout_raw, + ..*providers + }; +} + impl<'a, 'tcx> Layout { - pub fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - ty: Ty<'tcx>) - -> Result<&'tcx Layout, LayoutError<'tcx>> { + fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + ty: Ty<'tcx>) + -> Result<&'tcx Layout, LayoutError<'tcx>> { let success = |layout| Ok(tcx.intern_layout(layout)); - let dl = &tcx.data_layout; + let cx = (tcx, param_env); + let dl = cx.data_layout(); assert!(!ty.has_infer_types()); let ptr_layout = |pointee: Ty<'tcx>| { let non_zero = !ty.is_unsafe_ptr(); let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env); if pointee.is_sized(tcx, param_env, DUMMY_SP) { - Ok(Scalar { value: Pointer, non_zero: non_zero }) + Ok(Scalar { value: Pointer, non_zero }) } else { let unsized_part = tcx.struct_tail(pointee); - match unsized_part.sty { - ty::TySlice(_) | ty::TyStr => Ok(FatPointer { - metadata: Int(dl.ptr_sized_integer()), - non_zero: non_zero - }), - 
ty::TyDynamic(..) => Ok(FatPointer { metadata: Pointer, non_zero: non_zero }), - ty::TyForeign(..) => Ok(Scalar { value: Pointer, non_zero: non_zero }), - _ => Err(LayoutError::Unknown(unsized_part)), - } + let metadata = match unsized_part.sty { + ty::TyForeign(..) => return Ok(Scalar { value: Pointer, non_zero }), + ty::TySlice(_) | ty::TyStr => { + Int(dl.ptr_sized_integer()) + } + ty::TyDynamic(..) => Pointer, + _ => return Err(LayoutError::Unknown(unsized_part)) + }; + Ok(FatPointer { metadata, non_zero }) } }; @@ -1194,11 +1182,10 @@ impl<'a, 'tcx> Layout { ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true }, // The never type. - ty::TyNever => Univariant { - variant: Struct::new(dl, &vec![], &ReprOptions::default(), - StructKind::AlwaysSizedUnivariant, ty)?, - non_zero: false - }, + ty::TyNever => { + Univariant(Struct::new(dl, &[], &ReprOptions::default(), + StructKind::AlwaysSizedUnivariant, ty)?) + } // Potentially-fat pointers. ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | @@ -1218,7 +1205,7 @@ impl<'a, 'tcx> Layout { } } - let element = element.layout(tcx, param_env)?; + let element = cx.layout_of(element)?; let element_size = element.size(dl); let count = count.val.to_const_int().unwrap().to_u64().unwrap(); if element_size.checked_mul(count, dl).is_none() { @@ -1233,7 +1220,7 @@ impl<'a, 'tcx> Layout { } } ty::TySlice(element) => { - let element = element.layout(tcx, param_env)?; + let element = cx.layout_of(element)?; Array { sized: false, align: element.align(dl), @@ -1254,38 +1241,33 @@ impl<'a, 'tcx> Layout { // Odd unit types. ty::TyFnDef(..) => { - Univariant { - variant: Struct::new(dl, &vec![], - &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?, - non_zero: false - } + Univariant(Struct::new(dl, &[], &ReprOptions::default(), + StructKind::AlwaysSizedUnivariant, ty)?) } ty::TyDynamic(..) | ty::TyForeign(..) 
=> { - let mut unit = Struct::new(dl, &vec![], &ReprOptions::default(), + let mut unit = Struct::new(dl, &[], &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?; unit.sized = false; - Univariant { variant: unit, non_zero: false } + Univariant(unit) } // Tuples, generators and closures. ty::TyGenerator(def_id, ref substs, _) => { let tys = substs.field_tys(def_id, tcx); - let st = Struct::new(dl, - &tys.map(|ty| ty.layout(tcx, param_env)) + Univariant(Struct::new(dl, + &tys.map(|ty| cx.layout_of(ty)) .collect::, _>>()?, &ReprOptions::default(), - StructKind::AlwaysSizedUnivariant, ty)?; - Univariant { variant: st, non_zero: false } + StructKind::AlwaysSizedUnivariant, ty)?) } ty::TyClosure(def_id, ref substs) => { let tys = substs.upvar_tys(def_id, tcx); - let st = Struct::new(dl, - &tys.map(|ty| ty.layout(tcx, param_env)) + Univariant(Struct::new(dl, + &tys.map(|ty| cx.layout_of(ty)) .collect::, _>>()?, &ReprOptions::default(), - StructKind::AlwaysSizedUnivariant, ty)?; - Univariant { variant: st, non_zero: false } + StructKind::AlwaysSizedUnivariant, ty)?) } ty::TyTuple(tys, _) => { @@ -1295,17 +1277,16 @@ impl<'a, 'tcx> Layout { StructKind::MaybeUnsizedUnivariant }; - let st = Struct::new(dl, - &tys.iter().map(|ty| ty.layout(tcx, param_env)) + Univariant(Struct::new(dl, + &tys.iter().map(|ty| cx.layout_of(ty)) .collect::, _>>()?, - &ReprOptions::default(), kind, ty)?; - Univariant { variant: st, non_zero: false } + &ReprOptions::default(), kind, ty)?) } // SIMD vector types. ty::TyAdt(def, ..) if def.repr.simd() => { let element = ty.simd_type(tcx); - match *element.layout(tcx, param_env)? { + match *cx.layout_of(element)? { Scalar { value, .. } => { return success(Vector { element: value, @@ -1326,11 +1307,8 @@ impl<'a, 'tcx> Layout { // Uninhabitable; represent as unit // (Typechecking will reject discriminant-sizing attrs.) 
- return success(Univariant { - variant: Struct::new(dl, &vec![], - &def.repr, StructKind::AlwaysSizedUnivariant, ty)?, - non_zero: false - }); + return success(Univariant(Struct::new(dl, &[], + &def.repr, StructKind::AlwaysSizedUnivariant, ty)?)); } if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) { @@ -1376,17 +1354,14 @@ impl<'a, 'tcx> Layout { }; let fields = def.variants[0].fields.iter().map(|field| { - field.ty(tcx, substs).layout(tcx, param_env) + cx.layout_of(field.ty(tcx, substs)) }).collect::, _>>()?; let layout = if def.is_union() { let mut un = Union::new(dl, &def.repr); - un.extend(dl, fields.iter().map(|&f| Ok(f)), ty)?; - UntaggedUnion { variants: un } + un.extend(dl, fields.iter().map(|&f| Ok(f.layout)), ty)?; + UntaggedUnion(un) } else { - let st = Struct::new(dl, &fields, &def.repr, - kind, ty)?; - let non_zero = Some(def.did) == tcx.lang_items().non_zero(); - Univariant { variant: st, non_zero: non_zero } + Univariant(Struct::new(dl, &fields, &def.repr, kind, ty)?) }; return success(layout); } @@ -1403,28 +1378,41 @@ impl<'a, 'tcx> Layout { // Cache the substituted and normalized variant field types. let variants = def.variants.iter().map(|v| { - v.fields.iter().map(|field| field.ty(tcx, substs)).collect::>() - }).collect::>(); + v.fields.iter().map(|field| { + cx.layout_of(field.ty(tcx, substs)) + }).collect::, _>>() + }).collect::, _>>()?; if variants.len() == 2 && !def.repr.inhibit_enum_layout_opt() { // Nullable pointer optimization + let st0 = Struct::new(dl, &variants[0], + &def.repr, StructKind::AlwaysSizedUnivariant, ty)?; + let st1 = Struct::new(dl, &variants[1], + &def.repr, StructKind::AlwaysSizedUnivariant, ty)?; + + let mut choice = None; for discr in 0..2 { - let other_fields = variants[1 - discr].iter().map(|ty| { - ty.layout(tcx, param_env) - }); - if !Struct::would_be_zero_sized(dl, other_fields)? 
{ + let (st, other) = if discr == 0 { + (&st0, &st1) + } else { + (&st1, &st0) + }; + if other.stride().bytes() > 0 { continue; } - let st = Struct::new(dl, - &variants[discr].iter().map(|ty| ty.layout(tcx, param_env)) - .collect::, _>>()?, - &def.repr, StructKind::AlwaysSizedUnivariant, ty)?; - let field = st.non_zero_field(tcx, param_env, - variants[discr].iter().cloned())?; - let (offset, primitive) = if let Some(f) = field { f } - else { continue }; + variants[discr].iter().map(|&f| Ok(f)))?; + if let Some((offset, primitive)) = field { + choice = Some((discr, offset, primitive)); + break; + } + } + + if let Some((discr, offset, primitive)) = choice { + // HACK(eddyb) work around not being able to move + // out of arrays with just the indexing operator. + let st = if discr == 0 { st0 } else { st1 }; // FIXME(eddyb) should take advantage of a newtype. if offset.bytes() == 0 && primitive.size(dl) == st.stride() && @@ -1457,11 +1445,7 @@ impl<'a, 'tcx> Layout { // Create the set of structs that represent each variant. let mut variants = variants.into_iter().map(|fields| { - let fields = fields.into_iter().map(|field| { - field.layout(tcx, param_env) - }).collect::, _>>()?; - let st = Struct::new(dl, - &fields, + let st = Struct::new(dl, &fields, &def.repr, StructKind::EnumVariant(min_ity), ty)?; // Find the first field we can't move later // to make room for a larger discriminant. @@ -1552,7 +1536,7 @@ impl<'a, 'tcx> Layout { if ty == normalized { return Err(LayoutError::Unknown(ty)); } - return normalized.layout(tcx, param_env); + return Ok(cx.layout_of(normalized)?.layout); } ty::TyParam(_) => { return Err(LayoutError::Unknown(ty)); @@ -1574,7 +1558,7 @@ impl<'a, 'tcx> Layout { StructWrappedNullablePointer {..} => false, Array { sized, .. } | - Univariant { variant: Struct { sized, .. }, .. } => !sized + Univariant(Struct { sized, .. }) => !sized } } @@ -1612,9 +1596,9 @@ impl<'a, 'tcx> Layout { CEnum { discr, .. } => Int(discr).size(dl), General { size, .. 
} => size, - UntaggedUnion { ref variants } => variants.stride(), + UntaggedUnion(ref un) => un.stride(), - Univariant { ref variant, .. } | + Univariant(ref variant) | StructWrappedNullablePointer { nonnull: ref variant, .. } => { variant.stride() } @@ -1646,9 +1630,9 @@ impl<'a, 'tcx> Layout { CEnum { discr, .. } => Int(discr).align(dl), Array { align, .. } | General { align, .. } => align, - UntaggedUnion { ref variants } => variants.align, + UntaggedUnion(ref un) => un.align, - Univariant { ref variant, .. } | + Univariant(ref variant) | StructWrappedNullablePointer { nonnull: ref variant, .. } => { variant.align } @@ -1663,7 +1647,7 @@ impl<'a, 'tcx> Layout { pub fn primitive_align(&self, cx: C) -> Align { match *self { Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align, - Univariant { ref variant, .. } | + Univariant(ref variant) | StructWrappedNullablePointer { nonnull: ref variant, .. } => { variant.primitive_align }, @@ -1682,7 +1666,7 @@ impl<'a, 'tcx> Layout { match *self { Scalar { .. } | CEnum { .. } | - UntaggedUnion { .. } | + UntaggedUnion(_) | RawNullablePointer { .. } => { Size::from_bytes(0) } @@ -1710,7 +1694,7 @@ impl<'a, 'tcx> Layout { } } - Univariant { ref variant, .. } => variant.offsets[i], + Univariant(ref variant) => variant.offsets[i], General { ref variants, .. } => { let v = variant_index.expect("variant index required"); @@ -1730,10 +1714,10 @@ impl<'a, 'tcx> Layout { /// This is invoked by the `layout_raw` query to record the final /// layout of each type. #[inline] - pub fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - param_env: ty::ParamEnv<'tcx>, - layout: &Layout) { + fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>, + param_env: ty::ParamEnv<'tcx>, + layout: &Layout) { // If we are running with `-Zprint-type-sizes`, record layouts for // dumping later. 
Ignore layouts that are done with non-empty // environments or non-monomorphic layouts, as the user only wants @@ -1788,8 +1772,7 @@ impl<'a, 'tcx> Layout { let adt_kind = adt_def.adt_kind(); let build_field_info = |(field_name, field_ty): (ast::Name, Ty<'tcx>), offset: &Size| { - let layout = field_ty.layout(tcx, param_env); - match layout { + match (tcx, param_env).layout_of(field_ty) { Err(_) => bug!("no layout found for field {} type: `{:?}`", field_name, field_ty), Ok(field_layout) => { session::FieldInfo { @@ -1859,7 +1842,7 @@ impl<'a, 'tcx> Layout { record(adt_kind.into(), None, vec![build_primitive_info(variant_def.name, &discr)]); } - Layout::Univariant { variant: ref variant_layout, non_zero: _ } => { + Layout::Univariant(ref variant_layout) => { let variant_names = || { adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::>() }; @@ -1905,9 +1888,8 @@ impl<'a, 'tcx> Layout { record(adt_kind.into(), Some(discr.size()), variant_infos); } - Layout::UntaggedUnion { ref variants } => { - debug!("print-type-size t: `{:?}` adt union variants {:?}", - ty, variants); + Layout::UntaggedUnion(ref un) => { + debug!("print-type-size t: `{:?}` adt union {:?}", ty, un); // layout does not currently store info about each // variant... record(adt_kind.into(), None, Vec::new()); @@ -1966,7 +1948,7 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { assert!(!ty.has_infer_types()); // First try computing a static layout. - let err = match ty.layout(tcx, param_env) { + let err = match (tcx, param_env).layout_of(ty) { Ok(layout) => { return Ok(SizeSkeleton::Known(layout.size(tcx))); } @@ -2141,15 +2123,29 @@ pub trait LayoutOf { impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx>) { type FullLayout = Result, LayoutError<'tcx>>; + /// Computes the layout of a type. Note that this implicitly + /// executes in "reveal all" mode. 
+ #[inline] fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { let (tcx, param_env) = self; - let ty = tcx.normalize_associated_type_in_env(&ty, param_env); + let ty = tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all()); + let layout = tcx.layout_raw(param_env.reveal_all().and(ty)); + + // NB: This recording is normally disabled; when enabled, it + // can however trigger recursive invocations of `layout()`. + // Therefore, we execute it *after* the main query has + // completed, to avoid problems around recursive structures + // and the like. (Admitedly, I wasn't able to reproduce a problem + // here, but it seems like the right thing to do. -nmatsakis) + if let Ok(l) = layout { + Layout::record_layout_for_printing(tcx, ty, param_env, l); + } Ok(FullLayout { ty, variant_index: None, - layout: ty.layout(tcx, param_env)?, + layout: layout?, }) } } @@ -2204,7 +2200,7 @@ impl<'a, 'tcx> FullLayout<'tcx> { // Handled above (the TyAdt case). CEnum { .. } | General { .. } | - UntaggedUnion { .. } | + UntaggedUnion(_) | RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => bug!(), @@ -2217,7 +2213,7 @@ impl<'a, 'tcx> FullLayout<'tcx> { usize_count } - Univariant { ref variant, .. 
} => variant.offsets.len(), + Univariant(ref variant) => variant.offsets.len(), } } @@ -2356,12 +2352,11 @@ impl<'gcx> HashStable> for Layout min.hash_stable(hcx, hasher); max.hash_stable(hcx, hasher); } - Univariant { ref variant, non_zero } => { + Univariant(ref variant) => { variant.hash_stable(hcx, hasher); - non_zero.hash_stable(hcx, hasher); } - UntaggedUnion { ref variants } => { - variants.hash_stable(hcx, hasher); + UntaggedUnion(ref un) => { + un.hash_stable(hcx, hasher); } General { discr, ref variants, size, align, primitive_align } => { discr.hash_stable(hcx, hasher); diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index dac200efb39e3..48ec92a255b4c 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -2617,9 +2617,10 @@ fn original_crate_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } pub fn provide(providers: &mut ty::maps::Providers) { - util::provide(providers); context::provide(providers); erase_regions::provide(providers); + layout::provide(providers); + util::provide(providers); *providers = ty::maps::Providers { associated_item, associated_item_def_ids, diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs index a0219f2f95b84..23dd3f1bc2bba 100644 --- a/src/librustc/ty/util.rs +++ b/src/librustc/ty/util.rs @@ -19,7 +19,6 @@ use middle::const_val::ConstVal; use traits::{self, Reveal}; use ty::{self, Ty, TyCtxt, TypeFoldable}; use ty::fold::TypeVisitor; -use ty::layout::{Layout, LayoutError}; use ty::subst::{Subst, Kind}; use ty::TypeVariants::*; use util::common::ErrorReported; @@ -852,30 +851,6 @@ impl<'a, 'tcx> ty::TyS<'tcx> { tcx.needs_drop_raw(param_env.and(self)) } - /// Computes the layout of a type. Note that this implicitly - /// executes in "reveal all" mode. 
- #[inline] - pub fn layout<'lcx>(&'tcx self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>) - -> Result<&'tcx Layout, LayoutError<'tcx>> { - let ty = tcx.erase_regions(&self); - let layout = tcx.layout_raw(param_env.reveal_all().and(ty)); - - // NB: This recording is normally disabled; when enabled, it - // can however trigger recursive invocations of `layout()`. - // Therefore, we execute it *after* the main query has - // completed, to avoid problems around recursive structures - // and the like. (Admitedly, I wasn't able to reproduce a problem - // here, but it seems like the right thing to do. -nmatsakis) - if let Ok(l) = layout { - Layout::record_layout_for_printing(tcx, ty, param_env, l); - } - - layout - } - - /// Check whether a type is representable. This means it cannot contain unboxed /// structural recursion. This check is needed for structs and enums. pub fn is_representable(&'tcx self, @@ -1184,26 +1159,6 @@ fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Result<&'tcx Layout, LayoutError<'tcx>> -{ - let (param_env, ty) = query.into_parts(); - - let rec_limit = tcx.sess.recursion_limit.get(); - let depth = tcx.layout_depth.get(); - if depth > rec_limit { - tcx.sess.fatal( - &format!("overflow representing the type `{}`", ty)); - } - - tcx.layout_depth.set(depth+1); - let layout = Layout::compute_uncached(tcx, param_env, ty); - tcx.layout_depth.set(depth); - - layout -} - pub enum ExplicitSelf<'tcx> { ByValue, ByReference(ty::Region<'tcx>, hir::Mutability), @@ -1262,7 +1217,6 @@ pub fn provide(providers: &mut ty::maps::Providers) { is_sized_raw, is_freeze_raw, needs_drop_raw, - layout_raw, ..*providers }; } diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index 8f08987505b94..86bd227b1af07 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -13,7 +13,7 @@ use 
rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::ty::{self, AdtKind, Ty, TyCtxt}; -use rustc::ty::layout::{Layout, Primitive}; +use rustc::ty::layout::{Layout, LayoutOf, Primitive}; use middle::const_val::ConstVal; use rustc_const_eval::ConstContext; use util::nodemap::FxHashSet; @@ -748,9 +748,8 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences { // sizes only make sense for non-generic types let item_def_id = cx.tcx.hir.local_def_id(it.id); let t = cx.tcx.type_of(item_def_id); - let param_env = cx.param_env.reveal_all(); let ty = cx.tcx.erase_regions(&t); - let layout = ty.layout(cx.tcx, param_env).unwrap_or_else(|e| { + let layout = cx.layout_of(ty).unwrap_or_else(|e| { bug!("failed to get layout for `{}`: {}", t, e) }); diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs index 628a8161615e3..0e5528f916a7a 100644 --- a/src/librustc_mir/transform/inline.rs +++ b/src/librustc_mir/transform/inline.rs @@ -19,6 +19,7 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc::mir::*; use rustc::mir::visit::*; use rustc::ty::{self, Instance, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::layout::LayoutOf; use rustc::ty::subst::{Subst,Substs}; use std::collections::VecDeque; @@ -625,9 +626,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> Option { - ty.layout(tcx, param_env).ok().map(|layout| { - layout.size(&tcx.data_layout).bytes() - }) + (tcx, param_env).layout_of(ty).ok().map(|layout| layout.size(tcx).bytes()) } fn subst_and_normalize<'a, 'tcx: 'a>( diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 211c88b4b2294..695fc9160c5b3 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -330,7 +330,7 @@ impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { } } - Layout::Univariant { ref variant, .. 
} => { + Layout::Univariant(ref variant) => { let mut unaligned_offset = Size::from_bytes(0); let mut result = None; diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 62888556f1498..3d67d3df04284 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -74,7 +74,7 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, layout::Univariant { ..} | layout::StructWrappedNullablePointer { .. } => { let (variant_layout, variant) = match *l { - layout::Univariant { ref variant, .. } => { + layout::Univariant(ref variant) => { let is_enum = if let ty::TyAdt(def, _) = t.sty { def.is_enum() } else { @@ -123,7 +123,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } } - layout::Univariant { ref variant, .. } => { + layout::Univariant(ref variant) => { match name { None => { Type::struct_(cx, &struct_llfields(cx, l, &variant), @@ -134,16 +134,16 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } } - layout::UntaggedUnion { ref variants, .. }=> { + layout::UntaggedUnion(ref un) => { // Use alignment-sized ints to fill all the union storage. - let fill = union_fill(cx, variants.stride(), variants.align); + let fill = union_fill(cx, un.stride(), un.align); match name { None => { - Type::struct_(cx, &[fill], variants.packed) + Type::struct_(cx, &[fill], un.packed) } Some(name) => { let mut llty = Type::named_struct(cx, name); - llty.set_struct_body(&[fill], variants.packed); + llty.set_struct_body(&[fill], un.packed); llty } } diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 34c795316ba14..493a9f713fd23 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -101,7 +101,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) } } - Layout::Univariant { ref variant, .. 
} => { + Layout::Univariant(ref variant) => { for i in 0..layout.field_count() { let field_off = off + variant.offsets[i]; classify(ccx, layout.field(ccx, i), cls, field_off)?; diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index e259e19ac93a2..40ddc329dddd5 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -74,7 +74,7 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) let layout = ccx.layout_of(ty); match *layout { Layout::FatPointer { .. } => true, - Layout::Univariant { ref variant, .. } => { + Layout::Univariant(ref variant) => { // There must be only 2 fields. if variant.offsets.len() != 2 { return false; diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 703456ab41bd2..44dc831e44236 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -942,7 +942,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { let tmp; let offsets = match *layout { - layout::Univariant { ref variant, .. } => &variant.offsets, + layout::Univariant(ref variant) => &variant.offsets, layout::Vector { element, count } => { let element_size = element.size(cx).bytes(); tmp = (0..count). @@ -1022,7 +1022,7 @@ impl<'tcx> TupleMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { let layout = cx.layout_of(self.ty); - let offsets = if let layout::Univariant { ref variant, .. } = *layout { + let offsets = if let layout::Univariant(ref variant) = *layout { &variant.offsets } else { bug!("{} is not a tuple", self.ty); @@ -1184,7 +1184,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } }).collect() }, - layout::Univariant{ ref variant, .. 
} => { + layout::Univariant(ref variant) => { assert!(adt.variants.len() <= 1); if adt.variants.is_empty() { diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 8f4a983feb8b6..657f7da66b34b 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -59,7 +59,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf debug!("DST {} layout: {:?}", t, layout); let (sized_size, sized_align) = match *layout { - ty::layout::Layout::Univariant { ref variant, .. } => { + ty::layout::Layout::Univariant(ref variant) => { (variant.offsets.last().map_or(0, |o| o.bytes()), variant.align.abi()) } _ => { diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index b52d0da6580c4..badc9100c40a6 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1111,16 +1111,16 @@ fn trans_const_adt<'a, 'tcx>( build_const_struct(ccx, l, &variant, vals, Some(Const::new(lldiscr, d.to_ty(ccx.tcx(), false)))) } - layout::UntaggedUnion { ref variants, .. }=> { + layout::UntaggedUnion(ref un) => { assert_eq!(variant_index, 0); let contents = [ vals[0].llval, - padding(ccx, variants.stride() - ccx.size_of(vals[0].ty)) + padding(ccx, un.stride() - ccx.size_of(vals[0].ty)) ]; - Const::new(C_struct(ccx, &contents, variants.packed), t) + Const::new(C_struct(ccx, &contents, un.packed), t) } - layout::Univariant { ref variant, .. 
} => { + layout::Univariant(ref variant) => { assert_eq!(variant_index, 0); build_const_struct(ccx, l, &variant, vals, None) } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 0b9be3e49d1e7..8f094cd510223 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -58,8 +58,8 @@ impl ops::BitOr for Alignment { impl<'a> From<&'a Layout> for Alignment { fn from(layout: &Layout) -> Self { let (packed, align) = match *layout { - Layout::UntaggedUnion { ref variants } => (variants.packed, variants.align), - Layout::Univariant { ref variant, .. } => (variant.packed, variant.align), + Layout::UntaggedUnion(ref un) => (un.packed, un.align), + Layout::Univariant(ref variant) => (variant.packed, variant.align), _ => return Alignment::AbiAligned }; if packed { @@ -250,7 +250,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // Check whether the variant being used is packed, if applicable. let is_packed = match (&*l, l.variant_index) { - (&layout::Univariant { ref variant, .. }, _) => variant.packed, + (&layout::Univariant(ref variant), _) => variant.packed, (&layout::StructWrappedNullablePointer { ref nonnull, .. }, _) => nonnull.packed, (&layout::General { ref variants, .. }, Some(v)) => variants[v].packed, _ => return simple() diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index f365de123cd11..b1a9be881f789 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -577,7 +577,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let layout = bcx.ccx.layout_of(closure_ty); let offsets = match *layout { - layout::Univariant { ref variant, .. 
} => &variant.offsets[..], + layout::Univariant(ref variant) => &variant.offsets[..], _ => bug!("Closures are only supposed to be Univariant") }; diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index feb2b0569319a..d504ea1c30768 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -254,7 +254,7 @@ impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { adt::memory_index_to_gep(index as u64) } - Layout::Univariant { ref variant, .. } => { + Layout::Univariant(ref variant) => { adt::memory_index_to_gep(variant.memory_index[index] as u64) } diff --git a/src/test/ui/issue-26548.rs b/src/test/compile-fail/issue-26548.rs similarity index 70% rename from src/test/ui/issue-26548.rs rename to src/test/compile-fail/issue-26548.rs index 2591d7bcbaef4..39c6e97268f98 100644 --- a/src/test/ui/issue-26548.rs +++ b/src/test/compile-fail/issue-26548.rs @@ -8,7 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// error-pattern: overflow representing the type +// error-pattern: unsupported cyclic reference between types/traits detected +// note-pattern: the cycle begins when computing layout of +// note-pattern: ...which then requires computing layout of +// note-pattern: ...which then again requires computing layout of trait Mirror { type It: ?Sized; } diff --git a/src/test/ui/issue-26548.stderr b/src/test/ui/issue-26548.stderr deleted file mode 100644 index 8bfe4ac733b6d..0000000000000 --- a/src/test/ui/issue-26548.stderr +++ /dev/null @@ -1,9 +0,0 @@ -error[E0391]: unsupported cyclic reference between types/traits detected - | -note: the cycle begins when computing layout of `S`... -note: ...which then requires computing layout of `std::option::Option<::It>`... -note: ...which then requires computing layout of `::It`... - = note: ...which then again requires computing layout of `S`, completing the cycle. 
- -error: aborting due to previous error - diff --git a/src/tools/toolstate.toml b/src/tools/toolstate.toml index 744a0f96ad734..f1684f4c5acbe 100644 --- a/src/tools/toolstate.toml +++ b/src/tools/toolstate.toml @@ -26,7 +26,7 @@ miri = "Broken" # ping @Manishearth @llogiq @mcarton @oli-obk -clippy = "Testing" +clippy = "Broken" # ping @nrc rls = "Testing" From 9a0efea4c2ddab7214c7305dd470049e3240ad6a Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Wed, 13 Sep 2017 14:35:04 +0300 Subject: [PATCH 17/69] rustc: pre-compute field placements out of Layout. --- src/librustc/ty/layout.rs | 334 ++++++++++++++--------- src/librustc/ty/maps/mod.rs | 4 +- src/librustc_const_eval/eval.rs | 3 +- src/librustc_trans/abi.rs | 4 +- src/librustc_trans/adt.rs | 2 +- src/librustc_trans/cabi_s390x.rs | 2 +- src/librustc_trans/cabi_x86.rs | 2 +- src/librustc_trans/cabi_x86_64.rs | 4 +- src/librustc_trans/debuginfo/metadata.rs | 10 +- src/librustc_trans/glue.rs | 2 +- src/librustc_trans/mir/lvalue.rs | 2 +- 11 files changed, 228 insertions(+), 141 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index cb7021760274b..a08a9ddcd1ab3 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -836,7 +836,7 @@ impl<'a, 'tcx> Struct { } (&FatPointer { non_zero: true, .. }, _) => { - Ok(Some((layout.field_offset(tcx, FAT_PTR_ADDR), Pointer))) + Ok(Some((layout.fields.offset(FAT_PTR_ADDR), Pointer))) } // Is this the NonZero lang item wrapping a pointer or integer type? @@ -846,11 +846,11 @@ impl<'a, 'tcx> Struct { // FIXME(eddyb) also allow floating-point types here. Scalar { value: value @ Int(_), non_zero: false } | Scalar { value: value @ Pointer, non_zero: false } => { - Ok(Some((layout.field_offset(tcx, 0), value))) + Ok(Some((layout.fields.offset(0), value))) } FatPointer { non_zero: false, .. 
} => { - Ok(Some((layout.field_offset(tcx, 0) + - field.field_offset(tcx, FAT_PTR_ADDR), + Ok(Some((layout.fields.offset(0) + + field.fields.offset(FAT_PTR_ADDR), Pointer))) } _ => Ok(None) @@ -862,7 +862,7 @@ impl<'a, 'tcx> Struct { variant.non_zero_field( tcx, param_env, - (0..layout.field_count()).map(|i| layout.field(cx, i))) + (0..layout.fields.count()).map(|i| layout.field(cx, i))) } // Is this a fixed-size array of something non-zero @@ -991,6 +991,59 @@ pub const FAT_PTR_ADDR: usize = 0; /// - For a slice, this is the length. pub const FAT_PTR_EXTRA: usize = 1; +/// Describes how the fields of a type are located in memory. +#[derive(Copy, Clone, Debug)] +pub enum FieldPlacement<'a> { + /// Array-like placement. Can also express + /// unions, by using a stride of zero bytes. + Linear { + stride: Size, + count: u64 + }, + + /// Struct-like placement, with precomputed offsets. + /// + /// Fields are guaranteed to not overlap, but note that gaps + /// before, between and after all the fields are NOT always + /// padding, and as such their contents may not be discarded. + /// For example, enum variants leave a gap at the start, + /// where the discriminant field in the enum layout goes. + Arbitrary { + offsets: &'a [Size] + } +} + +impl<'a> FieldPlacement<'a> { + pub fn union(count: usize) -> Self { + FieldPlacement::Linear { + stride: Size::from_bytes(0), + count: count as u64 + } + } + + pub fn count(&self) -> usize { + match *self { + FieldPlacement::Linear { count, .. } => { + let usize_count = count as usize; + assert_eq!(usize_count as u64, count); + usize_count + } + FieldPlacement::Arbitrary { offsets } => offsets.len() + } + } + + pub fn offset(&self, i: usize) -> Size { + match *self { + FieldPlacement::Linear { stride, count, .. } => { + let i = i as u64; + assert!(i < count); + stride * i + } + FieldPlacement::Arbitrary { offsets } => offsets[i] + } + } +} + /// Type layout, from which size and alignment can be cheaply computed. 
/// For ADTs, it also includes field placement and enum optimizations. /// NOTE: Because Layout is interned, redundant information should be @@ -1105,9 +1158,15 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { } } +#[derive(Copy, Clone, Debug)] +pub struct CachedLayout<'tcx> { + pub layout: &'tcx Layout, + pub fields: FieldPlacement<'tcx>, +} + fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Result<&'tcx Layout, LayoutError<'tcx>> + -> Result, LayoutError<'tcx>> { let (param_env, ty) = query.into_parts(); @@ -1136,10 +1195,59 @@ impl<'a, 'tcx> Layout { fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) - -> Result<&'tcx Layout, LayoutError<'tcx>> { - let success = |layout| Ok(tcx.intern_layout(layout)); + -> Result, LayoutError<'tcx>> { let cx = (tcx, param_env); let dl = cx.data_layout(); + let success = |layout| { + let layout = tcx.intern_layout(layout); + let fields = match *layout { + Scalar { .. } | + CEnum { .. } | + RawNullablePointer { .. } | + StructWrappedNullablePointer { .. } => { + FieldPlacement::union(0) + } + + Vector { element, count } => { + FieldPlacement::Linear { + stride: element.size(tcx), + count + } + } + + Array { element_size, count, .. } => { + FieldPlacement::Linear { + stride: element_size, + count + } + } + + FatPointer { .. } => { + FieldPlacement::Linear { + stride: Pointer.size(tcx), + count: 2 + } + } + + Univariant(ref variant) => { + FieldPlacement::Arbitrary { + offsets: &variant.offsets + } + } + + UntaggedUnion(_) => { + // Handle unions through the type rather than Layout. + let def = ty.ty_adt_def().unwrap(); + FieldPlacement::union(def.struct_variant().fields.len()) + } + + General { .. 
} => FieldPlacement::union(1) + }; + Ok(CachedLayout { + layout, + fields + }) + }; assert!(!ty.has_infer_types()); let ptr_layout = |pointee: Ty<'tcx>| { @@ -1536,7 +1644,11 @@ impl<'a, 'tcx> Layout { if ty == normalized { return Err(LayoutError::Unknown(ty)); } - return Ok(cx.layout_of(normalized)?.layout); + let layout = cx.layout_of(normalized)?; + return Ok(CachedLayout { + layout: layout.layout, + fields: layout.fields + }); } ty::TyParam(_) => { return Err(LayoutError::Unknown(ty)); @@ -1656,61 +1768,6 @@ impl<'a, 'tcx> Layout { } } - pub fn field_offset(&self, - cx: C, - i: usize, - variant_index: Option) - -> Size { - let dl = cx.data_layout(); - - match *self { - Scalar { .. } | - CEnum { .. } | - UntaggedUnion(_) | - RawNullablePointer { .. } => { - Size::from_bytes(0) - } - - Vector { element, count } => { - let element_size = element.size(dl); - let i = i as u64; - assert!(i < count); - Size::from_bytes(element_size.bytes() * count) - } - - Array { element_size, count, .. } => { - let i = i as u64; - assert!(i < count); - Size::from_bytes(element_size.bytes() * count) - } - - FatPointer { metadata, .. } => { - // Effectively a (ptr, meta) tuple. - assert!(i < 2); - if i == 0 { - Size::from_bytes(0) - } else { - Pointer.size(dl).abi_align(metadata.align(dl)) - } - } - - Univariant(ref variant) => variant.offsets[i], - - General { ref variants, .. } => { - let v = variant_index.expect("variant index required"); - variants[v].offsets[i] - } - - StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => { - if Some(nndiscr as usize) == variant_index { - nonnull.offsets[i] - } else { - Size::from_bytes(0) - } - } - } - } - /// This is invoked by the `layout_raw` query to record the final /// layout of each type. 
#[inline] @@ -2077,6 +2134,7 @@ pub struct FullLayout<'tcx> { pub ty: Ty<'tcx>, pub variant_index: Option, pub layout: &'tcx Layout, + pub fields: FieldPlacement<'tcx>, } impl<'tcx> Deref for FullLayout<'tcx> { @@ -2130,90 +2188,94 @@ impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx let (tcx, param_env) = self; let ty = tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all()); - let layout = tcx.layout_raw(param_env.reveal_all().and(ty)); + let cached = tcx.layout_raw(param_env.reveal_all().and(ty))?; // NB: This recording is normally disabled; when enabled, it - // can however trigger recursive invocations of `layout()`. + // can however trigger recursive invocations of `layout_of`. // Therefore, we execute it *after* the main query has // completed, to avoid problems around recursive structures // and the like. (Admitedly, I wasn't able to reproduce a problem // here, but it seems like the right thing to do. -nmatsakis) - if let Ok(l) = layout { - Layout::record_layout_for_printing(tcx, ty, param_env, l); - } + Layout::record_layout_for_printing(tcx, ty, param_env, cached.layout); + + Ok(FullLayout { + ty, + variant_index: None, + layout: cached.layout, + fields: cached.fields + }) + } +} + +impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, + ty::ParamEnv<'tcx>) { + type FullLayout = Result, LayoutError<'tcx>>; + + /// Computes the layout of a type. Note that this implicitly + /// executes in "reveal all" mode. + #[inline] + fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { + let (tcx_at, param_env) = self; + + let ty = tcx_at.tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all()); + let cached = tcx_at.layout_raw(param_env.reveal_all().and(ty))?; + + // NB: This recording is normally disabled; when enabled, it + // can however trigger recursive invocations of `layout_of`. 
+ // Therefore, we execute it *after* the main query has + // completed, to avoid problems around recursive structures + // and the like. (Admitedly, I wasn't able to reproduce a problem + // here, but it seems like the right thing to do. -nmatsakis) + Layout::record_layout_for_printing(tcx_at.tcx, ty, param_env, cached.layout); Ok(FullLayout { ty, variant_index: None, - layout: layout?, + layout: cached.layout, + fields: cached.fields }) } } impl<'a, 'tcx> FullLayout<'tcx> { pub fn for_variant(&self, variant_index: usize) -> Self { - let is_enum = match self.ty.sty { - ty::TyAdt(def, _) => def.is_enum(), - _ => false + let variants = match self.ty.sty { + ty::TyAdt(def, _) if def.is_enum() => &def.variants[..], + _ => &[] + }; + let count = if variants.is_empty() { + 0 + } else { + variants[variant_index].fields.len() }; - assert!(is_enum); - FullLayout { - variant_index: Some(variant_index), - ..*self - } - } - - pub fn field_offset(&self, cx: C, i: usize) -> Size { - self.layout.field_offset(cx, i, self.variant_index) - } - pub fn field_count(&self) -> usize { - // Handle enum/union through the type rather than Layout. - if let ty::TyAdt(def, _) = self.ty.sty { - let v = if def.is_enum() { - if def.variants.is_empty() { - return 0; - } - match self.variant_index { - None => match *self.layout { - // Discriminant field for enums (where applicable). - General { .. } => return 1, - _ if def.variants.len() > 1 => return 0, - - // Enums with one variant behave like structs. - _ => 0 - }, - Some(v) => v + let fields = match *self.layout { + Univariant(ref variant) => { + FieldPlacement::Arbitrary { + offsets: &variant.offsets } - } else { - 0 - }; - - return def.variants[v].fields.len(); - } - - match *self.layout { - Scalar { .. } => { - bug!("FullLayout::field_count({:?}): not applicable", self) } - // Handled above (the TyAdt case). - CEnum { .. } | - General { .. } | - UntaggedUnion(_) | - RawNullablePointer { .. } | - StructWrappedNullablePointer { .. 
} => bug!(), - - FatPointer { .. } => 2, + General { ref variants, .. } => { + FieldPlacement::Arbitrary { + offsets: &variants[variant_index].offsets + } + } - Vector { count, .. } | - Array { count, .. } => { - let usize_count = count as usize; - assert_eq!(usize_count as u64, count); - usize_count + StructWrappedNullablePointer { nndiscr, ref nonnull, .. } + if nndiscr as usize == variant_index => { + FieldPlacement::Arbitrary { + offsets: &nonnull.offsets + } } - Univariant(ref variant) => variant.offsets.len(), + _ => FieldPlacement::union(count) + }; + + FullLayout { + variant_index: Some(variant_index), + fields, + ..*self } } @@ -2384,6 +2446,30 @@ impl<'gcx> HashStable> for Layout } } +impl<'gcx> HashStable> for FieldPlacement<'gcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + use ty::layout::FieldPlacement::*; + mem::discriminant(self).hash_stable(hcx, hasher); + + match *self { + Linear { count, stride } => { + count.hash_stable(hcx, hasher); + stride.hash_stable(hcx, hasher); + } + Arbitrary { offsets } => { + offsets.hash_stable(hcx, hasher); + } + } + } +} + +impl_stable_hash_for!(struct ::ty::layout::CachedLayout<'tcx> { + layout, + fields +}); + impl_stable_hash_for!(enum ::ty::layout::Integer { I1, I8, diff --git a/src/librustc/ty/maps/mod.rs b/src/librustc/ty/maps/mod.rs index 320f651484987..6746776308903 100644 --- a/src/librustc/ty/maps/mod.rs +++ b/src/librustc/ty/maps/mod.rs @@ -34,7 +34,6 @@ use session::config::OutputFilenames; use traits::Vtable; use traits::specialization_graph; use ty::{self, CrateInherentImpls, Ty, TyCtxt}; -use ty::layout::{Layout, LayoutError}; use ty::steal::Steal; use ty::subst::Substs; use util::nodemap::{DefIdSet, DefIdMap, ItemLocalSet}; @@ -265,7 +264,8 @@ define_maps! 
{ <'tcx> [] fn is_freeze_raw: is_freeze_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, [] fn needs_drop_raw: needs_drop_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, [] fn layout_raw: layout_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Result<&'tcx Layout, LayoutError<'tcx>>, + -> Result, + ty::layout::LayoutError<'tcx>>, [] fn dylib_dependency_formats: DylibDepFormats(CrateNum) -> Rc>, diff --git a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs index 657156902b5c1..7badea86c1aba 100644 --- a/src/librustc_const_eval/eval.rs +++ b/src/librustc_const_eval/eval.rs @@ -17,6 +17,7 @@ use rustc::hir::map::blocks::FnLikeNode; use rustc::hir::def::{Def, CtorKind}; use rustc::hir::def_id::DefId; use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::LayoutOf; use rustc::ty::maps::Providers; use rustc::ty::util::IntTypeExt; use rustc::ty::subst::{Substs, Subst}; @@ -313,7 +314,7 @@ fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>, if tcx.fn_sig(def_id).abi() == Abi::RustIntrinsic { let layout_of = |ty: Ty<'tcx>| { let ty = tcx.erase_regions(&ty); - tcx.at(e.span).layout_raw(cx.param_env.reveal_all().and(ty)).map_err(|err| { + (tcx.at(e.span), cx.param_env).layout_of(ty).map_err(|err| { ConstEvalErr { span: e.span, kind: LayoutError(err) } }) }; diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 695fc9160c5b3..1b4cbd687df4b 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -334,7 +334,7 @@ impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { let mut unaligned_offset = Size::from_bytes(0); let mut result = None; - for i in 0..self.field_count() { + for i in 0..self.fields.count() { if unaligned_offset != variant.offsets[i] { return None; } @@ -371,7 +371,7 @@ impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { let mut max = Size::from_bytes(0); let mut result = None; - for i in 0..self.field_count() { + for i in 0..self.fields.count() { let field = self.field(ccx, i); match (result, 
field.homogeneous_aggregate(ccx)) { // The field itself must be a homogeneous aggregate. diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 3d67d3df04284..ec87429c2b228 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -209,7 +209,7 @@ pub fn memory_index_to_gep(index: u64) -> u64 { pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, layout: FullLayout<'tcx>, variant: &layout::Struct) -> Vec { - let field_count = layout.field_count(); + let field_count = layout.fields.count(); debug!("struct_llfields: variant: {:?}", variant); let mut offset = Size::from_bytes(0); let mut result: Vec = Vec::with_capacity(1 + field_count * 2); diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs index 225919c6a3afd..4db33b9204a1e 100644 --- a/src/librustc_trans/cabi_s390x.rs +++ b/src/librustc_trans/cabi_s390x.rs @@ -30,7 +30,7 @@ fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, Layout::Scalar { value: layout::F32, .. } | Layout::Scalar { value: layout::F64, .. } => true, Layout::Univariant { .. } => { - if layout.field_count() == 1 { + if layout.fields.count() == 1 { is_single_fp_element(ccx, layout.field(ccx, 0)) } else { false diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index 0f9f9b87b5cf8..362ceb6060139 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -25,7 +25,7 @@ fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, Layout::Scalar { value: layout::F32, .. } | Layout::Scalar { value: layout::F64, .. } => true, Layout::Univariant { .. 
} => { - if layout.field_count() == 1 { + if layout.fields.count() == 1 { is_single_fp_element(ccx, layout.field(ccx, 0)) } else { false diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 493a9f713fd23..95470b075640b 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -102,14 +102,14 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) } Layout::Univariant(ref variant) => { - for i in 0..layout.field_count() { + for i in 0..layout.fields.count() { let field_off = off + variant.offsets[i]; classify(ccx, layout.field(ccx, i), cls, field_off)?; } } Layout::UntaggedUnion { .. } => { - for i in 0..layout.field_count() { + for i in 0..layout.fields.count() { classify(ccx, layout.field(ccx, i), cls, off)?; } } diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 44dc831e44236..e08141f2fc39e 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -429,7 +429,7 @@ fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, type_metadata: type_metadata(cx, cx.tcx().mk_mut_ptr(cx.tcx().types.u8), syntax_pos::DUMMY_SP), - offset: layout.field_offset(cx, 0), + offset: layout.fields.offset(0), size: data_ptr_field.size(cx), align: data_ptr_field.align(cx), flags: DIFlags::FlagArtificial, @@ -437,7 +437,7 @@ fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, MemberDescription { name: "vtable".to_string(), type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP), - offset: layout.field_offset(cx, 1), + offset: layout.fields.offset(1), size: vtable_field.size(cx), align: vtable_field.align(cx), flags: DIFlags::FlagArtificial, @@ -1321,8 +1321,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { layout: FullLayout<'tcx>, offset: Size, size: Size) { - for i in 0..layout.field_count() { - let field_offset = layout.field_offset(ccx, i); + for i in 
0..layout.fields.count() { + let field_offset = layout.fields.offset(i); if field_offset > offset { continue; } @@ -1414,7 +1414,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }; let layout = layout.for_variant(variant_index); - let mut field_tys = (0..layout.field_count()).map(|i| { + let mut field_tys = (0..layout.fields.count()).map(|i| { layout.field(cx, i).ty }).collect::>(); diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 657f7da66b34b..61c0820539d56 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -74,7 +74,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf // Recurse to get the size of the dynamically sized field (must be // the last field). - let field_ty = layout.field(ccx, layout.field_count() - 1).ty; + let field_ty = layout.field(ccx, layout.fields.count() - 1).ty; let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info); // FIXME (#26403, #27023): We should be adding padding diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 8f094cd510223..2bd76308c9187 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -295,7 +295,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let meta = self.llextra; - let offset = l.field_offset(ccx, ix).bytes(); + let offset = l.fields.offset(ix).bytes(); let unaligned_offset = C_usize(ccx, offset); // Get the alignment of the field From 30710609c06beecf4ef33d04d2814f9503f37b6b Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Wed, 13 Sep 2017 23:18:40 +0300 Subject: [PATCH 18/69] rustc_trans: treat General enums like unions. 
--- src/librustc_trans/adt.rs | 29 ++++------------------------- src/librustc_trans/mir/lvalue.rs | 5 +++++ src/librustc_trans/type_of.rs | 3 +-- 3 files changed, 10 insertions(+), 27 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index ec87429c2b228..7f64019550634 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -148,36 +148,15 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } } - layout::General { discr, size, align, primitive_align, .. } => { - // We need a representation that has: - // * The alignment of the most-aligned field - // * The size of the largest variant (rounded up to that alignment) - // * No alignment padding anywhere any variant has actual data - // (currently matters only for enums small enough to be immediate) - // * The discriminant in an obvious place. - // - // So we start with the discriminant, pad it up to the alignment with - // more of its own type, then use alignment-sized ints to get the rest - // of the size. - let discr_ty = Type::from_integer(cx, discr); - let discr_size = discr.size().bytes(); - let padded_discr_size = discr.size().abi_align(align); - let variant_part_size = size - padded_discr_size; - - // Ensure discr_ty can fill pad evenly - assert_eq!(padded_discr_size.bytes() % discr_size, 0); - let fields = [ - discr_ty, - Type::array(&discr_ty, padded_discr_size.bytes() / discr_size - 1), - union_fill(cx, variant_part_size, primitive_align) - ]; + layout::General { size, align, .. 
} => { + let fill = union_fill(cx, size, align); match name { None => { - Type::struct_(cx, &fields, false) + Type::struct_(cx, &[fill], false) } Some(name) => { let mut llty = Type::named_struct(cx, name); - llty.set_struct_body(&fields, false); + llty.set_struct_body(&[fill], false); llty } } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 2bd76308c9187..40515743af04c 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -216,6 +216,11 @@ impl<'a, 'tcx> LvalueRef<'tcx> { return LvalueRef::new_sized( bcx.pointercast(self.llval, ty.ptr_to()), fty, alignment); } + layout::General { .. } if l.variant_index.is_none() => { + let ty = ccx.llvm_type_of(fty); + return LvalueRef::new_sized( + bcx.pointercast(self.llval, ty.ptr_to()), fty, alignment); + } layout::RawNullablePointer { nndiscr, .. } | layout::StructWrappedNullablePointer { nndiscr, .. } if l.variant_index.unwrap() as u64 != nndiscr => { diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index d504ea1c30768..06a82bb2de476 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -262,8 +262,7 @@ impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { if let Some(v) = self.variant_index { adt::memory_index_to_gep(variants[v].memory_index[index] as u64) } else { - assert_eq!(index, 0); - index as u64 + bug!("FullLayout::llvm_field_index({:?}): not applicable", self) } } From 1dc572b85e1f7bc245fb31ba659c822d68fda0bc Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Thu, 14 Sep 2017 00:02:53 +0300 Subject: [PATCH 19/69] rustc: represent the discriminant as a field for Layout::{Raw,StructWrapped}NullablePointer. 
--- src/librustc/ty/layout.rs | 41 ++++-- src/librustc_trans/mir/lvalue.rs | 117 ++++++++---------- src/librustc_trans/type_.rs | 10 -- .../run-pass/packed-struct-optimized-enum.rs | 25 ++++ 4 files changed, 112 insertions(+), 81 deletions(-) create mode 100644 src/test/run-pass/packed-struct-optimized-enum.rs diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index a08a9ddcd1ab3..e7e0d08b69c20 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -12,6 +12,7 @@ pub use self::Integer::*; pub use self::Layout::*; pub use self::Primitive::*; +use rustc_back::slice::ref_slice; use session::{self, DataTypeKind, Session}; use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, ReprFlags}; @@ -582,7 +583,7 @@ pub enum Primitive { Pointer } -impl Primitive { +impl<'a, 'tcx> Primitive { pub fn size(self, cx: C) -> Size { let dl = cx.data_layout(); @@ -611,6 +612,15 @@ impl Primitive { Pointer => dl.pointer_align } } + + pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { + match *self { + Int(i) => i.to_ty(tcx, false), + F32 => tcx.types.f32, + F64 => tcx.types.f64, + Pointer => tcx.mk_mut_ptr(tcx.mk_nil()), + } + } } /// A structure, a product type in ADT terms. @@ -1202,9 +1212,7 @@ impl<'a, 'tcx> Layout { let layout = tcx.intern_layout(layout); let fields = match *layout { Scalar { .. } | - CEnum { .. } | - RawNullablePointer { .. } | - StructWrappedNullablePointer { .. } => { + CEnum { .. } => { FieldPlacement::union(0) } @@ -1241,7 +1249,14 @@ impl<'a, 'tcx> Layout { FieldPlacement::union(def.struct_variant().fields.len()) } - General { .. } => FieldPlacement::union(1) + General { .. } | + RawNullablePointer { .. } => FieldPlacement::union(1), + + StructWrappedNullablePointer { ref discr_offset, .. 
} => { + FieldPlacement::Arbitrary { + offsets: ref_slice(discr_offset) + } + } }; Ok(CachedLayout { layout, @@ -1520,7 +1535,7 @@ impl<'a, 'tcx> Layout { if let Some((discr, offset, primitive)) = choice { // HACK(eddyb) work around not being able to move // out of arrays with just the indexing operator. - let st = if discr == 0 { st0 } else { st1 }; + let mut st = if discr == 0 { st0 } else { st1 }; // FIXME(eddyb) should take advantage of a newtype. if offset.bytes() == 0 && primitive.size(dl) == st.stride() && @@ -1531,6 +1546,14 @@ impl<'a, 'tcx> Layout { }); } + let mut discr_align = primitive.align(dl); + if offset.abi_align(discr_align) != offset { + st.packed = true; + discr_align = dl.i8_align; + } + st.align = st.align.max(discr_align); + st.primitive_align = st.primitive_align.max(discr_align); + return success(StructWrappedNullablePointer { nndiscr: discr as u64, nonnull: st, @@ -2292,7 +2315,7 @@ impl<'a, 'tcx> FullLayout<'tcx> { match tcx.struct_tail(pointee).sty { ty::TySlice(element) => slice(element), ty::TyStr => slice(tcx.types.u8), - ty::TyDynamic(..) => tcx.mk_mut_ptr(tcx.mk_nil()), + ty::TyDynamic(..) => Pointer.to_ty(tcx), _ => bug!("FullLayout::field_type({:?}): not applicable", self) } }; @@ -2350,6 +2373,10 @@ impl<'a, 'tcx> FullLayout<'tcx> { General { discr, .. } => { return [discr.to_ty(tcx, false)][i]; } + RawNullablePointer { discr, .. } | + StructWrappedNullablePointer { discr, .. } => { + return [discr.to_ty(tcx)][i]; + } _ if def.variants.len() > 1 => return [][i], // Enums with one variant behave like structs. 
diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 40515743af04c..cb4abc61c6221 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -10,7 +10,7 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, Align, Layout, LayoutOf, Size}; +use rustc::ty::layout::{self, Align, Layout, LayoutOf}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; @@ -205,37 +205,55 @@ impl<'a, 'tcx> LvalueRef<'tcx> { l = l.for_variant(variant_index) } } - let fty = l.field(ccx, ix).ty; + let field = l.field(ccx, ix); + let offset = l.fields.offset(ix).bytes(); let alignment = self.alignment | Alignment::from(&*l); // Handle all the non-aggregate cases first. match *l { layout::UntaggedUnion { .. } => { - let ty = ccx.llvm_type_of(fty); + let ty = ccx.llvm_type_of(field.ty); return LvalueRef::new_sized( - bcx.pointercast(self.llval, ty.ptr_to()), fty, alignment); + bcx.pointercast(self.llval, ty.ptr_to()), field.ty, alignment); } - layout::General { .. } if l.variant_index.is_none() => { - let ty = ccx.llvm_type_of(fty); + // Discriminant field of enums. + layout::General { .. } | + layout::RawNullablePointer { .. } | + layout::StructWrappedNullablePointer { .. } if l.variant_index.is_none() => { + let ty = ccx.llvm_type_of(field.ty); + let size = field.size(ccx).bytes(); + + // If the discriminant is not on a multiple of the primitive's size, + // we need to go through i8*. Also assume the worst alignment. 
+ if offset % size != 0 { + let byte_ptr = bcx.pointercast(self.llval, Type::i8p(ccx)); + let byte_ptr = bcx.inbounds_gep(byte_ptr, &[C_usize(ccx, offset)]); + let byte_align = Alignment::Packed(Align::from_bytes(1, 1).unwrap()); + return LvalueRef::new_sized( + bcx.pointercast(byte_ptr, ty.ptr_to()), field.ty, byte_align); + } + + let discr_ptr = bcx.pointercast(self.llval, ty.ptr_to()); return LvalueRef::new_sized( - bcx.pointercast(self.llval, ty.ptr_to()), fty, alignment); + bcx.inbounds_gep(discr_ptr, &[C_usize(ccx, offset / size)]), + field.ty, alignment); } layout::RawNullablePointer { nndiscr, .. } | layout::StructWrappedNullablePointer { nndiscr, .. } if l.variant_index.unwrap() as u64 != nndiscr => { // The unit-like case might have a nonzero number of unit-like fields. // (e.d., Result of Either with (), as one side.) - let ty = ccx.llvm_type_of(fty); - assert_eq!(ccx.size_of(fty).bytes(), 0); + let ty = ccx.llvm_type_of(field.ty); + assert_eq!(field.size(ccx).bytes(), 0); return LvalueRef::new_sized( - bcx.pointercast(self.llval, ty.ptr_to()), fty, + bcx.pointercast(self.llval, ty.ptr_to()), field.ty, Alignment::Packed(Align::from_bytes(1, 1).unwrap())); } layout::RawNullablePointer { .. 
} => { - let ty = ccx.llvm_type_of(fty); + let ty = ccx.llvm_type_of(field.ty); return LvalueRef::new_sized( - bcx.pointercast(self.llval, ty.ptr_to()), fty, alignment); + bcx.pointercast(self.llval, ty.ptr_to()), field.ty, alignment); } _ => {} } @@ -243,12 +261,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let simple = || { LvalueRef { llval: bcx.struct_gep(self.llval, l.llvm_field_index(ix)), - llextra: if !ccx.shared().type_has_metadata(fty) { - ptr::null_mut() - } else { + llextra: if ccx.shared().type_has_metadata(field.ty) { self.llextra + } else { + ptr::null_mut() }, - ty: LvalueTy::from_ty(fty), + ty: LvalueTy::from_ty(field.ty), alignment, } }; @@ -264,13 +282,13 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // Simple case - we can just GEP the field // * Packed struct - There is no alignment padding // * Field is sized - pointer is properly aligned already - if is_packed || ccx.shared().type_is_sized(fty) { + if is_packed || !field.is_unsized() { return simple(); } // If the type of the last field is [T], str or a foreign type, then we don't need to do // any adjusments - match fty.sty { + match field.ty.sty { ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) 
=> return simple(), _ => () } @@ -299,12 +317,10 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let meta = self.llextra; - - let offset = l.fields.offset(ix).bytes(); let unaligned_offset = C_usize(ccx, offset); // Get the alignment of the field - let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); + let (_, align) = glue::size_and_align_of_dst(bcx, field.ty, meta); // Bump the unaligned offset up to the appropriate alignment using the // following expression: @@ -323,39 +339,17 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let byte_ptr = bcx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = ccx.llvm_type_of(fty); + let ll_fty = ccx.llvm_type_of(field.ty); debug!("struct_field_ptr: Field type is {:?}", ll_fty); LvalueRef { llval: bcx.pointercast(byte_ptr, ll_fty.ptr_to()), llextra: self.llextra, - ty: LvalueTy::from_ty(fty), + ty: LvalueTy::from_ty(field.ty), alignment, } } - // Return a pointer to the discriminant, given its type and offset. - fn gepi_discr_at_offset(self, bcx: &Builder, - discr: ty::layout::Primitive, - offset: Size) - -> (ValueRef, Alignment) { - let size = discr.size(bcx.ccx); - let ptr_ty = Type::from_primitive(bcx.ccx, discr).ptr_to(); - - // If the discriminant is not on a multiple of the primitive's size, - // we need to go through i8*. Also assume the worst alignment. - if offset.bytes() % size.bytes() != 0 { - let byte_ptr = bcx.pointercast(self.llval, Type::i8p(bcx.ccx)); - let byte_ptr = bcx.inbounds_gep(byte_ptr, &[C_usize(bcx.ccx, offset.bytes())]); - let byte_align = Alignment::Packed(Align::from_bytes(1, 1).unwrap()); - return (bcx.pointercast(byte_ptr, ptr_ty), byte_align); - } - - let discr_ptr = bcx.pointercast(self.llval, ptr_ty); - (bcx.inbounds_gep(discr_ptr, &[C_usize(bcx.ccx, offset.bytes() / size.bytes())]), - self.alignment) - } - /// Helper for cases where the discriminant is simply loaded. 
fn load_discr(self, bcx: &Builder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64) -> ValueRef { @@ -394,16 +388,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> { self.load_discr(bcx, discr, ptr.llval, 0, variants.len() as u64 - 1) } layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0), - layout::RawNullablePointer { nndiscr, discr } | - layout::StructWrappedNullablePointer { nndiscr, discr, .. } => { - let discr_offset = match *l { - layout::StructWrappedNullablePointer { discr_offset, .. } => discr_offset, - _ => Size::from_bytes(0), - }; - let (lldiscrptr, alignment) = self.gepi_discr_at_offset(bcx, discr, discr_offset); - let lldiscr = bcx.load(lldiscrptr, alignment.non_abi()); + layout::RawNullablePointer { nndiscr, .. } | + layout::StructWrappedNullablePointer { nndiscr, .. } => { + let ptr = self.project_field(bcx, 0); + let lldiscr = bcx.load(ptr.llval, ptr.alignment.non_abi()); let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; - bcx.icmp(cmp, lldiscr, C_null(Type::from_primitive(bcx.ccx, discr))) + bcx.icmp(cmp, lldiscr, C_null(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx())))) }, _ => bug!("{} is not an enum", l.ty) }; @@ -434,14 +424,14 @@ impl<'a, 'tcx> LvalueRef<'tcx> { | layout::Vector { .. } => { assert_eq!(to, 0); } - layout::RawNullablePointer { nndiscr, discr, .. } | - layout::StructWrappedNullablePointer { nndiscr, discr, .. } => { + layout::RawNullablePointer { nndiscr, .. } | + layout::StructWrappedNullablePointer { nndiscr, .. } => { if to != nndiscr { - let (use_memset, discr_offset) = match *l { - layout::StructWrappedNullablePointer { discr_offset, .. } => { - (target_sets_discr_via_memset(bcx), discr_offset) + let use_memset = match *l { + layout::StructWrappedNullablePointer { .. 
} => { + target_sets_discr_via_memset(bcx) } - _ => (false, Size::from_bytes(0)), + _ => false, }; if use_memset { // Issue #34427: As workaround for LLVM bug on @@ -454,10 +444,9 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let align = C_u32(bcx.ccx, align.abi() as u32); base::call_memset(bcx, llptr, fill_byte, size, align, false); } else { - let (lldiscrptr, alignment) = - self.gepi_discr_at_offset(bcx, discr, discr_offset); - bcx.store(C_null(Type::from_primitive(bcx.ccx, discr)), - lldiscrptr, alignment.non_abi()); + let ptr = self.project_field(bcx, 0); + bcx.store(C_null(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx()))), + ptr.llval, ptr.alignment.non_abi()); } } } diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index 2359aa811fa75..bb8f3f23108ec 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -287,14 +287,4 @@ impl Type { I128 => Type::i128(cx), } } - - pub fn from_primitive(cx: &CrateContext, p: layout::Primitive) -> Type { - use rustc::ty::layout::Primitive::*; - match p { - Int(i) => Type::from_integer(cx, i), - F32 => Type::f32(cx), - F64 => Type::f64(cx), - Pointer => Type::i8p(cx), - } - } } diff --git a/src/test/run-pass/packed-struct-optimized-enum.rs b/src/test/run-pass/packed-struct-optimized-enum.rs new file mode 100644 index 0000000000000..1179f16daa238 --- /dev/null +++ b/src/test/run-pass/packed-struct-optimized-enum.rs @@ -0,0 +1,25 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#[repr(packed)] +#[derive(Copy, Clone)] +struct Packed(T); + +fn main() { + let one = (Some(Packed((&(), 0))), true); + let two = [one, one]; + let stride = (&two[1] as *const _ as usize) - (&two[0] as *const _ as usize); + + // This can fail if rustc and LLVM disagree on the size of a type. + // In this case, `Option>` was erronously not + // marked as packed despite needing alignment `1` and containing + // its `&()` discriminant, which has alignment larger than `1`. + assert_eq!(stride, std::mem::size_of_val(&one)); +} From caef91d7c6dcfc307b9e915bd9a80a25063cce22 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Thu, 14 Sep 2017 22:50:18 +0300 Subject: [PATCH 20/69] rustc: introduce layout::Abi for reduced general ABI "passing style". --- src/librustc/ty/layout.rs | 59 ++++++++++++++-- src/librustc_trans/abi.rs | 108 +++++++++--------------------- src/librustc_trans/cabi_x86_64.rs | 46 +++++-------- src/librustc_trans/common.rs | 19 ++---- 4 files changed, 105 insertions(+), 127 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index e7e0d08b69c20..99dd73e03c752 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1054,6 +1054,15 @@ impl<'a> FieldPlacement<'a> { } } +/// Describes how values of the type are passed by target ABIs, +/// in terms of categories of C types there are ABI rules for. +#[derive(Copy, Clone, Debug)] +pub enum Abi { + Scalar(Primitive), + Vector, + Aggregate +} + /// Type layout, from which size and alignment can be cheaply computed. /// For ADTs, it also includes field placement and enum optimizations. 
/// NOTE: Because Layout is interned, redundant information should be @@ -1172,6 +1181,7 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { pub struct CachedLayout<'tcx> { pub layout: &'tcx Layout, pub fields: FieldPlacement<'tcx>, + pub abi: Abi, } fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -1258,9 +1268,24 @@ impl<'a, 'tcx> Layout { } } }; + let abi = match *layout { + Scalar { value, .. } | + RawNullablePointer { discr: value, .. } => Abi::Scalar(value), + CEnum { discr, .. } => Abi::Scalar(Int(discr)), + + Vector { .. } => Abi::Vector, + + Array { .. } | + FatPointer { .. } | + Univariant(_) | + UntaggedUnion(_) | + General { .. } | + StructWrappedNullablePointer { .. } => Abi::Aggregate + }; Ok(CachedLayout { layout, - fields + fields, + abi }) }; assert!(!ty.has_infer_types()); @@ -1670,7 +1695,8 @@ impl<'a, 'tcx> Layout { let layout = cx.layout_of(normalized)?; return Ok(CachedLayout { layout: layout.layout, - fields: layout.fields + fields: layout.fields, + abi: layout.abi }); } ty::TyParam(_) => { @@ -2158,6 +2184,7 @@ pub struct FullLayout<'tcx> { pub variant_index: Option, pub layout: &'tcx Layout, pub fields: FieldPlacement<'tcx>, + pub abi: Abi, } impl<'tcx> Deref for FullLayout<'tcx> { @@ -2225,7 +2252,8 @@ impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx ty, variant_index: None, layout: cached.layout, - fields: cached.fields + fields: cached.fields, + abi: cached.abi }) } } @@ -2255,7 +2283,8 @@ impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, ty, variant_index: None, layout: cached.layout, - fields: cached.fields + fields: cached.fields, + abi: cached.abi }) } } @@ -2492,9 +2521,29 @@ impl<'gcx> HashStable> for FieldPlacement<'gcx> { } } +impl<'gcx> HashStable> for Abi { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + use ty::layout::Abi::*; + mem::discriminant(self).hash_stable(hcx, hasher); + + match *self { + Scalar(value) => { + 
value.hash_stable(hcx, hasher); + } + Vector => { + } + Aggregate => { + } + } + } +} + impl_stable_hash_for!(struct ::ty::layout::CachedLayout<'tcx> { layout, - fields + fields, + abi }); impl_stable_hash_for!(enum ::ty::layout::Integer { diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 1b4cbd687df4b..8be2cb2a1d326 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -40,7 +40,7 @@ use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc_back::PanicStrategy; use libc::c_uint; -use std::iter; +use std::{cmp, iter}; pub use syntax::abi::Abi; pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; @@ -276,26 +276,17 @@ pub trait LayoutExt<'tcx> { impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { fn is_aggregate(&self) -> bool { - match *self.layout { - Layout::Scalar { .. } | - Layout::RawNullablePointer { .. } | - Layout::CEnum { .. } | - Layout::Vector { .. } => false, - - Layout::Array { .. } | - Layout::FatPointer { .. } | - Layout::Univariant { .. } | - Layout::UntaggedUnion { .. } | - Layout::General { .. } | - Layout::StructWrappedNullablePointer { .. } => true + match self.abi { + layout::Abi::Scalar(_) | + layout::Abi::Vector => false, + layout::Abi::Aggregate => true } } fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option { - match *self.layout { - // The primitives for this algorithm. - Layout::Scalar { value, .. } | - Layout::RawNullablePointer { discr: value, .. } => { + match self.abi { + // The primitive for this algorithm. + layout::Abi::Scalar(value) => { let kind = match value { layout::Int(_) | layout::Pointer => RegKind::Integer, @@ -308,70 +299,35 @@ impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { }) } - Layout::CEnum { .. } => { - Some(Reg { - kind: RegKind::Integer, - size: self.size(ccx) - }) - } - - Layout::Vector { .. } => { + layout::Abi::Vector => { Some(Reg { kind: RegKind::Vector, size: self.size(ccx) }) } - Layout::Array { count, .. 
} => { - if count > 0 { - self.field(ccx, 0).homogeneous_aggregate(ccx) - } else { - None + layout::Abi::Aggregate => { + if let Layout::Array { count, .. } = *self.layout { + if count > 0 { + return self.field(ccx, 0).homogeneous_aggregate(ccx); + } } - } - Layout::Univariant(ref variant) => { - let mut unaligned_offset = Size::from_bytes(0); + let mut total = Size::from_bytes(0); let mut result = None; - for i in 0..self.fields.count() { - if unaligned_offset != variant.offsets[i] { - return None; + let is_union = match self.fields { + layout::FieldPlacement::Linear { stride, .. } => { + stride.bytes() == 0 } + layout::FieldPlacement::Arbitrary { .. } => false + }; - let field = self.field(ccx, i); - match (result, field.homogeneous_aggregate(ccx)) { - // The field itself must be a homogeneous aggregate. - (_, None) => return None, - // If this is the first field, record the unit. - (None, Some(unit)) => { - result = Some(unit); - } - // For all following fields, the unit must be the same. - (Some(prev_unit), Some(unit)) => { - if prev_unit != unit { - return None; - } - } + for i in 0..self.fields.count() { + if !is_union && total != self.fields.offset(i) { + return None; } - // Keep track of the offset (without padding). - unaligned_offset += field.size(ccx); - } - - // There needs to be no padding. - if unaligned_offset != self.size(ccx) { - None - } else { - result - } - } - - Layout::UntaggedUnion { .. } => { - let mut max = Size::from_bytes(0); - let mut result = None; - - for i in 0..self.fields.count() { let field = self.field(ccx, i); match (result, field.homogeneous_aggregate(ccx)) { // The field itself must be a homogeneous aggregate. @@ -390,23 +346,20 @@ impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { // Keep track of the offset (without padding). let size = field.size(ccx); - if size > max { - max = size; + if is_union { + total = cmp::max(total, size); + } else { + total += size; } } // There needs to be no padding. 
- if max != self.size(ccx) { + if total != self.size(ccx) { None } else { result } } - - // Rust-specific types, which we can ignore for C ABIs. - Layout::FatPointer { .. } | - Layout::General { .. } | - Layout::StructWrappedNullablePointer { .. } => None } } } @@ -870,8 +823,9 @@ impl<'a, 'tcx> FnType<'tcx> { if abi == Abi::Rust || abi == Abi::RustCall || abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { let fixup = |arg: &mut ArgType<'tcx>| { - if !arg.layout.is_aggregate() { - return; + match arg.layout.abi { + layout::Abi::Aggregate => {} + _ => return } let size = arg.layout.size(ccx); diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 95470b075640b..2c3df8f02e6fb 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -64,9 +64,8 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) return Ok(()); } - match *layout { - Layout::Scalar { value, .. } | - Layout::RawNullablePointer { discr: value, .. } => { + match layout.abi { + layout::Abi::Scalar(value) => { let reg = match value { layout::Int(_) | layout::Pointer => Class::Int, @@ -76,47 +75,32 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) unify(cls, off, reg); } - Layout::CEnum { .. } => { - unify(cls, off, Class::Int); - } - - Layout::Vector { element, count } => { + layout::Abi::Vector => { unify(cls, off, Class::Sse); // everything after the first one is the upper // half of a register. - let eltsz = element.size(ccx); - for i in 1..count { - unify(cls, off + eltsz * i, Class::SseUp); + let eltsz = layout.field(ccx, 0).size(ccx); + for i in 1..layout.fields.count() { + unify(cls, off + eltsz * (i as u64), Class::SseUp); } } - Layout::Array { count, .. 
} => { - if count > 0 { - let elt = layout.field(ccx, 0); - let eltsz = elt.size(ccx); - for i in 0..count { - classify(ccx, elt, cls, off + eltsz * i)?; - } + layout::Abi::Aggregate => { + // FIXME(eddyb) have to work around Rust enums for now. + // Fix is either guarantee no data where there is no field, + // by putting variants in fields, or be more clever. + match *layout { + Layout::General { .. } | + Layout::StructWrappedNullablePointer { .. } => return Err(Memory), + _ => {} } - } - - Layout::Univariant(ref variant) => { for i in 0..layout.fields.count() { - let field_off = off + variant.offsets[i]; + let field_off = off + layout.fields.offset(i); classify(ccx, layout.field(ccx, i), cls, field_off)?; } } - Layout::UntaggedUnion { .. } => { - for i in 0..layout.fields.count() { - classify(ccx, layout.field(ccx, i), cls, off)?; - } - } - - Layout::FatPointer { .. } | - Layout::General { .. } | - Layout::StructWrappedNullablePointer { .. } => return Err(Memory) } Ok(()) diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 40ddc329dddd5..b80fded638d76 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -27,7 +27,7 @@ use type_::Type; use value::Value; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{HasDataLayout, Layout, LayoutOf}; +use rustc::ty::layout::{self, HasDataLayout, Layout, LayoutOf}; use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::hir; @@ -50,19 +50,10 @@ pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { let layout = ccx.layout_of(ty); - match *layout { - Layout::CEnum { .. } | - Layout::Scalar { .. } | - Layout::Vector { .. } => true, - - Layout::FatPointer { .. } => false, - - Layout::Array { .. } | - Layout::Univariant { .. } | - Layout::General { .. } | - Layout::UntaggedUnion { .. } | - Layout::RawNullablePointer { .. 
} | - Layout::StructWrappedNullablePointer { .. } => { + match layout.abi { + layout::Abi::Scalar(_) | layout::Abi::Vector => true, + + layout::Abi::Aggregate => { !layout.is_unsized() && layout.size(ccx).bytes() == 0 } } From 02276e9f49761b12c3f4a71f8d51777114d37a3f Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Fri, 15 Sep 2017 22:42:23 +0300 Subject: [PATCH 21/69] rustc: collapse Layout::{Raw,StructWrapped}NullablePointer into one variant. --- src/librustc/ty/layout.rs | 89 +++++++------------- src/librustc_trans/adt.rs | 22 ++--- src/librustc_trans/cabi_x86_64.rs | 2 +- src/librustc_trans/debuginfo/metadata.rs | 77 +---------------- src/librustc_trans/mir/constant.rs | 10 +-- src/librustc_trans/mir/lvalue.rs | 82 +++++++++--------- src/librustc_trans/mir/operand.rs | 3 +- src/librustc_trans/type_of.rs | 10 ++- src/test/ui/print_type_sizes/nullable.stdout | 1 + 9 files changed, 91 insertions(+), 205 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 99dd73e03c752..aebe0c852b365 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1133,24 +1133,14 @@ pub enum Layout { }, /// Two cases distinguished by a nullable pointer: the case with discriminant - /// `nndiscr` must have single field which is known to be nonnull due to its type. - /// The other case is known to be zero sized. Hence we represent the enum - /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant, - /// otherwise it indicates the other case. + /// `nndiscr` is represented by the struct `nonnull`, where the field at the + /// `discr_offset` offset is known to be nonnull due to its type; if that field is null, then + /// it represents the other case, which is known to be zero sized. /// /// For example, `std::option::Option` instantiated at a safe pointer type /// is represented such that `None` is a null pointer and `Some` is the /// identity function. 
- RawNullablePointer { - nndiscr: u64, - discr: Primitive - }, - - /// Two cases distinguished by a nullable pointer: the case with discriminant - /// `nndiscr` is represented by the struct `nonnull`, where the field at the - /// `discr_offset` offset is known to be nonnull due to its type; if that field is null, then - /// it represents the other case, which is known to be zero sized. - StructWrappedNullablePointer { + NullablePointer { nndiscr: u64, nonnull: Struct, discr: Primitive, @@ -1259,18 +1249,16 @@ impl<'a, 'tcx> Layout { FieldPlacement::union(def.struct_variant().fields.len()) } - General { .. } | - RawNullablePointer { .. } => FieldPlacement::union(1), + General { .. } => FieldPlacement::union(1), - StructWrappedNullablePointer { ref discr_offset, .. } => { + NullablePointer { ref discr_offset, .. } => { FieldPlacement::Arbitrary { offsets: ref_slice(discr_offset) } } }; let abi = match *layout { - Scalar { value, .. } | - RawNullablePointer { discr: value, .. } => Abi::Scalar(value), + Scalar { value, .. } => Abi::Scalar(value), CEnum { discr, .. } => Abi::Scalar(Int(discr)), Vector { .. } => Abi::Vector, @@ -1279,8 +1267,15 @@ impl<'a, 'tcx> Layout { FatPointer { .. } | Univariant(_) | UntaggedUnion(_) | - General { .. } | - StructWrappedNullablePointer { .. } => Abi::Aggregate + General { .. } => Abi::Aggregate, + + NullablePointer { discr, discr_offset, .. } => { + if discr_offset.bytes() == 0 && discr.size(cx) == layout.size(cx) { + Abi::Scalar(discr) + } else { + Abi::Aggregate + } + } }; Ok(CachedLayout { layout, @@ -1562,15 +1557,6 @@ impl<'a, 'tcx> Layout { // out of arrays with just the indexing operator. let mut st = if discr == 0 { st0 } else { st1 }; - // FIXME(eddyb) should take advantage of a newtype. 
- if offset.bytes() == 0 && primitive.size(dl) == st.stride() && - variants[discr].len() == 1 { - return success(RawNullablePointer { - nndiscr: discr as u64, - discr: primitive, - }); - } - let mut discr_align = primitive.align(dl); if offset.abi_align(discr_align) != offset { st.packed = true; @@ -1579,7 +1565,7 @@ impl<'a, 'tcx> Layout { st.align = st.align.max(discr_align); st.primitive_align = st.primitive_align.max(discr_align); - return success(StructWrappedNullablePointer { + return success(NullablePointer { nndiscr: discr as u64, nonnull: st, discr: primitive, @@ -1715,8 +1701,7 @@ impl<'a, 'tcx> Layout { match *self { Scalar {..} | Vector {..} | FatPointer {..} | CEnum {..} | UntaggedUnion {..} | General {..} | - RawNullablePointer {..} | - StructWrappedNullablePointer {..} => false, + NullablePointer {..} => false, Array { sized, .. } | Univariant(Struct { sized, .. }) => !sized @@ -1727,7 +1712,7 @@ impl<'a, 'tcx> Layout { let dl = cx.data_layout(); match *self { - Scalar { value, .. } | RawNullablePointer { discr: value, .. } => { + Scalar { value, .. } => { value.size(dl) } @@ -1760,7 +1745,7 @@ impl<'a, 'tcx> Layout { UntaggedUnion(ref un) => un.stride(), Univariant(ref variant) | - StructWrappedNullablePointer { nonnull: ref variant, .. } => { + NullablePointer { nonnull: ref variant, .. } => { variant.stride() } } @@ -1770,7 +1755,7 @@ impl<'a, 'tcx> Layout { let dl = cx.data_layout(); match *self { - Scalar { value, .. } | RawNullablePointer { discr: value, .. } => { + Scalar { value, .. } => { value.align(dl) } @@ -1794,7 +1779,7 @@ impl<'a, 'tcx> Layout { UntaggedUnion(ref un) => un.align, Univariant(ref variant) | - StructWrappedNullablePointer { nonnull: ref variant, .. } => { + NullablePointer { nonnull: ref variant, .. } => { variant.align } } @@ -1809,7 +1794,7 @@ impl<'a, 'tcx> Layout { match *self { Array { primitive_align, .. } | General { primitive_align, .. 
} => primitive_align, Univariant(ref variant) | - StructWrappedNullablePointer { nonnull: ref variant, .. } => { + NullablePointer { nonnull: ref variant, .. } => { variant.primitive_align }, @@ -1924,11 +1909,11 @@ impl<'a, 'tcx> Layout { }; match *layout { - Layout::StructWrappedNullablePointer { nonnull: ref variant_layout, - nndiscr, - discr: _, - discr_offset: _ } => { - debug!("print-type-size t: `{:?}` adt struct-wrapped nullable nndiscr {} is {:?}", + Layout::NullablePointer { nonnull: ref variant_layout, + nndiscr, + discr: _, + discr_offset: _ } => { + debug!("print-type-size t: `{:?}` adt nullable nndiscr {} is {:?}", ty, nndiscr, variant_layout); let variant_def = &adt_def.variants[nndiscr as usize]; let fields: Vec<_> = @@ -1941,13 +1926,6 @@ impl<'a, 'tcx> Layout { &fields, variant_layout)]); } - Layout::RawNullablePointer { nndiscr, discr } => { - debug!("print-type-size t: `{:?}` adt raw nullable nndiscr {} is {:?}", - ty, nndiscr, discr); - let variant_def = &adt_def.variants[nndiscr as usize]; - record(adt_kind.into(), None, - vec![build_primitive_info(variant_def.name, &discr)]); - } Layout::Univariant(ref variant_layout) => { let variant_names = || { adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::>() @@ -2314,7 +2292,7 @@ impl<'a, 'tcx> FullLayout<'tcx> { } } - StructWrappedNullablePointer { nndiscr, ref nonnull, .. } + NullablePointer { nndiscr, ref nonnull, .. } if nndiscr as usize == variant_index => { FieldPlacement::Arbitrary { offsets: &nonnull.offsets @@ -2402,8 +2380,7 @@ impl<'a, 'tcx> FullLayout<'tcx> { General { discr, .. } => { return [discr.to_ty(tcx, false)][i]; } - RawNullablePointer { discr, .. } | - StructWrappedNullablePointer { discr, .. } => { + NullablePointer { discr, .. 
} => { return [discr.to_ty(tcx)][i]; } _ if def.variants.len() > 1 => return [][i], @@ -2483,11 +2460,7 @@ impl<'gcx> HashStable> for Layout align.hash_stable(hcx, hasher); primitive_align.hash_stable(hcx, hasher); } - RawNullablePointer { nndiscr, ref discr } => { - nndiscr.hash_stable(hcx, hasher); - discr.hash_stable(hcx, hasher); - } - StructWrappedNullablePointer { + NullablePointer { nndiscr, ref nonnull, ref discr, diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 7f64019550634..871d25c046849 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -69,10 +69,11 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let l = cx.layout_of(t); debug!("finish_type_of: {} with layout {:#?}", t, l); match *l { - layout::CEnum { .. } | layout::General { .. } - | layout::UntaggedUnion { .. } | layout::RawNullablePointer { .. } => { } - layout::Univariant { ..} - | layout::StructWrappedNullablePointer { .. } => { + layout::CEnum { .. } | layout::General { .. } | layout::UntaggedUnion { .. } => { } + layout::Univariant { ..} | layout::NullablePointer { .. } => { + if let layout::Abi::Scalar(_) = l.abi { + return; + } let (variant_layout, variant) = match *l { layout::Univariant(ref variant) => { let is_enum = if let ty::TyAdt(def, _) = t.sty { @@ -86,7 +87,7 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, (l, variant) } } - layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => + layout::NullablePointer { nndiscr, ref nonnull, .. } => (l.for_variant(nndiscr as usize), nonnull), _ => unreachable!() }; @@ -103,15 +104,10 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, debug!("adt::generic_type_of t: {:?} name: {:?}", t, name); match *l { layout::CEnum { discr, .. } => Type::from_integer(cx, discr), - layout::RawNullablePointer { nndiscr, .. } => { - let nnfield = l.for_variant(nndiscr as usize).field(cx, 0); - if let layout::Scalar { value: layout::Pointer, .. 
} = *nnfield { - Type::i8p(cx) - } else { - cx.llvm_type_of(nnfield.ty) + layout::NullablePointer { nndiscr, ref nonnull, .. } => { + if let layout::Abi::Scalar(_) = l.abi { + return cx.llvm_type_of(l.field(cx, 0).ty); } - } - layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => { match name { None => { Type::struct_(cx, &struct_llfields(cx, l.for_variant(nndiscr as usize), diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 2c3df8f02e6fb..6e5bc576be5f5 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -92,7 +92,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) // by putting variants in fields, or be more clever. match *layout { Layout::General { .. } | - Layout::StructWrappedNullablePointer { .. } => return Err(Memory), + Layout::NullablePointer { .. } => return Err(Memory), _ => {} } for i in 0..layout.fields.count() { diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index e08141f2fc39e..e8af81a3d3bde 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1142,7 +1142,6 @@ struct EnumMemberDescriptionFactory<'tcx> { type_rep: FullLayout<'tcx>, discriminant_type_metadata: Option, containing_scope: DIScope, - file_metadata: DIFile, span: Span, } @@ -1218,76 +1217,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { ] } } - layout::RawNullablePointer { nndiscr, .. } => { - // As far as debuginfo is concerned, the pointer this enum - // represents is still wrapped in a struct. This is to make the - // DWARF representation of enums uniform. 
- - // First create a description of the artificial wrapper struct: - let non_null_variant = &adt.variants[nndiscr as usize]; - let non_null_variant_name = non_null_variant.name.as_str(); - - // The llvm type and metadata of the pointer - let nnfield = self.type_rep.for_variant(nndiscr as usize).field(cx, 0); - let (size, align) = nnfield.size_and_align(cx); - let non_null_type_metadata = type_metadata(cx, nnfield.ty, self.span); - - // For the metadata of the wrapper struct, we need to create a - // MemberDescription of the struct's single field. - let sole_struct_member_description = MemberDescription { - name: match non_null_variant.ctor_kind { - CtorKind::Fn => "__0".to_string(), - CtorKind::Fictive => { - non_null_variant.fields[0].name.to_string() - } - CtorKind::Const => bug!() - }, - type_metadata: non_null_type_metadata, - offset: Size::from_bytes(0), - size, - align, - flags: DIFlags::FlagZero - }; - - let unique_type_id = debug_context(cx).type_map - .borrow_mut() - .get_unique_type_id_of_enum_variant( - cx, - self.enum_type, - &non_null_variant_name); - - // Now we can create the metadata of the artificial struct - let artificial_struct_metadata = - composite_type_metadata(cx, - nnfield.ty, - &non_null_variant_name, - unique_type_id, - &[sole_struct_member_description], - self.containing_scope, - self.file_metadata, - syntax_pos::DUMMY_SP); - - // Encode the information about the null variant in the union - // member's name. - let null_variant_name = adt.variants[(1 - nndiscr) as usize].name; - let union_member_name = format!("RUST$ENCODED$ENUM${}${}", - 0, - null_variant_name); - - // Finally create the (singleton) list of descriptions of union - // members. 
- vec![ - MemberDescription { - name: union_member_name, - type_metadata: artificial_struct_metadata, - offset: Size::from_bytes(0), - size, - align, - flags: DIFlags::FlagZero - } - ] - }, - layout::StructWrappedNullablePointer { + layout::NullablePointer { nonnull: ref struct_def, nndiscr, discr, @@ -1566,9 +1496,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, layout::CEnum { discr, signed, .. } => { return FinalMetadata(discriminant_type_metadata(discr, signed)) }, - layout::RawNullablePointer { .. } | - layout::StructWrappedNullablePointer { .. } | - layout::Univariant { .. } => None, + layout::NullablePointer { .. } | layout::Univariant { .. } => None, layout::General { discr, .. } => Some(discriminant_type_metadata(discr, false)), ref l @ _ => bug!("Not an enum layout: {:#?}", l) }; @@ -1604,7 +1532,6 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, type_rep, discriminant_type_metadata, containing_scope, - file_metadata, span, }), ); diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index badc9100c40a6..befb5409e46f0 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1127,15 +1127,7 @@ fn trans_const_adt<'a, 'tcx>( layout::Vector { .. } => { Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::>()), t) } - layout::RawNullablePointer { nndiscr, .. } => { - if variant_index as u64 == nndiscr { - assert_eq!(vals.len(), 1); - Const::new(vals[0].llval, t) - } else { - Const::new(C_null(ccx.llvm_type_of(t)), t) - } - } - layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { + layout::NullablePointer { ref nonnull, nndiscr, .. 
} => { if variant_index as u64 == nndiscr { build_const_struct(ccx, l, &nonnull, vals, None) } else { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index cb4abc61c6221..1b0486dbf9642 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -18,7 +18,7 @@ use abi; use adt; use base; use builder::Builder; -use common::{self, CrateContext, C_usize, C_u8, C_u32, C_int, C_null, val_ty}; +use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, val_ty}; use consts; use type_of::LayoutLlvmExt; use type_::Type; @@ -210,17 +210,33 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let alignment = self.alignment | Alignment::from(&*l); - // Handle all the non-aggregate cases first. + // Unions and newtypes only use an offset of 0. match *l { - layout::UntaggedUnion { .. } => { + // FIXME(eddyb) The fields of a fat pointer aren't correct, especially + // to unsized structs, we can't represent their pointee types in `Ty`. + Layout::FatPointer { .. } => {} + + _ if offset == 0 => { let ty = ccx.llvm_type_of(field.ty); - return LvalueRef::new_sized( - bcx.pointercast(self.llval, ty.ptr_to()), field.ty, alignment); + return LvalueRef { + llval: bcx.pointercast(self.llval, ty.ptr_to()), + llextra: if field.is_unsized() { + self.llextra + } else { + ptr::null_mut() + }, + ty: LvalueTy::from_ty(field.ty), + alignment, + }; } - // Discriminant field of enums. + + _ => {} + } + + // Discriminant field of enums. + match *l { layout::General { .. } | - layout::RawNullablePointer { .. } | - layout::StructWrappedNullablePointer { .. } if l.variant_index.is_none() => { + layout::NullablePointer { .. } if l.variant_index.is_none() => { let ty = ccx.llvm_type_of(field.ty); let size = field.size(ccx).bytes(); @@ -239,22 +255,6 @@ impl<'a, 'tcx> LvalueRef<'tcx> { bcx.inbounds_gep(discr_ptr, &[C_usize(ccx, offset / size)]), field.ty, alignment); } - layout::RawNullablePointer { nndiscr, .. 
} | - layout::StructWrappedNullablePointer { nndiscr, .. } - if l.variant_index.unwrap() as u64 != nndiscr => { - // The unit-like case might have a nonzero number of unit-like fields. - // (e.d., Result of Either with (), as one side.) - let ty = ccx.llvm_type_of(field.ty); - assert_eq!(field.size(ccx).bytes(), 0); - return LvalueRef::new_sized( - bcx.pointercast(self.llval, ty.ptr_to()), field.ty, - Alignment::Packed(Align::from_bytes(1, 1).unwrap())); - } - layout::RawNullablePointer { .. } => { - let ty = ccx.llvm_type_of(field.ty); - return LvalueRef::new_sized( - bcx.pointercast(self.llval, ty.ptr_to()), field.ty, alignment); - } _ => {} } @@ -274,7 +274,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // Check whether the variant being used is packed, if applicable. let is_packed = match (&*l, l.variant_index) { (&layout::Univariant(ref variant), _) => variant.packed, - (&layout::StructWrappedNullablePointer { ref nonnull, .. }, _) => nonnull.packed, + (&layout::NullablePointer { ref nonnull, .. }, _) => nonnull.packed, (&layout::General { ref variants, .. }, Some(v)) => variants[v].packed, _ => return simple() }; @@ -351,10 +351,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } /// Helper for cases where the discriminant is simply loaded. - fn load_discr(self, bcx: &Builder, ity: layout::Integer, ptr: ValueRef, - min: u64, max: u64) -> ValueRef { - let llty = Type::from_integer(bcx.ccx, ity); - assert_eq!(val_ty(ptr), llty.ptr_to()); + fn load_discr(self, bcx: &Builder, ity: layout::Integer, min: u64, max: u64) -> ValueRef { let bits = ity.size().bits(); assert!(bits <= 64); let bits = bits as usize; @@ -366,11 +363,11 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // rejected by the LLVM verifier (it would mean either an // empty set, which is impossible, or the entire range of the // type, which is pointless). 
- bcx.load(ptr, self.alignment.non_abi()) + bcx.load(self.llval, self.alignment.non_abi()) } else { // llvm::ConstantRange can deal with ranges that wrap around, // so an overflow on (max + 1) is fine. - bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ llvm::True, + bcx.load_range_assert(self.llval, min, max.wrapping_add(1), /* signed: */ llvm::True, self.alignment.non_abi()) } } @@ -379,17 +376,18 @@ impl<'a, 'tcx> LvalueRef<'tcx> { pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef { let l = bcx.ccx.layout_of(self.ty.to_ty(bcx.tcx())); + let cast_to = bcx.ccx.immediate_llvm_type_of(cast_to); let val = match *l { + layout::Univariant { .. } | + layout::UntaggedUnion { .. } => return C_uint(cast_to, 0), layout::CEnum { discr, min, max, .. } => { - self.load_discr(bcx, discr, self.llval, min, max) + self.load_discr(bcx, discr, min, max) } layout::General { discr, ref variants, .. } => { let ptr = self.project_field(bcx, 0); - self.load_discr(bcx, discr, ptr.llval, 0, variants.len() as u64 - 1) + ptr.load_discr(bcx, discr, 0, variants.len() as u64 - 1) } - layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0), - layout::RawNullablePointer { nndiscr, .. } | - layout::StructWrappedNullablePointer { nndiscr, .. } => { + layout::NullablePointer { nndiscr, .. } => { let ptr = self.project_field(bcx, 0); let lldiscr = bcx.load(ptr.llval, ptr.alignment.non_abi()); let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; @@ -397,7 +395,6 @@ impl<'a, 'tcx> LvalueRef<'tcx> { }, _ => bug!("{} is not an enum", l.ty) }; - let cast_to = bcx.ccx.immediate_llvm_type_of(cast_to); bcx.intcast(val, cast_to, adt::is_discr_signed(&l)) } @@ -424,14 +421,11 @@ impl<'a, 'tcx> LvalueRef<'tcx> { | layout::Vector { .. } => { assert_eq!(to, 0); } - layout::RawNullablePointer { nndiscr, .. } | - layout::StructWrappedNullablePointer { nndiscr, .. } => { + layout::NullablePointer { nndiscr, .. 
} => { if to != nndiscr { - let use_memset = match *l { - layout::StructWrappedNullablePointer { .. } => { - target_sets_discr_via_memset(bcx) - } - _ => false, + let use_memset = match l.abi { + layout::Abi::Scalar(_) => false, + _ => target_sets_discr_via_memset(bcx) }; if use_memset { // Issue #34427: As workaround for LLVM bug on diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index f43115b84d46a..11c09960a3e09 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -115,8 +115,9 @@ impl<'a, 'tcx> OperandRef<'tcx> { /// Immediate aggregate with the two values. pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Pair(a, b) = self.val { - // Reconstruct the immediate aggregate. let llty = bcx.ccx.llvm_type_of(self.ty); + debug!("Operand::pack_if_pair: packing {:?} into {:?}", self, llty); + // Reconstruct the immediate aggregate. let mut llpair = C_undef(llty); let elems = [a, b]; let layout = bcx.ccx.layout_of(self.ty); diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 06a82bb2de476..264f711de8f93 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -12,7 +12,7 @@ use abi::FnType; use adt; use common::*; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{Align, Layout, LayoutOf, Size, FullLayout}; +use rustc::ty::layout::{self, Align, Layout, LayoutOf, Size, FullLayout}; use trans_item::DefPathBasedNames; use type_::Type; @@ -237,11 +237,13 @@ pub trait LayoutLlvmExt { impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { fn llvm_field_index(&self, index: usize) -> u64 { + if let layout::Abi::Scalar(_) = self.abi { + bug!("FullLayout::llvm_field_index({:?}): not applicable", self); + } match **self { Layout::Scalar { .. } | Layout::CEnum { .. } | - Layout::UntaggedUnion { .. } | - Layout::RawNullablePointer { .. } => { + Layout::UntaggedUnion { .. 
} => { bug!("FullLayout::llvm_field_index({:?}): not applicable", self) } @@ -266,7 +268,7 @@ impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { } } - Layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => { + Layout::NullablePointer { nndiscr, ref nonnull, .. } => { if self.variant_index == Some(nndiscr as usize) { adt::memory_index_to_gep(nonnull.memory_index[index] as u64) } else { diff --git a/src/test/ui/print_type_sizes/nullable.stdout b/src/test/ui/print_type_sizes/nullable.stdout index c9cdde78a4d1b..830678f174f88 100644 --- a/src/test/ui/print_type_sizes/nullable.stdout +++ b/src/test/ui/print_type_sizes/nullable.stdout @@ -19,5 +19,6 @@ print-type-size field `.pre`: 1 bytes print-type-size end padding: 1 bytes print-type-size type: `MyOption>`: 4 bytes, alignment: 4 bytes print-type-size variant `Some`: 4 bytes +print-type-size field `.0`: 4 bytes print-type-size type: `core::nonzero::NonZero`: 4 bytes, alignment: 4 bytes print-type-size field `.0`: 4 bytes From 335bd8ea1b5929fc55911374b55623f066835850 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sat, 16 Sep 2017 10:44:27 +0300 Subject: [PATCH 22/69] rustc: do not track `non_zero` in Layout. --- src/librustc/ty/layout.rs | 95 +++++++++++++------------------- src/librustc_trans/abi.rs | 13 +---- src/librustc_trans/cabi_s390x.rs | 12 ++-- src/librustc_trans/cabi_x86.rs | 12 ++-- 4 files changed, 51 insertions(+), 81 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index aebe0c852b365..d798982f3c86e 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -838,14 +838,18 @@ impl<'a, 'tcx> Struct { -> Result, LayoutError<'tcx>> { let cx = (tcx, param_env); match (layout.layout, &layout.ty.sty) { - (&Scalar { non_zero: true, value, .. }, _) => { - Ok(Some((Size::from_bytes(0), value))) + (&Scalar(Pointer), _) if !layout.ty.is_unsafe_ptr() => { + Ok(Some((Size::from_bytes(0), Pointer))) } - (&CEnum { non_zero: true, discr, .. 
}, _) => { - Ok(Some((Size::from_bytes(0), Int(discr)))) + (&CEnum { discr, .. }, &ty::TyAdt(def, _)) => { + if def.discriminants(tcx).all(|d| d.to_u128_unchecked() != 0) { + Ok(Some((Size::from_bytes(0), Int(discr)))) + } else { + Ok(None) + } } - (&FatPointer { non_zero: true, .. }, _) => { + (&FatPointer(_), _) if !layout.ty.is_unsafe_ptr() => { Ok(Some((layout.fields.offset(FAT_PTR_ADDR), Pointer))) } @@ -853,12 +857,10 @@ impl<'a, 'tcx> Struct { (_, &ty::TyAdt(def, _)) if Some(def.did) == tcx.lang_items().non_zero() => { let field = layout.field(cx, 0)?; match *field { - // FIXME(eddyb) also allow floating-point types here. - Scalar { value: value @ Int(_), non_zero: false } | - Scalar { value: value @ Pointer, non_zero: false } => { + Scalar(value) => { Ok(Some((layout.fields.offset(0), value))) } - FatPointer { non_zero: false, .. } => { + FatPointer(_) => { Ok(Some((layout.fields.offset(0) + field.fields.offset(FAT_PTR_ADDR), Pointer))) @@ -1070,11 +1072,7 @@ pub enum Abi { #[derive(Debug, PartialEq, Eq, Hash)] pub enum Layout { /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr. - Scalar { - value: Primitive, - // If true, the value cannot represent a bit pattern of all zeroes. - non_zero: bool - }, + Scalar(Primitive), /// SIMD vectors, from structs marked with #[repr(simd)]. Vector { @@ -1092,12 +1090,8 @@ pub enum Layout { count: u64 }, - /// TyRawPtr or TyRef with a !Sized pointee. - FatPointer { - metadata: Primitive, - /// If true, the pointer cannot be null. - non_zero: bool - }, + /// TyRawPtr or TyRef with a !Sized pointee. The primitive is the metadata. + FatPointer(Primitive), // Remaining variants are all ADTs such as structs, enums or tuples. @@ -1105,7 +1099,6 @@ pub enum Layout { CEnum { discr: Integer, signed: bool, - non_zero: bool, /// Inclusive discriminant range. /// If min > max, it represents min...u64::MAX followed by 0...max. // FIXME(eddyb) always use the shortest range, e.g. 
by finding @@ -1211,7 +1204,7 @@ impl<'a, 'tcx> Layout { let success = |layout| { let layout = tcx.intern_layout(layout); let fields = match *layout { - Scalar { .. } | + Scalar(_) | CEnum { .. } => { FieldPlacement::union(0) } @@ -1258,7 +1251,7 @@ impl<'a, 'tcx> Layout { } }; let abi = match *layout { - Scalar { value, .. } => Abi::Scalar(value), + Scalar(value) => Abi::Scalar(value), CEnum { discr, .. } => Abi::Scalar(Int(discr)), Vector { .. } => Abi::Vector, @@ -1286,43 +1279,36 @@ impl<'a, 'tcx> Layout { assert!(!ty.has_infer_types()); let ptr_layout = |pointee: Ty<'tcx>| { - let non_zero = !ty.is_unsafe_ptr(); let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env); if pointee.is_sized(tcx, param_env, DUMMY_SP) { - Ok(Scalar { value: Pointer, non_zero }) + Ok(Scalar(Pointer)) } else { let unsized_part = tcx.struct_tail(pointee); let metadata = match unsized_part.sty { - ty::TyForeign(..) => return Ok(Scalar { value: Pointer, non_zero }), + ty::TyForeign(..) => return Ok(Scalar(Pointer)), ty::TySlice(_) | ty::TyStr => { Int(dl.ptr_sized_integer()) } ty::TyDynamic(..) => Pointer, _ => return Err(LayoutError::Unknown(unsized_part)) }; - Ok(FatPointer { metadata, non_zero }) + Ok(FatPointer(metadata)) } }; let layout = match ty.sty { // Basic scalars. 
- ty::TyBool => Scalar { value: Int(I1), non_zero: false }, - ty::TyChar => Scalar { value: Int(I32), non_zero: false }, + ty::TyBool => Scalar(Int(I1)), + ty::TyChar => Scalar(Int(I32)), ty::TyInt(ity) => { - Scalar { - value: Int(Integer::from_attr(dl, attr::SignedInt(ity))), - non_zero: false - } + Scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)))) } ty::TyUint(ity) => { - Scalar { - value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))), - non_zero: false - } + Scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)))) } - ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false }, - ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false }, - ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true }, + ty::TyFloat(FloatTy::F32) => Scalar(F32), + ty::TyFloat(FloatTy::F64) => Scalar(F64), + ty::TyFnPtr(_) => Scalar(Pointer), // The never type. ty::TyNever => { @@ -1430,7 +1416,7 @@ impl<'a, 'tcx> Layout { ty::TyAdt(def, ..) if def.repr.simd() => { let element = ty.simd_type(tcx); match *cx.layout_of(element)? { - Scalar { value, .. } => { + Scalar(value) => { return success(Vector { element: value, count: ty.simd_size(tcx) as u64 @@ -1456,12 +1442,9 @@ impl<'a, 'tcx> Layout { if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) { // All bodies empty -> intlike - let (mut min, mut max, mut non_zero) = (i64::max_value(), - i64::min_value(), - true); + let (mut min, mut max) = (i64::max_value(), i64::min_value()); for discr in def.discriminants(tcx) { let x = discr.to_u128_unchecked() as i64; - if x == 0 { non_zero = false; } if x < min { min = x; } if x > max { max = x; } } @@ -1472,7 +1455,6 @@ impl<'a, 'tcx> Layout { return success(CEnum { discr, signed, - non_zero, // FIXME: should be u128? min: min as u64, max: max as u64 @@ -1699,7 +1681,7 @@ impl<'a, 'tcx> Layout { /// Returns true if the layout corresponds to an unsized type. 
pub fn is_unsized(&self) -> bool { match *self { - Scalar {..} | Vector {..} | FatPointer {..} | + Scalar(_) | Vector {..} | FatPointer {..} | CEnum {..} | UntaggedUnion {..} | General {..} | NullablePointer {..} => false, @@ -1712,7 +1694,7 @@ impl<'a, 'tcx> Layout { let dl = cx.data_layout(); match *self { - Scalar { value, .. } => { + Scalar(value) => { value.size(dl) } @@ -1734,7 +1716,7 @@ impl<'a, 'tcx> Layout { } } - FatPointer { metadata, .. } => { + FatPointer(metadata) => { // Effectively a (ptr, meta) tuple. (Pointer.size(dl).abi_align(metadata.align(dl)) + metadata.size(dl)).abi_align(self.align(dl)) @@ -1755,7 +1737,7 @@ impl<'a, 'tcx> Layout { let dl = cx.data_layout(); match *self { - Scalar { value, .. } => { + Scalar(value) => { value.align(dl) } @@ -1769,7 +1751,7 @@ impl<'a, 'tcx> Layout { dl.vector_align(vec_size) } - FatPointer { metadata, .. } => { + FatPointer(metadata) => { // Effectively a (ptr, meta) tuple. Pointer.align(dl).max(metadata.align(dl)) } @@ -1993,7 +1975,7 @@ impl<'a, 'tcx> Layout { // other cases provide little interesting (i.e. adjustable // via representation tweaks) size info beyond total size. - Layout::Scalar { .. } | + Layout::Scalar(_) | Layout::Vector { .. } | Layout::Array { .. } | Layout::FatPointer { .. 
} => { @@ -2421,9 +2403,8 @@ impl<'gcx> HashStable> for Layout mem::discriminant(self).hash_stable(hcx, hasher); match *self { - Scalar { value, non_zero } => { + Scalar(ref value) => { value.hash_stable(hcx, hasher); - non_zero.hash_stable(hcx, hasher); } Vector { element, count } => { element.hash_stable(hcx, hasher); @@ -2436,14 +2417,12 @@ impl<'gcx> HashStable> for Layout element_size.hash_stable(hcx, hasher); count.hash_stable(hcx, hasher); } - FatPointer { ref metadata, non_zero } => { + FatPointer(ref metadata) => { metadata.hash_stable(hcx, hasher); - non_zero.hash_stable(hcx, hasher); } - CEnum { discr, signed, non_zero, min, max } => { + CEnum { discr, signed, min, max } => { discr.hash_stable(hcx, hasher); signed.hash_stable(hcx, hasher); - non_zero.hash_stable(hcx, hasher); min.hash_stable(hcx, hasher); max.hash_stable(hcx, hasher); } diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 8be2cb2a1d326..c3b6c1cce67fb 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -468,17 +468,8 @@ impl<'a, 'tcx> ArgType<'tcx> { pub fn extend_integer_width_to(&mut self, bits: u64) { // Only integers have signedness let (i, signed) = match *self.layout { - Layout::Scalar { value, .. 
} => { - match value { - layout::Int(i) => { - if self.layout.ty.is_integral() { - (i, self.layout.ty.is_signed()) - } else { - return; - } - } - _ => return - } + Layout::Scalar(layout::Int(i)) if self.layout.ty.is_integral() => { + (i, self.layout.ty.is_signed()) } // Rust enum types that map onto C enums also need to follow diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs index 4db33b9204a1e..a45fe662bd65d 100644 --- a/src/librustc_trans/cabi_s390x.rs +++ b/src/librustc_trans/cabi_s390x.rs @@ -14,7 +14,7 @@ use abi::{FnType, ArgType, LayoutExt, Reg}; use context::CrateContext; -use rustc::ty::layout::{self, Layout, FullLayout}; +use rustc::ty::layout::{self, FullLayout}; fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { if !ret.layout.is_aggregate() && ret.layout.size(ccx).bits() <= 64 { @@ -26,11 +26,11 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, layout: FullLayout<'tcx>) -> bool { - match *layout { - Layout::Scalar { value: layout::F32, .. } | - Layout::Scalar { value: layout::F64, .. } => true, - Layout::Univariant { .. 
} => { - if layout.fields.count() == 1 { + match layout.abi { + layout::Abi::Scalar(layout::F32) | + layout::Abi::Scalar(layout::F64) => true, + layout::Abi::Aggregate => { + if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 { is_single_fp_element(ccx, layout.field(ccx, 0)) } else { false diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index 362ceb6060139..bc7c9a3ed0565 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -11,7 +11,7 @@ use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind}; use common::CrateContext; -use rustc::ty::layout::{self, Layout, FullLayout}; +use rustc::ty::layout::{self, FullLayout}; #[derive(PartialEq)] pub enum Flavor { @@ -21,11 +21,11 @@ pub enum Flavor { fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, layout: FullLayout<'tcx>) -> bool { - match *layout { - Layout::Scalar { value: layout::F32, .. } | - Layout::Scalar { value: layout::F64, .. } => true, - Layout::Univariant { .. } => { - if layout.fields.count() == 1 { + match layout.abi { + layout::Abi::Scalar(layout::F32) | + layout::Abi::Scalar(layout::F64) => true, + layout::Abi::Aggregate => { + if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 { is_single_fp_element(ccx, layout.field(ccx, 0)) } else { false From 61c2bd9ca457d421d2388136d40194ac9b9eb477 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sat, 16 Sep 2017 12:23:22 +0300 Subject: [PATCH 23/69] rustc: use Primitive instead of Integer for CEnum and General discriminants. 
--- src/librustc/ty/layout.rs | 31 +++++++-------- src/librustc_lint/types.rs | 4 +- src/librustc_trans/abi.rs | 2 +- src/librustc_trans/adt.rs | 10 +---- src/librustc_trans/debuginfo/metadata.rs | 11 ++++-- src/librustc_trans/debuginfo/mod.rs | 2 +- src/librustc_trans/mir/constant.rs | 12 +++--- src/librustc_trans/mir/lvalue.rs | 48 ++++++++++++------------ 8 files changed, 57 insertions(+), 63 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index d798982f3c86e..65efc3b5186f0 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -843,7 +843,7 @@ impl<'a, 'tcx> Struct { } (&CEnum { discr, .. }, &ty::TyAdt(def, _)) => { if def.discriminants(tcx).all(|d| d.to_u128_unchecked() != 0) { - Ok(Some((Size::from_bytes(0), Int(discr)))) + Ok(Some((Size::from_bytes(0), discr))) } else { Ok(None) } @@ -1097,7 +1097,7 @@ pub enum Layout { /// C-like enums; basically an integer. CEnum { - discr: Integer, + discr: Primitive, signed: bool, /// Inclusive discriminant range. /// If min > max, it represents min...u64::MAX followed by 0...max. @@ -1118,7 +1118,7 @@ pub enum Layout { /// all space reserved for the discriminant, and their first field starts /// at a non-0 offset, after where the discriminant would go. General { - discr: Integer, + discr: Primitive, variants: Vec, size: Size, align: Align, @@ -1251,8 +1251,8 @@ impl<'a, 'tcx> Layout { } }; let abi = match *layout { - Scalar(value) => Abi::Scalar(value), - CEnum { discr, .. } => Abi::Scalar(Int(discr)), + Scalar(value) | + CEnum { discr: value, .. } => Abi::Scalar(value), Vector { .. } => Abi::Vector, @@ -1453,7 +1453,7 @@ impl<'a, 'tcx> Layout { // grok. let (discr, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); return success(CEnum { - discr, + discr: Int(discr), signed, // FIXME: should be u128? 
min: min as u64, @@ -1646,7 +1646,7 @@ impl<'a, 'tcx> Layout { } General { - discr: ity, + discr: Int(ity), variants, size, align, @@ -1722,7 +1722,7 @@ impl<'a, 'tcx> Layout { metadata.size(dl)).abi_align(self.align(dl)) } - CEnum { discr, .. } => Int(discr).size(dl), + CEnum { discr, .. } => discr.size(dl), General { size, .. } => size, UntaggedUnion(ref un) => un.stride(), @@ -1756,7 +1756,7 @@ impl<'a, 'tcx> Layout { Pointer.align(dl).max(metadata.align(dl)) } - CEnum { discr, .. } => Int(discr).align(dl), + CEnum { discr, .. } => discr.align(dl), Array { align, .. } | General { align, .. } => align, UntaggedUnion(ref un) => un.align, @@ -1858,7 +1858,7 @@ impl<'a, 'tcx> Layout { } }; - let build_primitive_info = |name: ast::Name, value: &Primitive| { + let build_primitive_info = |name: ast::Name, value: Primitive| { session::VariantInfo { name: Some(name.to_string()), kind: session::SizeKind::Exact, @@ -1951,7 +1951,7 @@ impl<'a, 'tcx> Layout { variant_layout) }) .collect(); - record(adt_kind.into(), Some(discr.size()), variant_infos); + record(adt_kind.into(), Some(discr.size(tcx)), variant_infos); } Layout::UntaggedUnion(ref un) => { @@ -1966,11 +1966,10 @@ impl<'a, 'tcx> Layout { let variant_infos: Vec<_> = adt_def.variants.iter() .map(|variant_def| { - build_primitive_info(variant_def.name, - &Primitive::Int(discr)) + build_primitive_info(variant_def.name, discr) }) .collect(); - record(adt_kind.into(), Some(discr.size()), variant_infos); + record(adt_kind.into(), Some(discr.size(tcx)), variant_infos); } // other cases provide little interesting (i.e. adjustable @@ -2359,9 +2358,7 @@ impl<'a, 'tcx> FullLayout<'tcx> { match self.variant_index { None => match *self.layout { // Discriminant field for enums (where applicable). - General { discr, .. } => { - return [discr.to_ty(tcx, false)][i]; - } + General { discr, .. } | NullablePointer { discr, .. 
} => { return [discr.to_ty(tcx)][i]; } diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index 86bd227b1af07..879d2e326d883 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -13,7 +13,7 @@ use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::ty::{self, AdtKind, Ty, TyCtxt}; -use rustc::ty::layout::{Layout, LayoutOf, Primitive}; +use rustc::ty::layout::{Layout, LayoutOf}; use middle::const_val::ConstVal; use rustc_const_eval::ConstContext; use util::nodemap::FxHashSet; @@ -754,7 +754,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences { }); if let Layout::General { ref variants, ref size, discr, .. } = *layout { - let discr_size = Primitive::Int(discr).size(cx.tcx).bytes(); + let discr_size = discr.size(cx.tcx).bytes(); debug!("enum `{}` is {} bytes large with layout:\n{:#?}", t, size.bytes(), layout); diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index c3b6c1cce67fb..45367508c94bc 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -474,7 +474,7 @@ impl<'a, 'tcx> ArgType<'tcx> { // Rust enum types that map onto C enums also need to follow // the target ABI zero-/sign-extension rules. - Layout::CEnum { discr, signed, .. } => (discr, signed), + Layout::CEnum { discr: layout::Int(i), signed, .. } => (i, signed), _ => return }; diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 871d25c046849..e718c7f6f4854 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -103,7 +103,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let l = cx.layout_of(t); debug!("adt::generic_type_of t: {:?} name: {:?}", t, name); match *l { - layout::CEnum { discr, .. } => Type::from_integer(cx, discr), + layout::CEnum { discr, .. } => cx.llvm_type_of(discr.to_ty(cx.tcx())), layout::NullablePointer { nndiscr, ref nonnull, .. 
} => { if let layout::Abi::Scalar(_) = l.abi { return cx.llvm_type_of(l.field(cx, 0).ty); @@ -236,11 +236,3 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool { _ => false, } } - -pub fn assert_discr_in_range(min: D, max: D, discr: D) { - if min <= max { - assert!(min <= discr && discr <= max) - } else { - assert!(min <= discr || discr <= max) - } -} diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index e8af81a3d3bde..9ae1ded780504 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1451,8 +1451,8 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }) .collect(); - let discriminant_type_metadata = |inttype: layout::Integer, signed: bool| { - let disr_type_key = (enum_def_id, inttype); + let discriminant_type_metadata = |discr: layout::Primitive, signed: bool| { + let disr_type_key = (enum_def_id, discr); let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types .borrow() .get(&disr_type_key).cloned(); @@ -1460,10 +1460,13 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, Some(discriminant_type_metadata) => discriminant_type_metadata, None => { let (discriminant_size, discriminant_align) = - (inttype.size(), inttype.align(cx)); + (discr.size(cx), discr.align(cx)); let discriminant_base_type_metadata = type_metadata(cx, - inttype.to_ty(cx.tcx(), signed), + match discr { + layout::Int(i) => i.to_ty(cx.tcx(), signed), + _ => discr.to_ty(cx.tcx()) + }, syntax_pos::DUMMY_SP); let discriminant_name = get_enum_discriminant_name(cx, enum_def_id); diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 53c5c82b8e3f1..8d3a0fd572510 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -71,7 +71,7 @@ pub struct CrateDebugContext<'tcx> { llmod: ModuleRef, builder: DIBuilderRef, created_files: RefCell>, - created_enum_disr_types: RefCell>, 
+ created_enum_disr_types: RefCell>, type_map: RefCell>, namespace_map: RefCell>, diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index befb5409e46f0..dd06f36947078 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1093,7 +1093,7 @@ fn trans_const_adt<'a, 'tcx>( _ => 0, }; match *l { - layout::CEnum { discr: d, min, max, .. } => { + layout::CEnum { .. } => { let discr = match *kind { mir::AggregateKind::Adt(adt_def, _, _, _) => { adt_def.discriminant_for_variant(ccx.tcx(), variant_index) @@ -1102,14 +1102,14 @@ fn trans_const_adt<'a, 'tcx>( _ => 0, }; assert_eq!(vals.len(), 0); - adt::assert_discr_in_range(min, max, discr); - Const::new(C_int(Type::from_integer(ccx, d), discr as i64), t) + Const::new(C_int(ccx.llvm_type_of(t), discr as i64), t) } - layout::General { discr: d, ref variants, .. } => { + layout::General { ref variants, .. } => { + let discr_ty = l.field(ccx, 0).ty; let variant = &variants[variant_index]; - let lldiscr = C_int(Type::from_integer(ccx, d), variant_index as i64); + let lldiscr = C_int(ccx.llvm_type_of(discr_ty), variant_index as i64); build_const_struct(ccx, l, &variant, vals, - Some(Const::new(lldiscr, d.to_ty(ccx.tcx(), false)))) + Some(Const::new(lldiscr, discr_ty))) } layout::UntaggedUnion(ref un) => { assert_eq!(variant_index, 0); diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 1b0486dbf9642..dd134138f78d5 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -351,25 +351,28 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } /// Helper for cases where the discriminant is simply loaded. - fn load_discr(self, bcx: &Builder, ity: layout::Integer, min: u64, max: u64) -> ValueRef { - let bits = ity.size().bits(); - assert!(bits <= 64); - let bits = bits as usize; - let mask = !0u64 >> (64 - bits); - // For a (max) discr of -1, max will be `-1 as usize`, which overflows. 
- // However, that is fine here (it would still represent the full range), - if max.wrapping_add(1) & mask == min & mask { - // i.e., if the range is everything. The lo==hi case would be - // rejected by the LLVM verifier (it would mean either an - // empty set, which is impossible, or the entire range of the - // type, which is pointless). - bcx.load(self.llval, self.alignment.non_abi()) - } else { - // llvm::ConstantRange can deal with ranges that wrap around, - // so an overflow on (max + 1) is fine. - bcx.load_range_assert(self.llval, min, max.wrapping_add(1), /* signed: */ llvm::True, - self.alignment.non_abi()) + fn load_discr(self, bcx: &Builder, discr: layout::Primitive, min: u64, max: u64) -> ValueRef { + if let layout::Int(ity) = discr { + let bits = ity.size().bits(); + assert!(bits <= 64); + let bits = bits as usize; + let mask = !0u64 >> (64 - bits); + // For a (max) discr of -1, max will be `-1 as usize`, which overflows. + // However, that is fine here (it would still represent the full range), + if max.wrapping_add(1) & mask == min & mask { + // i.e., if the range is everything. The lo==hi case would be + // rejected by the LLVM verifier (it would mean either an + // empty set, which is impossible, or the entire range of the + // type, which is pointless). + } else { + // llvm::ConstantRange can deal with ranges that wrap around, + // so an overflow on (max + 1) is fine. + return bcx.load_range_assert(self.llval, min, max.wrapping_add(1), + /* signed: */ llvm::True, + self.alignment.non_abi()); + } } + bcx.load(self.llval, self.alignment.non_abi()) } /// Obtain the actual discriminant of a value. @@ -406,14 +409,13 @@ impl<'a, 'tcx> LvalueRef<'tcx> { .discriminant_for_variant(bcx.tcx(), variant_index) .to_u128_unchecked() as u64; match *l { - layout::CEnum { discr, min, max, .. } => { - adt::assert_discr_in_range(min, max, to); - bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64), + layout::CEnum { .. 
} => { + bcx.store(C_int(bcx.ccx.llvm_type_of(self.ty.to_ty(bcx.tcx())), to as i64), self.llval, self.alignment.non_abi()); } - layout::General { discr, .. } => { + layout::General { .. } => { let ptr = self.project_field(bcx, 0); - bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64), + bcx.store(C_int(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx())), to as i64), ptr.llval, ptr.alignment.non_abi()); } layout::Univariant { .. } From d318b9c27b46b8d59a21c9015a6847ce1964394c Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sat, 16 Sep 2017 16:39:53 +0300 Subject: [PATCH 24/69] rustc: move CEnum's signedness into Primitive::Int. --- src/librustc/ty/layout.rs | 53 +++++++++++------------- src/librustc_trans/abi.rs | 28 +++++-------- src/librustc_trans/adt.rs | 6 --- src/librustc_trans/cabi_x86_64.rs | 2 +- src/librustc_trans/debuginfo/metadata.rs | 15 +++---- src/librustc_trans/mir/constant.rs | 10 ++--- src/librustc_trans/mir/lvalue.rs | 17 +++++--- src/librustc_trans/mir/rvalue.rs | 14 ++++--- 8 files changed, 65 insertions(+), 80 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 65efc3b5186f0..c7a49b7ae0ae6 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -498,7 +498,7 @@ impl<'a, 'tcx> Integer { let wanted = align.abi(); for &candidate in &[I8, I16, I32, I64] { - let ty = Int(candidate); + let ty = Int(candidate, false); if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() { return Some(candidate); } @@ -577,7 +577,14 @@ impl<'a, 'tcx> Integer { /// Fundamental unit of memory access and layout. #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum Primitive { - Int(Integer), + /// The `bool` is the signedness of the `Integer` type. + /// + /// One would think we would not care about such details this low down, + /// but some ABIs are described in terms of C types and ISAs where the + /// integer arithmetic is done on {sign,zero}-extended registers, e.g. 
+ /// a negative integer passed by zero-extension will appear positive in + /// the callee, and most operations on it will produce the wrong values. + Int(Integer, bool), F32, F64, Pointer @@ -588,11 +595,9 @@ impl<'a, 'tcx> Primitive { let dl = cx.data_layout(); match self { - Int(I1) | Int(I8) => Size::from_bits(8), - Int(I16) => Size::from_bits(16), - Int(I32) | F32 => Size::from_bits(32), - Int(I64) | F64 => Size::from_bits(64), - Int(I128) => Size::from_bits(128), + Int(i, _) => i.size(), + F32 => Size::from_bits(32), + F64 => Size::from_bits(64), Pointer => dl.pointer_size } } @@ -601,12 +606,7 @@ impl<'a, 'tcx> Primitive { let dl = cx.data_layout(); match self { - Int(I1) => dl.i1_align, - Int(I8) => dl.i8_align, - Int(I16) => dl.i16_align, - Int(I32) => dl.i32_align, - Int(I64) => dl.i64_align, - Int(I128) => dl.i128_align, + Int(i, _) => i.align(dl), F32 => dl.f32_align, F64 => dl.f64_align, Pointer => dl.pointer_align @@ -615,7 +615,7 @@ impl<'a, 'tcx> Primitive { pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { match *self { - Int(i) => i.to_ty(tcx, false), + Int(i, signed) => i.to_ty(tcx, signed), F32 => tcx.types.f32, F64 => tcx.types.f64, Pointer => tcx.mk_mut_ptr(tcx.mk_nil()), @@ -1098,7 +1098,6 @@ pub enum Layout { /// C-like enums; basically an integer. CEnum { discr: Primitive, - signed: bool, /// Inclusive discriminant range. /// If min > max, it represents min...u64::MAX followed by 0...max. // FIXME(eddyb) always use the shortest range, e.g. by finding @@ -1287,7 +1286,7 @@ impl<'a, 'tcx> Layout { let metadata = match unsized_part.sty { ty::TyForeign(..) => return Ok(Scalar(Pointer)), ty::TySlice(_) | ty::TyStr => { - Int(dl.ptr_sized_integer()) + Int(dl.ptr_sized_integer(), false) } ty::TyDynamic(..) => Pointer, _ => return Err(LayoutError::Unknown(unsized_part)) @@ -1298,13 +1297,13 @@ impl<'a, 'tcx> Layout { let layout = match ty.sty { // Basic scalars. 
- ty::TyBool => Scalar(Int(I1)), - ty::TyChar => Scalar(Int(I32)), + ty::TyBool => Scalar(Int(I1, false)), + ty::TyChar => Scalar(Int(I32, false)), ty::TyInt(ity) => { - Scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)))) + Scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)) } ty::TyUint(ity) => { - Scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)))) + Scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)) } ty::TyFloat(FloatTy::F32) => Scalar(F32), ty::TyFloat(FloatTy::F64) => Scalar(F64), @@ -1453,8 +1452,7 @@ impl<'a, 'tcx> Layout { // grok. let (discr, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); return success(CEnum { - discr: Int(discr), - signed, + discr: Int(discr, signed), // FIXME: should be u128? min: min as u64, max: max as u64 @@ -1629,8 +1627,8 @@ impl<'a, 'tcx> Layout { ity = min_ity; } else { // Patch up the variants' first few fields. - let old_ity_size = Int(min_ity).size(dl); - let new_ity_size = Int(ity).size(dl); + let old_ity_size = min_ity.size(); + let new_ity_size = ity.size(); for variant in &mut variants { for i in variant.offsets.iter_mut() { if *i <= old_ity_size { @@ -1646,7 +1644,7 @@ impl<'a, 'tcx> Layout { } General { - discr: Int(ity), + discr: Int(ity, false), variants, size, align, @@ -2417,9 +2415,8 @@ impl<'gcx> HashStable> for Layout FatPointer(ref metadata) => { metadata.hash_stable(hcx, hasher); } - CEnum { discr, signed, min, max } => { + CEnum { discr, min, max } => { discr.hash_stable(hcx, hasher); - signed.hash_stable(hcx, hasher); min.hash_stable(hcx, hasher); max.hash_stable(hcx, hasher); } @@ -2505,7 +2502,7 @@ impl_stable_hash_for!(enum ::ty::layout::Integer { }); impl_stable_hash_for!(enum ::ty::layout::Primitive { - Int(integer), + Int(integer, signed), F32, F64, Pointer diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 45367508c94bc..93cfd967643f3 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -288,7 +288,7 @@ 
impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { // The primitive for this algorithm. layout::Abi::Scalar(value) => { let kind = match value { - layout::Int(_) | + layout::Int(..) | layout::Pointer => RegKind::Integer, layout::F32 | layout::F64 => RegKind::Float @@ -467,24 +467,18 @@ impl<'a, 'tcx> ArgType<'tcx> { pub fn extend_integer_width_to(&mut self, bits: u64) { // Only integers have signedness - let (i, signed) = match *self.layout { - Layout::Scalar(layout::Int(i)) if self.layout.ty.is_integral() => { - (i, self.layout.ty.is_signed()) + match self.layout.abi { + layout::Abi::Scalar(layout::Int(i, signed)) => { + if i.size().bits() < bits { + self.attrs.set(if signed { + ArgAttribute::SExt + } else { + ArgAttribute::ZExt + }); + } } - // Rust enum types that map onto C enums also need to follow - // the target ABI zero-/sign-extension rules. - Layout::CEnum { discr: layout::Int(i), signed, .. } => (i, signed), - - _ => return - }; - - if i.size().bits() < bits { - self.attrs.set(if signed { - ArgAttribute::SExt - } else { - ArgAttribute::ZExt - }); + _ => {} } } diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index e718c7f6f4854..de42676a90d1d 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -230,9 +230,3 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, result } -pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool { - match *l { - layout::CEnum { signed, .. }=> signed, - _ => false, - } -} diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 6e5bc576be5f5..f2208e4909e02 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -67,7 +67,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) match layout.abi { layout::Abi::Scalar(value) => { let reg = match value { - layout::Int(_) | + layout::Int(..) 
| layout::Pointer => Class::Int, layout::F32 | layout::F64 => Class::Sse diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 9ae1ded780504..aad6f3446ee37 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1451,7 +1451,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }) .collect(); - let discriminant_type_metadata = |discr: layout::Primitive, signed: bool| { + let discriminant_type_metadata = |discr: layout::Primitive| { let disr_type_key = (enum_def_id, discr); let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types .borrow() @@ -1462,12 +1462,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let (discriminant_size, discriminant_align) = (discr.size(cx), discr.align(cx)); let discriminant_base_type_metadata = - type_metadata(cx, - match discr { - layout::Int(i) => i.to_ty(cx.tcx(), signed), - _ => discr.to_ty(cx.tcx()) - }, - syntax_pos::DUMMY_SP); + type_metadata(cx, discr.to_ty(cx.tcx()), syntax_pos::DUMMY_SP); let discriminant_name = get_enum_discriminant_name(cx, enum_def_id); let name = CString::new(discriminant_name.as_bytes()).unwrap(); @@ -1496,11 +1491,11 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let type_rep = cx.layout_of(enum_type); let discriminant_type_metadata = match *type_rep { - layout::CEnum { discr, signed, .. } => { - return FinalMetadata(discriminant_type_metadata(discr, signed)) + layout::CEnum { discr, .. } => { + return FinalMetadata(discriminant_type_metadata(discr)) }, layout::NullablePointer { .. } | layout::Univariant { .. } => None, - layout::General { discr, .. } => Some(discriminant_type_metadata(discr, false)), + layout::General { discr, .. 
} => Some(discriminant_type_metadata(discr)), ref l @ _ => bug!("Not an enum layout: {:#?}", l) }; diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index dd06f36947078..c677352c27897 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -23,7 +23,7 @@ use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::subst::{Kind, Substs, Subst}; use rustc_apfloat::{ieee, Float, Status}; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use {adt, base}; +use base; use abi::{self, Abi}; use callee; use builder::Builder; @@ -683,11 +683,9 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); let ll_t_out = self.ccx.immediate_llvm_type_of(cast_ty); let llval = operand.llval; - let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in { - let l = self.ccx.layout_of(operand.ty); - adt::is_discr_signed(&l) - } else { - operand.ty.is_signed() + let signed = match self.ccx.layout_of(operand.ty).abi { + layout::Abi::Scalar(layout::Int(_, signed)) => signed, + _ => false }; unsafe { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index dd134138f78d5..4ad6e985e7aa1 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -352,7 +352,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { /// Helper for cases where the discriminant is simply loaded. fn load_discr(self, bcx: &Builder, discr: layout::Primitive, min: u64, max: u64) -> ValueRef { - if let layout::Int(ity) = discr { + if let layout::Int(ity, _) = discr { let bits = ity.size().bits(); assert!(bits <= 64); let bits = bits as usize; @@ -380,25 +380,30 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let l = bcx.ccx.layout_of(self.ty.to_ty(bcx.tcx())); let cast_to = bcx.ccx.immediate_llvm_type_of(cast_to); - let val = match *l { + let (val, discr) = match *l { layout::Univariant { .. } | layout::UntaggedUnion { .. 
} => return C_uint(cast_to, 0), layout::CEnum { discr, min, max, .. } => { - self.load_discr(bcx, discr, min, max) + (self.load_discr(bcx, discr, min, max), discr) } layout::General { discr, ref variants, .. } => { let ptr = self.project_field(bcx, 0); - ptr.load_discr(bcx, discr, 0, variants.len() as u64 - 1) + (ptr.load_discr(bcx, discr, 0, variants.len() as u64 - 1), discr) } layout::NullablePointer { nndiscr, .. } => { let ptr = self.project_field(bcx, 0); let lldiscr = bcx.load(ptr.llval, ptr.alignment.non_abi()); let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; - bcx.icmp(cmp, lldiscr, C_null(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx())))) + (bcx.icmp(cmp, lldiscr, C_null(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx())))), + layout::Int(layout::I1, false)) }, _ => bug!("{} is not an enum", l.ty) }; - bcx.intcast(val, cast_to, adt::is_discr_signed(&l)) + let signed = match discr { + layout::Int(_, signed) => signed, + _ => false + }; + bcx.intcast(val, cast_to, signed) } /// Set the discriminant for a new value of the given case of the given diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index e83d4c586e7bc..b931d9a254901 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -11,7 +11,7 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; -use rustc::ty::layout::{Layout, LayoutOf}; +use rustc::ty::layout::{self, Layout, LayoutOf}; use rustc::mir; use rustc::middle::lang_items::ExchangeMallocFnLangItem; use rustc_apfloat::{ieee, Float, Status, Round}; @@ -276,7 +276,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let ll_t_out = bcx.ccx.immediate_llvm_type_of(cast_ty); let llval = operand.immediate(); let l = bcx.ccx.layout_of(operand.ty); - let signed = if let Layout::CEnum { signed, min, max, .. } = *l { + + if let Layout::CEnum { min, max, .. 
} = *l { if max > min { // We want `table[e as usize]` to not // have bound checks, and this is the most @@ -285,13 +286,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { base::call_assume(&bcx, bcx.icmp( llvm::IntULE, llval, - C_uint(common::val_ty(llval), max) + C_uint(ll_t_in, max) )); } + } - signed - } else { - operand.ty.is_signed() + let signed = match l.abi { + layout::Abi::Scalar(layout::Int(_, signed)) => signed, + _ => false }; let newval = match (r_t_in, r_t_out) { From 658ebfc788d1926b6eaaeb38da52a13a424e1242 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sat, 16 Sep 2017 16:40:29 +0300 Subject: [PATCH 25/69] rustc: give Layout::CEnum a discriminant field like Layout::General. --- src/librustc/ty/layout.rs | 5 +- src/librustc_trans/mir/lvalue.rs | 105 +++++++++++++++---------------- 2 files changed, 55 insertions(+), 55 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index c7a49b7ae0ae6..18c3a27078773 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1203,8 +1203,7 @@ impl<'a, 'tcx> Layout { let success = |layout| { let layout = tcx.intern_layout(layout); let fields = match *layout { - Scalar(_) | - CEnum { .. } => { + Scalar(_) => { FieldPlacement::union(0) } @@ -1241,6 +1240,7 @@ impl<'a, 'tcx> Layout { FieldPlacement::union(def.struct_variant().fields.len()) } + CEnum { .. } | General { .. } => FieldPlacement::union(1), NullablePointer { ref discr_offset, .. } => { @@ -2356,6 +2356,7 @@ impl<'a, 'tcx> FullLayout<'tcx> { match self.variant_index { None => match *self.layout { // Discriminant field for enums (where applicable). + CEnum { discr, .. } | General { discr, .. } | NullablePointer { discr, .. 
} => { return [discr.to_ty(tcx)][i]; diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 4ad6e985e7aa1..883751d25a069 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -235,7 +235,6 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // Discriminant field of enums. match *l { - layout::General { .. } | layout::NullablePointer { .. } if l.variant_index.is_none() => { let ty = ccx.llvm_type_of(field.ty); let size = field.size(ccx).bytes(); @@ -350,60 +349,66 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } } - /// Helper for cases where the discriminant is simply loaded. - fn load_discr(self, bcx: &Builder, discr: layout::Primitive, min: u64, max: u64) -> ValueRef { - if let layout::Int(ity, _) = discr { - let bits = ity.size().bits(); - assert!(bits <= 64); - let bits = bits as usize; - let mask = !0u64 >> (64 - bits); - // For a (max) discr of -1, max will be `-1 as usize`, which overflows. - // However, that is fine here (it would still represent the full range), - if max.wrapping_add(1) & mask == min & mask { - // i.e., if the range is everything. The lo==hi case would be - // rejected by the LLVM verifier (it would mean either an - // empty set, which is impossible, or the entire range of the - // type, which is pointless). - } else { - // llvm::ConstantRange can deal with ranges that wrap around, - // so an overflow on (max + 1) is fine. - return bcx.load_range_assert(self.llval, min, max.wrapping_add(1), - /* signed: */ llvm::True, - self.alignment.non_abi()); - } - } - bcx.load(self.llval, self.alignment.non_abi()) - } - /// Obtain the actual discriminant of a value. pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef { let l = bcx.ccx.layout_of(self.ty.to_ty(bcx.tcx())); let cast_to = bcx.ccx.immediate_llvm_type_of(cast_to); - let (val, discr) = match *l { + match *l { layout::Univariant { .. } | layout::UntaggedUnion { .. 
} => return C_uint(cast_to, 0), - layout::CEnum { discr, min, max, .. } => { - (self.load_discr(bcx, discr, min, max), discr) + _ => {} + } + + let discr = self.project_field(bcx, 0); + let discr_layout = bcx.ccx.layout_of(discr.ty.to_ty(bcx.tcx())); + let discr_scalar = match discr_layout.abi { + layout::Abi::Scalar(discr) => discr, + _ => bug!("discriminant not scalar: {:#?}", discr_layout) + }; + let (min, max) = match *l { + layout::CEnum { min, max, .. } => (min, max), + layout::General { ref variants, .. } => (0, variants.len() as u64 - 1), + _ => (0, u64::max_value()), + }; + let max_next = max.wrapping_add(1); + let bits = discr_scalar.size(bcx.ccx).bits(); + assert!(bits <= 64); + let mask = !0u64 >> (64 - bits); + let lldiscr = match discr_scalar { + // For a (max) discr of -1, max will be `-1 as usize`, which overflows. + // However, that is fine here (it would still represent the full range), + layout::Int(..) if max_next & mask != min & mask => { + // llvm::ConstantRange can deal with ranges that wrap around, + // so an overflow on (max + 1) is fine. + bcx.load_range_assert(discr.llval, min, max_next, + /* signed: */ llvm::True, + discr.alignment.non_abi()) } - layout::General { discr, ref variants, .. } => { - let ptr = self.project_field(bcx, 0); - (ptr.load_discr(bcx, discr, 0, variants.len() as u64 - 1), discr) + _ => { + // i.e., if the range is everything. The lo==hi case would be + // rejected by the LLVM verifier (it would mean either an + // empty set, which is impossible, or the entire range of the + // type, which is pointless). + bcx.load(discr.llval, discr.alignment.non_abi()) + } + }; + match *l { + layout::CEnum { .. } | + layout::General { .. } => { + let signed = match discr_scalar { + layout::Int(_, signed) => signed, + _ => false + }; + bcx.intcast(lldiscr, cast_to, signed) } layout::NullablePointer { nndiscr, .. 
} => { - let ptr = self.project_field(bcx, 0); - let lldiscr = bcx.load(ptr.llval, ptr.alignment.non_abi()); let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; - (bcx.icmp(cmp, lldiscr, C_null(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx())))), - layout::Int(layout::I1, false)) - }, + let zero = C_null(bcx.ccx.llvm_type_of(discr_layout.ty)); + bcx.intcast(bcx.icmp(cmp, lldiscr, zero), cast_to, false) + } _ => bug!("{} is not an enum", l.ty) - }; - let signed = match discr { - layout::Int(_, signed) => signed, - _ => false - }; - bcx.intcast(val, cast_to, signed) + } } /// Set the discriminant for a new value of the given case of the given @@ -414,20 +419,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> { .discriminant_for_variant(bcx.tcx(), variant_index) .to_u128_unchecked() as u64; match *l { - layout::CEnum { .. } => { - bcx.store(C_int(bcx.ccx.llvm_type_of(self.ty.to_ty(bcx.tcx())), to as i64), - self.llval, self.alignment.non_abi()); - } + layout::CEnum { .. } | layout::General { .. } => { let ptr = self.project_field(bcx, 0); bcx.store(C_int(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx())), to as i64), ptr.llval, ptr.alignment.non_abi()); } - layout::Univariant { .. } - | layout::UntaggedUnion { .. } - | layout::Vector { .. } => { - assert_eq!(to, 0); - } layout::NullablePointer { nndiscr, .. } => { if to != nndiscr { let use_memset = match l.abi { @@ -451,7 +448,9 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } } } - _ => bug!("Cannot handle {} represented as {:#?}", l.ty, l) + _ => { + assert_eq!(to, 0); + } } } From 33a205b56fbe5039d3f64743334b634be8dc4f0c Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sat, 16 Sep 2017 23:12:39 +0300 Subject: [PATCH 26/69] rustc: collapse Layout::CEnum into Layout::General. 
--- src/librustc/lib.rs | 3 +- src/librustc/ty/layout.rs | 168 ++++++++--------------- src/librustc_trans/adt.rs | 11 +- src/librustc_trans/debuginfo/metadata.rs | 9 +- src/librustc_trans/lib.rs | 1 + src/librustc_trans/mir/constant.rs | 17 ++- src/librustc_trans/mir/lvalue.rs | 5 +- src/librustc_trans/mir/rvalue.rs | 6 +- src/librustc_trans/type_of.rs | 1 - 9 files changed, 86 insertions(+), 135 deletions(-) diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 5e9019c92c5b7..44039817e7206 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -47,10 +47,11 @@ #![feature(core_intrinsics)] #![feature(drain_filter)] #![feature(i128_type)] -#![feature(match_default_bindings)] +#![feature(inclusive_range)] #![feature(inclusive_range_syntax)] #![cfg_attr(windows, feature(libc))] #![feature(macro_vis_matcher)] +#![feature(match_default_bindings)] #![feature(never_type)] #![feature(nonzero)] #![feature(quote)] diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 18c3a27078773..5775fc957b5bb 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -25,7 +25,7 @@ use std::fmt; use std::i64; use std::iter; use std::mem; -use std::ops::{Deref, Add, Sub, Mul, AddAssign}; +use std::ops::{Deref, Add, Sub, Mul, AddAssign, RangeInclusive}; use ich::StableHashingContext; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, @@ -841,9 +841,9 @@ impl<'a, 'tcx> Struct { (&Scalar(Pointer), _) if !layout.ty.is_unsafe_ptr() => { Ok(Some((Size::from_bytes(0), Pointer))) } - (&CEnum { discr, .. }, &ty::TyAdt(def, _)) => { + (&General { discr, .. }, &ty::TyAdt(def, _)) => { if def.discriminants(tcx).all(|d| d.to_u128_unchecked() != 0) { - Ok(Some((Size::from_bytes(0), discr))) + Ok(Some((layout.fields.offset(0), discr))) } else { Ok(None) } @@ -1095,18 +1095,6 @@ pub enum Layout { // Remaining variants are all ADTs such as structs, enums or tuples. - /// C-like enums; basically an integer. 
- CEnum { - discr: Primitive, - /// Inclusive discriminant range. - /// If min > max, it represents min...u64::MAX followed by 0...max. - // FIXME(eddyb) always use the shortest range, e.g. by finding - // the largest space between two consecutive discriminants and - // taking everything else as the (shortest) discriminant range. - min: u64, - max: u64 - }, - /// Single-case enums, and structs/tuples. Univariant(Struct), @@ -1118,6 +1106,12 @@ pub enum Layout { /// at a non-0 offset, after where the discriminant would go. General { discr: Primitive, + /// Inclusive wrap-around range of discriminant values, that is, + /// if min > max, it represents min..=u64::MAX followed by 0..=max. + // FIXME(eddyb) always use the shortest range, e.g. by finding + // the largest space between two consecutive discriminants and + // taking everything else as the (shortest) discriminant range. + discr_range: RangeInclusive, variants: Vec, size: Size, align: Align, @@ -1240,7 +1234,6 @@ impl<'a, 'tcx> Layout { FieldPlacement::union(def.struct_variant().fields.len()) } - CEnum { .. } | General { .. } => FieldPlacement::union(1), NullablePointer { ref discr_offset, .. } => { @@ -1250,19 +1243,17 @@ impl<'a, 'tcx> Layout { } }; let abi = match *layout { - Scalar(value) | - CEnum { discr: value, .. } => Abi::Scalar(value), - + Scalar(value) => Abi::Scalar(value), Vector { .. } => Abi::Vector, Array { .. } | FatPointer { .. } | Univariant(_) | - UntaggedUnion(_) | - General { .. } => Abi::Aggregate, + UntaggedUnion(_) => Abi::Aggregate, - NullablePointer { discr, discr_offset, .. } => { - if discr_offset.bytes() == 0 && discr.size(cx) == layout.size(cx) { + General { discr, .. } | + NullablePointer { discr, .. } => { + if fields.offset(0).bytes() == 0 && discr.size(cx) == layout.size(cx) { Abi::Scalar(discr) } else { Abi::Aggregate @@ -1431,7 +1422,14 @@ impl<'a, 'tcx> Layout { // ADTs. ty::TyAdt(def, substs) => { - if def.variants.is_empty() { + // Cache the field layouts. 
+ let variants = def.variants.iter().map(|v| { + v.fields.iter().map(|field| { + cx.layout_of(field.ty(tcx, substs)) + }).collect::, _>>() + }).collect::, _>>()?; + + if variants.is_empty() { // Uninhabitable; represent as unit // (Typechecking will reject discriminant-sizing attrs.) @@ -1439,74 +1437,39 @@ impl<'a, 'tcx> Layout { &def.repr, StructKind::AlwaysSizedUnivariant, ty)?)); } - if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) { - // All bodies empty -> intlike - let (mut min, mut max) = (i64::max_value(), i64::min_value()); - for discr in def.discriminants(tcx) { - let x = discr.to_u128_unchecked() as i64; - if x < min { min = x; } - if x > max { max = x; } - } - - // FIXME: should handle i128? signed-value based impl is weird and hard to - // grok. - let (discr, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); - return success(CEnum { - discr: Int(discr, signed), - // FIXME: should be u128? - min: min as u64, - max: max as u64 - }); - } - - if !def.is_enum() || (def.variants.len() == 1 && - !def.repr.inhibit_enum_layout_opt()) { + if !def.is_enum() || (variants.len() == 1 && + !def.repr.inhibit_enum_layout_opt() && + !variants[0].is_empty()) { // Struct, or union, or univariant enum equivalent to a struct. // (Typechecking will reject discriminant-sizing attrs.) 
- let kind = if def.is_enum() || def.variants[0].fields.len() == 0{ + let kind = if def.is_enum() || variants[0].len() == 0 { StructKind::AlwaysSizedUnivariant } else { let param_env = tcx.param_env(def.did); - let fields = &def.variants[0].fields; - let last_field = &fields[fields.len()-1]; + let last_field = def.variants[0].fields.last().unwrap(); let always_sized = tcx.type_of(last_field.did) .is_sized(tcx, param_env, DUMMY_SP); if !always_sized { StructKind::MaybeUnsizedUnivariant } else { StructKind::AlwaysSizedUnivariant } }; - let fields = def.variants[0].fields.iter().map(|field| { - cx.layout_of(field.ty(tcx, substs)) - }).collect::, _>>()?; let layout = if def.is_union() { let mut un = Union::new(dl, &def.repr); - un.extend(dl, fields.iter().map(|&f| Ok(f.layout)), ty)?; + un.extend(dl, variants[0].iter().map(|&f| Ok(f.layout)), ty)?; UntaggedUnion(un) } else { - Univariant(Struct::new(dl, &fields, &def.repr, kind, ty)?) + Univariant(Struct::new(dl, &variants[0], &def.repr, kind, ty)?) }; return success(layout); } - // Since there's at least one - // non-empty body, explicit discriminants should have - // been rejected by a checker before this point. - for (i, v) in def.variants.iter().enumerate() { - if v.discr != ty::VariantDiscr::Relative(i) { - bug!("non-C-like enum {} with specified discriminants", - tcx.item_path_str(def.did)); - } - } + let no_explicit_discriminants = def.variants.iter().enumerate() + .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i)); - // Cache the substituted and normalized variant field types. 
- let variants = def.variants.iter().map(|v| { - v.fields.iter().map(|field| { - cx.layout_of(field.ty(tcx, substs)) - }).collect::, _>>() - }).collect::, _>>()?; - - if variants.len() == 2 && !def.repr.inhibit_enum_layout_opt() { + if variants.len() == 2 && + !def.repr.inhibit_enum_layout_opt() && + no_explicit_discriminants { // Nullable pointer optimization let st0 = Struct::new(dl, &variants[0], &def.repr, StructKind::AlwaysSizedUnivariant, ty)?; @@ -1554,16 +1517,23 @@ impl<'a, 'tcx> Layout { } } - // The general case. - let discr_max = (variants.len() - 1) as i64; - assert!(discr_max >= 0); - let (min_ity, _) = Integer::repr_discr(tcx, ty, &def.repr, 0, discr_max); + let (mut min, mut max) = (i64::max_value(), i64::min_value()); + for discr in def.discriminants(tcx) { + let x = discr.to_u128_unchecked() as i64; + if x < min { min = x; } + if x > max { max = x; } + } + // FIXME: should handle i128? signed-value based impl is weird and hard to + // grok. + let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); + let mut align = dl.aggregate_align; let mut primitive_align = dl.aggregate_align; let mut size = Size::from_bytes(0); // We're interested in the smallest alignment, so start large. let mut start_align = Align::from_bytes(256, 256).unwrap(); + assert_eq!(Integer::for_abi_align(dl, start_align), None); // Create the set of structs that represent each variant. let mut variants = variants.into_iter().map(|fields| { @@ -1644,7 +1614,10 @@ impl<'a, 'tcx> Layout { } General { - discr: Int(ity, false), + discr: Int(ity, signed), + + // FIXME: should be u128? + discr_range: (min as u64)..=(max as u64), variants, size, align, @@ -1680,7 +1653,7 @@ impl<'a, 'tcx> Layout { pub fn is_unsized(&self) -> bool { match *self { Scalar(_) | Vector {..} | FatPointer {..} | - CEnum {..} | UntaggedUnion {..} | General {..} | + UntaggedUnion {..} | General {..} | NullablePointer {..} => false, Array { sized, .. 
} | @@ -1720,7 +1693,6 @@ impl<'a, 'tcx> Layout { metadata.size(dl)).abi_align(self.align(dl)) } - CEnum { discr, .. } => discr.size(dl), General { size, .. } => size, UntaggedUnion(ref un) => un.stride(), @@ -1754,7 +1726,6 @@ impl<'a, 'tcx> Layout { Pointer.align(dl).max(metadata.align(dl)) } - CEnum { discr, .. } => discr.align(dl), Array { align, .. } | General { align, .. } => align, UntaggedUnion(ref un) => un.align, @@ -1856,16 +1827,6 @@ impl<'a, 'tcx> Layout { } }; - let build_primitive_info = |name: ast::Name, value: Primitive| { - session::VariantInfo { - name: Some(name.to_string()), - kind: session::SizeKind::Exact, - align: value.align(tcx).abi(), - size: value.size(tcx).bytes(), - fields: vec![], - } - }; - let build_variant_info = |n: Option, flds: &[(ast::Name, Ty<'tcx>)], s: &Struct| { @@ -1959,17 +1920,6 @@ impl<'a, 'tcx> Layout { record(adt_kind.into(), None, Vec::new()); } - Layout::CEnum { discr, .. } => { - debug!("print-type-size t: `{:?}` adt c-like enum", ty); - let variant_infos: Vec<_> = - adt_def.variants.iter() - .map(|variant_def| { - build_primitive_info(variant_def.name, discr) - }) - .collect(); - record(adt_kind.into(), Some(discr.size(tcx)), variant_infos); - } - // other cases provide little interesting (i.e. adjustable // via representation tweaks) size info beyond total size. Layout::Scalar(_) | @@ -2284,6 +2234,7 @@ impl<'a, 'tcx> FullLayout<'tcx> { FullLayout { variant_index: Some(variant_index), fields, + abi: Abi::Aggregate, ..*self } } @@ -2356,7 +2307,6 @@ impl<'a, 'tcx> FullLayout<'tcx> { match self.variant_index { None => match *self.layout { // Discriminant field for enums (where applicable). - CEnum { discr, .. } | General { discr, .. } | NullablePointer { discr, .. 
} => { return [discr.to_ty(tcx)][i]; @@ -2416,19 +2366,23 @@ impl<'gcx> HashStable> for Layout FatPointer(ref metadata) => { metadata.hash_stable(hcx, hasher); } - CEnum { discr, min, max } => { - discr.hash_stable(hcx, hasher); - min.hash_stable(hcx, hasher); - max.hash_stable(hcx, hasher); - } Univariant(ref variant) => { variant.hash_stable(hcx, hasher); } UntaggedUnion(ref un) => { un.hash_stable(hcx, hasher); } - General { discr, ref variants, size, align, primitive_align } => { + General { + discr, + discr_range: RangeInclusive { start, end }, + ref variants, + size, + align, + primitive_align + } => { discr.hash_stable(hcx, hasher); + start.hash_stable(hcx, hasher); + end.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); size.hash_stable(hcx, hasher); align.hash_stable(hcx, hasher); diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index de42676a90d1d..e1a65f37eff82 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -69,7 +69,7 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let l = cx.layout_of(t); debug!("finish_type_of: {} with layout {:#?}", t, l); match *l { - layout::CEnum { .. } | layout::General { .. } | layout::UntaggedUnion { .. } => { } + layout::General { .. } | layout::UntaggedUnion { .. } => { } layout::Univariant { ..} | layout::NullablePointer { .. } => { if let layout::Abi::Scalar(_) = l.abi { return; @@ -101,13 +101,12 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, name: Option<&str>) -> Type { let l = cx.layout_of(t); - debug!("adt::generic_type_of t: {:?} name: {:?}", t, name); + debug!("adt::generic_type_of {:#?} name: {:?}", l, name); + if let layout::Abi::Scalar(value) = l.abi { + return cx.llvm_type_of(value.to_ty(cx.tcx())); + } match *l { - layout::CEnum { discr, .. } => cx.llvm_type_of(discr.to_ty(cx.tcx())), layout::NullablePointer { nndiscr, ref nonnull, .. 
} => { - if let layout::Abi::Scalar(_) = l.abi { - return cx.llvm_type_of(l.field(cx, 0).ty); - } match name { None => { Type::struct_(cx, &struct_llfields(cx, l.for_variant(nndiscr as usize), diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index aad6f3446ee37..16bca343dd07d 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1282,7 +1282,6 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } ] }, - layout::CEnum { .. } => span_bug!(self.span, "This should be unreachable."), ref l @ _ => bug!("Not an enum layout: {:#?}", l) } } @@ -1491,14 +1490,16 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let type_rep = cx.layout_of(enum_type); let discriminant_type_metadata = match *type_rep { - layout::CEnum { discr, .. } => { - return FinalMetadata(discriminant_type_metadata(discr)) - }, layout::NullablePointer { .. } | layout::Univariant { .. } => None, layout::General { discr, .. 
} => Some(discriminant_type_metadata(discr)), ref l @ _ => bug!("Not an enum layout: {:#?}", l) }; + match (type_rep.abi, discriminant_type_metadata) { + (layout::Abi::Scalar(_), Some(discr)) => return FinalMetadata(discr), + _ => {} + } + let (enum_type_size, enum_type_align) = type_rep.size_and_align(cx); let enum_name = CString::new(enum_name).unwrap(); diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 2776125bd8288..dd33012e900fa 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -25,6 +25,7 @@ #![allow(unused_attributes)] #![feature(i128_type)] #![feature(i128)] +#![feature(inclusive_range)] #![feature(libc)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index c677352c27897..c8d8199a05eb7 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1091,7 +1091,7 @@ fn trans_const_adt<'a, 'tcx>( _ => 0, }; match *l { - layout::CEnum { .. } => { + layout::General { ref variants, .. } => { let discr = match *kind { mir::AggregateKind::Adt(adt_def, _, _, _) => { adt_def.discriminant_for_variant(ccx.tcx(), variant_index) @@ -1099,15 +1099,14 @@ fn trans_const_adt<'a, 'tcx>( }, _ => 0, }; - assert_eq!(vals.len(), 0); - Const::new(C_int(ccx.llvm_type_of(t), discr as i64), t) - } - layout::General { ref variants, .. 
} => { let discr_ty = l.field(ccx, 0).ty; - let variant = &variants[variant_index]; - let lldiscr = C_int(ccx.llvm_type_of(discr_ty), variant_index as i64); - build_const_struct(ccx, l, &variant, vals, - Some(Const::new(lldiscr, discr_ty))) + let discr = Const::new(C_int(ccx.llvm_type_of(discr_ty), discr as i64), + discr_ty); + if let layout::Abi::Scalar(_) = l.abi { + discr + } else { + build_const_struct(ccx, l, &variants[variant_index], vals, Some(discr)) + } } layout::UntaggedUnion(ref un) => { assert_eq!(variant_index, 0); diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 883751d25a069..f39e3cb78126c 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -367,8 +367,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { _ => bug!("discriminant not scalar: {:#?}", discr_layout) }; let (min, max) = match *l { - layout::CEnum { min, max, .. } => (min, max), - layout::General { ref variants, .. } => (0, variants.len() as u64 - 1), + layout::General { ref discr_range, .. } => (discr_range.start, discr_range.end), _ => (0, u64::max_value()), }; let max_next = max.wrapping_add(1); @@ -394,7 +393,6 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } }; match *l { - layout::CEnum { .. } | layout::General { .. } => { let signed = match discr_scalar { layout::Int(_, signed) => signed, @@ -419,7 +417,6 @@ impl<'a, 'tcx> LvalueRef<'tcx> { .discriminant_for_variant(bcx.tcx(), variant_index) .to_u128_unchecked() as u64; match *l { - layout::CEnum { .. } | layout::General { .. 
} => { let ptr = self.project_field(bcx, 0); bcx.store(C_int(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx())), to as i64), diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index b931d9a254901..b7143f23691d9 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -277,8 +277,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let llval = operand.immediate(); let l = bcx.ccx.layout_of(operand.ty); - if let Layout::CEnum { min, max, .. } = *l { - if max > min { + if let Layout::General { ref discr_range, .. } = *l { + if discr_range.end > discr_range.start { // We want `table[e as usize]` to not // have bound checks, and this is the most // convenient place to put the `assume`. @@ -286,7 +286,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { base::call_assume(&bcx, bcx.icmp( llvm::IntULE, llval, - C_uint(ll_t_in, max) + C_uint(ll_t_in, discr_range.end) )); } } diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 264f711de8f93..eca6057db3663 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -242,7 +242,6 @@ impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { } match **self { Layout::Scalar { .. } | - Layout::CEnum { .. } | Layout::UntaggedUnion { .. } => { bug!("FullLayout::llvm_field_index({:?}): not applicable", self) } From bd86f3739e76484e410ec5e651ab3ee9049f31ba Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 17 Sep 2017 02:25:20 +0300 Subject: [PATCH 27/69] rustc: make Layout::NullablePointer a lot more like Layout::General. 
--- src/librustc/ty/layout.rs | 109 ++++++++----------- src/librustc_trans/adt.rs | 51 +++------ src/librustc_trans/debuginfo/metadata.rs | 6 +- src/librustc_trans/mir/constant.rs | 4 +- src/librustc_trans/mir/lvalue.rs | 16 ++- src/librustc_trans/type_of.rs | 9 +- src/test/ui/print_type_sizes/nullable.stdout | 3 + 7 files changed, 83 insertions(+), 115 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 5775fc957b5bb..bd99ae0204aa2 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1128,9 +1128,12 @@ pub enum Layout { /// identity function. NullablePointer { nndiscr: u64, - nonnull: Struct, discr: Primitive, discr_offset: Size, + variants: Vec, + size: Size, + align: Align, + primitive_align: Align, } } @@ -1471,23 +1474,20 @@ impl<'a, 'tcx> Layout { !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants { // Nullable pointer optimization - let st0 = Struct::new(dl, &variants[0], - &def.repr, StructKind::AlwaysSizedUnivariant, ty)?; - let st1 = Struct::new(dl, &variants[1], - &def.repr, StructKind::AlwaysSizedUnivariant, ty)?; + let mut st = vec![ + Struct::new(dl, &variants[0], + &def.repr, StructKind::AlwaysSizedUnivariant, ty)?, + Struct::new(dl, &variants[1], + &def.repr, StructKind::AlwaysSizedUnivariant, ty)? + ]; let mut choice = None; for discr in 0..2 { - let (st, other) = if discr == 0 { - (&st0, &st1) - } else { - (&st1, &st0) - }; - if other.stride().bytes() > 0 { + if st[1 - discr].stride().bytes() > 0 { continue; } - let field = st.non_zero_field(tcx, param_env, + let field = st[discr].non_zero_field(tcx, param_env, variants[discr].iter().map(|&f| Ok(f)))?; if let Some((offset, primitive)) = field { choice = Some((discr, offset, primitive)); @@ -1496,23 +1496,22 @@ impl<'a, 'tcx> Layout { } if let Some((discr, offset, primitive)) = choice { - // HACK(eddyb) work around not being able to move - // out of arrays with just the indexing operator. 
- let mut st = if discr == 0 { st0 } else { st1 }; - let mut discr_align = primitive.align(dl); if offset.abi_align(discr_align) != offset { - st.packed = true; + st[discr].packed = true; discr_align = dl.i8_align; } - st.align = st.align.max(discr_align); - st.primitive_align = st.primitive_align.max(discr_align); + let align = st[discr].align.max(discr_align); + let primitive_align = st[discr].primitive_align.max(discr_align); return success(NullablePointer { nndiscr: discr as u64, - nonnull: st, discr: primitive, discr_offset: offset, + size: st[discr].stride(), + align, + primitive_align, + variants: st, }); } } @@ -1693,13 +1692,10 @@ impl<'a, 'tcx> Layout { metadata.size(dl)).abi_align(self.align(dl)) } + NullablePointer { size, .. } | General { size, .. } => size, UntaggedUnion(ref un) => un.stride(), - - Univariant(ref variant) | - NullablePointer { nonnull: ref variant, .. } => { - variant.stride() - } + Univariant(ref variant) => variant.stride() } } @@ -1726,13 +1722,11 @@ impl<'a, 'tcx> Layout { Pointer.align(dl).max(metadata.align(dl)) } - Array { align, .. } | General { align, .. } => align, + Array { align, .. } | + NullablePointer { align, .. } | + General { align, .. } => align, UntaggedUnion(ref un) => un.align, - - Univariant(ref variant) | - NullablePointer { nonnull: ref variant, .. } => { - variant.align - } + Univariant(ref variant) => variant.align } } @@ -1743,11 +1737,11 @@ impl<'a, 'tcx> Layout { /// Returns alignment before repr alignment is applied pub fn primitive_align(&self, cx: C) -> Align { match *self { - Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align, - Univariant(ref variant) | - NullablePointer { nonnull: ref variant, .. } => { - variant.primitive_align - }, + Array { primitive_align, .. } | + NullablePointer { primitive_align, .. } | + General { primitive_align, .. 
} => primitive_align, + + Univariant(ref variant) => variant.primitive_align, _ => self.align(cx.data_layout()) } @@ -1850,23 +1844,6 @@ impl<'a, 'tcx> Layout { }; match *layout { - Layout::NullablePointer { nonnull: ref variant_layout, - nndiscr, - discr: _, - discr_offset: _ } => { - debug!("print-type-size t: `{:?}` adt nullable nndiscr {} is {:?}", - ty, nndiscr, variant_layout); - let variant_def = &adt_def.variants[nndiscr as usize]; - let fields: Vec<_> = - variant_def.fields.iter() - .map(|field_def| (field_def.name, field_def.ty(tcx, substs))) - .collect(); - record(adt_kind.into(), - None, - vec![build_variant_info(Some(variant_def.name), - &fields, - variant_layout)]); - } Layout::Univariant(ref variant_layout) => { let variant_names = || { adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::>() @@ -1893,7 +1870,8 @@ impl<'a, 'tcx> Layout { } } - Layout::General { ref variants, discr, .. } => { + Layout::NullablePointer { ref variants, .. } | + Layout::General { ref variants, .. } => { debug!("print-type-size t: `{:?}` adt general variants def {} layouts {} {:?}", ty, adt_def.variants.len(), variants.len(), variants); let variant_infos: Vec<_> = @@ -1910,7 +1888,10 @@ impl<'a, 'tcx> Layout { variant_layout) }) .collect(); - record(adt_kind.into(), Some(discr.size(tcx)), variant_infos); + record(adt_kind.into(), match *layout { + Layout::General { discr, .. } => Some(discr.size(tcx)), + _ => None + }, variant_infos); } Layout::UntaggedUnion(ref un) => { @@ -2215,19 +2196,13 @@ impl<'a, 'tcx> FullLayout<'tcx> { } } + NullablePointer { ref variants, .. } | General { ref variants, .. } => { FieldPlacement::Arbitrary { offsets: &variants[variant_index].offsets } } - NullablePointer { nndiscr, ref nonnull, .. 
} - if nndiscr as usize == variant_index => { - FieldPlacement::Arbitrary { - offsets: &nonnull.offsets - } - } - _ => FieldPlacement::union(count) }; @@ -2390,14 +2365,20 @@ impl<'gcx> HashStable> for Layout } NullablePointer { nndiscr, - ref nonnull, + ref variants, ref discr, discr_offset, + size, + align, + primitive_align } => { nndiscr.hash_stable(hcx, hasher); - nonnull.hash_stable(hcx, hasher); + variants.hash_stable(hcx, hasher); discr.hash_stable(hcx, hasher); discr_offset.hash_stable(hcx, hasher); + size.hash_stable(hcx, hasher); + align.hash_stable(hcx, hasher); + primitive_align.hash_stable(hcx, hasher); } } } diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index e1a65f37eff82..634dba3660e03 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -68,28 +68,24 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, llty: &mut Type) { let l = cx.layout_of(t); debug!("finish_type_of: {} with layout {:#?}", t, l); + if let layout::Abi::Scalar(_) = l.abi { + return; + } match *l { - layout::General { .. } | layout::UntaggedUnion { .. } => { } - layout::Univariant { ..} | layout::NullablePointer { .. } => { - if let layout::Abi::Scalar(_) = l.abi { - return; - } - let (variant_layout, variant) = match *l { - layout::Univariant(ref variant) => { - let is_enum = if let ty::TyAdt(def, _) = t.sty { - def.is_enum() - } else { - false - }; - if is_enum { - (l.for_variant(0), variant) - } else { - (l, variant) - } - } - layout::NullablePointer { nndiscr, ref nonnull, .. } => - (l.for_variant(nndiscr as usize), nonnull), - _ => unreachable!() + layout::NullablePointer { .. } | + layout::General { .. } | + layout::UntaggedUnion { .. 
} => { } + + layout::Univariant(ref variant) => { + let is_enum = if let ty::TyAdt(def, _) = t.sty { + def.is_enum() + } else { + false + }; + let variant_layout = if is_enum { + l.for_variant(0) + } else { + l }; llty.set_struct_body(&struct_llfields(cx, variant_layout, variant), variant.packed) }, @@ -106,18 +102,6 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, return cx.llvm_type_of(value.to_ty(cx.tcx())); } match *l { - layout::NullablePointer { nndiscr, ref nonnull, .. } => { - match name { - None => { - Type::struct_(cx, &struct_llfields(cx, l.for_variant(nndiscr as usize), - nonnull), - nonnull.packed) - } - Some(name) => { - Type::named_struct(cx, name) - } - } - } layout::Univariant(ref variant) => { match name { None => { @@ -143,6 +127,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } } + layout::NullablePointer { size, align, .. } | layout::General { size, align, .. } => { let fill = union_fill(cx, size, align); match name { diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 16bca343dd07d..0e74d98557070 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1218,11 +1218,13 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } } layout::NullablePointer { - nonnull: ref struct_def, nndiscr, discr, - discr_offset + discr_offset, + ref variants, + .. } => { + let struct_def = &variants[nndiscr as usize]; // Create a description of the non-null variant let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index c8d8199a05eb7..4f7c91efccdc2 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1124,9 +1124,9 @@ fn trans_const_adt<'a, 'tcx>( layout::Vector { .. 
} => { Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::>()), t) } - layout::NullablePointer { ref nonnull, nndiscr, .. } => { + layout::NullablePointer { ref variants, nndiscr, .. } => { if variant_index as u64 == nndiscr { - build_const_struct(ccx, l, &nonnull, vals, None) + build_const_struct(ccx, l, &variants[variant_index], vals, None) } else { // Always use null even if it's not the `discrfield`th // field; see #8506. diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index f39e3cb78126c..325ccd4fde34b 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -273,7 +273,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // Check whether the variant being used is packed, if applicable. let is_packed = match (&*l, l.variant_index) { (&layout::Univariant(ref variant), _) => variant.packed, - (&layout::NullablePointer { ref nonnull, .. }, _) => nonnull.packed, + (&layout::NullablePointer { ref variants, .. }, Some(v)) | (&layout::General { ref variants, .. }, Some(v)) => variants[v].packed, _ => return simple() }; @@ -471,11 +471,15 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // If this is an enum, cast to the appropriate variant struct type. let layout = bcx.ccx.layout_of(ty).for_variant(variant_index); - if let layout::General { ref variants, .. } = *layout { - let st = &variants[variant_index]; - let variant_ty = Type::struct_(bcx.ccx, - &adt::struct_llfields(bcx.ccx, layout, st), st.packed); - downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); + match *layout { + layout::NullablePointer { ref variants, .. } | + layout::General { ref variants, .. 
} => { + let st = &variants[variant_index]; + let variant_ty = Type::struct_(bcx.ccx, + &adt::struct_llfields(bcx.ccx, layout, st), st.packed); + downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); + } + _ => {} } downcast diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index eca6057db3663..eb52d58098d0f 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -259,6 +259,7 @@ impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { adt::memory_index_to_gep(variant.memory_index[index] as u64) } + Layout::NullablePointer { ref variants, .. } | Layout::General { ref variants, .. } => { if let Some(v) = self.variant_index { adt::memory_index_to_gep(variants[v].memory_index[index] as u64) @@ -266,14 +267,6 @@ impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { bug!("FullLayout::llvm_field_index({:?}): not applicable", self) } } - - Layout::NullablePointer { nndiscr, ref nonnull, .. } => { - if self.variant_index == Some(nndiscr as usize) { - adt::memory_index_to_gep(nonnull.memory_index[index] as u64) - } else { - bug!("FullLayout::llvm_field_index({:?}): not applicable", self) - } - } } } } diff --git a/src/test/ui/print_type_sizes/nullable.stdout b/src/test/ui/print_type_sizes/nullable.stdout index 830678f174f88..ec51adb25af2c 100644 --- a/src/test/ui/print_type_sizes/nullable.stdout +++ b/src/test/ui/print_type_sizes/nullable.stdout @@ -4,9 +4,11 @@ print-type-size field `.post`: 2 bytes print-type-size field `.pre`: 1 bytes print-type-size end padding: 1 bytes print-type-size type: `MyOption>`: 12 bytes, alignment: 4 bytes +print-type-size variant `None`: 0 bytes print-type-size variant `Some`: 12 bytes print-type-size field `.0`: 12 bytes print-type-size type: `EmbeddedDiscr`: 8 bytes, alignment: 4 bytes +print-type-size variant `None`: 0 bytes print-type-size variant `Record`: 7 bytes print-type-size field `.val`: 4 bytes print-type-size field `.post`: 2 bytes @@ -18,6 +20,7 @@ print-type-size field 
`.post`: 2 bytes print-type-size field `.pre`: 1 bytes print-type-size end padding: 1 bytes print-type-size type: `MyOption>`: 4 bytes, alignment: 4 bytes +print-type-size variant `None`: 0 bytes print-type-size variant `Some`: 4 bytes print-type-size field `.0`: 4 bytes print-type-size type: `core::nonzero::NonZero`: 4 bytes, alignment: 4 bytes From bd51a2bc192f323d0da2ea7716860b1699d315d8 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 17 Sep 2017 04:42:22 +0300 Subject: [PATCH 28/69] rustc: move size/alignment from Layout into layout::Abi. --- src/librustc/ty/layout.rs | 359 +++++++++++++---------- src/librustc_lint/types.rs | 2 +- src/librustc_trans/abi.rs | 12 +- src/librustc_trans/adt.rs | 4 +- src/librustc_trans/cabi_s390x.rs | 2 +- src/librustc_trans/cabi_x86.rs | 2 +- src/librustc_trans/cabi_x86_64.rs | 10 +- src/librustc_trans/cabi_x86_win64.rs | 36 ++- src/librustc_trans/common.rs | 8 +- src/librustc_trans/debuginfo/metadata.rs | 8 +- src/librustc_trans/glue.rs | 2 +- src/librustc_trans/mir/constant.rs | 2 +- src/librustc_trans/mir/lvalue.rs | 18 +- src/librustc_trans/mir/mod.rs | 2 +- src/librustc_trans/mir/rvalue.rs | 2 +- src/librustc_trans/type_of.rs | 2 +- 16 files changed, 257 insertions(+), 214 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index bd99ae0204aa2..eb22b6f2ce990 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -25,7 +25,7 @@ use std::fmt; use std::i64; use std::iter; use std::mem; -use std::ops::{Deref, Add, Sub, Mul, AddAssign, RangeInclusive}; +use std::ops::{Add, Sub, Mul, AddAssign, RangeInclusive}; use ich::StableHashingContext; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, @@ -856,7 +856,7 @@ impl<'a, 'tcx> Struct { // Is this the NonZero lang item wrapping a pointer or integer type? 
(_, &ty::TyAdt(def, _)) if Some(def.did) == tcx.lang_items().non_zero() => { let field = layout.field(cx, 0)?; - match *field { + match *field.layout { Scalar(value) => { Ok(Some((layout.fields.offset(0), value))) } @@ -965,7 +965,7 @@ impl<'a, 'tcx> Union { fields: I, scapegoat: Ty<'tcx>) -> Result<(), LayoutError<'tcx>> - where I: Iterator>> { + where I: Iterator, LayoutError<'tcx>>> { for (index, field) in fields.enumerate() { let field = field?; if field.is_unsized() { @@ -1061,8 +1061,80 @@ impl<'a> FieldPlacement<'a> { #[derive(Copy, Clone, Debug)] pub enum Abi { Scalar(Primitive), - Vector, - Aggregate + Vector { + element: Primitive, + count: u64 + }, + Aggregate { + /// If true, the size is exact, otherwise it's only a lower bound. + sized: bool, + align: Align, + primitive_align: Align, + size: Size + } +} + +impl Abi { + /// Returns true if the layout corresponds to an unsized type. + pub fn is_unsized(&self) -> bool { + match *self { + Abi::Scalar(_) | Abi::Vector {..} => false, + Abi::Aggregate { sized, .. } => !sized + } + } + + pub fn size(&self, cx: C) -> Size { + let dl = cx.data_layout(); + + match *self { + Abi::Scalar(value) => value.size(dl), + + Abi::Vector { element, count } => { + let element_size = element.size(dl); + let vec_size = match element_size.checked_mul(count, dl) { + Some(size) => size, + None => bug!("Layout::size({:?}): {} * {} overflowed", + self, element_size.bytes(), count) + }; + vec_size.abi_align(self.align(dl)) + } + + Abi::Aggregate { size, .. } => size + } + } + + pub fn align(&self, cx: C) -> Align { + let dl = cx.data_layout(); + + match *self { + Abi::Scalar(value) => value.align(dl), + + Abi::Vector { element, count } => { + let elem_size = element.size(dl); + let vec_size = match elem_size.checked_mul(count, dl) { + Some(size) => size, + None => bug!("Layout::align({:?}): {} * {} overflowed", + self, elem_size.bytes(), count) + }; + dl.vector_align(vec_size) + } + + Abi::Aggregate { align, .. 
} => align + } + } + + pub fn size_and_align(&self, cx: C) -> (Size, Align) { + (self.size(cx), self.align(cx)) + } + + /// Returns alignment before repr alignment is applied + pub fn primitive_align(&self, cx: C) -> Align { + match *self { + Abi::Aggregate { primitive_align, .. } => primitive_align, + + _ => self.align(cx.data_layout()) + } + } } /// Type layout, from which size and alignment can be cheaply computed. @@ -1247,19 +1319,63 @@ impl<'a, 'tcx> Layout { }; let abi = match *layout { Scalar(value) => Abi::Scalar(value), - Vector { .. } => Abi::Vector, + Vector { element, count } => Abi::Vector { element, count }, + + Array { sized, align, primitive_align, element_size, count, .. } => { + let size = match element_size.checked_mul(count, dl) { + Some(size) => size, + None => return Err(LayoutError::SizeOverflow(ty)) + }; + Abi::Aggregate { + sized, + align, + primitive_align, + size + } + } - Array { .. } | - FatPointer { .. } | - Univariant(_) | - UntaggedUnion(_) => Abi::Aggregate, + FatPointer(metadata) => { + // Effectively a (ptr, meta) tuple. + let align = Pointer.align(dl).max(metadata.align(dl)); + Abi::Aggregate { + sized: true, + align, + primitive_align: align, + size: (Pointer.size(dl).abi_align(metadata.align(dl)) + + metadata.size(dl)) + .abi_align(align) + } + } - General { discr, .. } | - NullablePointer { discr, .. } => { - if fields.offset(0).bytes() == 0 && discr.size(cx) == layout.size(cx) { + Univariant(ref st) => { + Abi::Aggregate { + sized: st.sized, + align: st.align, + primitive_align: st.primitive_align, + size: st.stride() + } + } + + UntaggedUnion(ref un ) => { + Abi::Aggregate { + sized: true, + align: un.align, + primitive_align: un.primitive_align, + size: un.stride() + } + } + + General { discr, align, primitive_align, size, .. } | + NullablePointer { discr, align, primitive_align, size, .. 
} => { + if fields.offset(0).bytes() == 0 && discr.size(cx) == size { Abi::Scalar(discr) } else { - Abi::Aggregate + Abi::Aggregate { + sized: true, + align, + primitive_align, + size + } } } }; @@ -1330,9 +1446,6 @@ impl<'a, 'tcx> Layout { let element = cx.layout_of(element)?; let element_size = element.size(dl); let count = count.val.to_const_int().unwrap().to_u64().unwrap(); - if element_size.checked_mul(count, dl).is_none() { - return Err(LayoutError::SizeOverflow(ty)); - } Array { sized: true, align: element.align(dl), @@ -1408,8 +1521,8 @@ impl<'a, 'tcx> Layout { // SIMD vector types. ty::TyAdt(def, ..) if def.repr.simd() => { let element = ty.simd_type(tcx); - match *cx.layout_of(element)? { - Scalar(value) => { + match cx.layout_of(element)?.abi { + Abi::Scalar(value) => { return success(Vector { element: value, count: ty.simd_size(tcx) as u64 @@ -1459,7 +1572,7 @@ impl<'a, 'tcx> Layout { let layout = if def.is_union() { let mut un = Union::new(dl, &def.repr); - un.extend(dl, variants[0].iter().map(|&f| Ok(f.layout)), ty)?; + un.extend(dl, variants[0].iter().map(|&f| Ok(f)), ty)?; UntaggedUnion(un) } else { Univariant(Struct::new(dl, &variants[0], &def.repr, kind, ty)?) @@ -1648,112 +1761,13 @@ impl<'a, 'tcx> Layout { success(layout) } - /// Returns true if the layout corresponds to an unsized type. - pub fn is_unsized(&self) -> bool { - match *self { - Scalar(_) | Vector {..} | FatPointer {..} | - UntaggedUnion {..} | General {..} | - NullablePointer {..} => false, - - Array { sized, .. } | - Univariant(Struct { sized, .. 
}) => !sized - } - } - - pub fn size(&self, cx: C) -> Size { - let dl = cx.data_layout(); - - match *self { - Scalar(value) => { - value.size(dl) - } - - Vector { element, count } => { - let element_size = element.size(dl); - let vec_size = match element_size.checked_mul(count, dl) { - Some(size) => size, - None => bug!("Layout::size({:?}): {} * {} overflowed", - self, element_size.bytes(), count) - }; - vec_size.abi_align(self.align(dl)) - } - - Array { element_size, count, .. } => { - match element_size.checked_mul(count, dl) { - Some(size) => size, - None => bug!("Layout::size({:?}): {} * {} overflowed", - self, element_size.bytes(), count) - } - } - - FatPointer(metadata) => { - // Effectively a (ptr, meta) tuple. - (Pointer.size(dl).abi_align(metadata.align(dl)) + - metadata.size(dl)).abi_align(self.align(dl)) - } - - NullablePointer { size, .. } | - General { size, .. } => size, - UntaggedUnion(ref un) => un.stride(), - Univariant(ref variant) => variant.stride() - } - } - - pub fn align(&self, cx: C) -> Align { - let dl = cx.data_layout(); - - match *self { - Scalar(value) => { - value.align(dl) - } - - Vector { element, count } => { - let elem_size = element.size(dl); - let vec_size = match elem_size.checked_mul(count, dl) { - Some(size) => size, - None => bug!("Layout::align({:?}): {} * {} overflowed", - self, elem_size.bytes(), count) - }; - dl.vector_align(vec_size) - } - - FatPointer(metadata) => { - // Effectively a (ptr, meta) tuple. - Pointer.align(dl).max(metadata.align(dl)) - } - - Array { align, .. } | - NullablePointer { align, .. } | - General { align, .. } => align, - UntaggedUnion(ref un) => un.align, - Univariant(ref variant) => variant.align - } - } - - pub fn size_and_align(&self, cx: C) -> (Size, Align) { - (self.size(cx), self.align(cx)) - } - - /// Returns alignment before repr alignment is applied - pub fn primitive_align(&self, cx: C) -> Align { - match *self { - Array { primitive_align, .. } | - NullablePointer { primitive_align, .. 
} | - General { primitive_align, .. } => primitive_align, - - Univariant(ref variant) => variant.primitive_align, - - _ => self.align(cx.data_layout()) - } - } - /// This is invoked by the `layout_raw` query to record the final /// layout of each type. #[inline] fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, param_env: ty::ParamEnv<'tcx>, - layout: &Layout) { + layout: FullLayout) { // If we are running with `-Zprint-type-sizes`, record layouts for // dumping later. Ignore layouts that are done with non-empty // environments or non-monomorphic layouts, as the user only wants @@ -1773,7 +1787,7 @@ impl<'a, 'tcx> Layout { fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, param_env: ty::ParamEnv<'tcx>, - layout: &Layout) { + layout: FullLayout) { // (delay format until we actually need it) let record = |kind, opt_discr_size, variants| { let type_desc = format!("{:?}", ty); @@ -1843,7 +1857,7 @@ impl<'a, 'tcx> Layout { } }; - match *layout { + match *layout.layout { Layout::Univariant(ref variant_layout) => { let variant_names = || { adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::>() @@ -1888,7 +1902,7 @@ impl<'a, 'tcx> Layout { variant_layout) }) .collect(); - record(adt_kind.into(), match *layout { + record(adt_kind.into(), match *layout.layout { Layout::General { discr, .. 
} => Some(discr.size(tcx)), _ => None }, variant_infos); @@ -2075,13 +2089,6 @@ pub struct FullLayout<'tcx> { pub abi: Abi, } -impl<'tcx> Deref for FullLayout<'tcx> { - type Target = Layout; - fn deref(&self) -> &Layout { - self.layout - } -} - pub trait HasTyCtxt<'tcx>: HasDataLayout { fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>; } @@ -2127,6 +2134,13 @@ impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx let ty = tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all()); let cached = tcx.layout_raw(param_env.reveal_all().and(ty))?; + let layout = FullLayout { + ty, + variant_index: None, + layout: cached.layout, + fields: cached.fields, + abi: cached.abi + }; // NB: This recording is normally disabled; when enabled, it // can however trigger recursive invocations of `layout_of`. @@ -2134,15 +2148,9 @@ impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx // completed, to avoid problems around recursive structures // and the like. (Admitedly, I wasn't able to reproduce a problem // here, but it seems like the right thing to do. -nmatsakis) - Layout::record_layout_for_printing(tcx, ty, param_env, cached.layout); + Layout::record_layout_for_printing(tcx, ty, param_env, layout); - Ok(FullLayout { - ty, - variant_index: None, - layout: cached.layout, - fields: cached.fields, - abi: cached.abi - }) + Ok(layout) } } @@ -2158,6 +2166,13 @@ impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, let ty = tcx_at.tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all()); let cached = tcx_at.layout_raw(param_env.reveal_all().and(ty))?; + let layout = FullLayout { + ty, + variant_index: None, + layout: cached.layout, + fields: cached.fields, + abi: cached.abi + }; // NB: This recording is normally disabled; when enabled, it // can however trigger recursive invocations of `layout_of`. 
@@ -2165,15 +2180,9 @@ impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, // completed, to avoid problems around recursive structures // and the like. (Admitedly, I wasn't able to reproduce a problem // here, but it seems like the right thing to do. -nmatsakis) - Layout::record_layout_for_printing(tcx_at.tcx, ty, param_env, cached.layout); + Layout::record_layout_for_printing(tcx_at.tcx, ty, param_env, layout); - Ok(FullLayout { - ty, - variant_index: None, - layout: cached.layout, - fields: cached.fields, - abi: cached.abi - }) + Ok(layout) } } @@ -2189,27 +2198,29 @@ impl<'a, 'tcx> FullLayout<'tcx> { variants[variant_index].fields.len() }; - let fields = match *self.layout { - Univariant(ref variant) => { - FieldPlacement::Arbitrary { - offsets: &variant.offsets - } - } + let (fields, abi) = match *self.layout { + Univariant(_) => (self.fields, self.abi), NullablePointer { ref variants, .. } | General { ref variants, .. } => { - FieldPlacement::Arbitrary { - offsets: &variants[variant_index].offsets - } + let variant = &variants[variant_index]; + (FieldPlacement::Arbitrary { + offsets: &variant.offsets + }, Abi::Aggregate { + sized: true, + align: variant.align, + primitive_align: variant.primitive_align, + size: variant.stride(), + }) } - _ => FieldPlacement::union(count) + _ => (FieldPlacement::union(count), self.abi) }; FullLayout { variant_index: Some(variant_index), fields, - abi: Abi::Aggregate, + abi, ..*self } } @@ -2313,6 +2324,28 @@ impl<'a, 'tcx> FullLayout<'tcx> { -> C::FullLayout { cx.layout_of(self.field_type_unnormalized(cx.tcx(), i)) } + + /// Returns true if the layout corresponds to an unsized type. 
+ pub fn is_unsized(&self) -> bool { + self.abi.is_unsized() + } + + pub fn size(&self, cx: C) -> Size { + self.abi.size(cx) + } + + pub fn align(&self, cx: C) -> Align { + self.abi.align(cx) + } + + pub fn size_and_align(&self, cx: C) -> (Size, Align) { + self.abi.size_and_align(cx) + } + + /// Returns alignment before repr alignment is applied + pub fn primitive_align(&self, cx: C) -> Align { + self.abi.primitive_align(cx) + } } impl<'gcx> HashStable> for Layout @@ -2411,12 +2444,18 @@ impl<'gcx> HashStable> for Abi { mem::discriminant(self).hash_stable(hcx, hasher); match *self { - Scalar(value) => { + Scalar(ref value) => { value.hash_stable(hcx, hasher); } - Vector => { + Vector { ref element, count } => { + element.hash_stable(hcx, hasher); + count.hash_stable(hcx, hasher); } - Aggregate => { + Aggregate { sized, size, align, primitive_align } => { + sized.hash_stable(hcx, hasher); + size.hash_stable(hcx, hasher); + align.hash_stable(hcx, hasher); + primitive_align.hash_stable(hcx, hasher); } } } diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index 879d2e326d883..b993b161877a0 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -753,7 +753,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences { bug!("failed to get layout for `{}`: {}", t, e) }); - if let Layout::General { ref variants, ref size, discr, .. } = *layout { + if let Layout::General { ref variants, size, discr, .. } = *layout.layout { let discr_size = discr.size(cx.tcx).bytes(); debug!("enum `{}` is {} bytes large with layout:\n{:#?}", diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 93cfd967643f3..712108bf437bc 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -278,8 +278,8 @@ impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { fn is_aggregate(&self) -> bool { match self.abi { layout::Abi::Scalar(_) | - layout::Abi::Vector => false, - layout::Abi::Aggregate => true + layout::Abi::Vector { .. 
} => false, + layout::Abi::Aggregate { .. } => true } } @@ -299,14 +299,14 @@ impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { }) } - layout::Abi::Vector => { + layout::Abi::Vector { .. } => { Some(Reg { kind: RegKind::Vector, size: self.size(ccx) }) } - layout::Abi::Aggregate => { + layout::Abi::Aggregate { .. } => { if let Layout::Array { count, .. } = *self.layout { if count > 0 { return self.field(ccx, 0).homogeneous_aggregate(ccx); @@ -767,7 +767,7 @@ impl<'a, 'tcx> FnType<'tcx> { for ty in inputs.iter().chain(extra_args.iter()) { let mut arg = arg_of(ty, false); - if let ty::layout::FatPointer { .. } = *arg.layout { + if let ty::layout::FatPointer { .. } = *arg.layout.layout { let mut data = ArgType::new(arg.layout.field(ccx, 0)); let mut info = ArgType::new(arg.layout.field(ccx, 1)); @@ -809,7 +809,7 @@ impl<'a, 'tcx> FnType<'tcx> { abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { let fixup = |arg: &mut ArgType<'tcx>| { match arg.layout.abi { - layout::Abi::Aggregate => {} + layout::Abi::Aggregate { .. } => {} _ => return } diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 634dba3660e03..314d929fe8c37 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -71,7 +71,7 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, if let layout::Abi::Scalar(_) = l.abi { return; } - match *l { + match *l.layout { layout::NullablePointer { .. } | layout::General { .. } | layout::UntaggedUnion { .. 
} => { } @@ -101,7 +101,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, if let layout::Abi::Scalar(value) = l.abi { return cx.llvm_type_of(value.to_ty(cx.tcx())); } - match *l { + match *l.layout { layout::Univariant(ref variant) => { match name { None => { diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs index a45fe662bd65d..2766edb59c1d8 100644 --- a/src/librustc_trans/cabi_s390x.rs +++ b/src/librustc_trans/cabi_s390x.rs @@ -29,7 +29,7 @@ fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, match layout.abi { layout::Abi::Scalar(layout::F32) | layout::Abi::Scalar(layout::F64) => true, - layout::Abi::Aggregate => { + layout::Abi::Aggregate { .. } => { if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 { is_single_fp_element(ccx, layout.field(ccx, 0)) } else { diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index bc7c9a3ed0565..7d3621d53e0dd 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -24,7 +24,7 @@ fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, match layout.abi { layout::Abi::Scalar(layout::F32) | layout::Abi::Scalar(layout::F64) => true, - layout::Abi::Aggregate => { + layout::Abi::Aggregate { .. } => { if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 { is_single_fp_element(ccx, layout.field(ccx, 0)) } else { diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index f2208e4909e02..d6d46307a4ff5 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -75,22 +75,22 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) unify(cls, off, reg); } - layout::Abi::Vector => { + layout::Abi::Vector { element, count } => { unify(cls, off, Class::Sse); // everything after the first one is the upper // half of a register. 
- let eltsz = layout.field(ccx, 0).size(ccx); - for i in 1..layout.fields.count() { + let eltsz = element.size(ccx); + for i in 1..count { unify(cls, off + eltsz * (i as u64), Class::SseUp); } } - layout::Abi::Aggregate => { + layout::Abi::Aggregate { .. } => { // FIXME(eddyb) have to work around Rust enums for now. // Fix is either guarantee no data where there is no field, // by putting variants in fields, or be more clever. - match *layout { + match *layout.layout { Layout::General { .. } | Layout::NullablePointer { .. } => return Err(Memory), _ => {} diff --git a/src/librustc_trans/cabi_x86_win64.rs b/src/librustc_trans/cabi_x86_win64.rs index 1d391da5993fe..b27ccc98861aa 100644 --- a/src/librustc_trans/cabi_x86_win64.rs +++ b/src/librustc_trans/cabi_x86_win64.rs @@ -8,32 +8,36 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use abi::{ArgType, FnType, LayoutExt, Reg}; +use abi::{ArgType, FnType, Reg}; use common::CrateContext; -use rustc::ty::layout::Layout; +use rustc::ty::layout; // Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { let fixup = |a: &mut ArgType<'tcx>| { let size = a.layout.size(ccx); - if a.layout.is_aggregate() { - match size.bits() { - 8 => a.cast_to(Reg::i8()), - 16 => a.cast_to(Reg::i16()), - 32 => a.cast_to(Reg::i32()), - 64 => a.cast_to(Reg::i64()), - _ => a.make_indirect(ccx) - }; - } else { - if let Layout::Vector { .. } = *a.layout { + match a.layout.abi { + layout::Abi::Aggregate { .. } => { + match size.bits() { + 8 => a.cast_to(Reg::i8()), + 16 => a.cast_to(Reg::i16()), + 32 => a.cast_to(Reg::i32()), + 64 => a.cast_to(Reg::i64()), + _ => a.make_indirect(ccx) + } + } + layout::Abi::Vector { .. } => { // FIXME(eddyb) there should be a size cap here // (probably what clang calls "illegal vectors"). 
- } else if size.bytes() > 8 { - a.make_indirect(ccx); - } else { - a.extend_integer_width_to(32); + } + layout::Abi::Scalar(_) => { + if size.bytes() > 8 { + a.make_indirect(ccx); + } else { + a.extend_integer_width_to(32); + } } } }; diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index b80fded638d76..bc6ddef0e7206 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -41,7 +41,7 @@ use syntax_pos::{Span, DUMMY_SP}; pub use context::{CrateContext, SharedCrateContext}; pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - if let Layout::FatPointer { .. } = *ccx.layout_of(ty) { + if let Layout::FatPointer { .. } = *ccx.layout_of(ty).layout { true } else { false @@ -51,9 +51,9 @@ pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { let layout = ccx.layout_of(ty); match layout.abi { - layout::Abi::Scalar(_) | layout::Abi::Vector => true, + layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true, - layout::Abi::Aggregate => { + layout::Abi::Aggregate { .. } => { !layout.is_unsized() && layout.size(ccx).bytes() == 0 } } @@ -63,7 +63,7 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { let layout = ccx.layout_of(ty); - match *layout { + match *layout.layout { Layout::FatPointer { .. } => true, Layout::Univariant(ref variant) => { // There must be only 2 fields. 
diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 0e74d98557070..6ab89f5c3eb67 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -941,7 +941,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { let layout = cx.layout_of(self.ty); let tmp; - let offsets = match *layout { + let offsets = match *layout.layout { layout::Univariant(ref variant) => &variant.offsets, layout::Vector { element, count } => { let element_size = element.size(cx).bytes(); @@ -1022,7 +1022,7 @@ impl<'tcx> TupleMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { let layout = cx.layout_of(self.ty); - let offsets = if let layout::Univariant(ref variant) = *layout { + let offsets = if let layout::Univariant(ref variant) = *layout.layout { &variant.offsets } else { bug!("{} is not a tuple", self.ty); @@ -1339,7 +1339,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span: Span) -> (DICompositeType, MemberDescriptionFactory<'tcx>) { let layout = cx.layout_of(enum_type); - let maybe_discr = match *layout { + let maybe_discr = match *layout.layout { layout::General { .. } => Some(layout.field(cx, 0).ty), _ => None, }; @@ -1491,7 +1491,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let type_rep = cx.layout_of(enum_type); - let discriminant_type_metadata = match *type_rep { + let discriminant_type_metadata = match *type_rep.layout { layout::NullablePointer { .. } | layout::Univariant { .. } => None, layout::General { discr, .. 
} => Some(discriminant_type_metadata(discr)), ref l @ _ => bug!("Not an enum layout: {:#?}", l) diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 61c0820539d56..209083a8e25d6 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -58,7 +58,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf let layout = ccx.layout_of(t); debug!("DST {} layout: {:?}", t, layout); - let (sized_size, sized_align) = match *layout { + let (sized_size, sized_align) = match *layout.layout { ty::layout::Layout::Univariant(ref variant) => { (variant.offsets.last().map_or(0, |o| o.bytes()), variant.align.abi()) } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 4f7c91efccdc2..c64333fc04435 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1090,7 +1090,7 @@ fn trans_const_adt<'a, 'tcx>( mir::AggregateKind::Adt(_, index, _, _) => index, _ => 0, }; - match *l { + match *l.layout { layout::General { ref variants, .. } => { let discr = match *kind { mir::AggregateKind::Adt(adt_def, _, _, _) => { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 325ccd4fde34b..00c76bee1a8e4 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -208,10 +208,10 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let field = l.field(ccx, ix); let offset = l.fields.offset(ix).bytes(); - let alignment = self.alignment | Alignment::from(&*l); + let alignment = self.alignment | Alignment::from(l.layout); // Unions and newtypes only use an offset of 0. - match *l { + match *l.layout { // FIXME(eddyb) The fields of a fat pointer aren't correct, especially // to unsized structs, we can't represent their pointee types in `Ty`. Layout::FatPointer { .. } => {} @@ -234,7 +234,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } // Discriminant field of enums. - match *l { + match *l.layout { layout::NullablePointer { .. 
} if l.variant_index.is_none() => { let ty = ccx.llvm_type_of(field.ty); let size = field.size(ccx).bytes(); @@ -271,7 +271,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { }; // Check whether the variant being used is packed, if applicable. - let is_packed = match (&*l, l.variant_index) { + let is_packed = match (l.layout, l.variant_index) { (&layout::Univariant(ref variant), _) => variant.packed, (&layout::NullablePointer { ref variants, .. }, Some(v)) | (&layout::General { ref variants, .. }, Some(v)) => variants[v].packed, @@ -354,7 +354,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let l = bcx.ccx.layout_of(self.ty.to_ty(bcx.tcx())); let cast_to = bcx.ccx.immediate_llvm_type_of(cast_to); - match *l { + match *l.layout { layout::Univariant { .. } | layout::UntaggedUnion { .. } => return C_uint(cast_to, 0), _ => {} @@ -366,7 +366,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { layout::Abi::Scalar(discr) => discr, _ => bug!("discriminant not scalar: {:#?}", discr_layout) }; - let (min, max) = match *l { + let (min, max) = match *l.layout { layout::General { ref discr_range, .. } => (discr_range.start, discr_range.end), _ => (0, u64::max_value()), }; @@ -392,7 +392,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { bcx.load(discr.llval, discr.alignment.non_abi()) } }; - match *l { + match *l.layout { layout::General { .. } => { let signed = match discr_scalar { layout::Int(_, signed) => signed, @@ -416,7 +416,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let to = l.ty.ty_adt_def().unwrap() .discriminant_for_variant(bcx.tcx(), variant_index) .to_u128_unchecked() as u64; - match *l { + match *l.layout { layout::General { .. } => { let ptr = self.project_field(bcx, 0); bcx.store(C_int(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx())), to as i64), @@ -471,7 +471,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // If this is an enum, cast to the appropriate variant struct type. let layout = bcx.ccx.layout_of(ty).for_variant(variant_index); - match *layout { + match *layout.layout { layout::NullablePointer { ref variants, .. 
} | layout::General { ref variants, .. } => { let st = &variants[variant_index]; diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index b1a9be881f789..a71bcf4783801 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -576,7 +576,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, }; let layout = bcx.ccx.layout_of(closure_ty); - let offsets = match *layout { + let offsets = match *layout.layout { layout::Univariant(ref variant) => &variant.offsets[..], _ => bug!("Closures are only supposed to be Univariant") }; diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index b7143f23691d9..518f36a77b5fc 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -277,7 +277,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let llval = operand.immediate(); let l = bcx.ccx.layout_of(operand.ty); - if let Layout::General { ref discr_range, .. } = *l { + if let Layout::General { ref discr_range, .. } = *l.layout { if discr_range.end > discr_range.start { // We want `table[e as usize]` to not // have bound checks, and this is the most diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index eb52d58098d0f..b829d33600c82 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -240,7 +240,7 @@ impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { if let layout::Abi::Scalar(_) = self.abi { bug!("FullLayout::llvm_field_index({:?}): not applicable", self); } - match **self { + match *self.layout { Layout::Scalar { .. } | Layout::UntaggedUnion { .. } => { bug!("FullLayout::llvm_field_index({:?}): not applicable", self) From ed788a62f62db010f3e92ec4756728151af368a2 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 17 Sep 2017 19:34:28 +0300 Subject: [PATCH 29/69] rustc: store CachedLayout for each variant of enum Layout's instead of Struct. 
--- src/librustc/ty/context.rs | 6 +- src/librustc/ty/layout.rs | 54 ++++----- src/librustc_lint/types.rs | 2 +- src/librustc_trans/adt.rs | 12 +- src/librustc_trans/debuginfo/metadata.rs | 143 +++++++++-------------- src/librustc_trans/mir/constant.rs | 27 +++-- src/librustc_trans/mir/lvalue.rs | 55 ++++----- src/librustc_trans/type_of.rs | 13 +-- 8 files changed, 134 insertions(+), 178 deletions(-) diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 22a3edd200c4f..9ad5e07d8feab 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -78,7 +78,7 @@ use hir; /// Internal storage pub struct GlobalArenas<'tcx> { // internings - layout: TypedArena, + layout: TypedArena>, // references generics: TypedArena, @@ -918,7 +918,7 @@ pub struct GlobalCtxt<'tcx> { stability_interner: RefCell>, - layout_interner: RefCell>, + layout_interner: RefCell>>, /// A vector of every trait accessible in the whole crate /// (i.e. including those from subcrates). This is used only for @@ -1016,7 +1016,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { interned } - pub fn intern_layout(self, layout: Layout) -> &'gcx Layout { + pub fn intern_layout(self, layout: Layout<'gcx>) -> &'gcx Layout<'gcx> { if let Some(layout) = self.layout_interner.borrow().get(&layout) { return layout; } diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index eb22b6f2ce990..096c74a6163cf 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1004,7 +1004,7 @@ pub const FAT_PTR_ADDR: usize = 0; pub const FAT_PTR_EXTRA: usize = 1; /// Describes how the fields of a type are located in memory. -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum FieldPlacement<'a> { /// Array-like placement. Can also express /// unions, by using a stride of zero bytes. 
@@ -1058,7 +1058,7 @@ impl<'a> FieldPlacement<'a> { /// Describes how values of the type are passed by target ABIs, /// in terms of categories of C types there are ABI rules for. -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum Abi { Scalar(Primitive), Vector { @@ -1141,8 +1141,8 @@ impl Abi { /// For ADTs, it also includes field placement and enum optimizations. /// NOTE: Because Layout is interned, redundant information should be /// kept to a minimum, e.g. it includes no sub-component Ty or Layout. -#[derive(Debug, PartialEq, Eq, Hash)] -pub enum Layout { +#[derive(PartialEq, Eq, Hash, Debug)] +pub enum Layout<'a> { /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr. Scalar(Primitive), @@ -1184,7 +1184,7 @@ pub enum Layout { // the largest space between two consecutive discriminants and // taking everything else as the (shortest) discriminant range. discr_range: RangeInclusive, - variants: Vec, + variants: Vec>, size: Size, align: Align, primitive_align: Align, @@ -1202,7 +1202,7 @@ pub enum Layout { nndiscr: u64, discr: Primitive, discr_offset: Size, - variants: Vec, + variants: Vec>, size: Size, align: Align, primitive_align: Align, @@ -1228,9 +1228,9 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { } } -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct CachedLayout<'tcx> { - pub layout: &'tcx Layout, + pub layout: &'tcx Layout<'tcx>, pub fields: FieldPlacement<'tcx>, pub abi: Abi, } @@ -1262,7 +1262,7 @@ pub fn provide(providers: &mut ty::maps::Providers) { }; } -impl<'a, 'tcx> Layout { +impl<'a, 'tcx> Layout<'tcx> { fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) @@ -1624,7 +1624,9 @@ impl<'a, 'tcx> Layout { size: st[discr].stride(), align, primitive_align, - variants: st, + variants: st.into_iter().map(|variant| { + success(Univariant(variant)) + }).collect::, _>>()?, }); } } @@ -1730,7 +1732,9 @@ impl<'a, 
'tcx> Layout { // FIXME: should be u128? discr_range: (min as u64)..=(max as u64), - variants, + variants: variants.into_iter().map(|variant| { + success(Univariant(variant)) + }).collect::, _>>()?, size, align, primitive_align, @@ -1897,6 +1901,10 @@ impl<'a, 'tcx> Layout { .iter() .map(|f| (f.name, f.ty(tcx, substs))) .collect(); + let variant_layout = match *variant_layout.layout { + Univariant(ref variant) => variant, + _ => bug!() + }; build_variant_info(Some(variant_def.name), &fields, variant_layout) @@ -2084,7 +2092,7 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { pub struct FullLayout<'tcx> { pub ty: Ty<'tcx>, pub variant_index: Option, - pub layout: &'tcx Layout, + pub layout: &'tcx Layout<'tcx>, pub fields: FieldPlacement<'tcx>, pub abi: Abi, } @@ -2198,27 +2206,22 @@ impl<'a, 'tcx> FullLayout<'tcx> { variants[variant_index].fields.len() }; - let (fields, abi) = match *self.layout { - Univariant(_) => (self.fields, self.abi), + let (layout, fields, abi) = match *self.layout { + Univariant(_) => (self.layout, self.fields, self.abi), NullablePointer { ref variants, .. } | General { ref variants, .. 
} => { - let variant = &variants[variant_index]; - (FieldPlacement::Arbitrary { - offsets: &variant.offsets - }, Abi::Aggregate { - sized: true, - align: variant.align, - primitive_align: variant.primitive_align, - size: variant.stride(), - }) + let variant = variants[variant_index]; + (variant.layout, variant.fields, variant.abi) } - _ => (FieldPlacement::union(count), self.abi) + _ => bug!() }; + assert_eq!(fields.count(), count); FullLayout { variant_index: Some(variant_index), + layout, fields, abi, ..*self @@ -2348,8 +2351,7 @@ impl<'a, 'tcx> FullLayout<'tcx> { } } -impl<'gcx> HashStable> for Layout -{ +impl<'gcx> HashStable> for Layout<'gcx> { fn hash_stable(&self, hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index b993b161877a0..f59b372e7d531 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -764,7 +764,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences { .zip(variants) .map(|(variant, variant_layout)| { // Subtract the size of the enum discriminant - let bytes = variant_layout.min_size + let bytes = variant_layout.abi.size(cx.tcx) .bytes() .saturating_sub(discr_size); diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 314d929fe8c37..ff66090dc8c16 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -87,7 +87,7 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } else { l }; - llty.set_struct_body(&struct_llfields(cx, variant_layout, variant), variant.packed) + llty.set_struct_body(&struct_llfields(cx, variant_layout), variant.packed) }, _ => bug!("This function cannot handle {} with layout {:#?}", t, l) } @@ -105,8 +105,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, layout::Univariant(ref variant) => { match name { None => { - Type::struct_(cx, &struct_llfields(cx, l, &variant), - variant.packed) + Type::struct_(cx, &struct_llfields(cx, l), 
variant.packed) } Some(name) => { Type::named_struct(cx, name) @@ -166,8 +165,11 @@ pub fn memory_index_to_gep(index: u64) -> u64 { } pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - layout: FullLayout<'tcx>, - variant: &layout::Struct) -> Vec { + layout: FullLayout<'tcx>) -> Vec { + let variant = match *layout.layout { + layout::Univariant(ref variant) => variant, + _ => bug!("unexpected {:#?}", layout) + }; let field_count = layout.fields.count(); debug!("struct_llfields: variant: {:?}", variant); let mut offset = Size::from_bytes(0); diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 6ab89f5c3eb67..89f1bb6fd5112 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1153,37 +1153,33 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { layout::General { ref variants, .. } => { let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata .expect("")); - variants - .iter() - .enumerate() - .map(|(i, struct_def)| { - let (variant_type_metadata, member_desc_factory) = - describe_enum_variant(cx, - self.enum_type, - struct_def, - i, - &adt.variants[i], - discriminant_info, - self.containing_scope, - self.span); - - let member_descriptions = member_desc_factory - .create_member_descriptions(cx); - - set_members_of_composite_type(cx, - variant_type_metadata, - &member_descriptions); - MemberDescription { - name: "".to_string(), - type_metadata: variant_type_metadata, - offset: Size::from_bytes(0), - size: struct_def.stride(), - align: struct_def.align, - flags: DIFlags::FlagZero - } - }).collect() + (0..variants.len()).map(|i| { + let variant = self.type_rep.for_variant(i); + let (variant_type_metadata, member_desc_factory) = + describe_enum_variant(cx, + variant, + &adt.variants[i], + discriminant_info, + self.containing_scope, + self.span); + + let member_descriptions = member_desc_factory + .create_member_descriptions(cx); + + 
set_members_of_composite_type(cx, + variant_type_metadata, + &member_descriptions); + MemberDescription { + name: "".to_string(), + type_metadata: variant_type_metadata, + offset: Size::from_bytes(0), + size: variant.size(cx), + align: variant.align(cx), + flags: DIFlags::FlagZero + } + }).collect() }, - layout::Univariant(ref variant) => { + layout::Univariant(_) => { assert!(adt.variants.len() <= 1); if adt.variants.is_empty() { @@ -1191,9 +1187,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } else { let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, - self.enum_type, - variant, - 0, + self.type_rep, &adt.variants[0], NoDiscriminant, self.containing_scope, @@ -1210,8 +1204,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { name: "".to_string(), type_metadata: variant_type_metadata, offset: Size::from_bytes(0), - size: variant.stride(), - align: variant.align, + size: self.type_rep.size(cx), + align: self.type_rep.align(cx), flags: DIFlags::FlagZero } ] @@ -1221,16 +1215,13 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { nndiscr, discr, discr_offset, - ref variants, .. } => { - let struct_def = &variants[nndiscr as usize]; + let variant = self.type_rep.for_variant(nndiscr as usize); // Create a description of the non-null variant let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, - self.enum_type, - struct_def, - nndiscr as usize, + variant, &adt.variants[nndiscr as usize], OptimizedDiscriminant, self.containing_scope, @@ -1278,8 +1269,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { name, type_metadata: variant_type_metadata, offset: Size::from_bytes(0), - size: struct_def.stride(), - align: struct_def.align, + size: variant.size(cx), + align: variant.align(cx), flags: DIFlags::FlagZero } ] @@ -1330,78 +1321,48 @@ enum EnumDiscriminantInfo { // descriptions of the fields of the variant. This is a rudimentary version of a // full RecursiveTypeDescription. 
fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - enum_type: Ty<'tcx>, - struct_def: &'tcx layout::Struct, - variant_index: usize, + layout: layout::FullLayout<'tcx>, variant: &'tcx ty::VariantDef, discriminant_info: EnumDiscriminantInfo, containing_scope: DIScope, span: Span) -> (DICompositeType, MemberDescriptionFactory<'tcx>) { - let layout = cx.layout_of(enum_type); - let maybe_discr = match *layout.layout { - layout::General { .. } => Some(layout.field(cx, 0).ty), - _ => None, - }; - - let layout = layout.for_variant(variant_index); - let mut field_tys = (0..layout.fields.count()).map(|i| { - layout.field(cx, i).ty - }).collect::>(); - - if let Some(discr) = maybe_discr { - field_tys.insert(0, discr); - } - - // Could do some consistency checks here: size, align, field count, discr type - let variant_name = variant.name.as_str(); let unique_type_id = debug_context(cx).type_map .borrow_mut() .get_unique_type_id_of_enum_variant( cx, - enum_type, + layout.ty, &variant_name); let metadata_stub = create_struct_stub(cx, - enum_type, + layout.ty, &variant_name, unique_type_id, containing_scope); - // Get the argument names from the enum variant info - let mut arg_names: Vec<_> = match variant.ctor_kind { - CtorKind::Const => vec![], - CtorKind::Fn => { - variant.fields - .iter() - .enumerate() - .map(|(i, _)| format!("__{}", i)) - .collect() - } - CtorKind::Fictive => { - variant.fields - .iter() - .map(|f| f.name.to_string()) - .collect() - } - }; - // If this is not a univariant enum, there is also the discriminant field. 
- let mut offsets = struct_def.offsets.clone(); - match discriminant_info { + let (discr_offset, discr_arg) = match discriminant_info { RegularDiscriminant(_) => { - arg_names.insert(0, "RUST$ENUM$DISR".to_string()); - offsets.insert(0, Size::from_bytes(0)); + let enum_layout = cx.layout_of(layout.ty); + (Some(enum_layout.fields.offset(0)), + Some(("RUST$ENUM$DISR".to_string(), enum_layout.field(cx, 0).ty))) } - _ => { /* do nothing */ } + _ => (None, None), }; + let offsets = discr_offset.into_iter().chain((0..layout.fields.count()).map(|i| { + layout.fields.offset(i) + })).collect(); // Build an array of (field name, field type) pairs to be captured in the factory closure. - let args: Vec<(String, Ty)> = arg_names.iter() - .zip(field_tys.iter()) - .map(|(s, &t)| (s.to_string(), t)) - .collect(); + let args = discr_arg.into_iter().chain((0..layout.fields.count()).map(|i| { + let name = if variant.ctor_kind == CtorKind::Fn { + format!("__{}", i) + } else { + variant.fields[i].name.to_string() + }; + (name, layout.field(cx, i).ty) + })).collect(); let member_description_factory = VariantMDF(VariantMemberDescriptionFactory { diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index c64333fc04435..8924fc3b5acb8 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1091,7 +1091,7 @@ fn trans_const_adt<'a, 'tcx>( _ => 0, }; match *l.layout { - layout::General { ref variants, .. } => { + layout::General { .. 
} => { let discr = match *kind { mir::AggregateKind::Adt(adt_def, _, _, _) => { adt_def.discriminant_for_variant(ccx.tcx(), variant_index) @@ -1105,7 +1105,7 @@ fn trans_const_adt<'a, 'tcx>( if let layout::Abi::Scalar(_) = l.abi { discr } else { - build_const_struct(ccx, l, &variants[variant_index], vals, Some(discr)) + build_const_struct(ccx, l.for_variant(variant_index), vals, Some(discr)) } } layout::UntaggedUnion(ref un) => { @@ -1117,16 +1117,16 @@ fn trans_const_adt<'a, 'tcx>( Const::new(C_struct(ccx, &contents, un.packed), t) } - layout::Univariant(ref variant) => { + layout::Univariant(_) => { assert_eq!(variant_index, 0); - build_const_struct(ccx, l, &variant, vals, None) + build_const_struct(ccx, l, vals, None) } layout::Vector { .. } => { Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::>()), t) } - layout::NullablePointer { ref variants, nndiscr, .. } => { + layout::NullablePointer { nndiscr, .. } => { if variant_index as u64 == nndiscr { - build_const_struct(ccx, l, &variants[variant_index], vals, None) + build_const_struct(ccx, l.for_variant(variant_index), vals, None) } else { // Always use null even if it's not the `discrfield`th // field; see #8506. @@ -1147,24 +1147,27 @@ fn trans_const_adt<'a, 'tcx>( /// will read the wrong memory. 
fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, layout: layout::FullLayout<'tcx>, - st: &layout::Struct, vals: &[Const<'tcx>], discr: Option>) -> Const<'tcx> { - assert_eq!(vals.len(), st.offsets.len()); + assert_eq!(vals.len(), layout.fields.count()); // offset of current value let mut offset = Size::from_bytes(0); let mut cfields = Vec::new(); - cfields.reserve(discr.is_some() as usize + 1 + st.offsets.len() * 2); + cfields.reserve(discr.is_some() as usize + 1 + layout.fields.count() * 2); if let Some(discr) = discr { cfields.push(discr.llval); offset = ccx.size_of(discr.ty); } + let st = match *layout.layout { + layout::Univariant(ref variant) => variant, + _ => bug!("unexpected {:#?}", layout) + }; let parts = st.field_index_by_increasing_offset().map(|i| { - (vals[i], st.offsets[i]) + (vals[i], layout.fields.offset(i)) }); for (val, target_offset) in parts { cfields.push(padding(ccx, target_offset - offset)); @@ -1172,8 +1175,8 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, offset = target_offset + ccx.size_of(val.ty); } - let size = layout.size(ccx); - cfields.push(padding(ccx, size - offset)); + // Pad to the size of the whole type, not e.g. the variant. + cfields.push(padding(ccx, ccx.size_of(layout.ty) - offset)); Const::new(C_struct(ccx, &cfields, st.packed), layout.ty) } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 00c76bee1a8e4..ab31bcde52aeb 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -55,7 +55,7 @@ impl ops::BitOr for Alignment { } } -impl<'a> From<&'a Layout> for Alignment { +impl<'a> From<&'a Layout<'a>> for Alignment { fn from(layout: &Layout) -> Self { let (packed, align) = match *layout { Layout::UntaggedUnion(ref un) => (un.packed, un.align), @@ -234,27 +234,24 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } // Discriminant field of enums. - match *l.layout { - layout::NullablePointer { .. 
} if l.variant_index.is_none() => { - let ty = ccx.llvm_type_of(field.ty); - let size = field.size(ccx).bytes(); - - // If the discriminant is not on a multiple of the primitive's size, - // we need to go through i8*. Also assume the worst alignment. - if offset % size != 0 { - let byte_ptr = bcx.pointercast(self.llval, Type::i8p(ccx)); - let byte_ptr = bcx.inbounds_gep(byte_ptr, &[C_usize(ccx, offset)]); - let byte_align = Alignment::Packed(Align::from_bytes(1, 1).unwrap()); - return LvalueRef::new_sized( - bcx.pointercast(byte_ptr, ty.ptr_to()), field.ty, byte_align); - } - - let discr_ptr = bcx.pointercast(self.llval, ty.ptr_to()); + if let layout::NullablePointer { .. } = *l.layout { + let ty = ccx.llvm_type_of(field.ty); + let size = field.size(ccx).bytes(); + + // If the discriminant is not on a multiple of the primitive's size, + // we need to go through i8*. Also assume the worst alignment. + if offset % size != 0 { + let byte_ptr = bcx.pointercast(self.llval, Type::i8p(ccx)); + let byte_ptr = bcx.inbounds_gep(byte_ptr, &[C_usize(ccx, offset)]); + let byte_align = Alignment::Packed(Align::from_bytes(1, 1).unwrap()); return LvalueRef::new_sized( - bcx.inbounds_gep(discr_ptr, &[C_usize(ccx, offset / size)]), - field.ty, alignment); + bcx.pointercast(byte_ptr, ty.ptr_to()), field.ty, byte_align); } - _ => {} + + let discr_ptr = bcx.pointercast(self.llval, ty.ptr_to()); + return LvalueRef::new_sized( + bcx.inbounds_gep(discr_ptr, &[C_usize(ccx, offset / size)]), + field.ty, alignment); } let simple = || { @@ -271,10 +268,8 @@ impl<'a, 'tcx> LvalueRef<'tcx> { }; // Check whether the variant being used is packed, if applicable. - let is_packed = match (l.layout, l.variant_index) { - (&layout::Univariant(ref variant), _) => variant.packed, - (&layout::NullablePointer { ref variants, .. }, Some(v)) | - (&layout::General { ref variants, .. 
}, Some(v)) => variants[v].packed, + let is_packed = match *l.layout { + layout::Univariant(ref variant) => variant.packed, _ => return simple() }; @@ -470,13 +465,13 @@ impl<'a, 'tcx> LvalueRef<'tcx> { }; // If this is an enum, cast to the appropriate variant struct type. - let layout = bcx.ccx.layout_of(ty).for_variant(variant_index); - match *layout.layout { - layout::NullablePointer { ref variants, .. } | - layout::General { ref variants, .. } => { - let st = &variants[variant_index]; + let layout = bcx.ccx.layout_of(ty); + let variant_layout = layout.for_variant(variant_index); + match (layout.layout, variant_layout.layout) { + (&layout::NullablePointer { .. }, &layout::Univariant(ref st)) | + (&layout::General { .. }, &layout::Univariant(ref st)) => { let variant_ty = Type::struct_(bcx.ccx, - &adt::struct_llfields(bcx.ccx, layout, st), st.packed); + &adt::struct_llfields(bcx.ccx, variant_layout), st.packed); downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); } _ => {} diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index b829d33600c82..679632d91133c 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -242,7 +242,9 @@ impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { } match *self.layout { Layout::Scalar { .. } | - Layout::UntaggedUnion { .. } => { + Layout::UntaggedUnion { .. } | + Layout::NullablePointer { .. } | + Layout::General { .. } => { bug!("FullLayout::llvm_field_index({:?}): not applicable", self) } @@ -258,15 +260,6 @@ impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { Layout::Univariant(ref variant) => { adt::memory_index_to_gep(variant.memory_index[index] as u64) } - - Layout::NullablePointer { ref variants, .. } | - Layout::General { ref variants, .. 
} => { - if let Some(v) = self.variant_index { - adt::memory_index_to_gep(variants[v].memory_index[index] as u64) - } else { - bug!("FullLayout::llvm_field_index({:?}): not applicable", self) - } - } } } } From 08f9f134fd9d790c99ffab590fee264bd062f599 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 17 Sep 2017 23:37:18 +0300 Subject: [PATCH 30/69] rustc: hide details in Layout in favor of Abi or FieldPlacement. --- src/librustc/ty/layout.rs | 618 ++++++++++------------- src/librustc_lint/types.rs | 4 +- src/librustc_trans/abi.rs | 4 +- src/librustc_trans/adt.rs | 112 ++-- src/librustc_trans/common.rs | 4 +- src/librustc_trans/debuginfo/metadata.rs | 24 +- src/librustc_trans/glue.rs | 14 +- src/librustc_trans/mir/constant.rs | 14 +- src/librustc_trans/mir/lvalue.rs | 34 +- src/librustc_trans/mir/mod.rs | 9 +- 10 files changed, 353 insertions(+), 484 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 096c74a6163cf..13d3ec68a31b3 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -627,27 +627,27 @@ impl<'a, 'tcx> Primitive { #[derive(PartialEq, Eq, Hash, Debug)] pub struct Struct { /// Maximum alignment of fields and repr alignment. - pub align: Align, + align: Align, /// Primitive alignment of fields without repr alignment. - pub primitive_align: Align, + primitive_align: Align, /// If true, no alignment padding is used. - pub packed: bool, + packed: bool, /// If true, the size is exact, otherwise it's only a lower bound. - pub sized: bool, + sized: bool, /// Offsets for the first byte of each field, ordered to match the source definition order. /// This vector does not go in increasing order. /// FIXME(eddyb) use small vector optimization for the common case. - pub offsets: Vec, + offsets: Vec, /// Maps source order field indices to memory order indices, depending how fields were permuted. /// FIXME (camlorn) also consider small vector optimization here. 
pub memory_index: Vec, - pub min_size: Size, + min_size: Size, } /// Info required to optimize struct layout. @@ -799,7 +799,7 @@ impl<'a, 'tcx> Struct { } /// Get the size with trailing alignment padding. - pub fn stride(&self) -> Size { + fn stride(&self) -> Size { self.min_size.abi_align(self.align) } @@ -837,11 +837,11 @@ impl<'a, 'tcx> Struct { layout: FullLayout<'tcx>) -> Result, LayoutError<'tcx>> { let cx = (tcx, param_env); - match (layout.layout, &layout.ty.sty) { - (&Scalar(Pointer), _) if !layout.ty.is_unsafe_ptr() => { + match (layout.layout, layout.abi, &layout.ty.sty) { + (&Scalar, Abi::Scalar(Pointer), _) if !layout.ty.is_unsafe_ptr() => { Ok(Some((Size::from_bytes(0), Pointer))) } - (&General { discr, .. }, &ty::TyAdt(def, _)) => { + (&General { discr, .. }, _, &ty::TyAdt(def, _)) => { if def.discriminants(tcx).all(|d| d.to_u128_unchecked() != 0) { Ok(Some((layout.fields.offset(0), discr))) } else { @@ -849,18 +849,18 @@ impl<'a, 'tcx> Struct { } } - (&FatPointer(_), _) if !layout.ty.is_unsafe_ptr() => { + (&FatPointer, _, _) if !layout.ty.is_unsafe_ptr() => { Ok(Some((layout.fields.offset(FAT_PTR_ADDR), Pointer))) } // Is this the NonZero lang item wrapping a pointer or integer type? - (_, &ty::TyAdt(def, _)) if Some(def.did) == tcx.lang_items().non_zero() => { + (_, _, &ty::TyAdt(def, _)) if Some(def.did) == tcx.lang_items().non_zero() => { let field = layout.field(cx, 0)?; - match *field.layout { - Scalar(value) => { + match (field.layout, field.abi) { + (&Scalar, Abi::Scalar(value)) => { Ok(Some((layout.fields.offset(0), value))) } - FatPointer(_) => { + (&FatPointer, _) => { Ok(Some((layout.fields.offset(0) + field.fields.offset(FAT_PTR_ADDR), Pointer))) @@ -870,7 +870,7 @@ impl<'a, 'tcx> Struct { } // Perhaps one of the fields is non-zero, let's recurse and find out. 
- (&Univariant(ref variant), _) => { + (&Univariant(ref variant), _, _) => { variant.non_zero_field( tcx, param_env, @@ -879,7 +879,7 @@ impl<'a, 'tcx> Struct { // Is this a fixed-size array of something non-zero // with at least one element? - (_, &ty::TyArray(ety, mut count)) => { + (_, _, &ty::TyArray(ety, mut count)) => { if count.has_projections() { count = tcx.normalize_associated_type_in_env(&count, param_env); if count.has_projections() { @@ -893,7 +893,7 @@ impl<'a, 'tcx> Struct { } } - (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => { + (_, _, &ty::TyProjection(_)) | (_, _, &ty::TyAnon(..)) => { bug!("Struct::non_zero_field_in_type: {:?} not normalized", layout); } @@ -920,79 +920,6 @@ impl<'a, 'tcx> Struct { } } -/// An untagged union. -#[derive(PartialEq, Eq, Hash, Debug)] -pub struct Union { - pub align: Align, - pub primitive_align: Align, - - pub min_size: Size, - - /// If true, no alignment padding is used. - pub packed: bool, -} - -impl<'a, 'tcx> Union { - fn new(dl: &TargetDataLayout, repr: &ReprOptions) -> Union { - if repr.packed() && repr.align > 0 { - bug!("Union cannot be packed and aligned"); - } - - let primitive_align = if repr.packed() { - dl.i8_align - } else { - dl.aggregate_align - }; - - let align = if repr.align > 0 { - let repr_align = repr.align as u64; - debug!("Union::new repr_align: {:?}", repr_align); - primitive_align.max(Align::from_bytes(repr_align, repr_align).unwrap()) - } else { - primitive_align - }; - - Union { - align, - primitive_align, - min_size: Size::from_bytes(0), - packed: repr.packed(), - } - } - - /// Extend the Union with more fields. 
- fn extend(&mut self, dl: &TargetDataLayout, - fields: I, - scapegoat: Ty<'tcx>) - -> Result<(), LayoutError<'tcx>> - where I: Iterator, LayoutError<'tcx>>> { - for (index, field) in fields.enumerate() { - let field = field?; - if field.is_unsized() { - bug!("Union::extend: field #{} of `{}` is unsized", - index, scapegoat); - } - - debug!("Union::extend field: {:?} {:?}", field, field.size(dl)); - - if !self.packed { - self.align = self.align.max(field.align(dl)); - self.primitive_align = self.primitive_align.max(field.primitive_align(dl)); - } - self.min_size = cmp::max(self.min_size, field.size(dl)); - } - - debug!("Union::extend min-size: {:?}", self.min_size); - - Ok(()) - } - - /// Get the size with trailing alignment padding. - pub fn stride(&self) -> Size { - self.min_size.abi_align(self.align) - } -} - /// The first half of a fat pointer. /// - For a trait object, this is the address of the box. /// - For a slice, this is the base address. @@ -1068,6 +995,7 @@ pub enum Abi { Aggregate { /// If true, the size is exact, otherwise it's only a lower bound. sized: bool, + packed: bool, align: Align, primitive_align: Align, size: Size @@ -1078,11 +1006,19 @@ impl Abi { /// Returns true if the layout corresponds to an unsized type. pub fn is_unsized(&self) -> bool { match *self { - Abi::Scalar(_) | Abi::Vector {..} => false, + Abi::Scalar(_) | Abi::Vector { .. } => false, Abi::Aggregate { sized, .. } => !sized } } + /// Returns true if the fields of the layout are packed. + pub fn is_packed(&self) -> bool { + match *self { + Abi::Scalar(_) | Abi::Vector { .. } => false, + Abi::Aggregate { packed, .. } => packed + } + } + pub fn size(&self, cx: C) -> Size { let dl = cx.data_layout(); @@ -1144,26 +1080,16 @@ impl Abi { #[derive(PartialEq, Eq, Hash, Debug)] pub enum Layout<'a> { /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr. - Scalar(Primitive), + Scalar, /// SIMD vectors, from structs marked with #[repr(simd)]. 
- Vector { - element: Primitive, - count: u64 - }, + Vector, /// TyArray, TySlice or TyStr. - Array { - /// If true, the size is exact, otherwise it's only a lower bound. - sized: bool, - align: Align, - primitive_align: Align, - element_size: Size, - count: u64 - }, + Array, /// TyRawPtr or TyRef with a !Sized pointee. The primitive is the metadata. - FatPointer(Primitive), + FatPointer, // Remaining variants are all ADTs such as structs, enums or tuples. @@ -1171,7 +1097,7 @@ pub enum Layout<'a> { Univariant(Struct), /// Untagged unions. - UntaggedUnion(Union), + UntaggedUnion, /// General-case enums: for each case there is a struct, and they all have /// all space reserved for the discriminant, and their first field starts @@ -1185,9 +1111,6 @@ pub enum Layout<'a> { // taking everything else as the (shortest) discriminant range. discr_range: RangeInclusive, variants: Vec>, - size: Size, - align: Align, - primitive_align: Align, }, /// Two cases distinguished by a nullable pointer: the case with discriminant @@ -1203,9 +1126,6 @@ pub enum Layout<'a> { discr: Primitive, discr_offset: Size, variants: Vec>, - size: Size, - align: Align, - primitive_align: Align, } } @@ -1269,159 +1189,98 @@ impl<'a, 'tcx> Layout<'tcx> { -> Result, LayoutError<'tcx>> { let cx = (tcx, param_env); let dl = cx.data_layout(); - let success = |layout| { - let layout = tcx.intern_layout(layout); + let scalar = |value| { + CachedLayout { + layout: &Layout::Scalar, + fields: FieldPlacement::union(0), + abi: Abi::Scalar(value) + } + }; + let univariant = |st| { + let layout = tcx.intern_layout(Layout::Univariant(st)); let fields = match *layout { - Scalar(_) => { - FieldPlacement::union(0) - } - - Vector { element, count } => { - FieldPlacement::Linear { - stride: element.size(tcx), - count - } - } - - Array { element_size, count, .. } => { - FieldPlacement::Linear { - stride: element_size, - count - } - } - - FatPointer { .. 
} => { - FieldPlacement::Linear { - stride: Pointer.size(tcx), - count: 2 - } - } - Univariant(ref variant) => { FieldPlacement::Arbitrary { offsets: &variant.offsets } } - - UntaggedUnion(_) => { - // Handle unions through the type rather than Layout. - let def = ty.ty_adt_def().unwrap(); - FieldPlacement::union(def.struct_variant().fields.len()) - } - - General { .. } => FieldPlacement::union(1), - - NullablePointer { ref discr_offset, .. } => { - FieldPlacement::Arbitrary { - offsets: ref_slice(discr_offset) - } - } + _ => bug!() }; let abi = match *layout { - Scalar(value) => Abi::Scalar(value), - Vector { element, count } => Abi::Vector { element, count }, - - Array { sized, align, primitive_align, element_size, count, .. } => { - let size = match element_size.checked_mul(count, dl) { - Some(size) => size, - None => return Err(LayoutError::SizeOverflow(ty)) - }; - Abi::Aggregate { - sized, - align, - primitive_align, - size - } - } - - FatPointer(metadata) => { - // Effectively a (ptr, meta) tuple. - let align = Pointer.align(dl).max(metadata.align(dl)); - Abi::Aggregate { - sized: true, - align, - primitive_align: align, - size: (Pointer.size(dl).abi_align(metadata.align(dl)) + - metadata.size(dl)) - .abi_align(align) - } - } - Univariant(ref st) => { Abi::Aggregate { sized: st.sized, + packed: st.packed, align: st.align, primitive_align: st.primitive_align, size: st.stride() } } - - UntaggedUnion(ref un ) => { - Abi::Aggregate { - sized: true, - align: un.align, - primitive_align: un.primitive_align, - size: un.stride() - } - } - - General { discr, align, primitive_align, size, .. } | - NullablePointer { discr, align, primitive_align, size, .. 
} => { - if fields.offset(0).bytes() == 0 && discr.size(cx) == size { - Abi::Scalar(discr) - } else { - Abi::Aggregate { - sized: true, - align, - primitive_align, - size - } - } - } + _ => bug!() }; - Ok(CachedLayout { + CachedLayout { layout, fields, abi - }) + } }; assert!(!ty.has_infer_types()); let ptr_layout = |pointee: Ty<'tcx>| { let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env); if pointee.is_sized(tcx, param_env, DUMMY_SP) { - Ok(Scalar(Pointer)) - } else { - let unsized_part = tcx.struct_tail(pointee); - let metadata = match unsized_part.sty { - ty::TyForeign(..) => return Ok(Scalar(Pointer)), - ty::TySlice(_) | ty::TyStr => { - Int(dl.ptr_sized_integer(), false) - } - ty::TyDynamic(..) => Pointer, - _ => return Err(LayoutError::Unknown(unsized_part)) - }; - Ok(FatPointer(metadata)) + return Ok(scalar(Pointer)); } + + let unsized_part = tcx.struct_tail(pointee); + let metadata = match unsized_part.sty { + ty::TyForeign(..) => return Ok(scalar(Pointer)), + ty::TySlice(_) | ty::TyStr => { + Int(dl.ptr_sized_integer(), false) + } + ty::TyDynamic(..) => Pointer, + _ => return Err(LayoutError::Unknown(unsized_part)) + }; + + // Effectively a (ptr, meta) tuple. + let align = Pointer.align(dl).max(metadata.align(dl)); + let fields = FieldPlacement::Linear { + stride: Pointer.size(dl), + count: 2 + }; + let meta_offset = fields.offset(1); + assert_eq!(meta_offset, meta_offset.abi_align(metadata.align(dl))); + Ok(CachedLayout { + layout: tcx.intern_layout(Layout::FatPointer), + fields, + abi: + Abi::Aggregate { + sized: true, + packed: false, + align, + primitive_align: align, + size: (meta_offset + metadata.size(dl)).abi_align(align) + } + }) }; - let layout = match ty.sty { + Ok(match ty.sty { // Basic scalars. 
- ty::TyBool => Scalar(Int(I1, false)), - ty::TyChar => Scalar(Int(I32, false)), + ty::TyBool => scalar(Int(I1, false)), + ty::TyChar => scalar(Int(I32, false)), ty::TyInt(ity) => { - Scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)) + scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)) } ty::TyUint(ity) => { - Scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)) + scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)) } - ty::TyFloat(FloatTy::F32) => Scalar(F32), - ty::TyFloat(FloatTy::F64) => Scalar(F64), - ty::TyFnPtr(_) => Scalar(Pointer), + ty::TyFloat(FloatTy::F32) => scalar(F32), + ty::TyFloat(FloatTy::F64) => scalar(F64), + ty::TyFnPtr(_) => scalar(Pointer), // The never type. ty::TyNever => { - Univariant(Struct::new(dl, &[], &ReprOptions::default(), + univariant(Struct::new(dl, &[], &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?) } @@ -1446,50 +1305,74 @@ impl<'a, 'tcx> Layout<'tcx> { let element = cx.layout_of(element)?; let element_size = element.size(dl); let count = count.val.to_const_int().unwrap().to_u64().unwrap(); - Array { - sized: true, - align: element.align(dl), - primitive_align: element.primitive_align(dl), - element_size, - count, + let size = element_size.checked_mul(count, dl) + .ok_or(LayoutError::SizeOverflow(ty))?; + + CachedLayout { + layout: &Layout::Array, + fields: FieldPlacement::Linear { + stride: element_size, + count + }, + abi: Abi::Aggregate { + sized: true, + packed: false, + align: element.align(dl), + primitive_align: element.primitive_align(dl), + size + } } } ty::TySlice(element) => { let element = cx.layout_of(element)?; - Array { - sized: false, - align: element.align(dl), - primitive_align: element.primitive_align(dl), - element_size: element.size(dl), - count: 0 + CachedLayout { + layout: &Layout::Array, + fields: FieldPlacement::Linear { + stride: element.size(dl), + count: 0 + }, + abi: Abi::Aggregate { + sized: false, + packed: false, + align: 
element.align(dl), + primitive_align: element.primitive_align(dl), + size: Size::from_bytes(0) + } } } ty::TyStr => { - Array { - sized: false, - align: dl.i8_align, - primitive_align: dl.i8_align, - element_size: Size::from_bytes(1), - count: 0 + CachedLayout { + layout: &Layout::Array, + fields: FieldPlacement::Linear { + stride: Size::from_bytes(1), + count: 0 + }, + abi: Abi::Aggregate { + sized: false, + packed: false, + align: dl.i8_align, + primitive_align: dl.i8_align, + size: Size::from_bytes(0) + } } } // Odd unit types. ty::TyFnDef(..) => { - Univariant(Struct::new(dl, &[], &ReprOptions::default(), + univariant(Struct::new(dl, &[], &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?) } ty::TyDynamic(..) | ty::TyForeign(..) => { let mut unit = Struct::new(dl, &[], &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?; unit.sized = false; - Univariant(unit) + univariant(unit) } // Tuples, generators and closures. ty::TyGenerator(def_id, ref substs, _) => { let tys = substs.field_tys(def_id, tcx); - Univariant(Struct::new(dl, + univariant(Struct::new(dl, &tys.map(|ty| cx.layout_of(ty)) .collect::, _>>()?, &ReprOptions::default(), @@ -1498,7 +1381,7 @@ impl<'a, 'tcx> Layout<'tcx> { ty::TyClosure(def_id, ref substs) => { let tys = substs.upvar_tys(def_id, tcx); - Univariant(Struct::new(dl, + univariant(Struct::new(dl, &tys.map(|ty| cx.layout_of(ty)) .collect::, _>>()?, &ReprOptions::default(), @@ -1512,7 +1395,7 @@ impl<'a, 'tcx> Layout<'tcx> { StructKind::MaybeUnsizedUnivariant }; - Univariant(Struct::new(dl, + univariant(Struct::new(dl, &tys.iter().map(|ty| cx.layout_of(ty)) .collect::, _>>()?, &ReprOptions::default(), kind, ty)?) @@ -1520,19 +1403,23 @@ impl<'a, 'tcx> Layout<'tcx> { // SIMD vector types. ty::TyAdt(def, ..) 
if def.repr.simd() => { + let count = ty.simd_size(tcx) as u64; let element = ty.simd_type(tcx); - match cx.layout_of(element)?.abi { - Abi::Scalar(value) => { - return success(Vector { - element: value, - count: ty.simd_size(tcx) as u64 - }); - } + let element = match cx.layout_of(element)?.abi { + Abi::Scalar(value) => value, _ => { tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \ a non-machine element type `{}`", ty, element)); } + }; + CachedLayout { + layout: &Layout::Vector, + fields: FieldPlacement::Linear { + stride: element.size(tcx), + count + }, + abi: Abi::Vector { element, count } } } @@ -1549,10 +1436,54 @@ impl<'a, 'tcx> Layout<'tcx> { // Uninhabitable; represent as unit // (Typechecking will reject discriminant-sizing attrs.) - return success(Univariant(Struct::new(dl, &[], + return Ok(univariant(Struct::new(dl, &[], &def.repr, StructKind::AlwaysSizedUnivariant, ty)?)); } + if def.is_union() { + let packed = def.repr.packed(); + if packed && def.repr.align > 0 { + bug!("Union cannot be packed and aligned"); + } + + let mut primitive_align = if def.repr.packed() { + dl.i8_align + } else { + dl.aggregate_align + }; + + let mut align = if def.repr.align > 0 { + let repr_align = def.repr.align as u64; + primitive_align.max( + Align::from_bytes(repr_align, repr_align).unwrap()) + } else { + primitive_align + }; + + let mut size = Size::from_bytes(0); + for field in &variants[0] { + assert!(!field.is_unsized()); + + if !packed { + align = align.max(field.align(dl)); + primitive_align = primitive_align.max(field.primitive_align(dl)); + } + size = cmp::max(size, field.size(dl)); + } + + return Ok(CachedLayout { + layout: &Layout::UntaggedUnion, + fields: FieldPlacement::union(variants[0].len()), + abi: Abi::Aggregate { + sized: true, + packed, + align, + primitive_align, + size: size.abi_align(align) + } + }); + } + if !def.is_enum() || (variants.len() == 1 && !def.repr.inhibit_enum_layout_opt() && !variants[0].is_empty()) { @@ -1570,14 +1501,7 
@@ impl<'a, 'tcx> Layout<'tcx> { else { StructKind::AlwaysSizedUnivariant } }; - let layout = if def.is_union() { - let mut un = Union::new(dl, &def.repr); - un.extend(dl, variants[0].iter().map(|&f| Ok(f)), ty)?; - UntaggedUnion(un) - } else { - Univariant(Struct::new(dl, &variants[0], &def.repr, kind, ty)?) - }; - return success(layout); + return Ok(univariant(Struct::new(dl, &variants[0], &def.repr, kind, ty)?)); } let no_explicit_discriminants = def.variants.iter().enumerate() @@ -1608,25 +1532,49 @@ impl<'a, 'tcx> Layout<'tcx> { } } - if let Some((discr, offset, primitive)) = choice { - let mut discr_align = primitive.align(dl); - if offset.abi_align(discr_align) != offset { - st[discr].packed = true; - discr_align = dl.i8_align; + if let Some((nndiscr, offset, discr)) = choice { + let variants: Vec<_> = st.into_iter().map(&univariant).collect(); + let mut abi = variants[nndiscr].abi; + + let mut discr_align = discr.align(dl); + match abi { + Abi::Aggregate { + ref mut align, + ref mut primitive_align, + ref mut packed, + .. + } => { + if offset.abi_align(discr_align) != offset { + *packed = true; + discr_align = dl.i8_align; + } + *align = align.max(discr_align); + *primitive_align = primitive_align.max(discr_align); + } + _ => {} } - let align = st[discr].align.max(discr_align); - let primitive_align = st[discr].primitive_align.max(discr_align); - return success(NullablePointer { - nndiscr: discr as u64, - discr: primitive, + let layout = tcx.intern_layout(Layout::NullablePointer { + nndiscr: nndiscr as u64, + discr, discr_offset: offset, - size: st[discr].stride(), - align, - primitive_align, - variants: st.into_iter().map(|variant| { - success(Univariant(variant)) - }).collect::, _>>()?, + variants, + }); + return Ok(CachedLayout { + layout, + fields: match *layout { + Layout::NullablePointer { ref discr_offset, .. 
} => { + FieldPlacement::Arbitrary { + offsets: ref_slice(discr_offset) + } + } + _ => bug!() + }, + abi: if offset.bytes() == 0 && discr.size(dl) == abi.size(dl) { + Abi::Scalar(discr) + } else { + abi + } }); } } @@ -1727,17 +1675,27 @@ impl<'a, 'tcx> Layout<'tcx> { } } - General { - discr: Int(ity, signed), - - // FIXME: should be u128? - discr_range: (min as u64)..=(max as u64), - variants: variants.into_iter().map(|variant| { - success(Univariant(variant)) - }).collect::, _>>()?, - size, - align, - primitive_align, + let discr = Int(ity, signed); + CachedLayout { + layout: tcx.intern_layout(Layout::General { + discr, + + // FIXME: should be u128? + discr_range: (min as u64)..=(max as u64), + variants: variants.into_iter().map(&univariant).collect(), + }), + fields: FieldPlacement::union(1), + abi: if discr.size(dl) == size { + Abi::Scalar(discr) + } else { + Abi::Aggregate { + sized: true, + packed: false, + align, + primitive_align, + size + } + } } } @@ -1748,11 +1706,11 @@ impl<'a, 'tcx> Layout<'tcx> { return Err(LayoutError::Unknown(ty)); } let layout = cx.layout_of(normalized)?; - return Ok(CachedLayout { + CachedLayout { layout: layout.layout, fields: layout.fields, abi: layout.abi - }); + } } ty::TyParam(_) => { return Err(LayoutError::Unknown(ty)); @@ -1760,9 +1718,7 @@ impl<'a, 'tcx> Layout<'tcx> { ty::TyInfer(_) | ty::TyError => { bug!("Layout::compute: unexpected type `{}`", ty) } - }; - - success(layout) + }) } /// This is invoked by the `layout_raw` query to record the final @@ -1916,8 +1872,8 @@ impl<'a, 'tcx> Layout<'tcx> { }, variant_infos); } - Layout::UntaggedUnion(ref un) => { - debug!("print-type-size t: `{:?}` adt union {:?}", ty, un); + Layout::UntaggedUnion => { + debug!("print-type-size t: `{:?}` adt union", ty); // layout does not currently store info about each // variant... record(adt_kind.into(), None, Vec::new()); @@ -1925,9 +1881,9 @@ impl<'a, 'tcx> Layout<'tcx> { // other cases provide little interesting (i.e. 
adjustable // via representation tweaks) size info beyond total size. - Layout::Scalar(_) | - Layout::Vector { .. } | - Layout::Array { .. } | + Layout::Scalar | + Layout::Vector | + Layout::Array | Layout::FatPointer { .. } => { debug!("print-type-size t: `{:?}` adt other", ty); record(adt_kind.into(), None, Vec::new()) @@ -2333,6 +2289,11 @@ impl<'a, 'tcx> FullLayout<'tcx> { self.abi.is_unsized() } + /// Returns true if the fields of the layout are packed. + pub fn is_packed(&self) -> bool { + self.abi.is_packed() + } + pub fn size(&self, cx: C) -> Size { self.abi.size(cx) } @@ -2359,61 +2320,34 @@ impl<'gcx> HashStable> for Layout<'gcx> { mem::discriminant(self).hash_stable(hcx, hasher); match *self { - Scalar(ref value) => { - value.hash_stable(hcx, hasher); - } - Vector { element, count } => { - element.hash_stable(hcx, hasher); - count.hash_stable(hcx, hasher); - } - Array { sized, align, primitive_align, element_size, count } => { - sized.hash_stable(hcx, hasher); - align.hash_stable(hcx, hasher); - primitive_align.hash_stable(hcx, hasher); - element_size.hash_stable(hcx, hasher); - count.hash_stable(hcx, hasher); - } - FatPointer(ref metadata) => { - metadata.hash_stable(hcx, hasher); - } + Scalar => {} + Vector => {} + Array => {} + FatPointer => {} Univariant(ref variant) => { variant.hash_stable(hcx, hasher); } - UntaggedUnion(ref un) => { - un.hash_stable(hcx, hasher); - } + UntaggedUnion => {} General { discr, discr_range: RangeInclusive { start, end }, ref variants, - size, - align, - primitive_align } => { discr.hash_stable(hcx, hasher); start.hash_stable(hcx, hasher); end.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); - size.hash_stable(hcx, hasher); - align.hash_stable(hcx, hasher); - primitive_align.hash_stable(hcx, hasher); } NullablePointer { nndiscr, ref variants, ref discr, discr_offset, - size, - align, - primitive_align } => { nndiscr.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); discr.hash_stable(hcx, hasher); 
discr_offset.hash_stable(hcx, hasher); - size.hash_stable(hcx, hasher); - align.hash_stable(hcx, hasher); - primitive_align.hash_stable(hcx, hasher); } } } @@ -2453,7 +2387,8 @@ impl<'gcx> HashStable> for Abi { element.hash_stable(hcx, hasher); count.hash_stable(hcx, hasher); } - Aggregate { sized, size, align, primitive_align } => { + Aggregate { packed, sized, size, align, primitive_align } => { + packed.hash_stable(hcx, hasher); sized.hash_stable(hcx, hasher); size.hash_stable(hcx, hasher); align.hash_stable(hcx, hasher); @@ -2518,10 +2453,3 @@ impl_stable_hash_for!(struct ::ty::layout::Struct { memory_index, min_size }); - -impl_stable_hash_for!(struct ::ty::layout::Union { - align, - primitive_align, - min_size, - packed -}); diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index f59b372e7d531..dd5e97544c82c 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -753,11 +753,11 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences { bug!("failed to get layout for `{}`: {}", t, e) }); - if let Layout::General { ref variants, size, discr, .. } = *layout.layout { + if let Layout::General { ref variants, discr, .. } = *layout.layout { let discr_size = discr.size(cx.tcx).bytes(); debug!("enum `{}` is {} bytes large with layout:\n{:#?}", - t, size.bytes(), layout); + t, layout.size(cx.tcx).bytes(), layout); let (largest, slargest, largest_index) = enum_definition.variants .iter() diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 712108bf437bc..b727629e23353 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -307,8 +307,8 @@ impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { } layout::Abi::Aggregate { .. } => { - if let Layout::Array { count, .. } = *self.layout { - if count > 0 { + if let Layout::Array { .. 
} = *self.layout { + if self.fields.count() > 0 { return self.field(ccx, 0).homogeneous_aggregate(ccx); } } diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index ff66090dc8c16..cd68d04247394 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -42,7 +42,7 @@ //! taken to it, implementing them for Rust seems difficult. use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Align, HasDataLayout, LayoutOf, Size, FullLayout}; +use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size, FullLayout}; use context::CrateContext; use type_::Type; @@ -72,11 +72,7 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, return; } match *l.layout { - layout::NullablePointer { .. } | - layout::General { .. } | - layout::UntaggedUnion { .. } => { } - - layout::Univariant(ref variant) => { + layout::Univariant(_) => { let is_enum = if let ty::TyAdt(def, _) = t.sty { def.is_enum() } else { @@ -87,9 +83,11 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } else { l }; - llty.set_struct_body(&struct_llfields(cx, variant_layout), variant.packed) - }, - _ => bug!("This function cannot handle {} with layout {:#?}", t, l) + llty.set_struct_body(&struct_llfields(cx, variant_layout), + variant_layout.is_packed()) + } + + _ => {} } } @@ -102,63 +100,44 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, return cx.llvm_type_of(value.to_ty(cx.tcx())); } match *l.layout { - layout::Univariant(ref variant) => { + layout::Univariant(_) => { match name { None => { - Type::struct_(cx, &struct_llfields(cx, l), variant.packed) + Type::struct_(cx, &struct_llfields(cx, l), l.is_packed()) } Some(name) => { Type::named_struct(cx, name) } } } - layout::UntaggedUnion(ref un) => { - // Use alignment-sized ints to fill all the union storage. 
- let fill = union_fill(cx, un.stride(), un.align); - match name { - None => { - Type::struct_(cx, &[fill], un.packed) - } - Some(name) => { - let mut llty = Type::named_struct(cx, name); - llty.set_struct_body(&[fill], un.packed); - llty - } - } - } - layout::NullablePointer { size, align, .. } | - layout::General { size, align, .. } => { - let fill = union_fill(cx, size, align); + _ => { + let align = l.align(cx); + let abi_align = align.abi(); + let elem_ty = if let Some(ity) = layout::Integer::for_abi_align(cx, align) { + Type::from_integer(cx, ity) + } else { + let vec_align = cx.data_layout().vector_align(Size::from_bytes(abi_align)); + assert_eq!(vec_align.abi(), abi_align); + Type::vector(&Type::i32(cx), abi_align / 4) + }; + + let size = l.size(cx).bytes(); + assert_eq!(size % abi_align, 0); + let fill = Type::array(&elem_ty, size / abi_align); match name { None => { - Type::struct_(cx, &[fill], false) + Type::struct_(cx, &[fill], l.is_packed()) } Some(name) => { let mut llty = Type::named_struct(cx, name); - llty.set_struct_body(&[fill], false); + llty.set_struct_body(&[fill], l.is_packed()); llty } } } - _ => bug!("Unsupported type {} represented as {:#?}", t, l) } } -fn union_fill(cx: &CrateContext, size: Size, align: Align) -> Type { - let abi_align = align.abi(); - let elem_ty = if let Some(ity) = layout::Integer::for_abi_align(cx, align) { - Type::from_integer(cx, ity) - } else { - let vec_align = cx.data_layout().vector_align(Size::from_bytes(abi_align)); - assert_eq!(vec_align.abi(), abi_align); - Type::vector(&Type::i32(cx), abi_align / 4) - }; - - let size = size.bytes(); - assert_eq!(size % abi_align, 0); - Type::array(&elem_ty, size / abi_align) -} - /// Double an index and add 1 to account for padding. 
pub fn memory_index_to_gep(index: u64) -> u64 { 1 + index * 2 @@ -166,17 +145,20 @@ pub fn memory_index_to_gep(index: u64) -> u64 { pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, layout: FullLayout<'tcx>) -> Vec { - let variant = match *layout.layout { - layout::Univariant(ref variant) => variant, - _ => bug!("unexpected {:#?}", layout) - }; + debug!("struct_llfields: {:#?}", layout); + let align = layout.align(cx); + let size = layout.size(cx); let field_count = layout.fields.count(); - debug!("struct_llfields: variant: {:?}", variant); + let mut offset = Size::from_bytes(0); let mut result: Vec = Vec::with_capacity(1 + field_count * 2); - for i in variant.field_index_by_increasing_offset() { + let field_index_by_increasing_offset = match *layout.layout { + layout::Univariant(ref variant) => variant.field_index_by_increasing_offset(), + _ => bug!("unexpected {:#?}", layout) + }; + for i in field_index_by_increasing_offset { let field = layout.field(cx, i); - let target_offset = variant.offsets[i as usize]; + let target_offset = layout.fields.offset(i as usize); debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}", i, field, offset, target_offset); assert!(target_offset >= offset); @@ -187,30 +169,30 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let llty = cx.llvm_type_of(field.ty); result.push(llty); - if variant.packed { + if layout.is_packed() { assert_eq!(padding.bytes(), 0); } else { let field_align = field.align(cx); - assert!(field_align.abi() <= variant.align.abi(), + assert!(field_align.abi() <= align.abi(), "non-packed type has field with larger align ({}): {:#?}", - field_align.abi(), variant); + field_align.abi(), layout); } offset = target_offset + field.size(cx); } - if variant.sized && field_count > 0 { - if offset > variant.stride() { - bug!("variant: {:?} stride: {:?} offset: {:?}", - variant, variant.stride(), offset); + if !layout.is_unsized() && field_count > 0 { + if offset > size { + 
bug!("layout: {:#?} stride: {:?} offset: {:?}", + layout, size, offset); } - let padding = variant.stride() - offset; - debug!("struct_llfields: pad_bytes: {:?} offset: {:?} min_size: {:?} stride: {:?}", - padding, offset, variant.min_size, variant.stride()); + let padding = size - offset; + debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", + padding, offset, size); result.push(Type::array(&Type::i8(cx), padding.bytes())); assert!(result.len() == 1 + field_count * 2); } else { - debug!("struct_llfields: offset: {:?} min_size: {:?} stride: {:?}", - offset, variant.min_size, variant.stride()); + debug!("struct_llfields: offset: {:?} stride: {:?}", + offset, size); } result diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index bc6ddef0e7206..82a4095aa0118 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -65,9 +65,9 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) let layout = ccx.layout_of(ty); match *layout.layout { Layout::FatPointer { .. } => true, - Layout::Univariant(ref variant) => { + Layout::Univariant(_) => { // There must be only 2 fields. - if variant.offsets.len() != 2 { + if layout.fields.count() != 2 { return false; } diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 89f1bb6fd5112..b9ff46166a8d8 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -939,20 +939,6 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { let layout = cx.layout_of(self.ty); - - let tmp; - let offsets = match *layout.layout { - layout::Univariant(ref variant) => &variant.offsets, - layout::Vector { element, count } => { - let element_size = element.size(cx).bytes(); - tmp = (0..count). 
- map(|i| layout::Size::from_bytes(i*element_size)) - .collect::>(); - &tmp - } - _ => bug!("{} is not a struct", self.ty) - }; - self.variant.fields.iter().enumerate().map(|(i, f)| { let name = if self.variant.ctor_kind == CtorKind::Fn { format!("__{}", i) @@ -964,7 +950,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { MemberDescription { name, type_metadata: type_metadata(cx, field.ty, self.span), - offset: offsets[i], + offset: layout.fields.offset(i), size, align, flags: DIFlags::FlagZero, @@ -1022,18 +1008,12 @@ impl<'tcx> TupleMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { let layout = cx.layout_of(self.ty); - let offsets = if let layout::Univariant(ref variant) = *layout.layout { - &variant.offsets - } else { - bug!("{} is not a tuple", self.ty); - }; - self.component_types.iter().enumerate().map(|(i, &component_type)| { let (size, align) = cx.size_and_align_of(component_type); MemberDescription { name: format!("__{}", i), type_metadata: type_metadata(cx, component_type, self.span), - offset: offsets[i], + offset: layout.fields.offset(i), size, align, flags: DIFlags::FlagZero, diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 209083a8e25d6..f374ed90c342d 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -58,15 +58,9 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf let layout = ccx.layout_of(t); debug!("DST {} layout: {:?}", t, layout); - let (sized_size, sized_align) = match *layout.layout { - ty::layout::Layout::Univariant(ref variant) => { - (variant.offsets.last().map_or(0, |o| o.bytes()), variant.align.abi()) - } - _ => { - bug!("size_and_align_of_dst: expcted Univariant for `{}`, found {:#?}", - t, layout); - } - }; + let i = layout.fields.count() - 1; + let sized_size = layout.fields.offset(i).bytes(); + let sized_align = layout.align(ccx).abi(); debug!("DST {} statically sized prefix size: {} 
align: {}", t, sized_size, sized_align); let sized_size = C_usize(ccx, sized_size); @@ -74,7 +68,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf // Recurse to get the size of the dynamically sized field (must be // the last field). - let field_ty = layout.field(ccx, layout.fields.count() - 1).ty; + let field_ty = layout.field(ccx, i).ty; let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info); // FIXME (#26403, #27023): We should be adding padding diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 8924fc3b5acb8..d6e2257ab2422 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1108,14 +1108,14 @@ fn trans_const_adt<'a, 'tcx>( build_const_struct(ccx, l.for_variant(variant_index), vals, Some(discr)) } } - layout::UntaggedUnion(ref un) => { + layout::UntaggedUnion => { assert_eq!(variant_index, 0); let contents = [ vals[0].llval, - padding(ccx, un.stride() - ccx.size_of(vals[0].ty)) + padding(ccx, l.size(ccx) - ccx.size_of(vals[0].ty)) ]; - Const::new(C_struct(ccx, &contents, un.packed), t) + Const::new(C_struct(ccx, &contents, l.is_packed()), t) } layout::Univariant(_) => { assert_eq!(variant_index, 0); @@ -1162,11 +1162,11 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, offset = ccx.size_of(discr.ty); } - let st = match *layout.layout { - layout::Univariant(ref variant) => variant, + let field_index_by_increasing_offset = match *layout.layout { + layout::Univariant(ref variant) => variant.field_index_by_increasing_offset(), _ => bug!("unexpected {:#?}", layout) }; - let parts = st.field_index_by_increasing_offset().map(|i| { + let parts = field_index_by_increasing_offset.map(|i| { (vals[i], layout.fields.offset(i)) }); for (val, target_offset) in parts { @@ -1178,7 +1178,7 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // Pad to the size of the whole type, not e.g. the variant. 
cfields.push(padding(ccx, ccx.size_of(layout.ty) - offset)); - Const::new(C_struct(ccx, &cfields, st.packed), layout.ty) + Const::new(C_struct(ccx, &cfields, layout.is_packed()), layout.ty) } fn padding(ccx: &CrateContext, size: Size) -> ValueRef { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index ab31bcde52aeb..b21e4ffc2c3ad 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -10,7 +10,7 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, Align, Layout, LayoutOf}; +use rustc::ty::layout::{self, Align, FullLayout, Layout, LayoutOf}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; @@ -55,14 +55,9 @@ impl ops::BitOr for Alignment { } } -impl<'a> From<&'a Layout<'a>> for Alignment { - fn from(layout: &Layout) -> Self { - let (packed, align) = match *layout { - Layout::UntaggedUnion(ref un) => (un.packed, un.align), - Layout::Univariant(ref variant) => (variant.packed, variant.align), - _ => return Alignment::AbiAligned - }; - if packed { +impl<'a> From> for Alignment { + fn from(layout: FullLayout) -> Self { + if let layout::Abi::Aggregate { packed: true, align, .. } = layout.abi { Alignment::Packed(align) } else { Alignment::AbiAligned @@ -208,7 +203,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let field = l.field(ccx, ix); let offset = l.fields.offset(ix).bytes(); - let alignment = self.alignment | Alignment::from(l.layout); + let alignment = self.alignment | Alignment::from(l); // Unions and newtypes only use an offset of 0. match *l.layout { @@ -267,16 +262,10 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } }; - // Check whether the variant being used is packed, if applicable. 
- let is_packed = match *l.layout { - layout::Univariant(ref variant) => variant.packed, - _ => return simple() - }; - // Simple case - we can just GEP the field // * Packed struct - There is no alignment padding // * Field is sized - pointer is properly aligned already - if is_packed || !field.is_unsized() { + if l.is_packed() || !field.is_unsized() { return simple(); } @@ -466,12 +455,13 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // If this is an enum, cast to the appropriate variant struct type. let layout = bcx.ccx.layout_of(ty); - let variant_layout = layout.for_variant(variant_index); - match (layout.layout, variant_layout.layout) { - (&layout::NullablePointer { .. }, &layout::Univariant(ref st)) | - (&layout::General { .. }, &layout::Univariant(ref st)) => { + match *layout.layout { + layout::NullablePointer { .. } | + layout::General { .. } => { + let variant_layout = layout.for_variant(variant_index); let variant_ty = Type::struct_(bcx.ccx, - &adt::struct_llfields(bcx.ccx, variant_layout), st.packed); + &adt::struct_llfields(bcx.ccx, variant_layout), + variant_layout.is_packed()); downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); } _ => {} diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index a71bcf4783801..a1e89013bdbdc 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -12,7 +12,7 @@ use libc::c_uint; use llvm::{self, ValueRef, BasicBlockRef}; use llvm::debuginfo::DIScope; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::layout::LayoutOf; use rustc::mir::{self, Mir}; use rustc::ty::subst::Substs; use rustc::infer::TransNormalize; @@ -576,13 +576,8 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, }; let layout = bcx.ccx.layout_of(closure_ty); - let offsets = match *layout.layout { - layout::Univariant(ref variant) => &variant.offsets[..], - _ => bug!("Closures are only supposed to be Univariant") - }; - for (i, (decl, ty)) in 
mir.upvar_decls.iter().zip(upvar_tys).enumerate() { - let byte_offset_of_var_in_env = offsets[i].bytes(); + let byte_offset_of_var_in_env = layout.fields.offset(i).bytes(); let ops = unsafe { [llvm::LLVMRustDIBuilderCreateOpDeref(), From 18d54aa7d52c271c8a352fe6ccdefa80b100d0d3 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 19 Sep 2017 12:38:20 +0300 Subject: [PATCH 31/69] rustc: move layout::Struct into FieldPlacement/Abi. --- src/librustc/ty/context.rs | 8 +- src/librustc/ty/layout.rs | 1038 ++++++++++------------ src/librustc/ty/maps/mod.rs | 2 +- src/librustc_trans/abi.rs | 2 +- src/librustc_trans/adt.rs | 10 +- src/librustc_trans/common.rs | 4 +- src/librustc_trans/debuginfo/metadata.rs | 5 +- src/librustc_trans/mir/constant.rs | 10 +- src/librustc_trans/type_of.rs | 17 +- 9 files changed, 496 insertions(+), 600 deletions(-) diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 9ad5e07d8feab..f69e714a99724 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -41,7 +41,7 @@ use ty::{PolyFnSig, InferTy, ParamTy, ProjectionTy, ExistentialPredicate, Predic use ty::RegionKind; use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid}; use ty::TypeVariants::*; -use ty::layout::{Layout, TargetDataLayout}; +use ty::layout::{CachedLayout, TargetDataLayout}; use ty::maps; use ty::steal::Steal; use ty::BindingMode; @@ -78,7 +78,7 @@ use hir; /// Internal storage pub struct GlobalArenas<'tcx> { // internings - layout: TypedArena>, + layout: TypedArena, // references generics: TypedArena, @@ -918,7 +918,7 @@ pub struct GlobalCtxt<'tcx> { stability_interner: RefCell>, - layout_interner: RefCell>>, + layout_interner: RefCell>, /// A vector of every trait accessible in the whole crate /// (i.e. including those from subcrates). 
This is used only for @@ -1016,7 +1016,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { interned } - pub fn intern_layout(self, layout: Layout<'gcx>) -> &'gcx Layout<'gcx> { + pub fn intern_layout(self, layout: CachedLayout) -> &'gcx CachedLayout { if let Some(layout) = self.layout_interner.borrow().get(&layout) { return layout; } diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 13d3ec68a31b3..1a8f46d834404 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -12,7 +12,6 @@ pub use self::Integer::*; pub use self::Layout::*; pub use self::Primitive::*; -use rustc_back::slice::ref_slice; use session::{self, DataTypeKind, Session}; use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, ReprFlags}; @@ -623,303 +622,6 @@ impl<'a, 'tcx> Primitive { } } -/// A structure, a product type in ADT terms. -#[derive(PartialEq, Eq, Hash, Debug)] -pub struct Struct { - /// Maximum alignment of fields and repr alignment. - align: Align, - - /// Primitive alignment of fields without repr alignment. - primitive_align: Align, - - /// If true, no alignment padding is used. - packed: bool, - - /// If true, the size is exact, otherwise it's only a lower bound. - sized: bool, - - /// Offsets for the first byte of each field, ordered to match the source definition order. - /// This vector does not go in increasing order. - /// FIXME(eddyb) use small vector optimization for the common case. - offsets: Vec, - - /// Maps source order field indices to memory order indices, depending how fields were permuted. - /// FIXME (camlorn) also consider small vector optimization here. - pub memory_index: Vec, - - min_size: Size, -} - -/// Info required to optimize struct layout. -#[derive(Copy, Clone, Debug)] -enum StructKind { - /// A tuple, closure, or univariant which cannot be coerced to unsized. - AlwaysSizedUnivariant, - /// A univariant, the last field of which may be coerced to unsized. 
- MaybeUnsizedUnivariant, - /// A univariant, but part of an enum. - EnumVariant(Integer), -} - -impl<'a, 'tcx> Struct { - fn new(dl: &TargetDataLayout, - fields: &[FullLayout], - repr: &ReprOptions, - kind: StructKind, - scapegoat: Ty<'tcx>) - -> Result> { - if repr.packed() && repr.align > 0 { - bug!("Struct cannot be packed and aligned"); - } - - let align = if repr.packed() { - dl.i8_align - } else { - dl.aggregate_align - }; - - let mut ret = Struct { - align, - primitive_align: align, - packed: repr.packed(), - sized: true, - offsets: vec![], - memory_index: vec![], - min_size: Size::from_bytes(0), - }; - - // Anything with repr(C) or repr(packed) doesn't optimize. - // Neither do 1-member and 2-member structs. - // In addition, code in trans assume that 2-element structs can become pairs. - // It's easier to just short-circuit here. - let (mut optimize, sort_ascending) = match kind { - StructKind::AlwaysSizedUnivariant | - StructKind::MaybeUnsizedUnivariant => (fields.len() > 2, false), - StructKind::EnumVariant(discr) => { - (discr.size().bytes() == 1, true) - } - }; - - optimize &= (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty(); - - ret.offsets = vec![Size::from_bytes(0); fields.len()]; - let mut inverse_memory_index: Vec = (0..fields.len() as u32).collect(); - - if optimize { - let end = if let StructKind::MaybeUnsizedUnivariant = kind { - fields.len() - 1 - } else { - fields.len() - }; - if end > 0 { - let optimizing = &mut inverse_memory_index[..end]; - if sort_ascending { - optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi()); - } else { - optimizing.sort_by(| &a, &b | { - let a = fields[a as usize].align(dl).abi(); - let b = fields[b as usize].align(dl).abi(); - b.cmp(&a) - }); - } - } - } - - // inverse_memory_index holds field indices by increasing memory offset. - // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5. 
- // We now write field offsets to the corresponding offset slot; - // field 5 with offset 0 puts 0 in offsets[5]. - // At the bottom of this function, we use inverse_memory_index to produce memory_index. - - let mut offset = Size::from_bytes(0); - - if let StructKind::EnumVariant(discr) = kind { - offset = discr.size(); - if !ret.packed { - let align = discr.align(dl); - ret.align = ret.align.max(align); - ret.primitive_align = ret.primitive_align.max(align); - } - } - - for i in inverse_memory_index.iter() { - let field = fields[*i as usize]; - if !ret.sized { - bug!("Struct::new: field #{} of `{}` comes after unsized field", - ret.offsets.len(), scapegoat); - } - - if field.is_unsized() { - ret.sized = false; - } - - // Invariant: offset < dl.obj_size_bound() <= 1<<61 - if !ret.packed { - let align = field.align(dl); - let primitive_align = field.primitive_align(dl); - ret.align = ret.align.max(align); - ret.primitive_align = ret.primitive_align.max(primitive_align); - offset = offset.abi_align(align); - } - - debug!("Struct::new offset: {:?} field: {:?} {:?}", offset, field, field.size(dl)); - ret.offsets[*i as usize] = offset; - - offset = offset.checked_add(field.size(dl), dl) - .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?; - } - - if repr.align > 0 { - let repr_align = repr.align as u64; - ret.align = ret.align.max(Align::from_bytes(repr_align, repr_align).unwrap()); - debug!("Struct::new repr_align: {:?}", repr_align); - } - - debug!("Struct::new min_size: {:?}", offset); - ret.min_size = offset; - - // As stated above, inverse_memory_index holds field indices by increasing offset. - // This makes it an already-sorted view of the offsets vec. - // To invert it, consider: - // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0. - // Field 5 would be the first element, so memory_index is i: - // Note: if we didn't optimize, it's already right. 
- - if optimize { - ret.memory_index = vec![0; inverse_memory_index.len()]; - - for i in 0..inverse_memory_index.len() { - ret.memory_index[inverse_memory_index[i] as usize] = i as u32; - } - } else { - ret.memory_index = inverse_memory_index; - } - - Ok(ret) - } - - /// Get the size with trailing alignment padding. - fn stride(&self) -> Size { - self.min_size.abi_align(self.align) - } - - /// Get indices of the tys that made this struct by increasing offset. - #[inline] - pub fn field_index_by_increasing_offset<'b>(&'b self) -> impl iter::Iterator+'b { - let mut inverse_small = [0u8; 64]; - let mut inverse_big = vec![]; - let use_small = self.memory_index.len() <= inverse_small.len(); - - // We have to write this logic twice in order to keep the array small. - if use_small { - for i in 0..self.memory_index.len() { - inverse_small[self.memory_index[i] as usize] = i as u8; - } - } else { - inverse_big = vec![0; self.memory_index.len()]; - for i in 0..self.memory_index.len() { - inverse_big[self.memory_index[i] as usize] = i as u32; - } - } - - (0..self.memory_index.len()).map(move |i| { - if use_small { inverse_small[i] as usize } - else { inverse_big[i] as usize } - }) - } - - /// Find the offset of a non-zero leaf field, starting from - /// the given type and recursing through aggregates. - /// The tuple is `(offset, primitive, source_path)`. - // FIXME(eddyb) track value ranges and traverse already optimized enums. - fn non_zero_field_in_type(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - layout: FullLayout<'tcx>) - -> Result, LayoutError<'tcx>> { - let cx = (tcx, param_env); - match (layout.layout, layout.abi, &layout.ty.sty) { - (&Scalar, Abi::Scalar(Pointer), _) if !layout.ty.is_unsafe_ptr() => { - Ok(Some((Size::from_bytes(0), Pointer))) - } - (&General { discr, .. 
}, _, &ty::TyAdt(def, _)) => { - if def.discriminants(tcx).all(|d| d.to_u128_unchecked() != 0) { - Ok(Some((layout.fields.offset(0), discr))) - } else { - Ok(None) - } - } - - (&FatPointer, _, _) if !layout.ty.is_unsafe_ptr() => { - Ok(Some((layout.fields.offset(FAT_PTR_ADDR), Pointer))) - } - - // Is this the NonZero lang item wrapping a pointer or integer type? - (_, _, &ty::TyAdt(def, _)) if Some(def.did) == tcx.lang_items().non_zero() => { - let field = layout.field(cx, 0)?; - match (field.layout, field.abi) { - (&Scalar, Abi::Scalar(value)) => { - Ok(Some((layout.fields.offset(0), value))) - } - (&FatPointer, _) => { - Ok(Some((layout.fields.offset(0) + - field.fields.offset(FAT_PTR_ADDR), - Pointer))) - } - _ => Ok(None) - } - } - - // Perhaps one of the fields is non-zero, let's recurse and find out. - (&Univariant(ref variant), _, _) => { - variant.non_zero_field( - tcx, - param_env, - (0..layout.fields.count()).map(|i| layout.field(cx, i))) - } - - // Is this a fixed-size array of something non-zero - // with at least one element? - (_, _, &ty::TyArray(ety, mut count)) => { - if count.has_projections() { - count = tcx.normalize_associated_type_in_env(&count, param_env); - if count.has_projections() { - return Err(LayoutError::Unknown(layout.ty)); - } - } - if count.val.to_const_int().unwrap().to_u64().unwrap() != 0 { - Struct::non_zero_field_in_type(tcx, param_env, cx.layout_of(ety)?) - } else { - Ok(None) - } - } - - (_, _, &ty::TyProjection(_)) | (_, _, &ty::TyAnon(..)) => { - bug!("Struct::non_zero_field_in_type: {:?} not normalized", layout); - } - - // Anything else is not a non-zero type. - _ => Ok(None) - } - } - - /// Find the offset of a non-zero leaf field, starting from - /// the given set of fields and recursing through aggregates. - /// Returns Some((offset, primitive, source_path)) on success. 
- fn non_zero_field(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - fields: I) - -> Result, LayoutError<'tcx>> - where I: Iterator, LayoutError<'tcx>>> { - for (field, &field_offset) in fields.zip(&self.offsets) { - let r = Struct::non_zero_field_in_type(tcx, param_env, field?)?; - if let Some((offset, primitive)) = r { - return Ok(Some((field_offset + offset, primitive))); - } - } - Ok(None) - } -} - /// The first half of a fat pointer. /// - For a trait object, this is the address of the box. /// - For a slice, this is the base address. @@ -931,8 +633,8 @@ pub const FAT_PTR_ADDR: usize = 0; pub const FAT_PTR_EXTRA: usize = 1; /// Describes how the fields of a type are located in memory. -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub enum FieldPlacement<'a> { +#[derive(PartialEq, Eq, Hash, Debug)] +pub enum FieldPlacement { /// Array-like placement. Can also express /// unions, by using a stride of zero bytes. Linear { @@ -948,11 +650,20 @@ pub enum FieldPlacement<'a> { /// For example, enum variants leave a gap at the start, /// where the discriminant field in the enum layout goes. Arbitrary { - offsets: &'a [Size] + /// Offsets for the first byte of each field, + /// ordered to match the source definition order. + /// This vector does not go in increasing order. + // FIXME(eddyb) use small vector optimization for the common case. + offsets: Vec, + + /// Maps source order field indices to memory order indices, + /// depending how fields were permuted. + // FIXME(camlorn) also consider small vector optimization here. + memory_index: Vec } } -impl<'a> FieldPlacement<'a> { +impl FieldPlacement { pub fn union(count: usize) -> Self { FieldPlacement::Linear { stride: Size::from_bytes(0), @@ -967,19 +678,62 @@ impl<'a> FieldPlacement<'a> { assert_eq!(usize_count as u64, count); usize_count } - FieldPlacement::Arbitrary { offsets } => offsets.len() + FieldPlacement::Arbitrary { ref offsets, .. 
} => offsets.len() } } pub fn offset(&self, i: usize) -> Size { match *self { - FieldPlacement::Linear { stride, count, .. } => { + FieldPlacement::Linear { stride, count } => { let i = i as u64; assert!(i < count); stride * i } - FieldPlacement::Arbitrary { offsets } => offsets[i] + FieldPlacement::Arbitrary { ref offsets, .. } => offsets[i] + } + } + + pub fn memory_index(&self, i: usize) -> usize { + match *self { + FieldPlacement::Linear { .. } => i, + FieldPlacement::Arbitrary { ref memory_index, .. } => { + let r = memory_index[i]; + assert_eq!(r as usize as u32, r); + r as usize + } + } + } + + /// Get source indices of the fields by increasing offsets. + #[inline] + pub fn index_by_increasing_offset<'a>(&'a self) -> impl iter::Iterator+'a { + let mut inverse_small = [0u8; 64]; + let mut inverse_big = vec![]; + let use_small = self.count() <= inverse_small.len(); + + // We have to write this logic twice in order to keep the array small. + if let FieldPlacement::Arbitrary { ref memory_index, .. } = *self { + if use_small { + for i in 0..self.count() { + inverse_small[memory_index[i] as usize] = i as u8; + } + } else { + inverse_big = vec![0; self.count()]; + for i in 0..self.count() { + inverse_big[memory_index[i] as usize] = i as u32; + } + } } + + (0..self.count()).map(move |i| { + match *self { + FieldPlacement::Linear { .. } => i, + FieldPlacement::Arbitrary { .. } => { + if use_small { inverse_small[i] as usize } + else { inverse_big[i] as usize } + } + } + }) } } @@ -1078,7 +832,7 @@ impl Abi { /// NOTE: Because Layout is interned, redundant information should be /// kept to a minimum, e.g. it includes no sub-component Ty or Layout. #[derive(PartialEq, Eq, Hash, Debug)] -pub enum Layout<'a> { +pub enum Layout { /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr. Scalar, @@ -1094,7 +848,7 @@ pub enum Layout<'a> { // Remaining variants are all ADTs such as structs, enums or tuples. /// Single-case enums, and structs/tuples. 
- Univariant(Struct), + Univariant, /// Untagged unions. UntaggedUnion, @@ -1110,12 +864,12 @@ pub enum Layout<'a> { // the largest space between two consecutive discriminants and // taking everything else as the (shortest) discriminant range. discr_range: RangeInclusive, - variants: Vec>, + variants: Vec, }, /// Two cases distinguished by a nullable pointer: the case with discriminant - /// `nndiscr` is represented by the struct `nonnull`, where the field at the - /// `discr_offset` offset is known to be nonnull due to its type; if that field is null, then + /// `nndiscr` is represented by the struct `nonnull`, where field `0` + /// is known to be nonnull due to its type; if that field is null, then /// it represents the other case, which is known to be zero sized. /// /// For example, `std::option::Option` instantiated at a safe pointer type @@ -1124,8 +878,7 @@ pub enum Layout<'a> { NullablePointer { nndiscr: u64, discr: Primitive, - discr_offset: Size, - variants: Vec>, + variants: Vec, } } @@ -1148,16 +901,16 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { } } -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub struct CachedLayout<'tcx> { - pub layout: &'tcx Layout<'tcx>, - pub fields: FieldPlacement<'tcx>, +#[derive(PartialEq, Eq, Hash, Debug)] +pub struct CachedLayout { + pub layout: Layout, + pub fields: FieldPlacement, pub abi: Abi, } fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Result, LayoutError<'tcx>> + -> Result<&'tcx CachedLayout, LayoutError<'tcx>> { let (param_env, ty) = query.into_parts(); @@ -1182,47 +935,168 @@ pub fn provide(providers: &mut ty::maps::Providers) { }; } -impl<'a, 'tcx> Layout<'tcx> { +impl<'a, 'tcx> Layout { fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) - -> Result, LayoutError<'tcx>> { + -> Result<&'tcx CachedLayout, LayoutError<'tcx>> { let cx = (tcx, param_env); let dl = cx.data_layout(); let scalar = |value| { - 
CachedLayout { - layout: &Layout::Scalar, + tcx.intern_layout(CachedLayout { + layout: Layout::Scalar, fields: FieldPlacement::union(0), abi: Abi::Scalar(value) - } + }) }; - let univariant = |st| { - let layout = tcx.intern_layout(Layout::Univariant(st)); - let fields = match *layout { - Univariant(ref variant) => { - FieldPlacement::Arbitrary { - offsets: &variant.offsets - } + #[derive(Copy, Clone, Debug)] + enum StructKind { + /// A tuple, closure, or univariant which cannot be coerced to unsized. + AlwaysSized, + /// A univariant, the last field of which may be coerced to unsized. + MaybeUnsized, + /// A univariant, but part of an enum. + EnumVariant(Integer), + } + let univariant_uninterned = |fields: &[FullLayout], repr: &ReprOptions, kind| { + let packed = repr.packed(); + if packed && repr.align > 0 { + bug!("struct cannot be packed and aligned"); + } + + let mut align = if packed { + dl.i8_align + } else { + dl.aggregate_align + }; + + let mut primitive_align = align; + let mut sized = true; + + // Anything with repr(C) or repr(packed) doesn't optimize. + // Neither do 1-member and 2-member structs. + // In addition, code in trans assume that 2-element structs can become pairs. + // It's easier to just short-circuit here. 
+ let (mut optimize, sort_ascending) = match kind { + StructKind::AlwaysSized | + StructKind::MaybeUnsized => (fields.len() > 2, false), + StructKind::EnumVariant(discr) => { + (discr.size().bytes() == 1, true) } - _ => bug!() }; - let abi = match *layout { - Univariant(ref st) => { - Abi::Aggregate { - sized: st.sized, - packed: st.packed, - align: st.align, - primitive_align: st.primitive_align, - size: st.stride() + + optimize &= (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty(); + + let mut offsets = vec![Size::from_bytes(0); fields.len()]; + let mut inverse_memory_index: Vec = (0..fields.len() as u32).collect(); + + if optimize { + let end = if let StructKind::MaybeUnsized = kind { + fields.len() - 1 + } else { + fields.len() + }; + if end > 0 { + let optimizing = &mut inverse_memory_index[..end]; + if sort_ascending { + optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi()); + } else { + optimizing.sort_by(| &a, &b | { + let a = fields[a as usize].align(dl).abi(); + let b = fields[b as usize].align(dl).abi(); + b.cmp(&a) + }); } } - _ => bug!() - }; - CachedLayout { - layout, - fields, - abi } + + // inverse_memory_index holds field indices by increasing memory offset. + // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5. + // We now write field offsets to the corresponding offset slot; + // field 5 with offset 0 puts 0 in offsets[5]. + // At the bottom of this function, we use inverse_memory_index to produce memory_index. 
+ + let mut offset = Size::from_bytes(0); + + if let StructKind::EnumVariant(discr) = kind { + offset = discr.size(); + if !packed { + let discr_align = discr.align(dl); + align = align.max(discr_align); + primitive_align = primitive_align.max(discr_align); + } + } + + for i in inverse_memory_index.iter() { + let field = fields[*i as usize]; + if !sized { + bug!("univariant: field #{} of `{}` comes after unsized field", + offsets.len(), ty); + } + + if field.is_unsized() { + sized = false; + } + + // Invariant: offset < dl.obj_size_bound() <= 1<<61 + if !packed { + let field_align = field.align(dl); + align = align.max(field_align); + primitive_align = primitive_align.max(field.primitive_align(dl)); + offset = offset.abi_align(field_align); + } + + debug!("univariant offset: {:?} field: {:?} {:?}", offset, field, field.size(dl)); + offsets[*i as usize] = offset; + + offset = offset.checked_add(field.size(dl), dl) + .ok_or(LayoutError::SizeOverflow(ty))?; + } + + if repr.align > 0 { + let repr_align = repr.align as u64; + align = align.max(Align::from_bytes(repr_align, repr_align).unwrap()); + debug!("univariant repr_align: {:?}", repr_align); + } + + debug!("univariant min_size: {:?}", offset); + let min_size = offset; + + // As stated above, inverse_memory_index holds field indices by increasing offset. + // This makes it an already-sorted view of the offsets vec. + // To invert it, consider: + // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0. + // Field 5 would be the first element, so memory_index is i: + // Note: if we didn't optimize, it's already right. 
+ + let mut memory_index; + if optimize { + memory_index = vec![0; inverse_memory_index.len()]; + + for i in 0..inverse_memory_index.len() { + memory_index[inverse_memory_index[i] as usize] = i as u32; + } + } else { + memory_index = inverse_memory_index; + } + + Ok(CachedLayout { + layout: Layout::Univariant, + fields: FieldPlacement::Arbitrary { + offsets, + memory_index + }, + abi: Abi::Aggregate { + sized, + packed, + align, + primitive_align, + size: min_size.abi_align(align) + } + }) + }; + let univariant = |fields: &[FullLayout], repr: &ReprOptions, kind| { + Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?)) }; assert!(!ty.has_infer_types()); @@ -1250,18 +1124,17 @@ impl<'a, 'tcx> Layout<'tcx> { }; let meta_offset = fields.offset(1); assert_eq!(meta_offset, meta_offset.abi_align(metadata.align(dl))); - Ok(CachedLayout { - layout: tcx.intern_layout(Layout::FatPointer), + Ok(tcx.intern_layout(CachedLayout { + layout: Layout::FatPointer, fields, - abi: - Abi::Aggregate { + abi: Abi::Aggregate { sized: true, packed: false, align, primitive_align: align, size: (meta_offset + metadata.size(dl)).abi_align(align) } - }) + })) }; Ok(match ty.sty { @@ -1280,8 +1153,7 @@ impl<'a, 'tcx> Layout<'tcx> { // The never type. ty::TyNever => { - univariant(Struct::new(dl, &[], &ReprOptions::default(), - StructKind::AlwaysSizedUnivariant, ty)?) + univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)? } // Potentially-fat pointers. 
@@ -1308,8 +1180,8 @@ impl<'a, 'tcx> Layout<'tcx> { let size = element_size.checked_mul(count, dl) .ok_or(LayoutError::SizeOverflow(ty))?; - CachedLayout { - layout: &Layout::Array, + tcx.intern_layout(CachedLayout { + layout: Layout::Array, fields: FieldPlacement::Linear { stride: element_size, count @@ -1321,12 +1193,12 @@ impl<'a, 'tcx> Layout<'tcx> { primitive_align: element.primitive_align(dl), size } - } + }) } ty::TySlice(element) => { let element = cx.layout_of(element)?; - CachedLayout { - layout: &Layout::Array, + tcx.intern_layout(CachedLayout { + layout: Layout::Array, fields: FieldPlacement::Linear { stride: element.size(dl), count: 0 @@ -1338,11 +1210,11 @@ impl<'a, 'tcx> Layout<'tcx> { primitive_align: element.primitive_align(dl), size: Size::from_bytes(0) } - } + }) } ty::TyStr => { - CachedLayout { - layout: &Layout::Array, + tcx.intern_layout(CachedLayout { + layout: Layout::Array, fields: FieldPlacement::Linear { stride: Size::from_bytes(1), count: 0 @@ -1354,51 +1226,47 @@ impl<'a, 'tcx> Layout<'tcx> { primitive_align: dl.i8_align, size: Size::from_bytes(0) } - } + }) } // Odd unit types. ty::TyFnDef(..) => { - univariant(Struct::new(dl, &[], &ReprOptions::default(), - StructKind::AlwaysSizedUnivariant, ty)?) + univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)? } ty::TyDynamic(..) | ty::TyForeign(..) => { - let mut unit = Struct::new(dl, &[], &ReprOptions::default(), - StructKind::AlwaysSizedUnivariant, ty)?; - unit.sized = false; - univariant(unit) + let mut unit = univariant_uninterned(&[], &ReprOptions::default(), + StructKind::AlwaysSized)?; + match unit.abi { + Abi::Aggregate { ref mut sized, .. } => *sized = false, + _ => bug!() + } + tcx.intern_layout(unit) } // Tuples, generators and closures. 
ty::TyGenerator(def_id, ref substs, _) => { let tys = substs.field_tys(def_id, tcx); - univariant(Struct::new(dl, - &tys.map(|ty| cx.layout_of(ty)) - .collect::, _>>()?, + univariant(&tys.map(|ty| cx.layout_of(ty)).collect::, _>>()?, &ReprOptions::default(), - StructKind::AlwaysSizedUnivariant, ty)?) + StructKind::AlwaysSized)? } ty::TyClosure(def_id, ref substs) => { let tys = substs.upvar_tys(def_id, tcx); - univariant(Struct::new(dl, - &tys.map(|ty| cx.layout_of(ty)) - .collect::, _>>()?, + univariant(&tys.map(|ty| cx.layout_of(ty)).collect::, _>>()?, &ReprOptions::default(), - StructKind::AlwaysSizedUnivariant, ty)?) + StructKind::AlwaysSized)? } ty::TyTuple(tys, _) => { let kind = if tys.len() == 0 { - StructKind::AlwaysSizedUnivariant + StructKind::AlwaysSized } else { - StructKind::MaybeUnsizedUnivariant + StructKind::MaybeUnsized }; - univariant(Struct::new(dl, - &tys.iter().map(|ty| cx.layout_of(ty)) - .collect::, _>>()?, - &ReprOptions::default(), kind, ty)?) + univariant(&tys.iter().map(|ty| cx.layout_of(ty)).collect::, _>>()?, + &ReprOptions::default(), kind)? } // SIMD vector types. @@ -1413,14 +1281,14 @@ impl<'a, 'tcx> Layout<'tcx> { ty, element)); } }; - CachedLayout { - layout: &Layout::Vector, + tcx.intern_layout(CachedLayout { + layout: Layout::Vector, fields: FieldPlacement::Linear { stride: element.size(tcx), count }, abi: Abi::Vector { element, count } - } + }) } // ADTs. @@ -1436,8 +1304,7 @@ impl<'a, 'tcx> Layout<'tcx> { // Uninhabitable; represent as unit // (Typechecking will reject discriminant-sizing attrs.) 
- return Ok(univariant(Struct::new(dl, &[], - &def.repr, StructKind::AlwaysSizedUnivariant, ty)?)); + return univariant(&[], &def.repr, StructKind::AlwaysSized); } if def.is_union() { @@ -1471,8 +1338,8 @@ impl<'a, 'tcx> Layout<'tcx> { size = cmp::max(size, field.size(dl)); } - return Ok(CachedLayout { - layout: &Layout::UntaggedUnion, + return Ok(tcx.intern_layout(CachedLayout { + layout: Layout::UntaggedUnion, fields: FieldPlacement::union(variants[0].len()), abi: Abi::Aggregate { sized: true, @@ -1481,7 +1348,7 @@ impl<'a, 'tcx> Layout<'tcx> { primitive_align, size: size.abi_align(align) } - }); + })); } if !def.is_enum() || (variants.len() == 1 && @@ -1491,17 +1358,17 @@ impl<'a, 'tcx> Layout<'tcx> { // (Typechecking will reject discriminant-sizing attrs.) let kind = if def.is_enum() || variants[0].len() == 0 { - StructKind::AlwaysSizedUnivariant + StructKind::AlwaysSized } else { let param_env = tcx.param_env(def.did); let last_field = def.variants[0].fields.last().unwrap(); let always_sized = tcx.type_of(last_field.did) .is_sized(tcx, param_env, DUMMY_SP); - if !always_sized { StructKind::MaybeUnsizedUnivariant } - else { StructKind::AlwaysSizedUnivariant } + if !always_sized { StructKind::MaybeUnsized } + else { StructKind::AlwaysSized } }; - return Ok(univariant(Struct::new(dl, &variants[0], &def.repr, kind, ty)?)); + return univariant(&variants[0], &def.repr, kind); } let no_explicit_discriminants = def.variants.iter().enumerate() @@ -1511,71 +1378,56 @@ impl<'a, 'tcx> Layout<'tcx> { !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants { // Nullable pointer optimization - let mut st = vec![ - Struct::new(dl, &variants[0], - &def.repr, StructKind::AlwaysSizedUnivariant, ty)?, - Struct::new(dl, &variants[1], - &def.repr, StructKind::AlwaysSizedUnivariant, ty)? 
- ]; - - let mut choice = None; - for discr in 0..2 { - if st[1 - discr].stride().bytes() > 0 { + for i in 0..2 { + if !variants[1 - i].iter().all(|f| f.size(dl).bytes() == 0) { continue; } - let field = st[discr].non_zero_field(tcx, param_env, - variants[discr].iter().map(|&f| Ok(f)))?; - if let Some((offset, primitive)) = field { - choice = Some((discr, offset, primitive)); - break; - } - } - - if let Some((nndiscr, offset, discr)) = choice { - let variants: Vec<_> = st.into_iter().map(&univariant).collect(); - let mut abi = variants[nndiscr].abi; - - let mut discr_align = discr.align(dl); - match abi { - Abi::Aggregate { - ref mut align, - ref mut primitive_align, - ref mut packed, - .. - } => { - if offset.abi_align(discr_align) != offset { - *packed = true; - discr_align = dl.i8_align; + for (field_index, field) in variants[i].iter().enumerate() { + if let Some((offset, discr)) = field.non_zero_field(cx)? { + let st = vec![ + univariant_uninterned(&variants[0], + &def.repr, StructKind::AlwaysSized)?, + univariant_uninterned(&variants[1], + &def.repr, StructKind::AlwaysSized)? + ]; + let offset = st[i].fields.offset(field_index) + offset; + let mut abi = st[i].abi; + if offset.bytes() == 0 && discr.size(dl) == abi.size(dl) { + abi = Abi::Scalar(discr); } - *align = align.max(discr_align); - *primitive_align = primitive_align.max(discr_align); - } - _ => {} - } - - let layout = tcx.intern_layout(Layout::NullablePointer { - nndiscr: nndiscr as u64, - discr, - discr_offset: offset, - variants, - }); - return Ok(CachedLayout { - layout, - fields: match *layout { - Layout::NullablePointer { ref discr_offset, .. } => { - FieldPlacement::Arbitrary { - offsets: ref_slice(discr_offset) + let mut discr_align = discr.align(dl); + match abi { + Abi::Aggregate { + ref mut align, + ref mut primitive_align, + ref mut packed, + .. 
+ } => { + if offset.abi_align(discr_align) != offset { + *packed = true; + discr_align = dl.i8_align; + } + *align = align.max(discr_align); + *primitive_align = primitive_align.max(discr_align); } + _ => {} } - _ => bug!() - }, - abi: if offset.bytes() == 0 && discr.size(dl) == abi.size(dl) { - Abi::Scalar(discr) - } else { - abi + return Ok(tcx.intern_layout(CachedLayout { + layout: Layout::NullablePointer { + nndiscr: i as u64, + + discr, + variants: st, + }, + fields: FieldPlacement::Arbitrary { + offsets: vec![offset], + memory_index: vec![0] + }, + abi + })); } - }); + } } } @@ -1598,22 +1450,22 @@ impl<'a, 'tcx> Layout<'tcx> { assert_eq!(Integer::for_abi_align(dl, start_align), None); // Create the set of structs that represent each variant. - let mut variants = variants.into_iter().map(|fields| { - let st = Struct::new(dl, &fields, - &def.repr, StructKind::EnumVariant(min_ity), ty)?; + let mut variants = variants.into_iter().map(|field_layouts| { + let st = univariant_uninterned(&field_layouts, + &def.repr, StructKind::EnumVariant(min_ity))?; // Find the first field we can't move later // to make room for a larger discriminant. 
- for i in st.field_index_by_increasing_offset() { - let field = fields[i]; + for i in st.fields.index_by_increasing_offset() { + let field = field_layouts[i]; let field_align = field.align(dl); if field.size(dl).bytes() != 0 || field_align.abi() != 1 { start_align = start_align.min(field_align); break; } } - size = cmp::max(size, st.min_size); - align = align.max(st.align); - primitive_align = primitive_align.max(st.primitive_align); + size = cmp::max(size, st.abi.size(dl)); + align = align.max(st.abi.align(dl)); + primitive_align = primitive_align.max(st.abi.primitive_align(dl)); Ok(st) }).collect::, _>>()?; @@ -1662,29 +1514,38 @@ impl<'a, 'tcx> Layout<'tcx> { let old_ity_size = min_ity.size(); let new_ity_size = ity.size(); for variant in &mut variants { - for i in variant.offsets.iter_mut() { - if *i <= old_ity_size { - assert_eq!(*i, old_ity_size); - *i = new_ity_size; + match (&mut variant.fields, &mut variant.abi) { + (&mut FieldPlacement::Arbitrary { ref mut offsets, .. }, + &mut Abi::Aggregate { ref mut size, .. }) => { + for i in offsets { + if *i <= old_ity_size { + assert_eq!(*i, old_ity_size); + *i = new_ity_size; + } + } + // We might be making the struct larger. + if *size <= old_ity_size { + *size = new_ity_size; + } } - } - // We might be making the struct larger. - if variant.min_size <= old_ity_size { - variant.min_size = new_ity_size; + _ => bug!() } } } let discr = Int(ity, signed); - CachedLayout { - layout: tcx.intern_layout(Layout::General { + tcx.intern_layout(CachedLayout { + layout: Layout::General { discr, // FIXME: should be u128? 
discr_range: (min as u64)..=(max as u64), - variants: variants.into_iter().map(&univariant).collect(), - }), - fields: FieldPlacement::union(1), + variants + }, + fields: FieldPlacement::Arbitrary { + offsets: vec![Size::from_bytes(0)], + memory_index: vec![0] + }, abi: if discr.size(dl) == size { Abi::Scalar(discr) } else { @@ -1696,7 +1557,7 @@ impl<'a, 'tcx> Layout<'tcx> { size } } - } + }) } // Types with no meaningful known layout. @@ -1705,12 +1566,7 @@ impl<'a, 'tcx> Layout<'tcx> { if ty == normalized { return Err(LayoutError::Unknown(ty)); } - let layout = cx.layout_of(normalized)?; - CachedLayout { - layout: layout.layout, - fields: layout.fields, - abi: layout.abi - } + tcx.layout_raw(param_env.and(normalized))? } ty::TyParam(_) => { return Err(LayoutError::Unknown(ty)); @@ -1727,7 +1583,7 @@ impl<'a, 'tcx> Layout<'tcx> { fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, param_env: ty::ParamEnv<'tcx>, - layout: FullLayout) { + layout: FullLayout<'tcx>) { // If we are running with `-Zprint-type-sizes`, record layouts for // dumping later. Ignore layouts that are done with non-empty // environments or non-monomorphic layouts, as the user only wants @@ -1747,7 +1603,8 @@ impl<'a, 'tcx> Layout<'tcx> { fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, param_env: ty::ParamEnv<'tcx>, - layout: FullLayout) { + layout: FullLayout<'tcx>) { + let cx = (tcx, param_env); // (delay format until we actually need it) let record = |kind, opt_discr_size, variants| { let type_desc = format!("{:?}", ty); @@ -1761,10 +1618,10 @@ impl<'a, 'tcx> Layout<'tcx> { variants); }; - let (adt_def, substs) = match ty.sty { - ty::TyAdt(ref adt_def, substs) => { + let adt_def = match ty.sty { + ty::TyAdt(ref adt_def, _) => { debug!("print-type-size t: `{:?}` process adt", ty); - (adt_def, substs) + adt_def } ty::TyClosure(..) 
=> { @@ -1781,62 +1638,67 @@ impl<'a, 'tcx> Layout<'tcx> { let adt_kind = adt_def.adt_kind(); - let build_field_info = |(field_name, field_ty): (ast::Name, Ty<'tcx>), offset: &Size| { - match (tcx, param_env).layout_of(field_ty) { - Err(_) => bug!("no layout found for field {} type: `{:?}`", field_name, field_ty), - Ok(field_layout) => { - session::FieldInfo { - name: field_name.to_string(), - offset: offset.bytes(), - size: field_layout.size(tcx).bytes(), - align: field_layout.align(tcx).abi(), + let build_variant_info = |n: Option, + flds: &[ast::Name], + layout: FullLayout<'tcx>| { + let mut min_size = Size::from_bytes(0); + let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| { + match layout.field(cx, i) { + Err(err) => { + bug!("no layout found for field {}: `{:?}`", name, err); + } + Ok(field_layout) => { + let offset = layout.fields.offset(i); + let field_size = field_layout.size(tcx); + let field_end = offset + field_size; + if min_size < field_end { + min_size = field_end; + } + session::FieldInfo { + name: name.to_string(), + offset: offset.bytes(), + size: field_size.bytes(), + align: field_layout.align(tcx).abi(), + } } } - } - }; - - let build_variant_info = |n: Option, - flds: &[(ast::Name, Ty<'tcx>)], - s: &Struct| { - let field_info: Vec<_> = - flds.iter() - .zip(&s.offsets) - .map(|(&field_name_ty, offset)| build_field_info(field_name_ty, offset)) - .collect(); + }).collect(); session::VariantInfo { name: n.map(|n|n.to_string()), - kind: if s.sized { + kind: if layout.is_unsized() { + session::SizeKind::Min + } else { session::SizeKind::Exact + }, + align: layout.align(tcx).abi(), + size: if min_size.bytes() == 0 { + layout.size(tcx).bytes() } else { - session::SizeKind::Min + min_size.bytes() }, - align: s.align.abi(), - size: s.min_size.bytes(), fields: field_info, } }; match *layout.layout { - Layout::Univariant(ref variant_layout) => { + Layout::Univariant => { let variant_names = || { adt_def.variants.iter().map(|v|format!("{}", 
v.name)).collect::>() }; - debug!("print-type-size t: `{:?}` adt univariant {:?} variants: {:?}", - ty, variant_layout, variant_names()); + debug!("print-type-size `{:#?}` variants: {:?}", + layout, variant_names()); assert!(adt_def.variants.len() <= 1, "univariant with variants {:?}", variant_names()); if adt_def.variants.len() == 1 { let variant_def = &adt_def.variants[0]; let fields: Vec<_> = - variant_def.fields.iter() - .map(|f| (f.name, f.ty(tcx, substs))) - .collect(); + variant_def.fields.iter().map(|f| f.name).collect(); record(adt_kind.into(), None, vec![build_variant_info(Some(variant_def.name), &fields, - variant_layout)]); + layout)]); } else { // (This case arises for *empty* enums; so give it // zero variants.) @@ -1844,28 +1706,19 @@ impl<'a, 'tcx> Layout<'tcx> { } } - Layout::NullablePointer { ref variants, .. } | - Layout::General { ref variants, .. } => { - debug!("print-type-size t: `{:?}` adt general variants def {} layouts {} {:?}", - ty, adt_def.variants.len(), variants.len(), variants); + Layout::NullablePointer { .. } | + Layout::General { .. } => { + debug!("print-type-size `{:#?}` adt general variants def {}", + ty, adt_def.variants.len()); let variant_infos: Vec<_> = - adt_def.variants.iter() - .zip(variants.iter()) - .map(|(variant_def, variant_layout)| { - let fields: Vec<_> = - variant_def.fields - .iter() - .map(|f| (f.name, f.ty(tcx, substs))) - .collect(); - let variant_layout = match *variant_layout.layout { - Univariant(ref variant) => variant, - _ => bug!() - }; - build_variant_info(Some(variant_def.name), - &fields, - variant_layout) - }) - .collect(); + adt_def.variants.iter().enumerate().map(|(i, variant_def)| { + let fields: Vec<_> = + variant_def.fields.iter().map(|f| f.name).collect(); + build_variant_info(Some(variant_def.name), + &fields, + layout.for_variant(i)) + }) + .collect(); record(adt_kind.into(), match *layout.layout { Layout::General { discr, .. 
} => Some(discr.size(tcx)), _ => None @@ -2048,8 +1901,8 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { pub struct FullLayout<'tcx> { pub ty: Ty<'tcx>, pub variant_index: Option, - pub layout: &'tcx Layout<'tcx>, - pub fields: FieldPlacement<'tcx>, + pub layout: &'tcx Layout, + pub fields: &'tcx FieldPlacement, pub abi: Abi, } @@ -2101,8 +1954,8 @@ impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx let layout = FullLayout { ty, variant_index: None, - layout: cached.layout, - fields: cached.fields, + layout: &cached.layout, + fields: &cached.fields, abi: cached.abi }; @@ -2133,8 +1986,8 @@ impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, let layout = FullLayout { ty, variant_index: None, - layout: cached.layout, - fields: cached.fields, + layout: &cached.layout, + fields: &cached.fields, abi: cached.abi }; @@ -2163,12 +2016,12 @@ impl<'a, 'tcx> FullLayout<'tcx> { }; let (layout, fields, abi) = match *self.layout { - Univariant(_) => (self.layout, self.fields, self.abi), + Univariant => (self.layout, self.fields, self.abi), NullablePointer { ref variants, .. } | General { ref variants, .. } => { - let variant = variants[variant_index]; - (variant.layout, variant.fields, variant.abi) + let variant = &variants[variant_index]; + (&variant.layout, &variant.fields, variant.abi) } _ => bug!() @@ -2310,9 +2163,81 @@ impl<'a, 'tcx> FullLayout<'tcx> { pub fn primitive_align(&self, cx: C) -> Align { self.abi.primitive_align(cx) } + + /// Find the offset of a non-zero leaf field, starting from + /// the given type and recursing through aggregates. + /// The tuple is `(offset, primitive, source_path)`. + // FIXME(eddyb) track value ranges and traverse already optimized enums. 
+ fn non_zero_field(&self, cx: C) + -> Result, LayoutError<'tcx>> + where C: LayoutOf, FullLayout = Result>> + + HasTyCtxt<'tcx> + { + let tcx = cx.tcx(); + match (self.layout, self.abi, &self.ty.sty) { + (&Scalar, Abi::Scalar(Pointer), _) if !self.ty.is_unsafe_ptr() => { + Ok(Some((Size::from_bytes(0), Pointer))) + } + (&General { discr, .. }, _, &ty::TyAdt(def, _)) => { + if def.discriminants(tcx).all(|d| d.to_u128_unchecked() != 0) { + Ok(Some((self.fields.offset(0), discr))) + } else { + Ok(None) + } + } + + (&FatPointer, _, _) if !self.ty.is_unsafe_ptr() => { + Ok(Some((self.fields.offset(FAT_PTR_ADDR), Pointer))) + } + + // Is this the NonZero lang item wrapping a pointer or integer type? + (_, _, &ty::TyAdt(def, _)) if Some(def.did) == tcx.lang_items().non_zero() => { + let field = self.field(cx, 0)?; + match (field.layout, field.abi) { + (&Scalar, Abi::Scalar(value)) => { + Ok(Some((self.fields.offset(0), value))) + } + (&FatPointer, _) => { + Ok(Some((self.fields.offset(0) + + field.fields.offset(FAT_PTR_ADDR), + Pointer))) + } + _ => Ok(None) + } + } + + // Perhaps one of the fields is non-zero, let's recurse and find out. + (&Univariant, _, _) => { + for i in 0..self.fields.count() { + let r = self.field(cx, i)?.non_zero_field(cx)?; + if let Some((offset, primitive)) = r { + return Ok(Some((self.fields.offset(i) + offset, primitive))); + } + } + Ok(None) + } + + // Is this a fixed-size array of something non-zero + // with at least one element? + (_, _, &ty::TyArray(ety, _)) => { + if self.fields.count() != 0 { + cx.layout_of(ety)?.non_zero_field(cx) + } else { + Ok(None) + } + } + + (_, _, &ty::TyProjection(_)) | (_, _, &ty::TyAnon(..)) => { + bug!("FullLayout::non_zero_field: {:#?} not normalized", self); + } + + // Anything else is not a non-zero type. 
+ _ => Ok(None) + } + } } -impl<'gcx> HashStable> for Layout<'gcx> { +impl<'gcx> HashStable> for Layout { fn hash_stable(&self, hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { @@ -2324,9 +2249,7 @@ impl<'gcx> HashStable> for Layout<'gcx> { Vector => {} Array => {} FatPointer => {} - Univariant(ref variant) => { - variant.hash_stable(hcx, hasher); - } + Univariant => {} UntaggedUnion => {} General { discr, @@ -2342,18 +2265,16 @@ impl<'gcx> HashStable> for Layout<'gcx> { nndiscr, ref variants, ref discr, - discr_offset, } => { nndiscr.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); discr.hash_stable(hcx, hasher); - discr_offset.hash_stable(hcx, hasher); } } } } -impl<'gcx> HashStable> for FieldPlacement<'gcx> { +impl<'gcx> HashStable> for FieldPlacement { fn hash_stable(&self, hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { @@ -2365,8 +2286,9 @@ impl<'gcx> HashStable> for FieldPlacement<'gcx> { count.hash_stable(hcx, hasher); stride.hash_stable(hcx, hasher); } - Arbitrary { offsets } => { + Arbitrary { ref offsets, ref memory_index } => { offsets.hash_stable(hcx, hasher); + memory_index.hash_stable(hcx, hasher); } } } @@ -2398,7 +2320,7 @@ impl<'gcx> HashStable> for Abi { } } -impl_stable_hash_for!(struct ::ty::layout::CachedLayout<'tcx> { +impl_stable_hash_for!(struct ::ty::layout::CachedLayout { layout, fields, abi @@ -2443,13 +2365,3 @@ impl<'gcx> HashStable> for LayoutError<'gcx> } } } - -impl_stable_hash_for!(struct ::ty::layout::Struct { - align, - primitive_align, - packed, - sized, - offsets, - memory_index, - min_size -}); diff --git a/src/librustc/ty/maps/mod.rs b/src/librustc/ty/maps/mod.rs index 6746776308903..ebd17ebabe79f 100644 --- a/src/librustc/ty/maps/mod.rs +++ b/src/librustc/ty/maps/mod.rs @@ -264,7 +264,7 @@ define_maps! 
{ <'tcx> [] fn is_freeze_raw: is_freeze_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, [] fn needs_drop_raw: needs_drop_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, [] fn layout_raw: layout_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Result, + -> Result<&'tcx ty::layout::CachedLayout, ty::layout::LayoutError<'tcx>>, [] fn dylib_dependency_formats: DylibDepFormats(CrateNum) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index b727629e23353..739b2a3789a3f 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -316,7 +316,7 @@ impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { let mut total = Size::from_bytes(0); let mut result = None; - let is_union = match self.fields { + let is_union = match *self.fields { layout::FieldPlacement::Linear { stride, .. } => { stride.bytes() == 0 } diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index cd68d04247394..07c64c35c07d6 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -72,7 +72,7 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, return; } match *l.layout { - layout::Univariant(_) => { + layout::Univariant => { let is_enum = if let ty::TyAdt(def, _) = t.sty { def.is_enum() } else { @@ -100,7 +100,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, return cx.llvm_type_of(value.to_ty(cx.tcx())); } match *l.layout { - layout::Univariant(_) => { + layout::Univariant => { match name { None => { Type::struct_(cx, &struct_llfields(cx, l), l.is_packed()) @@ -152,11 +152,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let mut offset = Size::from_bytes(0); let mut result: Vec = Vec::with_capacity(1 + field_count * 2); - let field_index_by_increasing_offset = match *layout.layout { - layout::Univariant(ref variant) => variant.field_index_by_increasing_offset(), - _ => bug!("unexpected {:#?}", layout) - }; - for i in field_index_by_increasing_offset { + for i in 
layout.fields.index_by_increasing_offset() { let field = layout.field(cx, i); let target_offset = layout.fields.offset(i as usize); debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}", diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 82a4095aa0118..d7397e359a148 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -64,8 +64,8 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { let layout = ccx.layout_of(ty); match *layout.layout { - Layout::FatPointer { .. } => true, - Layout::Univariant(_) => { + Layout::FatPointer => true, + Layout::Univariant => { // There must be only 2 fields. if layout.fields.count() != 2 { return false; diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index b9ff46166a8d8..5948c3a3e5926 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1159,7 +1159,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } }).collect() }, - layout::Univariant(_) => { + layout::Univariant => { assert!(adt.variants.len() <= 1); if adt.variants.is_empty() { @@ -1194,7 +1194,6 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { layout::NullablePointer { nndiscr, discr, - discr_offset, .. 
} => { let variant = self.type_rep.for_variant(nndiscr as usize); @@ -1239,7 +1238,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } compute_field_path(cx, &mut name, self.type_rep, - discr_offset, + self.type_rep.fields.offset(0), discr.size(cx)); name.push_str(&adt.variants[(1 - nndiscr) as usize].name.as_str()); diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index d6e2257ab2422..ae19c865d1c93 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1117,11 +1117,11 @@ fn trans_const_adt<'a, 'tcx>( Const::new(C_struct(ccx, &contents, l.is_packed()), t) } - layout::Univariant(_) => { + layout::Univariant => { assert_eq!(variant_index, 0); build_const_struct(ccx, l, vals, None) } - layout::Vector { .. } => { + layout::Vector => { Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::>()), t) } layout::NullablePointer { nndiscr, .. } => { @@ -1162,11 +1162,7 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, offset = ccx.size_of(discr.ty); } - let field_index_by_increasing_offset = match *layout.layout { - layout::Univariant(ref variant) => variant.field_index_by_increasing_offset(), - _ => bug!("unexpected {:#?}", layout) - }; - let parts = field_index_by_increasing_offset.map(|i| { + let parts = layout.fields.index_by_increasing_offset().map(|i| { (vals[i], layout.fields.offset(i)) }); for (val, target_offset) in parts { diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 679632d91133c..bd37bfb01d71a 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -240,25 +240,18 @@ impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { if let layout::Abi::Scalar(_) = self.abi { bug!("FullLayout::llvm_field_index({:?}): not applicable", self); } + let index = self.fields.memory_index(index); match *self.layout { - Layout::Scalar { .. } | - Layout::UntaggedUnion { .. } | - Layout::NullablePointer { .. } | - Layout::General { .. 
} => { - bug!("FullLayout::llvm_field_index({:?}): not applicable", self) - } - - Layout::Vector { .. } | - Layout::Array { .. } => { + Layout::Vector | Layout::Array => { index as u64 } - Layout::FatPointer { .. } => { + Layout::FatPointer | Layout::Univariant => { adt::memory_index_to_gep(index as u64) } - Layout::Univariant(ref variant) => { - adt::memory_index_to_gep(variant.memory_index[index] as u64) + _ => { + bug!("FullLayout::llvm_field_index({:?}): not applicable", self) } } } From fad99542c8643984b7630d8e297007aef824b268 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 19 Sep 2017 12:52:30 +0300 Subject: [PATCH 32/69] rustc: split layout::FieldPlacement::Linear back into Union and Array. --- src/librustc/ty/layout.rs | 54 ++++++++++++++++++++------------------- src/librustc_trans/abi.rs | 17 ++++++------ 2 files changed, 36 insertions(+), 35 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 1a8f46d834404..4d74e5eed5919 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -635,9 +635,11 @@ pub const FAT_PTR_EXTRA: usize = 1; /// Describes how the fields of a type are located in memory. #[derive(PartialEq, Eq, Hash, Debug)] pub enum FieldPlacement { - /// Array-like placement. Can also express - /// unions, by using a stride of zero bytes. - Linear { + /// All fields start at no offset. The `usize` is the field count. + Union(usize), + + /// Array/vector-like placement, with all fields of identical types. + Array { stride: Size, count: u64 }, @@ -664,16 +666,10 @@ pub enum FieldPlacement { } impl FieldPlacement { - pub fn union(count: usize) -> Self { - FieldPlacement::Linear { - stride: Size::from_bytes(0), - count: count as u64 - } - } - pub fn count(&self) -> usize { match *self { - FieldPlacement::Linear { count, .. } => { + FieldPlacement::Union(count) => count, + FieldPlacement::Array { count, .. 
} => { let usize_count = count as usize; assert_eq!(usize_count as u64, count); usize_count @@ -684,7 +680,8 @@ impl FieldPlacement { pub fn offset(&self, i: usize) -> Size { match *self { - FieldPlacement::Linear { stride, count } => { + FieldPlacement::Union(_) => Size::from_bytes(0), + FieldPlacement::Array { stride, count } => { let i = i as u64; assert!(i < count); stride * i @@ -695,7 +692,8 @@ impl FieldPlacement { pub fn memory_index(&self, i: usize) -> usize { match *self { - FieldPlacement::Linear { .. } => i, + FieldPlacement::Union(_) | + FieldPlacement::Array { .. } => i, FieldPlacement::Arbitrary { ref memory_index, .. } => { let r = memory_index[i]; assert_eq!(r as usize as u32, r); @@ -727,7 +725,8 @@ impl FieldPlacement { (0..self.count()).map(move |i| { match *self { - FieldPlacement::Linear { .. } => i, + FieldPlacement::Union(_) | + FieldPlacement::Array { .. } => i, FieldPlacement::Arbitrary { .. } => { if use_small { inverse_small[i] as usize } else { inverse_big[i] as usize } @@ -945,7 +944,7 @@ impl<'a, 'tcx> Layout { let scalar = |value| { tcx.intern_layout(CachedLayout { layout: Layout::Scalar, - fields: FieldPlacement::union(0), + fields: FieldPlacement::Union(0), abi: Abi::Scalar(value) }) }; @@ -1118,12 +1117,12 @@ impl<'a, 'tcx> Layout { // Effectively a (ptr, meta) tuple. 
let align = Pointer.align(dl).max(metadata.align(dl)); - let fields = FieldPlacement::Linear { - stride: Pointer.size(dl), - count: 2 - }; - let meta_offset = fields.offset(1); + let meta_offset = Pointer.size(dl); assert_eq!(meta_offset, meta_offset.abi_align(metadata.align(dl))); + let fields = FieldPlacement::Arbitrary { + offsets: vec![Size::from_bytes(0), meta_offset], + memory_index: vec![0, 1] + }; Ok(tcx.intern_layout(CachedLayout { layout: Layout::FatPointer, fields, @@ -1182,7 +1181,7 @@ impl<'a, 'tcx> Layout { tcx.intern_layout(CachedLayout { layout: Layout::Array, - fields: FieldPlacement::Linear { + fields: FieldPlacement::Array { stride: element_size, count }, @@ -1199,7 +1198,7 @@ impl<'a, 'tcx> Layout { let element = cx.layout_of(element)?; tcx.intern_layout(CachedLayout { layout: Layout::Array, - fields: FieldPlacement::Linear { + fields: FieldPlacement::Array { stride: element.size(dl), count: 0 }, @@ -1215,7 +1214,7 @@ impl<'a, 'tcx> Layout { ty::TyStr => { tcx.intern_layout(CachedLayout { layout: Layout::Array, - fields: FieldPlacement::Linear { + fields: FieldPlacement::Array { stride: Size::from_bytes(1), count: 0 }, @@ -1283,7 +1282,7 @@ impl<'a, 'tcx> Layout { }; tcx.intern_layout(CachedLayout { layout: Layout::Vector, - fields: FieldPlacement::Linear { + fields: FieldPlacement::Array { stride: element.size(tcx), count }, @@ -1340,7 +1339,7 @@ impl<'a, 'tcx> Layout { return Ok(tcx.intern_layout(CachedLayout { layout: Layout::UntaggedUnion, - fields: FieldPlacement::union(variants[0].len()), + fields: FieldPlacement::Union(variants[0].len()), abi: Abi::Aggregate { sized: true, packed, @@ -2282,7 +2281,10 @@ impl<'gcx> HashStable> for FieldPlacement { mem::discriminant(self).hash_stable(hcx, hasher); match *self { - Linear { count, stride } => { + Union(count) => { + count.hash_stable(hcx, hasher); + } + Array { count, stride } => { count.hash_stable(hcx, hasher); stride.hash_stable(hcx, hasher); } diff --git a/src/librustc_trans/abi.rs 
b/src/librustc_trans/abi.rs index 739b2a3789a3f..365570edd65ef 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -35,7 +35,7 @@ use type_::Type; use rustc::hir; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Align, Layout, Size, FullLayout}; +use rustc::ty::layout::{self, Align, Size, FullLayout}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc_back::PanicStrategy; @@ -307,19 +307,18 @@ impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { } layout::Abi::Aggregate { .. } => { - if let Layout::Array { .. } = *self.layout { - if self.fields.count() > 0 { - return self.field(ccx, 0).homogeneous_aggregate(ccx); - } - } - let mut total = Size::from_bytes(0); let mut result = None; let is_union = match *self.fields { - layout::FieldPlacement::Linear { stride, .. } => { - stride.bytes() == 0 + layout::FieldPlacement::Array { count, .. } => { + if count > 0 { + return self.field(ccx, 0).homogeneous_aggregate(ccx); + } else { + return None; + } } + layout::FieldPlacement::Union(_) => true, layout::FieldPlacement::Arbitrary { .. } => false }; From d0ab6e8644ded75c9a43b46151568f6b782bec59 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 19 Sep 2017 23:43:55 +0300 Subject: [PATCH 33/69] rustc_trans: compute LLVM types from type layouts, not Rust types. 
--- src/librustc/ty/layout.rs | 8 +- src/librustc_trans/adt.rs | 196 --------------------- src/librustc_trans/lib.rs | 1 - src/librustc_trans/meth.rs | 2 +- src/librustc_trans/mir/lvalue.rs | 65 +++---- src/librustc_trans/type_.rs | 7 - src/librustc_trans/type_of.rs | 285 +++++++++++++++++-------------- 7 files changed, 184 insertions(+), 380 deletions(-) delete mode 100644 src/librustc_trans/adt.rs diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 4d74e5eed5919..d905592347f64 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1541,10 +1541,10 @@ impl<'a, 'tcx> Layout { discr_range: (min as u64)..=(max as u64), variants }, - fields: FieldPlacement::Arbitrary { - offsets: vec![Size::from_bytes(0)], - memory_index: vec![0] - }, + // FIXME(eddyb): using `FieldPlacement::Arbitrary` here results + // in lost optimizations, specifically around allocations, see + // `test/codegen/{alloc-optimisation,vec-optimizes-away}.rs`. + fields: FieldPlacement::Union(1), abi: if discr.size(dl) == size { Abi::Scalar(discr) } else { diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs deleted file mode 100644 index 07c64c35c07d6..0000000000000 --- a/src/librustc_trans/adt.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! # Representation of Algebraic Data Types -//! -//! This module determines how to represent enums, structs, and tuples -//! based on their monomorphized types; it is responsible both for -//! choosing a representation and translating basic operations on -//! values of those types. (Note: exporting the representations for -//! 
debuggers is handled in debuginfo.rs, not here.) -//! -//! Note that the interface treats everything as a general case of an -//! enum, so structs/tuples/etc. have one pseudo-variant with -//! discriminant 0; i.e., as if they were a univariant enum. -//! -//! Having everything in one place will enable improvements to data -//! structure representation; possibilities include: -//! -//! - User-specified alignment (e.g., cacheline-aligning parts of -//! concurrently accessed data structures); LLVM can't represent this -//! directly, so we'd have to insert padding fields in any structure -//! that might contain one and adjust GEP indices accordingly. See -//! issue #4578. -//! -//! - Store nested enums' discriminants in the same word. Rather, if -//! some variants start with enums, and those enums representations -//! have unused alignment padding between discriminant and body, the -//! outer enum's discriminant can be stored there and those variants -//! can start at offset 0. Kind of fancy, and might need work to -//! make copies of the inner enum type cooperate, but it could help -//! with `Option` or `Result` wrapped around another enum. -//! -//! - Tagged pointers would be neat, but given that any type can be -//! used unboxed and any field can have pointers (including mutable) -//! taken to it, implementing them for Rust seems difficult. - -use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size, FullLayout}; - -use context::CrateContext; -use type_::Type; - -/// LLVM-level types are a little complicated. -/// -/// C-like enums need to be actual ints, not wrapped in a struct, -/// because that changes the ABI on some platforms (see issue #10308). -/// -/// For nominal types, in some cases, we need to use LLVM named structs -/// and fill in the actual contents in a second pass to prevent -/// unbounded recursion; see also the comments in `trans::type_of`. 
-pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { - generic_type_of(cx, t, None) -} - -pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, name: &str) -> Type { - generic_type_of(cx, t, Some(name)) -} - -pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, llty: &mut Type) { - let l = cx.layout_of(t); - debug!("finish_type_of: {} with layout {:#?}", t, l); - if let layout::Abi::Scalar(_) = l.abi { - return; - } - match *l.layout { - layout::Univariant => { - let is_enum = if let ty::TyAdt(def, _) = t.sty { - def.is_enum() - } else { - false - }; - let variant_layout = if is_enum { - l.for_variant(0) - } else { - l - }; - llty.set_struct_body(&struct_llfields(cx, variant_layout), - variant_layout.is_packed()) - } - - _ => {} - } -} - -fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, - name: Option<&str>) -> Type { - let l = cx.layout_of(t); - debug!("adt::generic_type_of {:#?} name: {:?}", l, name); - if let layout::Abi::Scalar(value) = l.abi { - return cx.llvm_type_of(value.to_ty(cx.tcx())); - } - match *l.layout { - layout::Univariant => { - match name { - None => { - Type::struct_(cx, &struct_llfields(cx, l), l.is_packed()) - } - Some(name) => { - Type::named_struct(cx, name) - } - } - } - _ => { - let align = l.align(cx); - let abi_align = align.abi(); - let elem_ty = if let Some(ity) = layout::Integer::for_abi_align(cx, align) { - Type::from_integer(cx, ity) - } else { - let vec_align = cx.data_layout().vector_align(Size::from_bytes(abi_align)); - assert_eq!(vec_align.abi(), abi_align); - Type::vector(&Type::i32(cx), abi_align / 4) - }; - - let size = l.size(cx).bytes(); - assert_eq!(size % abi_align, 0); - let fill = Type::array(&elem_ty, size / abi_align); - match name { - None => { - Type::struct_(cx, &[fill], l.is_packed()) - } - Some(name) => { - let mut llty = Type::named_struct(cx, name); - llty.set_struct_body(&[fill], l.is_packed()); - llty - } - } - 
} - } -} - -/// Double an index and add 1 to account for padding. -pub fn memory_index_to_gep(index: u64) -> u64 { - 1 + index * 2 -} - -pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - layout: FullLayout<'tcx>) -> Vec { - debug!("struct_llfields: {:#?}", layout); - let align = layout.align(cx); - let size = layout.size(cx); - let field_count = layout.fields.count(); - - let mut offset = Size::from_bytes(0); - let mut result: Vec = Vec::with_capacity(1 + field_count * 2); - for i in layout.fields.index_by_increasing_offset() { - let field = layout.field(cx, i); - let target_offset = layout.fields.offset(i as usize); - debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}", - i, field, offset, target_offset); - assert!(target_offset >= offset); - let padding = target_offset - offset; - result.push(Type::array(&Type::i8(cx), padding.bytes())); - debug!(" padding before: {:?}", padding); - - let llty = cx.llvm_type_of(field.ty); - result.push(llty); - - if layout.is_packed() { - assert_eq!(padding.bytes(), 0); - } else { - let field_align = field.align(cx); - assert!(field_align.abi() <= align.abi(), - "non-packed type has field with larger align ({}): {:#?}", - field_align.abi(), layout); - } - - offset = target_offset + field.size(cx); - } - if !layout.is_unsized() && field_count > 0 { - if offset > size { - bug!("layout: {:#?} stride: {:?} offset: {:?}", - layout, size, offset); - } - let padding = size - offset; - debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", - padding, offset, size); - result.push(Type::array(&Type::i8(cx), padding.bytes())); - assert!(result.len() == 1 + field_count * 2); - } else { - debug!("struct_llfields: offset: {:?} stride: {:?}", - offset, size); - } - - result -} - diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index dd33012e900fa..83fc10173166c 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -104,7 +104,6 @@ pub mod back { } mod abi; -mod 
adt; mod allocator; mod asm; mod assert_module_sources; diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index 2289adb01ea6b..8dbef1f8d0845 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -77,7 +77,7 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } // Not in the cache. Build it. - let nullptr = C_null(Type::nil(ccx).ptr_to()); + let nullptr = C_null(Type::i8p(ccx)); let (size, align) = ccx.size_and_align_of(ty); let mut components: Vec<_> = [ diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index b21e4ffc2c3ad..b72ccf6ba28ea 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -10,17 +10,16 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, Align, FullLayout, Layout, LayoutOf}; +use rustc::ty::layout::{self, Align, FullLayout, LayoutOf}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; use abi; -use adt; use base; use builder::Builder; use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, val_ty}; use consts; -use type_of::LayoutLlvmExt; +use type_of::{self, LayoutLlvmExt}; use type_::Type; use value::Value; use glue; @@ -206,52 +205,26 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let alignment = self.alignment | Alignment::from(l); // Unions and newtypes only use an offset of 0. - match *l.layout { - // FIXME(eddyb) The fields of a fat pointer aren't correct, especially - // to unsized structs, we can't represent their pointee types in `Ty`. - Layout::FatPointer { .. } => {} - - _ if offset == 0 => { - let ty = ccx.llvm_type_of(field.ty); - return LvalueRef { - llval: bcx.pointercast(self.llval, ty.ptr_to()), - llextra: if field.is_unsized() { - self.llextra - } else { - ptr::null_mut() - }, - ty: LvalueTy::from_ty(field.ty), - alignment, - }; - } - - _ => {} - } - - // Discriminant field of enums. 
- if let layout::NullablePointer { .. } = *l.layout { - let ty = ccx.llvm_type_of(field.ty); - let size = field.size(ccx).bytes(); - - // If the discriminant is not on a multiple of the primitive's size, - // we need to go through i8*. Also assume the worst alignment. - if offset % size != 0 { - let byte_ptr = bcx.pointercast(self.llval, Type::i8p(ccx)); - let byte_ptr = bcx.inbounds_gep(byte_ptr, &[C_usize(ccx, offset)]); - let byte_align = Alignment::Packed(Align::from_bytes(1, 1).unwrap()); - return LvalueRef::new_sized( - bcx.pointercast(byte_ptr, ty.ptr_to()), field.ty, byte_align); + let has_llvm_fields = match *l.fields { + layout::FieldPlacement::Union(_) => false, + layout::FieldPlacement::Array { .. } => true, + layout::FieldPlacement::Arbitrary { .. } => { + match l.abi { + layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => false, + layout::Abi::Aggregate { .. } => true + } } - - let discr_ptr = bcx.pointercast(self.llval, ty.ptr_to()); - return LvalueRef::new_sized( - bcx.inbounds_gep(discr_ptr, &[C_usize(ccx, offset / size)]), - field.ty, alignment); - } + }; let simple = || { LvalueRef { - llval: bcx.struct_gep(self.llval, l.llvm_field_index(ix)), + llval: if has_llvm_fields { + bcx.struct_gep(self.llval, l.llvm_field_index(ix)) + } else { + assert_eq!(offset, 0); + let ty = ccx.llvm_type_of(field.ty); + bcx.pointercast(self.llval, ty.ptr_to()) + }, llextra: if ccx.shared().type_has_metadata(field.ty) { self.llextra } else { @@ -460,7 +433,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { layout::General { .. 
} => { let variant_layout = layout.for_variant(variant_index); let variant_ty = Type::struct_(bcx.ccx, - &adt::struct_llfields(bcx.ccx, variant_layout), + &type_of::struct_llfields(bcx.ccx, variant_layout), variant_layout.is_packed()); downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); } diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index bb8f3f23108ec..dbdc8919da9c1 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -66,10 +66,6 @@ impl Type { ty!(llvm::LLVMVoidTypeInContext(ccx.llcx())) } - pub fn nil(ccx: &CrateContext) -> Type { - Type::empty_struct(ccx) - } - pub fn metadata(ccx: &CrateContext) -> Type { ty!(llvm::LLVMRustMetadataTypeInContext(ccx.llcx())) } @@ -202,9 +198,6 @@ impl Type { ty!(llvm::LLVMStructCreateNamed(ccx.llcx(), name.as_ptr())) } - pub fn empty_struct(ccx: &CrateContext) -> Type { - Type::struct_(ccx, &[], false) - } pub fn array(ty: &Type, len: u64) -> Type { ty!(llvm::LLVMRustArrayType(ty.to_ref(), len)) diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index bd37bfb01d71a..7474e71a715f7 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -9,10 +9,9 @@ // except according to those terms. use abi::FnType; -use adt; use common::*; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, Align, Layout, LayoutOf, Size, FullLayout}; +use rustc::ty::layout::{self, HasDataLayout, Align, LayoutOf, Size, FullLayout}; use trans_item::DefPathBasedNames; use type_::Type; @@ -43,30 +42,10 @@ pub fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> } } -fn compute_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { - // Check the cache. 
- if let Some(&llty) = cx.lltypes().borrow().get(&t) { - return llty; - } - - debug!("type_of {:?}", t); - - assert!(!t.has_escaping_regions(), "{:?} has escaping regions", t); - - // Replace any typedef'd types with their equivalent non-typedef - // type. This ensures that all LLVM nominal types that contain - // Rust types are defined as the same LLVM types. If we don't do - // this then, e.g. `Option<{myfield: bool}>` would be a different - // type than `Option`. - let t_norm = cx.tcx().erase_regions(&t); - - if t != t_norm { - let llty = cx.llvm_type_of(t_norm); - debug!("--> normalized {:?} to {:?} llty={:?}", t, t_norm, llty); - cx.lltypes().borrow_mut().insert(t, llty); - return llty; - } - +fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + ty: Ty<'tcx>, + defer: &mut Option<(Type, FullLayout<'tcx>)>) + -> Type { let ptr_ty = |ty: Ty<'tcx>| { if cx.shared().type_has_metadata(ty) { if let ty::TyStr = ty.sty { @@ -88,97 +67,130 @@ fn compute_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type cx.llvm_type_of(ty).ptr_to() } }; + match ty.sty { + ty::TyRef(_, ty::TypeAndMut{ty, ..}) | + ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { + return ptr_ty(ty); + } + ty::TyAdt(def, _) if def.is_box() => { + return ptr_ty(ty.boxed_ty()); + } + ty::TyFnPtr(sig) => { + let sig = cx.tcx().erase_late_bound_regions_and_normalize(&sig); + return FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to(); + } + _ => {} + } + + let layout = cx.layout_of(ty); + if let layout::Abi::Scalar(value) = layout.abi { + let llty = match value { + layout::Int(layout::I1, _) => Type::i8(cx), + layout::Int(i, _) => Type::from_integer(cx, i), + layout::F32 => Type::f32(cx), + layout::F64 => Type::f64(cx), + layout::Pointer => cx.llvm_type_of(layout::Pointer.to_ty(cx.tcx())) + }; + return llty; + } + + if let layout::Abi::Vector { .. 
} = layout.abi { + return Type::vector(&cx.llvm_type_of(layout.field(cx, 0).ty), + layout.fields.count() as u64); + } - let mut llty = match t.sty { - ty::TyBool => Type::bool(cx), - ty::TyChar => Type::char(cx), - ty::TyInt(t) => Type::int_from_ty(cx, t), - ty::TyUint(t) => Type::uint_from_ty(cx, t), - ty::TyFloat(t) => Type::float_from_ty(cx, t), - ty::TyNever => Type::nil(cx), - ty::TyClosure(..) => { - // Only create the named struct, but don't fill it in. We - // fill it in *after* placing it into the type cache. - adt::incomplete_type_of(cx, t, "closure") - } - ty::TyGenerator(..) => { - // Only create the named struct, but don't fill it in. We - // fill it in *after* placing it into the type cache. - adt::incomplete_type_of(cx, t, "generator") - } - - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { - ptr_ty(ty) - } - ty::TyAdt(def, _) if def.is_box() => { - ptr_ty(t.boxed_ty()) - } - - ty::TyArray(ty, size) => { - let llty = cx.llvm_type_of(ty); - let size = size.val.to_const_int().unwrap().to_u64().unwrap(); - Type::array(&llty, size) - } - - ty::TySlice(ty) => { - Type::array(&cx.llvm_type_of(ty), 0) - } - ty::TyStr => { - Type::array(&Type::i8(cx), 0) - } - ty::TyDynamic(..) | - ty::TyForeign(..) => adt::type_of(cx, t), - - ty::TyFnDef(..) => Type::nil(cx), - ty::TyFnPtr(sig) => { - let sig = cx.tcx().erase_late_bound_regions_and_normalize(&sig); - FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to() - } - ty::TyTuple(ref tys, _) if tys.is_empty() => Type::nil(cx), - ty::TyTuple(..) => { - adt::type_of(cx, t) - } - ty::TyAdt(..) if t.is_simd() => { - let e = t.simd_type(cx.tcx()); - if !e.is_machine() { - cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \ - a non-machine element type `{}`", - t, e)) - } - let llet = cx.llvm_type_of(e); - let n = t.simd_size(cx.tcx()) as u64; - Type::vector(&llet, n) - } - ty::TyAdt(..) => { - // Only create the named struct, but don't fill it in. 
We - // fill it in *after* placing it into the type cache. This - // avoids creating more than one copy of the enum when one - // of the enum's variants refers to the enum itself. - let name = llvm_type_name(cx, t); - adt::incomplete_type_of(cx, t, &name[..]) - } - - ty::TyInfer(..) | - ty::TyProjection(..) | - ty::TyParam(..) | - ty::TyAnon(..) | - ty::TyError => bug!("type_of with {:?}", t), + let name = match ty.sty { + ty::TyClosure(..) | ty::TyGenerator(..) | ty::TyAdt(..) => { + let mut name = String::with_capacity(32); + let printer = DefPathBasedNames::new(cx.tcx(), true, true); + printer.push_type_name(ty, &mut name); + Some(name) + } + _ => None }; - debug!("--> mapped t={:?} to llty={:?}", t, llty); + match *layout.fields { + layout::FieldPlacement::Union(_) => { + let size = layout.size(cx).bytes(); + let fill = Type::array(&Type::i8(cx), size); + match name { + None => { + Type::struct_(cx, &[fill], layout.is_packed()) + } + Some(ref name) => { + let mut llty = Type::named_struct(cx, name); + llty.set_struct_body(&[fill], layout.is_packed()); + llty + } + } + } + layout::FieldPlacement::Array { count, .. } => { + Type::array(&cx.llvm_type_of(layout.field(cx, 0).ty), count) + } + layout::FieldPlacement::Arbitrary { .. 
} => { + match name { + None => { + Type::struct_(cx, &struct_llfields(cx, layout), layout.is_packed()) + } + Some(ref name) => { + let llty = Type::named_struct(cx, name); + *defer = Some((llty, layout)); + llty + } + } + } + } +} + +pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + layout: FullLayout<'tcx>) -> Vec { + debug!("struct_llfields: {:#?}", layout); + let align = layout.align(cx); + let size = layout.size(cx); + let field_count = layout.fields.count(); + + let mut offset = Size::from_bytes(0); + let mut result: Vec = Vec::with_capacity(1 + field_count * 2); + for i in layout.fields.index_by_increasing_offset() { + let field = layout.field(cx, i); + let target_offset = layout.fields.offset(i as usize); + debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}", + i, field, offset, target_offset); + assert!(target_offset >= offset); + let padding = target_offset - offset; + result.push(Type::array(&Type::i8(cx), padding.bytes())); + debug!(" padding before: {:?}", padding); - cx.lltypes().borrow_mut().insert(t, llty); + let llty = cx.llvm_type_of(field.ty); + result.push(llty); - // If this was an enum or struct, fill in the type now. - match t.sty { - ty::TyAdt(..) | ty::TyClosure(..) | ty::TyGenerator(..) 
if !t.is_simd() && !t.is_box() => { - adt::finish_type_of(cx, t, &mut llty); + if layout.is_packed() { + assert_eq!(padding.bytes(), 0); + } else { + let field_align = field.align(cx); + assert!(field_align.abi() <= align.abi(), + "non-packed type has field with larger align ({}): {:#?}", + field_align.abi(), layout); } - _ => () + + offset = target_offset + field.size(cx); + } + if !layout.is_unsized() && field_count > 0 { + if offset > size { + bug!("layout: {:#?} stride: {:?} offset: {:?}", + layout, size, offset); + } + let padding = size - offset; + debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", + padding, offset, size); + result.push(Type::array(&Type::i8(cx), padding.bytes())); + assert!(result.len() == 1 + field_count * 2); + } else { + debug!("struct_llfields: offset: {:?} stride: {:?}", + offset, size); } - llty + result } impl<'a, 'tcx> CrateContext<'a, 'tcx> { @@ -219,7 +231,38 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. pub fn llvm_type_of(&self, ty: Ty<'tcx>) -> Type { - compute_llvm_type(self, ty) + // Check the cache. + if let Some(&llty) = self.lltypes().borrow().get(&ty) { + return llty; + } + + debug!("type_of {:?}", ty); + + assert!(!ty.has_escaping_regions(), "{:?} has escaping regions", ty); + + // Make sure lifetimes are erased, to avoid generating distinct LLVM + // types for Rust types that only differ in the choice of lifetimes. 
+ let normal_ty = self.tcx().erase_regions(&ty); + + if ty != normal_ty { + let llty = self.llvm_type_of(normal_ty); + debug!("--> normalized {:?} to {:?} llty={:?}", ty, normal_ty, llty); + self.lltypes().borrow_mut().insert(ty, llty); + return llty; + } + + let mut defer = None; + let llty = uncached_llvm_type(self, ty, &mut defer); + + debug!("--> mapped ty={:?} to llty={:?}", ty, llty); + + self.lltypes().borrow_mut().insert(ty, llty); + + if let Some((mut llty, layout)) = defer { + llty.set_struct_body(&struct_llfields(self, layout), layout.is_packed()) + } + + llty } pub fn immediate_llvm_type_of(&self, ty: Ty<'tcx>) -> Type { @@ -240,26 +283,18 @@ impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { if let layout::Abi::Scalar(_) = self.abi { bug!("FullLayout::llvm_field_index({:?}): not applicable", self); } - let index = self.fields.memory_index(index); - match *self.layout { - Layout::Vector | Layout::Array => { - index as u64 + match *self.fields { + layout::FieldPlacement::Union(_) => { + bug!("FullLayout::llvm_field_index({:?}): not applicable", self) } - Layout::FatPointer | Layout::Univariant => { - adt::memory_index_to_gep(index as u64) + layout::FieldPlacement::Array { .. } => { + index as u64 } - _ => { - bug!("FullLayout::llvm_field_index({:?}): not applicable", self) + layout::FieldPlacement::Arbitrary { .. } => { + 1 + (self.fields.memory_index(index) as u64) * 2 } } } } - -fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> String { - let mut name = String::with_capacity(32); - let printer = DefPathBasedNames::new(cx.tcx(), true, true); - printer.push_type_name(ty, &mut name); - name -} From b2d52d2132b54794a7e4bfcb4fb0aa31169af207 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Wed, 20 Sep 2017 00:29:34 +0300 Subject: [PATCH 34/69] rustc: do not pub use Layout::* in layout. 
--- src/librustc/ty/layout.rs | 23 +++++++++++------------ src/librustc_trans/abi.rs | 2 +- src/librustc_trans/debuginfo/metadata.rs | 11 ++++++----- src/librustc_trans/mir/constant.rs | 10 +++++----- src/librustc_trans/mir/lvalue.rs | 18 +++++++++--------- 5 files changed, 32 insertions(+), 32 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index d905592347f64..a0c0fb481aaae 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -9,7 +9,6 @@ // except according to those terms. pub use self::Integer::*; -pub use self::Layout::*; pub use self::Primitive::*; use session::{self, DataTypeKind, Session}; @@ -2015,10 +2014,10 @@ impl<'a, 'tcx> FullLayout<'tcx> { }; let (layout, fields, abi) = match *self.layout { - Univariant => (self.layout, self.fields, self.abi), + Layout::Univariant => (self.layout, self.fields, self.abi), - NullablePointer { ref variants, .. } | - General { ref variants, .. } => { + Layout::NullablePointer { ref variants, .. } | + Layout::General { ref variants, .. } => { let variant = &variants[variant_index]; (&variant.layout, &variant.fields, variant.abi) } @@ -2104,8 +2103,8 @@ impl<'a, 'tcx> FullLayout<'tcx> { match self.variant_index { None => match *self.layout { // Discriminant field for enums (where applicable). - General { discr, .. } | - NullablePointer { discr, .. } => { + Layout::General { discr, .. } | + Layout::NullablePointer { discr, .. } => { return [discr.to_ty(tcx)][i]; } _ if def.variants.len() > 1 => return [][i], @@ -2174,10 +2173,10 @@ impl<'a, 'tcx> FullLayout<'tcx> { { let tcx = cx.tcx(); match (self.layout, self.abi, &self.ty.sty) { - (&Scalar, Abi::Scalar(Pointer), _) if !self.ty.is_unsafe_ptr() => { + (&Layout::Scalar, Abi::Scalar(Pointer), _) if !self.ty.is_unsafe_ptr() => { Ok(Some((Size::from_bytes(0), Pointer))) } - (&General { discr, .. }, _, &ty::TyAdt(def, _)) => { + (&Layout::General { discr, .. 
}, _, &ty::TyAdt(def, _)) => { if def.discriminants(tcx).all(|d| d.to_u128_unchecked() != 0) { Ok(Some((self.fields.offset(0), discr))) } else { @@ -2185,7 +2184,7 @@ impl<'a, 'tcx> FullLayout<'tcx> { } } - (&FatPointer, _, _) if !self.ty.is_unsafe_ptr() => { + (&Layout::FatPointer, _, _) if !self.ty.is_unsafe_ptr() => { Ok(Some((self.fields.offset(FAT_PTR_ADDR), Pointer))) } @@ -2193,10 +2192,10 @@ impl<'a, 'tcx> FullLayout<'tcx> { (_, _, &ty::TyAdt(def, _)) if Some(def.did) == tcx.lang_items().non_zero() => { let field = self.field(cx, 0)?; match (field.layout, field.abi) { - (&Scalar, Abi::Scalar(value)) => { + (&Layout::Scalar, Abi::Scalar(value)) => { Ok(Some((self.fields.offset(0), value))) } - (&FatPointer, _) => { + (&Layout::FatPointer, _) => { Ok(Some((self.fields.offset(0) + field.fields.offset(FAT_PTR_ADDR), Pointer))) @@ -2206,7 +2205,7 @@ impl<'a, 'tcx> FullLayout<'tcx> { } // Perhaps one of the fields is non-zero, let's recurse and find out. - (&Univariant, _, _) => { + (&Layout::Univariant, _, _) => { for i in 0..self.fields.count() { let r = self.field(cx, i)?.non_zero_field(cx)?; if let Some((offset, primitive)) = r { diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 365570edd65ef..6aa49080dd0d8 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -766,7 +766,7 @@ impl<'a, 'tcx> FnType<'tcx> { for ty in inputs.iter().chain(extra_args.iter()) { let mut arg = arg_of(ty, false); - if let ty::layout::FatPointer { .. } = *arg.layout.layout { + if let ty::layout::Layout::FatPointer { .. 
} = *arg.layout.layout { let mut data = ArgType::new(arg.layout.field(ccx, 0)); let mut info = ArgType::new(arg.layout.field(ccx, 1)); diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 5948c3a3e5926..f488ebaa4f513 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1130,7 +1130,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { -> Vec { let adt = &self.enum_type.ty_adt_def().unwrap(); match *self.type_rep.layout { - layout::General { ref variants, .. } => { + layout::Layout::General { ref variants, .. } => { let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata .expect("")); (0..variants.len()).map(|i| { @@ -1159,7 +1159,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } }).collect() }, - layout::Univariant => { + layout::Layout::Univariant => { assert!(adt.variants.len() <= 1); if adt.variants.is_empty() { @@ -1191,7 +1191,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { ] } } - layout::NullablePointer { + layout::Layout::NullablePointer { nndiscr, discr, .. @@ -1432,8 +1432,9 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let type_rep = cx.layout_of(enum_type); let discriminant_type_metadata = match *type_rep.layout { - layout::NullablePointer { .. } | layout::Univariant { .. } => None, - layout::General { discr, .. } => Some(discriminant_type_metadata(discr)), + layout::Layout::NullablePointer { .. } | + layout::Layout::Univariant { .. } => None, + layout::Layout::General { discr, .. } => Some(discriminant_type_metadata(discr)), ref l @ _ => bug!("Not an enum layout: {:#?}", l) }; diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index ae19c865d1c93..9c43d8b36272e 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1091,7 +1091,7 @@ fn trans_const_adt<'a, 'tcx>( _ => 0, }; match *l.layout { - layout::General { .. 
} => { + layout::Layout::General { .. } => { let discr = match *kind { mir::AggregateKind::Adt(adt_def, _, _, _) => { adt_def.discriminant_for_variant(ccx.tcx(), variant_index) @@ -1108,7 +1108,7 @@ fn trans_const_adt<'a, 'tcx>( build_const_struct(ccx, l.for_variant(variant_index), vals, Some(discr)) } } - layout::UntaggedUnion => { + layout::Layout::UntaggedUnion => { assert_eq!(variant_index, 0); let contents = [ vals[0].llval, @@ -1117,14 +1117,14 @@ fn trans_const_adt<'a, 'tcx>( Const::new(C_struct(ccx, &contents, l.is_packed()), t) } - layout::Univariant => { + layout::Layout::Univariant => { assert_eq!(variant_index, 0); build_const_struct(ccx, l, vals, None) } - layout::Vector => { + layout::Layout::Vector => { Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::>()), t) } - layout::NullablePointer { nndiscr, .. } => { + layout::Layout::NullablePointer { nndiscr, .. } => { if variant_index as u64 == nndiscr { build_const_struct(ccx, l.for_variant(variant_index), vals, None) } else { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index b72ccf6ba28ea..f6c260e4c1523 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -312,8 +312,8 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let cast_to = bcx.ccx.immediate_llvm_type_of(cast_to); match *l.layout { - layout::Univariant { .. } | - layout::UntaggedUnion { .. } => return C_uint(cast_to, 0), + layout::Layout::Univariant { .. } | + layout::Layout::UntaggedUnion { .. } => return C_uint(cast_to, 0), _ => {} } @@ -324,7 +324,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { _ => bug!("discriminant not scalar: {:#?}", discr_layout) }; let (min, max) = match *l.layout { - layout::General { ref discr_range, .. } => (discr_range.start, discr_range.end), + layout::Layout::General { ref discr_range, .. 
} => (discr_range.start, discr_range.end), _ => (0, u64::max_value()), }; let max_next = max.wrapping_add(1); @@ -350,14 +350,14 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } }; match *l.layout { - layout::General { .. } => { + layout::Layout::General { .. } => { let signed = match discr_scalar { layout::Int(_, signed) => signed, _ => false }; bcx.intcast(lldiscr, cast_to, signed) } - layout::NullablePointer { nndiscr, .. } => { + layout::Layout::NullablePointer { nndiscr, .. } => { let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; let zero = C_null(bcx.ccx.llvm_type_of(discr_layout.ty)); bcx.intcast(bcx.icmp(cmp, lldiscr, zero), cast_to, false) @@ -374,12 +374,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> { .discriminant_for_variant(bcx.tcx(), variant_index) .to_u128_unchecked() as u64; match *l.layout { - layout::General { .. } => { + layout::Layout::General { .. } => { let ptr = self.project_field(bcx, 0); bcx.store(C_int(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx())), to as i64), ptr.llval, ptr.alignment.non_abi()); } - layout::NullablePointer { nndiscr, .. } => { + layout::Layout::NullablePointer { nndiscr, .. } => { if to != nndiscr { let use_memset = match l.abi { layout::Abi::Scalar(_) => false, @@ -429,8 +429,8 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // If this is an enum, cast to the appropriate variant struct type. let layout = bcx.ccx.layout_of(ty); match *layout.layout { - layout::NullablePointer { .. } | - layout::General { .. } => { + layout::Layout::NullablePointer { .. } | + layout::Layout::General { .. } => { let variant_layout = layout.for_variant(variant_index); let variant_ty = Type::struct_(bcx.ccx, &type_of::struct_llfields(bcx.ccx, variant_layout), From f2e7e17d9e9b8faeb13c388c56ef135978a77c58 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Wed, 20 Sep 2017 02:32:22 +0300 Subject: [PATCH 35/69] rustc_trans: pass OperandRef arguments to trans_intrinsic_call. 
--- src/librustc_trans/intrinsic.rs | 291 ++++++++++++++++---------------- src/librustc_trans/mir/block.rs | 113 +++++++------ 2 files changed, 206 insertions(+), 198 deletions(-) diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index dbb8ef261f83d..25729449dbca3 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -13,7 +13,7 @@ use intrinsics::{self, Intrinsic}; use llvm; use llvm::{ValueRef}; -use abi::{self, Abi, FnType}; +use abi::{Abi, FnType}; use mir::lvalue::{LvalueRef, Alignment}; use mir::operand::{OperandRef, OperandValue}; use base::*; @@ -87,7 +87,7 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, callee_ty: Ty<'tcx>, fn_ty: &FnType, - llargs: &[ValueRef], + args: &[OperandRef<'tcx>], llresult: ValueRef, span: Span) { let ccx = bcx.ccx; @@ -110,21 +110,27 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let simple = get_simple_intrinsic(ccx, name); let llval = match name { _ if simple.is_some() => { - bcx.call(simple.unwrap(), &llargs, None) + bcx.call(simple.unwrap(), + &args.iter().map(|arg| arg.immediate()).collect::>(), + None) } "unreachable" => { return; }, "likely" => { let expect = ccx.get_intrinsic(&("llvm.expect.i1")); - bcx.call(expect, &[llargs[0], C_bool(ccx, true)], None) + bcx.call(expect, &[args[0].immediate(), C_bool(ccx, true)], None) } "unlikely" => { let expect = ccx.get_intrinsic(&("llvm.expect.i1")); - bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None) + bcx.call(expect, &[args[0].immediate(), C_bool(ccx, false)], None) } "try" => { - try_intrinsic(bcx, ccx, llargs[0], llargs[1], llargs[2], llresult); + try_intrinsic(bcx, ccx, + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + llresult); return; } "breakpoint" => { @@ -137,14 +143,12 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } "size_of_val" => { let tp_ty = 
substs.type_at(0); - if bcx.ccx.shared().type_is_sized(tp_ty) { - C_usize(ccx, ccx.size_of(tp_ty).bytes()) - } else if bcx.ccx.shared().type_has_metadata(tp_ty) { + if let OperandValue::Pair(_, meta) = args[0].val { let (llsize, _) = - glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); + glue::size_and_align_of_dst(bcx, tp_ty, meta); llsize } else { - C_usize(ccx, 0) + C_usize(ccx, ccx.size_of(tp_ty).bytes()) } } "min_align_of" => { @@ -153,14 +157,12 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } "min_align_of_val" => { let tp_ty = substs.type_at(0); - if bcx.ccx.shared().type_is_sized(tp_ty) { - C_usize(ccx, ccx.align_of(tp_ty).abi()) - } else if bcx.ccx.shared().type_has_metadata(tp_ty) { + if let OperandValue::Pair(_, meta) = args[0].val { let (_, llalign) = - glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); + glue::size_and_align_of_dst(bcx, tp_ty, meta); llalign } else { - C_usize(ccx, 1) + C_usize(ccx, ccx.align_of(tp_ty).abi()) } } "pref_align_of" => { @@ -196,38 +198,44 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, C_bool(ccx, bcx.ccx.shared().type_needs_drop(tp_ty)) } "offset" => { - let ptr = llargs[0]; - let offset = llargs[1]; + let ptr = args[0].immediate(); + let offset = args[1].immediate(); bcx.inbounds_gep(ptr, &[offset]) } "arith_offset" => { - let ptr = llargs[0]; - let offset = llargs[1]; + let ptr = args[0].immediate(); + let offset = args[1].immediate(); bcx.gep(ptr, &[offset]) } "copy_nonoverlapping" => { - copy_intrinsic(bcx, false, false, substs.type_at(0), llargs[1], llargs[0], llargs[2]) + copy_intrinsic(bcx, false, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()) } "copy" => { - copy_intrinsic(bcx, true, false, substs.type_at(0), llargs[1], llargs[0], llargs[2]) + copy_intrinsic(bcx, true, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()) } "write_bytes" => { - memset_intrinsic(bcx, false, substs.type_at(0), 
llargs[0], llargs[1], llargs[2]) + memset_intrinsic(bcx, false, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) } "volatile_copy_nonoverlapping_memory" => { - copy_intrinsic(bcx, false, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) + copy_intrinsic(bcx, false, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) } "volatile_copy_memory" => { - copy_intrinsic(bcx, true, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) + copy_intrinsic(bcx, true, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) } "volatile_set_memory" => { - memset_intrinsic(bcx, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) + memset_intrinsic(bcx, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) } "volatile_load" => { let tp_ty = substs.type_at(0); - let mut ptr = llargs[0]; + let mut ptr = args[0].immediate(); if let Some(ty) = fn_ty.ret.cast { ptr = bcx.pointercast(ptr, ty.llvm_type(ccx).ptr_to()); } @@ -239,18 +247,18 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, }, "volatile_store" => { let tp_ty = substs.type_at(0); - let dst = LvalueRef::new_sized(llargs[0], tp_ty, Alignment::AbiAligned); - if type_is_fat_ptr(bcx.ccx, tp_ty) { - bcx.volatile_store(llargs[1], dst.project_field(bcx, abi::FAT_PTR_ADDR).llval); - bcx.volatile_store(llargs[2], dst.project_field(bcx, abi::FAT_PTR_EXTRA).llval); + let dst = LvalueRef::new_sized(args[0].immediate(), tp_ty, Alignment::AbiAligned); + if let OperandValue::Pair(a, b) = args[1].val { + bcx.volatile_store(a, dst.project_field(bcx, 0).llval); + bcx.volatile_store(b, dst.project_field(bcx, 1).llval); } else { - let val = if fn_ty.args[1].is_indirect() { - bcx.load(llargs[1], None) + let val = if let OperandValue::Ref(ptr, align) = args[1].val { + bcx.load(ptr, align.non_abi()) } else { if type_is_zero_size(ccx, tp_ty) { return; } - from_immediate(bcx, 
llargs[1]) + from_immediate(bcx, args[1].immediate()) }; let ptr = bcx.pointercast(dst.llval, val_ty(val).ptr_to()); let store = bcx.volatile_store(val, ptr); @@ -270,7 +278,12 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "prefetch_write_instruction" => (1, 0), _ => bug!() }; - bcx.call(expect, &[llargs[0], C_i32(ccx, rw), llargs[1], C_i32(ccx, cache_type)], None) + bcx.call(expect, &[ + args[0].immediate(), + C_i32(ccx, rw), + args[1].immediate(), + C_i32(ccx, cache_type) + ], None) }, "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" | @@ -283,22 +296,22 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "ctlz" | "cttz" => { let y = C_bool(bcx.ccx, false); let llfn = ccx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); - bcx.call(llfn, &[llargs[0], y], None) + bcx.call(llfn, &[args[0].immediate(), y], None) } "ctlz_nonzero" | "cttz_nonzero" => { let y = C_bool(bcx.ccx, true); let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); let llfn = ccx.get_intrinsic(llvm_name); - bcx.call(llfn, &[llargs[0], y], None) + bcx.call(llfn, &[args[0].immediate(), y], None) } "ctpop" => bcx.call(ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), - &llargs, None), + &[args[0].immediate()], None), "bswap" => { if width == 8 { - llargs[0] // byte swap a u8/i8 is just a no-op + args[0].immediate() // byte swap a u8/i8 is just a no-op } else { bcx.call(ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)), - &llargs, None) + &[args[0].immediate()], None) } } "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { @@ -308,7 +321,10 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let llfn = bcx.ccx.get_intrinsic(&intrinsic); // Convert `i1` to a `bool`, and write it to the out parameter - let pair = bcx.call(llfn, &[llargs[0], llargs[1]], None); + let pair = bcx.call(llfn, &[ + args[0].immediate(), + args[1].immediate() + ], 
None); let val = bcx.extract_value(pair, 0); let overflow = bcx.zext(bcx.extract_value(pair, 1), Type::bool(ccx)); @@ -319,27 +335,27 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, return; }, - "overflowing_add" => bcx.add(llargs[0], llargs[1]), - "overflowing_sub" => bcx.sub(llargs[0], llargs[1]), - "overflowing_mul" => bcx.mul(llargs[0], llargs[1]), + "overflowing_add" => bcx.add(args[0].immediate(), args[1].immediate()), + "overflowing_sub" => bcx.sub(args[0].immediate(), args[1].immediate()), + "overflowing_mul" => bcx.mul(args[0].immediate(), args[1].immediate()), "unchecked_div" => if signed { - bcx.sdiv(llargs[0], llargs[1]) + bcx.sdiv(args[0].immediate(), args[1].immediate()) } else { - bcx.udiv(llargs[0], llargs[1]) + bcx.udiv(args[0].immediate(), args[1].immediate()) }, "unchecked_rem" => if signed { - bcx.srem(llargs[0], llargs[1]) + bcx.srem(args[0].immediate(), args[1].immediate()) } else { - bcx.urem(llargs[0], llargs[1]) + bcx.urem(args[0].immediate(), args[1].immediate()) }, - "unchecked_shl" => bcx.shl(llargs[0], llargs[1]), + "unchecked_shl" => bcx.shl(args[0].immediate(), args[1].immediate()), "unchecked_shr" => if signed { - bcx.ashr(llargs[0], llargs[1]) + bcx.ashr(args[0].immediate(), args[1].immediate()) } else { - bcx.lshr(llargs[0], llargs[1]) + bcx.lshr(args[0].immediate(), args[1].immediate()) }, _ => bug!(), }, @@ -358,11 +374,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, match float_type_width(sty) { Some(_width) => match name { - "fadd_fast" => bcx.fadd_fast(llargs[0], llargs[1]), - "fsub_fast" => bcx.fsub_fast(llargs[0], llargs[1]), - "fmul_fast" => bcx.fmul_fast(llargs[0], llargs[1]), - "fdiv_fast" => bcx.fdiv_fast(llargs[0], llargs[1]), - "frem_fast" => bcx.frem_fast(llargs[0], llargs[1]), + "fadd_fast" => bcx.fadd_fast(args[0].immediate(), args[1].immediate()), + "fsub_fast" => bcx.fsub_fast(args[0].immediate(), args[1].immediate()), + "fmul_fast" => bcx.fmul_fast(args[0].immediate(), 
args[1].immediate()), + "fdiv_fast" => bcx.fdiv_fast(args[0].immediate(), args[1].immediate()), + "frem_fast" => bcx.frem_fast(args[0].immediate(), args[1].immediate()), _ => bug!(), }, None => { @@ -378,7 +394,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "discriminant_value" => { let val_ty = substs.type_at(0); - let adt_val = LvalueRef::new_sized(llargs[0], val_ty, Alignment::AbiAligned); + let adt_val = LvalueRef::new_sized(args[0].immediate(), + val_ty, + Alignment::AbiAligned); match val_ty.sty { ty::TyAdt(adt, ..) if adt.is_enum() => { adt_val.trans_get_discr(bcx, ret_ty) @@ -389,19 +407,20 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "align_offset" => { // `ptr as usize` - let ptr_val = bcx.ptrtoint(llargs[0], bcx.ccx.isize_ty()); + let ptr_val = bcx.ptrtoint(args[0].immediate(), bcx.ccx.isize_ty()); // `ptr_val % align` - let offset = bcx.urem(ptr_val, llargs[1]); + let align = args[1].immediate(); + let offset = bcx.urem(ptr_val, align); let zero = C_null(bcx.ccx.isize_ty()); // `offset == 0` let is_zero = bcx.icmp(llvm::IntPredicate::IntEQ, offset, zero); // `if offset == 0 { 0 } else { offset - align }` - bcx.select(is_zero, zero, bcx.sub(offset, llargs[1])) + bcx.select(is_zero, zero, bcx.sub(offset, align)) } name if name.starts_with("simd_") => { match generic_simd_intrinsic(bcx, name, callee_ty, - &llargs, + args, ret_ty, llret_ty, span) { Ok(llval) => llval, @@ -451,8 +470,13 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let ty = substs.type_at(0); if int_type_width_signed(ty, ccx).is_some() { let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; - let pair = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order, - failorder, weak); + let pair = bcx.atomic_cmpxchg( + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + order, + failorder, + weak); let val = bcx.extract_value(pair, 0); let success = bcx.zext(bcx.extract_value(pair, 1), 
Type::bool(bcx.ccx)); @@ -470,7 +494,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let ty = substs.type_at(0); if int_type_width_signed(ty, ccx).is_some() { let align = ccx.align_of(ty); - bcx.atomic_load(llargs[0], order, align) + bcx.atomic_load(args[0].immediate(), order, align) } else { return invalid_monomorphization(ty); } @@ -480,7 +504,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let ty = substs.type_at(0); if int_type_width_signed(ty, ccx).is_some() { let align = ccx.align_of(ty); - bcx.atomic_store(llargs[1], llargs[0], order, align); + bcx.atomic_store(args[1].immediate(), args[0].immediate(), order, align); return; } else { return invalid_monomorphization(ty); @@ -516,7 +540,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let ty = substs.type_at(0); if int_type_width_signed(ty, ccx).is_some() { - bcx.atomic_rmw(atom_op, llargs[0], llargs[1], order) + bcx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order) } else { return invalid_monomorphization(ty); } @@ -533,13 +557,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, assert_eq!(x.len(), 1); x.into_iter().next().unwrap() } - fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type, - any_changes_needed: &mut bool) -> Vec { + fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type) -> Vec { use intrinsics::Type::*; match *t { Void => vec![Type::void(ccx)], - Integer(_signed, width, llvm_width) => { - *any_changes_needed |= width != llvm_width; + Integer(_signed, _width, llvm_width) => { vec![Type::ix(ccx, llvm_width as u64)] } Float(x) => { @@ -550,29 +572,24 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } } Pointer(ref t, ref llvm_elem, _const) => { - *any_changes_needed |= llvm_elem.is_some(); - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(ccx, t, any_changes_needed)); + let elem = one(ty_to_type(ccx, t)); vec![elem.ptr_to()] } Vector(ref t, ref llvm_elem, length) 
=> { - *any_changes_needed |= llvm_elem.is_some(); - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(ccx, t, any_changes_needed)); + let elem = one(ty_to_type(ccx, t)); vec![Type::vector(&elem, length as u64)] } Aggregate(false, ref contents) => { let elems = contents.iter() - .map(|t| one(ty_to_type(ccx, t, any_changes_needed))) + .map(|t| one(ty_to_type(ccx, t))) .collect::>(); vec![Type::struct_(ccx, &elems, false)] } Aggregate(true, ref contents) => { - *any_changes_needed = true; contents.iter() - .flat_map(|t| ty_to_type(ccx, t, any_changes_needed)) + .flat_map(|t| ty_to_type(ccx, t)) .collect() } } @@ -584,8 +601,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // cast. fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: &intrinsics::Type, - arg_type: Ty<'tcx>, - llarg: ValueRef) + arg: &OperandRef<'tcx>) -> Vec { match *t { @@ -596,54 +612,44 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // This assumes the type is "simple", i.e. no // destructors, and the contents are SIMD // etc. 
- assert!(!bcx.ccx.shared().type_needs_drop(arg_type)); - let arg = LvalueRef::new_sized(llarg, arg_type, Alignment::AbiAligned); + assert!(!bcx.ccx.shared().type_needs_drop(arg.ty)); + let (ptr, align) = match arg.val { + OperandValue::Ref(ptr, align) => (ptr, align), + _ => bug!() + }; + let arg = LvalueRef::new_sized(ptr, arg.ty, align); (0..contents.len()).map(|i| { arg.project_field(bcx, i).load(bcx).immediate() }).collect() } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { - let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false)); - vec![bcx.pointercast(llarg, llvm_elem.ptr_to())] + let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem)); + vec![bcx.pointercast(arg.immediate(), llvm_elem.ptr_to())] } intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { - let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false)); - vec![bcx.bitcast(llarg, Type::vector(&llvm_elem, length as u64))] + let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem)); + vec![bcx.bitcast(arg.immediate(), Type::vector(&llvm_elem, length as u64))] } intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { // the LLVM intrinsic uses a smaller integer // size than the C intrinsic's signature, so // we have to trim it down here. 
- vec![bcx.trunc(llarg, Type::ix(bcx.ccx, llvm_width as u64))] + vec![bcx.trunc(arg.immediate(), Type::ix(bcx.ccx, llvm_width as u64))] } - _ => vec![llarg], + _ => vec![arg.immediate()], } } - let mut any_changes_needed = false; let inputs = intr.inputs.iter() - .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed)) + .flat_map(|t| ty_to_type(ccx, t)) .collect::>(); - let mut out_changes = false; - let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes)); - // outputting a flattened aggregate is nonsense - assert!(!out_changes); + let outputs = one(ty_to_type(ccx, &intr.output)); - let llargs = if !any_changes_needed { - // no aggregates to flatten, so no change needed - llargs.to_vec() - } else { - // there are some aggregates that need to be flattened - // in the LLVM call, so we need to run over the types - // again to find them and extract the arguments - intr.inputs.iter() - .zip(llargs) - .zip(arg_tys) - .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg)) - .collect() - }; + let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { + modify_as_needed(bcx, t, arg) + }).collect(); assert_eq!(inputs.len(), llargs.len()); let val = match intr.definition { @@ -977,7 +983,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( bcx: &Builder<'a, 'tcx>, name: &str, callee_ty: Ty<'tcx>, - llargs: &[ValueRef], + args: &[OperandRef<'tcx>], ret_ty: Ty<'tcx>, llret_ty: Type, span: Span @@ -1046,8 +1052,8 @@ fn generic_simd_intrinsic<'a, 'tcx>( ret_ty.simd_type(tcx)); return Ok(compare_simd_types(bcx, - llargs[0], - llargs[1], + args[0].immediate(), + args[1].immediate(), in_elem, llret_ty, cmp_op)) @@ -1074,7 +1080,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( let total_len = in_len as u128 * 2; - let vector = llargs[2]; + let vector = args[2].immediate(); let indices: Option> = (0..n) .map(|i| { @@ -1099,20 +1105,24 @@ fn generic_simd_intrinsic<'a, 'tcx>( None => return Ok(C_null(llret_ty)) }; - return Ok(bcx.shuffle_vector(llargs[0], llargs[1], 
C_vector(&indices))) + return Ok(bcx.shuffle_vector(args[0].immediate(), + args[1].immediate(), + C_vector(&indices))) } if name == "simd_insert" { require!(in_elem == arg_tys[2], "expected inserted type `{}` (element of input `{}`), found `{}`", in_elem, in_ty, arg_tys[2]); - return Ok(bcx.insert_element(llargs[0], llargs[2], llargs[1])) + return Ok(bcx.insert_element(args[0].immediate(), + args[2].immediate(), + args[1].immediate())) } if name == "simd_extract" { require!(ret_ty == in_elem, "expected return type `{}` (element of input `{}`), found `{}`", in_elem, in_ty, ret_ty); - return Ok(bcx.extract_element(llargs[0], llargs[1])) + return Ok(bcx.extract_element(args[0].immediate(), args[1].immediate())) } if name == "simd_cast" { @@ -1126,7 +1136,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( // casting cares about nominal type, not just structural type let out_elem = ret_ty.simd_type(tcx); - if in_elem == out_elem { return Ok(llargs[0]); } + if in_elem == out_elem { return Ok(args[0].immediate()); } enum Style { Float, Int(/* is signed? 
*/ bool), Unsupported } @@ -1148,34 +1158,34 @@ fn generic_simd_intrinsic<'a, 'tcx>( match (in_style, out_style) { (Style::Int(in_is_signed), Style::Int(_)) => { return Ok(match in_width.cmp(&out_width) { - Ordering::Greater => bcx.trunc(llargs[0], llret_ty), - Ordering::Equal => llargs[0], + Ordering::Greater => bcx.trunc(args[0].immediate(), llret_ty), + Ordering::Equal => args[0].immediate(), Ordering::Less => if in_is_signed { - bcx.sext(llargs[0], llret_ty) + bcx.sext(args[0].immediate(), llret_ty) } else { - bcx.zext(llargs[0], llret_ty) + bcx.zext(args[0].immediate(), llret_ty) } }) } (Style::Int(in_is_signed), Style::Float) => { return Ok(if in_is_signed { - bcx.sitofp(llargs[0], llret_ty) + bcx.sitofp(args[0].immediate(), llret_ty) } else { - bcx.uitofp(llargs[0], llret_ty) + bcx.uitofp(args[0].immediate(), llret_ty) }) } (Style::Float, Style::Int(out_is_signed)) => { return Ok(if out_is_signed { - bcx.fptosi(llargs[0], llret_ty) + bcx.fptosi(args[0].immediate(), llret_ty) } else { - bcx.fptoui(llargs[0], llret_ty) + bcx.fptoui(args[0].immediate(), llret_ty) }) } (Style::Float, Style::Float) => { return Ok(match in_width.cmp(&out_width) { - Ordering::Greater => bcx.fptrunc(llargs[0], llret_ty), - Ordering::Equal => llargs[0], - Ordering::Less => bcx.fpext(llargs[0], llret_ty) + Ordering::Greater => bcx.fptrunc(args[0].immediate(), llret_ty), + Ordering::Equal => args[0].immediate(), + Ordering::Less => bcx.fpext(args[0].immediate(), llret_ty) }) } _ => {/* Unsupported. Fallthrough. */} @@ -1187,21 +1197,18 @@ fn generic_simd_intrinsic<'a, 'tcx>( } macro_rules! 
arith { ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => { - $( - if name == stringify!($name) { - match in_elem.sty { - $( - $(ty::$p(_))|* => { - return Ok(bcx.$call(llargs[0], llargs[1])) - } - )* - _ => {}, - } - require!(false, - "unsupported operation on `{}` with element `{}`", - in_ty, - in_elem) - })* + $(if name == stringify!($name) { + match in_elem.sty { + $($(ty::$p(_))|* => { + return Ok(bcx.$call(args[0].immediate(), args[1].immediate())) + })* + _ => {}, + } + require!(false, + "unsupported operation on `{}` with element `{}`", + in_ty, + in_elem) + })* } } arith! { diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index cf5d43e2f2b6f..da3f6559dacef 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -493,74 +493,47 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { ReturnDest::Nothing }; - // Split the rust-call tupled arguments off. - let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() { - let (tup, args) = args.split_last().unwrap(); - (args, Some(tup)) - } else { - (&args[..], None) - }; - - let is_shuffle = intrinsic.map_or(false, |name| { - name.starts_with("simd_shuffle") - }); - let mut idx = 0; - for arg in first_args { - // The indices passed to simd_shuffle* in the - // third argument must be constant. This is - // checked by const-qualification, which also - // promotes any complex rvalues to constants. - if is_shuffle && idx == 2 { - match *arg { - mir::Operand::Consume(_) => { - span_bug!(span, "shuffle indices must be constant"); - } - mir::Operand::Constant(ref constant) => { - let val = self.trans_constant(&bcx, constant); - llargs.push(val.llval); - idx += 1; - continue; - } - } - } - - let mut op = self.trans_operand(&bcx, arg); - - // The callee needs to own the argument memory if we pass it - // by-ref, so make a local copy of non-immediate constants. 
- if let (&mir::Operand::Constant(_), Ref(..)) = (arg, op.val) { - let tmp = LvalueRef::alloca(&bcx, op.ty, "const"); - self.store_operand(&bcx, tmp.llval, tmp.alignment.to_align(), op); - op.val = Ref(tmp.llval, tmp.alignment); - } - - self.trans_argument(&bcx, op, &mut llargs, &fn_ty, - &mut idx, &mut llfn, &def); - } - if let Some(tup) = untuple { - self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty, - &mut idx, &mut llfn, &def) - } - if intrinsic.is_some() && intrinsic != Some("drop_in_place") { use intrinsic::trans_intrinsic_call; - let (dest, llargs) = match ret_dest { - _ if fn_ty.ret.is_indirect() => { - (llargs[0], &llargs[1..]) - } + let dest = match ret_dest { + _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { - (C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to()), &llargs[..]) + C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to()) } ReturnDest::IndirectOperand(dst, _) | - ReturnDest::Store(dst) => (dst.llval, &llargs[..]), + ReturnDest::Store(dst) => dst.llval, ReturnDest::DirectOperand(_) => bug!("Cannot use direct operand with an intrinsic call") }; + let args: Vec<_> = args.iter().enumerate().map(|(i, arg)| { + // The indices passed to simd_shuffle* in the + // third argument must be constant. This is + // checked by const-qualification, which also + // promotes any complex rvalues to constants. 
+ if i == 2 && intrinsic.unwrap().starts_with("simd_shuffle") { + match *arg { + mir::Operand::Consume(_) => { + span_bug!(span, "shuffle indices must be constant"); + } + mir::Operand::Constant(ref constant) => { + let val = self.trans_constant(&bcx, constant); + return OperandRef { + val: Immediate(val.llval), + ty: val.ty + }; + } + } + } + + self.trans_operand(&bcx, arg) + }).collect(); + + let callee_ty = common::instance_ty( bcx.ccx.tcx(), instance.as_ref().unwrap()); - trans_intrinsic_call(&bcx, callee_ty, &fn_ty, &llargs, dest, + trans_intrinsic_call(&bcx, callee_ty, &fn_ty, &args, dest, terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { @@ -581,6 +554,34 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return; } + // Split the rust-call tupled arguments off. + let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() { + let (tup, args) = args.split_last().unwrap(); + (args, Some(tup)) + } else { + (&args[..], None) + }; + + let mut idx = 0; + for arg in first_args { + let mut op = self.trans_operand(&bcx, arg); + + // The callee needs to own the argument memory if we pass it + // by-ref, so make a local copy of non-immediate constants. + if let (&mir::Operand::Constant(_), Ref(..)) = (arg, op.val) { + let tmp = LvalueRef::alloca(&bcx, op.ty, "const"); + op.store(&bcx, tmp); + op.val = Ref(tmp.llval, tmp.alignment); + } + + self.trans_argument(&bcx, op, &mut llargs, &fn_ty, + &mut idx, &mut llfn, &def); + } + if let Some(tup) = untuple { + self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty, + &mut idx, &mut llfn, &def) + } + let fn_ptr = match (llfn, instance) { (Some(llfn), _) => llfn, (None, Some(instance)) => callee::get_fn(bcx.ccx, instance), From 88f70323e451f63f812c6cd92cc1d654b32971e1 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Wed, 20 Sep 2017 05:16:06 +0300 Subject: [PATCH 36/69] rustc_trans: nest abi::ArgType's for fat pointers instead of eagerly flattening. 
--- src/librustc_trans/abi.rs | 108 ++++++++++++++++++++++++-------- src/librustc_trans/common.rs | 20 +++--- src/librustc_trans/mir/block.rs | 75 +++++++++------------- src/librustc_trans/mir/mod.rs | 95 ++++++++++++---------------- 4 files changed, 161 insertions(+), 137 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 6aa49080dd0d8..689976b6c42fc 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -420,7 +420,7 @@ impl CastTarget { /// should be passed to or returned from a function /// /// This is borrowed from clang's ABIInfo.h -#[derive(Clone, Copy, Debug)] +#[derive(Debug)] pub struct ArgType<'tcx> { kind: ArgKind, pub layout: FullLayout<'tcx>, @@ -429,7 +429,8 @@ pub struct ArgType<'tcx> { /// Dummy argument, which is emitted before the real argument. pub pad: Option, /// Attributes of argument. - pub attrs: ArgAttributes + pub attrs: ArgAttributes, + pub nested: Vec> } impl<'a, 'tcx> ArgType<'tcx> { @@ -439,11 +440,13 @@ impl<'a, 'tcx> ArgType<'tcx> { layout, cast: None, pad: None, - attrs: ArgAttributes::default() + attrs: ArgAttributes::default(), + nested: vec![] } } pub fn make_indirect(&mut self, ccx: &CrateContext<'a, 'tcx>) { + assert!(self.nested.is_empty()); assert_eq!(self.kind, ArgKind::Direct); // Wipe old attributes, likely not valid through indirection. 
@@ -460,6 +463,7 @@ impl<'a, 'tcx> ArgType<'tcx> { } pub fn ignore(&mut self) { + assert!(self.nested.is_empty()); assert_eq!(self.kind, ArgKind::Direct); self.kind = ArgKind::Ignore; } @@ -482,10 +486,12 @@ impl<'a, 'tcx> ArgType<'tcx> { } pub fn cast_to>(&mut self, target: T) { + assert!(self.nested.is_empty()); self.cast = Some(target.into()); } pub fn pad_with(&mut self, reg: Reg) { + assert!(self.nested.is_empty()); self.pad = Some(reg); } @@ -561,6 +567,12 @@ impl<'a, 'tcx> ArgType<'tcx> { } pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: LvalueRef<'tcx>) { + if !self.nested.is_empty() { + for (i, arg) in self.nested.iter().enumerate() { + arg.store_fn_arg(bcx, idx, dst.project_field(bcx, i)); + } + return; + } if self.pad.is_some() { *idx += 1; } @@ -578,7 +590,7 @@ impl<'a, 'tcx> ArgType<'tcx> { /// /// I will do my best to describe this structure, but these /// comments are reverse-engineered and may be inaccurate. -NDM -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct FnType<'tcx> { /// The LLVM types of each argument. pub args: Vec>, @@ -613,7 +625,8 @@ impl<'a, 'tcx> FnType<'tcx> { extra_args: &[Ty<'tcx>]) -> FnType<'tcx> { let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args); // Don't pass the vtable, it's not an argument of the virtual fn. - fn_ty.args[1].ignore(); + assert_eq!(fn_ty.args[0].nested.len(), 2); + fn_ty.args[0].nested[1].ignore(); fn_ty.adjust_for_abi(ccx, sig); fn_ty } @@ -766,7 +779,7 @@ impl<'a, 'tcx> FnType<'tcx> { for ty in inputs.iter().chain(extra_args.iter()) { let mut arg = arg_of(ty, false); - if let ty::layout::Layout::FatPointer { .. } = *arg.layout.layout { + if type_is_fat_ptr(ccx, ty) { let mut data = ArgType::new(arg.layout.field(ccx, 0)); let mut info = ArgType::new(arg.layout.field(ccx, 1)); @@ -780,14 +793,16 @@ impl<'a, 'tcx> FnType<'tcx> { info.attrs.set(ArgAttribute::NoAlias); } } - args.push(data); - args.push(info); + // FIXME(eddyb) other ABIs don't have logic for nested. 
+ if rust_abi { + arg.nested = vec![data, info]; + } } else { if let Some(inner) = rust_ptr_attrs(ty, &mut arg) { arg.attrs.set_dereferenceable(ccx.size_of(inner)); } - args.push(arg); } + args.push(arg); } FnType { @@ -854,6 +869,13 @@ impl<'a, 'tcx> FnType<'tcx> { } for arg in &mut self.args { if arg.is_ignore() { continue; } + if !arg.nested.is_empty() { + for arg in &mut arg.nested { + assert!(arg.nested.is_empty()); + fixup(arg); + } + continue; + } fixup(arg); } if self.ret.is_indirect() { @@ -915,24 +937,36 @@ impl<'a, 'tcx> FnType<'tcx> { ccx.immediate_llvm_type_of(self.ret.layout.ty) }; - for arg in &self.args { - if arg.is_ignore() { - continue; - } - // add padding - if let Some(ty) = arg.pad { - llargument_tys.push(ty.llvm_type(ccx)); - } + { + let mut push = |arg: &ArgType<'tcx>| { + if arg.is_ignore() { + return; + } + // add padding + if let Some(ty) = arg.pad { + llargument_tys.push(ty.llvm_type(ccx)); + } - let llarg_ty = if arg.is_indirect() { - arg.memory_ty(ccx).ptr_to() - } else if let Some(cast) = arg.cast { - cast.llvm_type(ccx) - } else { - ccx.immediate_llvm_type_of(arg.layout.ty) - }; + let llarg_ty = if arg.is_indirect() { + arg.memory_ty(ccx).ptr_to() + } else if let Some(cast) = arg.cast { + cast.llvm_type(ccx) + } else { + ccx.immediate_llvm_type_of(arg.layout.ty) + }; - llargument_tys.push(llarg_ty); + llargument_tys.push(llarg_ty); + }; + for arg in &self.args { + if !arg.nested.is_empty() { + for arg in &arg.nested { + assert!(arg.nested.is_empty()); + push(arg); + } + continue; + } + push(arg); + } } if self.variadic { @@ -948,12 +982,22 @@ impl<'a, 'tcx> FnType<'tcx> { self.ret.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); } i += 1; - for arg in &self.args { + let mut apply = |arg: &ArgType| { if !arg.is_ignore() { if arg.pad.is_some() { i += 1; } arg.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); i += 1; } + }; + for arg in &self.args { + if !arg.nested.is_empty() { + for arg in &arg.nested { + 
assert!(arg.nested.is_empty()); + apply(arg); + } + continue; + } + apply(arg); } } @@ -963,12 +1007,22 @@ impl<'a, 'tcx> FnType<'tcx> { self.ret.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); } i += 1; - for arg in &self.args { + let mut apply = |arg: &ArgType| { if !arg.is_ignore() { if arg.pad.is_some() { i += 1; } arg.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); i += 1; } + }; + for arg in &self.args { + if !arg.nested.is_empty() { + for arg in &arg.nested { + assert!(arg.nested.is_empty()); + apply(arg); + } + continue; + } + apply(arg); } if self.cconv != llvm::CCallConv { diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index d7397e359a148..426a44671bccf 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -27,7 +27,7 @@ use type_::Type; use value::Value; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, HasDataLayout, Layout, LayoutOf}; +use rustc::ty::layout::{self, HasDataLayout, LayoutOf}; use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::hir; @@ -41,10 +41,15 @@ use syntax_pos::{Span, DUMMY_SP}; pub use context::{CrateContext, SharedCrateContext}; pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - if let Layout::FatPointer { .. } = *ccx.layout_of(ty).layout { - true - } else { - false + match ty.sty { + ty::TyRef(_, ty::TypeAndMut { ty, .. }) | + ty::TyRawPtr(ty::TypeAndMut { ty, .. 
}) => { + !ccx.shared().type_is_sized(ty) + } + ty::TyAdt(def, _) if def.is_box() => { + !ccx.shared().type_is_sized(ty.boxed_ty()) + } + _ => false } } @@ -63,9 +68,8 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { let layout = ccx.layout_of(ty); - match *layout.layout { - Layout::FatPointer => true, - Layout::Univariant => { + match *layout.fields { + layout::FieldPlacement::Arbitrary { .. } => { // There must be only 2 fields. if layout.fields.count() != 2 { return false; diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index da3f6559dacef..7dbb8253e4289 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -215,13 +215,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::TerminatorKind::Return => { - let ret = self.fn_ty.ret; - if ret.is_ignore() || ret.is_indirect() { + if self.fn_ty.ret.is_ignore() || self.fn_ty.ret.is_indirect() { bcx.ret_void(); return; } - let llval = if let Some(cast_ty) = ret.cast { + let llval = if let Some(cast_ty) = self.fn_ty.ret.cast { let op = match self.locals[mir::RETURN_POINTER] { LocalRef::Operand(Some(op)) => op, LocalRef::Operand(None) => bug!("use of return before def"), @@ -234,7 +233,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let llslot = match op.val { Immediate(_) | Pair(..) 
=> { - let scratch = LvalueRef::alloca(&bcx, ret.layout.ty, "ret"); + let scratch = LvalueRef::alloca(&bcx, self.fn_ty.ret.layout.ty, "ret"); op.store(&bcx, scratch); scratch.llval } @@ -246,7 +245,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let load = bcx.load( bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()), - Some(ret.layout.align(bcx.ccx))); + Some(self.fn_ty.ret.layout.align(bcx.ccx))); load } else { let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER)); @@ -562,9 +561,18 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { (&args[..], None) }; - let mut idx = 0; - for arg in first_args { + for (idx, arg) in first_args.iter().enumerate() { let mut op = self.trans_operand(&bcx, arg); + if idx == 0 { + if let Pair(_, meta) = op.val { + if let Some(ty::InstanceDef::Virtual(_, idx)) = def { + let llmeth = meth::VirtualIndex::from_index(idx) + .get_fn(&bcx, meta); + let llty = fn_ty.llvm_type(bcx.ccx).ptr_to(); + llfn = Some(bcx.pointercast(llmeth, llty)); + } + } + } // The callee needs to own the argument memory if we pass it // by-ref, so make a local copy of non-immediate constants. @@ -574,12 +582,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { op.val = Ref(tmp.llval, tmp.alignment); } - self.trans_argument(&bcx, op, &mut llargs, &fn_ty, - &mut idx, &mut llfn, &def); + self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[idx]); } if let Some(tup) = untuple { - self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty, - &mut idx, &mut llfn, &def) + self.trans_arguments_untupled(&bcx, tup, &mut llargs, + &fn_ty.args[first_args.len()..]) } let fn_ptr = match (llfn, instance) { @@ -602,36 +609,22 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx: &Builder<'a, 'tcx>, op: OperandRef<'tcx>, llargs: &mut Vec, - fn_ty: &FnType<'tcx>, - next_idx: &mut usize, - llfn: &mut Option, - def: &Option>) { + arg: &ArgType<'tcx>) { if let Pair(a, b) = op.val { // Treat the values in a fat pointer separately. 
- if common::type_is_fat_ptr(bcx.ccx, op.ty) { - let (ptr, meta) = (a, b); - if *next_idx == 0 { - if let Some(ty::InstanceDef::Virtual(_, idx)) = *def { - let llmeth = meth::VirtualIndex::from_index(idx).get_fn(bcx, meta); - let llty = fn_ty.llvm_type(bcx.ccx).ptr_to(); - *llfn = Some(bcx.pointercast(llmeth, llty)); - } - } - + if !arg.nested.is_empty() { + assert_eq!(arg.nested.len(), 2); let imm_op = |x| OperandRef { val: Immediate(x), // We won't be checking the type again. ty: bcx.tcx().types.err }; - self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, llfn, def); - self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, llfn, def); + self.trans_argument(bcx, imm_op(a), llargs, &arg.nested[0]); + self.trans_argument(bcx, imm_op(b), llargs, &arg.nested[1]); return; } } - let arg = &fn_ty.args[*next_idx]; - *next_idx += 1; - // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { llargs.push(C_undef(ty.llvm_type(bcx.ccx))); @@ -686,10 +679,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx: &Builder<'a, 'tcx>, operand: &mir::Operand<'tcx>, llargs: &mut Vec, - fn_ty: &FnType<'tcx>, - next_idx: &mut usize, - llfn: &mut Option, - def: &Option>) { + args: &[ArgType<'tcx>]) { let tuple = self.trans_operand(bcx, operand); let arg_types = match tuple.ty.sty { @@ -702,18 +692,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { match tuple.val { Ref(llval, align) => { let tuple_ptr = LvalueRef::new_sized(llval, tuple.ty, align); - for (n, &ty) in arg_types.iter().enumerate() { + for n in 0..arg_types.len() { let field_ptr = tuple_ptr.project_field(bcx, n); - let op = if common::type_is_fat_ptr(bcx.ccx, ty) { - field_ptr.load(bcx) - } else { - // trans_argument will load this if it needs to - OperandRef { - val: Ref(field_ptr.llval, field_ptr.alignment), - ty - } - }; - self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); + self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[n]); } } @@ -728,7 +709,7 @@ impl<'a, 'tcx> 
MirContext<'a, 'tcx> { val: Immediate(elem), ty, }; - self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); + self.trans_argument(bcx, op, llargs, &args[n]); } } Pair(a, b) => { @@ -740,7 +721,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { val: Immediate(elem), ty, }; - self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); + self.trans_argument(bcx, op, llargs, &args[n]); } } } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index a1e89013bdbdc..7440551f322f9 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -22,7 +22,7 @@ use builder::Builder; use common::{self, CrateContext, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::Instance; -use abi::{self, ArgAttribute, FnType}; +use abi::{ArgAttribute, FnType}; use type_of; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; @@ -401,22 +401,10 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, }; let lvalue = LvalueRef::alloca(bcx, arg_ty, &name); - for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { - let dst = lvalue.project_field(bcx, i); + for i in 0..tupled_arg_tys.len() { let arg = &mircx.fn_ty.args[idx]; idx += 1; - if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) { - // We pass fat pointers as two words, but inside the tuple - // they are the two sub-fields of a single aggregate field. 
- let meta = &mircx.fn_ty.args[idx]; - idx += 1; - arg.store_fn_arg(bcx, &mut llarg_idx, - dst.project_field(bcx, abi::FAT_PTR_ADDR)); - meta.store_fn_arg(bcx, &mut llarg_idx, - dst.project_field(bcx, abi::FAT_PTR_EXTRA)); - } else { - arg.store_fn_arg(bcx, &mut llarg_idx, dst); - } + arg.store_fn_arg(bcx, &mut llarg_idx, lvalue.project_field(bcx, i)); } // Now that we have one alloca that contains the aggregate value, @@ -453,26 +441,19 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, llarg_idx += 1; LvalueRef::new_sized(llarg, arg_ty, Alignment::AbiAligned) } else if !lvalue_locals.contains(local.index()) && - arg.cast.is_none() && arg_scope.is_none() { - if arg.is_ignore() { - return LocalRef::new_operand(bcx.ccx, arg_ty); - } + !arg.nested.is_empty() { + assert_eq!(arg.nested.len(), 2); + let (a, b) = (&arg.nested[0], &arg.nested[1]); + assert!(!a.is_ignore() && a.cast.is_none() && a.pad.is_none()); + assert!(!b.is_ignore() && b.cast.is_none() && b.pad.is_none()); - // We don't have to cast or keep the argument in the alloca. - // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead - // of putting everything in allocas just so we can use llvm.dbg.declare. 
- if arg.pad.is_some() { - llarg_idx += 1; - } - let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + let mut a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; - let val = if common::type_is_fat_ptr(bcx.ccx, arg_ty) { - let meta = &mircx.fn_ty.args[idx]; - idx += 1; - assert!(meta.cast.is_none() && meta.pad.is_none()); - let llmeta = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); - llarg_idx += 1; + let mut b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + llarg_idx += 1; + + if common::type_is_fat_ptr(bcx.ccx, arg_ty) { // FIXME(eddyb) As we can't perfectly represent the data and/or // vtable pointer in a fat pointers in Rust's typesystem, and // because we split fat pointers into two ArgType's, they're @@ -486,36 +467,40 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let data_llty = bcx.ccx.llvm_type_of(pointee); let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee); - let llarg = bcx.pointercast(llarg, data_llty.ptr_to()); - bcx.set_value_name(llarg, &(name.clone() + ".ptr")); - let llmeta = bcx.pointercast(llmeta, meta_llty); - bcx.set_value_name(llmeta, &(name + ".meta")); + a = bcx.pointercast(a, data_llty.ptr_to()); + bcx.set_value_name(a, &(name.clone() + ".ptr")); + b = bcx.pointercast(b, meta_llty); + bcx.set_value_name(b, &(name + ".meta")); + } - OperandValue::Pair(llarg, llmeta) - } else { - bcx.set_value_name(llarg, &name); - OperandValue::Immediate(llarg) - }; + return LocalRef::Operand(Some(OperandRef { + val: OperandValue::Pair(a, b), + ty: arg_ty + })); + } else if !lvalue_locals.contains(local.index()) && + !arg.is_indirect() && arg.cast.is_none() && + arg_scope.is_none() { + if arg.is_ignore() { + return LocalRef::new_operand(bcx.ccx, arg_ty); + } + + // We don't have to cast or keep the argument in the alloca. + // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead + // of putting everything in allocas just so we can use llvm.dbg.declare. 
+ if arg.pad.is_some() { + llarg_idx += 1; + } + let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + bcx.set_value_name(llarg, &name); + llarg_idx += 1; let operand = OperandRef { - val, + val: OperandValue::Immediate(llarg), ty: arg_ty }; return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); } else { let tmp = LvalueRef::alloca(bcx, arg_ty, &name); - if common::type_is_fat_ptr(bcx.ccx, arg_ty) { - // we pass fat pointers as two words, but we want to - // represent them internally as a pointer to two words, - // so make an alloca to store them in. - let meta = &mircx.fn_ty.args[idx]; - idx += 1; - arg.store_fn_arg(bcx, &mut llarg_idx, tmp.project_field(bcx, abi::FAT_PTR_ADDR)); - meta.store_fn_arg(bcx, &mut llarg_idx, tmp.project_field(bcx, abi::FAT_PTR_EXTRA)); - } else { - // otherwise, arg is passed by value, so make a - // temporary and store it there - arg.store_fn_arg(bcx, &mut llarg_idx, tmp); - } + arg.store_fn_arg(bcx, &mut llarg_idx, tmp); tmp }; arg_scope.map(|scope| { From 1477119344e750d8cd6a5104ec6ea28cec11e574 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Wed, 20 Sep 2017 18:17:23 +0300 Subject: [PATCH 37/69] rustc_trans: keep a layout instead of a type in {Lvalue,Operand}Ref. 
--- src/librustc/ty/layout.rs | 18 ++- src/librustc_trans/abi.rs | 2 +- src/librustc_trans/asm.rs | 25 ++-- src/librustc_trans/base.rs | 33 ++--- src/librustc_trans/cabi_x86_64.rs | 2 +- src/librustc_trans/common.rs | 10 +- src/librustc_trans/debuginfo/mod.rs | 3 +- src/librustc_trans/intrinsic.rs | 28 ++-- src/librustc_trans/mir/analyze.rs | 3 +- src/librustc_trans/mir/block.rs | 111 +++++++--------- src/librustc_trans/mir/constant.rs | 8 +- src/librustc_trans/mir/lvalue.rs | 198 +++++++++++++--------------- src/librustc_trans/mir/mod.rs | 69 +++++----- src/librustc_trans/mir/operand.rs | 89 +++++++------ src/librustc_trans/mir/rvalue.rs | 151 ++++++++++----------- src/librustc_trans/mir/statement.rs | 18 +-- src/librustc_trans/type_of.rs | 20 +-- 17 files changed, 374 insertions(+), 414 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index a0c0fb481aaae..cdb0d9f4451b6 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -771,6 +771,15 @@ impl Abi { } } + /// Returns true if the type is a ZST and not unsized. + pub fn is_zst(&self) -> bool { + match *self { + Abi::Scalar(_) => false, + Abi::Vector { count, .. } => count == 0, + Abi::Aggregate { sized, size, .. 
} => sized && size.bytes() == 0 + } + } + pub fn size(&self, cx: C) -> Size { let dl = cx.data_layout(); @@ -1377,7 +1386,7 @@ impl<'a, 'tcx> Layout { no_explicit_discriminants { // Nullable pointer optimization for i in 0..2 { - if !variants[1 - i].iter().all(|f| f.size(dl).bytes() == 0) { + if !variants[1 - i].iter().all(|f| f.is_zst()) { continue; } @@ -1456,7 +1465,7 @@ impl<'a, 'tcx> Layout { for i in st.fields.index_by_increasing_offset() { let field = field_layouts[i]; let field_align = field.align(dl); - if field.size(dl).bytes() != 0 || field_align.abi() != 1 { + if !field.is_zst() || field_align.abi() != 1 { start_align = start_align.min(field_align); break; } @@ -2145,6 +2154,11 @@ impl<'a, 'tcx> FullLayout<'tcx> { self.abi.is_packed() } + /// Returns true if the type is a ZST and not unsized. + pub fn is_zst(&self) -> bool { + self.abi.is_zst() + } + pub fn size(&self, cx: C) -> Size { self.abi.size(cx) } diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 689976b6c42fc..0b09bca7b6889 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -697,7 +697,7 @@ impl<'a, 'tcx> FnType<'tcx> { if ty.is_bool() { arg.attrs.set(ArgAttribute::ZExt); } else { - if arg.layout.size(ccx).bytes() == 0 { + if arg.layout.is_zst() { // For some forsaken reason, x86_64-pc-windows-gnu // doesn't ignore zero-sized struct arguments. // The same is true for s390x-unknown-linux-gnu. 
diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index f4fbde2535fcb..44bdc75f8461f 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -16,10 +16,9 @@ use type_::Type; use builder::Builder; use rustc::hir; -use rustc::ty::Ty; -use rustc::ty::layout::Align; -use mir::lvalue::{LvalueRef, Alignment}; +use mir::lvalue::LvalueRef; +use mir::operand::OperandValue; use std::ffi::CString; use syntax::ast::AsmDialect; @@ -29,7 +28,7 @@ use libc::{c_uint, c_char}; pub fn trans_inline_asm<'a, 'tcx>( bcx: &Builder<'a, 'tcx>, ia: &hir::InlineAsm, - outputs: Vec<(ValueRef, Ty<'tcx>)>, + outputs: Vec>, mut inputs: Vec ) { let mut ext_constraints = vec![]; @@ -37,21 +36,15 @@ pub fn trans_inline_asm<'a, 'tcx>( // Prepare the output operands let mut indirect_outputs = vec![]; - for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() { - let val = if out.is_rw || out.is_indirect { - Some(LvalueRef::new_sized(val, ty, - Alignment::Packed(Align::from_bytes(1, 1).unwrap())).load(bcx)) - } else { - None - }; + for (i, (out, lvalue)) in ia.outputs.iter().zip(&outputs).enumerate() { if out.is_rw { - inputs.push(val.unwrap().immediate()); + inputs.push(lvalue.load(bcx).immediate()); ext_constraints.push(i.to_string()); } if out.is_indirect { - indirect_outputs.push(val.unwrap().immediate()); + indirect_outputs.push(lvalue.load(bcx).immediate()); } else { - output_types.push(bcx.ccx.llvm_type_of(ty)); + output_types.push(bcx.ccx.llvm_type_of(lvalue.layout.ty)); } } if !indirect_outputs.is_empty() { @@ -106,9 +99,9 @@ pub fn trans_inline_asm<'a, 'tcx>( // Again, based on how many outputs we have let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); - for (i, (_, &(val, _))) in outputs.enumerate() { + for (i, (_, &lvalue)) in outputs.enumerate() { let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i as u64) }; - bcx.store(v, val, None); + OperandValue::Immediate(v).store(bcx, lvalue); } // 
Store mark in a metadata node so we can map LLVM errors diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index a987fa4a40e55..036f1dfbcfd96 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -40,7 +40,7 @@ use rustc::middle::lang_items::StartFnLangItem; use rustc::middle::trans::{Linkage, Visibility, Stats}; use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes}; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::Align; +use rustc::ty::layout::{Align, FullLayout}; use rustc::ty::maps::Providers; use rustc::dep_graph::{DepNode, DepKind, DepConstructor}; use rustc::middle::cstore::{self, LinkMeta, LinkagePreference}; @@ -55,10 +55,7 @@ use builder::Builder; use callee; use common::{C_bool, C_bytes_in_context, C_i32, C_usize}; use collector::{self, TransItemCollectionMode}; -use common::{C_struct_in_context, C_array}; -use common::CrateContext; -use common::{type_is_zero_size, val_ty}; -use common; +use common::{self, C_struct_in_context, C_array, CrateContext, val_ty}; use consts; use context::{self, LocalCrateContext, SharedCrateContext}; use debuginfo; @@ -88,7 +85,7 @@ use syntax::attr; use rustc::hir; use syntax::ast; -use mir::operand::{OperandRef, OperandValue}; +use mir::operand::OperandValue; pub use rustc_trans_utils::{find_exported_symbols, check_for_rustc_errors_attr}; pub use rustc_trans_utils::trans_item::linkage_by_name; @@ -249,8 +246,8 @@ pub fn unsize_thin_ptr<'a, 'tcx>( pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, src: LvalueRef<'tcx>, dst: LvalueRef<'tcx>) { - let src_ty = src.ty.to_ty(bcx.tcx()); - let dst_ty = dst.ty.to_ty(bcx.tcx()); + let src_ty = src.layout.ty; + let dst_ty = dst.layout.ty; let coerce_ptr = || { let (base, info) = match src.load(bcx).val { OperandValue::Pair(base, info) => { @@ -266,10 +263,7 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } OperandValue::Ref(..) 
=> bug!() }; - OperandRef { - val: OperandValue::Pair(base, info), - ty: dst_ty - }.store(bcx, dst); + OperandValue::Pair(base, info).store(bcx, dst); }; match (&src_ty.sty, &dst_ty.sty) { (&ty::TyRef(..), &ty::TyRef(..)) | @@ -288,15 +282,12 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let src_f = src.project_field(bcx, i); let dst_f = dst.project_field(bcx, i); - let src_f_ty = src_f.ty.to_ty(bcx.tcx()); - let dst_f_ty = dst_f.ty.to_ty(bcx.tcx()); - - if type_is_zero_size(bcx.ccx, dst_f_ty) { + if dst_f.layout.is_zst() { continue; } - if src_f_ty == dst_f_ty { - memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f_ty, + if src_f.layout.ty == dst_f.layout.ty { + memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f.layout, (src_f.alignment | dst_f.alignment).non_abi()); } else { coerce_unsized_into(bcx, src_f, dst_f); @@ -409,17 +400,17 @@ pub fn memcpy_ty<'a, 'tcx>( bcx: &Builder<'a, 'tcx>, dst: ValueRef, src: ValueRef, - t: Ty<'tcx>, + layout: FullLayout<'tcx>, align: Option, ) { let ccx = bcx.ccx; - let size = ccx.size_of(t).bytes(); + let size = layout.size(ccx).bytes(); if size == 0 { return; } - let align = align.unwrap_or_else(|| ccx.align_of(t)); + let align = align.unwrap_or_else(|| layout.align(ccx)); call_memcpy(bcx, dst, src, C_usize(ccx, size), align); } diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index d6d46307a4ff5..00e8562c2a134 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -58,7 +58,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) off: Size) -> Result<(), Memory> { if !off.is_abi_aligned(layout.align(ccx)) { - if layout.size(ccx).bytes() > 0 { + if !layout.is_zst() { return Err(Memory); } return Ok(()); diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 426a44671bccf..55f02ed5f9171 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -58,9 +58,7 @@ pub fn 
type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - match layout.abi { layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true, - layout::Abi::Aggregate { .. } => { - !layout.is_unsized() && layout.size(ccx).bytes() == 0 - } + layout::Abi::Aggregate { .. } => layout.is_zst() } } @@ -83,12 +81,6 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) } } -/// Identify types which have size zero at runtime. -pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - let layout = ccx.layout_of(ty); - !layout.is_unsized() && layout.size(ccx).bytes() == 0 -} - pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { ty.needs_drop(tcx, ty::ParamEnv::empty(traits::Reveal::All)) } diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 8d3a0fd572510..c0df25202d8a9 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -335,8 +335,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, signature.extend(inputs.iter().map(|&t| { let t = match t.sty { ty::TyArray(ct, _) - if (ct == cx.tcx().types.u8) || - (cx.layout_of(ct).size(cx).bytes() == 0) => { + if (ct == cx.tcx().types.u8) || cx.layout_of(ct).is_zst() => { cx.tcx().mk_imm_ptr(ct) } _ => t diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 25729449dbca3..1cdd192bfeda6 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -22,7 +22,7 @@ use declare; use glue; use type_::Type; use rustc::ty::{self, Ty}; -use rustc::ty::layout::HasDataLayout; +use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc::hir; use syntax::ast; use syntax::symbol::Symbol; @@ -86,7 +86,7 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { /// add them to librustc_trans/trans/context.rs pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, 
callee_ty: Ty<'tcx>, - fn_ty: &FnType, + fn_ty: &FnType<'tcx>, args: &[OperandRef<'tcx>], llresult: ValueRef, span: Span) { @@ -105,7 +105,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let name = &*tcx.item_name(def_id); let llret_ty = ccx.llvm_type_of(ret_ty); - let result = LvalueRef::new_sized(llresult, ret_ty, Alignment::AbiAligned); + let result = LvalueRef::new_sized(llresult, fn_ty.ret.layout, Alignment::AbiAligned); let simple = get_simple_intrinsic(ccx, name); let llval = match name { @@ -179,7 +179,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } "init" => { let ty = substs.type_at(0); - if !type_is_zero_size(ccx, ty) { + if !ccx.layout_of(ty).is_zst() { // Just zero out the stack slot. // If we store a zero constant, LLVM will drown in vreg allocation for large data // structures, and the generated code will be awful. (A telltale sign of this is @@ -247,7 +247,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, }, "volatile_store" => { let tp_ty = substs.type_at(0); - let dst = LvalueRef::new_sized(args[0].immediate(), tp_ty, Alignment::AbiAligned); + let dst = args[0].deref(bcx.ccx); if let OperandValue::Pair(a, b) = args[1].val { bcx.volatile_store(a, dst.project_field(bcx, 0).llval); bcx.volatile_store(b, dst.project_field(bcx, 1).llval); @@ -255,7 +255,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let val = if let OperandValue::Ref(ptr, align) = args[1].val { bcx.load(ptr, align.non_abi()) } else { - if type_is_zero_size(ccx, tp_ty) { + if dst.layout.is_zst() { return; } from_immediate(bcx, args[1].immediate()) @@ -393,13 +393,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, }, "discriminant_value" => { - let val_ty = substs.type_at(0); - let adt_val = LvalueRef::new_sized(args[0].immediate(), - val_ty, - Alignment::AbiAligned); - match val_ty.sty { + match substs.type_at(0).sty { ty::TyAdt(adt, ..) 
if adt.is_enum() => { - adt_val.trans_get_discr(bcx, ret_ty) + args[0].deref(bcx.ccx).trans_get_discr(bcx, ret_ty) } _ => C_null(llret_ty) } @@ -612,12 +608,12 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // This assumes the type is "simple", i.e. no // destructors, and the contents are SIMD // etc. - assert!(!bcx.ccx.shared().type_needs_drop(arg.ty)); + assert!(!bcx.ccx.shared().type_needs_drop(arg.layout.ty)); let (ptr, align) = match arg.val { OperandValue::Ref(ptr, align) => (ptr, align), _ => bug!() }; - let arg = LvalueRef::new_sized(ptr, arg.ty, align); + let arg = LvalueRef::new_sized(ptr, arg.layout, align); (0..contents.len()).map(|i| { arg.project_field(bcx, i).load(bcx).immediate() }).collect() @@ -685,8 +681,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } else { OperandRef { val: OperandValue::Immediate(llval), - ty: ret_ty - }.unpack_if_pair(bcx).store(bcx, result); + layout: result.layout + }.unpack_if_pair(bcx).val.store(bcx, result); } } } diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 73f60ff29a851..bca33a8c3074e 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -18,6 +18,7 @@ use rustc::mir::{self, Location, TerminatorKind, Literal}; use rustc::mir::visit::{Visitor, LvalueContext}; use rustc::mir::traversal; use rustc::ty; +use rustc::ty::layout::LayoutOf; use common; use super::MirContext; @@ -34,7 +35,7 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { ty.is_box() || ty.is_region_ptr() || ty.is_simd() || - common::type_is_zero_size(mircx.ccx, ty) + mircx.ccx.layout_of(ty).is_zst() { // These sorts of types are immediates that we can store // in an ValueRef without an alloca. 
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 7dbb8253e4289..82d5dabc86c7a 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -11,7 +11,7 @@ use llvm::{self, ValueRef, BasicBlockRef}; use rustc::middle::lang_items; use rustc::middle::const_val::{ConstEvalErr, ConstInt, ErrKind}; -use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::{self, TypeFoldable}; use rustc::ty::layout::LayoutOf; use rustc::traits; use rustc::mir; @@ -116,11 +116,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn_ty: FnType<'tcx>, fn_ptr: ValueRef, llargs: &[ValueRef], - destination: Option<(ReturnDest<'tcx>, Ty<'tcx>, mir::BasicBlock)>, + destination: Option<(ReturnDest<'tcx>, mir::BasicBlock)>, cleanup: Option | { if let Some(cleanup) = cleanup { - let ret_bcx = if let Some((_, _, target)) = destination { + let ret_bcx = if let Some((_, target)) = destination { this.blocks[target] } else { this.unreachable_block() @@ -132,12 +132,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { cleanup_bundle); fn_ty.apply_attrs_callsite(invokeret); - if let Some((ret_dest, ret_ty, target)) = destination { + if let Some((ret_dest, target)) = destination { let ret_bcx = this.get_builder(target); this.set_debug_loc(&ret_bcx, terminator.source_info); let op = OperandRef { val: Immediate(invokeret), - ty: ret_ty, + layout: fn_ty.ret.layout, }; this.store_return(&ret_bcx, ret_dest, &fn_ty.ret, op); } @@ -152,10 +152,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); } - if let Some((ret_dest, ret_ty, target)) = destination { + if let Some((ret_dest, target)) = destination { let op = OperandRef { val: Immediate(llret), - ty: ret_ty, + layout: fn_ty.ret.layout, }; this.store_return(&bcx, ret_dest, &fn_ty.ret, op); funclet_br(this, bcx, target); @@ -227,14 +227,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { LocalRef::Lvalue(tr_lvalue) => { OperandRef { val: Ref(tr_lvalue.llval, 
tr_lvalue.alignment), - ty: tr_lvalue.ty.to_ty(bcx.tcx()) + layout: tr_lvalue.layout } } }; let llslot = match op.val { Immediate(_) | Pair(..) => { - let scratch = LvalueRef::alloca(&bcx, self.fn_ty.ret.layout.ty, "ret"); - op.store(&bcx, scratch); + let scratch = LvalueRef::alloca(&bcx, self.fn_ty.ret.layout, "ret"); + op.val.store(&bcx, scratch); scratch.llval } Ref(llval, align) => { @@ -282,7 +282,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize]; do_call(self, bcx, fn_ty, drop_fn, args, - Some((ReturnDest::Nothing, tcx.mk_nil(), target)), + Some((ReturnDest::Nothing, target)), unwind); } @@ -427,7 +427,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. let callee = self.trans_operand(&bcx, func); - let (instance, mut llfn) = match callee.ty.sty { + let (instance, mut llfn) = match callee.layout.ty.sty { ty::TyFnDef(def_id, substs) => { (Some(ty::Instance::resolve(bcx.ccx.tcx(), ty::ParamEnv::empty(traits::Reveal::All), @@ -438,10 +438,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { ty::TyFnPtr(_) => { (None, Some(callee.immediate())) } - _ => bug!("{} is not callable", callee.ty) + _ => bug!("{} is not callable", callee.layout.ty) }; let def = instance.map(|i| i.def); - let sig = callee.ty.fn_sig(bcx.tcx()); + let sig = callee.layout.ty.fn_sig(bcx.tcx()); let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig); let abi = sig.abi; @@ -520,7 +520,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let val = self.trans_constant(&bcx, constant); return OperandRef { val: Immediate(val.llval), - ty: val.ty + layout: bcx.ccx.layout_of(val.ty) }; } } @@ -539,7 +539,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Make a fake operand for store_return let op = OperandRef { val: Ref(dst.llval, Alignment::AbiAligned), - ty: sig.output(), + layout: fn_ty.ret.layout, }; self.store_return(&bcx, ret_dest, &fn_ty.ret, op); } @@ -577,8 +577,8 @@ 
impl<'a, 'tcx> MirContext<'a, 'tcx> { // The callee needs to own the argument memory if we pass it // by-ref, so make a local copy of non-immediate constants. if let (&mir::Operand::Constant(_), Ref(..)) = (arg, op.val) { - let tmp = LvalueRef::alloca(&bcx, op.ty, "const"); - op.store(&bcx, tmp); + let tmp = LvalueRef::alloca(&bcx, op.layout, "const"); + op.val.store(&bcx, tmp); op.val = Ref(tmp.llval, tmp.alignment); } @@ -596,7 +596,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; do_call(self, bcx, fn_ty, fn_ptr, &llargs, - destination.as_ref().map(|&(_, target)| (ret_dest, sig.output(), target)), + destination.as_ref().map(|&(_, target)| (ret_dest, target)), cleanup); } mir::TerminatorKind::GeneratorDrop | @@ -617,7 +617,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let imm_op = |x| OperandRef { val: Immediate(x), // We won't be checking the type again. - ty: bcx.tcx().types.err + layout: bcx.ccx.layout_of(bcx.tcx().types.never) }; self.trans_argument(bcx, imm_op(a), llargs, &arg.nested[0]); self.trans_argument(bcx, imm_op(b), llargs, &arg.nested[1]); @@ -638,8 +638,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (mut llval, align, by_ref) = match op.val { Immediate(_) | Pair(..) => { if arg.is_indirect() || arg.cast.is_some() { - let scratch = LvalueRef::alloca(bcx, arg.layout.ty, "arg"); - op.store(bcx, scratch); + let scratch = LvalueRef::alloca(bcx, arg.layout, "arg"); + op.val.store(bcx, scratch); (scratch.llval, Alignment::AbiAligned, true) } else { (op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false) @@ -650,8 +650,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't // have scary latent bugs around. 
- let scratch = LvalueRef::alloca(bcx, arg.layout.ty, "arg"); - base::memcpy_ty(bcx, scratch.llval, llval, op.ty, align.non_abi()); + let scratch = LvalueRef::alloca(bcx, arg.layout, "arg"); + base::memcpy_ty(bcx, scratch.llval, llval, op.layout, align.non_abi()); (scratch.llval, Alignment::AbiAligned, true) } Ref(llval, align) => (llval, align, true) @@ -682,16 +682,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { args: &[ArgType<'tcx>]) { let tuple = self.trans_operand(bcx, operand); - let arg_types = match tuple.ty.sty { + let arg_types = match tuple.layout.ty.sty { ty::TyTuple(ref tys, _) => tys, _ => span_bug!(self.mir.span, - "bad final argument to \"rust-call\" fn {:?}", tuple.ty) + "bad final argument to \"rust-call\" fn {:?}", tuple.layout.ty) }; // Handle both by-ref and immediate tuples. match tuple.val { Ref(llval, align) => { - let tuple_ptr = LvalueRef::new_sized(llval, tuple.ty, align); + let tuple_ptr = LvalueRef::new_sized(llval, tuple.layout, align); for n in 0..arg_types.len() { let field_ptr = tuple_ptr.project_field(bcx, n); self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[n]); @@ -699,15 +699,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } Immediate(llval) => { - let layout = bcx.ccx.layout_of(tuple.ty); for (n, &ty) in arg_types.iter().enumerate() { - let mut elem = bcx.extract_value(llval, layout.llvm_field_index(n)); + let mut elem = bcx.extract_value(llval, tuple.layout.llvm_field_index(n)); // Truncate bools to i1, if needed elem = base::to_immediate(bcx, elem, ty); // If the tuple is immediate, the elements are as well let op = OperandRef { val: Immediate(elem), - ty, + layout: bcx.ccx.layout_of(ty), }; self.trans_argument(bcx, op, llargs, &args[n]); } @@ -719,7 +718,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Pair is always made up of immediates let op = OperandRef { val: Immediate(elem), - ty, + layout: bcx.ccx.layout_of(ty), }; self.trans_argument(bcx, op, llargs, &args[n]); } @@ -733,11 +732,11 @@ impl<'a, 'tcx> MirContext<'a, 
'tcx> { if let Some(slot) = self.personality_slot { slot } else { - let ty = ccx.tcx().intern_tup(&[ + let layout = ccx.layout_of(ccx.tcx().intern_tup(&[ ccx.tcx().mk_mut_ptr(ccx.tcx().types.u8), ccx.tcx().types.i32 - ], false); - let slot = LvalueRef::alloca(bcx, ty, "personalityslot"); + ], false)); + let slot = LvalueRef::alloca(bcx, layout, "personalityslot"); self.personality_slot = Some(slot); slot } @@ -764,7 +763,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let bcx = self.new_block("cleanup"); - let ccx = bcx.ccx; let llpersonality = self.ccx.eh_personality(); let llretty = self.landing_pad_type(); let lp = bcx.landing_pad(llretty, llpersonality, 1, self.llfn); @@ -772,10 +770,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let slot = self.get_personality_slot(&bcx); slot.storage_live(&bcx); - OperandRef { - val: Pair(bcx.extract_value(lp, 0), bcx.extract_value(lp, 1)), - ty: slot.ty.to_ty(ccx.tcx()) - }.store(&bcx, slot); + Pair(bcx.extract_value(lp, 0), bcx.extract_value(lp, 1)).store(&bcx, slot); bcx.br(target_bb); bcx.llbb() @@ -806,24 +801,23 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>, - dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, + dest: &mir::Lvalue<'tcx>, fn_ret: &ArgType<'tcx>, llargs: &mut Vec, is_intrinsic: bool) -> ReturnDest<'tcx> { // If the return is ignored, we can just return a do-nothing ReturnDest - if fn_ret_ty.is_ignore() { + if fn_ret.is_ignore() { return ReturnDest::Nothing; } let dest = if let mir::Lvalue::Local(index) = *dest { - let ret_ty = self.monomorphized_lvalue_ty(dest); match self.locals[index] { LocalRef::Lvalue(dest) => dest, LocalRef::Operand(None) => { // Handle temporary lvalues, specifically Operand ones, as // they don't have allocas - return if fn_ret_ty.is_indirect() { + return if fn_ret.is_indirect() { // Odd, but possible, case, we have an operand temporary, // but the calling convention has an indirect return. 
- let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret"); + let tmp = LvalueRef::alloca(bcx, fn_ret.layout, "tmp_ret"); tmp.storage_live(bcx); llargs.push(tmp.llval); ReturnDest::IndirectOperand(tmp, index) @@ -831,7 +825,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Currently, intrinsics always need a location to store // the result. so we create a temporary alloca for the // result - let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret"); + let tmp = LvalueRef::alloca(bcx, fn_ret.layout, "tmp_ret"); tmp.storage_live(bcx); ReturnDest::IndirectOperand(tmp, index) } else { @@ -845,7 +839,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } else { self.trans_lvalue(bcx, dest) }; - if fn_ret_ty.is_indirect() { + if fn_ret.is_indirect() { match dest.alignment { Alignment::AbiAligned => { llargs.push(dest.llval); @@ -873,18 +867,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { match self.locals[index] { LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, lvalue), LocalRef::Operand(None) => { - let lvalue_ty = self.monomorphized_lvalue_ty(dst); - assert!(!lvalue_ty.has_erasable_regions()); - let lvalue = LvalueRef::alloca(bcx, lvalue_ty, "transmute_temp"); + let dst_layout = bcx.ccx.layout_of(self.monomorphized_lvalue_ty(dst)); + assert!(!dst_layout.ty.has_erasable_regions()); + let lvalue = LvalueRef::alloca(bcx, dst_layout, "transmute_temp"); lvalue.storage_live(bcx); self.trans_transmute_into(bcx, src, lvalue); let op = lvalue.load(bcx); lvalue.storage_dead(bcx); self.locals[index] = LocalRef::Operand(Some(op)); } - LocalRef::Operand(Some(_)) => { - let ty = self.monomorphized_lvalue_ty(dst); - assert!(common::type_is_zero_size(bcx.ccx, ty), + LocalRef::Operand(Some(op)) => { + assert!(op.layout.is_zst(), "assigning to initialized SSAtemp"); } } @@ -897,14 +890,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn trans_transmute_into(&mut self, bcx: &Builder<'a, 'tcx>, src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) { - let val = self.trans_operand(bcx, src); - let llty = 
bcx.ccx.llvm_type_of(val.ty); + let src = self.trans_operand(bcx, src); + let llty = bcx.ccx.llvm_type_of(src.layout.ty); let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); - let in_type = val.ty; - let out_type = dst.ty.to_ty(bcx.tcx()); - let align = bcx.ccx.align_of(in_type).min(bcx.ccx.align_of(out_type)); - val.store(bcx, - LvalueRef::new_sized(cast_ptr, val.ty, Alignment::Packed(align))); + let align = src.layout.align(bcx.ccx).min(dst.layout.align(bcx.ccx)); + src.val.store(bcx, + LvalueRef::new_sized(cast_ptr, src.layout, Alignment::Packed(align))); } @@ -927,7 +918,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { DirectOperand(index) => { // If there is a cast, we have to store and reload. let op = if ret_ty.cast.is_some() { - let tmp = LvalueRef::alloca(bcx, op.ty, "tmp_ret"); + let tmp = LvalueRef::alloca(bcx, op.layout, "tmp_ret"); tmp.storage_live(bcx); ret_ty.store(bcx, op.immediate(), tmp); let op = tmp.load(bcx); diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 9c43d8b36272e..8b86eca755825 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -158,7 +158,7 @@ impl<'a, 'tcx> Const<'tcx> { OperandRef { val, - ty: self.ty + layout: ccx.layout_of(self.ty) } } } @@ -1100,11 +1100,11 @@ fn trans_const_adt<'a, 'tcx>( _ => 0, }; let discr_ty = l.field(ccx, 0).ty; - let discr = Const::new(C_int(ccx.llvm_type_of(discr_ty), discr as i64), - discr_ty); + let discr = C_int(ccx.llvm_type_of(discr_ty), discr as i64); if let layout::Abi::Scalar(_) = l.abi { - discr + Const::new(discr, t) } else { + let discr = Const::new(discr, discr_ty); build_const_struct(ccx, l.for_variant(variant_index), vals, Some(discr)) } } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index f6c260e4c1523..3c0776d479817 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -9,7 +9,7 @@ // except according to those terms. 
use llvm::{self, ValueRef}; -use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, Align, FullLayout, LayoutOf}; use rustc::mir; use rustc::mir::tcx::LvalueTy; @@ -86,36 +86,44 @@ pub struct LvalueRef<'tcx> { pub llextra: ValueRef, /// Monomorphized type of this lvalue, including variant information - pub ty: LvalueTy<'tcx>, + pub layout: FullLayout<'tcx>, /// Whether this lvalue is known to be aligned according to its layout pub alignment: Alignment, } impl<'a, 'tcx> LvalueRef<'tcx> { - pub fn new_sized(llval: ValueRef, ty: Ty<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> { - LvalueRef { llval, llextra: ptr::null_mut(), ty: LvalueTy::from_ty(ty), alignment } + pub fn new_sized(llval: ValueRef, + layout: FullLayout<'tcx>, + alignment: Alignment) + -> LvalueRef<'tcx> { + LvalueRef { + llval, + llextra: ptr::null_mut(), + layout, + alignment + } } - pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> { - debug!("alloca({:?}: {:?})", name, ty); + pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: FullLayout<'tcx>, name: &str) + -> LvalueRef<'tcx> { + debug!("alloca({:?}: {:?})", name, layout); let tmp = bcx.alloca( - bcx.ccx.llvm_type_of(ty), name, bcx.ccx.over_align_of(ty)); - assert!(!ty.has_param_types()); - Self::new_sized(tmp, ty, Alignment::AbiAligned) + bcx.ccx.llvm_type_of(layout.ty), name, layout.over_align(bcx.ccx)); + Self::new_sized(tmp, layout, Alignment::AbiAligned) } pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { - let ty = self.ty.to_ty(ccx.tcx()); - match ty.sty { - ty::TyArray(_, n) => { - common::C_usize(ccx, n.val.to_const_int().unwrap().to_u64().unwrap()) - } - ty::TySlice(_) | ty::TyStr => { - assert!(self.llextra != ptr::null_mut()); + if let layout::FieldPlacement::Array { count, .. 
} = *self.layout.fields { + if self.layout.is_unsized() { + assert!(self.has_extra()); + assert_eq!(count, 0); self.llextra + } else { + common::C_usize(ccx, count) } - _ => bug!("unexpected type `{}` in LvalueRef::len", ty) + } else { + bug!("unexpected layout `{:#?}` in LvalueRef::len", self.layout) } } @@ -128,15 +136,13 @@ impl<'a, 'tcx> LvalueRef<'tcx> { assert!(!self.has_extra()); - let ty = self.ty.to_ty(bcx.tcx()); - - if common::type_is_zero_size(bcx.ccx, ty) { - return OperandRef::new_zst(bcx.ccx, ty); + if self.layout.is_zst() { + return OperandRef::new_zst(bcx.ccx, self.layout); } - let val = if common::type_is_fat_ptr(bcx.ccx, ty) { + let val = if common::type_is_fat_ptr(bcx.ccx, self.layout.ty) { let data = self.project_field(bcx, abi::FAT_PTR_ADDR); - let lldata = if ty.is_region_ptr() || ty.is_box() { + let lldata = if self.layout.ty.is_region_ptr() || self.layout.ty.is_box() { bcx.load_nonnull(data.llval, data.alignment.non_abi()) } else { bcx.load(data.llval, data.alignment.non_abi()) @@ -153,11 +159,11 @@ impl<'a, 'tcx> LvalueRef<'tcx> { }; OperandValue::Pair(lldata, llextra) - } else if common::type_is_imm_pair(bcx.ccx, ty) { + } else if common::type_is_imm_pair(bcx.ccx, self.layout.ty) { OperandValue::Pair( self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(), self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate()) - } else if common::type_is_immediate(bcx.ccx, ty) { + } else if common::type_is_immediate(bcx.ccx, self.layout.ty) { let mut const_llval = ptr::null_mut(); unsafe { let global = llvm::LLVMIsAGlobalVariable(self.llval); @@ -168,48 +174,43 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let llval = if !const_llval.is_null() { const_llval - } else if ty.is_bool() { + } else if self.layout.ty.is_bool() { bcx.load_range_assert(self.llval, 0, 2, llvm::False, self.alignment.non_abi()) - } else if ty.is_char() { + } else if self.layout.ty.is_char() { // a char is a Unicode codepoint, and so takes values from 0 // to 0x10FFFF 
inclusive only. bcx.load_range_assert(self.llval, 0, 0x10FFFF + 1, llvm::False, self.alignment.non_abi()) - } else if ty.is_region_ptr() || ty.is_box() || ty.is_fn() { + } else if self.layout.ty.is_region_ptr() || + self.layout.ty.is_box() || + self.layout.ty.is_fn() { bcx.load_nonnull(self.llval, self.alignment.non_abi()) } else { bcx.load(self.llval, self.alignment.non_abi()) }; - OperandValue::Immediate(base::to_immediate(bcx, llval, ty)) + OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout.ty)) } else { OperandValue::Ref(self.llval, self.alignment) }; - OperandRef { val, ty } + OperandRef { val, layout: self.layout } } /// Access a field, at a point when the value's case is known. pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx> { let ccx = bcx.ccx; - let mut l = ccx.layout_of(self.ty.to_ty(bcx.tcx())); - match self.ty { - LvalueTy::Ty { .. } => {} - LvalueTy::Downcast { variant_index, .. } => { - l = l.for_variant(variant_index) - } - } - let field = l.field(ccx, ix); - let offset = l.fields.offset(ix).bytes(); + let field = self.layout.field(ccx, ix); + let offset = self.layout.fields.offset(ix).bytes(); - let alignment = self.alignment | Alignment::from(l); + let alignment = self.alignment | Alignment::from(self.layout); // Unions and newtypes only use an offset of 0. - let has_llvm_fields = match *l.fields { + let has_llvm_fields = match *self.layout.fields { layout::FieldPlacement::Union(_) => false, layout::FieldPlacement::Array { .. } => true, layout::FieldPlacement::Arbitrary { .. } => { - match l.abi { + match self.layout.abi { layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => false, layout::Abi::Aggregate { .. 
} => true } @@ -219,7 +220,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let simple = || { LvalueRef { llval: if has_llvm_fields { - bcx.struct_gep(self.llval, l.llvm_field_index(ix)) + bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) } else { assert_eq!(offset, 0); let ty = ccx.llvm_type_of(field.ty); @@ -230,7 +231,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } else { ptr::null_mut() }, - ty: LvalueTy::from_ty(field.ty), + layout: field, alignment, } }; @@ -238,7 +239,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // Simple case - we can just GEP the field // * Packed struct - There is no alignment padding // * Field is sized - pointer is properly aligned already - if l.is_packed() || !field.is_unsized() { + if self.layout.is_packed() || !field.is_unsized() { return simple(); } @@ -301,29 +302,26 @@ impl<'a, 'tcx> LvalueRef<'tcx> { LvalueRef { llval: bcx.pointercast(byte_ptr, ll_fty.ptr_to()), llextra: self.llextra, - ty: LvalueTy::from_ty(field.ty), + layout: field, alignment, } } /// Obtain the actual discriminant of a value. pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef { - let l = bcx.ccx.layout_of(self.ty.to_ty(bcx.tcx())); - let cast_to = bcx.ccx.immediate_llvm_type_of(cast_to); - match *l.layout { + match *self.layout.layout { layout::Layout::Univariant { .. } | layout::Layout::UntaggedUnion { .. } => return C_uint(cast_to, 0), _ => {} } let discr = self.project_field(bcx, 0); - let discr_layout = bcx.ccx.layout_of(discr.ty.to_ty(bcx.tcx())); - let discr_scalar = match discr_layout.abi { + let discr_scalar = match discr.layout.abi { layout::Abi::Scalar(discr) => discr, - _ => bug!("discriminant not scalar: {:#?}", discr_layout) + _ => bug!("discriminant not scalar: {:#?}", discr.layout) }; - let (min, max) = match *l.layout { + let (min, max) = match *self.layout.layout { layout::Layout::General { ref discr_range, .. 
} => (discr_range.start, discr_range.end), _ => (0, u64::max_value()), }; @@ -349,7 +347,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { bcx.load(discr.llval, discr.alignment.non_abi()) } }; - match *l.layout { + match *self.layout.layout { layout::Layout::General { .. } => { let signed = match discr_scalar { layout::Int(_, signed) => signed, @@ -359,29 +357,28 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } layout::Layout::NullablePointer { nndiscr, .. } => { let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; - let zero = C_null(bcx.ccx.llvm_type_of(discr_layout.ty)); + let zero = C_null(bcx.ccx.llvm_type_of(discr.layout.ty)); bcx.intcast(bcx.icmp(cmp, lldiscr, zero), cast_to, false) } - _ => bug!("{} is not an enum", l.ty) + _ => bug!("{} is not an enum", self.layout.ty) } } /// Set the discriminant for a new value of the given case of the given /// representation. pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) { - let l = bcx.ccx.layout_of(self.ty.to_ty(bcx.tcx())); - let to = l.ty.ty_adt_def().unwrap() + let to = self.layout.ty.ty_adt_def().unwrap() .discriminant_for_variant(bcx.tcx(), variant_index) .to_u128_unchecked() as u64; - match *l.layout { + match *self.layout.layout { layout::Layout::General { .. } => { let ptr = self.project_field(bcx, 0); - bcx.store(C_int(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx())), to as i64), + bcx.store(C_int(bcx.ccx.llvm_type_of(ptr.layout.ty), to as i64), ptr.llval, ptr.alignment.non_abi()); } layout::Layout::NullablePointer { nndiscr, .. } => { if to != nndiscr { - let use_memset = match l.abi { + let use_memset = match self.layout.abi { layout::Abi::Scalar(_) => false, _ => target_sets_discr_via_memset(bcx) }; @@ -391,13 +388,13 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // than storing null to single target field. 
let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to()); let fill_byte = C_u8(bcx.ccx, 0); - let (size, align) = l.size_and_align(bcx.ccx); + let (size, align) = self.layout.size_and_align(bcx.ccx); let size = C_usize(bcx.ccx, size.bytes()); let align = C_u32(bcx.ccx, align.abi() as u32); base::call_memset(bcx, llptr, fill_byte, size, align, false); } else { let ptr = self.project_field(bcx, 0); - bcx.store(C_null(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx()))), + bcx.store(C_null(bcx.ccx.llvm_type_of(ptr.layout.ty)), ptr.llval, ptr.alignment.non_abi()); } } @@ -410,48 +407,40 @@ impl<'a, 'tcx> LvalueRef<'tcx> { pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> LvalueRef<'tcx> { - let ptr = bcx.inbounds_gep(self.llval, &[common::C_usize(bcx.ccx, 0), llindex]); - let elem_ty = self.ty.to_ty(bcx.tcx()).builtin_index().unwrap(); - LvalueRef::new_sized(ptr, elem_ty, self.alignment) + LvalueRef { + llval: bcx.inbounds_gep(self.llval, &[common::C_usize(bcx.ccx, 0), llindex]), + llextra: ptr::null_mut(), + layout: self.layout.field(bcx.ccx, 0), + alignment: self.alignment + } } pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) -> LvalueRef<'tcx> { - let ty = self.ty.to_ty(bcx.tcx()); - if let ty::TyAdt(adt_def, substs) = ty.sty { - let mut downcast = *self; - downcast.ty = LvalueTy::Downcast { - adt_def, - substs, - variant_index, - }; + let mut downcast = *self; + downcast.layout = self.layout.for_variant(variant_index); - // If this is an enum, cast to the appropriate variant struct type. - let layout = bcx.ccx.layout_of(ty); - match *layout.layout { - layout::Layout::NullablePointer { .. } | - layout::Layout::General { .. 
} => { - let variant_layout = layout.for_variant(variant_index); - let variant_ty = Type::struct_(bcx.ccx, - &type_of::struct_llfields(bcx.ccx, variant_layout), - variant_layout.is_packed()); - downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); - } - _ => {} + // If this is an enum, cast to the appropriate variant struct type. + match *self.layout.layout { + layout::Layout::NullablePointer { .. } | + layout::Layout::General { .. } => { + let variant_ty = Type::struct_(bcx.ccx, + &type_of::struct_llfields(bcx.ccx, downcast.layout), + downcast.layout.is_packed()); + downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); } - - downcast - } else { - bug!("unexpected type `{}` in LvalueRef::project_downcast", ty) + _ => {} } + + downcast } pub fn storage_live(&self, bcx: &Builder<'a, 'tcx>) { - bcx.lifetime_start(self.llval, bcx.ccx.size_of(self.ty.to_ty(bcx.tcx()))); + bcx.lifetime_start(self.llval, self.layout.size(bcx.ccx)); } pub fn storage_dead(&self, bcx: &Builder<'a, 'tcx>) { - bcx.lifetime_end(self.llval, bcx.ccx.size_of(self.ty.to_ty(bcx.tcx()))); + bcx.lifetime_end(self.llval, self.layout.size(bcx.ccx)); } } @@ -480,7 +469,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Lvalue::Local(_) => bug!(), // handled above mir::Lvalue::Static(box mir::Static { def_id, ty }) => { LvalueRef::new_sized(consts::get_static(ccx, def_id), - self.monomorphize(&ty), + ccx.layout_of(self.monomorphize(&ty)), Alignment::AbiAligned) }, mir::Lvalue::Projection(box mir::Projection { @@ -488,7 +477,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { elem: mir::ProjectionElem::Deref }) => { // Load the pointer from its location. 
- self.trans_consume(bcx, base).deref() + self.trans_consume(bcx, base).deref(bcx.ccx) } mir::Lvalue::Projection(ref projection) => { let tr_base = self.trans_lvalue(bcx, &projection.base); @@ -521,23 +510,20 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::ProjectionElem::Subslice { from, to } => { let mut subslice = tr_base.project_index(bcx, C_usize(bcx.ccx, from as u64)); - subslice.ty = tr_base.ty.projection_ty(tcx, &projection.elem); - subslice.ty = self.monomorphize(&subslice.ty); - - match subslice.ty.to_ty(tcx).sty { - ty::TyArray(..) => {} - ty::TySlice(..) => { - assert!(tr_base.has_extra()); - subslice.llextra = bcx.sub(tr_base.llextra, - C_usize(bcx.ccx, (from as u64) + (to as u64))); - } - _ => bug!("unexpected type {:?} in Subslice", subslice.ty) + let projected_ty = LvalueTy::Ty { ty: tr_base.layout.ty } + .projection_ty(tcx, &projection.elem).to_ty(bcx.tcx()); + subslice.layout = bcx.ccx.layout_of(self.monomorphize(&projected_ty)); + + if subslice.layout.is_unsized() { + assert!(tr_base.has_extra()); + subslice.llextra = bcx.sub(tr_base.llextra, + C_usize(bcx.ccx, (from as u64) + (to as u64))); } // Cast the lvalue pointer type to the new // array or slice type (*[%_; new_len]). 
subslice.llval = bcx.pointercast(subslice.llval, - bcx.ccx.llvm_type_of(subslice.ty.to_ty(tcx)).ptr_to()); + bcx.ccx.llvm_type_of(subslice.layout.ty).ptr_to()); subslice } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 7440551f322f9..21d2711df83c7 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -11,8 +11,8 @@ use libc::c_uint; use llvm::{self, ValueRef, BasicBlockRef}; use llvm::debuginfo::DIScope; -use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::LayoutOf; +use rustc::ty::{self, TypeFoldable}; +use rustc::ty::layout::{LayoutOf, FullLayout}; use rustc::mir::{self, Mir}; use rustc::ty::subst::Substs; use rustc::infer::TransNormalize; @@ -177,12 +177,12 @@ enum LocalRef<'tcx> { } impl<'a, 'tcx> LocalRef<'tcx> { - fn new_operand(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> LocalRef<'tcx> { - if common::type_is_zero_size(ccx, ty) { + fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: FullLayout<'tcx>) -> LocalRef<'tcx> { + if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but // we need something in the operand. 
- LocalRef::Operand(Some(OperandRef::new_zst(ccx, ty))) + LocalRef::Operand(Some(OperandRef::new_zst(ccx, layout))) } else { LocalRef::Operand(None) } @@ -253,7 +253,8 @@ pub fn trans_mir<'a, 'tcx: 'a>( let mut allocate_local = |local| { let decl = &mir.local_decls[local]; - let ty = mircx.monomorphize(&decl.ty); + let layout = bcx.ccx.layout_of(mircx.monomorphize(&decl.ty)); + assert!(!layout.ty.has_erasable_regions()); if let Some(name) = decl.name { // User variable @@ -262,15 +263,14 @@ pub fn trans_mir<'a, 'tcx: 'a>( if !lvalue_locals.contains(local.index()) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); - return LocalRef::new_operand(bcx.ccx, ty); + return LocalRef::new_operand(bcx.ccx, layout); } debug!("alloc: {:?} ({}) -> lvalue", local, name); - assert!(!ty.has_erasable_regions()); - let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str()); + let lvalue = LvalueRef::alloca(&bcx, layout, &name.as_str()); if dbg { let (scope, span) = mircx.debug_loc(decl.source_info); - declare_local(&bcx, &mircx.debug_context, name, ty, scope, + declare_local(&bcx, &mircx.debug_context, name, layout.ty, scope, VariableAccess::DirectVariable { alloca: lvalue.llval }, VariableKind::LocalVariable, span); } @@ -280,17 +280,18 @@ pub fn trans_mir<'a, 'tcx: 'a>( if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return pointer) -> lvalue", local); let llretptr = llvm::get_param(llfn, 0); - LocalRef::Lvalue(LvalueRef::new_sized(llretptr, ty, Alignment::AbiAligned)) + LocalRef::Lvalue(LvalueRef::new_sized(llretptr, + layout, + Alignment::AbiAligned)) } else if lvalue_locals.contains(local.index()) { debug!("alloc: {:?} -> lvalue", local); - assert!(!ty.has_erasable_regions()); - LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local))) + LocalRef::Lvalue(LvalueRef::alloca(&bcx, layout, &format!("{:?}", local))) } else { // If this is an immediate local, we do not create an // alloca in advance. 
Instead we wait until we see the // definition and update the operand there. debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(bcx.ccx, ty) + LocalRef::new_operand(bcx.ccx, layout) } } }; @@ -381,7 +382,6 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, mir.args_iter().enumerate().map(|(arg_index, local)| { let arg_decl = &mir.local_decls[local]; - let arg_ty = mircx.monomorphize(&arg_decl.ty); let name = if let Some(name) = arg_decl.name { name.as_str().to_string() @@ -395,12 +395,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // to reconstruct it into a tuple local variable, from multiple // individual LLVM function arguments. + let arg_ty = mircx.monomorphize(&arg_decl.ty); let tupled_arg_tys = match arg_ty.sty { ty::TyTuple(ref tys, _) => tys, _ => bug!("spread argument isn't a tuple?!") }; - let lvalue = LvalueRef::alloca(bcx, arg_ty, &name); + let lvalue = LvalueRef::alloca(bcx, bcx.ccx.layout_of(arg_ty), &name); for i in 0..tupled_arg_tys.len() { let arg = &mircx.fn_ty.args[idx]; idx += 1; @@ -439,7 +440,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); bcx.set_value_name(llarg, &name); llarg_idx += 1; - LvalueRef::new_sized(llarg, arg_ty, Alignment::AbiAligned) + LvalueRef::new_sized(llarg, arg.layout, Alignment::AbiAligned) } else if !lvalue_locals.contains(local.index()) && !arg.nested.is_empty() { assert_eq!(arg.nested.len(), 2); @@ -453,15 +454,15 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let mut b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; - if common::type_is_fat_ptr(bcx.ccx, arg_ty) { + if common::type_is_fat_ptr(bcx.ccx, arg.layout.ty) { // FIXME(eddyb) As we can't perfectly represent the data and/or // vtable pointer in a fat pointers in Rust's typesystem, and // because we split fat pointers into two ArgType's, they're // not the right type so we have to cast them for now. 
- let pointee = match arg_ty.sty { + let pointee = match arg.layout.ty.sty { ty::TyRef(_, ty::TypeAndMut{ty, ..}) | ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => ty, - ty::TyAdt(def, _) if def.is_box() => arg_ty.boxed_ty(), + ty::TyAdt(def, _) if def.is_box() => arg.layout.ty.boxed_ty(), _ => bug!() }; let data_llty = bcx.ccx.llvm_type_of(pointee); @@ -475,13 +476,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, return LocalRef::Operand(Some(OperandRef { val: OperandValue::Pair(a, b), - ty: arg_ty + layout: arg.layout })); } else if !lvalue_locals.contains(local.index()) && !arg.is_indirect() && arg.cast.is_none() && arg_scope.is_none() { if arg.is_ignore() { - return LocalRef::new_operand(bcx.ccx, arg_ty); + return LocalRef::new_operand(bcx.ccx, arg.layout); } // We don't have to cast or keep the argument in the alloca. @@ -495,11 +496,11 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, llarg_idx += 1; let operand = OperandRef { val: OperandValue::Immediate(llarg), - ty: arg_ty + layout: arg.layout }; return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); } else { - let tmp = LvalueRef::alloca(bcx, arg_ty, &name); + let tmp = LvalueRef::alloca(bcx, arg.layout, &name); arg.store_fn_arg(bcx, &mut llarg_idx, tmp); tmp }; @@ -523,7 +524,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, bcx, &mircx.debug_context, arg_decl.name.unwrap_or(keywords::Invalid.name()), - arg_ty, + arg.layout.ty, scope, variable_access, VariableKind::ArgumentVariable(arg_index + 1), @@ -533,15 +534,15 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } // Or is it the closure environment? 
- let (closure_ty, env_ref) = match arg_ty.sty { - ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (mt.ty, true), - _ => (arg_ty, false) + let (closure_layout, env_ref) = match arg.layout.ty.sty { + ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (bcx.ccx.layout_of(mt.ty), true), + _ => (arg.layout, false) }; - let upvar_tys = match closure_ty.sty { + let upvar_tys = match closure_layout.ty.sty { ty::TyClosure(def_id, substs) | ty::TyGenerator(def_id, substs, _) => substs.upvar_tys(def_id, tcx), - _ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_ty) + _ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_layout.ty) }; // Store the pointer to closure data in an alloca for debuginfo @@ -552,17 +553,17 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // doesn't actually strip the offset when splitting the closure // environment into its components so it ends up out of bounds. let env_ptr = if !env_ref { - let alloc_ty = tcx.mk_mut_ptr(arg_ty); - let alloc = LvalueRef::alloca(bcx, alloc_ty, "__debuginfo_env_ptr"); + let alloc = LvalueRef::alloca(bcx, + bcx.ccx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)), + "__debuginfo_env_ptr"); bcx.store(lvalue.llval, alloc.llval, None); alloc.llval } else { lvalue.llval }; - let layout = bcx.ccx.layout_of(closure_ty); for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() { - let byte_offset_of_var_in_env = layout.fields.offset(i).bytes(); + let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes(); let ops = unsafe { [llvm::LLVMRustDIBuilderCreateOpDeref(), diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 11c09960a3e09..24f6a84f62072 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -9,10 +9,9 @@ // except according to those terms. 
use llvm::ValueRef; -use rustc::ty::{self, Ty}; -use rustc::ty::layout::LayoutOf; +use rustc::ty; +use rustc::ty::layout::{LayoutOf, FullLayout}; use rustc::mir; -use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; use base; @@ -42,6 +41,22 @@ pub enum OperandValue { Pair(ValueRef, ValueRef) } +impl fmt::Debug for OperandValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + OperandValue::Ref(r, align) => { + write!(f, "Ref({:?}, {:?})", Value(r), align) + } + OperandValue::Immediate(i) => { + write!(f, "Immediate({:?})", Value(i)) + } + OperandValue::Pair(a, b) => { + write!(f, "Pair({:?}, {:?})", Value(a), Value(b)) + } + } + } +} + /// An `OperandRef` is an "SSA" reference to a Rust value, along with /// its type. /// @@ -55,35 +70,24 @@ pub struct OperandRef<'tcx> { // The value. pub val: OperandValue, - // The type of value being returned. - pub ty: Ty<'tcx> + // The layout of value, based on its Rust type. + pub layout: FullLayout<'tcx>, } impl<'tcx> fmt::Debug for OperandRef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.val { - OperandValue::Ref(r, align) => { - write!(f, "OperandRef(Ref({:?}, {:?}) @ {:?})", - Value(r), align, self.ty) - } - OperandValue::Immediate(i) => { - write!(f, "OperandRef(Immediate({:?}) @ {:?})", - Value(i), self.ty) - } - OperandValue::Pair(a, b) => { - write!(f, "OperandRef(Pair({:?}, {:?}) @ {:?})", - Value(a), Value(b), self.ty) - } - } + write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout) } } impl<'a, 'tcx> OperandRef<'tcx> { pub fn new_zst(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> OperandRef<'tcx> { - assert!(common::type_is_zero_size(ccx, ty)); - let llty = ccx.llvm_type_of(ty); - Const::new(C_undef(llty), ty).to_operand(ccx) + layout: FullLayout<'tcx>) -> OperandRef<'tcx> { + assert!(layout.is_zst()); + let llty = ccx.llvm_type_of(layout.ty); + // FIXME(eddyb) ZSTs should always be immediate, not pairs. 
+ // This hack only exists to unpack a constant undef pair. + Const::new(C_undef(llty), layout.ty).to_operand(ccx) } /// Asserts that this operand refers to a scalar and returns @@ -95,8 +99,8 @@ impl<'a, 'tcx> OperandRef<'tcx> { } } - pub fn deref(self) -> LvalueRef<'tcx> { - let projected_ty = self.ty.builtin_deref(true, ty::NoPreference) + pub fn deref(self, ccx: &CrateContext<'a, 'tcx>) -> LvalueRef<'tcx> { + let projected_ty = self.layout.ty.builtin_deref(true, ty::NoPreference) .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty; let (llptr, llextra) = match self.val { OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()), @@ -106,7 +110,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { LvalueRef { llval: llptr, llextra, - ty: LvalueTy::from_ty(projected_ty), + layout: ccx.layout_of(projected_ty), alignment: Alignment::AbiAligned, } } @@ -115,15 +119,14 @@ impl<'a, 'tcx> OperandRef<'tcx> { /// Immediate aggregate with the two values. pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Pair(a, b) = self.val { - let llty = bcx.ccx.llvm_type_of(self.ty); + let llty = bcx.ccx.llvm_type_of(self.layout.ty); debug!("Operand::pack_if_pair: packing {:?} into {:?}", self, llty); // Reconstruct the immediate aggregate. let mut llpair = C_undef(llty); let elems = [a, b]; - let layout = bcx.ccx.layout_of(self.ty); for i in 0..2 { let elem = base::from_immediate(bcx, elems[i]); - llpair = bcx.insert_value(llpair, elem, layout.llvm_field_index(i)); + llpair = bcx.insert_value(llpair, elem, self.layout.llvm_field_index(i)); } self.val = OperandValue::Immediate(llpair); } @@ -135,33 +138,33 @@ impl<'a, 'tcx> OperandRef<'tcx> { pub fn unpack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Immediate(llval) = self.val { // Deconstruct the immediate aggregate. 
- if common::type_is_imm_pair(bcx.ccx, self.ty) { + if common::type_is_imm_pair(bcx.ccx, self.layout.ty) { debug!("Operand::unpack_if_pair: unpacking {:?}", self); - let layout = bcx.ccx.layout_of(self.ty); - - let a = bcx.extract_value(llval, layout.llvm_field_index(0)); - let a = base::to_immediate(bcx, a, layout.field(bcx.ccx, 0).ty); + let a = bcx.extract_value(llval, self.layout.llvm_field_index(0)); + let a = base::to_immediate(bcx, a, self.layout.field(bcx.ccx, 0).ty); - let b = bcx.extract_value(llval, layout.llvm_field_index(1)); - let b = base::to_immediate(bcx, b, layout.field(bcx.ccx, 1).ty); + let b = bcx.extract_value(llval, self.layout.llvm_field_index(1)); + let b = base::to_immediate(bcx, b, self.layout.field(bcx.ccx, 1).ty); self.val = OperandValue::Pair(a, b); } } self } +} +impl<'a, 'tcx> OperandValue { pub fn store(self, bcx: &Builder<'a, 'tcx>, dest: LvalueRef<'tcx>) { debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest); // Avoid generating stores of zero-sized values, because the only way to have a zero-sized // value is through `undef`, and store itself is useless. - if common::type_is_zero_size(bcx.ccx, self.ty) { + if dest.layout.is_zst() { return; } - match self.val { + match self { OperandValue::Ref(r, source_align) => - base::memcpy_ty(bcx, dest.llval, r, self.ty, + base::memcpy_ty(bcx, dest.llval, r, dest.layout, (source_align | dest.alignment).non_abi()), OperandValue::Immediate(s) => { bcx.store(base::from_immediate(bcx, s), dest.llval, dest.alignment.non_abi()); @@ -169,12 +172,12 @@ impl<'a, 'tcx> OperandRef<'tcx> { OperandValue::Pair(a, b) => { // See comment above about zero-sized values. 
let dest_a = dest.project_field(bcx, 0); - if !common::type_is_zero_size(bcx.ccx, dest_a.ty.to_ty(bcx.tcx())) { + if !dest_a.layout.is_zst() { let a = base::from_immediate(bcx, a); bcx.store(a, dest_a.llval, dest_a.alignment.non_abi()); } let dest_b = dest.project_field(bcx, 1); - if !common::type_is_zero_size(bcx.ccx, dest_b.ty.to_ty(bcx.tcx())) { + if !dest_b.layout.is_zst() { let b = base::from_immediate(bcx, b); bcx.store(b, dest_b.llval, dest_b.alignment.non_abi()); } @@ -217,7 +220,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let llval = [a, b][f.index()]; let op = OperandRef { val: OperandValue::Immediate(llval), - ty: self.monomorphize(&ty) + layout: bcx.ccx.layout_of(self.monomorphize(&ty)) }; // Handle nested pairs. @@ -251,7 +254,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let operand = val.to_operand(bcx.ccx); if let OperandValue::Ref(ptr, align) = operand.val { // If this is a OperandValue::Ref to an immediate constant, load it. - LvalueRef::new_sized(ptr, operand.ty, align).load(bcx) + LvalueRef::new_sized(ptr, operand.layout, align).load(bcx) } else { operand } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 518f36a77b5fc..5c24e4b5536d2 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -49,7 +49,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let tr_operand = self.trans_operand(&bcx, operand); // FIXME: consider not copying constants through stack. (fixable by translating // constants into OperandValue::Ref, why don’t we do that yet if we don’t?) - tr_operand.store(&bcx, dest); + tr_operand.val.store(&bcx, dest); bcx } @@ -60,7 +60,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. 
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); - temp.store(&bcx, dest); + temp.val.store(&bcx, dest); return bcx; } @@ -80,14 +80,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // index into the struct, and this case isn't // important enough for it. debug!("trans_rvalue: creating ugly alloca"); - let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp"); + let scratch = LvalueRef::alloca(&bcx, operand.layout, "__unsize_temp"); scratch.storage_live(&bcx); - operand.store(&bcx, scratch); + operand.val.store(&bcx, scratch); base::coerce_unsized_into(&bcx, scratch, dest); scratch.storage_dead(&bcx); } OperandValue::Ref(llref, align) => { - let source = LvalueRef::new_sized(llref, operand.ty, align); + let source = LvalueRef::new_sized(llref, operand.layout, align); base::coerce_unsized_into(&bcx, source, dest); } } @@ -98,8 +98,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let tr_elem = self.trans_operand(&bcx, elem); // Do not generate the loop for zero-sized elements or empty arrays. 
- let dest_ty = dest.ty.to_ty(bcx.tcx()); - if common::type_is_zero_size(bcx.ccx, dest_ty) { + if dest.layout.is_zst() { return bcx; } @@ -107,9 +106,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let OperandValue::Immediate(v) = tr_elem.val { let align = dest.alignment.non_abi() - .unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); + .unwrap_or_else(|| tr_elem.layout.align(bcx.ccx)); let align = C_i32(bcx.ccx, align.abi() as i32); - let size = C_usize(bcx.ccx, bcx.ccx.size_of(dest_ty).bytes()); + let size = C_usize(bcx.ccx, dest.layout.size(bcx.ccx).bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays if common::is_const_integral(v) && common::const_to_uint(v) == 0 { @@ -139,8 +138,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let keep_going = header_bcx.icmp(llvm::IntNE, current, end); header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb()); - tr_elem.store(&body_bcx, - LvalueRef::new_sized(current, tr_elem.ty, dest.alignment)); + tr_elem.val.store(&body_bcx, + LvalueRef::new_sized(current, tr_elem.layout, dest.alignment)); let next = body_bcx.inbounds_gep(current, &[C_usize(bcx.ccx, 1)]); body_bcx.br(header_bcx.llbb()); @@ -164,9 +163,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. 
- if !common::type_is_zero_size(bcx.ccx, op.ty) { + if !op.layout.is_zst() { let field_index = active_field_index.unwrap_or(i); - op.store(&bcx, dest.project_field(&bcx, field_index)); + op.val.store(&bcx, dest.project_field(&bcx, field_index)); } } bcx @@ -175,7 +174,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { _ => { assert!(self.rvalue_creates_operand(rvalue)); let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); - temp.store(&bcx, dest); + temp.val.store(&bcx, dest); bcx } } @@ -189,32 +188,32 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { assert!(self.rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); match *rvalue { - mir::Rvalue::Cast(ref kind, ref source, cast_ty) => { + mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => { let operand = self.trans_operand(&bcx, source); debug!("cast operand is {:?}", operand); - let cast_ty = self.monomorphize(&cast_ty); + let cast = bcx.ccx.layout_of(self.monomorphize(&mir_cast_ty)); let val = match *kind { mir::CastKind::ReifyFnPointer => { - match operand.ty.sty { + match operand.layout.ty.sty { ty::TyFnDef(def_id, substs) => { OperandValue::Immediate( callee::resolve_and_get_fn(bcx.ccx, def_id, substs)) } _ => { - bug!("{} cannot be reified to a fn ptr", operand.ty) + bug!("{} cannot be reified to a fn ptr", operand.layout.ty) } } } mir::CastKind::ClosureFnPointer => { - match operand.ty.sty { + match operand.layout.ty.sty { ty::TyClosure(def_id, substs) => { let instance = monomorphize::resolve_closure( bcx.ccx.tcx(), def_id, substs, ty::ClosureKind::FnOnce); OperandValue::Immediate(callee::get_fn(bcx.ccx, instance)) } _ => { - bug!("{} cannot be cast to a fn ptr", operand.ty) + bug!("{} cannot be cast to a fn ptr", operand.layout.ty) } } } @@ -225,7 +224,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::CastKind::Unsize => { // unsize targets other than to a fat pointer currently // can't be operands. 
- assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty)); + assert!(common::type_is_fat_ptr(bcx.ccx, cast.ty)); match operand.val { OperandValue::Pair(lldata, llextra) => { @@ -235,14 +234,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // &'a fmt::Debug+Send => &'a fmt::Debug, // So we need to pointercast the base to ensure // the types match up. - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty); + let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast.ty); let lldata = bcx.pointercast(lldata, llcast_ty); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { // "standard" unsize let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata, - operand.ty, cast_ty); + operand.layout.ty, cast.ty); OperandValue::Pair(lldata, llextra) } OperandValue::Ref(..) => { @@ -251,16 +250,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } } - mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => { + mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.layout.ty) => { if let OperandValue::Pair(data_ptr, meta) = operand.val { - if common::type_is_fat_ptr(bcx.ccx, cast_ty) { - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty); + if common::type_is_fat_ptr(bcx.ccx, cast.ty) { + let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast.ty); let data_cast = bcx.pointercast(data_ptr, llcast_ty); OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. 
- let llcast_ty = bcx.ccx.immediate_llvm_type_of(cast_ty); + let llcast_ty = bcx.ccx.immediate_llvm_type_of(cast.ty); let llval = bcx.pointercast(data_ptr, llcast_ty); OperandValue::Immediate(llval) } @@ -269,15 +268,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } mir::CastKind::Misc => { - debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty)); - let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); - let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); - let ll_t_in = bcx.ccx.immediate_llvm_type_of(operand.ty); - let ll_t_out = bcx.ccx.immediate_llvm_type_of(cast_ty); + debug_assert!(common::type_is_immediate(bcx.ccx, cast.ty)); + let r_t_in = CastTy::from_ty(operand.layout.ty) + .expect("bad input type for cast"); + let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); + let ll_t_in = bcx.ccx.immediate_llvm_type_of(operand.layout.ty); + let ll_t_out = bcx.ccx.immediate_llvm_type_of(cast.ty); let llval = operand.immediate(); - let l = bcx.ccx.layout_of(operand.ty); - if let Layout::General { ref discr_range, .. } = *l.layout { + if let Layout::General { ref discr_range, .. 
} = *operand.layout.layout { if discr_range.end > discr_range.start { // We want `table[e as usize]` to not // have bound checks, and this is the most @@ -291,7 +290,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } - let signed = match l.abi { + let signed = match operand.layout.abi { layout::Abi::Scalar(layout::Int(_, signed)) => signed, _ => false }; @@ -326,49 +325,43 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { cast_float_to_int(&bcx, true, llval, ll_t_in, ll_t_out), (CastTy::Float, CastTy::Int(_)) => cast_float_to_int(&bcx, false, llval, ll_t_in, ll_t_out), - _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty) + _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty) }; OperandValue::Immediate(newval) } }; - let operand = OperandRef { + (bcx, OperandRef { val, - ty: cast_ty - }; - (bcx, operand) + layout: cast + }) } mir::Rvalue::Ref(_, bk, ref lvalue) => { let tr_lvalue = self.trans_lvalue(&bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.tcx()); - let ref_ty = bcx.tcx().mk_ref( - bcx.tcx().types.re_erased, - ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() } - ); + let ty = tr_lvalue.layout.ty; // Note: lvalues are indirect, so storing the `llval` into the // destination effectively creates a reference. 
- let operand = if !bcx.ccx.shared().type_has_metadata(ty) { - OperandRef { - val: OperandValue::Immediate(tr_lvalue.llval), - ty: ref_ty, - } + let val = if !bcx.ccx.shared().type_has_metadata(ty) { + OperandValue::Immediate(tr_lvalue.llval) } else { - OperandRef { - val: OperandValue::Pair(tr_lvalue.llval, - tr_lvalue.llextra), - ty: ref_ty, - } + OperandValue::Pair(tr_lvalue.llval, tr_lvalue.llextra) }; - (bcx, operand) + (bcx, OperandRef { + val, + layout: self.ccx.layout_of(self.ccx.tcx().mk_ref( + self.ccx.tcx().types.re_erased, + ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() } + )), + }) } mir::Rvalue::Len(ref lvalue) => { let size = self.evaluate_array_len(&bcx, lvalue); let operand = OperandRef { val: OperandValue::Immediate(size), - ty: bcx.tcx().types.usize, + layout: bcx.ccx.layout_of(bcx.tcx().types.usize), }; (bcx, operand) } @@ -376,14 +369,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { let lhs = self.trans_operand(&bcx, lhs); let rhs = self.trans_operand(&bcx, rhs); - let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) { + let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.layout.ty) { match (lhs.val, rhs.val) { (OperandValue::Pair(lhs_addr, lhs_extra), OperandValue::Pair(rhs_addr, rhs_extra)) => { self.trans_fat_ptr_binop(&bcx, op, lhs_addr, lhs_extra, rhs_addr, rhs_extra, - lhs.ty) + lhs.layout.ty) } _ => bug!() } @@ -391,11 +384,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } else { self.trans_scalar_binop(&bcx, op, lhs.immediate(), rhs.immediate(), - lhs.ty) + lhs.layout.ty) }; let operand = OperandRef { val: OperandValue::Immediate(llresult), - ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty), + layout: bcx.ccx.layout_of( + op.ty(bcx.tcx(), lhs.layout.ty, rhs.layout.ty)), }; (bcx, operand) } @@ -404,12 +398,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let rhs = self.trans_operand(&bcx, rhs); let result = self.trans_scalar_checked_binop(&bcx, op, lhs.immediate(), rhs.immediate(), - lhs.ty); - let 
val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty); + lhs.layout.ty); + let val_ty = op.ty(bcx.tcx(), lhs.layout.ty, rhs.layout.ty); let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool], false); let operand = OperandRef { val: result, - ty: operand_ty + layout: bcx.ccx.layout_of(operand_ty) }; (bcx, operand) @@ -418,7 +412,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::UnaryOp(op, ref operand) => { let operand = self.trans_operand(&bcx, operand); let lloperand = operand.immediate(); - let is_float = operand.ty.is_fp(); + let is_float = operand.layout.ty.is_fp(); let llval = match op { mir::UnOp::Not => bcx.not(lloperand), mir::UnOp::Neg => if is_float { @@ -429,7 +423,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; (bcx, OperandRef { val: OperandValue::Immediate(llval), - ty: operand.ty, + layout: operand.layout, }) } @@ -439,7 +433,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { .trans_get_discr(&bcx, discr_ty); (bcx, OperandRef { val: OperandValue::Immediate(discr), - ty: discr_ty + layout: self.ccx.layout_of(discr_ty) }) } @@ -449,7 +443,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let tcx = bcx.tcx(); (bcx, OperandRef { val: OperandValue::Immediate(val), - ty: tcx.types.usize, + layout: self.ccx.layout_of(tcx.types.usize), }) } @@ -458,14 +452,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (size, align) = bcx.ccx.size_and_align_of(content_ty); let llsize = C_usize(bcx.ccx, size.bytes()); let llalign = C_usize(bcx.ccx, align.abi()); - let box_ty = bcx.tcx().mk_box(content_ty); - let llty_ptr = bcx.ccx.llvm_type_of(box_ty); + let box_layout = bcx.ccx.layout_of(bcx.tcx().mk_box(content_ty)); + let llty_ptr = bcx.ccx.llvm_type_of(box_layout.ty); // Allocate space: let def_id = match bcx.tcx().lang_items().require(ExchangeMallocFnLangItem) { Ok(id) => id, Err(s) => { - bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s)); + bcx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); } }; let instance = ty::Instance::mono(bcx.tcx(), 
def_id); @@ -474,7 +468,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let operand = OperandRef { val: OperandValue::Immediate(val), - ty: box_ty, + layout: box_layout, }; (bcx, operand) } @@ -487,7 +481,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // According to `rvalue_creates_operand`, only ZST // aggregate rvalues are allowed to be operands. let ty = rvalue.ty(self.mir, self.ccx.tcx()); - (bcx, OperandRef::new_zst(self.ccx, self.monomorphize(&ty))) + (bcx, OperandRef::new_zst(self.ccx, + self.ccx.layout_of(self.monomorphize(&ty)))) } } } @@ -500,11 +495,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // because trans_lvalue() panics if Local is operand. if let mir::Lvalue::Local(index) = *lvalue { if let LocalRef::Operand(Some(op)) = self.locals[index] { - if common::type_is_zero_size(bcx.ccx, op.ty) { - if let ty::TyArray(_, n) = op.ty.sty { - let n = n.val.to_const_int().unwrap().to_u64().unwrap(); - return common::C_usize(bcx.ccx, n); - } + if let ty::TyArray(_, n) = op.layout.ty.sty { + let n = n.val.to_const_int().unwrap().to_u64().unwrap(); + return common::C_usize(bcx.ccx, n); } } } @@ -709,7 +702,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Aggregate(..) 
=> { let ty = rvalue.ty(self.mir, self.ccx.tcx()); let ty = self.monomorphize(&ty); - common::type_is_zero_size(self.ccx, ty) + self.ccx.layout_of(ty).is_zst() } } diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 2559b21c46b06..607ecd887fa78 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -11,7 +11,6 @@ use rustc::mir; use asm; -use common; use builder::Builder; use super::MirContext; @@ -37,18 +36,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { self.locals[index] = LocalRef::Operand(Some(operand)); bcx } - LocalRef::Operand(Some(_)) => { - let ty = self.monomorphized_lvalue_ty(lvalue); - - if !common::type_is_zero_size(bcx.ccx, ty) { + LocalRef::Operand(Some(op)) => { + if !op.layout.is_zst() { span_bug!(statement.source_info.span, "operand {:?} already assigned", rvalue); - } else { - // If the type is zero-sized, it's already been set here, - // but we still need to make sure we translate the operand - self.trans_rvalue_operand(bcx, rvalue).0 } + + // If the type is zero-sized, it's already been set here, + // but we still need to make sure we translate the operand + self.trans_rvalue_operand(bcx, rvalue).0 } } } else { @@ -75,8 +72,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { let outputs = outputs.iter().map(|output| { - let lvalue = self.trans_lvalue(&bcx, output); - (lvalue.llval, lvalue.ty.to_ty(bcx.tcx())) + self.trans_lvalue(&bcx, output) }).collect(); let input_vals = inputs.iter().map(|input| { diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 7474e71a715f7..e9547bd746bfd 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -209,14 +209,7 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { /// Returns alignment if it is different than the primitive alignment. 
pub fn over_align_of(&self, ty: Ty<'tcx>) -> Option { - let layout = self.layout_of(ty); - let align = layout.align(self); - let primitive_align = layout.primitive_align(self); - if align != primitive_align { - Some(align) - } else { - None - } + self.layout_of(ty).over_align(self) } /// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. @@ -275,10 +268,21 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { } pub trait LayoutLlvmExt { + fn over_align(&self, ccx: &CrateContext) -> Option; fn llvm_field_index(&self, index: usize) -> u64; } impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { + fn over_align(&self, ccx: &CrateContext) -> Option { + let align = self.align(ccx); + let primitive_align = self.primitive_align(ccx); + if align != primitive_align { + Some(align) + } else { + None + } + } + fn llvm_field_index(&self, index: usize) -> u64 { if let layout::Abi::Scalar(_) = self.abi { bug!("FullLayout::llvm_field_index({:?}): not applicable", self); From 3fd6b00082c16369119c2f67981ceb8b47bc71a2 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Wed, 20 Sep 2017 23:07:47 +0300 Subject: [PATCH 38/69] rustc_trans: query LLVM types from a layout instead of a Ty. 
--- src/librustc_trans/abi.rs | 7 +-- src/librustc_trans/asm.rs | 3 +- src/librustc_trans/base.rs | 12 ++--- src/librustc_trans/callee.rs | 5 +- src/librustc_trans/common.rs | 3 +- src/librustc_trans/consts.rs | 11 +++-- src/librustc_trans/context.rs | 4 +- src/librustc_trans/intrinsic.rs | 5 +- src/librustc_trans/mir/block.rs | 38 ++++++--------- src/librustc_trans/mir/constant.rs | 21 +++++---- src/librustc_trans/mir/lvalue.rs | 19 ++++---- src/librustc_trans/mir/mod.rs | 4 +- src/librustc_trans/mir/operand.rs | 8 ++-- src/librustc_trans/mir/rvalue.rs | 10 ++-- src/librustc_trans/trans_item.rs | 4 +- src/librustc_trans/type_of.rs | 76 ++++++++++++++---------------- 16 files changed, 114 insertions(+), 116 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 0b09bca7b6889..bd9a460846b19 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -32,6 +32,7 @@ use cabi_nvptx64; use cabi_hexagon; use mir::lvalue::LvalueRef; use type_::Type; +use type_of::LayoutLlvmExt; use rustc::hir; use rustc::ty::{self, Ty}; @@ -506,7 +507,7 @@ impl<'a, 'tcx> ArgType<'tcx> { /// Get the LLVM type for an lvalue of the original Rust type of /// this argument/return, i.e. the result of `type_of::type_of`. 
pub fn memory_ty(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { - ccx.llvm_type_of(self.layout.ty) + self.layout.llvm_type(ccx) } /// Store a direct/indirect value described by this ArgType into a @@ -934,7 +935,7 @@ impl<'a, 'tcx> FnType<'tcx> { } else if let Some(cast) = self.ret.cast { cast.llvm_type(ccx) } else { - ccx.immediate_llvm_type_of(self.ret.layout.ty) + self.ret.layout.immediate_llvm_type(ccx) }; { @@ -952,7 +953,7 @@ impl<'a, 'tcx> FnType<'tcx> { } else if let Some(cast) = arg.cast { cast.llvm_type(ccx) } else { - ccx.immediate_llvm_type_of(arg.layout.ty) + arg.layout.immediate_llvm_type(ccx) }; llargument_tys.push(llarg_ty); diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index 44bdc75f8461f..1959fd13ccb29 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -13,6 +13,7 @@ use llvm::{self, ValueRef}; use common::*; use type_::Type; +use type_of::LayoutLlvmExt; use builder::Builder; use rustc::hir; @@ -44,7 +45,7 @@ pub fn trans_inline_asm<'a, 'tcx>( if out.is_indirect { indirect_outputs.push(lvalue.load(bcx).immediate()); } else { - output_types.push(bcx.ccx.llvm_type_of(lvalue.layout.ty)); + output_types.push(lvalue.layout.llvm_type(bcx.ccx)); } } if !indirect_outputs.is_empty() { diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 036f1dfbcfd96..545e986d7d10f 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -40,7 +40,7 @@ use rustc::middle::lang_items::StartFnLangItem; use rustc::middle::trans::{Linkage, Visibility, Stats}; use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes}; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{Align, FullLayout}; +use rustc::ty::layout::{self, Align, FullLayout, LayoutOf}; use rustc::ty::maps::Providers; use rustc::dep_graph::{DepNode, DepKind, DepConstructor}; use rustc::middle::cstore::{self, LinkMeta, LinkagePreference}; @@ -68,7 +68,7 @@ use symbol_names_test; use time_graph; use 
trans_item::{TransItem, BaseTransItemExt, TransItemExt, DefPathBasedNames}; use type_::Type; -use type_of; +use type_of::{self, LayoutLlvmExt}; use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet, DefIdSet}; use CrateInfo; @@ -228,13 +228,13 @@ pub fn unsize_thin_ptr<'a, 'tcx>( (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => { assert!(bcx.ccx.shared().type_is_sized(a)); - let ptr_ty = bcx.ccx.llvm_type_of(b).ptr_to(); + let ptr_ty = bcx.ccx.layout_of(b).llvm_type(bcx.ccx).ptr_to(); (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None)) } (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => { let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); assert!(bcx.ccx.shared().type_is_sized(a)); - let ptr_ty = bcx.ccx.llvm_type_of(b).ptr_to(); + let ptr_ty = bcx.ccx.layout_of(b).llvm_type(bcx.ccx).ptr_to(); (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None)) } _ => bug!("unsize_thin_ptr: called on bad types"), @@ -371,8 +371,8 @@ pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef { } } -pub fn to_immediate(bcx: &Builder, val: ValueRef, ty: Ty) -> ValueRef { - if ty.is_bool() { +pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::FullLayout) -> ValueRef { + if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = layout.abi { bcx.trunc(val, Type::i1(bcx.ccx)) } else { val diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index bb271a574a5e9..4afeac2e8f589 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -20,8 +20,11 @@ use consts; use declare; use llvm::{self, ValueRef}; use monomorphize::Instance; +use type_of::LayoutLlvmExt; + use rustc::hir::def_id::DefId; use rustc::ty::{self, TypeFoldable}; +use rustc::ty::layout::LayoutOf; use rustc::traits; use rustc::ty::subst::Substs; use rustc_back::PanicStrategy; @@ -55,7 +58,7 @@ pub fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // Create a 
fn pointer with the substituted signature. let fn_ptr_ty = tcx.mk_fn_ptr(common::ty_fn_sig(ccx, fn_ty)); - let llptrty = ccx.llvm_type_of(fn_ptr_ty); + let llptrty = ccx.layout_of(fn_ptr_ty).llvm_type(ccx); let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) { // This is subtle and surprising, but sometimes we have to bitcast diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 55f02ed5f9171..ed8f65834068b 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -24,6 +24,7 @@ use builder::Builder; use consts; use declare; use type_::Type; +use type_of::LayoutLlvmExt; use value::Value; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt}; @@ -254,7 +255,7 @@ pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> Va pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef { let len = s.len(); let cs = consts::ptrcast(C_cstr(cx, s, false), - cx.llvm_type_of(cx.tcx().mk_str()).ptr_to()); + cx.layout_of(cx.tcx().mk_str()).llvm_type(cx).ptr_to()); let empty = C_array(Type::i8(cx), &[]); assert_eq!(abi::FAT_PTR_ADDR, 0); assert_eq!(abi::FAT_PTR_EXTRA, 1); diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs index 339405ab1baf0..cfca3b57cb9d7 100644 --- a/src/librustc_trans/consts.rs +++ b/src/librustc_trans/consts.rs @@ -21,8 +21,9 @@ use common::{self, CrateContext, val_ty}; use declare; use monomorphize::Instance; use type_::Type; +use type_of::LayoutLlvmExt; use rustc::ty; -use rustc::ty::layout::Align; +use rustc::ty::layout::{Align, LayoutOf}; use rustc::hir; @@ -112,7 +113,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { let ty = common::instance_ty(ccx.tcx(), &instance); let g = if let Some(id) = ccx.tcx().hir.as_local_node_id(def_id) { - let llty = ccx.llvm_type_of(ty); + let llty = ccx.layout_of(ty).llvm_type(ccx); let (g, attrs) = match ccx.tcx().hir.get(id) { hir_map::NodeItem(&hir::Item { ref attrs, span, node: 
hir::ItemStatic(..), .. @@ -157,7 +158,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { } }; let llty2 = match ty.sty { - ty::TyRawPtr(ref mt) => ccx.llvm_type_of(mt.ty), + ty::TyRawPtr(ref mt) => ccx.layout_of(mt.ty).llvm_type(ccx), _ => { ccx.sess().span_fatal(span, "must have type `*const T` or `*mut T`"); } @@ -206,7 +207,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? // FIXME(nagisa): investigate whether it can be changed into define_global - let g = declare::declare_global(ccx, &sym, ccx.llvm_type_of(ty)); + let g = declare::declare_global(ccx, &sym, ccx.layout_of(ty).llvm_type(ccx)); // Thread-local statics in some other crate need to *always* be linked // against in a thread-local fashion, so we need to be sure to apply the // thread-local attribute locally if it was present remotely. If we @@ -266,7 +267,7 @@ pub fn trans_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let instance = Instance::mono(ccx.tcx(), def_id); let ty = common::instance_ty(ccx.tcx(), &instance); - let llty = ccx.llvm_type_of(ty); + let llty = ccx.layout_of(ty).llvm_type(ccx); let g = if val_llty == llty { g } else { diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 647cc54effe9d..555acaad505f0 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -24,6 +24,8 @@ use monomorphize::Instance; use partitioning::CodegenUnit; use type_::Type; +use type_of::LayoutLlvmExt; + use rustc_data_structures::base_n; use rustc::middle::trans::Stats; use rustc_data_structures::stable_hasher::StableHashingContextProvider; @@ -397,7 +399,7 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice"); str_slice_ty.set_struct_body(&[ Type::array(&Type::i8(&dummy_ccx), 0), - dummy_ccx.llvm_type_of(shared.tcx.mk_str()).ptr_to(), + 
dummy_ccx.layout_of(shared.tcx.mk_str()).llvm_type(&dummy_ccx).ptr_to(), Type::array(&Type::i8(&dummy_ccx), 0), Type::isize(&dummy_ccx), Type::array(&Type::i8(&dummy_ccx), 0) diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 1cdd192bfeda6..45d2f7c69e94e 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -21,6 +21,7 @@ use common::*; use declare; use glue; use type_::Type; +use type_of::LayoutLlvmExt; use rustc::ty::{self, Ty}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc::hir; @@ -104,7 +105,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let ret_ty = sig.output(); let name = &*tcx.item_name(def_id); - let llret_ty = ccx.llvm_type_of(ret_ty); + let llret_ty = ccx.layout_of(ret_ty).llvm_type(ccx); let result = LvalueRef::new_sized(llresult, fn_ty.ret.layout, Alignment::AbiAligned); let simple = get_simple_intrinsic(ccx, name); @@ -243,7 +244,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, unsafe { llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty).abi() as u32); } - to_immediate(bcx, load, tp_ty) + to_immediate(bcx, load, ccx.layout_of(tp_ty)) }, "volatile_store" => { let tp_ty = substs.type_at(0); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 82d5dabc86c7a..401d4d5221638 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -662,7 +662,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if arg.layout.ty == bcx.tcx().types.bool { llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None); // We store bools as i8 so we need to truncate to i1. 
- llval = base::to_immediate(bcx, llval, arg.layout.ty); + llval = base::to_immediate(bcx, llval, arg.layout); } else if let Some(ty) = arg.cast { llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()), (align | Alignment::Packed(arg.layout.align(bcx.ccx))) @@ -682,45 +682,37 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { args: &[ArgType<'tcx>]) { let tuple = self.trans_operand(bcx, operand); - let arg_types = match tuple.layout.ty.sty { - ty::TyTuple(ref tys, _) => tys, - _ => span_bug!(self.mir.span, - "bad final argument to \"rust-call\" fn {:?}", tuple.layout.ty) - }; - // Handle both by-ref and immediate tuples. match tuple.val { Ref(llval, align) => { let tuple_ptr = LvalueRef::new_sized(llval, tuple.layout, align); - for n in 0..arg_types.len() { - let field_ptr = tuple_ptr.project_field(bcx, n); - self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[n]); + for i in 0..tuple.layout.fields.count() { + let field_ptr = tuple_ptr.project_field(bcx, i); + self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[i]); } } Immediate(llval) => { - for (n, &ty) in arg_types.iter().enumerate() { - let mut elem = bcx.extract_value(llval, tuple.layout.llvm_field_index(n)); - // Truncate bools to i1, if needed - elem = base::to_immediate(bcx, elem, ty); + for i in 0..tuple.layout.fields.count() { + let field = tuple.layout.field(bcx.ccx, i); + let elem = bcx.extract_value(llval, tuple.layout.llvm_field_index(i)); // If the tuple is immediate, the elements are as well let op = OperandRef { - val: Immediate(elem), - layout: bcx.ccx.layout_of(ty), + val: Immediate(base::to_immediate(bcx, elem, field)), + layout: field, }; - self.trans_argument(bcx, op, llargs, &args[n]); + self.trans_argument(bcx, op, llargs, &args[i]); } } Pair(a, b) => { let elems = [a, b]; - for (n, &ty) in arg_types.iter().enumerate() { - let elem = base::to_immediate(bcx, elems[n], ty); + for i in 0..tuple.layout.fields.count() { // Pair is always made up of immediates let op = 
OperandRef { - val: Immediate(elem), - layout: bcx.ccx.layout_of(ty), + val: Immediate(elems[i]), + layout: tuple.layout.field(bcx.ccx, i), }; - self.trans_argument(bcx, op, llargs, &args[n]); + self.trans_argument(bcx, op, llargs, &args[i]); } } } @@ -891,7 +883,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) { let src = self.trans_operand(bcx, src); - let llty = bcx.ccx.llvm_type_of(src.layout.ty); + let llty = src.layout.llvm_type(bcx.ccx); let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); let align = src.layout.align(bcx.ccx).min(dst.layout.align(bcx.ccx)); src.val.store(bcx, diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 8b86eca755825..9465118568660 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -87,7 +87,7 @@ impl<'a, 'tcx> Const<'tcx> { cv: &ConstVal, ty: Ty<'tcx>) -> Const<'tcx> { - let llty = ccx.llvm_type_of(ty); + let llty = ccx.layout_of(ty).llvm_type(ccx); let val = match *cv { ConstVal::Float(v) => { let bits = match v.ty { @@ -139,7 +139,7 @@ impl<'a, 'tcx> Const<'tcx> { } pub fn to_operand(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> { - let llty = ccx.immediate_llvm_type_of(self.ty); + let llty = ccx.layout_of(self.ty).immediate_llvm_type(ccx); let llvalty = val_ty(self.llval); let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) { @@ -489,7 +489,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let llelem = if iv < len as u128 { const_get_elt(base.llval, iv as u64) } else { - C_undef(self.ccx.llvm_type_of(projected_ty)) + C_undef(self.ccx.layout_of(projected_ty).llvm_type(self.ccx)) }; (Base::Value(llelem), ptr::null_mut()) @@ -543,7 +543,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let elem_ty = array_ty.builtin_index().unwrap_or_else(|| { bug!("bad array type {:?}", array_ty) }); - let llunitty = self.ccx.llvm_type_of(elem_ty); + let llunitty = 
self.ccx.layout_of(elem_ty).llvm_type(self.ccx); // If the array contains enums, an LLVM array won't work. let val = if fields.iter().all(|&f| val_ty(f) == llunitty) { C_array(llunitty, fields) @@ -665,7 +665,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let unsized_ty = cast_ty.builtin_deref(true, ty::NoPreference) .expect("consts: unsizing got non-pointer target type").ty; - let ptr_ty = self.ccx.llvm_type_of(unsized_ty).ptr_to(); + let ptr_ty = self.ccx.layout_of(unsized_ty).llvm_type(self.ccx).ptr_to(); let base = consts::ptrcast(base, ptr_ty); let info = base::unsized_info(self.ccx, pointee_ty, unsized_ty, old_info); @@ -681,7 +681,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { debug_assert!(common::type_is_immediate(self.ccx, cast_ty)); let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); - let ll_t_out = self.ccx.immediate_llvm_type_of(cast_ty); + let ll_t_out = self.ccx.layout_of(cast_ty).immediate_llvm_type(self.ccx); let llval = operand.llval; let signed = match self.ccx.layout_of(operand.ty).abi { layout::Abi::Scalar(layout::Int(_, signed)) => signed, @@ -734,7 +734,8 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - let llcast_ty = self.ccx.immediate_llvm_type_of(cast_ty); + let llcast_ty = self.ccx.layout_of(cast_ty) + .immediate_llvm_type(self.ccx); consts::ptrcast(data_ptr, llcast_ty) } } else { @@ -1041,7 +1042,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let result = result.unwrap_or_else(|_| { // We've errored, so we don't have to produce working code. 
- let llty = bcx.ccx.llvm_type_of(ty); + let llty = bcx.ccx.layout_of(ty).llvm_type(bcx.ccx); Const::new(C_undef(llty), ty) }); @@ -1100,7 +1101,7 @@ fn trans_const_adt<'a, 'tcx>( _ => 0, }; let discr_ty = l.field(ccx, 0).ty; - let discr = C_int(ccx.llvm_type_of(discr_ty), discr as i64); + let discr = C_int(ccx.layout_of(discr_ty).llvm_type(ccx), discr as i64); if let layout::Abi::Scalar(_) = l.abi { Const::new(discr, t) } else { @@ -1130,7 +1131,7 @@ fn trans_const_adt<'a, 'tcx>( } else { // Always use null even if it's not the `discrfield`th // field; see #8506. - Const::new(C_null(ccx.llvm_type_of(t)), t) + Const::new(C_null(ccx.layout_of(t).llvm_type(ccx)), t) } } _ => bug!("trans_const_adt: cannot handle type {} repreented as {:#?}", t, l) diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 3c0776d479817..0732720bd1a14 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -109,7 +109,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { -> LvalueRef<'tcx> { debug!("alloca({:?}: {:?})", name, layout); let tmp = bcx.alloca( - bcx.ccx.llvm_type_of(layout.ty), name, layout.over_align(bcx.ccx)); + layout.llvm_type(bcx.ccx), name, layout.over_align(bcx.ccx)); Self::new_sized(tmp, layout, Alignment::AbiAligned) } @@ -189,7 +189,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } else { bcx.load(self.llval, self.alignment.non_abi()) }; - OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout.ty)) + OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout)) } else { OperandValue::Ref(self.llval, self.alignment) }; @@ -223,8 +223,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) } else { assert_eq!(offset, 0); - let ty = ccx.llvm_type_of(field.ty); - bcx.pointercast(self.llval, ty.ptr_to()) + bcx.pointercast(self.llval, field.llvm_type(ccx).ptr_to()) }, llextra: if ccx.shared().type_has_metadata(field.ty) { self.llextra @@ -296,7 +295,7 @@ impl<'a, 'tcx> 
LvalueRef<'tcx> { let byte_ptr = bcx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = ccx.llvm_type_of(field.ty); + let ll_fty = field.llvm_type(ccx); debug!("struct_field_ptr: Field type is {:?}", ll_fty); LvalueRef { @@ -309,7 +308,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { /// Obtain the actual discriminant of a value. pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef { - let cast_to = bcx.ccx.immediate_llvm_type_of(cast_to); + let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx); match *self.layout.layout { layout::Layout::Univariant { .. } | layout::Layout::UntaggedUnion { .. } => return C_uint(cast_to, 0), @@ -357,7 +356,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } layout::Layout::NullablePointer { nndiscr, .. } => { let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; - let zero = C_null(bcx.ccx.llvm_type_of(discr.layout.ty)); + let zero = C_null(discr.layout.llvm_type(bcx.ccx)); bcx.intcast(bcx.icmp(cmp, lldiscr, zero), cast_to, false) } _ => bug!("{} is not an enum", self.layout.ty) @@ -373,7 +372,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { match *self.layout.layout { layout::Layout::General { .. } => { let ptr = self.project_field(bcx, 0); - bcx.store(C_int(bcx.ccx.llvm_type_of(ptr.layout.ty), to as i64), + bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64), ptr.llval, ptr.alignment.non_abi()); } layout::Layout::NullablePointer { nndiscr, .. } => { @@ -394,7 +393,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { base::call_memset(bcx, llptr, fill_byte, size, align, false); } else { let ptr = self.project_field(bcx, 0); - bcx.store(C_null(bcx.ccx.llvm_type_of(ptr.layout.ty)), + bcx.store(C_null(ptr.layout.llvm_type(bcx.ccx)), ptr.llval, ptr.alignment.non_abi()); } } @@ -523,7 +522,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Cast the lvalue pointer type to the new // array or slice type (*[%_; new_len]). 
subslice.llval = bcx.pointercast(subslice.llval, - bcx.ccx.llvm_type_of(subslice.layout.ty).ptr_to()); + subslice.layout.llvm_type(bcx.ccx).ptr_to()); subslice } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 21d2711df83c7..2cad096448497 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -23,7 +23,7 @@ use common::{self, CrateContext, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::Instance; use abi::{ArgAttribute, FnType}; -use type_of; +use type_of::{self, LayoutLlvmExt}; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -465,7 +465,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ty::TyAdt(def, _) if def.is_box() => arg.layout.ty.boxed_ty(), _ => bug!() }; - let data_llty = bcx.ccx.llvm_type_of(pointee); + let data_llty = bcx.ccx.layout_of(pointee).llvm_type(bcx.ccx); let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee); a = bcx.pointercast(a, data_llty.ptr_to()); diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 24f6a84f62072..d1922f8bf99b0 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -84,7 +84,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { pub fn new_zst(ccx: &CrateContext<'a, 'tcx>, layout: FullLayout<'tcx>) -> OperandRef<'tcx> { assert!(layout.is_zst()); - let llty = ccx.llvm_type_of(layout.ty); + let llty = layout.llvm_type(ccx); // FIXME(eddyb) ZSTs should always be immediate, not pairs. // This hack only exists to unpack a constant undef pair. Const::new(C_undef(llty), layout.ty).to_operand(ccx) @@ -119,7 +119,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { /// Immediate aggregate with the two values. 
pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Pair(a, b) = self.val { - let llty = bcx.ccx.llvm_type_of(self.layout.ty); + let llty = self.layout.llvm_type(bcx.ccx); debug!("Operand::pack_if_pair: packing {:?} into {:?}", self, llty); // Reconstruct the immediate aggregate. let mut llpair = C_undef(llty); @@ -142,10 +142,10 @@ impl<'a, 'tcx> OperandRef<'tcx> { debug!("Operand::unpack_if_pair: unpacking {:?}", self); let a = bcx.extract_value(llval, self.layout.llvm_field_index(0)); - let a = base::to_immediate(bcx, a, self.layout.field(bcx.ccx, 0).ty); + let a = base::to_immediate(bcx, a, self.layout.field(bcx.ccx, 0)); let b = bcx.extract_value(llval, self.layout.llvm_field_index(1)); - let b = base::to_immediate(bcx, b, self.layout.field(bcx.ccx, 1).ty); + let b = base::to_immediate(bcx, b, self.layout.field(bcx.ccx, 1)); self.val = OperandValue::Pair(a, b); } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 5c24e4b5536d2..c7cb69339f7f3 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -26,7 +26,7 @@ use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_i use consts; use monomorphize; use type_::Type; -use type_of; +use type_of::{self, LayoutLlvmExt}; use value::Value; use super::{MirContext, LocalRef}; @@ -259,7 +259,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. 
- let llcast_ty = bcx.ccx.immediate_llvm_type_of(cast.ty); + let llcast_ty = cast.immediate_llvm_type(bcx.ccx); let llval = bcx.pointercast(data_ptr, llcast_ty); OperandValue::Immediate(llval) } @@ -272,8 +272,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let r_t_in = CastTy::from_ty(operand.layout.ty) .expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); - let ll_t_in = bcx.ccx.immediate_llvm_type_of(operand.layout.ty); - let ll_t_out = bcx.ccx.immediate_llvm_type_of(cast.ty); + let ll_t_in = operand.layout.immediate_llvm_type(bcx.ccx); + let ll_t_out = cast.immediate_llvm_type(bcx.ccx); let llval = operand.immediate(); if let Layout::General { ref discr_range, .. } = *operand.layout.layout { @@ -453,7 +453,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let llsize = C_usize(bcx.ccx, size.bytes()); let llalign = C_usize(bcx.ccx, align.abi()); let box_layout = bcx.ccx.layout_of(bcx.tcx().mk_box(content_ty)); - let llty_ptr = bcx.ccx.llvm_type_of(box_layout.ty); + let llty_ptr = box_layout.llvm_type(bcx.ccx); // Allocate space: let def_id = match bcx.tcx().lang_items().require(ExchangeMallocFnLangItem) { diff --git a/src/librustc_trans/trans_item.rs b/src/librustc_trans/trans_item.rs index a452ed21aef49..991f99e0f6c99 100644 --- a/src/librustc_trans/trans_item.rs +++ b/src/librustc_trans/trans_item.rs @@ -23,9 +23,11 @@ use common; use declare; use llvm; use monomorphize::Instance; +use type_of::LayoutLlvmExt; use rustc::hir; use rustc::middle::trans::{Linkage, Visibility}; use rustc::ty::{self, TyCtxt, TypeFoldable}; +use rustc::ty::layout::LayoutOf; use syntax::ast; use syntax::attr; use syntax_pos::Span; @@ -172,7 +174,7 @@ fn predefine_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let def_id = ccx.tcx().hir.local_def_id(node_id); let instance = Instance::mono(ccx.tcx(), def_id); let ty = common::instance_ty(ccx.tcx(), &instance); - let llty = ccx.llvm_type_of(ty); + let llty = ccx.layout_of(ty).llvm_type(ccx); 
let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| { ccx.sess().span_fatal(ccx.tcx().hir.span(node_id), diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index e9547bd746bfd..60c2b5397391a 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -21,10 +21,10 @@ pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> match ty.sty { ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) | ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if ccx.shared().type_has_metadata(t) => { - ccx.llvm_type_of(t).ptr_to() + ccx.layout_of(t).llvm_type(ccx).ptr_to() } ty::TyAdt(def, _) if def.is_box() => { - ccx.llvm_type_of(ty.boxed_ty()).ptr_to() + ccx.layout_of(ty.boxed_ty()).llvm_type(ccx).ptr_to() } _ => bug!("expected fat ptr ty but got {:?}", ty) } @@ -53,7 +53,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // unsized). cx.str_slice_type() } else { - let ptr_ty = cx.llvm_type_of(ty).ptr_to(); + let ptr_ty = cx.layout_of(ty).llvm_type(cx).ptr_to(); let info_ty = unsized_info_ty(cx, ty); Type::struct_(cx, &[ Type::array(&Type::i8(cx), 0), @@ -64,7 +64,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ], false) } } else { - cx.llvm_type_of(ty).ptr_to() + cx.layout_of(ty).llvm_type(cx).ptr_to() } }; match ty.sty { @@ -89,13 +89,15 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, layout::Int(i, _) => Type::from_integer(cx, i), layout::F32 => Type::f32(cx), layout::F64 => Type::f64(cx), - layout::Pointer => cx.llvm_type_of(layout::Pointer.to_ty(cx.tcx())) + layout::Pointer => { + cx.layout_of(layout::Pointer.to_ty(cx.tcx())).llvm_type(cx) + } }; return llty; } if let layout::Abi::Vector { .. 
} = layout.abi { - return Type::vector(&cx.llvm_type_of(layout.field(cx, 0).ty), + return Type::vector(&layout.field(cx, 0).llvm_type(cx), layout.fields.count() as u64); } @@ -125,7 +127,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } layout::FieldPlacement::Array { count, .. } => { - Type::array(&cx.llvm_type_of(layout.field(cx, 0).ty), count) + Type::array(&layout.field(cx, 0).llvm_type(cx), count) } layout::FieldPlacement::Arbitrary { .. } => { match name { @@ -161,8 +163,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, result.push(Type::array(&Type::i8(cx), padding.bytes())); debug!(" padding before: {:?}", padding); - let llty = cx.llvm_type_of(field.ty); - result.push(llty); + result.push(field.llvm_type(cx)); if layout.is_packed() { assert_eq!(padding.bytes(), 0); @@ -206,12 +207,16 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { let layout = self.layout_of(ty); (layout.size(self), layout.align(self)) } +} - /// Returns alignment if it is different than the primitive alignment. - pub fn over_align_of(&self, ty: Ty<'tcx>) -> Option { - self.layout_of(ty).over_align(self) - } +pub trait LayoutLlvmExt<'tcx> { + fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; + fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; + fn over_align(&self, ccx: &CrateContext) -> Option; + fn llvm_field_index(&self, index: usize) -> u64; +} +impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> { /// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. /// The pointee type of the pointer in `LvalueRef` is always this type. /// For sized types, it is also the right LLVM type for an `alloca` @@ -223,56 +228,45 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { /// with the inner-most trailing unsized field using the "minimal unit" /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. 
- pub fn llvm_type_of(&self, ty: Ty<'tcx>) -> Type { + fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { // Check the cache. - if let Some(&llty) = self.lltypes().borrow().get(&ty) { + if let Some(&llty) = ccx.lltypes().borrow().get(&self.ty) { return llty; } - debug!("type_of {:?}", ty); + debug!("llvm_type({:#?})", self); - assert!(!ty.has_escaping_regions(), "{:?} has escaping regions", ty); + assert!(!self.ty.has_escaping_regions(), "{:?} has escaping regions", self.ty); // Make sure lifetimes are erased, to avoid generating distinct LLVM // types for Rust types that only differ in the choice of lifetimes. - let normal_ty = self.tcx().erase_regions(&ty); - - if ty != normal_ty { - let llty = self.llvm_type_of(normal_ty); - debug!("--> normalized {:?} to {:?} llty={:?}", ty, normal_ty, llty); - self.lltypes().borrow_mut().insert(ty, llty); - return llty; - } + let normal_ty = ccx.tcx().erase_regions(&self.ty); let mut defer = None; - let llty = uncached_llvm_type(self, ty, &mut defer); - - debug!("--> mapped ty={:?} to llty={:?}", ty, llty); + let llty = if self.ty != normal_ty { + ccx.layout_of(normal_ty).llvm_type(ccx) + } else { + uncached_llvm_type(ccx, self.ty, &mut defer) + }; + debug!("--> mapped {:#?} to llty={:?}", self, llty); - self.lltypes().borrow_mut().insert(ty, llty); + ccx.lltypes().borrow_mut().insert(self.ty, llty); if let Some((mut llty, layout)) = defer { - llty.set_struct_body(&struct_llfields(self, layout), layout.is_packed()) + llty.set_struct_body(&struct_llfields(ccx, layout), layout.is_packed()) } llty } - pub fn immediate_llvm_type_of(&self, ty: Ty<'tcx>) -> Type { - if ty.is_bool() { - Type::i1(self) + fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { + if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = self.abi { + Type::i1(ccx) } else { - self.llvm_type_of(ty) + self.llvm_type(ccx) } } -} - -pub trait LayoutLlvmExt { - fn over_align(&self, ccx: &CrateContext) -> Option; - fn 
llvm_field_index(&self, index: usize) -> u64; -} -impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> { fn over_align(&self, ccx: &CrateContext) -> Option { let align = self.align(ccx); let primitive_align = self.primitive_align(ccx); From 026214c85830719900133eb92a31a1e4dce8dd20 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Thu, 21 Sep 2017 01:56:20 +0300 Subject: [PATCH 39/69] rustc: collapse Layout::FatPointer into Layout::Univariant. --- src/librustc/ty/layout.rs | 89 +++++++++++++++++++++------------------ 1 file changed, 47 insertions(+), 42 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index cdb0d9f4451b6..3e64a6a4c5d36 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -849,9 +849,6 @@ pub enum Layout { /// TyArray, TySlice or TyStr. Array, - /// TyRawPtr or TyRef with a !Sized pointee. The primitive is the metadata. - FatPointer, - // Remaining variants are all ADTs such as structs, enums or tuples. /// Single-case enums, and structs/tuples. @@ -1132,7 +1129,7 @@ impl<'a, 'tcx> Layout { memory_index: vec![0, 1] }; Ok(tcx.intern_layout(CachedLayout { - layout: Layout::FatPointer, + layout: Layout::Univariant, fields, abi: Abi::Aggregate { sized: true, @@ -1743,8 +1740,7 @@ impl<'a, 'tcx> Layout { // via representation tweaks) size info beyond total size. Layout::Scalar | Layout::Vector | - Layout::Array | - Layout::FatPointer { .. } => { + Layout::Array => { debug!("print-type-size t: `{:?}` adt other", ty); record(adt_kind.into(), None, Vec::new()) } @@ -2047,9 +2043,23 @@ impl<'a, 'tcx> FullLayout<'tcx> { fn field_type_unnormalized(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, i: usize) -> Ty<'tcx> { let ptr_field_type = |pointee: Ty<'tcx>| { assert!(i < 2); + let mk_ptr = |ty: Ty<'tcx>| { + match self.ty.sty { + ty::TyRef(r, ty::TypeAndMut { mutbl, .. }) => { + tcx.mk_ref(r, ty::TypeAndMut { ty, mutbl }) + } + ty::TyRawPtr(ty::TypeAndMut { mutbl, .. 
}) => { + tcx.mk_ptr(ty::TypeAndMut { ty, mutbl }) + } + ty::TyAdt(def, _) if def.is_box() => { + tcx.mk_box(ty) + } + _ => bug!() + } + }; let slice = |element: Ty<'tcx>| { if i == 0 { - tcx.mk_mut_ptr(element) + mk_ptr(element) } else { tcx.types.usize } @@ -2057,7 +2067,13 @@ impl<'a, 'tcx> FullLayout<'tcx> { match tcx.struct_tail(pointee).sty { ty::TySlice(element) => slice(element), ty::TyStr => slice(tcx.types.u8), - ty::TyDynamic(..) => Pointer.to_ty(tcx), + ty::TyDynamic(..) => { + if i == 0 { + mk_ptr(tcx.mk_nil()) + } else { + Pointer.to_ty(tcx) + } + } _ => bug!("FullLayout::field_type({:?}): not applicable", self) } }; @@ -2187,9 +2203,16 @@ impl<'a, 'tcx> FullLayout<'tcx> { { let tcx = cx.tcx(); match (self.layout, self.abi, &self.ty.sty) { - (&Layout::Scalar, Abi::Scalar(Pointer), _) if !self.ty.is_unsafe_ptr() => { + // FIXME(eddyb) check this via value ranges on scalars. + (&Layout::Scalar, Abi::Scalar(Pointer), &ty::TyRef(..)) | + (&Layout::Scalar, Abi::Scalar(Pointer), &ty::TyFnPtr(..)) => { + Ok(Some((Size::from_bytes(0), Pointer))) + } + (&Layout::Scalar, Abi::Scalar(Pointer), &ty::TyAdt(def, _)) if def.is_box() => { Ok(Some((Size::from_bytes(0), Pointer))) } + + // FIXME(eddyb) check this via value ranges on scalars. (&Layout::General { discr, .. }, _, &ty::TyAdt(def, _)) => { if def.discriminants(tcx).all(|d| d.to_u128_unchecked() != 0) { Ok(Some((self.fields.offset(0), discr))) @@ -2198,28 +2221,28 @@ impl<'a, 'tcx> FullLayout<'tcx> { } } - (&Layout::FatPointer, _, _) if !self.ty.is_unsafe_ptr() => { - Ok(Some((self.fields.offset(FAT_PTR_ADDR), Pointer))) - } - // Is this the NonZero lang item wrapping a pointer or integer type? 
(_, _, &ty::TyAdt(def, _)) if Some(def.did) == tcx.lang_items().non_zero() => { let field = self.field(cx, 0)?; - match (field.layout, field.abi) { - (&Layout::Scalar, Abi::Scalar(value)) => { - Ok(Some((self.fields.offset(0), value))) - } - (&Layout::FatPointer, _) => { - Ok(Some((self.fields.offset(0) + - field.fields.offset(FAT_PTR_ADDR), - Pointer))) - } - _ => Ok(None) + let offset = self.fields.offset(0); + if let Abi::Scalar(value) = field.abi { + Ok(Some((offset, value))) + } else if let ty::TyRawPtr(_) = field.ty.sty { + // If `NonZero` contains a non-scalar `*T`, it's + // a fat pointer, which starts with a thin pointer. + Ok(Some((offset, Pointer))) + } else { + Ok(None) } } // Perhaps one of the fields is non-zero, let's recurse and find out. - (&Layout::Univariant, _, _) => { + _ => { + if let FieldPlacement::Array { count, .. } = *self.fields { + if count > 0 { + return self.field(cx, 0)?.non_zero_field(cx); + } + } for i in 0..self.fields.count() { let r = self.field(cx, i)?.non_zero_field(cx)?; if let Some((offset, primitive)) = r { @@ -2228,23 +2251,6 @@ impl<'a, 'tcx> FullLayout<'tcx> { } Ok(None) } - - // Is this a fixed-size array of something non-zero - // with at least one element? - (_, _, &ty::TyArray(ety, _)) => { - if self.fields.count() != 0 { - cx.layout_of(ety)?.non_zero_field(cx) - } else { - Ok(None) - } - } - - (_, _, &ty::TyProjection(_)) | (_, _, &ty::TyAnon(..)) => { - bug!("FullLayout::non_zero_field: {:#?} not normalized", self); - } - - // Anything else is not a non-zero type. - _ => Ok(None) } } } @@ -2260,7 +2266,6 @@ impl<'gcx> HashStable> for Layout { Scalar => {} Vector => {} Array => {} - FatPointer => {} Univariant => {} UntaggedUnion => {} General { From b723af284a22e6c0f2d85c104067138c33f8859d Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Thu, 21 Sep 2017 20:40:50 +0300 Subject: [PATCH 40/69] rustc_trans: go through layouts uniformly for fat pointers and variants. 
--- src/librustc/lint/context.rs | 6 +- src/librustc/ty/layout.rs | 233 ++++++++++++----------- src/librustc_lint/types.rs | 2 +- src/librustc_llvm/ffi.rs | 5 - src/librustc_trans/abi.rs | 12 +- src/librustc_trans/base.rs | 17 +- src/librustc_trans/cabi_s390x.rs | 4 +- src/librustc_trans/cabi_x86.rs | 4 +- src/librustc_trans/cabi_x86_64.rs | 6 +- src/librustc_trans/common.rs | 32 +--- src/librustc_trans/context.rs | 44 ++--- src/librustc_trans/debuginfo/metadata.rs | 14 +- src/librustc_trans/meth.rs | 6 +- src/librustc_trans/mir/analyze.rs | 14 +- src/librustc_trans/mir/block.rs | 33 ++-- src/librustc_trans/mir/constant.rs | 21 +- src/librustc_trans/mir/lvalue.rs | 61 ++---- src/librustc_trans/mir/mod.rs | 35 +--- src/librustc_trans/mir/operand.rs | 6 +- src/librustc_trans/mir/rvalue.rs | 16 +- src/librustc_trans/type_.rs | 4 - src/librustc_trans/type_of.rs | 208 ++++++++++---------- src/test/codegen/adjustments.rs | 7 +- src/test/codegen/function-arguments.rs | 14 +- src/test/codegen/refs.rs | 6 +- 25 files changed, 363 insertions(+), 447 deletions(-) diff --git a/src/librustc/lint/context.rs b/src/librustc/lint/context.rs index a080f968da44e..4496e07b13814 100644 --- a/src/librustc/lint/context.rs +++ b/src/librustc/lint/context.rs @@ -35,7 +35,7 @@ use rustc_serialize::{Decoder, Decodable, Encoder, Encodable}; use session::{config, early_error, Session}; use traits::Reveal; use ty::{self, TyCtxt, Ty}; -use ty::layout::{FullLayout, LayoutError, LayoutOf}; +use ty::layout::{LayoutError, LayoutOf, TyLayout}; use util::nodemap::FxHashMap; use std::default::Default as StdDefault; @@ -628,9 +628,9 @@ impl<'a, 'tcx> LateContext<'a, 'tcx> { } impl<'a, 'tcx> LayoutOf> for &'a LateContext<'a, 'tcx> { - type FullLayout = Result, LayoutError<'tcx>>; + type TyLayout = Result, LayoutError<'tcx>>; - fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { + fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { (self.tcx, self.param_env.reveal_all()).layout_of(ty) } } diff --git 
a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 3e64a6a4c5d36..7bf7d81037d9e 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -23,7 +23,7 @@ use std::fmt; use std::i64; use std::iter; use std::mem; -use std::ops::{Add, Sub, Mul, AddAssign, RangeInclusive}; +use std::ops::{Add, Sub, Mul, AddAssign, Deref, RangeInclusive}; use ich::StableHashingContext; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, @@ -907,6 +907,7 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { #[derive(PartialEq, Eq, Hash, Debug)] pub struct CachedLayout { + pub variant_index: Option, pub layout: Layout, pub fields: FieldPlacement, pub abi: Abi, @@ -948,6 +949,7 @@ impl<'a, 'tcx> Layout { let dl = cx.data_layout(); let scalar = |value| { tcx.intern_layout(CachedLayout { + variant_index: None, layout: Layout::Scalar, fields: FieldPlacement::Union(0), abi: Abi::Scalar(value) @@ -962,7 +964,7 @@ impl<'a, 'tcx> Layout { /// A univariant, but part of an enum. EnumVariant(Integer), } - let univariant_uninterned = |fields: &[FullLayout], repr: &ReprOptions, kind| { + let univariant_uninterned = |fields: &[TyLayout], repr: &ReprOptions, kind| { let packed = repr.packed(); if packed && repr.align > 0 { bug!("struct cannot be packed and aligned"); @@ -1085,6 +1087,7 @@ impl<'a, 'tcx> Layout { } Ok(CachedLayout { + variant_index: None, layout: Layout::Univariant, fields: FieldPlacement::Arbitrary { offsets, @@ -1099,7 +1102,7 @@ impl<'a, 'tcx> Layout { } }) }; - let univariant = |fields: &[FullLayout], repr: &ReprOptions, kind| { + let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| { Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?)) }; assert!(!ty.has_infer_types()); @@ -1129,6 +1132,7 @@ impl<'a, 'tcx> Layout { memory_index: vec![0, 1] }; Ok(tcx.intern_layout(CachedLayout { + variant_index: None, layout: Layout::Univariant, fields, abi: Abi::Aggregate { @@ -1185,6 +1189,7 @@ impl<'a, 'tcx> Layout { 
.ok_or(LayoutError::SizeOverflow(ty))?; tcx.intern_layout(CachedLayout { + variant_index: None, layout: Layout::Array, fields: FieldPlacement::Array { stride: element_size, @@ -1202,6 +1207,7 @@ impl<'a, 'tcx> Layout { ty::TySlice(element) => { let element = cx.layout_of(element)?; tcx.intern_layout(CachedLayout { + variant_index: None, layout: Layout::Array, fields: FieldPlacement::Array { stride: element.size(dl), @@ -1218,6 +1224,7 @@ impl<'a, 'tcx> Layout { } ty::TyStr => { tcx.intern_layout(CachedLayout { + variant_index: None, layout: Layout::Array, fields: FieldPlacement::Array { stride: Size::from_bytes(1), @@ -1286,6 +1293,7 @@ impl<'a, 'tcx> Layout { } }; tcx.intern_layout(CachedLayout { + variant_index: None, layout: Layout::Vector, fields: FieldPlacement::Array { stride: element.size(tcx), @@ -1343,6 +1351,7 @@ impl<'a, 'tcx> Layout { } return Ok(tcx.intern_layout(CachedLayout { + variant_index: None, layout: Layout::UntaggedUnion, fields: FieldPlacement::Union(variants[0].len()), abi: Abi::Aggregate { @@ -1372,7 +1381,11 @@ impl<'a, 'tcx> Layout { else { StructKind::AlwaysSized } }; - return univariant(&variants[0], &def.repr, kind); + let mut cached = univariant_uninterned(&variants[0], &def.repr, kind)?; + if def.is_enum() { + cached.variant_index = Some(0); + } + return Ok(tcx.intern_layout(cached)); } let no_explicit_discriminants = def.variants.iter().enumerate() @@ -1389,12 +1402,14 @@ impl<'a, 'tcx> Layout { for (field_index, field) in variants[i].iter().enumerate() { if let Some((offset, discr)) = field.non_zero_field(cx)? { - let st = vec![ + let mut st = vec![ univariant_uninterned(&variants[0], &def.repr, StructKind::AlwaysSized)?, univariant_uninterned(&variants[1], &def.repr, StructKind::AlwaysSized)? 
]; + st[0].variant_index = Some(0); + st[1].variant_index = Some(1); let offset = st[i].fields.offset(field_index) + offset; let mut abi = st[i].abi; if offset.bytes() == 0 && discr.size(dl) == abi.size(dl) { @@ -1418,6 +1433,7 @@ impl<'a, 'tcx> Layout { _ => {} } return Ok(tcx.intern_layout(CachedLayout { + variant_index: None, layout: Layout::NullablePointer { nndiscr: i as u64, @@ -1454,13 +1470,13 @@ impl<'a, 'tcx> Layout { assert_eq!(Integer::for_abi_align(dl, start_align), None); // Create the set of structs that represent each variant. - let mut variants = variants.into_iter().map(|field_layouts| { - let st = univariant_uninterned(&field_layouts, + let mut variants = variants.into_iter().enumerate().map(|(i, field_layouts)| { + let mut st = univariant_uninterned(&field_layouts, &def.repr, StructKind::EnumVariant(min_ity))?; + st.variant_index = Some(i); // Find the first field we can't move later // to make room for a larger discriminant. - for i in st.fields.index_by_increasing_offset() { - let field = field_layouts[i]; + for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) { let field_align = field.align(dl); if !field.is_zst() || field_align.abi() != 1 { start_align = start_align.min(field_align); @@ -1539,6 +1555,7 @@ impl<'a, 'tcx> Layout { let discr = Int(ity, signed); tcx.intern_layout(CachedLayout { + variant_index: None, layout: Layout::General { discr, @@ -1587,7 +1604,7 @@ impl<'a, 'tcx> Layout { fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, param_env: ty::ParamEnv<'tcx>, - layout: FullLayout<'tcx>) { + layout: TyLayout<'tcx>) { // If we are running with `-Zprint-type-sizes`, record layouts for // dumping later. 
Ignore layouts that are done with non-empty // environments or non-monomorphic layouts, as the user only wants @@ -1607,7 +1624,7 @@ impl<'a, 'tcx> Layout { fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, param_env: ty::ParamEnv<'tcx>, - layout: FullLayout<'tcx>) { + layout: TyLayout<'tcx>) { let cx = (tcx, param_env); // (delay format until we actually need it) let record = |kind, opt_discr_size, variants| { @@ -1644,7 +1661,7 @@ impl<'a, 'tcx> Layout { let build_variant_info = |n: Option, flds: &[ast::Name], - layout: FullLayout<'tcx>| { + layout: TyLayout<'tcx>| { let mut min_size = Size::from_bytes(0); let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| { match layout.field(cx, i) { @@ -1685,7 +1702,7 @@ impl<'a, 'tcx> Layout { } }; - match *layout.layout { + match layout.layout { Layout::Univariant => { let variant_names = || { adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::>() @@ -1723,7 +1740,7 @@ impl<'a, 'tcx> Layout { layout.for_variant(i)) }) .collect(); - record(adt_kind.into(), match *layout.layout { + record(adt_kind.into(), match layout.layout { Layout::General { discr, .. } => Some(discr.size(tcx)), _ => None }, variant_infos); @@ -1901,12 +1918,16 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { /// layouts for which Rust types do not exist, such as enum variants /// or synthetic fields of enums (i.e. discriminants) and fat pointers. 
#[derive(Copy, Clone, Debug)] -pub struct FullLayout<'tcx> { +pub struct TyLayout<'tcx> { pub ty: Ty<'tcx>, - pub variant_index: Option, - pub layout: &'tcx Layout, - pub fields: &'tcx FieldPlacement, - pub abi: Abi, + cached: &'tcx CachedLayout +} + +impl<'tcx> Deref for TyLayout<'tcx> { + type Target = &'tcx CachedLayout; + fn deref(&self) -> &&'tcx CachedLayout { + &self.cached + } } pub trait HasTyCtxt<'tcx>: HasDataLayout { @@ -1937,29 +1958,42 @@ impl<'a, 'gcx, 'tcx, T: Copy> HasTyCtxt<'gcx> for (TyCtxt<'a, 'gcx, 'tcx>, T) { } } +pub trait MaybeResult { + fn map_same T>(self, f: F) -> Self; +} + +impl MaybeResult for T { + fn map_same T>(self, f: F) -> Self { + f(self) + } +} + +impl MaybeResult for Result { + fn map_same T>(self, f: F) -> Self { + self.map(f) + } +} + pub trait LayoutOf { - type FullLayout; + type TyLayout; - fn layout_of(self, ty: T) -> Self::FullLayout; + fn layout_of(self, ty: T) -> Self::TyLayout; } impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx>) { - type FullLayout = Result, LayoutError<'tcx>>; + type TyLayout = Result, LayoutError<'tcx>>; /// Computes the layout of a type. Note that this implicitly /// executes in "reveal all" mode. 
#[inline] - fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { + fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { let (tcx, param_env) = self; let ty = tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all()); let cached = tcx.layout_raw(param_env.reveal_all().and(ty))?; - let layout = FullLayout { + let layout = TyLayout { ty, - variant_index: None, - layout: &cached.layout, - fields: &cached.fields, - abi: cached.abi + cached }; // NB: This recording is normally disabled; when enabled, it @@ -1976,22 +2010,19 @@ impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx>) { - type FullLayout = Result, LayoutError<'tcx>>; + type TyLayout = Result, LayoutError<'tcx>>; /// Computes the layout of a type. Note that this implicitly /// executes in "reveal all" mode. #[inline] - fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { + fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { let (tcx_at, param_env) = self; let ty = tcx_at.tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all()); let cached = tcx_at.layout_raw(param_env.reveal_all().and(ty))?; - let layout = FullLayout { + let layout = TyLayout { ty, - variant_index: None, - layout: &cached.layout, - fields: &cached.fields, - abi: cached.abi + cached }; // NB: This recording is normally disabled; when enabled, it @@ -2006,79 +2037,57 @@ impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, } } -impl<'a, 'tcx> FullLayout<'tcx> { +impl<'a, 'tcx> TyLayout<'tcx> { pub fn for_variant(&self, variant_index: usize) -> Self { - let variants = match self.ty.sty { - ty::TyAdt(def, _) if def.is_enum() => &def.variants[..], - _ => &[] - }; - let count = if variants.is_empty() { - 0 - } else { - variants[variant_index].fields.len() - }; - - let (layout, fields, abi) = match *self.layout { - Layout::Univariant => (self.layout, self.fields, self.abi), - + let cached = match self.layout { 
Layout::NullablePointer { ref variants, .. } | Layout::General { ref variants, .. } => { - let variant = &variants[variant_index]; - (&variant.layout, &variant.fields, variant.abi) + &variants[variant_index] } - _ => bug!() + _ => self.cached }; - assert_eq!(fields.count(), count); - - FullLayout { - variant_index: Some(variant_index), - layout, - fields, - abi, - ..*self + assert_eq!(cached.variant_index, Some(variant_index)); + + TyLayout { + ty: self.ty, + cached } } - fn field_type_unnormalized(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, i: usize) -> Ty<'tcx> { - let ptr_field_type = |pointee: Ty<'tcx>| { + pub fn field(&self, cx: C, i: usize) -> C::TyLayout + where C: LayoutOf> + HasTyCtxt<'tcx>, + C::TyLayout: MaybeResult> + { + let tcx = cx.tcx(); + let ptr_field_layout = |pointee: Ty<'tcx>| { assert!(i < 2); - let mk_ptr = |ty: Ty<'tcx>| { - match self.ty.sty { - ty::TyRef(r, ty::TypeAndMut { mutbl, .. }) => { - tcx.mk_ref(r, ty::TypeAndMut { ty, mutbl }) - } - ty::TyRawPtr(ty::TypeAndMut { mutbl, .. }) => { - tcx.mk_ptr(ty::TypeAndMut { ty, mutbl }) - } - ty::TyAdt(def, _) if def.is_box() => { - tcx.mk_box(ty) - } - _ => bug!() - } - }; - let slice = |element: Ty<'tcx>| { - if i == 0 { - mk_ptr(element) - } else { - tcx.types.usize - } - }; - match tcx.struct_tail(pointee).sty { - ty::TySlice(element) => slice(element), - ty::TyStr => slice(tcx.types.u8), + + // Reuse the fat *T type as its own thin pointer data field. + // This provides information about e.g. DST struct pointees + // (which may have no non-DST form), and will work as long + // as the `Abi` or `FieldPlacement` is checked by users. + if i == 0 { + return cx.layout_of(Pointer.to_ty(tcx)).map_same(|mut ptr_layout| { + ptr_layout.ty = self.ty; + ptr_layout + }); + } + + let meta_ty = match tcx.struct_tail(pointee).sty { + ty::TySlice(_) | + ty::TyStr => tcx.types.usize, ty::TyDynamic(..) 
=> { - if i == 0 { - mk_ptr(tcx.mk_nil()) - } else { - Pointer.to_ty(tcx) - } + // FIXME(eddyb) use an usize/fn() array with + // the correct number of vtables slots. + tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil()) } - _ => bug!("FullLayout::field_type({:?}): not applicable", self) - } + _ => bug!("TyLayout::field_type({:?}): not applicable", self) + }; + cx.layout_of(meta_ty) }; - match self.ty.sty { + cx.layout_of(match self.ty.sty { ty::TyBool | ty::TyChar | ty::TyInt(_) | @@ -2089,16 +2098,16 @@ impl<'a, 'tcx> FullLayout<'tcx> { ty::TyFnDef(..) | ty::TyDynamic(..) | ty::TyForeign(..) => { - bug!("FullLayout::field_type({:?}): not applicable", self) + bug!("TyLayout::field_type({:?}): not applicable", self) } // Potentially-fat pointers. ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { - ptr_field_type(pointee) + return ptr_field_layout(pointee); } ty::TyAdt(def, _) if def.is_box() => { - ptr_field_type(self.ty.boxed_ty()) + return ptr_field_layout(self.ty.boxed_ty()); } // Arrays and slices. @@ -2126,16 +2135,16 @@ impl<'a, 'tcx> FullLayout<'tcx> { ty::TyAdt(def, substs) => { let v = if def.is_enum() { match self.variant_index { - None => match *self.layout { + None => match self.layout { // Discriminant field for enums (where applicable). Layout::General { discr, .. } | Layout::NullablePointer { discr, .. } => { - return [discr.to_ty(tcx)][i]; + return cx.layout_of([discr.to_ty(tcx)][i]); + } + _ => { + bug!("TyLayout::field_type: enum `{}` has no discriminant", + self.ty) } - _ if def.variants.len() > 1 => return [][i], - - // Enums with one variant behave like structs. - _ => 0 }, Some(v) => v } @@ -2148,16 +2157,9 @@ impl<'a, 'tcx> FullLayout<'tcx> { ty::TyProjection(_) | ty::TyAnon(..) 
| ty::TyParam(_) | ty::TyInfer(_) | ty::TyError => { - bug!("FullLayout::field_type: unexpected type `{}`", self.ty) + bug!("TyLayout::field_type: unexpected type `{}`", self.ty) } - } - } - - pub fn field> + HasTyCtxt<'tcx>>(&self, - cx: C, - i: usize) - -> C::FullLayout { - cx.layout_of(self.field_type_unnormalized(cx.tcx(), i)) + }) } /// Returns true if the layout corresponds to an unsized type. @@ -2198,11 +2200,11 @@ impl<'a, 'tcx> FullLayout<'tcx> { // FIXME(eddyb) track value ranges and traverse already optimized enums. fn non_zero_field(&self, cx: C) -> Result, LayoutError<'tcx>> - where C: LayoutOf, FullLayout = Result>> + + where C: LayoutOf, TyLayout = Result>> + HasTyCtxt<'tcx> { let tcx = cx.tcx(); - match (self.layout, self.abi, &self.ty.sty) { + match (&self.layout, self.abi, &self.ty.sty) { // FIXME(eddyb) check this via value ranges on scalars. (&Layout::Scalar, Abi::Scalar(Pointer), &ty::TyRef(..)) | (&Layout::Scalar, Abi::Scalar(Pointer), &ty::TyFnPtr(..)) => { @@ -2238,7 +2240,7 @@ impl<'a, 'tcx> FullLayout<'tcx> { // Perhaps one of the fields is non-zero, let's recurse and find out. _ => { - if let FieldPlacement::Array { count, .. } = *self.fields { + if let FieldPlacement::Array { count, .. } = self.fields { if count > 0 { return self.field(cx, 0)?.non_zero_field(cx); } @@ -2341,6 +2343,7 @@ impl<'gcx> HashStable> for Abi { } impl_stable_hash_for!(struct ::ty::layout::CachedLayout { + variant_index, layout, fields, abi diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index dd5e97544c82c..e0c7bc66876ea 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -753,7 +753,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences { bug!("failed to get layout for `{}`: {}", t, e) }); - if let Layout::General { ref variants, discr, .. } = *layout.layout { + if let Layout::General { ref variants, discr, .. 
} = layout.layout { let discr_size = discr.size(cx.tcx).bytes(); debug!("enum `{}` is {} bytes large with layout:\n{:#?}", diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index 0f96a22f897ae..fdc27d4e041cc 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -1316,11 +1316,6 @@ extern "C" { ElementCount: c_uint, Packed: Bool); - pub fn LLVMConstNamedStruct(S: TypeRef, - ConstantVals: *const ValueRef, - Count: c_uint) - -> ValueRef; - /// Enables LLVM debug output. pub fn LLVMRustSetDebug(Enabled: c_int); diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index bd9a460846b19..8fa55b6ef7481 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -36,7 +36,7 @@ use type_of::LayoutLlvmExt; use rustc::hir; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Align, Size, FullLayout}; +use rustc::ty::layout::{self, Align, Size, TyLayout}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc_back::PanicStrategy; @@ -275,7 +275,7 @@ pub trait LayoutExt<'tcx> { fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option; } -impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { +impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { fn is_aggregate(&self) -> bool { match self.abi { layout::Abi::Scalar(_) | @@ -311,7 +311,7 @@ impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { let mut total = Size::from_bytes(0); let mut result = None; - let is_union = match *self.fields { + let is_union = match self.fields { layout::FieldPlacement::Array { count, .. } => { if count > 0 { return self.field(ccx, 0).homogeneous_aggregate(ccx); @@ -424,7 +424,7 @@ impl CastTarget { #[derive(Debug)] pub struct ArgType<'tcx> { kind: ArgKind, - pub layout: FullLayout<'tcx>, + pub layout: TyLayout<'tcx>, /// Cast target, either a single uniform or a pair of registers. pub cast: Option, /// Dummy argument, which is emitted before the real argument. 
@@ -435,7 +435,7 @@ pub struct ArgType<'tcx> { } impl<'a, 'tcx> ArgType<'tcx> { - fn new(layout: FullLayout<'tcx>) -> ArgType<'tcx> { + fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> { ArgType { kind: ArgKind::Direct, layout, @@ -610,7 +610,7 @@ impl<'a, 'tcx> FnType<'tcx> { let fn_ty = instance_ty(ccx.tcx(), &instance); let sig = ty_fn_sig(ccx, fn_ty); let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig); - Self::new(ccx, sig, &[]) + FnType::new(ccx, sig, &[]) } pub fn new(ccx: &CrateContext<'a, 'tcx>, diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 545e986d7d10f..2f252c5e55e0b 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -28,6 +28,7 @@ use super::ModuleSource; use super::ModuleTranslation; use super::ModuleKind; +use abi; use assert_module_sources; use back::link; use back::symbol_export; @@ -40,7 +41,7 @@ use rustc::middle::lang_items::StartFnLangItem; use rustc::middle::trans::{Linkage, Visibility, Stats}; use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes}; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, Align, FullLayout, LayoutOf}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf}; use rustc::ty::maps::Providers; use rustc::dep_graph::{DepNode, DepKind, DepConstructor}; use rustc::middle::cstore::{self, LinkMeta, LinkagePreference}; @@ -68,7 +69,7 @@ use symbol_names_test; use time_graph; use trans_item::{TransItem, BaseTransItemExt, TransItemExt, DefPathBasedNames}; use type_::Type; -use type_of::{self, LayoutLlvmExt}; +use type_of::LayoutLlvmExt; use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet, DefIdSet}; use CrateInfo; @@ -203,8 +204,10 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>, old_info.expect("unsized_info: missing old info for trait upcast") } (_, &ty::TyDynamic(ref data, ..)) => { + let vtable_ptr = ccx.layout_of(ccx.tcx().mk_mut_ptr(target)) + .field(ccx, abi::FAT_PTR_EXTRA); 
consts::ptrcast(meth::get_vtable(ccx, source, data.principal()), - Type::vtable_ptr(ccx)) + vtable_ptr.llvm_type(ccx)) } _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, @@ -255,8 +258,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // i.e. &'a fmt::Debug+Send => &'a fmt::Debug // So we need to pointercast the base to ensure // the types match up. - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty); - (bcx.pointercast(base, llcast_ty), info) + let thin_ptr = dst.layout.field(bcx.ccx, abi::FAT_PTR_ADDR); + (bcx.pointercast(base, thin_ptr.llvm_type(bcx.ccx)), info) } OperandValue::Immediate(base) => { unsize_thin_ptr(bcx, base, src_ty, dst_ty) @@ -371,7 +374,7 @@ pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef { } } -pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::FullLayout) -> ValueRef { +pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef { if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = layout.abi { bcx.trunc(val, Type::i1(bcx.ccx)) } else { @@ -400,7 +403,7 @@ pub fn memcpy_ty<'a, 'tcx>( bcx: &Builder<'a, 'tcx>, dst: ValueRef, src: ValueRef, - layout: FullLayout<'tcx>, + layout: TyLayout<'tcx>, align: Option, ) { let ccx = bcx.ccx; diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs index 2766edb59c1d8..ed598e0a86b68 100644 --- a/src/librustc_trans/cabi_s390x.rs +++ b/src/librustc_trans/cabi_s390x.rs @@ -14,7 +14,7 @@ use abi::{FnType, ArgType, LayoutExt, Reg}; use context::CrateContext; -use rustc::ty::layout::{self, FullLayout}; +use rustc::ty::layout::{self, TyLayout}; fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { if !ret.layout.is_aggregate() && ret.layout.size(ccx).bits() <= 64 { @@ -25,7 +25,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - layout: FullLayout<'tcx>) 
-> bool { + layout: TyLayout<'tcx>) -> bool { match layout.abi { layout::Abi::Scalar(layout::F32) | layout::Abi::Scalar(layout::F64) => true, diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index 7d3621d53e0dd..26f130ec75542 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -11,7 +11,7 @@ use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind}; use common::CrateContext; -use rustc::ty::layout::{self, FullLayout}; +use rustc::ty::layout::{self, TyLayout}; #[derive(PartialEq)] pub enum Flavor { @@ -20,7 +20,7 @@ pub enum Flavor { } fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - layout: FullLayout<'tcx>) -> bool { + layout: TyLayout<'tcx>) -> bool { match layout.abi { layout::Abi::Scalar(layout::F32) | layout::Abi::Scalar(layout::F64) => true, diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 00e8562c2a134..45f2b39b982d0 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -14,7 +14,7 @@ use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind}; use context::CrateContext; -use rustc::ty::layout::{self, Layout, FullLayout, Size}; +use rustc::ty::layout::{self, Layout, TyLayout, Size}; #[derive(Clone, Copy, PartialEq, Debug)] enum Class { @@ -53,7 +53,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) } fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - layout: FullLayout<'tcx>, + layout: TyLayout<'tcx>, cls: &mut [Class], off: Size) -> Result<(), Memory> { @@ -90,7 +90,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) // FIXME(eddyb) have to work around Rust enums for now. // Fix is either guarantee no data where there is no field, // by putting variants in fields, or be more clever. - match *layout.layout { + match layout.layout { Layout::General { .. } | Layout::NullablePointer { .. 
} => return Err(Memory), _ => {} diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index ed8f65834068b..7ccac6069233f 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -54,20 +54,11 @@ pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> } } -pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - let layout = ccx.layout_of(ty); - match layout.abi { - layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true, - - layout::Abi::Aggregate { .. } => layout.is_zst() - } -} - /// Returns true if the type is represented as a pair of immediates. pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { let layout = ccx.layout_of(ty); - match *layout.fields { + match layout.fields { layout::FieldPlacement::Arbitrary { .. } => { // There must be only 2 fields. if layout.fields.count() != 2 { @@ -75,8 +66,8 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) } // The two fields must be both immediates. 
- type_is_immediate(ccx, layout.field(ccx, 0).ty) && - type_is_immediate(ccx, layout.field(ccx, 1).ty) + layout.field(ccx, 0).is_llvm_immediate() && + layout.field(ccx, 1).is_llvm_immediate() } _ => false } @@ -256,16 +247,7 @@ pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef { let len = s.len(); let cs = consts::ptrcast(C_cstr(cx, s, false), cx.layout_of(cx.tcx().mk_str()).llvm_type(cx).ptr_to()); - let empty = C_array(Type::i8(cx), &[]); - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - C_named_struct(cx.str_slice_type(), &[ - empty, - cs, - empty, - C_usize(cx, len as u64), - empty - ]) + C_fat_ptr(cx, cs, C_usize(cx, len as u64)) } pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef { @@ -293,12 +275,6 @@ pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> } } -pub fn C_named_struct(t: Type, elts: &[ValueRef]) -> ValueRef { - unsafe { - llvm::LLVMConstNamedStruct(t.to_ref(), elts.as_ptr(), elts.len() as c_uint) - } -} - pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef { unsafe { return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint); diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 555acaad505f0..83efe6b795872 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -24,14 +24,13 @@ use monomorphize::Instance; use partitioning::CodegenUnit; use type_::Type; -use type_of::LayoutLlvmExt; use rustc_data_structures::base_n; use rustc::middle::trans::Stats; use rustc_data_structures::stable_hasher::StableHashingContextProvider; use rustc::session::config::{self, NoDebugInfo}; use rustc::session::Session; -use rustc::ty::layout::{LayoutError, LayoutOf, FullLayout}; +use rustc::ty::layout::{LayoutError, LayoutOf, TyLayout}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::util::nodemap::FxHashMap; use rustc_trans_utils; @@ -101,9 +100,9 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> { 
/// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details used_statics: RefCell>, - lltypes: RefCell, Type>>, + lltypes: RefCell, Option), Type>>, + scalar_lltypes: RefCell, Type>>, isize_ty: Type, - str_slice_type: Type, dbg_cx: Option>, @@ -378,8 +377,8 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { statics_to_rauw: RefCell::new(Vec::new()), used_statics: RefCell::new(Vec::new()), lltypes: RefCell::new(FxHashMap()), + scalar_lltypes: RefCell::new(FxHashMap()), isize_ty: Type::from_ref(ptr::null_mut()), - str_slice_type: Type::from_ref(ptr::null_mut()), dbg_cx, eh_personality: Cell::new(None), eh_unwind_resume: Cell::new(None), @@ -389,28 +388,19 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { placeholder: PhantomData, }; - let (isize_ty, str_slice_ty, mut local_ccx) = { + let (isize_ty, mut local_ccx) = { // Do a little dance to create a dummy CrateContext, so we can // create some things in the LLVM module of this codegen unit let mut local_ccxs = vec![local_ccx]; - let (isize_ty, str_slice_ty) = { + let isize_ty = { let dummy_ccx = LocalCrateContext::dummy_ccx(shared, local_ccxs.as_mut_slice()); - let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice"); - str_slice_ty.set_struct_body(&[ - Type::array(&Type::i8(&dummy_ccx), 0), - dummy_ccx.layout_of(shared.tcx.mk_str()).llvm_type(&dummy_ccx).ptr_to(), - Type::array(&Type::i8(&dummy_ccx), 0), - Type::isize(&dummy_ccx), - Type::array(&Type::i8(&dummy_ccx), 0) - ], false); - (Type::isize(&dummy_ccx), str_slice_ty) + Type::isize(&dummy_ccx) }; - (isize_ty, str_slice_ty, local_ccxs.pop().unwrap()) + (isize_ty, local_ccxs.pop().unwrap()) }; local_ccx.isize_ty = isize_ty; - local_ccx.str_slice_type = str_slice_ty; local_ccx } @@ -515,10 +505,14 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local().used_statics } - pub fn lltypes<'a>(&'a self) -> &'a RefCell, Type>> { + pub fn lltypes<'a>(&'a self) -> &'a RefCell, Option), Type>> { &self.local().lltypes } + pub fn 
scalar_lltypes<'a>(&'a self) -> &'a RefCell, Type>> { + &self.local().scalar_lltypes + } + pub fn stats<'a>(&'a self) -> &'a RefCell { &self.local().stats } @@ -527,10 +521,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { self.local().isize_ty } - pub fn str_slice_type(&self) -> Type { - self.local().str_slice_type - } - pub fn dbg_cx<'a>(&'a self) -> &'a Option> { &self.local().dbg_cx } @@ -669,9 +659,9 @@ impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CrateContext<'a, 'tcx> { } impl<'a, 'tcx> LayoutOf> for &'a SharedCrateContext<'a, 'tcx> { - type FullLayout = FullLayout<'tcx>; + type TyLayout = TyLayout<'tcx>; - fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { + fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { (self.tcx, ty::ParamEnv::empty(traits::Reveal::All)) .layout_of(ty) .unwrap_or_else(|e| match e { @@ -682,10 +672,10 @@ impl<'a, 'tcx> LayoutOf> for &'a SharedCrateContext<'a, 'tcx> { } impl<'a, 'tcx> LayoutOf> for &'a CrateContext<'a, 'tcx> { - type FullLayout = FullLayout<'tcx>; + type TyLayout = TyLayout<'tcx>; - fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { + fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { self.shared.layout_of(ty) } } diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index f488ebaa4f513..1bb8aec92e57f 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -32,7 +32,7 @@ use rustc::ty::util::TypeIdHasher; use rustc::ich::Fingerprint; use common::{self, CrateContext}; use rustc::ty::{self, AdtKind, Ty}; -use rustc::ty::layout::{self, Align, LayoutOf, Size, FullLayout}; +use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout}; use rustc::session::{Session, config}; use rustc::util::nodemap::FxHashMap; use rustc::util::common::path2cstr; @@ -1052,7 +1052,7 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, //=----------------------------------------------------------------------------- struct 
UnionMemberDescriptionFactory<'tcx> { - layout: FullLayout<'tcx>, + layout: TyLayout<'tcx>, variant: &'tcx ty::VariantDef, span: Span, } @@ -1119,7 +1119,7 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // offset of zero bytes). struct EnumMemberDescriptionFactory<'tcx> { enum_type: Ty<'tcx>, - type_rep: FullLayout<'tcx>, + type_rep: TyLayout<'tcx>, discriminant_type_metadata: Option, containing_scope: DIScope, span: Span, @@ -1129,7 +1129,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { let adt = &self.enum_type.ty_adt_def().unwrap(); - match *self.type_rep.layout { + match self.type_rep.layout { layout::Layout::General { ref variants, .. } => { let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata .expect("")); @@ -1220,7 +1220,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { // of discriminant instead of us having to recover its path. fn compute_field_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &mut String, - layout: FullLayout<'tcx>, + layout: TyLayout<'tcx>, offset: Size, size: Size) { for i in 0..layout.fields.count() { @@ -1300,7 +1300,7 @@ enum EnumDiscriminantInfo { // descriptions of the fields of the variant. This is a rudimentary version of a // full RecursiveTypeDescription. fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - layout: layout::FullLayout<'tcx>, + layout: layout::TyLayout<'tcx>, variant: &'tcx ty::VariantDef, discriminant_info: EnumDiscriminantInfo, containing_scope: DIScope, @@ -1431,7 +1431,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let type_rep = cx.layout_of(enum_type); - let discriminant_type_metadata = match *type_rep.layout { + let discriminant_type_metadata = match type_rep.layout { layout::Layout::NullablePointer { .. } | layout::Layout::Univariant { .. } => None, layout::Layout::General { discr, .. 
} => Some(discriminant_type_metadata(discr)), diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index 8dbef1f8d0845..697f4ecd2bee3 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -9,6 +9,7 @@ // except according to those terms. use llvm::ValueRef; +use abi::FnType; use callee; use common::*; use builder::Builder; @@ -32,10 +33,13 @@ impl<'a, 'tcx> VirtualIndex { VirtualIndex(index as u64 + 3) } - pub fn get_fn(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef { + pub fn get_fn(self, bcx: &Builder<'a, 'tcx>, + llvtable: ValueRef, + fn_ty: &FnType<'tcx>) -> ValueRef { // Load the data pointer from the object. debug!("get_fn({:?}, {:?})", Value(llvtable), self); + let llvtable = bcx.pointercast(llvtable, fn_ty.llvm_type(bcx.ccx).ptr_to().ptr_to()); let ptr = bcx.load_nonnull(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None); // Vtable loads are invariant bcx.set_invariant_load(ptr); diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index bca33a8c3074e..93780aefe4ddb 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -20,6 +20,7 @@ use rustc::mir::traversal; use rustc::ty; use rustc::ty::layout::LayoutOf; use common; +use type_of::LayoutLlvmExt; use super::MirContext; pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { @@ -31,21 +32,14 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() { let ty = mircx.monomorphize(&ty); debug!("local {} has type {:?}", index, ty); - if ty.is_scalar() || - ty.is_box() || - ty.is_region_ptr() || - ty.is_simd() || - mircx.ccx.layout_of(ty).is_zst() - { + if mircx.ccx.layout_of(ty).is_llvm_immediate() { // These sorts of types are immediates that we can store // in an ValueRef without an alloca. 
- assert!(common::type_is_immediate(mircx.ccx, ty) || - common::type_is_fat_ptr(mircx.ccx, ty)); } else if common::type_is_imm_pair(mircx.ccx, ty) { // We allow pairs and uses of any of their 2 fields. } else { // These sorts of types require an alloca. Note that - // type_is_immediate() may *still* be true, particularly + // is_llvm_immediate() may *still* be true, particularly // for newtypes, but we currently force some types // (e.g. structs) into an alloca unconditionally, just so // that we don't have to deal with having two pathways @@ -179,9 +173,9 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { LvalueContext::StorageLive | LvalueContext::StorageDead | LvalueContext::Validate | - LvalueContext::Inspect | LvalueContext::Consume => {} + LvalueContext::Inspect | LvalueContext::Store | LvalueContext::Borrow { .. } | LvalueContext::Projection(..) => { diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 401d4d5221638..0528bf972de0c 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -274,13 +274,22 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } let lvalue = self.trans_lvalue(&bcx, location); - let fn_ty = FnType::of_instance(bcx.ccx, &drop_fn); - let (drop_fn, need_extra) = match ty.sty { - ty::TyDynamic(..) => (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra), - false), - _ => (callee::get_fn(bcx.ccx, drop_fn), lvalue.has_extra()) + let mut args: &[_] = &[lvalue.llval, lvalue.llextra]; + args = &args[..1 + lvalue.has_extra() as usize]; + let (drop_fn, fn_ty) = match ty.sty { + ty::TyDynamic(..) 
=> { + let fn_ty = common::instance_ty(bcx.ccx.tcx(), &drop_fn); + let sig = common::ty_fn_sig(bcx.ccx, fn_ty); + let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig); + let fn_ty = FnType::new_vtable(bcx.ccx, sig, &[]); + args = &args[..1]; + (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra, &fn_ty), fn_ty) + } + _ => { + (callee::get_fn(bcx.ccx, drop_fn), + FnType::of_instance(bcx.ccx, &drop_fn)) + } }; - let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize]; do_call(self, bcx, fn_ty, drop_fn, args, Some((ReturnDest::Nothing, target)), unwind); @@ -561,15 +570,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { (&args[..], None) }; - for (idx, arg) in first_args.iter().enumerate() { + for (i, arg) in first_args.iter().enumerate() { let mut op = self.trans_operand(&bcx, arg); - if idx == 0 { + if i == 0 { if let Pair(_, meta) = op.val { if let Some(ty::InstanceDef::Virtual(_, idx)) = def { - let llmeth = meth::VirtualIndex::from_index(idx) - .get_fn(&bcx, meta); - let llty = fn_ty.llvm_type(bcx.ccx).ptr_to(); - llfn = Some(bcx.pointercast(llmeth, llty)); + llfn = Some(meth::VirtualIndex::from_index(idx) + .get_fn(&bcx, meta, &fn_ty)); } } } @@ -582,7 +589,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { op.val = Ref(tmp.llval, tmp.alignment); } - self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[idx]); + self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[i]); } if let Some(tup) = untuple { self.trans_arguments_untupled(&bcx, tup, &mut llargs, diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 9465118568660..cc6b84a671527 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -32,7 +32,7 @@ use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr}; use common::const_to_opt_u128; use consts; -use type_of::{self, LayoutLlvmExt}; +use type_of::LayoutLlvmExt; use 
type_::Type; use value::Value; @@ -145,7 +145,7 @@ impl<'a, 'tcx> Const<'tcx> { let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) { let (a, b) = self.get_pair(ccx); OperandValue::Pair(a, b) - } else if llty == llvalty && common::type_is_immediate(ccx, self.ty) { + } else if llty == llvalty && ccx.layout_of(self.ty).is_llvm_immediate() { // If the types match, we can use the value directly. OperandValue::Immediate(self.llval) } else { @@ -677,11 +677,12 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } C_fat_ptr(self.ccx, base, info) } - mir::CastKind::Misc if common::type_is_immediate(self.ccx, operand.ty) => { - debug_assert!(common::type_is_immediate(self.ccx, cast_ty)); + mir::CastKind::Misc if self.ccx.layout_of(operand.ty).is_llvm_immediate() => { let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); - let ll_t_out = self.ccx.layout_of(cast_ty).immediate_llvm_type(self.ccx); + let cast_layout = self.ccx.layout_of(cast_ty); + assert!(cast_layout.is_llvm_immediate()); + let ll_t_out = cast_layout.immediate_llvm_type(self.ccx); let llval = operand.llval; let signed = match self.ccx.layout_of(operand.ty).abi { layout::Abi::Scalar(layout::Int(_, signed)) => signed, @@ -728,8 +729,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { if common::type_is_fat_ptr(self.ccx, operand.ty) { let (data_ptr, meta) = operand.get_fat_ptr(self.ccx); if common::type_is_fat_ptr(self.ccx, cast_ty) { - let llcast_ty = type_of::fat_ptr_base_ty(self.ccx, cast_ty); - let data_cast = consts::ptrcast(data_ptr, llcast_ty); + let thin_ptr = self.ccx.layout_of(cast_ty) + .field(self.ccx, abi::FAT_PTR_ADDR); + let data_cast = consts::ptrcast(data_ptr, + thin_ptr.llvm_type(self.ccx)); C_fat_ptr(self.ccx, data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and @@ -1091,7 +1094,7 @@ fn trans_const_adt<'a, 'tcx>( 
mir::AggregateKind::Adt(_, index, _, _) => index, _ => 0, }; - match *l.layout { + match l.layout { layout::Layout::General { .. } => { let discr = match *kind { mir::AggregateKind::Adt(adt_def, _, _, _) => { @@ -1147,7 +1150,7 @@ fn trans_const_adt<'a, 'tcx>( /// a two-element struct will locate it at offset 4, and accesses to it /// will read the wrong memory. fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - layout: layout::FullLayout<'tcx>, + layout: layout::TyLayout<'tcx>, vals: &[Const<'tcx>], discr: Option>) -> Const<'tcx> { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 0732720bd1a14..7c0b2748a7feb 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -10,7 +10,7 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Align, FullLayout, LayoutOf}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; @@ -19,7 +19,7 @@ use base; use builder::Builder; use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, val_ty}; use consts; -use type_of::{self, LayoutLlvmExt}; +use type_of::LayoutLlvmExt; use type_::Type; use value::Value; use glue; @@ -54,8 +54,8 @@ impl ops::BitOr for Alignment { } } -impl<'a> From> for Alignment { - fn from(layout: FullLayout) -> Self { +impl<'a> From> for Alignment { + fn from(layout: TyLayout) -> Self { if let layout::Abi::Aggregate { packed: true, align, .. 
} = layout.abi { Alignment::Packed(align) } else { @@ -86,7 +86,7 @@ pub struct LvalueRef<'tcx> { pub llextra: ValueRef, /// Monomorphized type of this lvalue, including variant information - pub layout: FullLayout<'tcx>, + pub layout: TyLayout<'tcx>, /// Whether this lvalue is known to be aligned according to its layout pub alignment: Alignment, @@ -94,7 +94,7 @@ pub struct LvalueRef<'tcx> { impl<'a, 'tcx> LvalueRef<'tcx> { pub fn new_sized(llval: ValueRef, - layout: FullLayout<'tcx>, + layout: TyLayout<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> { LvalueRef { @@ -105,7 +105,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } } - pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: FullLayout<'tcx>, name: &str) + pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str) -> LvalueRef<'tcx> { debug!("alloca({:?}: {:?})", name, layout); let tmp = bcx.alloca( @@ -114,7 +114,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { - if let layout::FieldPlacement::Array { count, .. } = *self.layout.fields { + if let layout::FieldPlacement::Array { count, .. } = self.layout.fields { if self.layout.is_unsized() { assert!(self.has_extra()); assert_eq!(count, 0); @@ -163,7 +163,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { OperandValue::Pair( self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(), self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate()) - } else if common::type_is_immediate(bcx.ccx, self.layout.ty) { + } else if self.layout.is_llvm_immediate() { let mut const_llval = ptr::null_mut(); unsafe { let global = llvm::LLVMIsAGlobalVariable(self.llval); @@ -202,28 +202,15 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let ccx = bcx.ccx; let field = self.layout.field(ccx, ix); let offset = self.layout.fields.offset(ix).bytes(); - let alignment = self.alignment | Alignment::from(self.layout); - // Unions and newtypes only use an offset of 0. 
- let has_llvm_fields = match *self.layout.fields { - layout::FieldPlacement::Union(_) => false, - layout::FieldPlacement::Array { .. } => true, - layout::FieldPlacement::Arbitrary { .. } => { - match self.layout.abi { - layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => false, - layout::Abi::Aggregate { .. } => true - } - } - }; - let simple = || { LvalueRef { - llval: if has_llvm_fields { - bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) - } else { - assert_eq!(offset, 0); + // Unions and newtypes only use an offset of 0. + llval: if offset == 0 { bcx.pointercast(self.llval, field.llvm_type(ccx).ptr_to()) + } else { + bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) }, llextra: if ccx.shared().type_has_metadata(field.ty) { self.llextra @@ -309,7 +296,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { /// Obtain the actual discriminant of a value. pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef { let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx); - match *self.layout.layout { + match self.layout.layout { layout::Layout::Univariant { .. } | layout::Layout::UntaggedUnion { .. } => return C_uint(cast_to, 0), _ => {} @@ -320,7 +307,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { layout::Abi::Scalar(discr) => discr, _ => bug!("discriminant not scalar: {:#?}", discr.layout) }; - let (min, max) = match *self.layout.layout { + let (min, max) = match self.layout.layout { layout::Layout::General { ref discr_range, .. } => (discr_range.start, discr_range.end), _ => (0, u64::max_value()), }; @@ -346,7 +333,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { bcx.load(discr.llval, discr.alignment.non_abi()) } }; - match *self.layout.layout { + match self.layout.layout { layout::Layout::General { .. 
} => { let signed = match discr_scalar { layout::Int(_, signed) => signed, @@ -369,7 +356,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let to = self.layout.ty.ty_adt_def().unwrap() .discriminant_for_variant(bcx.tcx(), variant_index) .to_u128_unchecked() as u64; - match *self.layout.layout { + match self.layout.layout { layout::Layout::General { .. } => { let ptr = self.project_field(bcx, 0); bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64), @@ -419,17 +406,9 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let mut downcast = *self; downcast.layout = self.layout.for_variant(variant_index); - // If this is an enum, cast to the appropriate variant struct type. - match *self.layout.layout { - layout::Layout::NullablePointer { .. } | - layout::Layout::General { .. } => { - let variant_ty = Type::struct_(bcx.ccx, - &type_of::struct_llfields(bcx.ccx, downcast.layout), - downcast.layout.is_packed()); - downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); - } - _ => {} - } + // Cast to the appropriate variant struct type. 
+ let variant_ty = downcast.layout.llvm_type(bcx.ccx); + downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); downcast } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 2cad096448497..38719fedede5b 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -12,18 +12,17 @@ use libc::c_uint; use llvm::{self, ValueRef, BasicBlockRef}; use llvm::debuginfo::DIScope; use rustc::ty::{self, TypeFoldable}; -use rustc::ty::layout::{LayoutOf, FullLayout}; +use rustc::ty::layout::{LayoutOf, TyLayout}; use rustc::mir::{self, Mir}; use rustc::ty::subst::Substs; use rustc::infer::TransNormalize; use rustc::session::config::FullDebugInfo; use base; use builder::Builder; -use common::{self, CrateContext, Funclet}; +use common::{CrateContext, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::Instance; use abi::{ArgAttribute, FnType}; -use type_of::{self, LayoutLlvmExt}; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -85,7 +84,7 @@ pub struct MirContext<'a, 'tcx:'a> { /// directly using an `OperandRef`, which makes for tighter LLVM /// IR. 
The conditions for using an `OperandRef` are as follows: /// - /// - the type of the local must be judged "immediate" by `type_is_immediate` + /// - the type of the local must be judged "immediate" by `is_llvm_immediate` /// - the operand must never be referenced indirectly /// - we should not take its address using the `&` operator /// - nor should it appear in an lvalue path like `tmp.a` @@ -177,7 +176,7 @@ enum LocalRef<'tcx> { } impl<'a, 'tcx> LocalRef<'tcx> { - fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: FullLayout<'tcx>) -> LocalRef<'tcx> { + fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'tcx> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but @@ -448,32 +447,14 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, assert!(!a.is_ignore() && a.cast.is_none() && a.pad.is_none()); assert!(!b.is_ignore() && b.cast.is_none() && b.pad.is_none()); - let mut a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + let a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + bcx.set_value_name(a, &(name.clone() + ".0")); llarg_idx += 1; - let mut b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + let b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + bcx.set_value_name(b, &(name + ".1")); llarg_idx += 1; - if common::type_is_fat_ptr(bcx.ccx, arg.layout.ty) { - // FIXME(eddyb) As we can't perfectly represent the data and/or - // vtable pointer in a fat pointers in Rust's typesystem, and - // because we split fat pointers into two ArgType's, they're - // not the right type so we have to cast them for now. 
- let pointee = match arg.layout.ty.sty { - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => ty, - ty::TyAdt(def, _) if def.is_box() => arg.layout.ty.boxed_ty(), - _ => bug!() - }; - let data_llty = bcx.ccx.layout_of(pointee).llvm_type(bcx.ccx); - let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee); - - a = bcx.pointercast(a, data_llty.ptr_to()); - bcx.set_value_name(a, &(name.clone() + ".ptr")); - b = bcx.pointercast(b, meta_llty); - bcx.set_value_name(b, &(name + ".meta")); - } - return LocalRef::Operand(Some(OperandRef { val: OperandValue::Pair(a, b), layout: arg.layout diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index d1922f8bf99b0..5659072fa932c 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -10,7 +10,7 @@ use llvm::ValueRef; use rustc::ty; -use rustc::ty::layout::{LayoutOf, FullLayout}; +use rustc::ty::layout::{LayoutOf, TyLayout}; use rustc::mir; use rustc_data_structures::indexed_vec::Idx; @@ -71,7 +71,7 @@ pub struct OperandRef<'tcx> { pub val: OperandValue, // The layout of value, based on its Rust type. - pub layout: FullLayout<'tcx>, + pub layout: TyLayout<'tcx>, } impl<'tcx> fmt::Debug for OperandRef<'tcx> { @@ -82,7 +82,7 @@ impl<'tcx> fmt::Debug for OperandRef<'tcx> { impl<'a, 'tcx> OperandRef<'tcx> { pub fn new_zst(ccx: &CrateContext<'a, 'tcx>, - layout: FullLayout<'tcx>) -> OperandRef<'tcx> { + layout: TyLayout<'tcx>) -> OperandRef<'tcx> { assert!(layout.is_zst()); let llty = layout.llvm_type(ccx); // FIXME(eddyb) ZSTs should always be immediate, not pairs. 
diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index c7cb69339f7f3..b68cd3a6ae5d1 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -18,6 +18,7 @@ use rustc_apfloat::{ieee, Float, Status, Round}; use rustc_const_math::MAX_F32_PLUS_HALF_ULP; use std::{u128, i128}; +use abi; use base; use builder::Builder; use callee; @@ -26,7 +27,7 @@ use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_i use consts; use monomorphize; use type_::Type; -use type_of::{self, LayoutLlvmExt}; +use type_of::LayoutLlvmExt; use value::Value; use super::{MirContext, LocalRef}; @@ -234,8 +235,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // &'a fmt::Debug+Send => &'a fmt::Debug, // So we need to pointercast the base to ensure // the types match up. - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast.ty); - let lldata = bcx.pointercast(lldata, llcast_ty); + let thin_ptr = cast.field(bcx.ccx, abi::FAT_PTR_ADDR); + let lldata = bcx.pointercast(lldata, thin_ptr.llvm_type(bcx.ccx)); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { @@ -253,8 +254,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.layout.ty) => { if let OperandValue::Pair(data_ptr, meta) = operand.val { if common::type_is_fat_ptr(bcx.ccx, cast.ty) { - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast.ty); - let data_cast = bcx.pointercast(data_ptr, llcast_ty); + let thin_ptr = cast.field(bcx.ccx, abi::FAT_PTR_ADDR); + let data_cast = bcx.pointercast(data_ptr, + thin_ptr.llvm_type(bcx.ccx)); OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and @@ -268,7 +270,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } mir::CastKind::Misc => { - debug_assert!(common::type_is_immediate(bcx.ccx, cast.ty)); + assert!(cast.is_llvm_immediate()); let r_t_in = CastTy::from_ty(operand.layout.ty) 
.expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); @@ -276,7 +278,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let ll_t_out = cast.immediate_llvm_type(bcx.ccx); let llval = operand.immediate(); - if let Layout::General { ref discr_range, .. } = *operand.layout.layout { + if let Layout::General { ref discr_range, .. } = operand.layout.layout { if discr_range.end > discr_range.start { // We want `table[e as usize]` to not // have bound checks, and this is the most diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index dbdc8919da9c1..53aaed1578322 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -207,10 +207,6 @@ impl Type { ty!(llvm::LLVMVectorType(ty.to_ref(), len as c_uint)) } - pub fn vtable_ptr(ccx: &CrateContext) -> Type { - Type::func(&[Type::i8p(ccx)], &Type::void(ccx)).ptr_to().ptr_to() - } - pub fn kind(&self) -> TypeKind { unsafe { llvm::LLVMRustGetTypeKind(self.to_ref()) diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 60c2b5397391a..77cc3897c9be8 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -11,131 +11,68 @@ use abi::FnType; use common::*; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, HasDataLayout, Align, LayoutOf, Size, FullLayout}; +use rustc::ty::layout::{self, HasDataLayout, Align, LayoutOf, Size, TyLayout}; use trans_item::DefPathBasedNames; use type_::Type; -use syntax::ast; +use std::fmt::Write; -pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { - match ty.sty { - ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) | - ty::TyRawPtr(ty::TypeAndMut { ty: t, .. 
}) if ccx.shared().type_has_metadata(t) => { - ccx.layout_of(t).llvm_type(ccx).ptr_to() - } - ty::TyAdt(def, _) if def.is_box() => { - ccx.layout_of(ty.boxed_ty()).llvm_type(ccx).ptr_to() - } - _ => bug!("expected fat ptr ty but got {:?}", ty) - } -} - -pub fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { - let unsized_part = ccx.tcx().struct_tail(ty); - match unsized_part.sty { - ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => { - Type::uint_from_ty(ccx, ast::UintTy::Us) - } - ty::TyDynamic(..) => Type::vtable_ptr(ccx), - _ => bug!("Unexpected tail in unsized_info_ty: {:?} for ty={:?}", - unsized_part, ty) - } -} - -fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>, - defer: &mut Option<(Type, FullLayout<'tcx>)>) +fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + layout: TyLayout<'tcx>, + defer: &mut Option<(Type, TyLayout<'tcx>)>) -> Type { - let ptr_ty = |ty: Ty<'tcx>| { - if cx.shared().type_has_metadata(ty) { - if let ty::TyStr = ty.sty { - // This means we get a nicer name in the output (str is always - // unsized). - cx.str_slice_type() - } else { - let ptr_ty = cx.layout_of(ty).llvm_type(cx).ptr_to(); - let info_ty = unsized_info_ty(cx, ty); - Type::struct_(cx, &[ - Type::array(&Type::i8(cx), 0), - ptr_ty, - Type::array(&Type::i8(cx), 0), - info_ty, - Type::array(&Type::i8(cx), 0) - ], false) - } - } else { - cx.layout_of(ty).llvm_type(cx).ptr_to() - } - }; - match ty.sty { - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { - return ptr_ty(ty); + match layout.abi { + layout::Abi::Scalar(_) => bug!("handled elsewhere"), + layout::Abi::Vector { .. 
} => { + return Type::vector(&layout.field(ccx, 0).llvm_type(ccx), + layout.fields.count() as u64); } - ty::TyAdt(def, _) if def.is_box() => { - return ptr_ty(ty.boxed_ty()); - } - ty::TyFnPtr(sig) => { - let sig = cx.tcx().erase_late_bound_regions_and_normalize(&sig); - return FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to(); - } - _ => {} + layout::Abi::Aggregate { .. } => {} } - let layout = cx.layout_of(ty); - if let layout::Abi::Scalar(value) = layout.abi { - let llty = match value { - layout::Int(layout::I1, _) => Type::i8(cx), - layout::Int(i, _) => Type::from_integer(cx, i), - layout::F32 => Type::f32(cx), - layout::F64 => Type::f64(cx), - layout::Pointer => { - cx.layout_of(layout::Pointer.to_ty(cx.tcx())).llvm_type(cx) - } - }; - return llty; - } - - if let layout::Abi::Vector { .. } = layout.abi { - return Type::vector(&layout.field(cx, 0).llvm_type(cx), - layout.fields.count() as u64); - } - - let name = match ty.sty { - ty::TyClosure(..) | ty::TyGenerator(..) | ty::TyAdt(..) => { + let name = match layout.ty.sty { + ty::TyClosure(..) | + ty::TyGenerator(..) | + ty::TyAdt(..) | + ty::TyDynamic(..) | + ty::TyForeign(..) 
| + ty::TyStr => { let mut name = String::with_capacity(32); - let printer = DefPathBasedNames::new(cx.tcx(), true, true); - printer.push_type_name(ty, &mut name); + let printer = DefPathBasedNames::new(ccx.tcx(), true, true); + printer.push_type_name(layout.ty, &mut name); + if let (&ty::TyAdt(def, _), Some(v)) = (&layout.ty.sty, layout.variant_index) { + write!(&mut name, "::{}", def.variants[v].name).unwrap(); + } Some(name) } _ => None }; - match *layout.fields { + match layout.fields { layout::FieldPlacement::Union(_) => { - let size = layout.size(cx).bytes(); - let fill = Type::array(&Type::i8(cx), size); + let size = layout.size(ccx).bytes(); + let fill = Type::array(&Type::i8(ccx), size); match name { None => { - Type::struct_(cx, &[fill], layout.is_packed()) + Type::struct_(ccx, &[fill], layout.is_packed()) } Some(ref name) => { - let mut llty = Type::named_struct(cx, name); + let mut llty = Type::named_struct(ccx, name); llty.set_struct_body(&[fill], layout.is_packed()); llty } } } layout::FieldPlacement::Array { count, .. } => { - Type::array(&layout.field(cx, 0).llvm_type(cx), count) + Type::array(&layout.field(ccx, 0).llvm_type(ccx), count) } layout::FieldPlacement::Arbitrary { .. 
} => { match name { None => { - Type::struct_(cx, &struct_llfields(cx, layout), layout.is_packed()) + Type::struct_(ccx, &struct_llfields(ccx, layout), layout.is_packed()) } Some(ref name) => { - let llty = Type::named_struct(cx, name); + let llty = Type::named_struct(ccx, name); *defer = Some((llty, layout)); llty } @@ -144,37 +81,37 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } -pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - layout: FullLayout<'tcx>) -> Vec { +fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + layout: TyLayout<'tcx>) -> Vec { debug!("struct_llfields: {:#?}", layout); - let align = layout.align(cx); - let size = layout.size(cx); + let align = layout.align(ccx); + let size = layout.size(ccx); let field_count = layout.fields.count(); let mut offset = Size::from_bytes(0); let mut result: Vec = Vec::with_capacity(1 + field_count * 2); for i in layout.fields.index_by_increasing_offset() { - let field = layout.field(cx, i); + let field = layout.field(ccx, i); let target_offset = layout.fields.offset(i as usize); debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}", i, field, offset, target_offset); assert!(target_offset >= offset); let padding = target_offset - offset; - result.push(Type::array(&Type::i8(cx), padding.bytes())); + result.push(Type::array(&Type::i8(ccx), padding.bytes())); debug!(" padding before: {:?}", padding); - result.push(field.llvm_type(cx)); + result.push(field.llvm_type(ccx)); if layout.is_packed() { assert_eq!(padding.bytes(), 0); } else { - let field_align = field.align(cx); + let field_align = field.align(ccx); assert!(field_align.abi() <= align.abi(), "non-packed type has field with larger align ({}): {:#?}", field_align.abi(), layout); } - offset = target_offset + field.size(cx); + offset = target_offset + field.size(ccx); } if !layout.is_unsized() && field_count > 0 { if offset > size { @@ -184,7 +121,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 
'tcx>, let padding = size - offset; debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", padding, offset, size); - result.push(Type::array(&Type::i8(cx), padding.bytes())); + result.push(Type::array(&Type::i8(ccx), padding.bytes())); assert!(result.len() == 1 + field_count * 2); } else { debug!("struct_llfields: offset: {:?} stride: {:?}", @@ -210,13 +147,22 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { } pub trait LayoutLlvmExt<'tcx> { + fn is_llvm_immediate(&self) -> bool; fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn over_align(&self, ccx: &CrateContext) -> Option; fn llvm_field_index(&self, index: usize) -> u64; } -impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> { +impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { + fn is_llvm_immediate(&self) -> bool { + match self.abi { + layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true, + + layout::Abi::Aggregate { .. } => self.is_zst() + } + } + /// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. /// The pointee type of the pointer in `LvalueRef` is always this type. /// For sized types, it is also the right LLVM type for an `alloca` @@ -229,8 +175,42 @@ impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> { /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { + if let layout::Abi::Scalar(value) = self.abi { + // Use a different cache for scalars because pointers to DSTs + // can be either fat or thin (data pointers of fat pointers). 
+ if let Some(&llty) = ccx.scalar_lltypes().borrow().get(&self.ty) { + return llty; + } + let llty = match value { + layout::Int(layout::I1, _) => Type::i8(ccx), + layout::Int(i, _) => Type::from_integer(ccx, i), + layout::F32 => Type::f32(ccx), + layout::F64 => Type::f64(ccx), + layout::Pointer => { + let pointee = match self.ty.sty { + ty::TyRef(_, ty::TypeAndMut { ty, .. }) | + ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => { + ccx.layout_of(ty).llvm_type(ccx) + } + ty::TyAdt(def, _) if def.is_box() => { + ccx.layout_of(self.ty.boxed_ty()).llvm_type(ccx) + } + ty::TyFnPtr(sig) => { + let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig); + FnType::new(ccx, sig, &[]).llvm_type(ccx) + } + _ => Type::i8(ccx) + }; + pointee.ptr_to() + } + }; + ccx.scalar_lltypes().borrow_mut().insert(self.ty, llty); + return llty; + } + + // Check the cache. - if let Some(&llty) = ccx.lltypes().borrow().get(&self.ty) { + if let Some(&llty) = ccx.lltypes().borrow().get(&(self.ty, self.variant_index)) { return llty; } @@ -244,13 +224,17 @@ impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> { let mut defer = None; let llty = if self.ty != normal_ty { - ccx.layout_of(normal_ty).llvm_type(ccx) + let mut layout = ccx.layout_of(normal_ty); + if let Some(v) = self.variant_index { + layout = layout.for_variant(v); + } + layout.llvm_type(ccx) } else { - uncached_llvm_type(ccx, self.ty, &mut defer) + uncached_llvm_type(ccx, *self, &mut defer) }; debug!("--> mapped {:#?} to llty={:?}", self, llty); - ccx.lltypes().borrow_mut().insert(self.ty, llty); + ccx.lltypes().borrow_mut().insert((self.ty, self.variant_index), llty); if let Some((mut llty, layout)) = defer { llty.set_struct_body(&struct_llfields(ccx, layout), layout.is_packed()) @@ -279,11 +263,11 @@ impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> { fn llvm_field_index(&self, index: usize) -> u64 { if let layout::Abi::Scalar(_) = self.abi { - bug!("FullLayout::llvm_field_index({:?}): not applicable", self); + 
bug!("TyLayout::llvm_field_index({:?}): not applicable", self); } - match *self.fields { + match self.fields { layout::FieldPlacement::Union(_) => { - bug!("FullLayout::llvm_field_index({:?}): not applicable", self) + bug!("TyLayout::llvm_field_index({:?}): not applicable", self) } layout::FieldPlacement::Array { .. } => { diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs index 8a680f1c9d698..525a1f5310c9a 100644 --- a/src/test/codegen/adjustments.rs +++ b/src/test/codegen/adjustments.rs @@ -24,10 +24,9 @@ pub fn helper(_: usize) { pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] { // We used to generate an extra alloca and memcpy for the block's trailing expression value, so // check that we copy directly to the return value slot -// CHECK: %x.ptr = bitcast i8* %0 to [0 x i8]* -// CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.ptr, 1 -// CHECK: %2 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1, [[USIZE]] %x.meta, 3 -// CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %2 +// CHECK: %0 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.0, 1 +// CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %0, [[USIZE]] %x.1, 3 +// CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1 { x } } diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index 5d073670d865c..05682a8efaecc 100644 --- a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -97,43 +97,43 @@ pub fn struct_return() -> S { pub fn helper(_: usize) { } -// CHECK: @slice(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta) +// CHECK: @slice([0 x i8]* noalias nonnull readonly %arg0.0, [[USIZE]] %arg0.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn slice(_: &[u8]) { } -// CHECK: @mutable_slice(i8* nonnull 
%arg0.ptr, [[USIZE]] %arg0.meta) +// CHECK: @mutable_slice([0 x i8]* nonnull %arg0.0, [[USIZE]] %arg0.1) // FIXME #25759 This should also have `nocapture` // ... there's this LLVM bug that forces us to not use noalias, see #29485 #[no_mangle] pub fn mutable_slice(_: &mut [u8]) { } -// CHECK: @unsafe_slice(%UnsafeInner* nonnull %arg0.ptr, [[USIZE]] %arg0.meta) +// CHECK: @unsafe_slice([0 x %UnsafeInner]* nonnull %arg0.0, [[USIZE]] %arg0.1) // unsafe interior means this isn't actually readonly and there may be aliases ... #[no_mangle] pub fn unsafe_slice(_: &[UnsafeInner]) { } -// CHECK: @str(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta) +// CHECK: @str([0 x i8]* noalias nonnull readonly %arg0.0, [[USIZE]] %arg0.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn str(_: &[u8]) { } -// CHECK: @trait_borrow({}* nonnull, {}* noalias nonnull readonly) +// CHECK: @trait_borrow(%"core::ops::drop::Drop"* nonnull %arg0.0, {}* noalias nonnull readonly %arg0.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn trait_borrow(_: &Drop) { } -// CHECK: @trait_box({}* noalias nonnull, {}* noalias nonnull readonly) +// CHECK: @trait_box(%"core::ops::drop::Drop"* noalias nonnull, {}* noalias nonnull readonly) #[no_mangle] pub fn trait_box(_: Box) { } -// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta) +// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1) #[no_mangle] pub fn return_slice(x: &[u16]) -> &[u16] { x diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs index ad799247f598b..2ab64fffa3b34 100644 --- a/src/test/codegen/refs.rs +++ b/src/test/codegen/refs.rs @@ -24,10 +24,10 @@ pub fn helper(_: usize) { pub fn ref_dst(s: &[u8]) { // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy // directly to the alloca 
for "x" -// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 1 -// CHECK: store [0 x i8]* %s.ptr, [0 x i8]** [[X0]] +// CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x to [0 x i8]** +// CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]] // CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 3 -// CHECK: store [[USIZE]] %s.meta, [[USIZE]]* [[X1]] +// CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]] let x = &*s; &x; // keep variable in an alloca From b28f668e267d6b463439e776c335f45508f5c1ad Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Fri, 22 Sep 2017 22:44:40 +0300 Subject: [PATCH 41/69] rustc: move size, align & primitive_align from Abi::Aggregate to layout. --- src/librustc/ty/layout.rs | 311 +++++++++-------------- src/librustc_const_eval/eval.rs | 4 +- src/librustc_lint/types.rs | 5 +- src/librustc_mir/transform/inline.rs | 2 +- src/librustc_trans/abi.rs | 36 +-- src/librustc_trans/base.rs | 8 +- src/librustc_trans/cabi_aarch64.rs | 10 +- src/librustc_trans/cabi_arm.rs | 10 +- src/librustc_trans/cabi_asmjs.rs | 10 +- src/librustc_trans/cabi_hexagon.rs | 19 +- src/librustc_trans/cabi_mips.rs | 6 +- src/librustc_trans/cabi_mips64.rs | 6 +- src/librustc_trans/cabi_msp430.rs | 19 +- src/librustc_trans/cabi_nvptx.rs | 19 +- src/librustc_trans/cabi_nvptx64.rs | 19 +- src/librustc_trans/cabi_powerpc.rs | 6 +- src/librustc_trans/cabi_powerpc64.rs | 18 +- src/librustc_trans/cabi_s390x.rs | 19 +- src/librustc_trans/cabi_sparc.rs | 6 +- src/librustc_trans/cabi_sparc64.rs | 14 +- src/librustc_trans/cabi_x86.rs | 21 +- src/librustc_trans/cabi_x86_64.rs | 8 +- src/librustc_trans/cabi_x86_win64.rs | 14 +- src/librustc_trans/debuginfo/metadata.rs | 28 +- src/librustc_trans/glue.rs | 2 +- src/librustc_trans/mir/block.rs | 6 +- src/librustc_trans/mir/constant.rs | 2 +- src/librustc_trans/mir/lvalue.rs 
| 12 +- src/librustc_trans/mir/rvalue.rs | 4 +- src/librustc_trans/type_of.rs | 38 ++- 30 files changed, 299 insertions(+), 383 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 7bf7d81037d9e..dbad77b904ee0 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -747,10 +747,7 @@ pub enum Abi { Aggregate { /// If true, the size is exact, otherwise it's only a lower bound. sized: bool, - packed: bool, - align: Align, - primitive_align: Align, - size: Size + packed: bool } } @@ -770,68 +767,6 @@ impl Abi { Abi::Aggregate { packed, .. } => packed } } - - /// Returns true if the type is a ZST and not unsized. - pub fn is_zst(&self) -> bool { - match *self { - Abi::Scalar(_) => false, - Abi::Vector { count, .. } => count == 0, - Abi::Aggregate { sized, size, .. } => sized && size.bytes() == 0 - } - } - - pub fn size(&self, cx: C) -> Size { - let dl = cx.data_layout(); - - match *self { - Abi::Scalar(value) => value.size(dl), - - Abi::Vector { element, count } => { - let element_size = element.size(dl); - let vec_size = match element_size.checked_mul(count, dl) { - Some(size) => size, - None => bug!("Layout::size({:?}): {} * {} overflowed", - self, element_size.bytes(), count) - }; - vec_size.abi_align(self.align(dl)) - } - - Abi::Aggregate { size, .. } => size - } - } - - pub fn align(&self, cx: C) -> Align { - let dl = cx.data_layout(); - - match *self { - Abi::Scalar(value) => value.align(dl), - - Abi::Vector { element, count } => { - let elem_size = element.size(dl); - let vec_size = match elem_size.checked_mul(count, dl) { - Some(size) => size, - None => bug!("Layout::align({:?}): {} * {} overflowed", - self, elem_size.bytes(), count) - }; - dl.vector_align(vec_size) - } - - Abi::Aggregate { align, .. 
} => align - } - } - - pub fn size_and_align(&self, cx: C) -> (Size, Align) { - (self.size(cx), self.align(cx)) - } - - /// Returns alignment before repr alignment is applied - pub fn primitive_align(&self, cx: C) -> Align { - match *self { - Abi::Aggregate { primitive_align, .. } => primitive_align, - - _ => self.align(cx.data_layout()) - } - } } /// Type layout, from which size and alignment can be cheaply computed. @@ -911,6 +846,9 @@ pub struct CachedLayout { pub layout: Layout, pub fields: FieldPlacement, pub abi: Abi, + pub align: Align, + pub primitive_align: Align, + pub size: Size } fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -947,12 +885,16 @@ impl<'a, 'tcx> Layout { -> Result<&'tcx CachedLayout, LayoutError<'tcx>> { let cx = (tcx, param_env); let dl = cx.data_layout(); - let scalar = |value| { + let scalar = |value: Primitive| { + let align = value.align(dl); tcx.intern_layout(CachedLayout { variant_index: None, layout: Layout::Scalar, fields: FieldPlacement::Union(0), - abi: Abi::Scalar(value) + abi: Abi::Scalar(value), + size: value.size(dl), + align, + primitive_align: align }) }; #[derive(Copy, Clone, Debug)] @@ -1005,11 +947,11 @@ impl<'a, 'tcx> Layout { if end > 0 { let optimizing = &mut inverse_memory_index[..end]; if sort_ascending { - optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi()); + optimizing.sort_by_key(|&x| fields[x as usize].align.abi()); } else { optimizing.sort_by(| &a, &b | { - let a = fields[a as usize].align(dl).abi(); - let b = fields[b as usize].align(dl).abi(); + let a = fields[a as usize].align.abi(); + let b = fields[b as usize].align.abi(); b.cmp(&a) }); } @@ -1046,16 +988,15 @@ impl<'a, 'tcx> Layout { // Invariant: offset < dl.obj_size_bound() <= 1<<61 if !packed { - let field_align = field.align(dl); - align = align.max(field_align); - primitive_align = primitive_align.max(field.primitive_align(dl)); - offset = offset.abi_align(field_align); + offset = offset.abi_align(field.align); + align = 
align.max(field.align); + primitive_align = primitive_align.max(field.primitive_align); } - debug!("univariant offset: {:?} field: {:?} {:?}", offset, field, field.size(dl)); + debug!("univariant offset: {:?} field: {:#?}", offset, field); offsets[*i as usize] = offset; - offset = offset.checked_add(field.size(dl), dl) + offset = offset.checked_add(field.size, dl) .ok_or(LayoutError::SizeOverflow(ty))?; } @@ -1095,11 +1036,11 @@ impl<'a, 'tcx> Layout { }, abi: Abi::Aggregate { sized, - packed, - align, - primitive_align, - size: min_size.abi_align(align) - } + packed + }, + align, + primitive_align, + size: min_size.abi_align(align) }) }; let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| { @@ -1137,11 +1078,11 @@ impl<'a, 'tcx> Layout { fields, abi: Abi::Aggregate { sized: true, - packed: false, - align, - primitive_align: align, - size: (meta_offset + metadata.size(dl)).abi_align(align) - } + packed: false + }, + align, + primitive_align: align, + size: (meta_offset + metadata.size(dl)).abi_align(align) })) }; @@ -1183,25 +1124,24 @@ impl<'a, 'tcx> Layout { } let element = cx.layout_of(element)?; - let element_size = element.size(dl); let count = count.val.to_const_int().unwrap().to_u64().unwrap(); - let size = element_size.checked_mul(count, dl) + let size = element.size.checked_mul(count, dl) .ok_or(LayoutError::SizeOverflow(ty))?; tcx.intern_layout(CachedLayout { variant_index: None, layout: Layout::Array, fields: FieldPlacement::Array { - stride: element_size, + stride: element.size, count }, abi: Abi::Aggregate { sized: true, - packed: false, - align: element.align(dl), - primitive_align: element.primitive_align(dl), - size - } + packed: false + }, + align: element.align, + primitive_align: element.primitive_align, + size }) } ty::TySlice(element) => { @@ -1210,16 +1150,16 @@ impl<'a, 'tcx> Layout { variant_index: None, layout: Layout::Array, fields: FieldPlacement::Array { - stride: element.size(dl), + stride: element.size, count: 0 }, abi: 
Abi::Aggregate { sized: false, - packed: false, - align: element.align(dl), - primitive_align: element.primitive_align(dl), - size: Size::from_bytes(0) - } + packed: false + }, + align: element.align, + primitive_align: element.primitive_align, + size: Size::from_bytes(0) }) } ty::TyStr => { @@ -1232,11 +1172,11 @@ impl<'a, 'tcx> Layout { }, abi: Abi::Aggregate { sized: false, - packed: false, - align: dl.i8_align, - primitive_align: dl.i8_align, - size: Size::from_bytes(0) - } + packed: false + }, + align: dl.i8_align, + primitive_align: dl.i8_align, + size: Size::from_bytes(0) }) } @@ -1283,23 +1223,34 @@ impl<'a, 'tcx> Layout { // SIMD vector types. ty::TyAdt(def, ..) if def.repr.simd() => { let count = ty.simd_size(tcx) as u64; - let element = ty.simd_type(tcx); - let element = match cx.layout_of(element)?.abi { + let element = cx.layout_of(ty.simd_type(tcx))?; + let element_scalar = match element.abi { Abi::Scalar(value) => value, _ => { tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \ a non-machine element type `{}`", - ty, element)); + ty, element.ty)); } }; + let size = element.size.checked_mul(count, dl) + .ok_or(LayoutError::SizeOverflow(ty))?; + let align = dl.vector_align(size); + let size = size.abi_align(align); + tcx.intern_layout(CachedLayout { variant_index: None, layout: Layout::Vector, fields: FieldPlacement::Array { - stride: element.size(tcx), + stride: element.size, count }, - abi: Abi::Vector { element, count } + abi: Abi::Vector { + element: element_scalar, + count + }, + size, + align, + primitive_align: align }) } @@ -1344,10 +1295,10 @@ impl<'a, 'tcx> Layout { assert!(!field.is_unsized()); if !packed { - align = align.max(field.align(dl)); - primitive_align = primitive_align.max(field.primitive_align(dl)); + align = align.max(field.align); + primitive_align = primitive_align.max(field.primitive_align); } - size = cmp::max(size, field.size(dl)); + size = cmp::max(size, field.size); } return Ok(tcx.intern_layout(CachedLayout { 
@@ -1356,11 +1307,11 @@ impl<'a, 'tcx> Layout { fields: FieldPlacement::Union(variants[0].len()), abi: Abi::Aggregate { sized: true, - packed, - align, - primitive_align, - size: size.abi_align(align) - } + packed + }, + align, + primitive_align, + size: size.abi_align(align) })); } @@ -1411,27 +1362,26 @@ impl<'a, 'tcx> Layout { st[0].variant_index = Some(0); st[1].variant_index = Some(1); let offset = st[i].fields.offset(field_index) + offset; - let mut abi = st[i].abi; - if offset.bytes() == 0 && discr.size(dl) == abi.size(dl) { - abi = Abi::Scalar(discr); - } + let CachedLayout { + mut abi, + size, + mut align, + mut primitive_align, + .. + } = st[i]; + let mut discr_align = discr.align(dl); - match abi { - Abi::Aggregate { - ref mut align, - ref mut primitive_align, - ref mut packed, - .. - } => { - if offset.abi_align(discr_align) != offset { - *packed = true; - discr_align = dl.i8_align; - } - *align = align.max(discr_align); - *primitive_align = primitive_align.max(discr_align); + if offset.bytes() == 0 && discr.size(dl) == size { + abi = Abi::Scalar(discr); + } else if let Abi::Aggregate { ref mut packed, .. } = abi { + if offset.abi_align(discr_align) != offset { + *packed = true; + discr_align = dl.i8_align; } - _ => {} } + align = align.max(discr_align); + primitive_align = primitive_align.max(discr_align); + return Ok(tcx.intern_layout(CachedLayout { variant_index: None, layout: Layout::NullablePointer { @@ -1444,7 +1394,10 @@ impl<'a, 'tcx> Layout { offsets: vec![offset], memory_index: vec![0] }, - abi + abi, + size, + align, + primitive_align })); } } @@ -1477,15 +1430,14 @@ impl<'a, 'tcx> Layout { // Find the first field we can't move later // to make room for a larger discriminant. 
for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) { - let field_align = field.align(dl); - if !field.is_zst() || field_align.abi() != 1 { - start_align = start_align.min(field_align); + if !field.is_zst() || field.align.abi() != 1 { + start_align = start_align.min(field.align); break; } } - size = cmp::max(size, st.abi.size(dl)); - align = align.max(st.abi.align(dl)); - primitive_align = primitive_align.max(st.abi.primitive_align(dl)); + size = cmp::max(size, st.size); + align = align.max(st.align); + primitive_align = primitive_align.max(st.primitive_align); Ok(st) }).collect::, _>>()?; @@ -1534,9 +1486,8 @@ impl<'a, 'tcx> Layout { let old_ity_size = min_ity.size(); let new_ity_size = ity.size(); for variant in &mut variants { - match (&mut variant.fields, &mut variant.abi) { - (&mut FieldPlacement::Arbitrary { ref mut offsets, .. }, - &mut Abi::Aggregate { ref mut size, .. }) => { + match variant.fields { + FieldPlacement::Arbitrary { ref mut offsets, .. } => { for i in offsets { if *i <= old_ity_size { assert_eq!(*i, old_ity_size); @@ -1544,8 +1495,8 @@ impl<'a, 'tcx> Layout { } } // We might be making the struct larger. 
- if *size <= old_ity_size { - *size = new_ity_size; + if variant.size <= old_ity_size { + variant.size = new_ity_size; } } _ => bug!() @@ -1572,12 +1523,12 @@ impl<'a, 'tcx> Layout { } else { Abi::Aggregate { sized: true, - packed: false, - align, - primitive_align, - size + packed: false } - } + }, + align, + primitive_align, + size }) } @@ -1629,12 +1580,10 @@ impl<'a, 'tcx> Layout { // (delay format until we actually need it) let record = |kind, opt_discr_size, variants| { let type_desc = format!("{:?}", ty); - let overall_size = layout.size(tcx); - let align = layout.align(tcx); tcx.sess.code_stats.borrow_mut().record_type_size(kind, type_desc, - align, - overall_size, + layout.align, + layout.size, opt_discr_size, variants); }; @@ -1670,16 +1619,15 @@ impl<'a, 'tcx> Layout { } Ok(field_layout) => { let offset = layout.fields.offset(i); - let field_size = field_layout.size(tcx); - let field_end = offset + field_size; + let field_end = offset + field_layout.size; if min_size < field_end { min_size = field_end; } session::FieldInfo { name: name.to_string(), offset: offset.bytes(), - size: field_size.bytes(), - align: field_layout.align(tcx).abi(), + size: field_layout.size.bytes(), + align: field_layout.align.abi(), } } } @@ -1692,9 +1640,9 @@ impl<'a, 'tcx> Layout { } else { session::SizeKind::Exact }, - align: layout.align(tcx).abi(), + align: layout.align.abi(), size: if min_size.bytes() == 0 { - layout.size(tcx).bytes() + layout.size.bytes() } else { min_size.bytes() }, @@ -1795,7 +1743,7 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { // First try computing a static layout. let err = match (tcx, param_env).layout_of(ty) { Ok(layout) => { - return Ok(SizeSkeleton::Known(layout.size(tcx))); + return Ok(SizeSkeleton::Known(layout.size)); } Err(err) => err }; @@ -2174,24 +2122,15 @@ impl<'a, 'tcx> TyLayout<'tcx> { /// Returns true if the type is a ZST and not unsized. 
pub fn is_zst(&self) -> bool { - self.abi.is_zst() - } - - pub fn size(&self, cx: C) -> Size { - self.abi.size(cx) - } - - pub fn align(&self, cx: C) -> Align { - self.abi.align(cx) - } - - pub fn size_and_align(&self, cx: C) -> (Size, Align) { - self.abi.size_and_align(cx) + match self.abi { + Abi::Scalar(_) => false, + Abi::Vector { count, .. } => count == 0, + Abi::Aggregate { sized, .. } => sized && self.size.bytes() == 0 + } } - /// Returns alignment before repr alignment is applied - pub fn primitive_align(&self, cx: C) -> Align { - self.abi.primitive_align(cx) + pub fn size_and_align(&self) -> (Size, Align) { + (self.size, self.align) } /// Find the offset of a non-zero leaf field, starting from @@ -2331,12 +2270,9 @@ impl<'gcx> HashStable> for Abi { element.hash_stable(hcx, hasher); count.hash_stable(hcx, hasher); } - Aggregate { packed, sized, size, align, primitive_align } => { + Aggregate { packed, sized } => { packed.hash_stable(hcx, hasher); sized.hash_stable(hcx, hasher); - size.hash_stable(hcx, hasher); - align.hash_stable(hcx, hasher); - primitive_align.hash_stable(hcx, hasher); } } } @@ -2346,7 +2282,10 @@ impl_stable_hash_for!(struct ::ty::layout::CachedLayout { variant_index, layout, fields, - abi + abi, + size, + align, + primitive_align }); impl_stable_hash_for!(enum ::ty::layout::Integer { diff --git a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs index 7badea86c1aba..a548c1df16e28 100644 --- a/src/librustc_const_eval/eval.rs +++ b/src/librustc_const_eval/eval.rs @@ -320,12 +320,12 @@ fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>, }; match &tcx.item_name(def_id)[..] 
{ "size_of" => { - let size = layout_of(substs.type_at(0))?.size(tcx).bytes(); + let size = layout_of(substs.type_at(0))?.size.bytes(); return Ok(mk_const(Integral(Usize(ConstUsize::new(size, tcx.sess.target.usize_ty).unwrap())))); } "min_align_of" => { - let align = layout_of(substs.type_at(0))?.align(tcx).abi(); + let align = layout_of(substs.type_at(0))?.align.abi(); return Ok(mk_const(Integral(Usize(ConstUsize::new(align, tcx.sess.target.usize_ty).unwrap())))); } diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index e0c7bc66876ea..761ca662178f9 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -757,15 +757,14 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences { let discr_size = discr.size(cx.tcx).bytes(); debug!("enum `{}` is {} bytes large with layout:\n{:#?}", - t, layout.size(cx.tcx).bytes(), layout); + t, layout.size.bytes(), layout); let (largest, slargest, largest_index) = enum_definition.variants .iter() .zip(variants) .map(|(variant, variant_layout)| { // Subtract the size of the enum discriminant - let bytes = variant_layout.abi.size(cx.tcx) - .bytes() + let bytes = variant_layout.size.bytes() .saturating_sub(discr_size); debug!("- variant `{}` is {} bytes large", variant.node.name, bytes); diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs index 0e5528f916a7a..4b7856f857b77 100644 --- a/src/librustc_mir/transform/inline.rs +++ b/src/librustc_mir/transform/inline.rs @@ -626,7 +626,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> Option { - (tcx, param_env).layout_of(ty).ok().map(|layout| layout.size(tcx).bytes()) + (tcx, param_env).layout_of(ty).ok().map(|layout| layout.size.bytes()) } fn subst_and_normalize<'a, 'tcx: 'a>( diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 8fa55b6ef7481..2659ca2f0d575 100644 --- 
a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -296,14 +296,14 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { }; Some(Reg { kind, - size: self.size(ccx) + size: self.size }) } layout::Abi::Vector { .. } => { Some(Reg { kind: RegKind::Vector, - size: self.size(ccx) + size: self.size }) } @@ -345,7 +345,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { } // Keep track of the offset (without padding). - let size = field.size(ccx); + let size = field.size; if is_union { total = cmp::max(total, size); } else { @@ -354,7 +354,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { } // There needs to be no padding. - if total != self.size(ccx) { + if total != self.size { None } else { result @@ -446,7 +446,7 @@ impl<'a, 'tcx> ArgType<'tcx> { } } - pub fn make_indirect(&mut self, ccx: &CrateContext<'a, 'tcx>) { + pub fn make_indirect(&mut self) { assert!(self.nested.is_empty()); assert_eq!(self.kind, ArgKind::Direct); @@ -458,7 +458,7 @@ impl<'a, 'tcx> ArgType<'tcx> { // program-invisible so can't possibly capture self.attrs.set(ArgAttribute::NoAlias) .set(ArgAttribute::NoCapture) - .set_dereferenceable(self.layout.size(ccx)); + .set_dereferenceable(self.layout.size); self.kind = ArgKind::Indirect; } @@ -520,15 +520,15 @@ impl<'a, 'tcx> ArgType<'tcx> { } let ccx = bcx.ccx; if self.is_indirect() { - let llsz = C_usize(ccx, self.layout.size(ccx).bytes()); - base::call_memcpy(bcx, dst.llval, val, llsz, self.layout.align(ccx)); + let llsz = C_usize(ccx, self.layout.size.bytes()); + base::call_memcpy(bcx, dst.llval, val, llsz, self.layout.align); } else if let Some(ty) = self.cast { // FIXME(eddyb): Figure out when the simpler Store is safe, clang // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. 
let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { let cast_dst = bcx.pointercast(dst.llval, ty.llvm_type(ccx).ptr_to()); - bcx.store(val, cast_dst, Some(self.layout.align(ccx))); + bcx.store(val, cast_dst, Some(self.layout.align)); } else { // The actual return type is a struct, but the ABI // adaptation code has cast it into some scalar type. The @@ -556,8 +556,8 @@ impl<'a, 'tcx> ArgType<'tcx> { base::call_memcpy(bcx, bcx.pointercast(dst.llval, Type::i8p(ccx)), bcx.pointercast(llscratch, Type::i8p(ccx)), - C_usize(ccx, self.layout.size(ccx).bytes()), - self.layout.align(ccx).min(ty.align(ccx))); + C_usize(ccx, self.layout.size.bytes()), + self.layout.align.min(ty.align(ccx))); bcx.lifetime_end(llscratch, scratch_size); } @@ -828,7 +828,7 @@ impl<'a, 'tcx> FnType<'tcx> { _ => return } - let size = arg.layout.size(ccx); + let size = arg.layout.size; if let Some(unit) = arg.layout.homogeneous_aggregate(ccx) { // Replace newtypes with their inner-most type. @@ -851,7 +851,7 @@ impl<'a, 'tcx> FnType<'tcx> { } if size > layout::Pointer.size(ccx) { - arg.make_indirect(ccx); + arg.make_indirect(); } else { // We want to pass small aggregates as immediates, but using // a LLVM aggregate type for this leads to bad optimizations, @@ -897,7 +897,7 @@ impl<'a, 'tcx> FnType<'tcx> { "x86_64" => if abi == Abi::SysV64 { cabi_x86_64::compute_abi_info(ccx, self); } else if abi == Abi::Win64 || ccx.sess().target.target.options.is_like_windows { - cabi_x86_win64::compute_abi_info(ccx, self); + cabi_x86_win64::compute_abi_info(self); } else { cabi_x86_64::compute_abi_info(ccx, self); }, @@ -910,12 +910,12 @@ impl<'a, 'tcx> FnType<'tcx> { "s390x" => cabi_s390x::compute_abi_info(ccx, self), "asmjs" => cabi_asmjs::compute_abi_info(ccx, self), "wasm32" => cabi_asmjs::compute_abi_info(ccx, self), - "msp430" => cabi_msp430::compute_abi_info(ccx, self), + "msp430" => cabi_msp430::compute_abi_info(self), "sparc" => cabi_sparc::compute_abi_info(ccx, self), "sparc64" => 
cabi_sparc64::compute_abi_info(ccx, self), - "nvptx" => cabi_nvptx::compute_abi_info(ccx, self), - "nvptx64" => cabi_nvptx64::compute_abi_info(ccx, self), - "hexagon" => cabi_hexagon::compute_abi_info(ccx, self), + "nvptx" => cabi_nvptx::compute_abi_info(self), + "nvptx64" => cabi_nvptx64::compute_abi_info(self), + "hexagon" => cabi_hexagon::compute_abi_info(self), a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a)) } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 2f252c5e55e0b..3c6626cfa7f3a 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -406,15 +406,13 @@ pub fn memcpy_ty<'a, 'tcx>( layout: TyLayout<'tcx>, align: Option, ) { - let ccx = bcx.ccx; - - let size = layout.size(ccx).bytes(); + let size = layout.size.bytes(); if size == 0 { return; } - let align = align.unwrap_or_else(|| layout.align(ccx)); - call_memcpy(bcx, dst, src, C_usize(ccx, size), align); + let align = align.unwrap_or(layout.align); + call_memcpy(bcx, dst, src, C_usize(bcx.ccx, size), align); } pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>, diff --git a/src/librustc_trans/cabi_aarch64.rs b/src/librustc_trans/cabi_aarch64.rs index b021a06072595..d5f341f968583 100644 --- a/src/librustc_trans/cabi_aarch64.rs +++ b/src/librustc_trans/cabi_aarch64.rs @@ -14,7 +14,7 @@ use context::CrateContext; fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) -> Option { arg.layout.homogeneous_aggregate(ccx).and_then(|unit| { - let size = arg.layout.size(ccx); + let size = arg.layout.size; // Ensure we have at most four uniquely addressable members. 
if size > unit.size.checked_mul(4, ccx).unwrap() { @@ -47,7 +47,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc ret.cast_to(uniform); return; } - let size = ret.layout.size(ccx); + let size = ret.layout.size; let bits = size.bits(); if bits <= 128 { let unit = if bits <= 8 { @@ -66,7 +66,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc }); return; } - ret.make_indirect(ccx); + ret.make_indirect(); } fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { @@ -78,7 +78,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc arg.cast_to(uniform); return; } - let size = arg.layout.size(ccx); + let size = arg.layout.size; let bits = size.bits(); if bits <= 128 { let unit = if bits <= 8 { @@ -97,7 +97,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc }); return; } - arg.make_indirect(ccx); + arg.make_indirect(); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { diff --git a/src/librustc_trans/cabi_arm.rs b/src/librustc_trans/cabi_arm.rs index 370a950617a1b..438053d63b51d 100644 --- a/src/librustc_trans/cabi_arm.rs +++ b/src/librustc_trans/cabi_arm.rs @@ -15,7 +15,7 @@ use llvm::CallConv; fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) -> Option { arg.layout.homogeneous_aggregate(ccx).and_then(|unit| { - let size = arg.layout.size(ccx); + let size = arg.layout.size; // Ensure we have at most four uniquely addressable members. 
if size > unit.size.checked_mul(4, ccx).unwrap() { @@ -52,7 +52,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } } - let size = ret.layout.size(ccx); + let size = ret.layout.size; let bits = size.bits(); if bits <= 32 { let unit = if bits <= 8 { @@ -68,7 +68,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc }); return; } - ret.make_indirect(ccx); + ret.make_indirect(); } fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, vfp: bool) { @@ -84,8 +84,8 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc } } - let align = arg.layout.align(ccx).abi(); - let total = arg.layout.size(ccx); + let align = arg.layout.align.abi(); + let total = arg.layout.size; arg.cast_to(Uniform { unit: if align <= 4 { Reg::i32() } else { Reg::i64() }, total diff --git a/src/librustc_trans/cabi_asmjs.rs b/src/librustc_trans/cabi_asmjs.rs index 047caa431c545..da13b75c414af 100644 --- a/src/librustc_trans/cabi_asmjs.rs +++ b/src/librustc_trans/cabi_asmjs.rs @@ -19,7 +19,7 @@ use context::CrateContext; fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { if ret.layout.is_aggregate() { if let Some(unit) = ret.layout.homogeneous_aggregate(ccx) { - let size = ret.layout.size(ccx); + let size = ret.layout.size; if unit.size == size { ret.cast_to(Uniform { unit, @@ -29,13 +29,13 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } } - ret.make_indirect(ccx); + ret.make_indirect(); } } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { +fn classify_arg_ty(arg: &mut ArgType) { if arg.layout.is_aggregate() { - arg.make_indirect(ccx); + arg.make_indirect(); arg.attrs.set(ArgAttribute::ByVal); } } @@ -47,6 +47,6 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType for arg in &mut fty.args { if arg.is_ignore() { continue; } - 
classify_arg_ty(ccx, arg); + classify_arg_ty(arg); } } diff --git a/src/librustc_trans/cabi_hexagon.rs b/src/librustc_trans/cabi_hexagon.rs index 1acda72675c31..7e7e483fea0c0 100644 --- a/src/librustc_trans/cabi_hexagon.rs +++ b/src/librustc_trans/cabi_hexagon.rs @@ -11,33 +11,32 @@ #![allow(non_upper_case_globals)] use abi::{FnType, ArgType, LayoutExt}; -use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { - if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 64 { - ret.make_indirect(ccx); +fn classify_ret_ty(ret: &mut ArgType) { + if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 { + ret.make_indirect(); } else { ret.extend_integer_width_to(32); } } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { - if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 64 { - arg.make_indirect(ccx); +fn classify_arg_ty(arg: &mut ArgType) { + if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 { + arg.make_indirect(); } else { arg.extend_integer_width_to(32); } } -pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { +pub fn compute_abi_info(fty: &mut FnType) { if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(&mut fty.ret); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg); + classify_arg_ty(arg); } } diff --git a/src/librustc_trans/cabi_mips.rs b/src/librustc_trans/cabi_mips.rs index baab70367419a..fe61670a1086f 100644 --- a/src/librustc_trans/cabi_mips.rs +++ b/src/librustc_trans/cabi_mips.rs @@ -19,15 +19,15 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { - ret.make_indirect(ccx); + ret.make_indirect(); *offset += ccx.tcx().data_layout.pointer_size; } } fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { let dl = 
&ccx.tcx().data_layout; - let size = arg.layout.size(ccx); - let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align); + let size = arg.layout.size; + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { arg.cast_to(Uniform { diff --git a/src/librustc_trans/cabi_mips64.rs b/src/librustc_trans/cabi_mips64.rs index 1cb63e72fb9be..16d0cfe072d57 100644 --- a/src/librustc_trans/cabi_mips64.rs +++ b/src/librustc_trans/cabi_mips64.rs @@ -19,15 +19,15 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, if !ret.layout.is_aggregate() { ret.extend_integer_width_to(64); } else { - ret.make_indirect(ccx); + ret.make_indirect(); *offset += ccx.tcx().data_layout.pointer_size; } } fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { let dl = &ccx.tcx().data_layout; - let size = arg.layout.size(ccx); - let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align); + let size = arg.layout.size; + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { arg.cast_to(Uniform { diff --git a/src/librustc_trans/cabi_msp430.rs b/src/librustc_trans/cabi_msp430.rs index 546bb5ad9b44e..d270886a19cd1 100644 --- a/src/librustc_trans/cabi_msp430.rs +++ b/src/librustc_trans/cabi_msp430.rs @@ -12,7 +12,6 @@ // http://www.ti.com/lit/an/slaa534/slaa534.pdf use abi::{ArgType, FnType, LayoutExt}; -use context::CrateContext; // 3.5 Structures or Unions Passed and Returned by Reference // @@ -20,31 +19,31 @@ use context::CrateContext; // returned by reference. To pass a structure or union by reference, the caller // places its address in the appropriate location: either in a register or on // the stack, according to its position in the argument list. 
(..)" -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { - if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 32 { - ret.make_indirect(ccx); +fn classify_ret_ty(ret: &mut ArgType) { + if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 { + ret.make_indirect(); } else { ret.extend_integer_width_to(16); } } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { - if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 32 { - arg.make_indirect(ccx); +fn classify_arg_ty(arg: &mut ArgType) { + if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 { + arg.make_indirect(); } else { arg.extend_integer_width_to(16); } } -pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { +pub fn compute_abi_info(fty: &mut FnType) { if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(&mut fty.ret); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg); + classify_arg_ty(arg); } } diff --git a/src/librustc_trans/cabi_nvptx.rs b/src/librustc_trans/cabi_nvptx.rs index 3873752b25470..69cfc690a9f9d 100644 --- a/src/librustc_trans/cabi_nvptx.rs +++ b/src/librustc_trans/cabi_nvptx.rs @@ -12,33 +12,32 @@ // http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability use abi::{ArgType, FnType, LayoutExt}; -use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { - if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 32 { - ret.make_indirect(ccx); +fn classify_ret_ty(ret: &mut ArgType) { + if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 { + ret.make_indirect(); } else { ret.extend_integer_width_to(32); } } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { - if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 32 { - arg.make_indirect(ccx); +fn classify_arg_ty(arg: &mut 
ArgType) { + if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 { + arg.make_indirect(); } else { arg.extend_integer_width_to(32); } } -pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { +pub fn compute_abi_info(fty: &mut FnType) { if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(&mut fty.ret); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg); + classify_arg_ty(arg); } } diff --git a/src/librustc_trans/cabi_nvptx64.rs b/src/librustc_trans/cabi_nvptx64.rs index 24bf4920c16c1..4d76c15603800 100644 --- a/src/librustc_trans/cabi_nvptx64.rs +++ b/src/librustc_trans/cabi_nvptx64.rs @@ -12,33 +12,32 @@ // http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability use abi::{ArgType, FnType, LayoutExt}; -use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { - if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 64 { - ret.make_indirect(ccx); +fn classify_ret_ty(ret: &mut ArgType) { + if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 { + ret.make_indirect(); } else { ret.extend_integer_width_to(64); } } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { - if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 64 { - arg.make_indirect(ccx); +fn classify_arg_ty(arg: &mut ArgType) { + if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 { + arg.make_indirect(); } else { arg.extend_integer_width_to(64); } } -pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { +pub fn compute_abi_info(fty: &mut FnType) { if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(&mut fty.ret); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg); + classify_arg_ty(arg); } } diff --git a/src/librustc_trans/cabi_powerpc.rs 
b/src/librustc_trans/cabi_powerpc.rs index df320fb00abe2..c3c8c745e3a61 100644 --- a/src/librustc_trans/cabi_powerpc.rs +++ b/src/librustc_trans/cabi_powerpc.rs @@ -19,15 +19,15 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { - ret.make_indirect(ccx); + ret.make_indirect(); *offset += ccx.tcx().data_layout.pointer_size; } } fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { let dl = &ccx.tcx().data_layout; - let size = arg.layout.size(ccx); - let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align); + let size = arg.layout.size; + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { arg.cast_to(Uniform { diff --git a/src/librustc_trans/cabi_powerpc64.rs b/src/librustc_trans/cabi_powerpc64.rs index 9a9d6f8d0ac46..2206a4fa00cc3 100644 --- a/src/librustc_trans/cabi_powerpc64.rs +++ b/src/librustc_trans/cabi_powerpc64.rs @@ -28,25 +28,23 @@ fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, abi: ABI) -> Option { arg.layout.homogeneous_aggregate(ccx).and_then(|unit| { - let size = arg.layout.size(ccx); - // ELFv1 only passes one-member aggregates transparently. // ELFv2 passes up to eight uniquely addressable members. 
- if (abi == ELFv1 && size > unit.size) - || size > unit.size.checked_mul(8, ccx).unwrap() { + if (abi == ELFv1 && arg.layout.size > unit.size) + || arg.layout.size > unit.size.checked_mul(8, ccx).unwrap() { return None; } let valid_unit = match unit.kind { RegKind::Integer => false, RegKind::Float => true, - RegKind::Vector => size.bits() == 128 + RegKind::Vector => arg.layout.size.bits() == 128 }; if valid_unit { Some(Uniform { unit, - total: size + total: arg.layout.size }) } else { None @@ -62,7 +60,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc // The ELFv1 ABI doesn't return aggregates in registers if abi == ELFv1 { - ret.make_indirect(ccx); + ret.make_indirect(); return; } @@ -71,7 +69,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc return; } - let size = ret.layout.size(ccx); + let size = ret.layout.size; let bits = size.bits(); if bits <= 128 { let unit = if bits <= 8 { @@ -91,7 +89,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc return; } - ret.make_indirect(ccx); + ret.make_indirect(); } fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, abi: ABI) { @@ -105,7 +103,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc return; } - let size = arg.layout.size(ccx); + let size = arg.layout.size; let (unit, total) = match abi { ELFv1 => { // In ELFv1, aggregates smaller than a doubleword should appear in diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs index ed598e0a86b68..9c24b637efd40 100644 --- a/src/librustc_trans/cabi_s390x.rs +++ b/src/librustc_trans/cabi_s390x.rs @@ -16,11 +16,11 @@ use context::CrateContext; use rustc::ty::layout::{self, TyLayout}; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { - if !ret.layout.is_aggregate() && ret.layout.size(ccx).bits() <= 64 { +fn classify_ret_ty(ret: &mut ArgType) { 
+ if !ret.layout.is_aggregate() && ret.layout.size.bits() <= 64 { ret.extend_integer_width_to(64); } else { - ret.make_indirect(ccx); + ret.make_indirect(); } } @@ -41,32 +41,31 @@ fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { - let size = arg.layout.size(ccx); - if !arg.layout.is_aggregate() && size.bits() <= 64 { + if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 { arg.extend_integer_width_to(64); return; } if is_single_fp_element(ccx, arg.layout) { - match size.bytes() { + match arg.layout.size.bytes() { 4 => arg.cast_to(Reg::f32()), 8 => arg.cast_to(Reg::f64()), - _ => arg.make_indirect(ccx) + _ => arg.make_indirect() } } else { - match size.bytes() { + match arg.layout.size.bytes() { 1 => arg.cast_to(Reg::i8()), 2 => arg.cast_to(Reg::i16()), 4 => arg.cast_to(Reg::i32()), 8 => arg.cast_to(Reg::i64()), - _ => arg.make_indirect(ccx) + _ => arg.make_indirect() } } } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(&mut fty.ret); } for arg in &mut fty.args { diff --git a/src/librustc_trans/cabi_sparc.rs b/src/librustc_trans/cabi_sparc.rs index baab70367419a..fe61670a1086f 100644 --- a/src/librustc_trans/cabi_sparc.rs +++ b/src/librustc_trans/cabi_sparc.rs @@ -19,15 +19,15 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { - ret.make_indirect(ccx); + ret.make_indirect(); *offset += ccx.tcx().data_layout.pointer_size; } } fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { let dl = &ccx.tcx().data_layout; - let size = arg.layout.size(ccx); - let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align); + let size = arg.layout.size; + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); if 
arg.layout.is_aggregate() { arg.cast_to(Uniform { diff --git a/src/librustc_trans/cabi_sparc64.rs b/src/librustc_trans/cabi_sparc64.rs index 788fba9dc2628..7c52e27fa67d1 100644 --- a/src/librustc_trans/cabi_sparc64.rs +++ b/src/librustc_trans/cabi_sparc64.rs @@ -16,23 +16,21 @@ use context::CrateContext; fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) -> Option { arg.layout.homogeneous_aggregate(ccx).and_then(|unit| { - let size = arg.layout.size(ccx); - // Ensure we have at most eight uniquely addressable members. - if size > unit.size.checked_mul(8, ccx).unwrap() { + if arg.layout.size > unit.size.checked_mul(8, ccx).unwrap() { return None; } let valid_unit = match unit.kind { RegKind::Integer => false, RegKind::Float => true, - RegKind::Vector => size.bits() == 128 + RegKind::Vector => arg.layout.size.bits() == 128 }; if valid_unit { Some(Uniform { unit, - total: size + total: arg.layout.size }) } else { None @@ -50,7 +48,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc ret.cast_to(uniform); return; } - let size = ret.layout.size(ccx); + let size = ret.layout.size; let bits = size.bits(); if bits <= 128 { let unit = if bits <= 8 { @@ -71,7 +69,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } // don't return aggregates in registers - ret.make_indirect(ccx); + ret.make_indirect(); } fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { @@ -85,7 +83,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc return; } - let total = arg.layout.size(ccx); + let total = arg.layout.size; arg.cast_to(Uniform { unit: Reg::i64(), total diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index 26f130ec75542..401e75387c49d 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -50,27 +50,25 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: 
&CrateContext<'a, 'tcx>, let t = &ccx.sess().target.target; if t.options.is_like_osx || t.options.is_like_windows || t.options.is_like_openbsd { - let size = fty.ret.layout.size(ccx); - // According to Clang, everyone but MSVC returns single-element // float aggregates directly in a floating-point register. if !t.options.is_like_msvc && is_single_fp_element(ccx, fty.ret.layout) { - match size.bytes() { + match fty.ret.layout.size.bytes() { 4 => fty.ret.cast_to(Reg::f32()), 8 => fty.ret.cast_to(Reg::f64()), - _ => fty.ret.make_indirect(ccx) + _ => fty.ret.make_indirect() } } else { - match size.bytes() { + match fty.ret.layout.size.bytes() { 1 => fty.ret.cast_to(Reg::i8()), 2 => fty.ret.cast_to(Reg::i16()), 4 => fty.ret.cast_to(Reg::i32()), 8 => fty.ret.cast_to(Reg::i64()), - _ => fty.ret.make_indirect(ccx) + _ => fty.ret.make_indirect() } } } else { - fty.ret.make_indirect(ccx); + fty.ret.make_indirect(); } } else { fty.ret.extend_integer_width_to(32); @@ -80,7 +78,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, for arg in &mut fty.args { if arg.is_ignore() { continue; } if arg.layout.is_aggregate() { - arg.make_indirect(ccx); + arg.make_indirect(); arg.attrs.set(ArgAttribute::ByVal); } else { arg.extend_integer_width_to(32); @@ -104,13 +102,12 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // At this point we know this must be a primitive of sorts. 
let unit = arg.layout.homogeneous_aggregate(ccx).unwrap(); - let size = arg.layout.size(ccx); - assert_eq!(unit.size, size); + assert_eq!(unit.size, arg.layout.size); if unit.kind == RegKind::Float { continue; } - let size_in_regs = (size.bits() + 31) / 32; + let size_in_regs = (arg.layout.size.bits() + 31) / 32; if size_in_regs == 0 { continue; @@ -122,7 +119,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, free_regs -= size_in_regs; - if size.bits() <= 32 && unit.kind == RegKind::Integer { + if arg.layout.size.bits() <= 32 && unit.kind == RegKind::Integer { arg.attrs.set(ArgAttribute::InReg); } diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 45f2b39b982d0..36ac76aaaa561 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -57,7 +57,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) cls: &mut [Class], off: Size) -> Result<(), Memory> { - if !off.is_abi_aligned(layout.align(ccx)) { + if !off.is_abi_aligned(layout.align) { if !layout.is_zst() { return Err(Memory); } @@ -106,7 +106,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) Ok(()) } - let n = ((arg.layout.size(ccx).bytes() + 7) / 8) as usize; + let n = ((arg.layout.size.bytes() + 7) / 8) as usize; if n > MAX_EIGHTBYTES { return Err(Memory); } @@ -213,7 +213,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType }; if in_mem { - arg.make_indirect(ccx); + arg.make_indirect(); if is_arg { arg.attrs.set(ArgAttribute::ByVal); } else { @@ -226,7 +226,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType sse_regs -= needed_sse; if arg.layout.is_aggregate() { - let size = arg.layout.size(ccx); + let size = arg.layout.size; arg.cast_to(cast_target(cls.as_ref().unwrap(), size)) } else { arg.extend_integer_width_to(32); diff --git a/src/librustc_trans/cabi_x86_win64.rs 
b/src/librustc_trans/cabi_x86_win64.rs index b27ccc98861aa..c6d0e5e3a0735 100644 --- a/src/librustc_trans/cabi_x86_win64.rs +++ b/src/librustc_trans/cabi_x86_win64.rs @@ -9,23 +9,21 @@ // except according to those terms. use abi::{ArgType, FnType, Reg}; -use common::CrateContext; use rustc::ty::layout; // Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx -pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { - let fixup = |a: &mut ArgType<'tcx>| { - let size = a.layout.size(ccx); +pub fn compute_abi_info(fty: &mut FnType) { + let fixup = |a: &mut ArgType| { match a.layout.abi { layout::Abi::Aggregate { .. } => { - match size.bits() { + match a.layout.size.bits() { 8 => a.cast_to(Reg::i8()), 16 => a.cast_to(Reg::i16()), 32 => a.cast_to(Reg::i32()), 64 => a.cast_to(Reg::i64()), - _ => a.make_indirect(ccx) + _ => a.make_indirect() } } layout::Abi::Vector { .. } => { @@ -33,8 +31,8 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType // (probably what clang calls "illegal vectors"). 
} layout::Abi::Scalar(_) => { - if size.bytes() > 8 { - a.make_indirect(ccx); + if a.layout.size.bytes() > 8 { + a.make_indirect(); } else { a.extend_integer_width_to(32); } diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 1bb8aec92e57f..6a7b35c05e70c 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -430,16 +430,16 @@ fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, cx.tcx().mk_mut_ptr(cx.tcx().types.u8), syntax_pos::DUMMY_SP), offset: layout.fields.offset(0), - size: data_ptr_field.size(cx), - align: data_ptr_field.align(cx), + size: data_ptr_field.size, + align: data_ptr_field.align, flags: DIFlags::FlagArtificial, }, MemberDescription { name: "vtable".to_string(), type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP), offset: layout.fields.offset(1), - size: vtable_field.size(cx), - align: vtable_field.align(cx), + size: vtable_field.size, + align: vtable_field.align, flags: DIFlags::FlagArtificial, }, ]; @@ -946,7 +946,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { f.name.to_string() }; let field = layout.field(cx, i); - let (size, align) = field.size_and_align(cx); + let (size, align) = field.size_and_align(); MemberDescription { name, type_metadata: type_metadata(cx, field.ty, self.span), @@ -1062,7 +1062,7 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> { -> Vec { self.variant.fields.iter().enumerate().map(|(i, f)| { let field = self.layout.field(cx, i); - let (size, align) = field.size_and_align(cx); + let (size, align) = field.size_and_align(); MemberDescription { name: f.name.to_string(), type_metadata: type_metadata(cx, field.ty, self.span), @@ -1153,8 +1153,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { name: "".to_string(), type_metadata: variant_type_metadata, offset: Size::from_bytes(0), - size: variant.size(cx), - align: variant.align(cx), + size: variant.size, + align: variant.align, flags: 
DIFlags::FlagZero } }).collect() @@ -1184,8 +1184,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { name: "".to_string(), type_metadata: variant_type_metadata, offset: Size::from_bytes(0), - size: self.type_rep.size(cx), - align: self.type_rep.align(cx), + size: self.type_rep.size, + align: self.type_rep.align, flags: DIFlags::FlagZero } ] @@ -1230,7 +1230,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } let inner_offset = offset - field_offset; let field = layout.field(ccx, i); - if inner_offset + size <= field.size(ccx) { + if inner_offset + size <= field.size { write!(name, "{}$", i).unwrap(); compute_field_path(ccx, name, field, inner_offset, size); } @@ -1248,8 +1248,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { name, type_metadata: variant_type_metadata, offset: Size::from_bytes(0), - size: variant.size(cx), - align: variant.align(cx), + size: variant.size, + align: variant.align, flags: DIFlags::FlagZero } ] @@ -1443,7 +1443,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, _ => {} } - let (enum_type_size, enum_type_align) = type_rep.size_and_align(cx); + let (enum_type_size, enum_type_align) = type_rep.size_and_align(); let enum_name = CString::new(enum_name).unwrap(); let unique_type_id_str = CString::new( diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index f374ed90c342d..6c7d7700adeb2 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -60,7 +60,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf let i = layout.fields.count() - 1; let sized_size = layout.fields.offset(i).bytes(); - let sized_align = layout.align(ccx).abi(); + let sized_align = layout.align.abi(); debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align); let sized_size = C_usize(ccx, sized_size); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 0528bf972de0c..e775c4897f748 100644 --- 
a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -245,7 +245,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let load = bcx.load( bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()), - Some(self.fn_ty.ret.layout.align(bcx.ccx))); + Some(self.fn_ty.ret.layout.align)); load } else { let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER)); @@ -672,7 +672,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { llval = base::to_immediate(bcx, llval, arg.layout); } else if let Some(ty) = arg.cast { llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()), - (align | Alignment::Packed(arg.layout.align(bcx.ccx))) + (align | Alignment::Packed(arg.layout.align)) .non_abi()); } else { llval = bcx.load(llval, align.non_abi()); @@ -892,7 +892,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let src = self.trans_operand(bcx, src); let llty = src.layout.llvm_type(bcx.ccx); let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); - let align = src.layout.align(bcx.ccx).min(dst.layout.align(bcx.ccx)); + let align = src.layout.align.min(dst.layout.align); src.val.store(bcx, LvalueRef::new_sized(cast_ptr, src.layout, Alignment::Packed(align))); } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index cc6b84a671527..45037a1f19836 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1116,7 +1116,7 @@ fn trans_const_adt<'a, 'tcx>( assert_eq!(variant_index, 0); let contents = [ vals[0].llval, - padding(ccx, l.size(ccx) - ccx.size_of(vals[0].ty)) + padding(ccx, l.size - ccx.size_of(vals[0].ty)) ]; Const::new(C_struct(ccx, &contents, l.is_packed()), t) diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 7c0b2748a7feb..f10791cae5260 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -56,8 +56,8 @@ impl ops::BitOr for Alignment { impl<'a> From> for Alignment { fn from(layout: TyLayout) -> Self { - 
if let layout::Abi::Aggregate { packed: true, align, .. } = layout.abi { - Alignment::Packed(align) + if layout.is_packed() { + Alignment::Packed(layout.align) } else { Alignment::AbiAligned } @@ -109,7 +109,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { -> LvalueRef<'tcx> { debug!("alloca({:?}: {:?})", name, layout); let tmp = bcx.alloca( - layout.llvm_type(bcx.ccx), name, layout.over_align(bcx.ccx)); + layout.llvm_type(bcx.ccx), name, layout.over_align()); Self::new_sized(tmp, layout, Alignment::AbiAligned) } @@ -374,7 +374,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { // than storing null to single target field. let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to()); let fill_byte = C_u8(bcx.ccx, 0); - let (size, align) = self.layout.size_and_align(bcx.ccx); + let (size, align) = self.layout.size_and_align(); let size = C_usize(bcx.ccx, size.bytes()); let align = C_u32(bcx.ccx, align.abi() as u32); base::call_memset(bcx, llptr, fill_byte, size, align, false); @@ -414,11 +414,11 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } pub fn storage_live(&self, bcx: &Builder<'a, 'tcx>) { - bcx.lifetime_start(self.llval, self.layout.size(bcx.ccx)); + bcx.lifetime_start(self.llval, self.layout.size); } pub fn storage_dead(&self, bcx: &Builder<'a, 'tcx>) { - bcx.lifetime_end(self.llval, self.layout.size(bcx.ccx)); + bcx.lifetime_end(self.llval, self.layout.size); } } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index b68cd3a6ae5d1..0d18311521174 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -107,9 +107,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let OperandValue::Immediate(v) = tr_elem.val { let align = dest.alignment.non_abi() - .unwrap_or_else(|| tr_elem.layout.align(bcx.ccx)); + .unwrap_or(tr_elem.layout.align); let align = C_i32(bcx.ccx, align.abi() as i32); - let size = C_usize(bcx.ccx, dest.layout.size(bcx.ccx).bytes()); + let size = C_usize(bcx.ccx, dest.layout.size.bytes()); // Use llvm.memset.p0i8.* 
to initialize all zero arrays if common::is_const_integral(v) && common::const_to_uint(v) == 0 { diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 77cc3897c9be8..52151035a826f 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -50,7 +50,7 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, match layout.fields { layout::FieldPlacement::Union(_) => { - let size = layout.size(ccx).bytes(); + let size = layout.size.bytes(); let fill = Type::array(&Type::i8(ccx), size); match name { None => { @@ -84,8 +84,6 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> Vec { debug!("struct_llfields: {:#?}", layout); - let align = layout.align(ccx); - let size = layout.size(ccx); let field_count = layout.fields.count(); let mut offset = Size::from_bytes(0); @@ -105,27 +103,26 @@ fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, if layout.is_packed() { assert_eq!(padding.bytes(), 0); } else { - let field_align = field.align(ccx); - assert!(field_align.abi() <= align.abi(), + assert!(field.align.abi() <= layout.align.abi(), "non-packed type has field with larger align ({}): {:#?}", - field_align.abi(), layout); + field.align.abi(), layout); } - offset = target_offset + field.size(ccx); + offset = target_offset + field.size; } if !layout.is_unsized() && field_count > 0 { - if offset > size { + if offset > layout.size { bug!("layout: {:#?} stride: {:?} offset: {:?}", - layout, size, offset); + layout, layout.size, offset); } - let padding = size - offset; + let padding = layout.size - offset; debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", - padding, offset, size); + padding, offset, layout.size); result.push(Type::array(&Type::i8(ccx), padding.bytes())); assert!(result.len() == 1 + field_count * 2); } else { debug!("struct_llfields: offset: {:?} stride: {:?}", - offset, size); + 
offset, layout.size); } result @@ -133,16 +130,15 @@ fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, impl<'a, 'tcx> CrateContext<'a, 'tcx> { pub fn align_of(&self, ty: Ty<'tcx>) -> Align { - self.layout_of(ty).align(self) + self.layout_of(ty).align } pub fn size_of(&self, ty: Ty<'tcx>) -> Size { - self.layout_of(ty).size(self) + self.layout_of(ty).size } pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) { - let layout = self.layout_of(ty); - (layout.size(self), layout.align(self)) + self.layout_of(ty).size_and_align() } } @@ -150,7 +146,7 @@ pub trait LayoutLlvmExt<'tcx> { fn is_llvm_immediate(&self) -> bool; fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; - fn over_align(&self, ccx: &CrateContext) -> Option; + fn over_align(&self) -> Option; fn llvm_field_index(&self, index: usize) -> u64; } @@ -251,11 +247,9 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } } - fn over_align(&self, ccx: &CrateContext) -> Option { - let align = self.align(ccx); - let primitive_align = self.primitive_align(ccx); - if align != primitive_align { - Some(align) + fn over_align(&self) -> Option { + if self.align != self.primitive_align { + Some(self.align) } else { None } From 018323ffc2c38669f594b8f7025a3440ae529d2a Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sat, 23 Sep 2017 01:54:45 +0300 Subject: [PATCH 42/69] rustc: collapse the remains of Layout into Variants (enums vs everything else). 
--- src/librustc/ty/layout.rs | 186 ++++++++--------------- src/librustc_lint/types.rs | 4 +- src/librustc_trans/cabi_x86_64.rs | 22 ++- src/librustc_trans/debuginfo/metadata.rs | 98 ++++++------ src/librustc_trans/intrinsic.rs | 7 +- src/librustc_trans/mir/constant.rs | 41 ++--- src/librustc_trans/mir/lvalue.rs | 38 +++-- src/librustc_trans/mir/rvalue.rs | 9 +- src/librustc_trans/type_of.rs | 19 ++- 9 files changed, 188 insertions(+), 236 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index dbad77b904ee0..bfde8a58e4911 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -769,33 +769,17 @@ impl Abi { } } -/// Type layout, from which size and alignment can be cheaply computed. -/// For ADTs, it also includes field placement and enum optimizations. -/// NOTE: Because Layout is interned, redundant information should be -/// kept to a minimum, e.g. it includes no sub-component Ty or Layout. #[derive(PartialEq, Eq, Hash, Debug)] -pub enum Layout { - /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr. - Scalar, - - /// SIMD vectors, from structs marked with #[repr(simd)]. - Vector, - - /// TyArray, TySlice or TyStr. - Array, - - // Remaining variants are all ADTs such as structs, enums or tuples. - - /// Single-case enums, and structs/tuples. - Univariant, - - /// Untagged unions. - UntaggedUnion, +pub enum Variants { + /// Single enum variants, structs/tuples, unions, and all non-ADTs. + Single { + index: usize + }, /// General-case enums: for each case there is a struct, and they all have /// all space reserved for the discriminant, and their first field starts /// at a non-0 offset, after where the discriminant would go. - General { + Tagged { discr: Primitive, /// Inclusive wrap-around range of discriminant values, that is, /// if min > max, it represents min..=u64::MAX followed by 0..=max. 
@@ -806,7 +790,7 @@ pub enum Layout { variants: Vec, }, - /// Two cases distinguished by a nullable pointer: the case with discriminant + /// Two cases distinguished by a niche: the case with discriminant /// `nndiscr` is represented by the struct `nonnull`, where field `0` /// is known to be nonnull due to its type; if that field is null, then /// it represents the other case, which is known to be zero sized. @@ -814,7 +798,7 @@ pub enum Layout { /// For example, `std::option::Option` instantiated at a safe pointer type /// is represented such that `None` is a null pointer and `Some` is the /// identity function. - NullablePointer { + NicheFilling { nndiscr: u64, discr: Primitive, variants: Vec, @@ -842,8 +826,7 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { #[derive(PartialEq, Eq, Hash, Debug)] pub struct CachedLayout { - pub variant_index: Option, - pub layout: Layout, + pub variants: Variants, pub fields: FieldPlacement, pub abi: Abi, pub align: Align, @@ -865,7 +848,7 @@ fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } tcx.layout_depth.set(depth+1); - let layout = Layout::compute_uncached(tcx, param_env, ty); + let layout = CachedLayout::compute_uncached(tcx, param_env, ty); tcx.layout_depth.set(depth); layout @@ -878,18 +861,17 @@ pub fn provide(providers: &mut ty::maps::Providers) { }; } -impl<'a, 'tcx> Layout { +impl<'a, 'tcx> CachedLayout { fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) - -> Result<&'tcx CachedLayout, LayoutError<'tcx>> { + -> Result<&'tcx Self, LayoutError<'tcx>> { let cx = (tcx, param_env); let dl = cx.data_layout(); let scalar = |value: Primitive| { let align = value.align(dl); tcx.intern_layout(CachedLayout { - variant_index: None, - layout: Layout::Scalar, + variants: Variants::Single { index: 0 }, fields: FieldPlacement::Union(0), abi: Abi::Scalar(value), size: value.size(dl), @@ -1028,8 +1010,7 @@ impl<'a, 'tcx> Layout { } Ok(CachedLayout { - variant_index: None, - layout: 
Layout::Univariant, + variants: Variants::Single { index: 0 }, fields: FieldPlacement::Arbitrary { offsets, memory_index @@ -1073,8 +1054,7 @@ impl<'a, 'tcx> Layout { memory_index: vec![0, 1] }; Ok(tcx.intern_layout(CachedLayout { - variant_index: None, - layout: Layout::Univariant, + variants: Variants::Single { index: 0 }, fields, abi: Abi::Aggregate { sized: true, @@ -1129,8 +1109,7 @@ impl<'a, 'tcx> Layout { .ok_or(LayoutError::SizeOverflow(ty))?; tcx.intern_layout(CachedLayout { - variant_index: None, - layout: Layout::Array, + variants: Variants::Single { index: 0 }, fields: FieldPlacement::Array { stride: element.size, count @@ -1147,8 +1126,7 @@ impl<'a, 'tcx> Layout { ty::TySlice(element) => { let element = cx.layout_of(element)?; tcx.intern_layout(CachedLayout { - variant_index: None, - layout: Layout::Array, + variants: Variants::Single { index: 0 }, fields: FieldPlacement::Array { stride: element.size, count: 0 @@ -1164,8 +1142,7 @@ impl<'a, 'tcx> Layout { } ty::TyStr => { tcx.intern_layout(CachedLayout { - variant_index: None, - layout: Layout::Array, + variants: Variants::Single { index: 0 }, fields: FieldPlacement::Array { stride: Size::from_bytes(1), count: 0 @@ -1238,8 +1215,7 @@ impl<'a, 'tcx> Layout { let size = size.abi_align(align); tcx.intern_layout(CachedLayout { - variant_index: None, - layout: Layout::Vector, + variants: Variants::Single { index: 0 }, fields: FieldPlacement::Array { stride: element.size, count @@ -1302,8 +1278,7 @@ impl<'a, 'tcx> Layout { } return Ok(tcx.intern_layout(CachedLayout { - variant_index: None, - layout: Layout::UntaggedUnion, + variants: Variants::Single { index: 0 }, fields: FieldPlacement::Union(variants[0].len()), abi: Abi::Aggregate { sized: true, @@ -1332,11 +1307,7 @@ impl<'a, 'tcx> Layout { else { StructKind::AlwaysSized } }; - let mut cached = univariant_uninterned(&variants[0], &def.repr, kind)?; - if def.is_enum() { - cached.variant_index = Some(0); - } - return Ok(tcx.intern_layout(cached)); + return 
univariant(&variants[0], &def.repr, kind); } let no_explicit_discriminants = def.variants.iter().enumerate() @@ -1359,8 +1330,9 @@ impl<'a, 'tcx> Layout { univariant_uninterned(&variants[1], &def.repr, StructKind::AlwaysSized)? ]; - st[0].variant_index = Some(0); - st[1].variant_index = Some(1); + for (i, v) in st.iter_mut().enumerate() { + v.variants = Variants::Single { index: i }; + } let offset = st[i].fields.offset(field_index) + offset; let CachedLayout { mut abi, @@ -1383,8 +1355,7 @@ impl<'a, 'tcx> Layout { primitive_align = primitive_align.max(discr_align); return Ok(tcx.intern_layout(CachedLayout { - variant_index: None, - layout: Layout::NullablePointer { + variants: Variants::NicheFilling { nndiscr: i as u64, discr, @@ -1426,7 +1397,7 @@ impl<'a, 'tcx> Layout { let mut variants = variants.into_iter().enumerate().map(|(i, field_layouts)| { let mut st = univariant_uninterned(&field_layouts, &def.repr, StructKind::EnumVariant(min_ity))?; - st.variant_index = Some(i); + st.variants = Variants::Single { index: i }; // Find the first field we can't move later // to make room for a larger discriminant. for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) { @@ -1506,8 +1477,7 @@ impl<'a, 'tcx> Layout { let discr = Int(ity, signed); tcx.intern_layout(CachedLayout { - variant_index: None, - layout: Layout::General { + variants: Variants::Tagged { discr, // FIXME: should be u128? @@ -1544,7 +1514,7 @@ impl<'a, 'tcx> Layout { return Err(LayoutError::Unknown(ty)); } ty::TyInfer(_) | ty::TyError => { - bug!("Layout::compute: unexpected type `{}`", ty) + bug!("CachedLayout::compute: unexpected type `{}`", ty) } }) } @@ -1650,8 +1620,8 @@ impl<'a, 'tcx> Layout { } }; - match layout.layout { - Layout::Univariant => { + match layout.variants { + Variants::Single { .. } => { let variant_names = || { adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::>() }; @@ -1675,8 +1645,8 @@ impl<'a, 'tcx> Layout { } } - Layout::NullablePointer { .. 
} | - Layout::General { .. } => { + Variants::NicheFilling { .. } | + Variants::Tagged { .. } => { debug!("print-type-size `{:#?}` adt general variants def {}", ty, adt_def.variants.len()); let variant_infos: Vec<_> = @@ -1688,27 +1658,11 @@ impl<'a, 'tcx> Layout { layout.for_variant(i)) }) .collect(); - record(adt_kind.into(), match layout.layout { - Layout::General { discr, .. } => Some(discr.size(tcx)), + record(adt_kind.into(), match layout.variants { + Variants::Tagged { discr, .. } => Some(discr.size(tcx)), _ => None }, variant_infos); } - - Layout::UntaggedUnion => { - debug!("print-type-size t: `{:?}` adt union", ty); - // layout does not currently store info about each - // variant... - record(adt_kind.into(), None, Vec::new()); - } - - // other cases provide little interesting (i.e. adjustable - // via representation tweaks) size info beyond total size. - Layout::Scalar | - Layout::Vector | - Layout::Array => { - debug!("print-type-size t: `{:?}` adt other", ty); - record(adt_kind.into(), None, Vec::new()) - } } } } @@ -1950,7 +1904,7 @@ impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx // completed, to avoid problems around recursive structures // and the like. (Admitedly, I wasn't able to reproduce a problem // here, but it seems like the right thing to do. -nmatsakis) - Layout::record_layout_for_printing(tcx, ty, param_env, layout); + CachedLayout::record_layout_for_printing(tcx, ty, param_env, layout); Ok(layout) } @@ -1979,7 +1933,7 @@ impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, // completed, to avoid problems around recursive structures // and the like. (Admitedly, I wasn't able to reproduce a problem // here, but it seems like the right thing to do. 
-nmatsakis) - Layout::record_layout_for_printing(tcx_at.tcx, ty, param_env, layout); + CachedLayout::record_layout_for_printing(tcx_at.tcx, ty, param_env, layout); Ok(layout) } @@ -1987,15 +1941,15 @@ impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, impl<'a, 'tcx> TyLayout<'tcx> { pub fn for_variant(&self, variant_index: usize) -> Self { - let cached = match self.layout { - Layout::NullablePointer { ref variants, .. } | - Layout::General { ref variants, .. } => { + let cached = match self.variants { + Variants::Single { .. } => self.cached, + + Variants::NicheFilling { ref variants, .. } | + Variants::Tagged { ref variants, .. } => { &variants[variant_index] } - - _ => self.cached }; - assert_eq!(cached.variant_index, Some(variant_index)); + assert_eq!(cached.variants, Variants::Single { index: variant_index }); TyLayout { ty: self.ty, @@ -2081,26 +2035,17 @@ impl<'a, 'tcx> TyLayout<'tcx> { // ADTs. ty::TyAdt(def, substs) => { - let v = if def.is_enum() { - match self.variant_index { - None => match self.layout { - // Discriminant field for enums (where applicable). - Layout::General { discr, .. } | - Layout::NullablePointer { discr, .. } => { - return cx.layout_of([discr.to_ty(tcx)][i]); - } - _ => { - bug!("TyLayout::field_type: enum `{}` has no discriminant", - self.ty) - } - }, - Some(v) => v + match self.variants { + Variants::Single { index } => { + def.variants[index].fields[i].ty(tcx, substs) } - } else { - 0 - }; - def.variants[v].fields[i].ty(tcx, substs) + // Discriminant field for enums (where applicable). + Variants::Tagged { discr, .. } | + Variants::NicheFilling { discr, .. } => { + return cx.layout_of([discr.to_ty(tcx)][i]); + } + } } ty::TyProjection(_) | ty::TyAnon(..) 
| ty::TyParam(_) | @@ -2143,18 +2088,18 @@ impl<'a, 'tcx> TyLayout<'tcx> { HasTyCtxt<'tcx> { let tcx = cx.tcx(); - match (&self.layout, self.abi, &self.ty.sty) { + match (&self.variants, self.abi, &self.ty.sty) { // FIXME(eddyb) check this via value ranges on scalars. - (&Layout::Scalar, Abi::Scalar(Pointer), &ty::TyRef(..)) | - (&Layout::Scalar, Abi::Scalar(Pointer), &ty::TyFnPtr(..)) => { + (_, Abi::Scalar(Pointer), &ty::TyRef(..)) | + (_, Abi::Scalar(Pointer), &ty::TyFnPtr(..)) => { Ok(Some((Size::from_bytes(0), Pointer))) } - (&Layout::Scalar, Abi::Scalar(Pointer), &ty::TyAdt(def, _)) if def.is_box() => { + (_, Abi::Scalar(Pointer), &ty::TyAdt(def, _)) if def.is_box() => { Ok(Some((Size::from_bytes(0), Pointer))) } // FIXME(eddyb) check this via value ranges on scalars. - (&Layout::General { discr, .. }, _, &ty::TyAdt(def, _)) => { + (&Variants::Tagged { discr, .. }, _, &ty::TyAdt(def, _)) => { if def.discriminants(tcx).all(|d| d.to_u128_unchecked() != 0) { Ok(Some((self.fields.offset(0), discr))) } else { @@ -2196,20 +2141,18 @@ impl<'a, 'tcx> TyLayout<'tcx> { } } -impl<'gcx> HashStable> for Layout { +impl<'gcx> HashStable> for Variants { fn hash_stable(&self, hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { - use ty::layout::Layout::*; + use ty::layout::Variants::*; mem::discriminant(self).hash_stable(hcx, hasher); match *self { - Scalar => {} - Vector => {} - Array => {} - Univariant => {} - UntaggedUnion => {} - General { + Single { index } => { + index.hash_stable(hcx, hasher); + } + Tagged { discr, discr_range: RangeInclusive { start, end }, ref variants, @@ -2219,7 +2162,7 @@ impl<'gcx> HashStable> for Layout { end.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); } - NullablePointer { + NicheFilling { nndiscr, ref variants, ref discr, @@ -2279,8 +2222,7 @@ impl<'gcx> HashStable> for Abi { } impl_stable_hash_for!(struct ::ty::layout::CachedLayout { - variant_index, - layout, + variants, fields, abi, size, diff --git 
a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index 761ca662178f9..46debcce95843 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -13,7 +13,7 @@ use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::ty::{self, AdtKind, Ty, TyCtxt}; -use rustc::ty::layout::{Layout, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf}; use middle::const_val::ConstVal; use rustc_const_eval::ConstContext; use util::nodemap::FxHashSet; @@ -753,7 +753,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences { bug!("failed to get layout for `{}`: {}", t, e) }); - if let Layout::General { ref variants, discr, .. } = layout.layout { + if let layout::Variants::Tagged { ref variants, discr, .. } = layout.variants { let discr_size = discr.size(cx.tcx).bytes(); debug!("enum `{}` is {} bytes large with layout:\n{:#?}", diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 36ac76aaaa561..d5a51fa1863e2 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -14,7 +14,7 @@ use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind}; use context::CrateContext; -use rustc::ty::layout::{self, Layout, TyLayout, Size}; +use rustc::ty::layout::{self, TyLayout, Size}; #[derive(Clone, Copy, PartialEq, Debug)] enum Class { @@ -87,17 +87,15 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) } layout::Abi::Aggregate { .. } => { - // FIXME(eddyb) have to work around Rust enums for now. - // Fix is either guarantee no data where there is no field, - // by putting variants in fields, or be more clever. - match layout.layout { - Layout::General { .. } | - Layout::NullablePointer { .. } => return Err(Memory), - _ => {} - } - for i in 0..layout.fields.count() { - let field_off = off + layout.fields.offset(i); - classify(ccx, layout.field(ccx, i), cls, field_off)?; + match layout.variants { + layout::Variants::Single { .. 
} => { + for i in 0..layout.fields.count() { + let field_off = off + layout.fields.offset(i); + classify(ccx, layout.field(ccx, i), cls, field_off)?; + } + } + layout::Variants::Tagged { .. } | + layout::Variants::NicheFilling { .. } => return Err(Memory), } } diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 6a7b35c05e70c..a905d35f3d326 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1119,7 +1119,7 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // offset of zero bytes). struct EnumMemberDescriptionFactory<'tcx> { enum_type: Ty<'tcx>, - type_rep: TyLayout<'tcx>, + layout: TyLayout<'tcx>, discriminant_type_metadata: Option, containing_scope: DIScope, span: Span, @@ -1129,37 +1129,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { let adt = &self.enum_type.ty_adt_def().unwrap(); - match self.type_rep.layout { - layout::Layout::General { ref variants, .. } => { - let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata - .expect("")); - (0..variants.len()).map(|i| { - let variant = self.type_rep.for_variant(i); - let (variant_type_metadata, member_desc_factory) = - describe_enum_variant(cx, - variant, - &adt.variants[i], - discriminant_info, - self.containing_scope, - self.span); - - let member_descriptions = member_desc_factory - .create_member_descriptions(cx); - - set_members_of_composite_type(cx, - variant_type_metadata, - &member_descriptions); - MemberDescription { - name: "".to_string(), - type_metadata: variant_type_metadata, - offset: Size::from_bytes(0), - size: variant.size, - align: variant.align, - flags: DIFlags::FlagZero - } - }).collect() - }, - layout::Layout::Univariant => { + match self.layout.variants { + layout::Variants::Single { .. 
} => { assert!(adt.variants.len() <= 1); if adt.variants.is_empty() { @@ -1167,7 +1138,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } else { let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, - self.type_rep, + self.layout, &adt.variants[0], NoDiscriminant, self.containing_scope, @@ -1184,19 +1155,48 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { name: "".to_string(), type_metadata: variant_type_metadata, offset: Size::from_bytes(0), - size: self.type_rep.size, - align: self.type_rep.align, + size: self.layout.size, + align: self.layout.align, flags: DIFlags::FlagZero } ] } } - layout::Layout::NullablePointer { + layout::Variants::Tagged { ref variants, .. } => { + let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata + .expect("")); + (0..variants.len()).map(|i| { + let variant = self.layout.for_variant(i); + let (variant_type_metadata, member_desc_factory) = + describe_enum_variant(cx, + variant, + &adt.variants[i], + discriminant_info, + self.containing_scope, + self.span); + + let member_descriptions = member_desc_factory + .create_member_descriptions(cx); + + set_members_of_composite_type(cx, + variant_type_metadata, + &member_descriptions); + MemberDescription { + name: "".to_string(), + type_metadata: variant_type_metadata, + offset: Size::from_bytes(0), + size: variant.size, + align: variant.align, + flags: DIFlags::FlagZero + } + }).collect() + } + layout::Variants::NicheFilling { nndiscr, discr, .. 
} => { - let variant = self.type_rep.for_variant(nndiscr as usize); + let variant = self.layout.for_variant(nndiscr as usize); // Create a description of the non-null variant let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, @@ -1237,8 +1237,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } } compute_field_path(cx, &mut name, - self.type_rep, - self.type_rep.fields.offset(0), + self.layout, + self.layout.fields.offset(0), discr.size(cx)); name.push_str(&adt.variants[(1 - nndiscr) as usize].name.as_str()); @@ -1253,8 +1253,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { flags: DIFlags::FlagZero } ] - }, - ref l @ _ => bug!("Not an enum layout: {:#?}", l) + } } } } @@ -1429,21 +1428,20 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } }; - let type_rep = cx.layout_of(enum_type); + let layout = cx.layout_of(enum_type); - let discriminant_type_metadata = match type_rep.layout { - layout::Layout::NullablePointer { .. } | - layout::Layout::Univariant { .. } => None, - layout::Layout::General { discr, .. } => Some(discriminant_type_metadata(discr)), - ref l @ _ => bug!("Not an enum layout: {:#?}", l) + let discriminant_type_metadata = match layout.variants { + layout::Variants::Single { .. } | + layout::Variants::NicheFilling { .. } => None, + layout::Variants::Tagged { discr, .. 
} => Some(discriminant_type_metadata(discr)), }; - match (type_rep.abi, discriminant_type_metadata) { + match (layout.abi, discriminant_type_metadata) { (layout::Abi::Scalar(_), Some(discr)) => return FinalMetadata(discr), _ => {} } - let (enum_type_size, enum_type_align) = type_rep.size_and_align(); + let (enum_type_size, enum_type_align) = layout.size_and_align(); let enum_name = CString::new(enum_name).unwrap(); let unique_type_id_str = CString::new( @@ -1471,7 +1469,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_metadata, EnumMDF(EnumMemberDescriptionFactory { enum_type, - type_rep, + layout, discriminant_type_metadata, containing_scope, span, diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 45d2f7c69e94e..d982fa192b369 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -394,12 +394,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, }, "discriminant_value" => { - match substs.type_at(0).sty { - ty::TyAdt(adt, ..) if adt.is_enum() => { - args[0].deref(bcx.ccx).trans_get_discr(bcx, ret_ty) - } - _ => C_null(llret_ty) - } + args[0].deref(bcx.ccx).trans_get_discr(bcx, ret_ty) } "align_offset" => { diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 45037a1f19836..5a2dcf4fb180e 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1094,8 +1094,26 @@ fn trans_const_adt<'a, 'tcx>( mir::AggregateKind::Adt(_, index, _, _) => index, _ => 0, }; - match l.layout { - layout::Layout::General { .. } => { + match l.variants { + layout::Variants::Single { index } => { + assert_eq!(variant_index, index); + if let layout::Abi::Vector { .. 
} = l.abi { + Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::>()), t) + } else if let layout::FieldPlacement::Union(_) = l.fields { + assert_eq!(variant_index, 0); + assert_eq!(vals.len(), 1); + let contents = [ + vals[0].llval, + padding(ccx, l.size - ccx.size_of(vals[0].ty)) + ]; + + Const::new(C_struct(ccx, &contents, l.is_packed()), t) + } else { + assert_eq!(variant_index, 0); + build_const_struct(ccx, l, vals, None) + } + } + layout::Variants::Tagged { .. } => { let discr = match *kind { mir::AggregateKind::Adt(adt_def, _, _, _) => { adt_def.discriminant_for_variant(ccx.tcx(), variant_index) @@ -1112,23 +1130,7 @@ fn trans_const_adt<'a, 'tcx>( build_const_struct(ccx, l.for_variant(variant_index), vals, Some(discr)) } } - layout::Layout::UntaggedUnion => { - assert_eq!(variant_index, 0); - let contents = [ - vals[0].llval, - padding(ccx, l.size - ccx.size_of(vals[0].ty)) - ]; - - Const::new(C_struct(ccx, &contents, l.is_packed()), t) - } - layout::Layout::Univariant => { - assert_eq!(variant_index, 0); - build_const_struct(ccx, l, vals, None) - } - layout::Layout::Vector => { - Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::>()), t) - } - layout::Layout::NullablePointer { nndiscr, .. } => { + layout::Variants::NicheFilling { nndiscr, .. } => { if variant_index as u64 == nndiscr { build_const_struct(ccx, l.for_variant(variant_index), vals, None) } else { @@ -1137,7 +1139,6 @@ fn trans_const_adt<'a, 'tcx>( Const::new(C_null(ccx.layout_of(t).llvm_type(ccx)), t) } } - _ => bug!("trans_const_adt: cannot handle type {} repreented as {:#?}", t, l) } } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index f10791cae5260..5a558e3652b51 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -296,10 +296,13 @@ impl<'a, 'tcx> LvalueRef<'tcx> { /// Obtain the actual discriminant of a value. 
pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef { let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx); - match self.layout.layout { - layout::Layout::Univariant { .. } | - layout::Layout::UntaggedUnion { .. } => return C_uint(cast_to, 0), - _ => {} + match self.layout.variants { + layout::Variants::Single { index } => { + assert_eq!(index, 0); + return C_uint(cast_to, 0); + } + layout::Variants::Tagged { .. } | + layout::Variants::NicheFilling { .. } => {}, } let discr = self.project_field(bcx, 0); @@ -307,8 +310,10 @@ impl<'a, 'tcx> LvalueRef<'tcx> { layout::Abi::Scalar(discr) => discr, _ => bug!("discriminant not scalar: {:#?}", discr.layout) }; - let (min, max) = match self.layout.layout { - layout::Layout::General { ref discr_range, .. } => (discr_range.start, discr_range.end), + let (min, max) = match self.layout.variants { + layout::Variants::Tagged { ref discr_range, .. } => { + (discr_range.start, discr_range.end) + } _ => (0, u64::max_value()), }; let max_next = max.wrapping_add(1); @@ -333,20 +338,20 @@ impl<'a, 'tcx> LvalueRef<'tcx> { bcx.load(discr.llval, discr.alignment.non_abi()) } }; - match self.layout.layout { - layout::Layout::General { .. } => { + match self.layout.variants { + layout::Variants::Single { .. } => bug!(), + layout::Variants::Tagged { .. } => { let signed = match discr_scalar { layout::Int(_, signed) => signed, _ => false }; bcx.intcast(lldiscr, cast_to, signed) } - layout::Layout::NullablePointer { nndiscr, .. } => { + layout::Variants::NicheFilling { nndiscr, .. 
} => { let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; let zero = C_null(discr.layout.llvm_type(bcx.ccx)); bcx.intcast(bcx.icmp(cmp, lldiscr, zero), cast_to, false) } - _ => bug!("{} is not an enum", self.layout.ty) } } @@ -356,13 +361,17 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let to = self.layout.ty.ty_adt_def().unwrap() .discriminant_for_variant(bcx.tcx(), variant_index) .to_u128_unchecked() as u64; - match self.layout.layout { - layout::Layout::General { .. } => { + match self.layout.variants { + layout::Variants::Single { index } => { + assert_eq!(to, 0); + assert_eq!(variant_index, index); + } + layout::Variants::Tagged { .. } => { let ptr = self.project_field(bcx, 0); bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64), ptr.llval, ptr.alignment.non_abi()); } - layout::Layout::NullablePointer { nndiscr, .. } => { + layout::Variants::NicheFilling { nndiscr, .. } => { if to != nndiscr { let use_memset = match self.layout.abi { layout::Abi::Scalar(_) => false, @@ -385,9 +394,6 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } } } - _ => { - assert_eq!(to, 0); - } } } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 0d18311521174..d3677e2eefd43 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -11,7 +11,7 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; -use rustc::ty::layout::{self, Layout, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf}; use rustc::mir; use rustc::middle::lang_items::ExchangeMallocFnLangItem; use rustc_apfloat::{ieee, Float, Status, Round}; @@ -278,8 +278,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let ll_t_out = cast.immediate_llvm_type(bcx.ccx); let llval = operand.immediate(); - if let Layout::General { ref discr_range, .. } = operand.layout.layout { - if discr_range.end > discr_range.start { + match operand.layout.variants { + layout::Variants::Tagged { + ref discr_range, .. 
+ } if discr_range.end > discr_range.start => { // We want `table[e as usize]` to not // have bound checks, and this is the most // convenient place to put the `assume`. @@ -290,6 +292,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { C_uint(ll_t_in, discr_range.end) )); } + _ => {} } let signed = match operand.layout.abi { diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 52151035a826f..2b3ac0386ee27 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -40,8 +40,13 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let mut name = String::with_capacity(32); let printer = DefPathBasedNames::new(ccx.tcx(), true, true); printer.push_type_name(layout.ty, &mut name); - if let (&ty::TyAdt(def, _), Some(v)) = (&layout.ty.sty, layout.variant_index) { - write!(&mut name, "::{}", def.variants[v].name).unwrap(); + match (&layout.ty.sty, &layout.variants) { + (&ty::TyAdt(def, _), &layout::Variants::Single { index }) => { + if def.is_enum() && !def.variants.is_empty() { + write!(&mut name, "::{}", def.variants[index].name).unwrap(); + } + } + _ => {} } Some(name) } @@ -206,7 +211,11 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { // Check the cache. 
- if let Some(&llty) = ccx.lltypes().borrow().get(&(self.ty, self.variant_index)) { + let variant_index = match self.variants { + layout::Variants::Single { index } => Some(index), + _ => None + }; + if let Some(&llty) = ccx.lltypes().borrow().get(&(self.ty, variant_index)) { return llty; } @@ -221,7 +230,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { let mut defer = None; let llty = if self.ty != normal_ty { let mut layout = ccx.layout_of(normal_ty); - if let Some(v) = self.variant_index { + if let Some(v) = variant_index { layout = layout.for_variant(v); } layout.llvm_type(ccx) @@ -230,7 +239,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { }; debug!("--> mapped {:#?} to llty={:?}", self, llty); - ccx.lltypes().borrow_mut().insert((self.ty, self.variant_index), llty); + ccx.lltypes().borrow_mut().insert((self.ty, variant_index), llty); if let Some((mut llty, layout)) = defer { llty.set_struct_body(&struct_llfields(ccx, layout), layout.is_packed()) From de3e581e29b1fd02fe4ef5cc415e5173f30e2ca7 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sat, 23 Sep 2017 15:04:37 +0300 Subject: [PATCH 43/69] rustc: support u128 discriminant ranges. 
--- src/librustc/lib.rs | 1 + src/librustc/ty/layout.rs | 34 +++++++------- src/librustc_trans/builder.rs | 28 +++++------- src/librustc_trans/common.rs | 4 +- src/librustc_trans/meth.rs | 3 +- src/librustc_trans/mir/block.rs | 11 ++--- src/librustc_trans/mir/constant.rs | 8 ++-- src/librustc_trans/mir/lvalue.rs | 72 +++++++++++------------------- src/librustc_trans/mir/rvalue.rs | 10 ++--- 9 files changed, 70 insertions(+), 101 deletions(-) diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 44039817e7206..b59f7480476b8 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -46,6 +46,7 @@ #![feature(const_fn)] #![feature(core_intrinsics)] #![feature(drain_filter)] +#![feature(i128)] #![feature(i128_type)] #![feature(inclusive_range)] #![feature(inclusive_range_syntax)] diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index bfde8a58e4911..a97574681a231 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -20,7 +20,7 @@ use syntax_pos::DUMMY_SP; use std::cmp; use std::fmt; -use std::i64; +use std::i128; use std::iter; use std::mem; use std::ops::{Add, Sub, Mul, AddAssign, Deref, RangeInclusive}; @@ -467,7 +467,7 @@ impl<'a, 'tcx> Integer { } /// Find the smallest Integer type which can represent the signed value. - pub fn fit_signed(x: i64) -> Integer { + pub fn fit_signed(x: i128) -> Integer { match x { -0x0000_0000_0000_0001...0x0000_0000_0000_0000 => I1, -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8, @@ -479,7 +479,7 @@ impl<'a, 'tcx> Integer { } /// Find the smallest Integer type which can represent the unsigned value. 
- pub fn fit_unsigned(x: u64) -> Integer { + pub fn fit_unsigned(x: u128) -> Integer { match x { 0...0x0000_0000_0000_0001 => I1, 0...0x0000_0000_0000_00ff => I8, @@ -495,7 +495,7 @@ impl<'a, 'tcx> Integer { let dl = cx.data_layout(); let wanted = align.abi(); - for &candidate in &[I8, I16, I32, I64] { + for &candidate in &[I8, I16, I32, I64, I128] { let ty = Int(candidate, false); if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() { return Some(candidate); @@ -522,19 +522,19 @@ impl<'a, 'tcx> Integer { /// Find the appropriate Integer type and signedness for the given /// signed discriminant range and #[repr] attribute. - /// N.B.: u64 values above i64::MAX will be treated as signed, but + /// N.B.: u128 values above i128::MAX will be treated as signed, but /// that shouldn't affect anything, other than maybe debuginfo. fn repr_discr(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, repr: &ReprOptions, - min: i64, - max: i64) + min: i128, + max: i128) -> (Integer, bool) { // Theoretically, negative values could be larger in unsigned representation // than the unsigned representation of the signed minimum. However, if there - // are any negative values, the only valid unsigned representation is u64 - // which can fit all i64 values, so the result remains unaffected. - let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64)); + // are any negative values, the only valid unsigned representation is u128 + // which can fit all i128 values, so the result remains unaffected. + let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128)); let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max)); let mut min_from_extern = None; @@ -782,11 +782,11 @@ pub enum Variants { Tagged { discr: Primitive, /// Inclusive wrap-around range of discriminant values, that is, - /// if min > max, it represents min..=u64::MAX followed by 0..=max. + /// if min > max, it represents min..=u128::MAX followed by 0..=max. 
// FIXME(eddyb) always use the shortest range, e.g. by finding // the largest space between two consecutive discriminants and // taking everything else as the (shortest) discriminant range. - discr_range: RangeInclusive, + discr_range: RangeInclusive, variants: Vec, }, @@ -1375,14 +1375,12 @@ impl<'a, 'tcx> CachedLayout { } } - let (mut min, mut max) = (i64::max_value(), i64::min_value()); + let (mut min, mut max) = (i128::max_value(), i128::min_value()); for discr in def.discriminants(tcx) { - let x = discr.to_u128_unchecked() as i64; + let x = discr.to_u128_unchecked() as i128; if x < min { min = x; } if x > max { max = x; } } - // FIXME: should handle i128? signed-value based impl is weird and hard to - // grok. let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); let mut align = dl.aggregate_align; @@ -1479,9 +1477,7 @@ impl<'a, 'tcx> CachedLayout { tcx.intern_layout(CachedLayout { variants: Variants::Tagged { discr, - - // FIXME: should be u128? - discr_range: (min as u64)..=(max as u64), + discr_range: (min as u128)..=(max as u128), variants }, // FIXME(eddyb): using `FieldPlacement::Arbitrary` here results diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 6ad12a13eca7a..9da3a479f0c28 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -24,6 +24,7 @@ use rustc::session::{config, Session}; use std::borrow::Cow; use std::ffi::CString; +use std::ops::Range; use std::ptr; use syntax_pos::Span; @@ -549,35 +550,26 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } - pub fn load_range_assert(&self, ptr: ValueRef, lo: u64, - hi: u64, signed: llvm::Bool, - align: Option) -> ValueRef { - let value = self.load(ptr, align); - + pub fn range_metadata(&self, load: ValueRef, range: Range) { unsafe { - let t = llvm::LLVMGetElementType(llvm::LLVMTypeOf(ptr)); - let min = llvm::LLVMConstInt(t, lo, signed); - let max = llvm::LLVMConstInt(t, hi, signed); - - let v = [min, max]; + let llty = val_ty(load); + 
let v = [ + C_uint_big(llty, range.start), + C_uint_big(llty, range.end) + ]; - llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint, + llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, llvm::LLVMMDNodeInContext(self.ccx.llcx(), v.as_ptr(), v.len() as c_uint)); } - - value } - pub fn load_nonnull(&self, ptr: ValueRef, align: Option) -> ValueRef { - let value = self.load(ptr, align); + pub fn nonnull_metadata(&self, load: ValueRef) { unsafe { - llvm::LLVMSetMetadata(value, llvm::MD_nonnull as c_uint, + llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, llvm::LLVMMDNodeInContext(self.ccx.llcx(), ptr::null(), 0)); } - - value } pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option) -> ValueRef { diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 7ccac6069233f..f476416619e69 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -178,9 +178,9 @@ pub fn C_uint(t: Type, i: u64) -> ValueRef { } } -pub fn C_big_integral(t: Type, u: u128) -> ValueRef { +pub fn C_uint_big(t: Type, u: u128) -> ValueRef { unsafe { - let words = [u as u64, u.wrapping_shr(64) as u64]; + let words = [u as u64, (u >> 64) as u64]; llvm::LLVMConstIntOfArbitraryPrecision(t.to_ref(), 2, words.as_ptr()) } } diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index 697f4ecd2bee3..a7d467f1cc5f3 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -40,7 +40,8 @@ impl<'a, 'tcx> VirtualIndex { debug!("get_fn({:?}, {:?})", Value(llvtable), self); let llvtable = bcx.pointercast(llvtable, fn_ty.llvm_type(bcx.ccx).ptr_to().ptr_to()); - let ptr = bcx.load_nonnull(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None); + let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None); + bcx.nonnull_metadata(ptr); // Vtable loads are invariant bcx.set_invariant_load(ptr); ptr diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 
e775c4897f748..cc0bbb8145dbd 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -666,17 +666,18 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. - if arg.layout.ty == bcx.tcx().types.bool { - llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None); - // We store bools as i8 so we need to truncate to i1. - llval = base::to_immediate(bcx, llval, arg.layout); - } else if let Some(ty) = arg.cast { + if let Some(ty) = arg.cast { llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()), (align | Alignment::Packed(arg.layout.align)) .non_abi()); } else { llval = bcx.load(llval, align.non_abi()); } + if arg.layout.ty == bcx.tcx().types.bool { + bcx.range_metadata(llval, 0..2); + // We store bools as i8 so we need to truncate to i1. + llval = base::to_immediate(bcx, llval, arg.layout); + } } llargs.push(llval); diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 5a2dcf4fb180e..e8ff9ae32483c 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -28,7 +28,7 @@ use abi::{self, Abi}; use callee; use builder::Builder; use common::{self, CrateContext, const_get_elt, val_ty}; -use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u64}; +use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_uint_big, C_u32, C_u64}; use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr}; use common::const_to_opt_u128; use consts; @@ -70,13 +70,13 @@ impl<'a, 'tcx> Const<'tcx> { I16(v) => (C_int(Type::i16(ccx), v as i64), tcx.types.i16), I32(v) => (C_int(Type::i32(ccx), v as i64), tcx.types.i32), I64(v) => (C_int(Type::i64(ccx), v as i64), tcx.types.i64), - I128(v) => (C_big_integral(Type::i128(ccx), v as u128), tcx.types.i128), + I128(v) => (C_uint_big(Type::i128(ccx), v as u128), tcx.types.i128), Isize(v) => (C_int(Type::isize(ccx), 
v.as_i64()), tcx.types.isize), U8(v) => (C_uint(Type::i8(ccx), v as u64), tcx.types.u8), U16(v) => (C_uint(Type::i16(ccx), v as u64), tcx.types.u16), U32(v) => (C_uint(Type::i32(ccx), v as u64), tcx.types.u32), U64(v) => (C_uint(Type::i64(ccx), v), tcx.types.u64), - U128(v) => (C_big_integral(Type::i128(ccx), v), tcx.types.u128), + U128(v) => (C_uint_big(Type::i128(ccx), v), tcx.types.u128), Usize(v) => (C_uint(Type::isize(ccx), v.as_u64()), tcx.types.usize), }; Const { llval: llval, ty: ty } @@ -994,7 +994,7 @@ unsafe fn cast_const_float_to_int(ccx: &CrateContext, let err = ConstEvalErr { span: span, kind: ErrKind::CannotCast }; err.report(ccx.tcx(), span, "expression"); } - C_big_integral(int_ty, cast_result.value) + C_uint_big(int_ty, cast_result.value) } unsafe fn cast_const_int_to_float(ccx: &CrateContext, diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 5a558e3652b51..6da9c7a465766 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -14,10 +14,9 @@ use rustc::ty::layout::{self, Align, TyLayout, LayoutOf}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; -use abi; use base; use builder::Builder; -use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, val_ty}; +use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null}; use consts; use type_of::LayoutLlvmExt; use type_::Type; @@ -140,30 +139,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { return OperandRef::new_zst(bcx.ccx, self.layout); } - let val = if common::type_is_fat_ptr(bcx.ccx, self.layout.ty) { - let data = self.project_field(bcx, abi::FAT_PTR_ADDR); - let lldata = if self.layout.ty.is_region_ptr() || self.layout.ty.is_box() { - bcx.load_nonnull(data.llval, data.alignment.non_abi()) - } else { - bcx.load(data.llval, data.alignment.non_abi()) - }; - - let extra = self.project_field(bcx, abi::FAT_PTR_EXTRA); - let meta_ty = val_ty(extra.llval); - // If the 
'extra' field is a pointer, it's a vtable, so use load_nonnull - // instead - let llextra = if meta_ty.element_type().kind() == llvm::TypeKind::Pointer { - bcx.load_nonnull(extra.llval, extra.alignment.non_abi()) - } else { - bcx.load(extra.llval, extra.alignment.non_abi()) - }; - - OperandValue::Pair(lldata, llextra) - } else if common::type_is_imm_pair(bcx.ccx, self.layout.ty) { - OperandValue::Pair( - self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(), - self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate()) - } else if self.layout.is_llvm_immediate() { + let val = if self.layout.is_llvm_immediate() { let mut const_llval = ptr::null_mut(); unsafe { let global = llvm::LLVMIsAGlobalVariable(self.llval); @@ -174,22 +150,26 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let llval = if !const_llval.is_null() { const_llval - } else if self.layout.ty.is_bool() { - bcx.load_range_assert(self.llval, 0, 2, llvm::False, - self.alignment.non_abi()) - } else if self.layout.ty.is_char() { - // a char is a Unicode codepoint, and so takes values from 0 - // to 0x10FFFF inclusive only. - bcx.load_range_assert(self.llval, 0, 0x10FFFF + 1, llvm::False, - self.alignment.non_abi()) - } else if self.layout.ty.is_region_ptr() || - self.layout.ty.is_box() || - self.layout.ty.is_fn() { - bcx.load_nonnull(self.llval, self.alignment.non_abi()) } else { - bcx.load(self.llval, self.alignment.non_abi()) + let load = bcx.load(self.llval, self.alignment.non_abi()); + if self.layout.ty.is_bool() { + bcx.range_metadata(load, 0..2); + } else if self.layout.ty.is_char() { + // a char is a Unicode codepoint, and so takes values from 0 + // to 0x10FFFF inclusive only. 
+ bcx.range_metadata(load, 0..0x10FFFF+1); + } else if self.layout.ty.is_region_ptr() || + self.layout.ty.is_box() || + self.layout.ty.is_fn() { + bcx.nonnull_metadata(load); + } + load }; OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout)) + } else if common::type_is_imm_pair(bcx.ccx, self.layout.ty) { + OperandValue::Pair( + self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(), + self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate()) } else { OperandValue::Ref(self.llval, self.alignment) }; @@ -314,28 +294,26 @@ impl<'a, 'tcx> LvalueRef<'tcx> { layout::Variants::Tagged { ref discr_range, .. } => { (discr_range.start, discr_range.end) } - _ => (0, u64::max_value()), + _ => (0, !0), }; let max_next = max.wrapping_add(1); let bits = discr_scalar.size(bcx.ccx).bits(); - assert!(bits <= 64); - let mask = !0u64 >> (64 - bits); - let lldiscr = match discr_scalar { + assert!(bits <= 128); + let mask = !0u128 >> (128 - bits); + let lldiscr = bcx.load(discr.llval, discr.alignment.non_abi()); + match discr_scalar { // For a (max) discr of -1, max will be `-1 as usize`, which overflows. // However, that is fine here (it would still represent the full range), layout::Int(..) if max_next & mask != min & mask => { // llvm::ConstantRange can deal with ranges that wrap around, // so an overflow on (max + 1) is fine. - bcx.load_range_assert(discr.llval, min, max_next, - /* signed: */ llvm::True, - discr.alignment.non_abi()) + bcx.range_metadata(lldiscr, min..max_next); } _ => { // i.e., if the range is everything. The lo==hi case would be // rejected by the LLVM verifier (it would mean either an // empty set, which is impossible, or the entire range of the // type, which is pointless). 
- bcx.load(discr.llval, discr.alignment.non_abi()) } }; match self.layout.variants { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index d3677e2eefd43..f584c6a653e5f 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -23,7 +23,7 @@ use base; use builder::Builder; use callee; use common::{self, val_ty}; -use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral}; +use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_uint_big}; use consts; use monomorphize; use type_::Type; @@ -289,7 +289,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { base::call_assume(&bcx, bcx.icmp( llvm::IntULE, llval, - C_uint(ll_t_in, discr_range.end) + C_uint_big(ll_t_in, discr_range.end) )); } _ => {} @@ -807,7 +807,7 @@ fn cast_int_to_float(bcx: &Builder, if is_u128_to_f32 { // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity, // and for everything else LLVM's uitofp works just fine. - let max = C_big_integral(int_ty, MAX_F32_PLUS_HALF_ULP); + let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); let overflow = bcx.icmp(llvm::IntUGE, x, max); let infinity_bits = C_u32(bcx.ccx, ieee::Single::INFINITY.to_bits() as u32); let infinity = consts::bitcast(infinity_bits, float_ty); @@ -934,8 +934,8 @@ fn cast_float_to_int(bcx: &Builder, // performed is ultimately up to the backend, but at least x86 does perform them. 
let less_or_nan = bcx.fcmp(llvm::RealULT, x, f_min); let greater = bcx.fcmp(llvm::RealOGT, x, f_max); - let int_max = C_big_integral(int_ty, int_max(signed, int_ty)); - let int_min = C_big_integral(int_ty, int_min(signed, int_ty) as u128); + let int_max = C_uint_big(int_ty, int_max(signed, int_ty)); + let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128); let s0 = bcx.select(less_or_nan, int_min, fptosui_result); let s1 = bcx.select(greater, int_max, s0); From abbc1ddbd05d4e738591aba6f589c3ecc1233105 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 24 Sep 2017 11:56:23 +0300 Subject: [PATCH 44/69] rustc: make TyLayout::field(NonZero<*T>, 0) return &T. --- src/librustc/ty/layout.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index a97574681a231..2919f25dc9d2c 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -2033,7 +2033,17 @@ impl<'a, 'tcx> TyLayout<'tcx> { ty::TyAdt(def, substs) => { match self.variants { Variants::Single { index } => { - def.variants[index].fields[i].ty(tcx, substs) + let mut field_ty = def.variants[index].fields[i].ty(tcx, substs); + + // Treat NonZero<*T> as containing &T. + // This is especially useful for fat pointers. + if Some(def.did) == tcx.lang_items().non_zero() { + if let ty::TyRawPtr(mt) = field_ty.sty { + field_ty = tcx.mk_ref(tcx.types.re_erased, mt); + } + } + + field_ty } // Discriminant field for enums (where applicable). @@ -2109,10 +2119,6 @@ impl<'a, 'tcx> TyLayout<'tcx> { let offset = self.fields.offset(0); if let Abi::Scalar(value) = field.abi { Ok(Some((offset, value))) - } else if let ty::TyRawPtr(_) = field.ty.sty { - // If `NonZero` contains a non-scalar `*T`, it's - // a fat pointer, which starts with a thin pointer. 
- Ok(Some((offset, Pointer))) } else { Ok(None) } From 0190f270c1501ecb7f1b1829dcac16af8b4981e1 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 24 Sep 2017 12:01:09 +0300 Subject: [PATCH 45/69] rustc_trans: check for layout::I1 instead of TyBool. --- src/librustc_trans/abi.rs | 18 ++++++++---------- src/librustc_trans/mir/block.rs | 4 ++-- src/librustc_trans/mir/constant.rs | 3 ++- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 2659ca2f0d575..752de4d28356d 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -695,17 +695,15 @@ impl<'a, 'tcx> FnType<'tcx> { let arg_of = |ty: Ty<'tcx>, is_return: bool| { let mut arg = ArgType::new(ccx.layout_of(ty)); - if ty.is_bool() { + if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = arg.layout.abi { arg.attrs.set(ArgAttribute::ZExt); - } else { - if arg.layout.is_zst() { - // For some forsaken reason, x86_64-pc-windows-gnu - // doesn't ignore zero-sized struct arguments. - // The same is true for s390x-unknown-linux-gnu. - if is_return || rust_abi || - (!win_x64_gnu && !linux_s390x) { - arg.ignore(); - } + } else if arg.layout.is_zst() { + // For some forsaken reason, x86_64-pc-windows-gnu + // doesn't ignore zero-sized struct arguments. + // The same is true for s390x-unknown-linux-gnu. 
+ if is_return || rust_abi || + (!win_x64_gnu && !linux_s390x) { + arg.ignore(); } } arg diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index cc0bbb8145dbd..cd152a391b8ae 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -12,7 +12,7 @@ use llvm::{self, ValueRef, BasicBlockRef}; use rustc::middle::lang_items; use rustc::middle::const_val::{ConstEvalErr, ConstInt, ErrKind}; use rustc::ty::{self, TypeFoldable}; -use rustc::ty::layout::LayoutOf; +use rustc::ty::layout::{self, LayoutOf}; use rustc::traits; use rustc::mir; use abi::{Abi, FnType, ArgType}; @@ -673,7 +673,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } else { llval = bcx.load(llval, align.non_abi()); } - if arg.layout.ty == bcx.tcx().types.bool { + if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = arg.layout.abi { bcx.range_metadata(llval, 0..2); // We store bools as i8 so we need to truncate to i1. llval = base::to_immediate(bcx, llval, arg.layout); diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index e8ff9ae32483c..08b05c33bb271 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -454,7 +454,8 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { span_bug!(span, "dereference of non-constant pointer `{:?}`", Value(base)); } - if projected_ty.is_bool() { + let layout = self.ccx.layout_of(projected_ty); + if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = layout.abi { let i1_type = Type::i1(self.ccx); if val_ty(val) != i1_type { unsafe { From b203a26efbd0d57115fc5dd40ec5410a8e5bd9da Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 24 Sep 2017 12:12:26 +0300 Subject: [PATCH 46/69] rustc: generalize layout::Variants::NicheFilling to niches other than 0. 
--- src/librustc/ty/layout.rs | 100 +++++++++++------- src/librustc_trans/debuginfo/metadata.rs | 14 +-- src/librustc_trans/mir/constant.rs | 25 +++-- src/librustc_trans/mir/lvalue.rs | 59 ++++++----- .../{nullable.rs => niche-filling.rs} | 8 +- .../{nullable.stdout => niche-filling.stdout} | 26 +++++ 6 files changed, 146 insertions(+), 86 deletions(-) rename src/test/ui/print_type_sizes/{nullable.rs => niche-filling.rs} (84%) rename src/test/ui/print_type_sizes/{nullable.stdout => niche-filling.stdout} (51%) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 2919f25dc9d2c..d52721bc17ab3 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -790,17 +790,18 @@ pub enum Variants { variants: Vec, }, - /// Two cases distinguished by a niche: the case with discriminant - /// `nndiscr` is represented by the struct `nonnull`, where field `0` - /// is known to be nonnull due to its type; if that field is null, then - /// it represents the other case, which is known to be zero sized. + /// Two cases distinguished by a niche (a value invalid for a type): + /// the variant `dataful_variant` contains a niche at an arbitrary + /// offset (field 0 of the enum), which is set to `niche_value` + /// for the other variant. /// - /// For example, `std::option::Option` instantiated at a safe pointer type - /// is represented such that `None` is a null pointer and `Some` is the - /// identity function. + /// For example, `Option<(usize, &T)>` is represented such that + /// `None` has a null pointer for the second tuple field, and + /// `Some` is the identity function (with a non-null reference). NicheFilling { - nndiscr: u64, - discr: Primitive, + dataful_variant: usize, + niche: Primitive, + niche_value: u128, variants: Vec, } } @@ -1323,7 +1324,7 @@ impl<'a, 'tcx> CachedLayout { } for (field_index, field) in variants[i].iter().enumerate() { - if let Some((offset, discr)) = field.non_zero_field(cx)? 
{ + if let Some((offset, niche, niche_value)) = field.find_niche(cx)? { let mut st = vec![ univariant_uninterned(&variants[0], &def.repr, StructKind::AlwaysSized)?, @@ -1342,23 +1343,23 @@ impl<'a, 'tcx> CachedLayout { .. } = st[i]; - let mut discr_align = discr.align(dl); - if offset.bytes() == 0 && discr.size(dl) == size { - abi = Abi::Scalar(discr); + let mut niche_align = niche.align(dl); + if offset.bytes() == 0 && niche.size(dl) == size { + abi = Abi::Scalar(niche); } else if let Abi::Aggregate { ref mut packed, .. } = abi { - if offset.abi_align(discr_align) != offset { + if offset.abi_align(niche_align) != offset { *packed = true; - discr_align = dl.i8_align; + niche_align = dl.i8_align; } } - align = align.max(discr_align); - primitive_align = primitive_align.max(discr_align); + align = align.max(niche_align); + primitive_align = primitive_align.max(niche_align); return Ok(tcx.intern_layout(CachedLayout { variants: Variants::NicheFilling { - nndiscr: i as u64, - - discr, + dataful_variant: i, + niche, + niche_value, variants: st, }, fields: FieldPlacement::Arbitrary { @@ -2048,7 +2049,7 @@ impl<'a, 'tcx> TyLayout<'tcx> { // Discriminant field for enums (where applicable). Variants::Tagged { discr, .. } | - Variants::NicheFilling { discr, .. } => { + Variants::NicheFilling { niche: discr, .. } => { return cx.layout_of([discr.to_ty(tcx)][i]); } } @@ -2084,30 +2085,48 @@ impl<'a, 'tcx> TyLayout<'tcx> { (self.size, self.align) } - /// Find the offset of a non-zero leaf field, starting from + /// Find the offset of a niche leaf field, starting from /// the given type and recursing through aggregates. - /// The tuple is `(offset, primitive, source_path)`. + /// The tuple is `(offset, primitive, niche_value)`. // FIXME(eddyb) track value ranges and traverse already optimized enums. 
- fn non_zero_field(&self, cx: C) - -> Result, LayoutError<'tcx>> + fn find_niche(&self, cx: C) + -> Result, LayoutError<'tcx>> where C: LayoutOf, TyLayout = Result>> + HasTyCtxt<'tcx> { let tcx = cx.tcx(); match (&self.variants, self.abi, &self.ty.sty) { // FIXME(eddyb) check this via value ranges on scalars. + (_, Abi::Scalar(Int(I1, _)), _) => { + Ok(Some((Size::from_bytes(0), Int(I8, false), 2))) + } + (_, Abi::Scalar(Int(I32, _)), &ty::TyChar) => { + Ok(Some((Size::from_bytes(0), Int(I32, false), 0x10FFFF+1))) + } (_, Abi::Scalar(Pointer), &ty::TyRef(..)) | (_, Abi::Scalar(Pointer), &ty::TyFnPtr(..)) => { - Ok(Some((Size::from_bytes(0), Pointer))) + Ok(Some((Size::from_bytes(0), Pointer, 0))) } (_, Abi::Scalar(Pointer), &ty::TyAdt(def, _)) if def.is_box() => { - Ok(Some((Size::from_bytes(0), Pointer))) + Ok(Some((Size::from_bytes(0), Pointer, 0))) } // FIXME(eddyb) check this via value ranges on scalars. - (&Variants::Tagged { discr, .. }, _, &ty::TyAdt(def, _)) => { - if def.discriminants(tcx).all(|d| d.to_u128_unchecked() != 0) { - Ok(Some((self.fields.offset(0), discr))) + (&Variants::Tagged { discr, ref discr_range, .. }, _, _) => { + // FIXME(eddyb) support negative/wrap-around discriminant ranges. + if discr_range.start < discr_range.end { + if discr_range.start > 0 { + Ok(Some((self.fields.offset(0), discr, 0))) + } else { + let bits = discr.size(tcx).bits(); + assert!(bits <= 128); + let max_value = !0u128 >> (128 - bits); + if discr_range.end < max_value { + Ok(Some((self.fields.offset(0), discr, discr_range.end + 1))) + } else { + Ok(None) + } + } } else { Ok(None) } @@ -2118,7 +2137,7 @@ impl<'a, 'tcx> TyLayout<'tcx> { let field = self.field(cx, 0)?; let offset = self.fields.offset(0); if let Abi::Scalar(value) = field.abi { - Ok(Some((offset, value))) + Ok(Some((offset, value, 0))) } else { Ok(None) } @@ -2128,13 +2147,14 @@ impl<'a, 'tcx> TyLayout<'tcx> { _ => { if let FieldPlacement::Array { count, .. 
} = self.fields { if count > 0 { - return self.field(cx, 0)?.non_zero_field(cx); + return self.field(cx, 0)?.find_niche(cx); } } for i in 0..self.fields.count() { - let r = self.field(cx, i)?.non_zero_field(cx)?; - if let Some((offset, primitive)) = r { - return Ok(Some((self.fields.offset(i) + offset, primitive))); + let r = self.field(cx, i)?.find_niche(cx)?; + if let Some((offset, primitive, niche_value)) = r { + let offset = self.fields.offset(i) + offset; + return Ok(Some((offset, primitive, niche_value))); } } Ok(None) @@ -2165,13 +2185,15 @@ impl<'gcx> HashStable> for Variants { variants.hash_stable(hcx, hasher); } NicheFilling { - nndiscr, + dataful_variant, + ref niche, + niche_value, ref variants, - ref discr, } => { - nndiscr.hash_stable(hcx, hasher); + dataful_variant.hash_stable(hcx, hasher); + niche.hash_stable(hcx, hasher); + niche_value.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); - discr.hash_stable(hcx, hasher); } } } diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index a905d35f3d326..2768c7fb5772f 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1191,17 +1191,13 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } }).collect() } - layout::Variants::NicheFilling { - nndiscr, - discr, - .. - } => { - let variant = self.layout.for_variant(nndiscr as usize); + layout::Variants::NicheFilling { dataful_variant, .. 
} => { + let variant = self.layout.for_variant(dataful_variant); // Create a description of the non-null variant let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, variant, - &adt.variants[nndiscr as usize], + &adt.variants[dataful_variant], OptimizedDiscriminant, self.containing_scope, self.span); @@ -1239,8 +1235,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { compute_field_path(cx, &mut name, self.layout, self.layout.fields.offset(0), - discr.size(cx)); - name.push_str(&adt.variants[(1 - nndiscr) as usize].name.as_str()); + self.layout.field(cx, 0).size); + name.push_str(&adt.variants[1 - dataful_variant].name.as_str()); // Create the (singleton) list of descriptions of union members. vec![ diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 08b05c33bb271..cf6f72d21d6d1 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1122,22 +1122,29 @@ fn trans_const_adt<'a, 'tcx>( }, _ => 0, }; - let discr_ty = l.field(ccx, 0).ty; - let discr = C_int(ccx.layout_of(discr_ty).llvm_type(ccx), discr as i64); + let discr_field = l.field(ccx, 0); + let discr = C_int(discr_field.llvm_type(ccx), discr as i64); if let layout::Abi::Scalar(_) = l.abi { Const::new(discr, t) } else { - let discr = Const::new(discr, discr_ty); + let discr = Const::new(discr, discr_field.ty); build_const_struct(ccx, l.for_variant(variant_index), vals, Some(discr)) } } - layout::Variants::NicheFilling { nndiscr, .. } => { - if variant_index as u64 == nndiscr { - build_const_struct(ccx, l.for_variant(variant_index), vals, None) + layout::Variants::NicheFilling { dataful_variant, niche_value, .. } => { + if variant_index == dataful_variant { + build_const_struct(ccx, l.for_variant(dataful_variant), vals, None) } else { - // Always use null even if it's not the `discrfield`th - // field; see #8506. 
- Const::new(C_null(ccx.layout_of(t).llvm_type(ccx)), t) + let niche = l.field(ccx, 0); + let niche_llty = niche.llvm_type(ccx); + // FIXME(eddyb) Check the actual primitive type here. + let niche_llval = if niche_value == 0 { + // HACK(eddyb) Using `C_null` as it works on all types. + C_null(niche_llty) + } else { + C_uint_big(niche_llty, niche_value) + }; + build_const_struct(ccx, l, &[Const::new(niche_llval, niche.ty)], None) } } } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 6da9c7a465766..1f8209c7066a9 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -16,7 +16,7 @@ use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; use base; use builder::Builder; -use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null}; +use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, C_uint_big}; use consts; use type_of::LayoutLlvmExt; use type_::Type; @@ -72,10 +72,6 @@ impl Alignment { } } -fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool { - bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" -} - #[derive(Copy, Clone, Debug)] pub struct LvalueRef<'tcx> { /// Pointer to the contents of the lvalue @@ -325,10 +321,17 @@ impl<'a, 'tcx> LvalueRef<'tcx> { }; bcx.intcast(lldiscr, cast_to, signed) } - layout::Variants::NicheFilling { nndiscr, .. } => { - let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE }; - let zero = C_null(discr.layout.llvm_type(bcx.ccx)); - bcx.intcast(bcx.icmp(cmp, lldiscr, zero), cast_to, false) + layout::Variants::NicheFilling { dataful_variant, niche_value, .. } => { + let niche_llty = discr.layout.llvm_type(bcx.ccx); + // FIXME(eddyb) Check the actual primitive type here. + let niche_llval = if niche_value == 0 { + // HACK(eddyb) Using `C_null` as it works on all types. 
+ C_null(niche_llty) + } else { + C_uint_big(niche_llty, niche_value) + }; + let cmp = if dataful_variant == 0 { llvm::IntEQ } else { llvm::IntNE }; + bcx.intcast(bcx.icmp(cmp, lldiscr, niche_llval), cast_to, false) } } } @@ -336,40 +339,42 @@ impl<'a, 'tcx> LvalueRef<'tcx> { /// Set the discriminant for a new value of the given case of the given /// representation. pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) { - let to = self.layout.ty.ty_adt_def().unwrap() - .discriminant_for_variant(bcx.tcx(), variant_index) - .to_u128_unchecked() as u64; match self.layout.variants { layout::Variants::Single { index } => { - assert_eq!(to, 0); assert_eq!(variant_index, index); } layout::Variants::Tagged { .. } => { let ptr = self.project_field(bcx, 0); + let to = self.layout.ty.ty_adt_def().unwrap() + .discriminant_for_variant(bcx.tcx(), variant_index) + .to_u128_unchecked() as u64; bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64), ptr.llval, ptr.alignment.non_abi()); } - layout::Variants::NicheFilling { nndiscr, .. } => { - if to != nndiscr { - let use_memset = match self.layout.abi { - layout::Abi::Scalar(_) => false, - _ => target_sets_discr_via_memset(bcx) - }; - if use_memset { - // Issue #34427: As workaround for LLVM bug on - // ARM, use memset of 0 on whole struct rather - // than storing null to single target field. + layout::Variants::NicheFilling { dataful_variant, niche_value, .. } => { + if variant_index != dataful_variant { + if bcx.sess().target.target.arch == "arm" || + bcx.sess().target.target.arch == "aarch64" { + // Issue #34427: As workaround for LLVM bug on ARM, + // use memset of 0 before assigning niche value. 
let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to()); let fill_byte = C_u8(bcx.ccx, 0); let (size, align) = self.layout.size_and_align(); let size = C_usize(bcx.ccx, size.bytes()); let align = C_u32(bcx.ccx, align.abi() as u32); base::call_memset(bcx, llptr, fill_byte, size, align, false); - } else { - let ptr = self.project_field(bcx, 0); - bcx.store(C_null(ptr.layout.llvm_type(bcx.ccx)), - ptr.llval, ptr.alignment.non_abi()); } + + let niche = self.project_field(bcx, 0); + let niche_llty = niche.layout.llvm_type(bcx.ccx); + // FIXME(eddyb) Check the actual primitive type here. + let niche_llval = if niche_value == 0 { + // HACK(eddyb) Using `C_null` as it works on all types. + C_null(niche_llty) + } else { + C_uint_big(niche_llty, niche_value) + }; + bcx.store(niche_llval, niche.llval, niche.alignment.non_abi()); } } } diff --git a/src/test/ui/print_type_sizes/nullable.rs b/src/test/ui/print_type_sizes/niche-filling.rs similarity index 84% rename from src/test/ui/print_type_sizes/nullable.rs rename to src/test/ui/print_type_sizes/niche-filling.rs index 5052c59a39dcf..dfa7b8aae31eb 100644 --- a/src/test/ui/print_type_sizes/nullable.rs +++ b/src/test/ui/print_type_sizes/niche-filling.rs @@ -10,8 +10,8 @@ // compile-flags: -Z print-type-sizes -// This file illustrates how enums with a non-null field are handled, -// modelled after cases like `Option<&u32>` and such. +// This file illustrates how niche-filling enums are handled, +// modelled after cases like `Option<&u32>`, `Option` and such. // // It uses NonZero directly, rather than `&_` or `Unique<_>`, because // the test is not set up to deal with target-dependent pointer width. 
@@ -72,4 +72,8 @@ pub fn main() { let _x: MyOption> = Default::default(); let _y: EmbeddedDiscr = Default::default(); let _z: MyOption> = Default::default(); + let _a: MyOption = Default::default(); + let _b: MyOption = Default::default(); + let _c: MyOption = Default::default(); + let _b: MyOption> = Default::default(); } diff --git a/src/test/ui/print_type_sizes/nullable.stdout b/src/test/ui/print_type_sizes/niche-filling.stdout similarity index 51% rename from src/test/ui/print_type_sizes/nullable.stdout rename to src/test/ui/print_type_sizes/niche-filling.stdout index ec51adb25af2c..668b31e413f64 100644 --- a/src/test/ui/print_type_sizes/nullable.stdout +++ b/src/test/ui/print_type_sizes/niche-filling.stdout @@ -19,9 +19,35 @@ print-type-size field `.val`: 4 bytes print-type-size field `.post`: 2 bytes print-type-size field `.pre`: 1 bytes print-type-size end padding: 1 bytes +print-type-size type: `MyOption`: 4 bytes, alignment: 4 bytes +print-type-size variant `None`: 0 bytes +print-type-size variant `Some`: 4 bytes +print-type-size field `.0`: 4 bytes print-type-size type: `MyOption>`: 4 bytes, alignment: 4 bytes print-type-size variant `None`: 0 bytes print-type-size variant `Some`: 4 bytes print-type-size field `.0`: 4 bytes print-type-size type: `core::nonzero::NonZero`: 4 bytes, alignment: 4 bytes print-type-size field `.0`: 4 bytes +print-type-size type: `MyOption>`: 2 bytes, alignment: 1 bytes +print-type-size variant `None`: 0 bytes +print-type-size variant `Some`: 2 bytes +print-type-size field `.0`: 2 bytes +print-type-size type: `MyOption`: 2 bytes, alignment: 1 bytes +print-type-size discriminant: 1 bytes +print-type-size variant `None`: 0 bytes +print-type-size variant `Some`: 1 bytes +print-type-size field `.0`: 1 bytes +print-type-size type: `MyOption`: 1 bytes, alignment: 1 bytes +print-type-size variant `None`: 0 bytes +print-type-size variant `Some`: 1 bytes +print-type-size field `.0`: 1 bytes +print-type-size type: `MyOption`: 1 bytes, 
alignment: 1 bytes +print-type-size variant `None`: 0 bytes +print-type-size variant `Some`: 1 bytes +print-type-size field `.0`: 1 bytes +print-type-size type: `core::cmp::Ordering`: 1 bytes, alignment: 1 bytes +print-type-size discriminant: 1 bytes +print-type-size variant `Less`: 0 bytes +print-type-size variant `Equal`: 0 bytes +print-type-size variant `Greater`: 0 bytes From 5df25c4aed68a4f761645f63e6ce34ec8c30a75e Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 26 Sep 2017 07:27:48 +0300 Subject: [PATCH 47/69] rustc: remove redundant/unused fields from layout::Abi::Vector. --- src/librustc/ty/layout.rs | 27 +++++++++------------------ src/librustc_trans/abi.rs | 4 ++-- src/librustc_trans/cabi_x86_64.rs | 8 ++++---- src/librustc_trans/cabi_x86_win64.rs | 2 +- src/librustc_trans/mir/constant.rs | 2 +- src/librustc_trans/type_of.rs | 4 ++-- 6 files changed, 19 insertions(+), 28 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index d52721bc17ab3..9d8736338f128 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -740,10 +740,7 @@ impl FieldPlacement { #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum Abi { Scalar(Primitive), - Vector { - element: Primitive, - count: u64 - }, + Vector, Aggregate { /// If true, the size is exact, otherwise it's only a lower bound. sized: bool, @@ -755,7 +752,7 @@ impl Abi { /// Returns true if the layout corresponds to an unsized type. pub fn is_unsized(&self) -> bool { match *self { - Abi::Scalar(_) | Abi::Vector { .. } => false, + Abi::Scalar(_) | Abi::Vector => false, Abi::Aggregate { sized, .. } => !sized } } @@ -763,7 +760,7 @@ impl Abi { /// Returns true if the fields of the layout are packed. pub fn is_packed(&self) -> bool { match *self { - Abi::Scalar(_) | Abi::Vector { .. } => false, + Abi::Scalar(_) | Abi::Vector => false, Abi::Aggregate { packed, .. } => packed } } @@ -1202,14 +1199,14 @@ impl<'a, 'tcx> CachedLayout { ty::TyAdt(def, ..) 
if def.repr.simd() => { let count = ty.simd_size(tcx) as u64; let element = cx.layout_of(ty.simd_type(tcx))?; - let element_scalar = match element.abi { - Abi::Scalar(value) => value, + match element.abi { + Abi::Scalar(_) => {} _ => { tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \ a non-machine element type `{}`", ty, element.ty)); } - }; + } let size = element.size.checked_mul(count, dl) .ok_or(LayoutError::SizeOverflow(ty))?; let align = dl.vector_align(size); @@ -1221,10 +1218,7 @@ impl<'a, 'tcx> CachedLayout { stride: element.size, count }, - abi: Abi::Vector { - element: element_scalar, - count - }, + abi: Abi::Vector, size, align, primitive_align: align @@ -2076,7 +2070,7 @@ impl<'a, 'tcx> TyLayout<'tcx> { pub fn is_zst(&self) -> bool { match self.abi { Abi::Scalar(_) => false, - Abi::Vector { count, .. } => count == 0, + Abi::Vector => self.size.bytes() == 0, Abi::Aggregate { sized, .. } => sized && self.size.bytes() == 0 } } @@ -2233,10 +2227,7 @@ impl<'gcx> HashStable> for Abi { Scalar(ref value) => { value.hash_stable(hcx, hasher); } - Vector { ref element, count } => { - element.hash_stable(hcx, hasher); - count.hash_stable(hcx, hasher); - } + Vector => {} Aggregate { packed, sized } => { packed.hash_stable(hcx, hasher); sized.hash_stable(hcx, hasher); diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 752de4d28356d..688fa8fe02d03 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -279,7 +279,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { fn is_aggregate(&self) -> bool { match self.abi { layout::Abi::Scalar(_) | - layout::Abi::Vector { .. } => false, + layout::Abi::Vector => false, layout::Abi::Aggregate { .. } => true } } @@ -300,7 +300,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { }) } - layout::Abi::Vector { .. 
} => { + layout::Abi::Vector => { Some(Reg { kind: RegKind::Vector, size: self.size diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index d5a51fa1863e2..b799a7690bdf8 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -75,14 +75,14 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) unify(cls, off, reg); } - layout::Abi::Vector { element, count } => { + layout::Abi::Vector => { unify(cls, off, Class::Sse); // everything after the first one is the upper // half of a register. - let eltsz = element.size(ccx); - for i in 1..count { - unify(cls, off + eltsz * (i as u64), Class::SseUp); + for i in 1..layout.fields.count() { + let field_off = off + layout.fields.offset(i); + unify(cls, field_off, Class::SseUp); } } diff --git a/src/librustc_trans/cabi_x86_win64.rs b/src/librustc_trans/cabi_x86_win64.rs index c6d0e5e3a0735..ceb649be197ef 100644 --- a/src/librustc_trans/cabi_x86_win64.rs +++ b/src/librustc_trans/cabi_x86_win64.rs @@ -26,7 +26,7 @@ pub fn compute_abi_info(fty: &mut FnType) { _ => a.make_indirect() } } - layout::Abi::Vector { .. } => { + layout::Abi::Vector => { // FIXME(eddyb) there should be a size cap here // (probably what clang calls "illegal vectors"). } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index cf6f72d21d6d1..d782ffe1f9d34 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1098,7 +1098,7 @@ fn trans_const_adt<'a, 'tcx>( match l.variants { layout::Variants::Single { index } => { assert_eq!(variant_index, index); - if let layout::Abi::Vector { .. 
} = l.abi { + if let layout::Abi::Vector = l.abi { Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::>()), t) } else if let layout::FieldPlacement::Union(_) = l.fields { assert_eq!(variant_index, 0); diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 2b3ac0386ee27..6fec1a675cd63 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -23,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, -> Type { match layout.abi { layout::Abi::Scalar(_) => bug!("handled elsewhere"), - layout::Abi::Vector { .. } => { + layout::Abi::Vector => { return Type::vector(&layout.field(ccx, 0).llvm_type(ccx), layout.fields.count() as u64); } @@ -158,7 +158,7 @@ pub trait LayoutLlvmExt<'tcx> { impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { fn is_llvm_immediate(&self) -> bool { match self.abi { - layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true, + layout::Abi::Scalar(_) | layout::Abi::Vector => true, layout::Abi::Aggregate { .. } => self.is_zst() } From f62e43da2891a65a484a917d84642544ed093ba2 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 26 Sep 2017 14:41:06 +0300 Subject: [PATCH 48/69] rustc: track validity ranges for layout::Abi::Scalar values. 
--- src/librustc/ty/layout.rs | 301 ++++++++++++++--------- src/librustc_lint/types.rs | 4 +- src/librustc_trans/abi.rs | 19 +- src/librustc_trans/base.rs | 9 +- src/librustc_trans/cabi_s390x.rs | 8 +- src/librustc_trans/cabi_x86.rs | 8 +- src/librustc_trans/cabi_x86_64.rs | 4 +- src/librustc_trans/debuginfo/metadata.rs | 8 +- src/librustc_trans/lib.rs | 1 + src/librustc_trans/mir/block.rs | 13 +- src/librustc_trans/mir/constant.rs | 16 +- src/librustc_trans/mir/lvalue.rs | 71 ++---- src/librustc_trans/mir/rvalue.rs | 36 ++- src/librustc_trans/type_.rs | 1 - src/librustc_trans/type_of.rs | 14 +- 15 files changed, 294 insertions(+), 219 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 9d8736338f128..899245b22aaa9 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -416,7 +416,6 @@ impl Align { /// Integers, also used for enum discriminants. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum Integer { - I1, I8, I16, I32, @@ -427,7 +426,6 @@ pub enum Integer { impl<'a, 'tcx> Integer { pub fn size(&self) -> Size { match *self { - I1 => Size::from_bits(1), I8 => Size::from_bytes(1), I16 => Size::from_bytes(2), I32 => Size::from_bytes(4), @@ -440,7 +438,6 @@ impl<'a, 'tcx> Integer { let dl = cx.data_layout(); match *self { - I1 => dl.i1_align, I8 => dl.i8_align, I16 => dl.i16_align, I32 => dl.i32_align, @@ -451,13 +448,11 @@ impl<'a, 'tcx> Integer { pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> { match (*self, signed) { - (I1, false) => tcx.types.u8, (I8, false) => tcx.types.u8, (I16, false) => tcx.types.u16, (I32, false) => tcx.types.u32, (I64, false) => tcx.types.u64, (I128, false) => tcx.types.u128, - (I1, true) => tcx.types.i8, (I8, true) => tcx.types.i8, (I16, true) => tcx.types.i16, (I32, true) => tcx.types.i32, @@ -469,7 +464,6 @@ impl<'a, 'tcx> Integer { /// Find the smallest Integer type which can represent the signed value. 
pub fn fit_signed(x: i128) -> Integer { match x { - -0x0000_0000_0000_0001...0x0000_0000_0000_0000 => I1, -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8, -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16, -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32, @@ -481,7 +475,6 @@ impl<'a, 'tcx> Integer { /// Find the smallest Integer type which can represent the unsigned value. pub fn fit_unsigned(x: u128) -> Integer { match x { - 0...0x0000_0000_0000_0001 => I1, 0...0x0000_0000_0000_00ff => I8, 0...0x0000_0000_0000_ffff => I16, 0...0x0000_0000_ffff_ffff => I32, @@ -621,6 +614,29 @@ impl<'a, 'tcx> Primitive { } } +/// Information about one scalar component of a Rust type. +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct Scalar { + pub value: Primitive, + + /// Inclusive wrap-around range of valid values, that is, if + /// min > max, it represents min..=u128::MAX followed by 0..=max. + // FIXME(eddyb) always use the shortest range, e.g. by finding + // the largest space between two consecutive valid values and + // taking everything else as the (shortest) valid range. + pub valid_range: RangeInclusive, +} + +impl Scalar { + pub fn is_bool(&self) -> bool { + if let Int(I8, _) = self.value { + self.valid_range == (0..=1) + } else { + false + } + } +} + /// The first half of a fat pointer. /// - For a trait object, this is the address of the box. /// - For a slice, this is the base address. @@ -737,9 +753,9 @@ impl FieldPlacement { /// Describes how values of the type are passed by target ABIs, /// in terms of categories of C types there are ABI rules for. -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, PartialEq, Eq, Hash, Debug)] pub enum Abi { - Scalar(Primitive), + Scalar(Scalar), Vector, Aggregate { /// If true, the size is exact, otherwise it's only a lower bound. 
@@ -777,13 +793,7 @@ pub enum Variants { /// all space reserved for the discriminant, and their first field starts /// at a non-0 offset, after where the discriminant would go. Tagged { - discr: Primitive, - /// Inclusive wrap-around range of discriminant values, that is, - /// if min > max, it represents min..=u128::MAX followed by 0..=max. - // FIXME(eddyb) always use the shortest range, e.g. by finding - // the largest space between two consecutive discriminants and - // taking everything else as the (shortest) discriminant range. - discr_range: RangeInclusive, + discr: Scalar, variants: Vec, }, @@ -797,7 +807,7 @@ pub enum Variants { /// `Some` is the identity function (with a non-null reference). NicheFilling { dataful_variant: usize, - niche: Primitive, + niche: Scalar, niche_value: u128, variants: Vec, } @@ -832,6 +842,21 @@ pub struct CachedLayout { pub size: Size } +impl CachedLayout { + fn scalar(cx: C, scalar: Scalar) -> Self { + let size = scalar.value.size(cx); + let align = scalar.value.align(cx); + CachedLayout { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Union(0), + abi: Abi::Scalar(scalar), + size, + align, + primitive_align: align + } + } +} + fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> Result<&'tcx CachedLayout, LayoutError<'tcx>> @@ -867,16 +892,14 @@ impl<'a, 'tcx> CachedLayout { let cx = (tcx, param_env); let dl = cx.data_layout(); let scalar = |value: Primitive| { - let align = value.align(dl); - tcx.intern_layout(CachedLayout { - variants: Variants::Single { index: 0 }, - fields: FieldPlacement::Union(0), - abi: Abi::Scalar(value), - size: value.size(dl), - align, - primitive_align: align - }) + let bits = value.size(dl).bits(); + assert!(bits <= 128); + tcx.intern_layout(CachedLayout::scalar(cx, Scalar { + value, + valid_range: 0..=(!0 >> (128 - bits)) + })) }; + #[derive(Copy, Clone, Debug)] enum StructKind { /// A tuple, closure, or univariant which cannot be 
coerced to unsized. @@ -1030,7 +1053,12 @@ impl<'a, 'tcx> CachedLayout { let ptr_layout = |pointee: Ty<'tcx>| { let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env); if pointee.is_sized(tcx, param_env, DUMMY_SP) { - return Ok(scalar(Pointer)); + let non_zero = !ty.is_unsafe_ptr(); + let bits = Pointer.size(dl).bits(); + return Ok(tcx.intern_layout(CachedLayout::scalar(cx, Scalar { + value: Pointer, + valid_range: (non_zero as u128)..=(!0 >> (128 - bits)) + }))); } let unsized_part = tcx.struct_tail(pointee); @@ -1066,8 +1094,18 @@ impl<'a, 'tcx> CachedLayout { Ok(match ty.sty { // Basic scalars. - ty::TyBool => scalar(Int(I1, false)), - ty::TyChar => scalar(Int(I32, false)), + ty::TyBool => { + tcx.intern_layout(CachedLayout::scalar(cx, Scalar { + value: Int(I8, false), + valid_range: 0..=1 + })) + } + ty::TyChar => { + tcx.intern_layout(CachedLayout::scalar(cx, Scalar { + value: Int(I32, false), + valid_range: 0..=0x10FFFF + })) + } ty::TyInt(ity) => { scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)) } @@ -1076,7 +1114,13 @@ impl<'a, 'tcx> CachedLayout { } ty::TyFloat(FloatTy::F32) => scalar(F32), ty::TyFloat(FloatTy::F64) => scalar(F64), - ty::TyFnPtr(_) => scalar(Pointer), + ty::TyFnPtr(_) => { + let bits = Pointer.size(dl).bits(); + tcx.intern_layout(CachedLayout::scalar(cx, Scalar { + value: Pointer, + valid_range: 1..=(!0 >> (128 - bits)) + })) + } // The never type. ty::TyNever => { @@ -1330,22 +1374,26 @@ impl<'a, 'tcx> CachedLayout { } let offset = st[i].fields.offset(field_index) + offset; let CachedLayout { - mut abi, size, mut align, mut primitive_align, .. } = st[i]; - let mut niche_align = niche.align(dl); - if offset.bytes() == 0 && niche.size(dl) == size { - abi = Abi::Scalar(niche); - } else if let Abi::Aggregate { ref mut packed, .. 
} = abi { + let mut niche_align = niche.value.align(dl); + let abi = if offset.bytes() == 0 && niche.value.size(dl) == size { + Abi::Scalar(niche.clone()) + } else { + let mut packed = st[i].abi.is_packed(); if offset.abi_align(niche_align) != offset { - *packed = true; + packed = true; niche_align = dl.i8_align; } - } + Abi::Aggregate { + sized: true, + packed + } + }; align = align.max(niche_align); primitive_align = primitive_align.max(niche_align); @@ -1468,25 +1516,28 @@ impl<'a, 'tcx> CachedLayout { } } - let discr = Int(ity, signed); + let discr = Scalar { + value: Int(ity, signed), + valid_range: (min as u128)..=(max as u128) + }; + let abi = if discr.value.size(dl) == size { + Abi::Scalar(discr.clone()) + } else { + Abi::Aggregate { + sized: true, + packed: false + } + }; tcx.intern_layout(CachedLayout { variants: Variants::Tagged { discr, - discr_range: (min as u128)..=(max as u128), variants }, // FIXME(eddyb): using `FieldPlacement::Arbitrary` here results // in lost optimizations, specifically around allocations, see // `test/codegen/{alloc-optimisation,vec-optimizes-away}.rs`. fields: FieldPlacement::Union(1), - abi: if discr.size(dl) == size { - Abi::Scalar(discr) - } else { - Abi::Aggregate { - sized: true, - packed: false - } - }, + abi, align, primitive_align, size @@ -1650,7 +1701,7 @@ impl<'a, 'tcx> CachedLayout { }) .collect(); record(adt_kind.into(), match layout.variants { - Variants::Tagged { discr, .. } => Some(discr.size(tcx)), + Variants::Tagged { ref discr, .. 
} => Some(discr.value.size(tcx)), _ => None }, variant_infos); } @@ -1852,16 +1903,23 @@ impl<'a, 'gcx, 'tcx, T: Copy> HasTyCtxt<'gcx> for (TyCtxt<'a, 'gcx, 'tcx>, T) { } pub trait MaybeResult { + fn from_ok(x: T) -> Self; fn map_same T>(self, f: F) -> Self; } impl MaybeResult for T { + fn from_ok(x: T) -> Self { + x + } fn map_same T>(self, f: F) -> Self { f(self) } } impl MaybeResult for Result { + fn from_ok(x: T) -> Self { + Ok(x) + } fn map_same T>(self, f: F) -> Self { self.map(f) } @@ -1961,7 +2019,13 @@ impl<'a, 'tcx> TyLayout<'tcx> { // (which may have no non-DST form), and will work as long // as the `Abi` or `FieldPlacement` is checked by users. if i == 0 { - return cx.layout_of(Pointer.to_ty(tcx)).map_same(|mut ptr_layout| { + let nil = tcx.mk_nil(); + let ptr_ty = if self.ty.is_unsafe_ptr() { + tcx.mk_mut_ptr(nil) + } else { + tcx.mk_mut_ref(tcx.types.re_static, nil) + }; + return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| { ptr_layout.ty = self.ty; ptr_layout }); @@ -2042,9 +2106,14 @@ impl<'a, 'tcx> TyLayout<'tcx> { } // Discriminant field for enums (where applicable). - Variants::Tagged { discr, .. } | - Variants::NicheFilling { niche: discr, .. } => { - return cx.layout_of([discr.to_ty(tcx)][i]); + Variants::Tagged { ref discr, .. } | + Variants::NicheFilling { niche: ref discr, .. } => { + assert_eq!(i, 0); + let layout = CachedLayout::scalar(tcx, discr.clone()); + return MaybeResult::from_ok(TyLayout { + cached: tcx.intern_layout(layout), + ty: discr.value.to_ty(tcx) + }); } } } @@ -2081,79 +2150,74 @@ impl<'a, 'tcx> TyLayout<'tcx> { /// Find the offset of a niche leaf field, starting from /// the given type and recursing through aggregates. - /// The tuple is `(offset, primitive, niche_value)`. - // FIXME(eddyb) track value ranges and traverse already optimized enums. + /// The tuple is `(offset, scalar, niche_value)`. + // FIXME(eddyb) traverse already optimized enums. 
fn find_niche(&self, cx: C) - -> Result, LayoutError<'tcx>> + -> Result, LayoutError<'tcx>> where C: LayoutOf, TyLayout = Result>> + HasTyCtxt<'tcx> { - let tcx = cx.tcx(); - match (&self.variants, self.abi, &self.ty.sty) { - // FIXME(eddyb) check this via value ranges on scalars. - (_, Abi::Scalar(Int(I1, _)), _) => { - Ok(Some((Size::from_bytes(0), Int(I8, false), 2))) - } - (_, Abi::Scalar(Int(I32, _)), &ty::TyChar) => { - Ok(Some((Size::from_bytes(0), Int(I32, false), 0x10FFFF+1))) - } - (_, Abi::Scalar(Pointer), &ty::TyRef(..)) | - (_, Abi::Scalar(Pointer), &ty::TyFnPtr(..)) => { - Ok(Some((Size::from_bytes(0), Pointer, 0))) - } - (_, Abi::Scalar(Pointer), &ty::TyAdt(def, _)) if def.is_box() => { - Ok(Some((Size::from_bytes(0), Pointer, 0))) - } - - // FIXME(eddyb) check this via value ranges on scalars. - (&Variants::Tagged { discr, ref discr_range, .. }, _, _) => { - // FIXME(eddyb) support negative/wrap-around discriminant ranges. - if discr_range.start < discr_range.end { - if discr_range.start > 0 { - Ok(Some((self.fields.offset(0), discr, 0))) - } else { - let bits = discr.size(tcx).bits(); - assert!(bits <= 128); - let max_value = !0u128 >> (128 - bits); - if discr_range.end < max_value { - Ok(Some((self.fields.offset(0), discr, discr_range.end + 1))) - } else { - Ok(None) - } - } + if let Abi::Scalar(Scalar { value, ref valid_range }) = self.abi { + // FIXME(eddyb) support negative/wrap-around discriminant ranges. 
+ return if valid_range.start < valid_range.end { + let bits = value.size(cx).bits(); + assert!(bits <= 128); + let max_value = !0u128 >> (128 - bits); + if valid_range.start > 0 { + let niche = valid_range.start - 1; + Ok(Some((self.fields.offset(0), Scalar { + value, + valid_range: niche..=valid_range.end + }, niche))) + } else if valid_range.end < max_value { + let niche = valid_range.end + 1; + Ok(Some((self.fields.offset(0), Scalar { + value, + valid_range: valid_range.start..=niche + }, niche))) } else { Ok(None) } - } + } else { + Ok(None) + }; + } - // Is this the NonZero lang item wrapping a pointer or integer type? - (_, _, &ty::TyAdt(def, _)) if Some(def.did) == tcx.lang_items().non_zero() => { + // Is this the NonZero lang item wrapping a pointer or integer type? + if let ty::TyAdt(def, _) = self.ty.sty { + if Some(def.did) == cx.tcx().lang_items().non_zero() { let field = self.field(cx, 0)?; let offset = self.fields.offset(0); - if let Abi::Scalar(value) = field.abi { - Ok(Some((offset, value, 0))) - } else { - Ok(None) + if let Abi::Scalar(Scalar { value, ref valid_range }) = field.abi { + return Ok(Some((offset, Scalar { + value, + valid_range: 0..=valid_range.end + }, 0))); } } + } - // Perhaps one of the fields is non-zero, let's recurse and find out. - _ => { - if let FieldPlacement::Array { count, .. } = self.fields { - if count > 0 { - return self.field(cx, 0)?.find_niche(cx); - } - } - for i in 0..self.fields.count() { - let r = self.field(cx, i)?.find_niche(cx)?; - if let Some((offset, primitive, niche_value)) = r { - let offset = self.fields.offset(i) + offset; - return Ok(Some((offset, primitive, niche_value))); - } - } - Ok(None) + // Perhaps one of the fields is non-zero, let's recurse and find out. + if let FieldPlacement::Union(_) = self.fields { + // Only Rust enums have safe-to-inspect fields + // (a discriminant), other unions are unsafe. + if let Variants::Single { .. 
} = self.variants { + return Ok(None); + } + } + if let FieldPlacement::Array { count, .. } = self.fields { + if count > 0 { + return self.field(cx, 0)?.find_niche(cx); } } + for i in 0..self.fields.count() { + let r = self.field(cx, i)?.find_niche(cx)?; + if let Some((offset, scalar, niche_value)) = r { + let offset = self.fields.offset(i) + offset; + return Ok(Some((offset, scalar, niche_value))); + } + } + Ok(None) } } @@ -2169,13 +2233,10 @@ impl<'gcx> HashStable> for Variants { index.hash_stable(hcx, hasher); } Tagged { - discr, - discr_range: RangeInclusive { start, end }, + ref discr, ref variants, } => { discr.hash_stable(hcx, hasher); - start.hash_stable(hcx, hasher); - end.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); } NicheFilling { @@ -2236,6 +2297,17 @@ impl<'gcx> HashStable> for Abi { } } +impl<'gcx> HashStable> for Scalar { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + let Scalar { value, valid_range: RangeInclusive { start, end } } = *self; + value.hash_stable(hcx, hasher); + start.hash_stable(hcx, hasher); + end.hash_stable(hcx, hasher); + } +} + impl_stable_hash_for!(struct ::ty::layout::CachedLayout { variants, fields, @@ -2246,7 +2318,6 @@ impl_stable_hash_for!(struct ::ty::layout::CachedLayout { }); impl_stable_hash_for!(enum ::ty::layout::Integer { - I1, I8, I16, I32, diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index 46debcce95843..1356574f646aa 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -753,8 +753,8 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences { bug!("failed to get layout for `{}`: {}", t, e) }); - if let layout::Variants::Tagged { ref variants, discr, .. } = layout.variants { - let discr_size = discr.size(cx.tcx).bytes(); + if let layout::Variants::Tagged { ref variants, ref discr, .. 
} = layout.variants { + let discr_size = discr.value.size(cx.tcx).bytes(); debug!("enum `{}` is {} bytes large with layout:\n{:#?}", t, layout.size.bytes(), layout); diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 688fa8fe02d03..c87f856b0054e 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -287,8 +287,8 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option { match self.abi { // The primitive for this algorithm. - layout::Abi::Scalar(value) => { - let kind = match value { + layout::Abi::Scalar(ref scalar) => { + let kind = match scalar.value { layout::Int(..) | layout::Pointer => RegKind::Integer, layout::F32 | @@ -471,8 +471,8 @@ impl<'a, 'tcx> ArgType<'tcx> { pub fn extend_integer_width_to(&mut self, bits: u64) { // Only integers have signedness - match self.layout.abi { - layout::Abi::Scalar(layout::Int(i, signed)) => { + if let layout::Abi::Scalar(ref scalar) = self.layout.abi { + if let layout::Int(i, signed) = scalar.value { if i.size().bits() < bits { self.attrs.set(if signed { ArgAttribute::SExt @@ -481,8 +481,6 @@ impl<'a, 'tcx> ArgType<'tcx> { }); } } - - _ => {} } } @@ -695,9 +693,12 @@ impl<'a, 'tcx> FnType<'tcx> { let arg_of = |ty: Ty<'tcx>, is_return: bool| { let mut arg = ArgType::new(ccx.layout_of(ty)); - if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = arg.layout.abi { - arg.attrs.set(ArgAttribute::ZExt); - } else if arg.layout.is_zst() { + if let layout::Abi::Scalar(ref scalar) = arg.layout.abi { + if scalar.is_bool() { + arg.attrs.set(ArgAttribute::ZExt); + } + } + if arg.layout.is_zst() { // For some forsaken reason, x86_64-pc-windows-gnu // doesn't ignore zero-sized struct arguments. // The same is true for s390x-unknown-linux-gnu. 
diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 3c6626cfa7f3a..ff70184b26245 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -375,11 +375,12 @@ pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef { } pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef { - if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = layout.abi { - bcx.trunc(val, Type::i1(bcx.ccx)) - } else { - val + if let layout::Abi::Scalar(ref scalar) = layout.abi { + if scalar.is_bool() { + return bcx.trunc(val, Type::i1(bcx.ccx)); + } } + val } pub fn call_memcpy(b: &Builder, diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs index 9c24b637efd40..9fb460043ae81 100644 --- a/src/librustc_trans/cabi_s390x.rs +++ b/src/librustc_trans/cabi_s390x.rs @@ -27,8 +27,12 @@ fn classify_ret_ty(ret: &mut ArgType) { fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> bool { match layout.abi { - layout::Abi::Scalar(layout::F32) | - layout::Abi::Scalar(layout::F64) => true, + layout::Abi::Scalar(ref scalar) => { + match scalar.value { + layout::F32 | layout::F64 => true, + _ => false + } + } layout::Abi::Aggregate { .. } => { if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 { is_single_fp_element(ccx, layout.field(ccx, 0)) diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index 401e75387c49d..dc9f681af52f0 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -22,8 +22,12 @@ pub enum Flavor { fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> bool { match layout.abi { - layout::Abi::Scalar(layout::F32) | - layout::Abi::Scalar(layout::F64) => true, + layout::Abi::Scalar(ref scalar) => { + match scalar.value { + layout::F32 | layout::F64 => true, + _ => false + } + } layout::Abi::Aggregate { .. 
} => { if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 { is_single_fp_element(ccx, layout.field(ccx, 0)) diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index b799a7690bdf8..bc445c7d2a76d 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -65,8 +65,8 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) } match layout.abi { - layout::Abi::Scalar(value) => { - let reg = match value { + layout::Abi::Scalar(ref scalar) => { + let reg = match scalar.value { layout::Int(..) | layout::Pointer => Class::Int, layout::F32 | diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 2768c7fb5772f..e0822b96eeb06 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1429,11 +1429,13 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let discriminant_type_metadata = match layout.variants { layout::Variants::Single { .. } | layout::Variants::NicheFilling { .. } => None, - layout::Variants::Tagged { discr, .. } => Some(discriminant_type_metadata(discr)), + layout::Variants::Tagged { ref discr, .. 
} => { + Some(discriminant_type_metadata(discr.value)) + } }; - match (layout.abi, discriminant_type_metadata) { - (layout::Abi::Scalar(_), Some(discr)) => return FinalMetadata(discr), + match (&layout.abi, discriminant_type_metadata) { + (&layout::Abi::Scalar(_), Some(discr)) => return FinalMetadata(discr), _ => {} } diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 83fc10173166c..f6c4153c183de 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -26,6 +26,7 @@ #![feature(i128_type)] #![feature(i128)] #![feature(inclusive_range)] +#![feature(inclusive_range_syntax)] #![feature(libc)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index cd152a391b8ae..139c4c656db07 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -671,10 +671,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { (align | Alignment::Packed(arg.layout.align)) .non_abi()); } else { + // We can't use `LvalueRef::load` here because the argument + // may have a type we don't treat as immediate, but the ABI + // used for this call is passing it by-value. In that case, + // the load would just produce `OperandValue::Ref` instead + // of the `OperandValue::Immediate` we need for the call. llval = bcx.load(llval, align.non_abi()); - } - if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = arg.layout.abi { - bcx.range_metadata(llval, 0..2); + if let layout::Abi::Scalar(ref scalar) = arg.layout.abi { + if scalar.is_bool() { + bcx.range_metadata(llval, 0..2); + } + } // We store bools as i8 so we need to truncate to i1. 
llval = base::to_immediate(bcx, llval, arg.layout); } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index d782ffe1f9d34..7e1569c8f8f5e 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -455,9 +455,9 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { Value(base)); } let layout = self.ccx.layout_of(projected_ty); - if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = layout.abi { + if let layout::Abi::Scalar(ref scalar) = layout.abi { let i1_type = Type::i1(self.ccx); - if val_ty(val) != i1_type { + if scalar.is_bool() && val_ty(val) != i1_type { unsafe { val = llvm::LLVMConstTrunc(val, i1_type.to_ref()); } @@ -685,10 +685,14 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { assert!(cast_layout.is_llvm_immediate()); let ll_t_out = cast_layout.immediate_llvm_type(self.ccx); let llval = operand.llval; - let signed = match self.ccx.layout_of(operand.ty).abi { - layout::Abi::Scalar(layout::Int(_, signed)) => signed, - _ => false - }; + + let mut signed = false; + let l = self.ccx.layout_of(operand.ty); + if let layout::Abi::Scalar(ref scalar) = l.abi { + if let layout::Int(_, true) = scalar.value { + signed = true; + } + } unsafe { match (r_t_in, r_t_out) { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 1f8209c7066a9..f9a179ee0eeca 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -148,16 +148,29 @@ impl<'a, 'tcx> LvalueRef<'tcx> { const_llval } else { let load = bcx.load(self.llval, self.alignment.non_abi()); - if self.layout.ty.is_bool() { - bcx.range_metadata(load, 0..2); - } else if self.layout.ty.is_char() { - // a char is a Unicode codepoint, and so takes values from 0 - // to 0x10FFFF inclusive only. 
- bcx.range_metadata(load, 0..0x10FFFF+1); - } else if self.layout.ty.is_region_ptr() || - self.layout.ty.is_box() || - self.layout.ty.is_fn() { - bcx.nonnull_metadata(load); + if let layout::Abi::Scalar(ref scalar) = self.layout.abi { + let (min, max) = (scalar.valid_range.start, scalar.valid_range.end); + let max_next = max.wrapping_add(1); + let bits = scalar.value.size(bcx.ccx).bits(); + assert!(bits <= 128); + let mask = !0u128 >> (128 - bits); + // For a (max) value of -1, max will be `-1 as usize`, which overflows. + // However, that is fine here (it would still represent the full range), + // i.e., if the range is everything. The lo==hi case would be + // rejected by the LLVM verifier (it would mean either an + // empty set, which is impossible, or the entire range of the + // type, which is pointless). + match scalar.value { + layout::Int(..) if max_next & mask != min & mask => { + // llvm::ConstantRange can deal with ranges that wrap around, + // so an overflow on (max + 1) is fine. + bcx.range_metadata(load, min..max_next); + } + layout::Pointer if 0 < min && min < max => { + bcx.nonnull_metadata(load); + } + _ => {} + } } load }; @@ -274,48 +287,18 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx); match self.layout.variants { layout::Variants::Single { index } => { - assert_eq!(index, 0); - return C_uint(cast_to, 0); + return C_uint(cast_to, index as u64); } layout::Variants::Tagged { .. } | layout::Variants::NicheFilling { .. } => {}, } let discr = self.project_field(bcx, 0); - let discr_scalar = match discr.layout.abi { - layout::Abi::Scalar(discr) => discr, - _ => bug!("discriminant not scalar: {:#?}", discr.layout) - }; - let (min, max) = match self.layout.variants { - layout::Variants::Tagged { ref discr_range, .. 
} => { - (discr_range.start, discr_range.end) - } - _ => (0, !0), - }; - let max_next = max.wrapping_add(1); - let bits = discr_scalar.size(bcx.ccx).bits(); - assert!(bits <= 128); - let mask = !0u128 >> (128 - bits); - let lldiscr = bcx.load(discr.llval, discr.alignment.non_abi()); - match discr_scalar { - // For a (max) discr of -1, max will be `-1 as usize`, which overflows. - // However, that is fine here (it would still represent the full range), - layout::Int(..) if max_next & mask != min & mask => { - // llvm::ConstantRange can deal with ranges that wrap around, - // so an overflow on (max + 1) is fine. - bcx.range_metadata(lldiscr, min..max_next); - } - _ => { - // i.e., if the range is everything. The lo==hi case would be - // rejected by the LLVM verifier (it would mean either an - // empty set, which is impossible, or the entire range of the - // type, which is pointless). - } - }; + let lldiscr = discr.load(bcx).immediate(); match self.layout.variants { layout::Variants::Single { .. } => bug!(), - layout::Variants::Tagged { .. } => { - let signed = match discr_scalar { + layout::Variants::Tagged { ref discr, .. } => { + let signed = match discr.value { layout::Int(_, signed) => signed, _ => false }; diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index f584c6a653e5f..e52dcd0756211 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -119,6 +119,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } // Use llvm.memset.p0i8.* to initialize byte arrays + let v = base::from_immediate(&bcx, v); if common::val_ty(v) == Type::i8(bcx.ccx) { base::call_memset(&bcx, start, v, size, align, false); return bcx; @@ -278,28 +279,25 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let ll_t_out = cast.immediate_llvm_type(bcx.ccx); let llval = operand.immediate(); - match operand.layout.variants { - layout::Variants::Tagged { - ref discr_range, .. 
- } if discr_range.end > discr_range.start => { - // We want `table[e as usize]` to not - // have bound checks, and this is the most - // convenient place to put the `assume`. - - base::call_assume(&bcx, bcx.icmp( - llvm::IntULE, - llval, - C_uint_big(ll_t_in, discr_range.end) - )); + let mut signed = false; + if let layout::Abi::Scalar(ref scalar) = operand.layout.abi { + if let layout::Int(_, s) = scalar.value { + signed = s; + + if scalar.valid_range.end > scalar.valid_range.start { + // We want `table[e as usize]` to not + // have bound checks, and this is the most + // convenient place to put the `assume`. + + base::call_assume(&bcx, bcx.icmp( + llvm::IntULE, + llval, + C_uint_big(ll_t_in, scalar.valid_range.end) + )); + } } - _ => {} } - let signed = match operand.layout.abi { - layout::Abi::Scalar(layout::Int(_, signed)) => signed, - _ => false - }; - let newval = match (r_t_in, r_t_out) { (CastTy::Int(_), CastTy::Int(_)) => { bcx.intcast(llval, ll_t_out, signed) diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index 53aaed1578322..2774359c994a5 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -268,7 +268,6 @@ impl Type { pub fn from_integer(cx: &CrateContext, i: layout::Integer) -> Type { use rustc::ty::layout::Integer::*; match i { - I1 => Type::i1(cx), I8 => Type::i8(cx), I16 => Type::i16(cx), I32 => Type::i32(cx), diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 6fec1a675cd63..eab5cb159de39 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -176,14 +176,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. 
fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { - if let layout::Abi::Scalar(value) = self.abi { + if let layout::Abi::Scalar(ref scalar) = self.abi { // Use a different cache for scalars because pointers to DSTs // can be either fat or thin (data pointers of fat pointers). if let Some(&llty) = ccx.scalar_lltypes().borrow().get(&self.ty) { return llty; } - let llty = match value { - layout::Int(layout::I1, _) => Type::i8(ccx), + let llty = match scalar.value { layout::Int(i, _) => Type::from_integer(ccx, i), layout::F32 => Type::f32(ccx), layout::F64 => Type::f64(ccx), @@ -249,11 +248,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { - if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = self.abi { - Type::i1(ccx) - } else { - self.llvm_type(ccx) + if let layout::Abi::Scalar(ref scalar) = self.abi { + if scalar.is_bool() { + return Type::i1(ccx); + } } + self.llvm_type(ccx) } fn over_align(&self) -> Option { From ced5e04e8bfa80ae297cba6c95ec0948dceb6933 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 26 Sep 2017 21:34:10 +0300 Subject: [PATCH 49/69] rustc: optimize out uninhabited types and variants. 
--- src/librustc/ty/layout.rs | 151 ++++++++++++------ src/librustc_trans/abi.rs | 3 + src/librustc_trans/cabi_x86_64.rs | 2 + src/librustc_trans/cabi_x86_win64.rs | 1 + src/librustc_trans/debuginfo/metadata.rs | 61 ++++--- src/librustc_trans/mir/block.rs | 6 +- src/librustc_trans/mir/constant.rs | 10 +- src/librustc_trans/mir/lvalue.rs | 25 ++- src/librustc_trans/type_of.rs | 7 +- src/test/ui/print_type_sizes/uninhabited.rs | 18 +++ .../ui/print_type_sizes/uninhabited.stdout | 5 + 11 files changed, 197 insertions(+), 92 deletions(-) create mode 100644 src/test/ui/print_type_sizes/uninhabited.rs create mode 100644 src/test/ui/print_type_sizes/uninhabited.stdout diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 899245b22aaa9..0edd8f44f0ce1 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -755,6 +755,7 @@ impl FieldPlacement { /// in terms of categories of C types there are ABI rules for. #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub enum Abi { + Uninhabited, Scalar(Scalar), Vector, Aggregate { @@ -768,7 +769,7 @@ impl Abi { /// Returns true if the layout corresponds to an unsized type. pub fn is_unsized(&self) -> bool { match *self { - Abi::Scalar(_) | Abi::Vector => false, + Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector => false, Abi::Aggregate { sized, .. } => !sized } } @@ -776,7 +777,7 @@ impl Abi { /// Returns true if the fields of the layout are packed. pub fn is_packed(&self) -> bool { match *self { - Abi::Scalar(_) | Abi::Vector => false, + Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector => false, Abi::Aggregate { packed, .. } => packed } } @@ -807,6 +808,7 @@ pub enum Variants { /// `Some` is the identity function (with a non-null reference). 
NicheFilling { dataful_variant: usize, + niche_variant: usize, niche: Scalar, niche_value: u128, variants: Vec, @@ -855,6 +857,18 @@ impl CachedLayout { primitive_align: align } } + + fn uninhabited(field_count: usize) -> Self { + let align = Align::from_bytes(1, 1).unwrap(); + CachedLayout { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Union(field_count), + abi: Abi::Uninhabited, + align, + primitive_align: align, + size: Size::from_bytes(0) + } + } } fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -915,13 +929,14 @@ impl<'a, 'tcx> CachedLayout { bug!("struct cannot be packed and aligned"); } - let mut align = if packed { + let base_align = if packed { dl.i8_align } else { dl.aggregate_align }; - let mut primitive_align = align; + let mut align = base_align; + let mut primitive_align = base_align; let mut sized = true; // Anything with repr(C) or repr(packed) doesn't optimize. @@ -978,13 +993,17 @@ impl<'a, 'tcx> CachedLayout { } } - for i in inverse_memory_index.iter() { - let field = fields[*i as usize]; + for &i in &inverse_memory_index { + let field = fields[i as usize]; if !sized { bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty); } + if field.abi == Abi::Uninhabited { + return Ok(CachedLayout::uninhabited(fields.len())); + } + if field.is_unsized() { sized = false; } @@ -997,7 +1016,7 @@ impl<'a, 'tcx> CachedLayout { } debug!("univariant offset: {:?} field: {:#?}", offset, field); - offsets[*i as usize] = offset; + offsets[i as usize] = offset; offset = offset.checked_add(field.size, dl) .ok_or(LayoutError::SizeOverflow(ty))?; @@ -1124,7 +1143,7 @@ impl<'a, 'tcx> CachedLayout { // The never type. ty::TyNever => { - univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)? + tcx.intern_layout(CachedLayout::uninhabited(0)) } // Potentially-fat pointers. 
@@ -1278,11 +1297,15 @@ impl<'a, 'tcx> CachedLayout { }).collect::, _>>() }).collect::, _>>()?; - if variants.is_empty() { - // Uninhabitable; represent as unit - // (Typechecking will reject discriminant-sizing attrs.) - - return univariant(&[], &def.repr, StructKind::AlwaysSized); + let (inh_first, inh_second, inh_third) = { + let mut inh_variants = (0..variants.len()).filter(|&v| { + variants[v].iter().all(|f| f.abi != Abi::Uninhabited) + }); + (inh_variants.next(), inh_variants.next(), inh_variants.next()) + }; + if inh_first.is_none() { + // Uninhabited because it has no variants, or only uninhabited ones. + return Ok(tcx.intern_layout(CachedLayout::uninhabited(0))); } if def.is_union() { @@ -1329,49 +1352,58 @@ impl<'a, 'tcx> CachedLayout { })); } - if !def.is_enum() || (variants.len() == 1 && - !def.repr.inhibit_enum_layout_opt() && - !variants[0].is_empty()) { - // Struct, or union, or univariant enum equivalent to a struct. + let is_struct = !def.is_enum() || + // Only one variant is inhabited. + (inh_second.is_none() && + // Representation optimizations are allowed. + !def.repr.inhibit_enum_layout_opt() && + // Inhabited variant either has data ... + (!variants[inh_first.unwrap()].is_empty() || + // ... or there other, uninhabited, variants. + variants.len() > 1)); + if is_struct { + // Struct, or univariant enum equivalent to a struct. // (Typechecking will reject discriminant-sizing attrs.) 
- let kind = if def.is_enum() || variants[0].len() == 0 { + let v = inh_first.unwrap(); + let kind = if def.is_enum() || variants[v].len() == 0 { StructKind::AlwaysSized } else { let param_env = tcx.param_env(def.did); - let last_field = def.variants[0].fields.last().unwrap(); + let last_field = def.variants[v].fields.last().unwrap(); let always_sized = tcx.type_of(last_field.did) .is_sized(tcx, param_env, DUMMY_SP); if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized } }; - return univariant(&variants[0], &def.repr, kind); + let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?; + st.variants = Variants::Single { index: v }; + return Ok(tcx.intern_layout(st)); } let no_explicit_discriminants = def.variants.iter().enumerate() .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i)); - if variants.len() == 2 && + if inh_second.is_some() && inh_third.is_none() && !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants { // Nullable pointer optimization - for i in 0..2 { - if !variants[1 - i].iter().all(|f| f.is_zst()) { + let (a, b) = (inh_first.unwrap(), inh_second.unwrap()); + for &(i, other) in &[(a, b), (b, a)] { + if !variants[other].iter().all(|f| f.is_zst()) { continue; } for (field_index, field) in variants[i].iter().enumerate() { if let Some((offset, niche, niche_value)) = field.find_niche(cx)? { - let mut st = vec![ - univariant_uninterned(&variants[0], - &def.repr, StructKind::AlwaysSized)?, - univariant_uninterned(&variants[1], - &def.repr, StructKind::AlwaysSized)? 
- ]; - for (i, v) in st.iter_mut().enumerate() { - v.variants = Variants::Single { index: i }; - } + let st = variants.iter().enumerate().map(|(j, v)| { + let mut st = univariant_uninterned(v, + &def.repr, StructKind::AlwaysSized)?; + st.variants = Variants::Single { index: j }; + Ok(st) + }).collect::, _>>()?; + let offset = st[i].fields.offset(field_index) + offset; let CachedLayout { size, @@ -1400,6 +1432,7 @@ impl<'a, 'tcx> CachedLayout { return Ok(tcx.intern_layout(CachedLayout { variants: Variants::NicheFilling { dataful_variant: i, + niche_variant: other, niche, niche_value, variants: st, @@ -1419,11 +1452,15 @@ impl<'a, 'tcx> CachedLayout { } let (mut min, mut max) = (i128::max_value(), i128::min_value()); - for discr in def.discriminants(tcx) { + for (i, discr) in def.discriminants(tcx).enumerate() { + if variants[i].iter().any(|f| f.abi == Abi::Uninhabited) { + continue; + } let x = discr.to_u128_unchecked() as i128; if x < min { min = x; } if x > max { max = x; } } + assert!(min <= max, "discriminant range is {}...{}", min, max); let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); let mut align = dl.aggregate_align; @@ -1498,6 +1535,9 @@ impl<'a, 'tcx> CachedLayout { let old_ity_size = min_ity.size(); let new_ity_size = ity.size(); for variant in &mut variants { + if variant.abi == Abi::Uninhabited { + continue; + } match variant.fields { FieldPlacement::Arbitrary { ref mut offsets, .. } => { for i in offsets { @@ -1663,16 +1703,11 @@ impl<'a, 'tcx> CachedLayout { }; match layout.variants { - Variants::Single { .. 
} => { - let variant_names = || { - adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::>() - }; - debug!("print-type-size `{:#?}` variants: {:?}", - layout, variant_names()); - assert!(adt_def.variants.len() <= 1, - "univariant with variants {:?}", variant_names()); - if adt_def.variants.len() == 1 { - let variant_def = &adt_def.variants[0]; + Variants::Single { index } => { + debug!("print-type-size `{:#?}` variant {}", + layout, adt_def.variants[index].name); + if !adt_def.variants.is_empty() { + let variant_def = &adt_def.variants[index]; let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect(); record(adt_kind.into(), @@ -1697,7 +1732,7 @@ impl<'a, 'tcx> CachedLayout { variant_def.fields.iter().map(|f| f.name).collect(); build_variant_info(Some(variant_def.name), &fields, - layout.for_variant(i)) + layout.for_variant(cx, i)) }) .collect(); record(adt_kind.into(), match layout.variants { @@ -1989,15 +2024,35 @@ impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, } impl<'a, 'tcx> TyLayout<'tcx> { - pub fn for_variant(&self, variant_index: usize) -> Self { + pub fn for_variant(&self, cx: C, variant_index: usize) -> Self + where C: LayoutOf> + HasTyCtxt<'tcx>, + C::TyLayout: MaybeResult> + { let cached = match self.variants { - Variants::Single { .. } => self.cached, + Variants::Single { index } if index == variant_index => self.cached, + + Variants::Single { index } => { + // Deny calling for_variant more than once for non-Single enums. + cx.layout_of(self.ty).map_same(|layout| { + assert_eq!(layout.variants, Variants::Single { index }); + layout + }); + + let fields = match self.ty.sty { + ty::TyAdt(def, _) => def.variants[variant_index].fields.len(), + _ => bug!() + }; + let mut cached = CachedLayout::uninhabited(fields); + cached.variants = Variants::Single { index: variant_index }; + cx.tcx().intern_layout(cached) + } Variants::NicheFilling { ref variants, .. } | Variants::Tagged { ref variants, .. 
} => { &variants[variant_index] } }; + assert_eq!(cached.variants, Variants::Single { index: variant_index }); TyLayout { @@ -2138,6 +2193,7 @@ impl<'a, 'tcx> TyLayout<'tcx> { /// Returns true if the type is a ZST and not unsized. pub fn is_zst(&self) -> bool { match self.abi { + Abi::Uninhabited => true, Abi::Scalar(_) => false, Abi::Vector => self.size.bytes() == 0, Abi::Aggregate { sized, .. } => sized && self.size.bytes() == 0 @@ -2241,11 +2297,13 @@ impl<'gcx> HashStable> for Variants { } NicheFilling { dataful_variant, + niche_variant, ref niche, niche_value, ref variants, } => { dataful_variant.hash_stable(hcx, hasher); + niche_variant.hash_stable(hcx, hasher); niche.hash_stable(hcx, hasher); niche_value.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); @@ -2285,6 +2343,7 @@ impl<'gcx> HashStable> for Abi { mem::discriminant(self).hash_stable(hcx, hasher); match *self { + Uninhabited => {} Scalar(ref value) => { value.hash_stable(hcx, hasher); } diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index c87f856b0054e..c4b90d94dd4b3 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -278,6 +278,7 @@ pub trait LayoutExt<'tcx> { impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { fn is_aggregate(&self) -> bool { match self.abi { + layout::Abi::Uninhabited | layout::Abi::Scalar(_) | layout::Abi::Vector => false, layout::Abi::Aggregate { .. } => true @@ -286,6 +287,8 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option { match self.abi { + layout::Abi::Uninhabited => None, + // The primitive for this algorithm. 
layout::Abi::Scalar(ref scalar) => { let kind = match scalar.value { diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index bc445c7d2a76d..62540fac8b53e 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -65,6 +65,8 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) } match layout.abi { + layout::Abi::Uninhabited => {} + layout::Abi::Scalar(ref scalar) => { let reg = match scalar.value { layout::Int(..) | diff --git a/src/librustc_trans/cabi_x86_win64.rs b/src/librustc_trans/cabi_x86_win64.rs index ceb649be197ef..e93eeb83619b6 100644 --- a/src/librustc_trans/cabi_x86_win64.rs +++ b/src/librustc_trans/cabi_x86_win64.rs @@ -17,6 +17,7 @@ use rustc::ty::layout; pub fn compute_abi_info(fty: &mut FnType) { let fixup = |a: &mut ArgType| { match a.layout.abi { + layout::Abi::Uninhabited => {} layout::Abi::Aggregate { .. } => { match a.layout.size.bits() { 8 => a.cast_to(Reg::i8()), diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index e0822b96eeb06..25a35274d3233 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1130,43 +1130,38 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { -> Vec { let adt = &self.enum_type.ty_adt_def().unwrap(); match self.layout.variants { - layout::Variants::Single { .. } => { - assert!(adt.variants.len() <= 1); - - if adt.variants.is_empty() { - vec![] - } else { - let (variant_type_metadata, member_description_factory) = - describe_enum_variant(cx, - self.layout, - &adt.variants[0], - NoDiscriminant, - self.containing_scope, - self.span); + layout::Variants::Single { .. 
} if adt.variants.is_empty() => vec![], + layout::Variants::Single { index } => { + let (variant_type_metadata, member_description_factory) = + describe_enum_variant(cx, + self.layout, + &adt.variants[index], + NoDiscriminant, + self.containing_scope, + self.span); - let member_descriptions = - member_description_factory.create_member_descriptions(cx); + let member_descriptions = + member_description_factory.create_member_descriptions(cx); - set_members_of_composite_type(cx, - variant_type_metadata, - &member_descriptions[..]); - vec![ - MemberDescription { - name: "".to_string(), - type_metadata: variant_type_metadata, - offset: Size::from_bytes(0), - size: self.layout.size, - align: self.layout.align, - flags: DIFlags::FlagZero - } - ] - } + set_members_of_composite_type(cx, + variant_type_metadata, + &member_descriptions[..]); + vec![ + MemberDescription { + name: "".to_string(), + type_metadata: variant_type_metadata, + offset: Size::from_bytes(0), + size: self.layout.size, + align: self.layout.align, + flags: DIFlags::FlagZero + } + ] } layout::Variants::Tagged { ref variants, .. } => { let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata .expect("")); (0..variants.len()).map(|i| { - let variant = self.layout.for_variant(i); + let variant = self.layout.for_variant(cx, i); let (variant_type_metadata, member_desc_factory) = describe_enum_variant(cx, variant, @@ -1191,8 +1186,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } }).collect() } - layout::Variants::NicheFilling { dataful_variant, .. } => { - let variant = self.layout.for_variant(dataful_variant); + layout::Variants::NicheFilling { dataful_variant, niche_variant, .. 
} => { + let variant = self.layout.for_variant(cx, dataful_variant); // Create a description of the non-null variant let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, @@ -1236,7 +1231,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { self.layout, self.layout.fields.offset(0), self.layout.field(cx, 0).size); - name.push_str(&adt.variants[1 - dataful_variant].name.as_str()); + name.push_str(&adt.variants[niche_variant].name.as_str()); // Create the (singleton) list of descriptions of union members. vec![ diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 139c4c656db07..d1b6e9073b843 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -710,7 +710,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { Immediate(llval) => { for i in 0..tuple.layout.fields.count() { let field = tuple.layout.field(bcx.ccx, i); - let elem = bcx.extract_value(llval, tuple.layout.llvm_field_index(i)); + let elem = if field.is_zst() { + C_undef(field.llvm_type(bcx.ccx)) + } else { + bcx.extract_value(llval, tuple.layout.llvm_field_index(i)) + }; // If the tuple is immediate, the elements are as well let op = OperandRef { val: Immediate(base::to_immediate(bcx, elem, field)), diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 7e1569c8f8f5e..542893bd62b39 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1099,6 +1099,11 @@ fn trans_const_adt<'a, 'tcx>( mir::AggregateKind::Adt(_, index, _, _) => index, _ => 0, }; + + if let layout::Abi::Uninhabited = l.abi { + return Const::new(C_undef(l.llvm_type(ccx)), t); + } + match l.variants { layout::Variants::Single { index } => { assert_eq!(variant_index, index); @@ -1114,7 +1119,6 @@ fn trans_const_adt<'a, 'tcx>( Const::new(C_struct(ccx, &contents, l.is_packed()), t) } else { - assert_eq!(variant_index, 0); build_const_struct(ccx, l, vals, None) } } @@ -1132,12 +1136,12 @@ fn 
trans_const_adt<'a, 'tcx>( Const::new(discr, t) } else { let discr = Const::new(discr, discr_field.ty); - build_const_struct(ccx, l.for_variant(variant_index), vals, Some(discr)) + build_const_struct(ccx, l.for_variant(ccx, variant_index), vals, Some(discr)) } } layout::Variants::NicheFilling { dataful_variant, niche_value, .. } => { if variant_index == dataful_variant { - build_const_struct(ccx, l.for_variant(dataful_variant), vals, None) + build_const_struct(ccx, l.for_variant(ccx, dataful_variant), vals, None) } else { let niche = l.field(ccx, 0); let niche_llty = niche.llvm_type(ccx); diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index f9a179ee0eeca..c6eb822ec8761 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -115,7 +115,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { assert_eq!(count, 0); self.llextra } else { - common::C_usize(ccx, count) + C_usize(ccx, count) } } else { bug!("unexpected layout `{:#?}` in LvalueRef::len", self.layout) @@ -304,7 +304,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> { }; bcx.intcast(lldiscr, cast_to, signed) } - layout::Variants::NicheFilling { dataful_variant, niche_value, .. } => { + layout::Variants::NicheFilling { + dataful_variant, + niche_variant, + niche_value, + .. + } => { let niche_llty = discr.layout.llvm_type(bcx.ccx); // FIXME(eddyb) Check the actual primitive type here. 
let niche_llval = if niche_value == 0 { @@ -313,8 +318,9 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } else { C_uint_big(niche_llty, niche_value) }; - let cmp = if dataful_variant == 0 { llvm::IntEQ } else { llvm::IntNE }; - bcx.intcast(bcx.icmp(cmp, lldiscr, niche_llval), cast_to, false) + bcx.select(bcx.icmp(llvm::IntEQ, lldiscr, niche_llval), + C_uint(cast_to, niche_variant as u64), + C_uint(cast_to, dataful_variant as u64)) } } } @@ -324,7 +330,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> { pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) { match self.layout.variants { layout::Variants::Single { index } => { - assert_eq!(variant_index, index); + if index != variant_index { + // If the layout of an enum is `Single`, all + // other variants are necessarily uninhabited. + assert_eq!(self.layout.for_variant(bcx.ccx, variant_index).abi, + layout::Abi::Uninhabited); + } } layout::Variants::Tagged { .. } => { let ptr = self.project_field(bcx, 0); @@ -366,7 +377,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> LvalueRef<'tcx> { LvalueRef { - llval: bcx.inbounds_gep(self.llval, &[common::C_usize(bcx.ccx, 0), llindex]), + llval: bcx.inbounds_gep(self.llval, &[C_usize(bcx.ccx, 0), llindex]), llextra: ptr::null_mut(), layout: self.layout.field(bcx.ccx, 0), alignment: self.alignment @@ -376,7 +387,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) -> LvalueRef<'tcx> { let mut downcast = *self; - downcast.layout = self.layout.for_variant(variant_index); + downcast.layout = self.layout.for_variant(bcx.ccx, variant_index); // Cast to the appropriate variant struct type. 
let variant_ty = downcast.layout.llvm_type(bcx.ccx); diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index eab5cb159de39..d2f9ca3546812 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -27,6 +27,7 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, return Type::vector(&layout.field(ccx, 0).llvm_type(ccx), layout.fields.count() as u64); } + layout::Abi::Uninhabited | layout::Abi::Aggregate { .. } => {} } @@ -158,7 +159,9 @@ pub trait LayoutLlvmExt<'tcx> { impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { fn is_llvm_immediate(&self) -> bool { match self.abi { - layout::Abi::Scalar(_) | layout::Abi::Vector => true, + layout::Abi::Uninhabited | + layout::Abi::Scalar(_) | + layout::Abi::Vector => true, layout::Abi::Aggregate { .. } => self.is_zst() } @@ -230,7 +233,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { let llty = if self.ty != normal_ty { let mut layout = ccx.layout_of(normal_ty); if let Some(v) = variant_index { - layout = layout.for_variant(v); + layout = layout.for_variant(ccx, v); } layout.llvm_type(ccx) } else { diff --git a/src/test/ui/print_type_sizes/uninhabited.rs b/src/test/ui/print_type_sizes/uninhabited.rs new file mode 100644 index 0000000000000..69cc4c933601e --- /dev/null +++ b/src/test/ui/print_type_sizes/uninhabited.rs @@ -0,0 +1,18 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// compile-flags: -Z print-type-sizes + +#![feature(never_type)] + +pub fn main() { + let _x: Option = None; + let _y: Result = Ok(42); +} diff --git a/src/test/ui/print_type_sizes/uninhabited.stdout b/src/test/ui/print_type_sizes/uninhabited.stdout new file mode 100644 index 0000000000000..2a8706f7ac551 --- /dev/null +++ b/src/test/ui/print_type_sizes/uninhabited.stdout @@ -0,0 +1,5 @@ +print-type-size type: `std::result::Result`: 4 bytes, alignment: 4 bytes +print-type-size variant `Ok`: 4 bytes +print-type-size field `.0`: 4 bytes +print-type-size type: `std::option::Option`: 0 bytes, alignment: 1 bytes +print-type-size variant `None`: 0 bytes From f8d5d0c30c32c20163e45c3c1521add198b63afc Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 3 Oct 2017 10:45:07 +0300 Subject: [PATCH 50/69] rustc_trans: compute better align/dereferenceable attributes from pointees. --- src/librustc_llvm/ffi.rs | 6 + src/librustc_trans/abi.rs | 284 +++++++++++++++---------- src/rustllvm/RustWrapper.cpp | 58 +++++ src/test/codegen/function-arguments.rs | 10 +- src/test/codegen/packed.rs | 2 +- 5 files changed, 237 insertions(+), 123 deletions(-) diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index fdc27d4e041cc..f8c71d4825513 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -730,7 +730,9 @@ extern "C" { FunctionTy: TypeRef) -> ValueRef; pub fn LLVMSetFunctionCallConv(Fn: ValueRef, CC: c_uint); + pub fn LLVMRustAddAlignmentAttr(Fn: ValueRef, index: c_uint, bytes: u32); pub fn LLVMRustAddDereferenceableAttr(Fn: ValueRef, index: c_uint, bytes: u64); + pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: ValueRef, index: c_uint, bytes: u64); pub fn LLVMRustAddFunctionAttribute(Fn: ValueRef, index: c_uint, attr: Attribute); pub fn LLVMRustAddFunctionAttrStringValue(Fn: ValueRef, index: c_uint, @@ -760,7 +762,11 @@ extern "C" { // Operations on call sites pub fn LLVMSetInstructionCallConv(Instr: ValueRef, CC: c_uint); pub fn 
LLVMRustAddCallSiteAttribute(Instr: ValueRef, index: c_uint, attr: Attribute); + pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u32); pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u64); + pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: ValueRef, + index: c_uint, + bytes: u64); // Operations on load/store instructions (only) pub fn LLVMSetVolatile(MemoryAccessInst: ValueRef, volatile: Bool); diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index c4b90d94dd4b3..aaadc7518e51c 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -96,20 +96,24 @@ impl ArgAttribute { /// A compact representation of LLVM attributes (at least those relevant for this module) /// that can be manipulated without interacting with LLVM's Attribute machinery. -#[derive(Copy, Clone, Debug, Default)] +#[derive(Copy, Clone, Debug)] pub struct ArgAttributes { regular: ArgAttribute, - dereferenceable_bytes: u64, + pointee_size: Size, + pointee_align: Option } impl ArgAttributes { - pub fn set(&mut self, attr: ArgAttribute) -> &mut Self { - self.regular = self.regular | attr; - self + fn new() -> Self { + ArgAttributes { + regular: ArgAttribute::default(), + pointee_size: Size::from_bytes(0), + pointee_align: None, + } } - pub fn set_dereferenceable(&mut self, size: Size) -> &mut Self { - self.dereferenceable_bytes = size.bytes(); + pub fn set(&mut self, attr: ArgAttribute) -> &mut Self { + self.regular = self.regular | attr; self } @@ -118,24 +122,52 @@ impl ArgAttributes { } pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) { + let mut regular = self.regular; unsafe { - self.regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); - if self.dereferenceable_bytes != 0 { - llvm::LLVMRustAddDereferenceableAttr(llfn, - idx.as_uint(), - self.dereferenceable_bytes); + let deref = self.pointee_size.bytes(); + if deref != 0 { + if regular.contains(ArgAttribute::NonNull) { + 
llvm::LLVMRustAddDereferenceableAttr(llfn, + idx.as_uint(), + deref); + } else { + llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, + idx.as_uint(), + deref); + } + regular -= ArgAttribute::NonNull; } + if let Some(align) = self.pointee_align { + llvm::LLVMRustAddAlignmentAttr(llfn, + idx.as_uint(), + align.abi() as u32); + } + regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); } } pub fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) { + let mut regular = self.regular; unsafe { - self.regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite)); - if self.dereferenceable_bytes != 0 { - llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, - idx.as_uint(), - self.dereferenceable_bytes); + let deref = self.pointee_size.bytes(); + if deref != 0 { + if regular.contains(ArgAttribute::NonNull) { + llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, + idx.as_uint(), + deref); + } else { + llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite, + idx.as_uint(), + deref); + } + regular -= ArgAttribute::NonNull; + } + if let Some(align) = self.pointee_align { + llvm::LLVMRustAddAlignmentCallSiteAttr(callsite, + idx.as_uint(), + align.abi() as u32); } + regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite)); } } } @@ -439,12 +471,20 @@ pub struct ArgType<'tcx> { impl<'a, 'tcx> ArgType<'tcx> { fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> { + let mut attrs = ArgAttributes::new(); + + if let layout::Abi::Scalar(ref scalar) = layout.abi { + if scalar.is_bool() { + attrs.set(ArgAttribute::ZExt); + } + } + ArgType { kind: ArgKind::Direct, layout, cast: None, pad: None, - attrs: ArgAttributes::default(), + attrs, nested: vec![] } } @@ -454,14 +494,16 @@ impl<'a, 'tcx> ArgType<'tcx> { assert_eq!(self.kind, ArgKind::Direct); // Wipe old attributes, likely not valid through indirection. 
- self.attrs = ArgAttributes::default(); + self.attrs = ArgAttributes::new(); // For non-immediate arguments the callee gets its own copy of // the value on the stack, so there are no aliases. It's also // program-invisible so can't possibly capture self.attrs.set(ArgAttribute::NoAlias) .set(ArgAttribute::NoCapture) - .set_dereferenceable(self.layout.size); + .set(ArgAttribute::NonNull); + self.attrs.pointee_size = self.layout.size; + self.attrs.pointee_align = Some(self.layout.align); self.kind = ArgKind::Indirect; } @@ -472,6 +514,22 @@ impl<'a, 'tcx> ArgType<'tcx> { self.kind = ArgKind::Ignore; } + fn safe_pointee(&mut self, layout: TyLayout) { + match self.layout.abi { + layout::Abi::Scalar(layout::Scalar { + value: layout::Pointer, + ref valid_range + }) => { + if valid_range.start > 0 { + self.attrs.set(ArgAttribute::NonNull); + } + self.attrs.pointee_size = layout.size; + self.attrs.pointee_align = Some(layout.align); + } + _ => bug!("ArgType::safe_pointee({:#?}): not a pointer", self.layout) + } + } + pub fn extend_integer_width_to(&mut self, bits: u64) { // Only integers have signedness if let layout::Abi::Scalar(ref scalar) = self.layout.abi { @@ -694,123 +752,115 @@ impl<'a, 'tcx> FnType<'tcx> { _ => false }; - let arg_of = |ty: Ty<'tcx>, is_return: bool| { - let mut arg = ArgType::new(ccx.layout_of(ty)); - if let layout::Abi::Scalar(ref scalar) = arg.layout.abi { - if scalar.is_bool() { - arg.attrs.set(ArgAttribute::ZExt); - } - } - if arg.layout.is_zst() { - // For some forsaken reason, x86_64-pc-windows-gnu - // doesn't ignore zero-sized struct arguments. - // The same is true for s390x-unknown-linux-gnu. - if is_return || rust_abi || - (!win_x64_gnu && !linux_s390x) { - arg.ignore(); - } - } - arg - }; - - let ret_ty = sig.output(); - let mut ret = arg_of(ret_ty, true); - - if !type_is_fat_ptr(ccx, ret_ty) { - // The `noalias` attribute on the return value is useful to a - // function ptr caller. 
- if ret_ty.is_box() { - // `Box` pointer return values never alias because ownership - // is transferred - ret.attrs.set(ArgAttribute::NoAlias); + // Handle safe Rust thin and fat pointers. + let adjust_for_rust_type = |arg: &mut ArgType<'tcx>, is_return: bool| { + // We only handle thin pointers here. + match arg.layout.abi { + layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. }) => {} + _ => return } - // We can also mark the return value as `dereferenceable` in certain cases - match ret_ty.sty { - // These are not really pointers but pairs, (pointer, len) - ty::TyRef(_, ty::TypeAndMut { ty, .. }) => { - ret.attrs.set_dereferenceable(ccx.size_of(ty)); - } - ty::TyAdt(def, _) if def.is_box() => { - ret.attrs.set_dereferenceable(ccx.size_of(ret_ty.boxed_ty())); + let mut ty = arg.layout.ty; + + // FIXME(eddyb) detect more nested cases than `Option<&T>` here. + match arg.layout.variants { + layout::Variants::NicheFilling { dataful_variant, .. } => { + let variant = arg.layout.for_variant(ccx, dataful_variant); + for i in 0..variant.fields.count() { + let field = variant.field(ccx, i); + match field.abi { + layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. }) => { + // We found the pointer field, use its type. + ty = field.ty; + break; + } + _ => {} + } + } } _ => {} } - } - let mut args = Vec::with_capacity(inputs.len() + extra_args.len()); + match ty.sty { + // `Box` pointer parameters never alias because ownership is transferred + ty::TyAdt(def, _) if def.is_box() => { + arg.attrs.set(ArgAttribute::NoAlias); - // Handle safe Rust thin and fat pointers. 
- let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty { - // `Box` pointer parameters never alias because ownership is transferred - ty::TyAdt(def, _) if def.is_box() => { - arg.attrs.set(ArgAttribute::NoAlias); - Some(ty.boxed_ty()) - } + arg.safe_pointee(ccx.layout_of(ty.boxed_ty())); + } - ty::TyRef(_, mt) => { - // `&mut` pointer parameters never alias other parameters, or mutable global data - // - // `&T` where `T` contains no `UnsafeCell` is immutable, and can be marked as - // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely - // on memory dependencies rather than pointer equality - let is_freeze = ccx.shared().type_is_freeze(mt.ty); - - let no_alias_is_safe = - if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias || - ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort { - // Mutable refrences or immutable shared references - mt.mutbl == hir::MutMutable || is_freeze - } else { - // Only immutable shared references - mt.mutbl != hir::MutMutable && is_freeze - }; + ty::TyRef(_, mt) => { + // `&mut` pointer parameters never alias other parameters, + // or mutable global data + // + // `&T` where `T` contains no `UnsafeCell` is immutable, + // and can be marked as both `readonly` and `noalias`, as + // LLVM's definition of `noalias` is based solely on memory + // dependencies rather than pointer equality + let is_freeze = ccx.shared().type_is_freeze(mt.ty); + + let no_alias_is_safe = + if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias || + ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort { + // Mutable refrences or immutable shared references + mt.mutbl == hir::MutMutable || is_freeze + } else { + // Only immutable shared references + mt.mutbl != hir::MutMutable && is_freeze + }; - if no_alias_is_safe { - arg.attrs.set(ArgAttribute::NoAlias); - } + if no_alias_is_safe { + arg.attrs.set(ArgAttribute::NoAlias); + } - if mt.mutbl == hir::MutImmutable && is_freeze { 
- arg.attrs.set(ArgAttribute::ReadOnly); + if mt.mutbl == hir::MutImmutable && is_freeze && !is_return { + arg.attrs.set(ArgAttribute::ReadOnly); + } + + arg.safe_pointee(ccx.layout_of(mt.ty)); } + _ => {} + } - Some(mt.ty) + // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions + // with align attributes, and those calls later block optimizations. + if !is_return { + arg.attrs.pointee_align = None; } - _ => None }; - for ty in inputs.iter().chain(extra_args.iter()) { - let mut arg = arg_of(ty, false); - - if type_is_fat_ptr(ccx, ty) { - let mut data = ArgType::new(arg.layout.field(ccx, 0)); - let mut info = ArgType::new(arg.layout.field(ccx, 1)); - - if let Some(inner) = rust_ptr_attrs(ty, &mut data) { - data.attrs.set(ArgAttribute::NonNull); - if ccx.tcx().struct_tail(inner).is_trait() { - // vtables can be safely marked non-null, readonly - // and noalias. - info.attrs.set(ArgAttribute::NonNull); - info.attrs.set(ArgAttribute::ReadOnly); - info.attrs.set(ArgAttribute::NoAlias); - } - } - // FIXME(eddyb) other ABIs don't have logic for nested. - if rust_abi { - arg.nested = vec![data, info]; + let arg_of = |ty: Ty<'tcx>, is_return: bool| { + let mut arg = ArgType::new(ccx.layout_of(ty)); + if arg.layout.is_zst() { + // For some forsaken reason, x86_64-pc-windows-gnu + // doesn't ignore zero-sized struct arguments. + // The same is true for s390x-unknown-linux-gnu. + if is_return || rust_abi || + (!win_x64_gnu && !linux_s390x) { + arg.ignore(); } + } + + // FIXME(eddyb) other ABIs don't have logic for nested. 
+ if !is_return && type_is_fat_ptr(ccx, arg.layout.ty) && rust_abi { + arg.nested = vec![ + ArgType::new(arg.layout.field(ccx, 0)), + ArgType::new(arg.layout.field(ccx, 1)) + ]; + adjust_for_rust_type(&mut arg.nested[0], false); + adjust_for_rust_type(&mut arg.nested[1], false); } else { - if let Some(inner) = rust_ptr_attrs(ty, &mut arg) { - arg.attrs.set_dereferenceable(ccx.size_of(inner)); - } + adjust_for_rust_type(&mut arg, is_return); } - args.push(arg); - } + + arg + }; FnType { - args, - ret, + ret: arg_of(sig.output(), true), + args: inputs.iter().chain(extra_args.iter()).map(|ty| { + arg_of(ty, false) + }).collect(), variadic: sig.variadic, cconv, } diff --git a/src/rustllvm/RustWrapper.cpp b/src/rustllvm/RustWrapper.cpp index c8d974febf23e..9aa172591b86f 100644 --- a/src/rustllvm/RustWrapper.cpp +++ b/src/rustllvm/RustWrapper.cpp @@ -178,6 +178,22 @@ extern "C" void LLVMRustAddCallSiteAttribute(LLVMValueRef Instr, unsigned Index, #endif } +extern "C" void LLVMRustAddAlignmentCallSiteAttr(LLVMValueRef Instr, + unsigned Index, + uint32_t Bytes) { + CallSite Call = CallSite(unwrap(Instr)); + AttrBuilder B; + B.addAlignmentAttr(Bytes); +#if LLVM_VERSION_GE(5, 0) + Call.setAttributes(Call.getAttributes().addAttributes( + Call->getContext(), Index, B)); +#else + Call.setAttributes(Call.getAttributes().addAttributes( + Call->getContext(), Index, + AttributeSet::get(Call->getContext(), Index, B))); +#endif +} + extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr, unsigned Index, uint64_t Bytes) { @@ -194,6 +210,22 @@ extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr, #endif } +extern "C" void LLVMRustAddDereferenceableOrNullCallSiteAttr(LLVMValueRef Instr, + unsigned Index, + uint64_t Bytes) { + CallSite Call = CallSite(unwrap(Instr)); + AttrBuilder B; + B.addDereferenceableOrNullAttr(Bytes); +#if LLVM_VERSION_GE(5, 0) + Call.setAttributes(Call.getAttributes().addAttributes( + Call->getContext(), Index, B)); +#else + 
Call.setAttributes(Call.getAttributes().addAttributes( + Call->getContext(), Index, + AttributeSet::get(Call->getContext(), Index, B))); +#endif +} + extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index, LLVMRustAttribute RustAttr) { Function *A = unwrap(Fn); @@ -206,6 +238,19 @@ extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index, #endif } +extern "C" void LLVMRustAddAlignmentAttr(LLVMValueRef Fn, + unsigned Index, + uint32_t Bytes) { + Function *A = unwrap(Fn); + AttrBuilder B; + B.addAlignmentAttr(Bytes); +#if LLVM_VERSION_GE(5, 0) + A->addAttributes(Index, B); +#else + A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B)); +#endif +} + extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index, uint64_t Bytes) { Function *A = unwrap(Fn); @@ -218,6 +263,19 @@ extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index, #endif } +extern "C" void LLVMRustAddDereferenceableOrNullAttr(LLVMValueRef Fn, + unsigned Index, + uint64_t Bytes) { + Function *A = unwrap(Fn); + AttrBuilder B; + B.addDereferenceableOrNullAttr(Bytes); +#if LLVM_VERSION_GE(5, 0) + A->addAttributes(Index, B); +#else + A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B)); +#endif +} + extern "C" void LLVMRustAddFunctionAttrStringValue(LLVMValueRef Fn, unsigned Index, const char *Name, diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index 05682a8efaecc..6cb1972afa52f 100644 --- a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -15,7 +15,7 @@ #![feature(custom_attribute)] pub struct S { - _field: [i64; 4], + _field: [i32; 8], } pub struct UnsafeInner { @@ -66,7 +66,7 @@ pub fn mutable_unsafe_borrow(_: &mut UnsafeInner) { pub fn mutable_borrow(_: &mut i32) { } -// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32) %arg0) +// CHECK: @indirect_struct(%S* noalias nocapture align 4 
dereferenceable(32) %arg0) #[no_mangle] pub fn indirect_struct(_: S) { } @@ -77,17 +77,17 @@ pub fn indirect_struct(_: S) { pub fn borrowed_struct(_: &S) { } -// CHECK: noalias dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x) +// CHECK: noalias align 4 dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x) #[no_mangle] pub fn _box(x: Box) -> Box { x } -// CHECK: @struct_return(%S* noalias nocapture sret dereferenceable(32)) +// CHECK: @struct_return(%S* noalias nocapture sret align 4 dereferenceable(32)) #[no_mangle] pub fn struct_return() -> S { S { - _field: [0, 0, 0, 0] + _field: [0, 0, 0, 0, 0, 0, 0, 0] } } diff --git a/src/test/codegen/packed.rs b/src/test/codegen/packed.rs index 99e6e38a3bf0b..87cf042f27e94 100644 --- a/src/test/codegen/packed.rs +++ b/src/test/codegen/packed.rs @@ -39,7 +39,7 @@ pub struct BigPacked { #[no_mangle] pub fn call_pkd(f: fn() -> Array) -> BigPacked { // CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array -// CHECK: call void %{{.*}}(%Array* noalias nocapture sret dereferenceable(32) [[ALLOCA]]) +// CHECK: call void %{{.*}}(%Array* noalias nocapture sret align 4 dereferenceable(32) [[ALLOCA]]) // CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* %{{.*}}, i{{[0-9]+}} 32, i32 1, i1 false) // check that calls whose destination is a field of a packed struct // go through an alloca rather than calling the function with an From ac60872077608c4382aab39495e15c9f226630dd Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Thu, 5 Oct 2017 02:21:10 +0300 Subject: [PATCH 51/69] rustc_trans: generate LLVM pointee types based on alignment. 
--- src/librustc_trans/abi.rs | 94 ++++++--------------- src/librustc_trans/context.rs | 7 ++ src/librustc_trans/type_.rs | 13 ++- src/librustc_trans/type_of.rs | 150 +++++++++++++++++++++++++++++++++- 4 files changed, 191 insertions(+), 73 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index aaadc7518e51c..54e648c6d4a57 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -32,13 +32,11 @@ use cabi_nvptx64; use cabi_hexagon; use mir::lvalue::LvalueRef; use type_::Type; -use type_of::LayoutLlvmExt; +use type_of::{LayoutLlvmExt, PointerKind}; -use rustc::hir; use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, Align, Size, TyLayout}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; -use rustc_back::PanicStrategy; use libc::c_uint; use std::{cmp, iter}; @@ -514,22 +512,6 @@ impl<'a, 'tcx> ArgType<'tcx> { self.kind = ArgKind::Ignore; } - fn safe_pointee(&mut self, layout: TyLayout) { - match self.layout.abi { - layout::Abi::Scalar(layout::Scalar { - value: layout::Pointer, - ref valid_range - }) => { - if valid_range.start > 0 { - self.attrs.set(ArgAttribute::NonNull); - } - self.attrs.pointee_size = layout.size; - self.attrs.pointee_align = Some(layout.align); - } - _ => bug!("ArgType::safe_pointee({:#?}): not a pointer", self.layout) - } - } - pub fn extend_integer_width_to(&mut self, bits: u64) { // Only integers have signedness if let layout::Abi::Scalar(ref scalar) = self.layout.abi { @@ -754,42 +736,30 @@ impl<'a, 'tcx> FnType<'tcx> { // Handle safe Rust thin and fat pointers. let adjust_for_rust_type = |arg: &mut ArgType<'tcx>, is_return: bool| { - // We only handle thin pointers here. match arg.layout.abi { - layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. }) => {} - _ => return - } - - let mut ty = arg.layout.ty; - - // FIXME(eddyb) detect more nested cases than `Option<&T>` here. - match arg.layout.variants { - layout::Variants::NicheFilling { dataful_variant, .. 
} => { - let variant = arg.layout.for_variant(ccx, dataful_variant); - for i in 0..variant.fields.count() { - let field = variant.field(ccx, i); - match field.abi { - layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. }) => { - // We found the pointer field, use its type. - ty = field.ty; - break; - } - _ => {} - } + layout::Abi::Scalar(layout::Scalar { + value: layout::Pointer, + ref valid_range + }) => { + if valid_range.start > 0 && valid_range.start < valid_range.end { + arg.attrs.set(ArgAttribute::NonNull); } } _ => {} } - match ty.sty { - // `Box` pointer parameters never alias because ownership is transferred - ty::TyAdt(def, _) if def.is_box() => { - arg.attrs.set(ArgAttribute::NoAlias); + if let Some(pointee) = arg.layout.pointee_info(ccx) { + if let Some(kind) = pointee.safe { + arg.attrs.pointee_size = pointee.size; + arg.attrs.pointee_align = Some(pointee.align); - arg.safe_pointee(ccx.layout_of(ty.boxed_ty())); - } + // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions + // with align attributes, and those calls later block optimizations. 
+ if !is_return { + arg.attrs.pointee_align = None; + } - ty::TyRef(_, mt) => { + // `Box` pointer parameters never alias because ownership is transferred // `&mut` pointer parameters never alias other parameters, // or mutable global data // @@ -797,35 +767,19 @@ impl<'a, 'tcx> FnType<'tcx> { // and can be marked as both `readonly` and `noalias`, as // LLVM's definition of `noalias` is based solely on memory // dependencies rather than pointer equality - let is_freeze = ccx.shared().type_is_freeze(mt.ty); - - let no_alias_is_safe = - if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias || - ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort { - // Mutable refrences or immutable shared references - mt.mutbl == hir::MutMutable || is_freeze - } else { - // Only immutable shared references - mt.mutbl != hir::MutMutable && is_freeze - }; - - if no_alias_is_safe { + let no_alias = match kind { + PointerKind::Shared => false, + PointerKind::Frozen | PointerKind::UniqueOwned => true, + PointerKind::UniqueBorrowed => !is_return + }; + if no_alias { arg.attrs.set(ArgAttribute::NoAlias); } - if mt.mutbl == hir::MutImmutable && is_freeze && !is_return { + if kind == PointerKind::Frozen && !is_return { arg.attrs.set(ArgAttribute::ReadOnly); } - - arg.safe_pointee(ccx.layout_of(mt.ty)); } - _ => {} - } - - // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions - // with align attributes, and those calls later block optimizations. 
- if !is_return { - arg.attrs.pointee_align = None; } }; diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 83efe6b795872..d768b14a82efd 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -24,6 +24,7 @@ use monomorphize::Instance; use partitioning::CodegenUnit; use type_::Type; +use type_of::PointeeInfo; use rustc_data_structures::base_n; use rustc::middle::trans::Stats; @@ -102,6 +103,7 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> { lltypes: RefCell, Option), Type>>, scalar_lltypes: RefCell, Type>>, + pointee_infos: RefCell, Option>>, isize_ty: Type, dbg_cx: Option>, @@ -378,6 +380,7 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> { used_statics: RefCell::new(Vec::new()), lltypes: RefCell::new(FxHashMap()), scalar_lltypes: RefCell::new(FxHashMap()), + pointee_infos: RefCell::new(FxHashMap()), isize_ty: Type::from_ref(ptr::null_mut()), dbg_cx, eh_personality: Cell::new(None), @@ -513,6 +516,10 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local().scalar_lltypes } + pub fn pointee_infos<'a>(&'a self) -> &'a RefCell, Option>> { + &self.local().pointee_infos + } + pub fn stats<'a>(&'a self) -> &'a RefCell { &self.local().stats } diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index 2774359c994a5..02224858b4692 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -17,7 +17,7 @@ use llvm::{Float, Double, X86_FP80, PPC_FP128, FP128}; use context::CrateContext; use syntax::ast; -use rustc::ty::layout; +use rustc::ty::layout::{self, Align}; use std::ffi::CString; use std::fmt; @@ -275,4 +275,15 @@ impl Type { I128 => Type::i128(cx), } } + + /// Return a LLVM type that has at most the required alignment, + /// as a conservative approximation for unknown pointee types. 
+ pub fn pointee_for_abi_align(ccx: &CrateContext, align: Align) -> Type { + if let Some(ity) = layout::Integer::for_abi_align(ccx, align) { + Type::from_integer(ccx, ity) + } else { + // FIXME(eddyb) We could find a better approximation here. + Type::i8(ccx) + } + } } diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index d2f9ca3546812..529ad51ba8ea3 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -10,8 +10,10 @@ use abi::FnType; use common::*; +use rustc::hir; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, HasDataLayout, Align, LayoutOf, Size, TyLayout}; +use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout}; +use rustc_back::PanicStrategy; use trans_item::DefPathBasedNames; use type_::Type; @@ -148,12 +150,35 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { } } +#[derive(Copy, Clone, PartialEq, Eq)] +pub enum PointerKind { + /// Most general case, we know no restrictions to tell LLVM. + Shared, + + /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`. + Frozen, + + /// `&mut T`, when we know `noalias` is safe for LLVM. + UniqueBorrowed, + + /// `Box`, unlike `UniqueBorrowed`, it also has `noalias` on returns. 
+ UniqueOwned +} + +#[derive(Copy, Clone)] +pub struct PointeeInfo { + pub size: Size, + pub align: Align, + pub safe: Option, +} + pub trait LayoutLlvmExt<'tcx> { fn is_llvm_immediate(&self) -> bool; fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn over_align(&self) -> Option; fn llvm_field_index(&self, index: usize) -> u64; + fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option; } impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { @@ -202,7 +227,14 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig); FnType::new(ccx, sig, &[]).llvm_type(ccx) } - _ => Type::i8(ccx) + _ => { + // If we know the alignment, pick something better than i8. + if let Some(pointee) = self.pointee_info(ccx) { + Type::pointee_for_abi_align(ccx, pointee.align) + } else { + Type::i8(ccx) + } + } }; pointee.ptr_to() } @@ -285,4 +317,118 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } } } + + fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option { + // We only handle thin pointers here. + match self.abi { + layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. 
}) => {} + _ => return None + } + + if let Some(&pointee) = ccx.pointee_infos().borrow().get(&self.ty) { + return pointee; + } + + let mut result = None; + match self.ty.sty { + ty::TyRawPtr(mt) => { + let (size, align) = ccx.size_and_align_of(mt.ty); + result = Some(PointeeInfo { + size, + align, + safe: None + }); + } + + ty::TyRef(_, mt) => { + let (size, align) = ccx.size_and_align_of(mt.ty); + + let kind = match mt.mutbl { + hir::MutImmutable => if ccx.shared().type_is_freeze(mt.ty) { + PointerKind::Frozen + } else { + PointerKind::Shared + }, + hir::MutMutable => { + if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias || + ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort { + PointerKind::UniqueBorrowed + } else { + PointerKind::Shared + } + } + }; + + result = Some(PointeeInfo { + size, + align, + safe: Some(kind) + }); + } + + ty::TyAdt(def, _) if def.is_box() => { + let (size, align) = ccx.size_and_align_of(self.ty.boxed_ty()); + result = Some(PointeeInfo { + size, + align, + safe: Some(PointerKind::UniqueOwned) + }); + } + + _ => { + let mut data_variant = match self.variants { + layout::Variants::NicheFilling { dataful_variant, .. } => { + // Only the niche itself is always initialized, + // so only check for a pointer at its offset. + // + // If the niche is a pointer, it's either valid + // (according to its type), or null (which the + // niche field's scalar validity range encodes). + // This allows using `dereferenceable_or_null` + // for e.g. `Option<&T>`, and this will continue + // to work as long as we don't start using more + // niches than just null (e.g. the first page + // of the address space, or unaligned pointers). + if self.fields.offset(0).bytes() == 0 { + Some(self.for_variant(ccx, dataful_variant)) + } else { + None + } + } + _ => Some(*self) + }; + + if let Some(variant) = data_variant { + // We're not interested in any unions. 
+ if let layout::FieldPlacement::Union(_) = variant.fields { + data_variant = None; + } + } + + if let Some(variant) = data_variant { + for i in 0..variant.fields.count() { + let field = variant.field(ccx, i); + if field.size == self.size { + // We found the pointer field, use its information. + result = field.pointee_info(ccx); + break; + } + } + } + + if let ty::TyAdt(def, _) = self.ty.sty { + if Some(def.did) == ccx.tcx().lang_items().non_zero() { + // FIXME(eddyb) Don't treat NonZero<*T> as + // as containing &T in ty::layout. + if let Some(ref mut pointee) = result { + pointee.safe = None; + } + } + } + } + } + + ccx.pointee_infos().borrow_mut().insert(self.ty, result); + result + } } From f1b7cd99254dd11ff7370fe423cca6fd8046f7d2 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Thu, 5 Oct 2017 04:22:23 +0300 Subject: [PATCH 52/69] rustc_trans: restrict "immediate pairs" to pairs of scalars. --- src/librustc_trans/common.rs | 21 +-------- src/librustc_trans/intrinsic.rs | 6 +-- src/librustc_trans/mir/analyze.rs | 9 ++-- src/librustc_trans/mir/block.rs | 33 ++++--------- src/librustc_trans/mir/constant.rs | 7 +-- src/librustc_trans/mir/lvalue.rs | 8 ++-- src/librustc_trans/mir/mod.rs | 8 ++-- src/librustc_trans/mir/operand.rs | 76 ++++++++++++++---------------- src/librustc_trans/mir/rvalue.rs | 3 +- src/librustc_trans/type_of.rs | 19 ++++++++ 10 files changed, 84 insertions(+), 106 deletions(-) diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index f476416619e69..03ae58fd941f1 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -28,7 +28,7 @@ use type_of::LayoutLlvmExt; use value::Value; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, HasDataLayout, LayoutOf}; +use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::hir; @@ -54,25 +54,6 @@ pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) 
-> } } -/// Returns true if the type is represented as a pair of immediates. -pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - -> bool { - let layout = ccx.layout_of(ty); - match layout.fields { - layout::FieldPlacement::Arbitrary { .. } => { - // There must be only 2 fields. - if layout.fields.count() != 2 { - return false; - } - - // The two fields must be both immediates. - layout.field(ccx, 0).is_llvm_immediate() && - layout.field(ccx, 1).is_llvm_immediate() - } - _ => false - } -} - pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { ty.needs_drop(tcx, ty::ParamEnv::empty(traits::Reveal::All)) } diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index d982fa192b369..7d08090cd7e7c 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -675,10 +675,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let ptr = bcx.pointercast(llresult, ty.llvm_type(ccx).ptr_to()); bcx.store(llval, ptr, Some(ccx.align_of(ret_ty))); } else { - OperandRef { - val: OperandValue::Immediate(llval), - layout: result.layout - }.unpack_if_pair(bcx).val.store(bcx, result); + OperandRef::from_immediate_or_packed_pair(bcx, llval, result.layout) + .val.store(bcx, result); } } } diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 93780aefe4ddb..bf822249a64a0 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -19,7 +19,6 @@ use rustc::mir::visit::{Visitor, LvalueContext}; use rustc::mir::traversal; use rustc::ty; use rustc::ty::layout::LayoutOf; -use common; use type_of::LayoutLlvmExt; use super::MirContext; @@ -32,10 +31,11 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() { let ty = mircx.monomorphize(&ty); debug!("local {} has type {:?}", index, ty); - if 
mircx.ccx.layout_of(ty).is_llvm_immediate() { + let layout = mircx.ccx.layout_of(ty); + if layout.is_llvm_immediate() { // These sorts of types are immediates that we can store // in an ValueRef without an alloca. - } else if common::type_is_imm_pair(mircx.ccx, ty) { + } else if layout.is_llvm_scalar_pair(mircx.ccx) { // We allow pairs and uses of any of their 2 fields. } else { // These sorts of types require an alloca. Note that @@ -145,7 +145,8 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { let ty = proj.base.ty(self.cx.mir, self.cx.ccx.tcx()); let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); - if common::type_is_imm_pair(self.cx.ccx, ty) { + let layout = self.cx.ccx.layout_of(ty); + if layout.is_llvm_scalar_pair(self.cx.ccx) { return; } } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index d1b6e9073b843..e739037b07d7f 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -135,11 +135,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let Some((ret_dest, target)) = destination { let ret_bcx = this.get_builder(target); this.set_debug_loc(&ret_bcx, terminator.source_info); - let op = OperandRef { - val: Immediate(invokeret), - layout: fn_ty.ret.layout, - }; - this.store_return(&ret_bcx, ret_dest, &fn_ty.ret, op); + this.store_return(&ret_bcx, ret_dest, &fn_ty.ret, invokeret); } } else { let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); @@ -153,11 +149,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } if let Some((ret_dest, target)) = destination { - let op = OperandRef { - val: Immediate(llret), - layout: fn_ty.ret.layout, - }; - this.store_return(&bcx, ret_dest, &fn_ty.ret, op); + this.store_return(&bcx, ret_dest, &fn_ty.ret, llret); funclet_br(this, bcx, target); } else { bcx.unreachable(); @@ -252,7 +244,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let Ref(llval, align) = op.val { bcx.load(llval, align.non_abi()) } else { - op.pack_if_pair(&bcx).immediate() + 
op.immediate_or_packed_pair(&bcx) } }; bcx.ret(llval); @@ -545,12 +537,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { - // Make a fake operand for store_return - let op = OperandRef { - val: Ref(dst.llval, Alignment::AbiAligned), - layout: fn_ty.ret.layout, - }; - self.store_return(&bcx, ret_dest, &fn_ty.ret, op); + self.store_return(&bcx, ret_dest, &fn_ty.ret, dst.llval); } if let Some((_, target)) = *destination { @@ -649,7 +636,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { op.val.store(bcx, scratch); (scratch.llval, Alignment::AbiAligned, true) } else { - (op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false) + (op.immediate_or_packed_pair(bcx), Alignment::AbiAligned, false) } } Ref(llval, align @ Alignment::Packed(_)) if arg.is_indirect() => { @@ -915,12 +902,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx: &Builder<'a, 'tcx>, dest: ReturnDest<'tcx>, ret_ty: &ArgType<'tcx>, - op: OperandRef<'tcx>) { + llval: ValueRef) { use self::ReturnDest::*; match dest { Nothing => (), - Store(dst) => ret_ty.store(bcx, op.immediate(), dst), + Store(dst) => ret_ty.store(bcx, llval, dst), IndirectOperand(tmp, index) => { let op = tmp.load(bcx); tmp.storage_dead(bcx); @@ -929,14 +916,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { DirectOperand(index) => { // If there is a cast, we have to store and reload. 
let op = if ret_ty.cast.is_some() { - let tmp = LvalueRef::alloca(bcx, op.layout, "tmp_ret"); + let tmp = LvalueRef::alloca(bcx, ret_ty.layout, "tmp_ret"); tmp.storage_live(bcx); - ret_ty.store(bcx, op.immediate(), tmp); + ret_ty.store(bcx, llval, tmp); let op = tmp.load(bcx); tmp.storage_dead(bcx); op } else { - op.unpack_if_pair(bcx) + OperandRef::from_immediate_or_packed_pair(bcx, llval, ret_ty.layout) }; self.locals[index] = LocalRef::Operand(Some(op)); } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 542893bd62b39..3196300a706de 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -139,13 +139,14 @@ impl<'a, 'tcx> Const<'tcx> { } pub fn to_operand(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> { - let llty = ccx.layout_of(self.ty).immediate_llvm_type(ccx); + let layout = ccx.layout_of(self.ty); + let llty = layout.immediate_llvm_type(ccx); let llvalty = val_ty(self.llval); - let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) { + let val = if llty == llvalty && layout.is_llvm_scalar_pair(ccx) { let (a, b) = self.get_pair(ccx); OperandValue::Pair(a, b) - } else if llty == llvalty && ccx.layout_of(self.ty).is_llvm_immediate() { + } else if llty == llvalty && layout.is_llvm_immediate() { // If the types match, we can use the value directly. 
OperandValue::Immediate(self.llval) } else { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index c6eb822ec8761..8340d865eb1e2 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -16,7 +16,7 @@ use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; use base; use builder::Builder; -use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, C_uint_big}; +use common::{CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, C_uint_big}; use consts; use type_of::LayoutLlvmExt; use type_::Type; @@ -175,10 +175,10 @@ impl<'a, 'tcx> LvalueRef<'tcx> { load }; OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout)) - } else if common::type_is_imm_pair(bcx.ccx, self.layout.ty) { + } else if self.layout.is_llvm_scalar_pair(bcx.ccx) { OperandValue::Pair( - self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(), - self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate()) + self.project_field(bcx, 0).load(bcx).immediate(), + self.project_field(bcx, 1).load(bcx).immediate()) } else { OperandValue::Ref(self.llval, self.alignment) }; diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 38719fedede5b..6f9d32b1a37d6 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -475,11 +475,9 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); bcx.set_value_name(llarg, &name); llarg_idx += 1; - let operand = OperandRef { - val: OperandValue::Immediate(llarg), - layout: arg.layout - }; - return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); + return LocalRef::Operand(Some( + OperandRef::from_immediate_or_packed_pair(bcx, llarg, arg.layout) + )); } else { let tmp = LvalueRef::alloca(bcx, arg.layout, &name); arg.store_fn_arg(bcx, &mut llarg_idx, tmp); diff --git a/src/librustc_trans/mir/operand.rs 
b/src/librustc_trans/mir/operand.rs index 5659072fa932c..3e7aa9d0db5bf 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -15,7 +15,7 @@ use rustc::mir; use rustc_data_structures::indexed_vec::Idx; use base; -use common::{self, CrateContext, C_undef}; +use common::{CrateContext, C_undef}; use builder::Builder; use value::Value; use type_of::LayoutLlvmExt; @@ -24,7 +24,6 @@ use std::fmt; use std::ptr; use super::{MirContext, LocalRef}; -use super::constant::Const; use super::lvalue::{Alignment, LvalueRef}; /// The representation of a Rust value. The enum variant is in fact @@ -84,10 +83,10 @@ impl<'a, 'tcx> OperandRef<'tcx> { pub fn new_zst(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> OperandRef<'tcx> { assert!(layout.is_zst()); - let llty = layout.llvm_type(ccx); - // FIXME(eddyb) ZSTs should always be immediate, not pairs. - // This hack only exists to unpack a constant undef pair. - Const::new(C_undef(llty), layout.ty).to_operand(ccx) + OperandRef { + val: OperandValue::Immediate(C_undef(layout.llvm_type(ccx))), + layout + } } /// Asserts that this operand refers to a scalar and returns @@ -115,12 +114,13 @@ impl<'a, 'tcx> OperandRef<'tcx> { } } - /// If this operand is a Pair, we return an - /// Immediate aggregate with the two values. - pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { + /// If this operand is a `Pair`, we return an aggregate with the two values. + /// For other cases, see `immediate`. + pub fn immediate_or_packed_pair(self, bcx: &Builder<'a, 'tcx>) -> ValueRef { if let OperandValue::Pair(a, b) = self.val { let llty = self.layout.llvm_type(bcx.ccx); - debug!("Operand::pack_if_pair: packing {:?} into {:?}", self, llty); + debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", + self, llty); // Reconstruct the immediate aggregate. 
let mut llpair = C_undef(llty); let elems = [a, b]; @@ -128,29 +128,33 @@ impl<'a, 'tcx> OperandRef<'tcx> { let elem = base::from_immediate(bcx, elems[i]); llpair = bcx.insert_value(llpair, elem, self.layout.llvm_field_index(i)); } - self.val = OperandValue::Immediate(llpair); + llpair + } else { + self.immediate() } - self } - /// If this operand is a pair in an Immediate, - /// we return a Pair with the two halves. - pub fn unpack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { - if let OperandValue::Immediate(llval) = self.val { - // Deconstruct the immediate aggregate. - if common::type_is_imm_pair(bcx.ccx, self.layout.ty) { - debug!("Operand::unpack_if_pair: unpacking {:?}", self); + /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`. + pub fn from_immediate_or_packed_pair(bcx: &Builder<'a, 'tcx>, + llval: ValueRef, + layout: TyLayout<'tcx>) + -> OperandRef<'tcx> { + let val = if layout.is_llvm_scalar_pair(bcx.ccx) { + debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", + llval, layout); - let a = bcx.extract_value(llval, self.layout.llvm_field_index(0)); - let a = base::to_immediate(bcx, a, self.layout.field(bcx.ccx, 0)); + // Deconstruct the immediate aggregate. 
+ let a = bcx.extract_value(llval, layout.llvm_field_index(0)); + let a = base::to_immediate(bcx, a, layout.field(bcx.ccx, 0)); - let b = bcx.extract_value(llval, self.layout.llvm_field_index(1)); - let b = base::to_immediate(bcx, b, self.layout.field(bcx.ccx, 1)); + let b = bcx.extract_value(llval, layout.llvm_field_index(1)); + let b = base::to_immediate(bcx, b, layout.field(bcx.ccx, 1)); - self.val = OperandValue::Pair(a, b); - } - } - self + OperandValue::Pair(a, b) + } else { + OperandValue::Immediate(llval) + }; + OperandRef { val, layout } } } @@ -170,16 +174,9 @@ impl<'a, 'tcx> OperandValue { bcx.store(base::from_immediate(bcx, s), dest.llval, dest.alignment.non_abi()); } OperandValue::Pair(a, b) => { - // See comment above about zero-sized values. - let dest_a = dest.project_field(bcx, 0); - if !dest_a.layout.is_zst() { - let a = base::from_immediate(bcx, a); - bcx.store(a, dest_a.llval, dest_a.alignment.non_abi()); - } - let dest_b = dest.project_field(bcx, 1); - if !dest_b.layout.is_zst() { - let b = base::from_immediate(bcx, b); - bcx.store(b, dest_b.llval, dest_b.alignment.non_abi()); + for (i, &x) in [a, b].iter().enumerate() { + OperandValue::Immediate(x) + .store(bcx, dest.project_field(bcx, i)); } } } @@ -218,13 +215,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { (OperandValue::Pair(a, b), &mir::ProjectionElem::Field(ref f, ty)) => { let llval = [a, b][f.index()]; - let op = OperandRef { + return OperandRef { val: OperandValue::Immediate(llval), layout: bcx.ccx.layout_of(self.monomorphize(&ty)) }; - - // Handle nested pairs. - return op.unpack_if_pair(bcx); } _ => {} } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index e52dcd0756211..33b1a7e3363fd 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -70,9 +70,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // `CoerceUnsized` can be passed by a where-clause, // so the (generic) MIR may not be able to expand it. 
let operand = self.trans_operand(&bcx, source); - let operand = operand.pack_if_pair(&bcx); match operand.val { - OperandValue::Pair(..) => bug!(), + OperandValue::Pair(..) | OperandValue::Immediate(_) => { // unsize from an immediate structure. We don't // really need a temporary alloca here, but diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 529ad51ba8ea3..6da6f1ebaf060 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -174,6 +174,7 @@ pub struct PointeeInfo { pub trait LayoutLlvmExt<'tcx> { fn is_llvm_immediate(&self) -> bool; + fn is_llvm_scalar_pair<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> bool; fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn over_align(&self) -> Option; @@ -192,6 +193,24 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } } + fn is_llvm_scalar_pair<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> bool { + match self.fields { + layout::FieldPlacement::Arbitrary { .. } => { + // There must be only 2 fields. + if self.fields.count() != 2 { + return false; + } + + // The two fields must be both scalars. + match (&self.field(ccx, 0).abi, &self.field(ccx, 1).abi) { + (&layout::Abi::Scalar(_), &layout::Abi::Scalar(_)) => true, + _ => false + } + } + _ => false + } + } + /// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. /// The pointee type of the pointer in `LvalueRef` is always this type. /// For sized types, it is also the right LLVM type for an `alloca` From cdeb4b0d258c19f57ee6fb089126656e18324367 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Fri, 6 Oct 2017 10:25:35 +0300 Subject: [PATCH 53/69] rustc: encode scalar pairs in layout ABI. 
--- src/librustc/ty/layout.rs | 138 +++++++++++++++++-------- src/librustc_trans/abi.rs | 9 +- src/librustc_trans/cabi_x86_64.rs | 1 + src/librustc_trans/cabi_x86_win64.rs | 1 + src/librustc_trans/common.rs | 9 +- src/librustc_trans/context.rs | 7 +- src/librustc_trans/mir/analyze.rs | 4 +- src/librustc_trans/mir/constant.rs | 17 ++- src/librustc_trans/mir/lvalue.rs | 33 +++--- src/librustc_trans/mir/operand.rs | 32 +++--- src/librustc_trans/type_of.rs | 135 +++++++++++++++++------- src/test/codegen/adjustments.rs | 6 +- src/test/codegen/function-arguments.rs | 2 +- src/test/codegen/packed.rs | 5 +- src/test/codegen/refs.rs | 4 +- 15 files changed, 266 insertions(+), 137 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 0edd8f44f0ce1..21ba7995332b7 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -757,6 +757,7 @@ impl FieldPlacement { pub enum Abi { Uninhabited, Scalar(Scalar), + ScalarPair(Scalar, Scalar), Vector, Aggregate { /// If true, the size is exact, otherwise it's only a lower bound. @@ -769,7 +770,10 @@ impl Abi { /// Returns true if the layout corresponds to an unsized type. pub fn is_unsized(&self) -> bool { match *self { - Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector => false, + Abi::Uninhabited | + Abi::Scalar(_) | + Abi::ScalarPair(..) | + Abi::Vector => false, Abi::Aggregate { sized, .. } => !sized } } @@ -777,7 +781,10 @@ impl Abi { /// Returns true if the fields of the layout are packed. pub fn is_packed(&self) -> bool { match *self { - Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector => false, + Abi::Uninhabited | + Abi::Scalar(_) | + Abi::ScalarPair(..) | + Abi::Vector => false, Abi::Aggregate { packed, .. 
} => packed } } @@ -905,13 +912,32 @@ impl<'a, 'tcx> CachedLayout { -> Result<&'tcx Self, LayoutError<'tcx>> { let cx = (tcx, param_env); let dl = cx.data_layout(); - let scalar = |value: Primitive| { + let scalar_unit = |value: Primitive| { let bits = value.size(dl).bits(); assert!(bits <= 128); - tcx.intern_layout(CachedLayout::scalar(cx, Scalar { + Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) - })) + } + }; + let scalar = |value: Primitive| { + tcx.intern_layout(CachedLayout::scalar(cx, scalar_unit(value))) + }; + let scalar_pair = |a: Scalar, b: Scalar| { + let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align); + let b_offset = a.value.size(dl).abi_align(b.value.align(dl)); + let size = (b_offset + b.value.size(dl)).abi_align(align); + CachedLayout { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Arbitrary { + offsets: vec![Size::from_bytes(0), b_offset], + memory_index: vec![0, 1] + }, + abi: Abi::ScalarPair(a, b), + align, + primitive_align: align, + size + } }; #[derive(Copy, Clone, Debug)] @@ -1049,19 +1075,54 @@ impl<'a, 'tcx> CachedLayout { memory_index = inverse_memory_index; } + let size = min_size.abi_align(align); + let mut abi = Abi::Aggregate { + sized, + packed + }; + + // Look for a scalar pair, as an ABI optimization. + // FIXME(eddyb) ignore extra ZST fields and field ordering. 
+ if sized && !packed && fields.len() == 2 { + match (&fields[0].abi, &fields[1].abi) { + (&Abi::Scalar(ref a), &Abi::Scalar(ref b)) => { + let pair = scalar_pair(a.clone(), b.clone()); + let pair_offsets = match pair.fields { + FieldPlacement::Arbitrary { + ref offsets, + ref memory_index + } => { + assert_eq!(memory_index, &[0, 1]); + offsets + } + _ => bug!() + }; + if offsets[0] == pair_offsets[0] && + offsets[1] == pair_offsets[1] && + memory_index[0] == 0 && + memory_index[1] == 1 && + align == pair.align && + primitive_align == pair.primitive_align && + size == pair.size { + // We can use `ScalarPair` only when it matches our + // already computed layout (including `#[repr(C)]`). + abi = pair.abi; + } + } + _ => {} + } + } + Ok(CachedLayout { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Arbitrary { offsets, memory_index }, - abi: Abi::Aggregate { - sized, - packed - }, + abi, align, primitive_align, - size: min_size.abi_align(align) + size }) }; let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| { @@ -1070,45 +1131,34 @@ impl<'a, 'tcx> CachedLayout { assert!(!ty.has_infer_types()); let ptr_layout = |pointee: Ty<'tcx>| { + let mut data_ptr = scalar_unit(Pointer); + if !ty.is_unsafe_ptr() { + data_ptr.valid_range.start = 1; + } + let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env); if pointee.is_sized(tcx, param_env, DUMMY_SP) { - let non_zero = !ty.is_unsafe_ptr(); - let bits = Pointer.size(dl).bits(); - return Ok(tcx.intern_layout(CachedLayout::scalar(cx, Scalar { - value: Pointer, - valid_range: (non_zero as u128)..=(!0 >> (128 - bits)) - }))); + return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr))); } let unsized_part = tcx.struct_tail(pointee); let metadata = match unsized_part.sty { - ty::TyForeign(..) => return Ok(scalar(Pointer)), + ty::TyForeign(..) 
=> { + return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr))); + } ty::TySlice(_) | ty::TyStr => { - Int(dl.ptr_sized_integer(), false) + scalar_unit(Int(dl.ptr_sized_integer(), false)) + } + ty::TyDynamic(..) => { + let mut vtable = scalar_unit(Pointer); + vtable.valid_range.start = 1; + vtable } - ty::TyDynamic(..) => Pointer, _ => return Err(LayoutError::Unknown(unsized_part)) }; // Effectively a (ptr, meta) tuple. - let align = Pointer.align(dl).max(metadata.align(dl)); - let meta_offset = Pointer.size(dl); - assert_eq!(meta_offset, meta_offset.abi_align(metadata.align(dl))); - let fields = FieldPlacement::Arbitrary { - offsets: vec![Size::from_bytes(0), meta_offset], - memory_index: vec![0, 1] - }; - Ok(tcx.intern_layout(CachedLayout { - variants: Variants::Single { index: 0 }, - fields, - abi: Abi::Aggregate { - sized: true, - packed: false - }, - align, - primitive_align: align, - size: (meta_offset + metadata.size(dl)).abi_align(align) - })) + Ok(tcx.intern_layout(scalar_pair(data_ptr, metadata))) }; Ok(match ty.sty { @@ -1134,11 +1184,9 @@ impl<'a, 'tcx> CachedLayout { ty::TyFloat(FloatTy::F32) => scalar(F32), ty::TyFloat(FloatTy::F64) => scalar(F64), ty::TyFnPtr(_) => { - let bits = Pointer.size(dl).bits(); - tcx.intern_layout(CachedLayout::scalar(cx, Scalar { - value: Pointer, - valid_range: 1..=(!0 >> (128 - bits)) - })) + let mut ptr = scalar_unit(Pointer); + ptr.valid_range.start = 1; + tcx.intern_layout(CachedLayout::scalar(cx, ptr)) } // The never type. @@ -2194,7 +2242,7 @@ impl<'a, 'tcx> TyLayout<'tcx> { pub fn is_zst(&self) -> bool { match self.abi { Abi::Uninhabited => true, - Abi::Scalar(_) => false, + Abi::Scalar(_) | Abi::ScalarPair(..) => false, Abi::Vector => self.size.bytes() == 0, Abi::Aggregate { sized, .. 
} => sized && self.size.bytes() == 0 } @@ -2347,6 +2395,10 @@ impl<'gcx> HashStable> for Abi { Scalar(ref value) => { value.hash_stable(hcx, hasher); } + ScalarPair(ref a, ref b) => { + a.hash_stable(hcx, hasher); + b.hash_stable(hcx, hasher); + } Vector => {} Aggregate { packed, sized } => { packed.hash_stable(hcx, hasher); diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 54e648c6d4a57..d69103bbb529d 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -311,6 +311,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { layout::Abi::Uninhabited | layout::Abi::Scalar(_) | layout::Abi::Vector => false, + layout::Abi::ScalarPair(..) | layout::Abi::Aggregate { .. } => true } } @@ -340,6 +341,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { }) } + layout::Abi::ScalarPair(..) | layout::Abi::Aggregate { .. } => { let mut total = Size::from_bytes(0); let mut result = None; @@ -745,10 +747,13 @@ impl<'a, 'tcx> FnType<'tcx> { arg.attrs.set(ArgAttribute::NonNull); } } - _ => {} + _ => { + // Nothing to do for non-pointer types. + return; + } } - if let Some(pointee) = arg.layout.pointee_info(ccx) { + if let Some(pointee) = arg.layout.pointee_info_at(ccx, Size::from_bytes(0)) { if let Some(kind) = pointee.safe { arg.attrs.pointee_size = pointee.size; arg.attrs.pointee_align = Some(pointee.align); diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 62540fac8b53e..eeb69276500f5 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -88,6 +88,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) } } + layout::Abi::ScalarPair(..) | layout::Abi::Aggregate { .. } => { match layout.variants { layout::Variants::Single { .. 
} => { diff --git a/src/librustc_trans/cabi_x86_win64.rs b/src/librustc_trans/cabi_x86_win64.rs index e93eeb83619b6..473c00120a740 100644 --- a/src/librustc_trans/cabi_x86_win64.rs +++ b/src/librustc_trans/cabi_x86_win64.rs @@ -18,6 +18,7 @@ pub fn compute_abi_info(fty: &mut FnType) { let fixup = |a: &mut ArgType| { match a.layout.abi { layout::Abi::Uninhabited => {} + layout::Abi::ScalarPair(..) | layout::Abi::Aggregate { .. } => { match a.layout.size.bits() { 8 => a.cast_to(Reg::i8()), diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 03ae58fd941f1..8a2c1ed2dc2f2 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -232,16 +232,9 @@ pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef { } pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef { - let empty = C_array(Type::i8(cx), &[]); assert_eq!(abi::FAT_PTR_ADDR, 0); assert_eq!(abi::FAT_PTR_EXTRA, 1); - C_struct(cx, &[ - empty, - ptr, - empty, - meta, - empty - ], false) + C_struct(cx, &[ptr, meta], false) } pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef { diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index d768b14a82efd..b2bb605d01b46 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -31,7 +31,7 @@ use rustc::middle::trans::Stats; use rustc_data_structures::stable_hasher::StableHashingContextProvider; use rustc::session::config::{self, NoDebugInfo}; use rustc::session::Session; -use rustc::ty::layout::{LayoutError, LayoutOf, TyLayout}; +use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::util::nodemap::FxHashMap; use rustc_trans_utils; @@ -103,7 +103,7 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> { lltypes: RefCell, Option), Type>>, scalar_lltypes: RefCell, Type>>, - pointee_infos: RefCell, Option>>, + pointee_infos: RefCell, Size), Option>>, isize_ty: Type, dbg_cx: 
Option>, @@ -516,7 +516,8 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local().scalar_lltypes } - pub fn pointee_infos<'a>(&'a self) -> &'a RefCell, Option>> { + pub fn pointee_infos<'a>(&'a self) + -> &'a RefCell, Size), Option>> { &self.local().pointee_infos } diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index bf822249a64a0..3129ed028d4d1 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -35,7 +35,7 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { if layout.is_llvm_immediate() { // These sorts of types are immediates that we can store // in an ValueRef without an alloca. - } else if layout.is_llvm_scalar_pair(mircx.ccx) { + } else if layout.is_llvm_scalar_pair() { // We allow pairs and uses of any of their 2 fields. } else { // These sorts of types require an alloca. Note that @@ -146,7 +146,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); let layout = self.cx.ccx.layout_of(ty); - if layout.is_llvm_scalar_pair(self.cx.ccx) { + if layout.is_llvm_scalar_pair() { return; } } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 3196300a706de..318e36dc71a4d 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -117,7 +117,12 @@ impl<'a, 'tcx> Const<'tcx> { } fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef { - const_get_elt(self.llval, ccx.layout_of(self.ty).llvm_field_index(i)) + let layout = ccx.layout_of(self.ty); + if let layout::Abi::ScalarPair(..) 
= layout.abi { + const_get_elt(self.llval, i as u64) + } else { + const_get_elt(self.llval, layout.llvm_field_index(i)) + } } fn get_pair(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) { @@ -143,7 +148,7 @@ impl<'a, 'tcx> Const<'tcx> { let llty = layout.immediate_llvm_type(ccx); let llvalty = val_ty(self.llval); - let val = if llty == llvalty && layout.is_llvm_scalar_pair(ccx) { + let val = if llty == llvalty && layout.is_llvm_scalar_pair() { let (a, b) = self.get_pair(ccx); OperandValue::Pair(a, b) } else if llty == llvalty && layout.is_llvm_immediate() { @@ -1174,6 +1179,14 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, -> Const<'tcx> { assert_eq!(vals.len(), layout.fields.count()); + if let layout::Abi::ScalarPair(..) = layout.abi { + assert_eq!(vals.len(), 2); + return Const::new(C_struct(ccx, &[ + vals[0].llval, + vals[1].llval, + ], false), layout.ty); + } + // offset of current value let mut offset = Size::from_bytes(0); let mut cfields = Vec::new(); diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 8340d865eb1e2..ff0b448267820 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -175,10 +175,13 @@ impl<'a, 'tcx> LvalueRef<'tcx> { load }; OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout)) - } else if self.layout.is_llvm_scalar_pair(bcx.ccx) { - OperandValue::Pair( - self.project_field(bcx, 0).load(bcx).immediate(), - self.project_field(bcx, 1).load(bcx).immediate()) + } else if self.layout.is_llvm_scalar_pair() { + let load = |i| { + let x = self.project_field(bcx, i).load(bcx).immediate(); + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
+ bcx.bitcast(x, self.layout.scalar_pair_element_llvm_type(bcx.ccx, i)) + }; + OperandValue::Pair(load(0), load(1)) } else { OperandValue::Ref(self.llval, self.alignment) }; @@ -190,17 +193,23 @@ impl<'a, 'tcx> LvalueRef<'tcx> { pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx> { let ccx = bcx.ccx; let field = self.layout.field(ccx, ix); - let offset = self.layout.fields.offset(ix).bytes(); + let offset = self.layout.fields.offset(ix); let alignment = self.alignment | Alignment::from(self.layout); let simple = || { + // Unions and newtypes only use an offset of 0. + let llval = if offset.bytes() == 0 { + self.llval + } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { + // Offsets have to match either first or second field. + assert_eq!(offset, a.value.size(ccx).abi_align(b.value.align(ccx))); + bcx.struct_gep(self.llval, 1) + } else { + bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) + }; LvalueRef { - // Unions and newtypes only use an offset of 0. - llval: if offset == 0 { - bcx.pointercast(self.llval, field.llvm_type(ccx).ptr_to()) - } else { - bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) - }, + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
+ llval: bcx.pointercast(llval, field.llvm_type(ccx).ptr_to()), llextra: if ccx.shared().type_has_metadata(field.ty) { self.llextra } else { @@ -249,7 +258,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let meta = self.llextra; - let unaligned_offset = C_usize(ccx, offset); + let unaligned_offset = C_usize(ccx, offset.bytes()); // Get the alignment of the field let (_, align) = glue::size_and_align_of_dst(bcx, field.ty, meta); diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 3e7aa9d0db5bf..b9d4148acf67e 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -123,11 +123,8 @@ impl<'a, 'tcx> OperandRef<'tcx> { self, llty); // Reconstruct the immediate aggregate. let mut llpair = C_undef(llty); - let elems = [a, b]; - for i in 0..2 { - let elem = base::from_immediate(bcx, elems[i]); - llpair = bcx.insert_value(llpair, elem, self.layout.llvm_field_index(i)); - } + llpair = bcx.insert_value(llpair, a, 0); + llpair = bcx.insert_value(llpair, b, 1); llpair } else { self.immediate() @@ -139,18 +136,13 @@ impl<'a, 'tcx> OperandRef<'tcx> { llval: ValueRef, layout: TyLayout<'tcx>) -> OperandRef<'tcx> { - let val = if layout.is_llvm_scalar_pair(bcx.ccx) { + let val = if layout.is_llvm_scalar_pair() { debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout); // Deconstruct the immediate aggregate. 
- let a = bcx.extract_value(llval, layout.llvm_field_index(0)); - let a = base::to_immediate(bcx, a, layout.field(bcx.ccx, 0)); - - let b = bcx.extract_value(llval, layout.llvm_field_index(1)); - let b = base::to_immediate(bcx, b, layout.field(bcx.ccx, 1)); - - OperandValue::Pair(a, b) + OperandValue::Pair(bcx.extract_value(llval, 0), + bcx.extract_value(llval, 1)) } else { OperandValue::Immediate(llval) }; @@ -175,8 +167,11 @@ impl<'a, 'tcx> OperandValue { } OperandValue::Pair(a, b) => { for (i, &x) in [a, b].iter().enumerate() { - OperandValue::Immediate(x) - .store(bcx, dest.project_field(bcx, i)); + let field = dest.project_field(bcx, i); + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + let x = bcx.bitcast(x, field.layout.immediate_llvm_type(bcx.ccx)); + bcx.store(base::from_immediate(bcx, x), + field.llval, field.alignment.non_abi()); } } } @@ -214,10 +209,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { match (o.val, &proj.elem) { (OperandValue::Pair(a, b), &mir::ProjectionElem::Field(ref f, ty)) => { + let layout = bcx.ccx.layout_of(self.monomorphize(&ty)); let llval = [a, b][f.index()]; + // HACK(eddyb) have to bitcast pointers + // until LLVM removes pointee types. + let llval = bcx.bitcast(llval, + layout.immediate_llvm_type(bcx.ccx)); return OperandRef { val: OperandValue::Immediate(llval), - layout: bcx.ccx.layout_of(self.monomorphize(&ty)) + layout }; } _ => {} diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 6da6f1ebaf060..d62e2ac1552ec 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -29,6 +29,12 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, return Type::vector(&layout.field(ccx, 0).llvm_type(ccx), layout.fields.count() as u64); } + layout::Abi::ScalarPair(..) 
=> { + return Type::struct_(ccx, &[ + layout.scalar_pair_element_llvm_type(ccx, 0), + layout.scalar_pair_element_llvm_type(ccx, 1), + ], false); + } layout::Abi::Uninhabited | layout::Abi::Aggregate { .. } => {} } @@ -174,12 +180,15 @@ pub struct PointeeInfo { pub trait LayoutLlvmExt<'tcx> { fn is_llvm_immediate(&self) -> bool; - fn is_llvm_scalar_pair<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> bool; + fn is_llvm_scalar_pair<'a>(&self) -> bool; fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; + fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>, + index: usize) -> Type; fn over_align(&self) -> Option; fn llvm_field_index(&self, index: usize) -> u64; - fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option; + fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size) + -> Option; } impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { @@ -188,26 +197,18 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { layout::Abi::Uninhabited | layout::Abi::Scalar(_) | layout::Abi::Vector => true, - + layout::Abi::ScalarPair(..) => false, layout::Abi::Aggregate { .. } => self.is_zst() } } - fn is_llvm_scalar_pair<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> bool { - match self.fields { - layout::FieldPlacement::Arbitrary { .. } => { - // There must be only 2 fields. - if self.fields.count() != 2 { - return false; - } - - // The two fields must be both scalars. - match (&self.field(ccx, 0).abi, &self.field(ccx, 1).abi) { - (&layout::Abi::Scalar(_), &layout::Abi::Scalar(_)) => true, - _ => false - } - } - _ => false + fn is_llvm_scalar_pair<'a>(&self) -> bool { + match self.abi { + layout::Abi::ScalarPair(..) => true, + layout::Abi::Uninhabited | + layout::Abi::Scalar(_) | + layout::Abi::Vector | + layout::Abi::Aggregate { .. 
} => false } } @@ -248,7 +249,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } _ => { // If we know the alignment, pick something better than i8. - if let Some(pointee) = self.pointee_info(ccx) { + if let Some(pointee) = self.pointee_info_at(ccx, Size::from_bytes(0)) { Type::pointee_for_abi_align(ccx, pointee.align) } else { Type::i8(ccx) @@ -310,6 +311,59 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { self.llvm_type(ccx) } + fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>, + index: usize) -> Type { + // HACK(eddyb) special-case fat pointers until LLVM removes + // pointee types, to avoid bitcasting every `OperandRef::deref`. + match self.ty.sty { + ty::TyRef(..) | + ty::TyRawPtr(_) => { + return self.field(ccx, index).llvm_type(ccx); + } + ty::TyAdt(def, _) if def.is_box() => { + return self.field(ccx, index).llvm_type(ccx); + } + _ => {} + } + + let (a, b) = match self.abi { + layout::Abi::ScalarPair(ref a, ref b) => (a, b), + _ => bug!("TyLayout::scalar_pair_element_llty({:?}): not applicable", self) + }; + let scalar = [a, b][index]; + + // Make sure to return the same type `immediate_llvm_type` would, + // to avoid dealing with two types and the associated conversions. + // This means that `(bool, bool)` is represented as `{i1, i1}`, + // both in memory and as an immediate, while `bool` is typically + // `i8` in memory and only `i1` when immediate. While we need to + // load/store `bool` as `i8` to avoid crippling LLVM optimizations, + // `i1` in a LLVM aggregate is valid and mostly equivalent to `i8`. + if scalar.is_bool() { + return Type::i1(ccx); + } + + match scalar.value { + layout::Int(i, _) => Type::from_integer(ccx, i), + layout::F32 => Type::f32(ccx), + layout::F64 => Type::f64(ccx), + layout::Pointer => { + // If we know the alignment, pick something better than i8. 
+ let offset = if index == 0 { + Size::from_bytes(0) + } else { + a.value.size(ccx).abi_align(b.value.align(ccx)) + }; + let pointee = if let Some(pointee) = self.pointee_info_at(ccx, offset) { + Type::pointee_for_abi_align(ccx, pointee.align) + } else { + Type::i8(ccx) + }; + pointee.ptr_to() + } + } + } + fn over_align(&self) -> Option { if self.align != self.primitive_align { Some(self.align) @@ -319,8 +373,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } fn llvm_field_index(&self, index: usize) -> u64 { - if let layout::Abi::Scalar(_) = self.abi { - bug!("TyLayout::llvm_field_index({:?}): not applicable", self); + match self.abi { + layout::Abi::Scalar(_) | + layout::Abi::ScalarPair(..) => { + bug!("TyLayout::llvm_field_index({:?}): not applicable", self) + } + _ => {} } match self.fields { layout::FieldPlacement::Union(_) => { @@ -337,20 +395,15 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } } - fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option { - // We only handle thin pointers here. - match self.abi { - layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. 
}) => {} - _ => return None - } - - if let Some(&pointee) = ccx.pointee_infos().borrow().get(&self.ty) { + fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size) + -> Option { + if let Some(&pointee) = ccx.pointee_infos().borrow().get(&(self.ty, offset)) { return pointee; } let mut result = None; match self.ty.sty { - ty::TyRawPtr(mt) => { + ty::TyRawPtr(mt) if offset.bytes() == 0 => { let (size, align) = ccx.size_and_align_of(mt.ty); result = Some(PointeeInfo { size, @@ -359,7 +412,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { }); } - ty::TyRef(_, mt) => { + ty::TyRef(_, mt) if offset.bytes() == 0 => { let (size, align) = ccx.size_and_align_of(mt.ty); let kind = match mt.mutbl { @@ -385,7 +438,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { }); } - ty::TyAdt(def, _) if def.is_box() => { + ty::TyAdt(def, _) if def.is_box() && offset.bytes() == 0 => { let (size, align) = ccx.size_and_align_of(self.ty.boxed_ty()); result = Some(PointeeInfo { size, @@ -408,7 +461,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { // to work as long as we don't start using more // niches than just null (e.g. the first page // of the address space, or unaligned pointers). - if self.fields.offset(0).bytes() == 0 { + if self.fields.offset(0) == offset { Some(self.for_variant(ccx, dataful_variant)) } else { None @@ -425,12 +478,16 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } if let Some(variant) = data_variant { + let ptr_end = offset + layout::Pointer.size(ccx); for i in 0..variant.fields.count() { - let field = variant.field(ccx, i); - if field.size == self.size { - // We found the pointer field, use its information. - result = field.pointee_info(ccx); - break; + let field_start = variant.fields.offset(i); + if field_start <= offset { + let field = variant.field(ccx, i); + if ptr_end <= field_start + field.size { + // We found the right field, look inside it. 
+ result = field.pointee_info_at(ccx, offset - field_start); + break; + } } } } @@ -447,7 +504,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } } - ccx.pointee_infos().borrow_mut().insert(self.ty, result); + ccx.pointee_infos().borrow_mut().insert((self.ty, offset), result); result } } diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs index 525a1f5310c9a..2b35d4547395a 100644 --- a/src/test/codegen/adjustments.rs +++ b/src/test/codegen/adjustments.rs @@ -24,9 +24,9 @@ pub fn helper(_: usize) { pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] { // We used to generate an extra alloca and memcpy for the block's trailing expression value, so // check that we copy directly to the return value slot -// CHECK: %0 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.0, 1 -// CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %0, [[USIZE]] %x.1, 3 -// CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1 +// CHECK: %0 = insertvalue { [0 x i8]*, [[USIZE]] } undef, [0 x i8]* %x.0, 0 +// CHECK: %1 = insertvalue { [0 x i8]*, [[USIZE]] } %0, [[USIZE]] %x.1, 1 +// CHECK: ret { [0 x i8]*, [[USIZE]] } %1 { x } } diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index 6cb1972afa52f..428cbdddb2209 100644 --- a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -133,7 +133,7 @@ pub fn trait_borrow(_: &Drop) { pub fn trait_box(_: Box) { } -// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1) +// CHECK: { [0 x i16]*, [[USIZE]] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1) #[no_mangle] pub fn return_slice(x: &[u16]) -> &[u16] { x diff --git a/src/test/codegen/packed.rs b/src/test/codegen/packed.rs index 87cf042f27e94..64e842b026e24 100644 --- a/src/test/codegen/packed.rs +++ 
b/src/test/codegen/packed.rs @@ -54,9 +54,6 @@ pub struct PackedPair(u8, u32); // CHECK-LABEL: @pkd_pair #[no_mangle] pub fn pkd_pair(pair1: &mut PackedPair, pair2: &mut PackedPair) { - // CHECK: [[V1:%[a-z0-9]+]] = load i8, i8* %{{.*}}, align 1 - // CHECK: [[V2:%[a-z0-9]+]] = load i32, i32* %{{.*}}, align 1 - // CHECK: store i8 [[V1]], i8* {{.*}}, align 1 - // CHECK: store i32 [[V2]], i32* {{.*}}, align 1 +// CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* %{{.*}}, i{{[0-9]+}} 5, i32 1, i1 false) *pair2 = *pair1; } diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs index 2ab64fffa3b34..0c084131ea3f2 100644 --- a/src/test/codegen/refs.rs +++ b/src/test/codegen/refs.rs @@ -24,9 +24,9 @@ pub fn helper(_: usize) { pub fn ref_dst(s: &[u8]) { // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy // directly to the alloca for "x" -// CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x to [0 x i8]** +// CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8]*, [[USIZE]] }* %x to [0 x i8]** // CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]] -// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 3 +// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 1 // CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]] let x = &*s; From c4d9ada70108210a2a2f7d3025a0d693fc3e3e9d Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 8 Oct 2017 23:08:47 +0300 Subject: [PATCH 54/69] rustc: place ZSTs first during struct field reordering. 
--- src/librustc/ty/layout.rs | 41 ++++++++++++++++++--------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 21ba7995332b7..70c41e7402d9d 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -964,40 +964,37 @@ impl<'a, 'tcx> CachedLayout { let mut align = base_align; let mut primitive_align = base_align; let mut sized = true; + let mut offsets = vec![Size::from_bytes(0); fields.len()]; + let mut inverse_memory_index: Vec = (0..fields.len() as u32).collect(); // Anything with repr(C) or repr(packed) doesn't optimize. - // Neither do 1-member and 2-member structs. - // In addition, code in trans assume that 2-element structs can become pairs. - // It's easier to just short-circuit here. - let (mut optimize, sort_ascending) = match kind { + let optimize = match kind { StructKind::AlwaysSized | - StructKind::MaybeUnsized => (fields.len() > 2, false), - StructKind::EnumVariant(discr) => { - (discr.size().bytes() == 1, true) + StructKind::MaybeUnsized | + StructKind::EnumVariant(I8) => { + (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty() } + StructKind::EnumVariant(_) => false }; - - optimize &= (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty(); - - let mut offsets = vec![Size::from_bytes(0); fields.len()]; - let mut inverse_memory_index: Vec = (0..fields.len() as u32).collect(); - if optimize { let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() }; - if end > 0 { - let optimizing = &mut inverse_memory_index[..end]; - if sort_ascending { + let optimizing = &mut inverse_memory_index[..end]; + match kind { + StructKind::AlwaysSized | + StructKind::MaybeUnsized => { + optimizing.sort_by_key(|&x| { + // Place ZSTs first to avoid "interesting offsets", + // especially with only one or two non-ZST fields. 
+ let f = &fields[x as usize]; + (!f.is_zst(), cmp::Reverse(f.align.abi())) + }) + } + StructKind::EnumVariant(_) => { optimizing.sort_by_key(|&x| fields[x as usize].align.abi()); - } else { - optimizing.sort_by(| &a, &b | { - let a = fields[a as usize].align.abi(); - let b = fields[b as usize].align.abi(); - b.cmp(&a) - }); } } } From 0b8697241f3988bfa55b9f17d60732dab0e3d75d Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Mon, 9 Oct 2017 00:38:10 +0300 Subject: [PATCH 55/69] rustc_trans: be more relaxed with non-lvalue consumes, especially ZSTs. --- src/librustc_trans/mir/analyze.rs | 30 +++++++++++------ src/librustc_trans/mir/operand.rs | 53 +++++++++++++++++++++++-------- 2 files changed, 59 insertions(+), 24 deletions(-) diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 3129ed028d4d1..3f3c5ac0a627e 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -136,19 +136,29 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { context: LvalueContext<'tcx>, location: Location) { debug!("visit_lvalue(lvalue={:?}, context={:?})", lvalue, context); + let ccx = self.cx.ccx; if let mir::Lvalue::Projection(ref proj) = *lvalue { - // Allow uses of projections of immediate pair fields. + // Allow uses of projections that are ZSTs or from immediate scalar fields. if let LvalueContext::Consume = context { - if let mir::Lvalue::Local(_) = proj.base { - if let mir::ProjectionElem::Field(..) = proj.elem { - let ty = proj.base.ty(self.cx.mir, self.cx.ccx.tcx()); - - let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); - let layout = self.cx.ccx.layout_of(ty); - if layout.is_llvm_scalar_pair() { - return; - } + let base_ty = proj.base.ty(self.cx.mir, ccx.tcx()); + let base_ty = self.cx.monomorphize(&base_ty); + + // ZSTs don't require any actual memory access. 
+ let elem_ty = base_ty.projection_ty(ccx.tcx(), &proj.elem).to_ty(ccx.tcx()); + let elem_ty = self.cx.monomorphize(&elem_ty); + if ccx.layout_of(elem_ty).is_zst() { + return; + } + + if let mir::ProjectionElem::Field(..) = proj.elem { + let layout = ccx.layout_of(base_ty.to_ty(ccx.tcx())); + if layout.is_llvm_scalar_pair() { + // Recurse as a `Consume` instead of `Projection`, + // potentially stopping at non-operand projections, + // which would trigger `mark_as_lvalue` on locals. + self.visit_lvalue(&proj.base, LvalueContext::Consume, location); + return; } } } diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index b9d4148acf67e..97e7dda31aa2c 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -179,19 +179,19 @@ impl<'a, 'tcx> OperandValue { } impl<'a, 'tcx> MirContext<'a, 'tcx> { - pub fn trans_consume(&mut self, - bcx: &Builder<'a, 'tcx>, - lvalue: &mir::Lvalue<'tcx>) - -> OperandRef<'tcx> + fn maybe_trans_consume_direct(&mut self, + bcx: &Builder<'a, 'tcx>, + lvalue: &mir::Lvalue<'tcx>) + -> Option> { - debug!("trans_consume(lvalue={:?})", lvalue); + debug!("maybe_trans_consume_direct(lvalue={:?})", lvalue); // watch out for locals that do not have an // alloca; they are handled somewhat differently if let mir::Lvalue::Local(index) = *lvalue { match self.locals[index] { LocalRef::Operand(Some(o)) => { - return o; + return Some(o); } LocalRef::Operand(None) => { bug!("use of {:?} before def", lvalue); @@ -204,21 +204,24 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Moves out of pair fields are trivial. 
if let &mir::Lvalue::Projection(ref proj) = lvalue { - if let mir::Lvalue::Local(index) = proj.base { - if let LocalRef::Operand(Some(o)) = self.locals[index] { - match (o.val, &proj.elem) { - (OperandValue::Pair(a, b), - &mir::ProjectionElem::Field(ref f, ty)) => { - let layout = bcx.ccx.layout_of(self.monomorphize(&ty)); + if let mir::ProjectionElem::Field(ref f, _) = proj.elem { + if let Some(o) = self.maybe_trans_consume_direct(bcx, &proj.base) { + let layout = o.layout.field(bcx.ccx, f.index()); + + // Handled in `trans_consume`. + assert!(!layout.is_zst()); + + match o.val { + OperandValue::Pair(a, b) => { let llval = [a, b][f.index()]; // HACK(eddyb) have to bitcast pointers // until LLVM removes pointee types. let llval = bcx.bitcast(llval, layout.immediate_llvm_type(bcx.ccx)); - return OperandRef { + return Some(OperandRef { val: OperandValue::Immediate(llval), layout - }; + }); } _ => {} } @@ -226,6 +229,28 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } + None + } + + pub fn trans_consume(&mut self, + bcx: &Builder<'a, 'tcx>, + lvalue: &mir::Lvalue<'tcx>) + -> OperandRef<'tcx> + { + debug!("trans_consume(lvalue={:?})", lvalue); + + let ty = self.monomorphized_lvalue_ty(lvalue); + let layout = bcx.ccx.layout_of(ty); + + // ZSTs don't require any actual memory access. + if layout.is_zst() { + return OperandRef::new_zst(bcx.ccx, layout); + } + + if let Some(o) = self.maybe_trans_consume_direct(bcx, lvalue) { + return o; + } + // for most lvalues, to consume them we just load them // out from their home self.trans_lvalue(bcx, lvalue).load(bcx) From 37a7521ef93b2e2d7a4cd04df38929d841b8ffcc Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Mon, 9 Oct 2017 02:31:06 +0300 Subject: [PATCH 56/69] rustc: unpack scalar newtype layout ABIs. 
--- src/librustc/ty/layout.rs | 50 +++++++++++++++++------- src/librustc_trans/mir/analyze.rs | 4 +- src/librustc_trans/mir/block.rs | 8 ++-- src/librustc_trans/mir/constant.rs | 54 ++++++++++++++++++++------ src/librustc_trans/mir/operand.rs | 53 ++++++++++++++++++------- src/test/codegen/function-arguments.rs | 6 +-- src/test/codegen/issue-32031.rs | 4 +- 7 files changed, 129 insertions(+), 50 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 70c41e7402d9d..3bf711d3e232f 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1078,6 +1078,30 @@ impl<'a, 'tcx> CachedLayout { packed }; + // Unpack newtype ABIs. + if sized && optimize && size.bytes() > 0 { + // All but one field must be ZSTs, and so they all start at 0. + if offsets.iter().all(|o| o.bytes() == 0) { + let mut non_zst_fields = fields.iter().filter(|f| !f.is_zst()); + + // We have exactly one non-ZST field. + match (non_zst_fields.next(), non_zst_fields.next()) { + (Some(field), None) => { + // Field size match and it has a scalar ABI. + if size == field.size { + match field.abi { + Abi::Scalar(_) => { + abi = field.abi.clone(); + } + _ => {} + } + } + } + _ => {} + } + } + } + // Look for a scalar pair, as an ABI optimization. // FIXME(eddyb) ignore extra ZST fields and field ordering. if sized && !packed && fields.len() == 2 { @@ -1424,6 +1448,18 @@ impl<'a, 'tcx> CachedLayout { let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?; st.variants = Variants::Single { index: v }; + // Exclude 0 from the range of a newtype ABI NonZero. + if Some(def.did) == cx.tcx().lang_items().non_zero() { + match st.abi { + Abi::Scalar(ref mut scalar) | + Abi::ScalarPair(ref mut scalar, _) => { + if scalar.valid_range.start == 0 { + scalar.valid_range.start = 1; + } + } + _ => {} + } + } return Ok(tcx.intern_layout(st)); } @@ -2284,20 +2320,6 @@ impl<'a, 'tcx> TyLayout<'tcx> { }; } - // Is this the NonZero lang item wrapping a pointer or integer type? 
- if let ty::TyAdt(def, _) = self.ty.sty { - if Some(def.did) == cx.tcx().lang_items().non_zero() { - let field = self.field(cx, 0)?; - let offset = self.fields.offset(0); - if let Abi::Scalar(Scalar { value, ref valid_range }) = field.abi { - return Ok(Some((offset, Scalar { - value, - valid_range: 0..=valid_range.end - }, 0))); - } - } - } - // Perhaps one of the fields is non-zero, let's recurse and find out. if let FieldPlacement::Union(_) = self.fields { // Only Rust enums have safe-to-inspect fields diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 3f3c5ac0a627e..223379527c989 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -139,7 +139,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { let ccx = self.cx.ccx; if let mir::Lvalue::Projection(ref proj) = *lvalue { - // Allow uses of projections that are ZSTs or from immediate scalar fields. + // Allow uses of projections that are ZSTs or from scalar fields. if let LvalueContext::Consume = context { let base_ty = proj.base.ty(self.cx.mir, ccx.tcx()); let base_ty = self.cx.monomorphize(&base_ty); @@ -153,7 +153,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { if let mir::ProjectionElem::Field(..) = proj.elem { let layout = ccx.layout_of(base_ty.to_ty(ccx.tcx())); - if layout.is_llvm_scalar_pair() { + if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() { // Recurse as a `Consume` instead of `Projection`, // potentially stopping at non-operand projections, // which would trigger `mark_as_lvalue` on locals. 
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index e739037b07d7f..6811861499d7e 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -700,11 +700,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let elem = if field.is_zst() { C_undef(field.llvm_type(bcx.ccx)) } else { - bcx.extract_value(llval, tuple.layout.llvm_field_index(i)) + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + bcx.bitcast(llval, field.immediate_llvm_type(bcx.ccx)) }; // If the tuple is immediate, the elements are as well let op = OperandRef { - val: Immediate(base::to_immediate(bcx, elem, field)), + val: Immediate(elem), layout: field, }; self.trans_argument(bcx, op, llargs, &args[i]); @@ -712,7 +713,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } Pair(a, b) => { let elems = [a, b]; - for i in 0..tuple.layout.fields.count() { + assert_eq!(tuple.layout.fields.count(), 2); + for i in 0..2 { // Pair is always made up of immediates let op = OperandRef { val: Immediate(elems[i]), diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 318e36dc71a4d..f223227cd72b4 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -118,10 +118,27 @@ impl<'a, 'tcx> Const<'tcx> { fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef { let layout = ccx.layout_of(self.ty); - if let layout::Abi::ScalarPair(..) 
= layout.abi { - const_get_elt(self.llval, i as u64) - } else { - const_get_elt(self.llval, layout.llvm_field_index(i)) + let field = layout.field(ccx, i); + if field.is_zst() { + return C_undef(field.immediate_llvm_type(ccx)); + } + match layout.abi { + layout::Abi::Scalar(_) => self.llval, + layout::Abi::ScalarPair(ref a, ref b) => { + let offset = layout.fields.offset(i); + if offset.bytes() == 0 { + assert_eq!(field.size, a.value.size(ccx)); + const_get_elt(self.llval, 0) + } else { + assert_eq!(offset, a.value.size(ccx) + .abi_align(b.value.align(ccx))); + assert_eq!(field.size, b.value.size(ccx)); + const_get_elt(self.llval, 1) + } + } + _ => { + const_get_elt(self.llval, layout.llvm_field_index(i)) + } } } @@ -159,7 +176,8 @@ impl<'a, 'tcx> Const<'tcx> { // a constant LLVM global and cast its address if necessary. let align = ccx.align_of(self.ty); let ptr = consts::addr_of(ccx, self.llval, align, "const"); - OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()), Alignment::AbiAligned) + OperandValue::Ref(consts::ptrcast(ptr, layout.llvm_type(ccx).ptr_to()), + Alignment::AbiAligned) }; OperandRef { @@ -1179,12 +1197,26 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, -> Const<'tcx> { assert_eq!(vals.len(), layout.fields.count()); - if let layout::Abi::ScalarPair(..) = layout.abi { - assert_eq!(vals.len(), 2); - return Const::new(C_struct(ccx, &[ - vals[0].llval, - vals[1].llval, - ], false), layout.ty); + match layout.abi { + layout::Abi::Scalar(_) | + layout::Abi::ScalarPair(..) 
if discr.is_none() => { + let mut non_zst_fields = vals.iter().enumerate().map(|(i, f)| { + (f, layout.fields.offset(i)) + }).filter(|&(f, _)| !ccx.layout_of(f.ty).is_zst()); + match (non_zst_fields.next(), non_zst_fields.next()) { + (Some((x, offset)), None) if offset.bytes() == 0 => { + return Const::new(x.llval, layout.ty); + } + (Some((a, a_offset)), Some((b, _))) if a_offset.bytes() == 0 => { + return Const::new(C_struct(ccx, &[a.llval, b.llval], false), layout.ty); + } + (Some((a, _)), Some((b, b_offset))) if b_offset.bytes() == 0 => { + return Const::new(C_struct(ccx, &[b.llval, a.llval], false), layout.ty); + } + _ => {} + } + } + _ => {} } // offset of current value diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 97e7dda31aa2c..7826d998df33a 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -10,12 +10,12 @@ use llvm::ValueRef; use rustc::ty; -use rustc::ty::layout::{LayoutOf, TyLayout}; +use rustc::ty::layout::{self, LayoutOf, TyLayout}; use rustc::mir; use rustc_data_structures::indexed_vec::Idx; use base; -use common::{CrateContext, C_undef}; +use common::{CrateContext, C_undef, C_usize}; use builder::Builder; use value::Value; use type_of::LayoutLlvmExt; @@ -207,24 +207,47 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let mir::ProjectionElem::Field(ref f, _) = proj.elem { if let Some(o) = self.maybe_trans_consume_direct(bcx, &proj.base) { let layout = o.layout.field(bcx.ccx, f.index()); + let offset = o.layout.fields.offset(f.index()); // Handled in `trans_consume`. assert!(!layout.is_zst()); - match o.val { - OperandValue::Pair(a, b) => { - let llval = [a, b][f.index()]; - // HACK(eddyb) have to bitcast pointers - // until LLVM removes pointee types. - let llval = bcx.bitcast(llval, - layout.immediate_llvm_type(bcx.ccx)); - return Some(OperandRef { - val: OperandValue::Immediate(llval), - layout - }); + // Offset has to match a scalar component. 
+ let llval = match (o.val, &o.layout.abi) { + (OperandValue::Immediate(llval), + &layout::Abi::Scalar(ref scalar)) => { + assert_eq!(offset.bytes(), 0); + assert_eq!(layout.size, scalar.value.size(bcx.ccx)); + llval } - _ => {} - } + (OperandValue::Pair(a_llval, b_llval), + &layout::Abi::ScalarPair(ref a, ref b)) => { + if offset.bytes() == 0 { + assert_eq!(layout.size, a.value.size(bcx.ccx)); + a_llval + } else { + assert_eq!(offset, a.value.size(bcx.ccx) + .abi_align(b.value.align(bcx.ccx))); + assert_eq!(layout.size, b.value.size(bcx.ccx)); + b_llval + } + } + + // `#[repr(simd)]` types are also immediate. + (OperandValue::Immediate(llval), + &layout::Abi::Vector) => { + bcx.extract_element(llval, C_usize(bcx.ccx, f.index() as u64)) + } + + _ => return None + }; + + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + let llval = bcx.bitcast(llval, layout.immediate_llvm_type(bcx.ccx)); + return Some(OperandRef { + val: OperandValue::Immediate(llval), + layout + }); } } } diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index 428cbdddb2209..f96c104b265f9 100644 --- a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -46,13 +46,13 @@ pub fn static_borrow(_: &'static i32) { pub fn named_borrow<'r>(_: &'r i32) { } -// CHECK: @unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0) +// CHECK: @unsafe_borrow(i16* dereferenceable(2) %arg0) // unsafe interior means this isn't actually readonly and there may be aliases ... #[no_mangle] pub fn unsafe_borrow(_: &UnsafeInner) { } -// CHECK: @mutable_unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0) +// CHECK: @mutable_unsafe_borrow(i16* dereferenceable(2) %arg0) // ... unless this is a mutable borrow, those never alias // ... 
except that there's this LLVM bug that forces us to not use noalias, see #29485 #[no_mangle] @@ -110,7 +110,7 @@ pub fn slice(_: &[u8]) { pub fn mutable_slice(_: &mut [u8]) { } -// CHECK: @unsafe_slice([0 x %UnsafeInner]* nonnull %arg0.0, [[USIZE]] %arg0.1) +// CHECK: @unsafe_slice([0 x i16]* nonnull %arg0.0, [[USIZE]] %arg0.1) // unsafe interior means this isn't actually readonly and there may be aliases ... #[no_mangle] pub fn unsafe_slice(_: &[UnsafeInner]) { diff --git a/src/test/codegen/issue-32031.rs b/src/test/codegen/issue-32031.rs index 5d3ccbfa4ceb0..e5ec17385455e 100644 --- a/src/test/codegen/issue-32031.rs +++ b/src/test/codegen/issue-32031.rs @@ -15,7 +15,7 @@ #[no_mangle] pub struct F32(f32); -// CHECK: define float @add_newtype_f32(float, float) +// CHECK: define float @add_newtype_f32(float %a, float %b) #[inline(never)] #[no_mangle] pub fn add_newtype_f32(a: F32, b: F32) -> F32 { @@ -25,7 +25,7 @@ pub fn add_newtype_f32(a: F32, b: F32) -> F32 { #[no_mangle] pub struct F64(f64); -// CHECK: define double @add_newtype_f64(double, double) +// CHECK: define double @add_newtype_f64(double %a, double %b) #[inline(never)] #[no_mangle] pub fn add_newtype_f64(a: F64, b: F64) -> F64 { From 7a36141465d1f97936cfceca87ed428dbfafdd3f Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Mon, 9 Oct 2017 19:56:41 +0300 Subject: [PATCH 57/69] rustc: unpack scalar pair newtype layout ABIs. 
--- src/librustc/ty/layout.rs | 41 +++++----- src/librustc_trans/base.rs | 25 ++++++ src/librustc_trans/mir/block.rs | 47 +++-------- src/librustc_trans/mir/constant.rs | 13 +++- src/librustc_trans/mir/lvalue.rs | 69 +++++++++------- src/librustc_trans/mir/operand.rs | 121 +++++++++++++++++------------ src/librustc_trans/mir/rvalue.rs | 13 ++-- src/librustc_trans/type_of.rs | 10 --- src/test/codegen/refs.rs | 2 +- 9 files changed, 185 insertions(+), 156 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 3bf711d3e232f..fc5d421394959 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1087,10 +1087,11 @@ impl<'a, 'tcx> CachedLayout { // We have exactly one non-ZST field. match (non_zst_fields.next(), non_zst_fields.next()) { (Some(field), None) => { - // Field size match and it has a scalar ABI. + // Field size matches and it has a scalar or scalar pair ABI. if size == field.size { match field.abi { - Abi::Scalar(_) => { + Abi::Scalar(_) | + Abi::ScalarPair(..) => { abi = field.abi.clone(); } _ => {} @@ -2228,17 +2229,7 @@ impl<'a, 'tcx> TyLayout<'tcx> { ty::TyAdt(def, substs) => { match self.variants { Variants::Single { index } => { - let mut field_ty = def.variants[index].fields[i].ty(tcx, substs); - - // Treat NonZero<*T> as containing &T. - // This is especially useful for fat pointers. - if Some(def.did) == tcx.lang_items().non_zero() { - if let ty::TyRawPtr(mt) = field_ty.sty { - field_ty = tcx.mk_ref(tcx.types.re_erased, mt); - } - } - - field_ty + def.variants[index].fields[i].ty(tcx, substs) } // Discriminant field for enums (where applicable). @@ -2294,21 +2285,22 @@ impl<'a, 'tcx> TyLayout<'tcx> { where C: LayoutOf, TyLayout = Result>> + HasTyCtxt<'tcx> { - if let Abi::Scalar(Scalar { value, ref valid_range }) = self.abi { + let scalar_component = |scalar: &Scalar, offset| { // FIXME(eddyb) support negative/wrap-around discriminant ranges. 
- return if valid_range.start < valid_range.end { + let Scalar { value, ref valid_range } = *scalar; + if valid_range.start < valid_range.end { let bits = value.size(cx).bits(); assert!(bits <= 128); let max_value = !0u128 >> (128 - bits); if valid_range.start > 0 { let niche = valid_range.start - 1; - Ok(Some((self.fields.offset(0), Scalar { + Ok(Some((offset, Scalar { value, valid_range: niche..=valid_range.end }, niche))) } else if valid_range.end < max_value { let niche = valid_range.end + 1; - Ok(Some((self.fields.offset(0), Scalar { + Ok(Some((offset, Scalar { value, valid_range: valid_range.start..=niche }, niche))) @@ -2317,7 +2309,20 @@ impl<'a, 'tcx> TyLayout<'tcx> { } } else { Ok(None) - }; + } + }; + + match self.abi { + Abi::Scalar(ref scalar) => { + return scalar_component(scalar, Size::from_bytes(0)); + } + Abi::ScalarPair(ref a, ref b) => { + if let Some(result) = scalar_component(a, Size::from_bytes(0))? { + return Ok(Some(result)); + } + return scalar_component(b, a.value.size(cx).abi_align(b.value.align(cx))); + } + _ => {} } // Perhaps one of the fields is non-zero, let's recurse and find out. 
diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index ff70184b26245..b7408681ed0c8 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -240,6 +240,31 @@ pub fn unsize_thin_ptr<'a, 'tcx>( let ptr_ty = bcx.ccx.layout_of(b).llvm_type(bcx.ccx).ptr_to(); (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None)) } + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => { + assert_eq!(def_a, def_b); + + let src_layout = bcx.ccx.layout_of(src_ty); + let dst_layout = bcx.ccx.layout_of(dst_ty); + let mut result = None; + for i in 0..src_layout.fields.count() { + let src_f = src_layout.field(bcx.ccx, i); + assert_eq!(src_layout.fields.offset(i).bytes(), 0); + assert_eq!(dst_layout.fields.offset(i).bytes(), 0); + if src_f.is_zst() { + continue; + } + assert_eq!(src_layout.size, src_f.size); + + let dst_f = dst_layout.field(bcx.ccx, i); + assert_ne!(src_f.ty, dst_f.ty); + assert_eq!(result, None); + result = Some(unsize_thin_ptr(bcx, src, src_f.ty, dst_f.ty)); + } + let (lldata, llextra) = result.unwrap(); + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + (bcx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bcx.ccx, 0)), + bcx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bcx.ccx, 1))) + } _ => bug!("unsize_thin_ptr: called on bad types"), } } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 6811861499d7e..67e0f35b46ef9 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -685,46 +685,19 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let tuple = self.trans_operand(bcx, operand); // Handle both by-ref and immediate tuples. 
- match tuple.val { - Ref(llval, align) => { - let tuple_ptr = LvalueRef::new_sized(llval, tuple.layout, align); - for i in 0..tuple.layout.fields.count() { - let field_ptr = tuple_ptr.project_field(bcx, i); - self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[i]); - } - + if let Ref(llval, align) = tuple.val { + let tuple_ptr = LvalueRef::new_sized(llval, tuple.layout, align); + for i in 0..tuple.layout.fields.count() { + let field_ptr = tuple_ptr.project_field(bcx, i); + self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[i]); } - Immediate(llval) => { - for i in 0..tuple.layout.fields.count() { - let field = tuple.layout.field(bcx.ccx, i); - let elem = if field.is_zst() { - C_undef(field.llvm_type(bcx.ccx)) - } else { - // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - bcx.bitcast(llval, field.immediate_llvm_type(bcx.ccx)) - }; - // If the tuple is immediate, the elements are as well - let op = OperandRef { - val: Immediate(elem), - layout: field, - }; - self.trans_argument(bcx, op, llargs, &args[i]); - } - } - Pair(a, b) => { - let elems = [a, b]; - assert_eq!(tuple.layout.fields.count(), 2); - for i in 0..2 { - // Pair is always made up of immediates - let op = OperandRef { - val: Immediate(elems[i]), - layout: tuple.layout.field(bcx.ccx, i), - }; - self.trans_argument(bcx, op, llargs, &args[i]); - } + } else { + // If the tuple is immediate, the elements are as well. 
+ for i in 0..tuple.layout.fields.count() { + let op = tuple.extract_field(bcx, i); + self.trans_argument(bcx, op, llargs, &args[i]); } } - } fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> LvalueRef<'tcx> { diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index f223227cd72b4..54907cb747c75 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -127,8 +127,12 @@ impl<'a, 'tcx> Const<'tcx> { layout::Abi::ScalarPair(ref a, ref b) => { let offset = layout.fields.offset(i); if offset.bytes() == 0 { - assert_eq!(field.size, a.value.size(ccx)); - const_get_elt(self.llval, 0) + if field.size == layout.size { + self.llval + } else { + assert_eq!(field.size, a.value.size(ccx)); + const_get_elt(self.llval, 0) + } } else { assert_eq!(offset, a.value.size(ccx) .abi_align(b.value.align(ccx))); @@ -166,8 +170,9 @@ impl<'a, 'tcx> Const<'tcx> { let llvalty = val_ty(self.llval); let val = if llty == llvalty && layout.is_llvm_scalar_pair() { - let (a, b) = self.get_pair(ccx); - OperandValue::Pair(a, b) + OperandValue::Pair( + const_get_elt(self.llval, 0), + const_get_elt(self.llval, 1)) } else if llty == llvalty && layout.is_llvm_immediate() { // If the types match, we can use the value directly. OperandValue::Immediate(self.llval) diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index ff0b448267820..71d2fda09c9d2 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -135,6 +135,31 @@ impl<'a, 'tcx> LvalueRef<'tcx> { return OperandRef::new_zst(bcx.ccx, self.layout); } + let scalar_load_metadata = |load, scalar: &layout::Scalar| { + let (min, max) = (scalar.valid_range.start, scalar.valid_range.end); + let max_next = max.wrapping_add(1); + let bits = scalar.value.size(bcx.ccx).bits(); + assert!(bits <= 128); + let mask = !0u128 >> (128 - bits); + // For a (max) value of -1, max will be `-1 as usize`, which overflows. 
+ // However, that is fine here (it would still represent the full range), + // i.e., if the range is everything. The lo==hi case would be + // rejected by the LLVM verifier (it would mean either an + // empty set, which is impossible, or the entire range of the + // type, which is pointless). + match scalar.value { + layout::Int(..) if max_next & mask != min & mask => { + // llvm::ConstantRange can deal with ranges that wrap around, + // so an overflow on (max + 1) is fine. + bcx.range_metadata(load, min..max_next); + } + layout::Pointer if 0 < min && min < max => { + bcx.nonnull_metadata(load); + } + _ => {} + } + }; + let val = if self.layout.is_llvm_immediate() { let mut const_llval = ptr::null_mut(); unsafe { @@ -149,39 +174,27 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } else { let load = bcx.load(self.llval, self.alignment.non_abi()); if let layout::Abi::Scalar(ref scalar) = self.layout.abi { - let (min, max) = (scalar.valid_range.start, scalar.valid_range.end); - let max_next = max.wrapping_add(1); - let bits = scalar.value.size(bcx.ccx).bits(); - assert!(bits <= 128); - let mask = !0u128 >> (128 - bits); - // For a (max) value of -1, max will be `-1 as usize`, which overflows. - // However, that is fine here (it would still represent the full range), - // i.e., if the range is everything. The lo==hi case would be - // rejected by the LLVM verifier (it would mean either an - // empty set, which is impossible, or the entire range of the - // type, which is pointless). - match scalar.value { - layout::Int(..) if max_next & mask != min & mask => { - // llvm::ConstantRange can deal with ranges that wrap around, - // so an overflow on (max + 1) is fine. 
- bcx.range_metadata(load, min..max_next); - } - layout::Pointer if 0 < min && min < max => { - bcx.nonnull_metadata(load); - } - _ => {} - } + scalar_load_metadata(load, scalar); } load }; OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout)) - } else if self.layout.is_llvm_scalar_pair() { - let load = |i| { - let x = self.project_field(bcx, i).load(bcx).immediate(); - // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - bcx.bitcast(x, self.layout.scalar_pair_element_llvm_type(bcx.ccx, i)) + } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { + let load = |i, scalar: &layout::Scalar| { + let mut llptr = bcx.struct_gep(self.llval, i as u64); + // Make sure to always load i1 as i8. + if scalar.is_bool() { + llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx)); + } + let load = bcx.load(llptr, self.alignment.non_abi()); + scalar_load_metadata(load, scalar); + if scalar.is_bool() { + bcx.trunc(load, Type::i1(bcx.ccx)) + } else { + load + } }; - OperandValue::Pair(load(0), load(1)) + OperandValue::Pair(load(0, a), load(1, b)) } else { OperandValue::Ref(self.llval, self.alignment) }; diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 7826d998df33a..8c43bded1bf21 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -15,10 +15,11 @@ use rustc::mir; use rustc_data_structures::indexed_vec::Idx; use base; -use common::{CrateContext, C_undef, C_usize}; +use common::{self, CrateContext, C_undef, C_usize}; use builder::Builder; use value::Value; use type_of::LayoutLlvmExt; +use type_::Type; use std::fmt; use std::ptr; @@ -84,7 +85,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { layout: TyLayout<'tcx>) -> OperandRef<'tcx> { assert!(layout.is_zst()); OperandRef { - val: OperandValue::Immediate(C_undef(layout.llvm_type(ccx))), + val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(ccx))), layout } } @@ -148,6 +149,66 @@ impl<'a, 'tcx> 
OperandRef<'tcx> { }; OperandRef { val, layout } } + + pub fn extract_field(&self, bcx: &Builder<'a, 'tcx>, i: usize) -> OperandRef<'tcx> { + let field = self.layout.field(bcx.ccx, i); + let offset = self.layout.fields.offset(i); + + let mut val = match (self.val, &self.layout.abi) { + // If we're uninhabited, or the field is ZST, it has no data. + _ if self.layout.abi == layout::Abi::Uninhabited || field.is_zst() => { + return OperandRef { + val: OperandValue::Immediate(C_undef(field.immediate_llvm_type(bcx.ccx))), + layout: field + }; + } + + // Newtype of a scalar or scalar pair. + (OperandValue::Immediate(_), _) | + (OperandValue::Pair(..), _) if field.size == self.layout.size => { + assert_eq!(offset.bytes(), 0); + self.val + } + + // Extract a scalar component from a pair. + (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => { + if offset.bytes() == 0 { + assert_eq!(field.size, a.value.size(bcx.ccx)); + OperandValue::Immediate(a_llval) + } else { + assert_eq!(offset, a.value.size(bcx.ccx) + .abi_align(b.value.align(bcx.ccx))); + assert_eq!(field.size, b.value.size(bcx.ccx)); + OperandValue::Immediate(b_llval) + } + } + + // `#[repr(simd)]` types are also immediate. + (OperandValue::Immediate(llval), &layout::Abi::Vector) => { + OperandValue::Immediate( + bcx.extract_element(llval, C_usize(bcx.ccx, i as u64))) + } + + _ => bug!("OperandRef::extract_field({:?}): not applicable", self) + }; + + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + match val { + OperandValue::Immediate(ref mut llval) => { + *llval = bcx.bitcast(*llval, field.immediate_llvm_type(bcx.ccx)); + } + OperandValue::Pair(ref mut a, ref mut b) => { + *a = bcx.bitcast(*a, field.scalar_pair_element_llvm_type(bcx.ccx, 0)); + *b = bcx.bitcast(*b, field.scalar_pair_element_llvm_type(bcx.ccx, 1)); + } + OperandValue::Ref(..) 
=> bug!() + } + + OperandRef { + val, + layout: field + } + } } impl<'a, 'tcx> OperandValue { @@ -167,11 +228,12 @@ impl<'a, 'tcx> OperandValue { } OperandValue::Pair(a, b) => { for (i, &x) in [a, b].iter().enumerate() { - let field = dest.project_field(bcx, i); - // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - let x = bcx.bitcast(x, field.layout.immediate_llvm_type(bcx.ccx)); - bcx.store(base::from_immediate(bcx, x), - field.llval, field.alignment.non_abi()); + let mut llptr = bcx.struct_gep(dest.llval, i as u64); + // Make sure to always store i1 as i8. + if common::val_ty(x) == Type::i1(bcx.ccx) { + llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx)); + } + bcx.store(base::from_immediate(bcx, x), llptr, dest.alignment.non_abi()); } } } @@ -202,52 +264,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } - // Moves out of pair fields are trivial. + // Moves out of scalar and scalar pair fields are trivial. if let &mir::Lvalue::Projection(ref proj) = lvalue { if let mir::ProjectionElem::Field(ref f, _) = proj.elem { if let Some(o) = self.maybe_trans_consume_direct(bcx, &proj.base) { - let layout = o.layout.field(bcx.ccx, f.index()); - let offset = o.layout.fields.offset(f.index()); - - // Handled in `trans_consume`. - assert!(!layout.is_zst()); - - // Offset has to match a scalar component. - let llval = match (o.val, &o.layout.abi) { - (OperandValue::Immediate(llval), - &layout::Abi::Scalar(ref scalar)) => { - assert_eq!(offset.bytes(), 0); - assert_eq!(layout.size, scalar.value.size(bcx.ccx)); - llval - } - (OperandValue::Pair(a_llval, b_llval), - &layout::Abi::ScalarPair(ref a, ref b)) => { - if offset.bytes() == 0 { - assert_eq!(layout.size, a.value.size(bcx.ccx)); - a_llval - } else { - assert_eq!(offset, a.value.size(bcx.ccx) - .abi_align(b.value.align(bcx.ccx))); - assert_eq!(layout.size, b.value.size(bcx.ccx)); - b_llval - } - } - - // `#[repr(simd)]` types are also immediate. 
- (OperandValue::Immediate(llval), - &layout::Abi::Vector) => { - bcx.extract_element(llval, C_usize(bcx.ccx, f.index() as u64)) - } - - _ => return None - }; - - // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - let llval = bcx.bitcast(llval, layout.immediate_llvm_type(bcx.ccx)); - return Some(OperandRef { - val: OperandValue::Immediate(llval), - layout - }); + return Some(o.extract_field(bcx, f.index())); } } } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 33b1a7e3363fd..bf44434cafea7 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -223,20 +223,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { operand.val } mir::CastKind::Unsize => { - // unsize targets other than to a fat pointer currently - // can't be operands. - assert!(common::type_is_fat_ptr(bcx.ccx, cast.ty)); - match operand.val { OperandValue::Pair(lldata, llextra) => { // unsize from a fat pointer - this is a // "trait-object-to-supertrait" coercion, for // example, // &'a fmt::Debug+Send => &'a fmt::Debug, - // So we need to pointercast the base to ensure - // the types match up. - let thin_ptr = cast.field(bcx.ccx, abi::FAT_PTR_ADDR); - let lldata = bcx.pointercast(lldata, thin_ptr.llvm_type(bcx.ccx)); + + // HACK(eddyb) have to bitcast pointers + // until LLVM removes pointee types. + let lldata = bcx.pointercast(lldata, + cast.scalar_pair_element_llvm_type(bcx.ccx, 0)); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index d62e2ac1552ec..35da258cdb346 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -491,16 +491,6 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } } } - - if let ty::TyAdt(def, _) = self.ty.sty { - if Some(def.did) == ccx.tcx().lang_items().non_zero() { - // FIXME(eddyb) Don't treat NonZero<*T> as - // as containing &T in ty::layout. 
- if let Some(ref mut pointee) = result { - pointee.safe = None; - } - } - } } } diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs index 0c084131ea3f2..6c00ffa754b06 100644 --- a/src/test/codegen/refs.rs +++ b/src/test/codegen/refs.rs @@ -24,7 +24,7 @@ pub fn helper(_: usize) { pub fn ref_dst(s: &[u8]) { // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy // directly to the alloca for "x" -// CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8]*, [[USIZE]] }* %x to [0 x i8]** +// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 0 // CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]] // CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 1 // CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]] From 18ecc564f2cee4da3ef9397ba58e19d3fd9be3de Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 10 Oct 2017 20:54:50 +0300 Subject: [PATCH 58/69] rustc_trans: support scalar pairs directly in the Rust ABI. 
--- src/librustc_llvm/lib.rs | 9 +- src/librustc_trans/abi.rs | 453 +++++++++++++++--------------- src/librustc_trans/attributes.rs | 2 +- src/librustc_trans/cabi_asmjs.rs | 5 +- src/librustc_trans/cabi_x86.rs | 17 +- src/librustc_trans/cabi_x86_64.rs | 6 +- src/librustc_trans/intrinsic.rs | 6 +- src/librustc_trans/mir/block.rs | 134 ++++----- src/librustc_trans/mir/mod.rs | 98 +++---- 9 files changed, 367 insertions(+), 363 deletions(-) diff --git a/src/librustc_llvm/lib.rs b/src/librustc_llvm/lib.rs index 5ccce8de70639..592bd62056455 100644 --- a/src/librustc_llvm/lib.rs +++ b/src/librustc_llvm/lib.rs @@ -74,22 +74,19 @@ pub fn AddFunctionAttrStringValue(llfn: ValueRef, } } -#[repr(C)] #[derive(Copy, Clone)] pub enum AttributePlace { + ReturnValue, Argument(u32), Function, } impl AttributePlace { - pub fn ReturnValue() -> Self { - AttributePlace::Argument(0) - } - pub fn as_uint(self) -> c_uint { match self { + AttributePlace::ReturnValue => 0, + AttributePlace::Argument(i) => 1 + i, AttributePlace::Function => !0, - AttributePlace::Argument(i) => i, } } } diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index d69103bbb529d..7ef89597b11ca 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -11,7 +11,7 @@ use llvm::{self, ValueRef, AttributePlace}; use base; use builder::Builder; -use common::{instance_ty, ty_fn_sig, type_is_fat_ptr, C_usize}; +use common::{instance_ty, ty_fn_sig, C_usize}; use context::CrateContext; use cabi_x86; use cabi_x86_64; @@ -30,7 +30,8 @@ use cabi_sparc64; use cabi_nvptx; use cabi_nvptx64; use cabi_hexagon; -use mir::lvalue::LvalueRef; +use mir::lvalue::{Alignment, LvalueRef}; +use mir::operand::OperandValue; use type_::Type; use type_of::{LayoutLlvmExt, PointerKind}; @@ -44,15 +45,19 @@ use std::{cmp, iter}; pub use syntax::abi::Abi; pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; -#[derive(Clone, Copy, PartialEq, Debug)] -enum ArgKind { - /// Pass the argument directly using the normal 
converted - /// LLVM type or by coercing to another specified type - Direct, - /// Pass the argument indirectly via a hidden pointer - Indirect, - /// Ignore the argument (useful for empty struct) +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum PassMode { + /// Ignore the argument (useful for empty struct). Ignore, + /// Pass the argument directly. + Direct(ArgAttributes), + /// Pass a pair's elements directly in two arguments. + Pair(ArgAttributes, ArgAttributes), + /// Pass the argument after casting it, to either + /// a single uniform or a pair of registers. + Cast(CastTarget), + /// Pass the argument indirectly via a hidden pointer. + Indirect(ArgAttributes), } // Hack to disable non_upper_case_globals only for the bitflags! and not for the rest @@ -94,7 +99,7 @@ impl ArgAttribute { /// A compact representation of LLVM attributes (at least those relevant for this module) /// that can be manipulated without interacting with LLVM's Attribute machinery. -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct ArgAttributes { regular: ArgAttribute, pointee_size: Size, @@ -248,7 +253,7 @@ impl Reg { /// An argument passed entirely registers with the /// same kind (e.g. HFA / HVA on PPC64 and AArch64). -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct Uniform { pub unit: Reg, @@ -399,7 +404,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { } } -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum CastTarget { Uniform(Uniform), Pair(Reg, Reg) @@ -452,66 +457,53 @@ impl CastTarget { } } -/// Information about how a specific C type -/// should be passed to or returned from a function -/// -/// This is borrowed from clang's ABIInfo.h +/// Information about how to pass an argument to, +/// or return a value from, a function, under some ABI. 
#[derive(Debug)] pub struct ArgType<'tcx> { - kind: ArgKind, pub layout: TyLayout<'tcx>, - /// Cast target, either a single uniform or a pair of registers. - pub cast: Option, + /// Dummy argument, which is emitted before the real argument. pub pad: Option, - /// Attributes of argument. - pub attrs: ArgAttributes, - pub nested: Vec> + + pub mode: PassMode, } impl<'a, 'tcx> ArgType<'tcx> { fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> { - let mut attrs = ArgAttributes::new(); - - if let layout::Abi::Scalar(ref scalar) = layout.abi { - if scalar.is_bool() { - attrs.set(ArgAttribute::ZExt); - } - } - ArgType { - kind: ArgKind::Direct, layout, - cast: None, pad: None, - attrs, - nested: vec![] + mode: PassMode::Direct(ArgAttributes::new()), } } pub fn make_indirect(&mut self) { - assert!(self.nested.is_empty()); - assert_eq!(self.kind, ArgKind::Direct); + assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new())); - // Wipe old attributes, likely not valid through indirection. - self.attrs = ArgAttributes::new(); + // Start with fresh attributes for the pointer. + let mut attrs = ArgAttributes::new(); // For non-immediate arguments the callee gets its own copy of // the value on the stack, so there are no aliases. 
It's also // program-invisible so can't possibly capture - self.attrs.set(ArgAttribute::NoAlias) - .set(ArgAttribute::NoCapture) - .set(ArgAttribute::NonNull); - self.attrs.pointee_size = self.layout.size; - self.attrs.pointee_align = Some(self.layout.align); + attrs.set(ArgAttribute::NoAlias) + .set(ArgAttribute::NoCapture) + .set(ArgAttribute::NonNull); + attrs.pointee_size = self.layout.size; + attrs.pointee_align = Some(self.layout.align); - self.kind = ArgKind::Indirect; + self.mode = PassMode::Indirect(attrs); } - pub fn ignore(&mut self) { - assert!(self.nested.is_empty()); - assert_eq!(self.kind, ArgKind::Direct); - self.kind = ArgKind::Ignore; + pub fn make_indirect_byval(&mut self) { + self.make_indirect(); + match self.mode { + PassMode::Indirect(ref mut attrs) => { + attrs.set(ArgAttribute::ByVal); + } + _ => bug!() + } } pub fn extend_integer_width_to(&mut self, bits: u64) { @@ -519,32 +511,36 @@ impl<'a, 'tcx> ArgType<'tcx> { if let layout::Abi::Scalar(ref scalar) = self.layout.abi { if let layout::Int(i, signed) = scalar.value { if i.size().bits() < bits { - self.attrs.set(if signed { - ArgAttribute::SExt - } else { - ArgAttribute::ZExt - }); + if let PassMode::Direct(ref mut attrs) = self.mode { + attrs.set(if signed { + ArgAttribute::SExt + } else { + ArgAttribute::ZExt + }); + } } } } } pub fn cast_to>(&mut self, target: T) { - assert!(self.nested.is_empty()); - self.cast = Some(target.into()); + assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new())); + self.mode = PassMode::Cast(target.into()); } pub fn pad_with(&mut self, reg: Reg) { - assert!(self.nested.is_empty()); self.pad = Some(reg); } pub fn is_indirect(&self) -> bool { - self.kind == ArgKind::Indirect + match self.mode { + PassMode::Indirect(_) => true, + _ => false + } } pub fn is_ignore(&self) -> bool { - self.kind == ArgKind::Ignore + self.mode == PassMode::Ignore } /// Get the LLVM type for an lvalue of the original Rust type of @@ -557,20 +553,19 @@ impl<'a, 'tcx> 
ArgType<'tcx> { /// lvalue for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. - pub fn store(&self, bcx: &Builder<'a, 'tcx>, mut val: ValueRef, dst: LvalueRef<'tcx>) { + pub fn store(&self, bcx: &Builder<'a, 'tcx>, val: ValueRef, dst: LvalueRef<'tcx>) { if self.is_ignore() { return; } let ccx = bcx.ccx; if self.is_indirect() { - let llsz = C_usize(ccx, self.layout.size.bytes()); - base::call_memcpy(bcx, dst.llval, val, llsz, self.layout.align); - } else if let Some(ty) = self.cast { + OperandValue::Ref(val, Alignment::AbiAligned).store(bcx, dst) + } else if let PassMode::Cast(cast) = self.mode { // FIXME(eddyb): Figure out when the simpler Store is safe, clang // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_dst = bcx.pointercast(dst.llval, ty.llvm_type(ccx).ptr_to()); + let cast_dst = bcx.pointercast(dst.llval, cast.llvm_type(ccx).ptr_to()); bcx.store(val, cast_dst, Some(self.layout.align)); } else { // The actual return type is a struct, but the ABI @@ -588,8 +583,8 @@ impl<'a, 'tcx> ArgType<'tcx> { // bitcasting to the struct type yields invalid cast errors. // We instead thus allocate some scratch space... - let llscratch = bcx.alloca(ty.llvm_type(ccx), "abi_cast", None); - let scratch_size = ty.size(ccx); + let llscratch = bcx.alloca(cast.llvm_type(ccx), "abi_cast", None); + let scratch_size = cast.size(ccx); bcx.lifetime_start(llscratch, scratch_size); // ...where we first store the value... 
@@ -600,32 +595,33 @@ impl<'a, 'tcx> ArgType<'tcx> { bcx.pointercast(dst.llval, Type::i8p(ccx)), bcx.pointercast(llscratch, Type::i8p(ccx)), C_usize(ccx, self.layout.size.bytes()), - self.layout.align.min(ty.align(ccx))); + self.layout.align.min(cast.align(ccx))); bcx.lifetime_end(llscratch, scratch_size); } } else { - val = base::from_immediate(bcx, val); - bcx.store(val, dst.llval, None); + OperandValue::Immediate(val).store(bcx, dst); } } pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: LvalueRef<'tcx>) { - if !self.nested.is_empty() { - for (i, arg) in self.nested.iter().enumerate() { - arg.store_fn_arg(bcx, idx, dst.project_field(bcx, i)); - } - return; - } if self.pad.is_some() { *idx += 1; } - if self.is_ignore() { - return; + let mut next = || { + let val = llvm::get_param(bcx.llfn(), *idx as c_uint); + *idx += 1; + val + }; + match self.mode { + PassMode::Ignore => {}, + PassMode::Pair(..) => { + OperandValue::Pair(next(), next()).store(bcx, dst); + } + PassMode::Direct(_) | PassMode::Indirect(_) | PassMode::Cast(_) => { + self.store(bcx, next(), dst); + } } - let val = llvm::get_param(bcx.llfn(), *idx as c_uint); - *idx += 1; - self.store(bcx, val, dst); } } @@ -660,7 +656,7 @@ impl<'a, 'tcx> FnType<'tcx> { sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx> { let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args); - fn_ty.adjust_for_abi(ccx, sig); + fn_ty.adjust_for_abi(ccx, sig.abi); fn_ty } @@ -669,9 +665,23 @@ impl<'a, 'tcx> FnType<'tcx> { extra_args: &[Ty<'tcx>]) -> FnType<'tcx> { let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args); // Don't pass the vtable, it's not an argument of the virtual fn. 
- assert_eq!(fn_ty.args[0].nested.len(), 2); - fn_ty.args[0].nested[1].ignore(); - fn_ty.adjust_for_abi(ccx, sig); + { + let self_arg = &mut fn_ty.args[0]; + match self_arg.mode { + PassMode::Pair(data_ptr, _) => { + self_arg.mode = PassMode::Direct(data_ptr); + } + _ => bug!("FnType::new_vtable: non-pair self {:?}", self_arg) + } + + let pointee = self_arg.layout.ty.builtin_deref(true, ty::NoPreference) + .unwrap_or_else(|| { + bug!("FnType::new_vtable: non-pointer self {:?}", self_arg) + }).ty; + let fat_ptr_ty = ccx.tcx().mk_mut_ptr(pointee); + self_arg.layout = ccx.layout_of(fat_ptr_ty).field(ccx, 0); + } + fn_ty.adjust_for_abi(ccx, sig.abi); fn_ty } @@ -737,31 +747,37 @@ impl<'a, 'tcx> FnType<'tcx> { }; // Handle safe Rust thin and fat pointers. - let adjust_for_rust_type = |arg: &mut ArgType<'tcx>, is_return: bool| { - match arg.layout.abi { - layout::Abi::Scalar(layout::Scalar { - value: layout::Pointer, - ref valid_range - }) => { - if valid_range.start > 0 && valid_range.start < valid_range.end { - arg.attrs.set(ArgAttribute::NonNull); - } - } - _ => { - // Nothing to do for non-pointer types. - return; + let adjust_for_rust_scalar = |attrs: &mut ArgAttributes, + scalar: &layout::Scalar, + layout: TyLayout<'tcx>, + offset: Size, + is_return: bool| { + // Booleans are always an i1 that needs to be zero-extended. + if scalar.is_bool() { + attrs.set(ArgAttribute::ZExt); + return; + } + + // Only pointer types handled below. 
+ if scalar.value != layout::Pointer { + return; + } + + if scalar.valid_range.start < scalar.valid_range.end { + if scalar.valid_range.start > 0 { + attrs.set(ArgAttribute::NonNull); } } - if let Some(pointee) = arg.layout.pointee_info_at(ccx, Size::from_bytes(0)) { + if let Some(pointee) = layout.pointee_info_at(ccx, offset) { if let Some(kind) = pointee.safe { - arg.attrs.pointee_size = pointee.size; - arg.attrs.pointee_align = Some(pointee.align); + attrs.pointee_size = pointee.size; + attrs.pointee_align = Some(pointee.align); // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions // with align attributes, and those calls later block optimizations. if !is_return { - arg.attrs.pointee_align = None; + attrs.pointee_align = None; } // `Box` pointer parameters never alias because ownership is transferred @@ -778,11 +794,11 @@ impl<'a, 'tcx> FnType<'tcx> { PointerKind::UniqueBorrowed => !is_return }; if no_alias { - arg.attrs.set(ArgAttribute::NoAlias); + attrs.set(ArgAttribute::NoAlias); } if kind == PointerKind::Frozen && !is_return { - arg.attrs.set(ArgAttribute::ReadOnly); + attrs.set(ArgAttribute::ReadOnly); } } } @@ -794,22 +810,39 @@ impl<'a, 'tcx> FnType<'tcx> { // For some forsaken reason, x86_64-pc-windows-gnu // doesn't ignore zero-sized struct arguments. // The same is true for s390x-unknown-linux-gnu. - if is_return || rust_abi || - (!win_x64_gnu && !linux_s390x) { - arg.ignore(); + if is_return || rust_abi || (!win_x64_gnu && !linux_s390x) { + arg.mode = PassMode::Ignore; } } - // FIXME(eddyb) other ABIs don't have logic for nested. - if !is_return && type_is_fat_ptr(ccx, arg.layout.ty) && rust_abi { - arg.nested = vec![ - ArgType::new(arg.layout.field(ccx, 0)), - ArgType::new(arg.layout.field(ccx, 1)) - ]; - adjust_for_rust_type(&mut arg.nested[0], false); - adjust_for_rust_type(&mut arg.nested[1], false); - } else { - adjust_for_rust_type(&mut arg, is_return); + // FIXME(eddyb) other ABIs don't have logic for scalar pairs. 
+ if !is_return && rust_abi { + if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi { + let mut a_attrs = ArgAttributes::new(); + let mut b_attrs = ArgAttributes::new(); + adjust_for_rust_scalar(&mut a_attrs, + a, + arg.layout, + Size::from_bytes(0), + false); + adjust_for_rust_scalar(&mut b_attrs, + b, + arg.layout, + a.value.size(ccx).abi_align(b.value.align(ccx)), + false); + arg.mode = PassMode::Pair(a_attrs, b_attrs); + return arg; + } + } + + if let layout::Abi::Scalar(ref scalar) = arg.layout.abi { + if let PassMode::Direct(ref mut attrs) = arg.mode { + adjust_for_rust_scalar(attrs, + scalar, + arg.layout, + Size::from_bytes(0), + is_return); + } } arg @@ -827,40 +860,20 @@ impl<'a, 'tcx> FnType<'tcx> { fn adjust_for_abi(&mut self, ccx: &CrateContext<'a, 'tcx>, - sig: ty::FnSig<'tcx>) { - let abi = sig.abi; + abi: Abi) { if abi == Abi::Unadjusted { return } if abi == Abi::Rust || abi == Abi::RustCall || abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { let fixup = |arg: &mut ArgType<'tcx>| { + if arg.is_ignore() { return; } + match arg.layout.abi { layout::Abi::Aggregate { .. } => {} _ => return } let size = arg.layout.size; - - if let Some(unit) = arg.layout.homogeneous_aggregate(ccx) { - // Replace newtypes with their inner-most type. - if unit.size == size { - // Needs a cast as we've unpacked a newtype. - arg.cast_to(unit); - return; - } - - // Pairs of floats. - if unit.kind == RegKind::Float { - if unit.size.checked_mul(2, ccx) == Some(size) { - // FIXME(eddyb) This should be using Uniform instead of a pair, - // but the resulting [2 x float/double] breaks emscripten. - // See https://github.com/kripken/emscripten-fastcomp/issues/178. - arg.cast_to(CastTarget::Pair(unit, unit)); - return; - } - } - } - if size > layout::Pointer.size(ccx) { arg.make_indirect(); } else { @@ -873,25 +886,12 @@ impl<'a, 'tcx> FnType<'tcx> { }); } }; - // Fat pointers are returned by-value. 
- if !self.ret.is_ignore() { - if !type_is_fat_ptr(ccx, sig.output()) { - fixup(&mut self.ret); - } - } + fixup(&mut self.ret); for arg in &mut self.args { - if arg.is_ignore() { continue; } - if !arg.nested.is_empty() { - for arg in &mut arg.nested { - assert!(arg.nested.is_empty()); - fixup(arg); - } - continue; - } fixup(arg); } - if self.ret.is_indirect() { - self.ret.attrs.set(ArgAttribute::StructRet); + if let PassMode::Indirect(ref mut attrs) = self.ret.mode { + attrs.set(ArgAttribute::StructRet); } return; } @@ -930,55 +930,44 @@ impl<'a, 'tcx> FnType<'tcx> { a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a)) } - if self.ret.is_indirect() { - self.ret.attrs.set(ArgAttribute::StructRet); + if let PassMode::Indirect(ref mut attrs) = self.ret.mode { + attrs.set(ArgAttribute::StructRet); } } pub fn llvm_type(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { let mut llargument_tys = Vec::new(); - let llreturn_ty = if self.ret.is_ignore() { - Type::void(ccx) - } else if self.ret.is_indirect() { - llargument_tys.push(self.ret.memory_ty(ccx).ptr_to()); - Type::void(ccx) - } else if let Some(cast) = self.ret.cast { - cast.llvm_type(ccx) - } else { - self.ret.layout.immediate_llvm_type(ccx) + let llreturn_ty = match self.ret.mode { + PassMode::Ignore => Type::void(ccx), + PassMode::Direct(_) | PassMode::Pair(..) 
=> { + self.ret.layout.immediate_llvm_type(ccx) + } + PassMode::Cast(cast) => cast.llvm_type(ccx), + PassMode::Indirect(_) => { + llargument_tys.push(self.ret.memory_ty(ccx).ptr_to()); + Type::void(ccx) + } }; - { - let mut push = |arg: &ArgType<'tcx>| { - if arg.is_ignore() { - return; - } - // add padding - if let Some(ty) = arg.pad { - llargument_tys.push(ty.llvm_type(ccx)); - } - - let llarg_ty = if arg.is_indirect() { - arg.memory_ty(ccx).ptr_to() - } else if let Some(cast) = arg.cast { - cast.llvm_type(ccx) - } else { - arg.layout.immediate_llvm_type(ccx) - }; + for arg in &self.args { + // add padding + if let Some(ty) = arg.pad { + llargument_tys.push(ty.llvm_type(ccx)); + } - llargument_tys.push(llarg_ty); - }; - for arg in &self.args { - if !arg.nested.is_empty() { - for arg in &arg.nested { - assert!(arg.nested.is_empty()); - push(arg); - } + let llarg_ty = match arg.mode { + PassMode::Ignore => continue, + PassMode::Direct(_) => arg.layout.immediate_llvm_type(ccx), + PassMode::Pair(..) 
=> { + llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(ccx, 0)); + llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(ccx, 1)); continue; } - push(arg); - } + PassMode::Cast(cast) => cast.llvm_type(ccx), + PassMode::Indirect(_) => arg.memory_ty(ccx).ptr_to(), + }; + llargument_tys.push(llarg_ty); } if self.variadic { @@ -989,52 +978,62 @@ impl<'a, 'tcx> FnType<'tcx> { } pub fn apply_attrs_llfn(&self, llfn: ValueRef) { - let mut i = if self.ret.is_indirect() { 1 } else { 0 }; - if !self.ret.is_ignore() { - self.ret.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); - } - i += 1; - let mut apply = |arg: &ArgType| { - if !arg.is_ignore() { - if arg.pad.is_some() { i += 1; } - arg.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); - i += 1; - } + let mut i = 0; + let mut apply = |attrs: &ArgAttributes| { + attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); + i += 1; }; + match self.ret.mode { + PassMode::Direct(ref attrs) => { + attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn); + } + PassMode::Indirect(ref attrs) => apply(attrs), + _ => {} + } for arg in &self.args { - if !arg.nested.is_empty() { - for arg in &arg.nested { - assert!(arg.nested.is_empty()); - apply(arg); + if arg.pad.is_some() { + apply(&ArgAttributes::new()); + } + match arg.mode { + PassMode::Ignore => {} + PassMode::Direct(ref attrs) | + PassMode::Indirect(ref attrs) => apply(attrs), + PassMode::Pair(ref a, ref b) => { + apply(a); + apply(b); } - continue; + PassMode::Cast(_) => apply(&ArgAttributes::new()), } - apply(arg); } } pub fn apply_attrs_callsite(&self, callsite: ValueRef) { - let mut i = if self.ret.is_indirect() { 1 } else { 0 }; - if !self.ret.is_ignore() { - self.ret.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); - } - i += 1; - let mut apply = |arg: &ArgType| { - if !arg.is_ignore() { - if arg.pad.is_some() { i += 1; } - arg.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); - i += 1; - } + let 
mut i = 0; + let mut apply = |attrs: &ArgAttributes| { + attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); + i += 1; }; + match self.ret.mode { + PassMode::Direct(ref attrs) => { + attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite); + } + PassMode::Indirect(ref attrs) => apply(attrs), + _ => {} + } for arg in &self.args { - if !arg.nested.is_empty() { - for arg in &arg.nested { - assert!(arg.nested.is_empty()); - apply(arg); + if arg.pad.is_some() { + apply(&ArgAttributes::new()); + } + match arg.mode { + PassMode::Ignore => {} + PassMode::Direct(ref attrs) | + PassMode::Indirect(ref attrs) => apply(attrs), + PassMode::Pair(ref a, ref b) => { + apply(a); + apply(b); } - continue; + PassMode::Cast(_) => apply(&ArgAttributes::new()), } - apply(arg); } if self.cconv != llvm::CCallConv { diff --git a/src/librustc_trans/attributes.rs b/src/librustc_trans/attributes.rs index b6ca1460a7d0a..745aa0da82900 100644 --- a/src/librustc_trans/attributes.rs +++ b/src/librustc_trans/attributes.rs @@ -116,7 +116,7 @@ pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRe naked(llfn, true); } else if attr.check_name("allocator") { Attribute::NoAlias.apply_llfn( - llvm::AttributePlace::ReturnValue(), llfn); + llvm::AttributePlace::ReturnValue, llfn); } else if attr.check_name("unwind") { unwind(llfn, true); } else if attr.check_name("rustc_allocator_nounwind") { diff --git a/src/librustc_trans/cabi_asmjs.rs b/src/librustc_trans/cabi_asmjs.rs index da13b75c414af..1664251cf897b 100644 --- a/src/librustc_trans/cabi_asmjs.rs +++ b/src/librustc_trans/cabi_asmjs.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use abi::{FnType, ArgType, ArgAttribute, LayoutExt, Uniform}; +use abi::{FnType, ArgType, LayoutExt, Uniform}; use context::CrateContext; // Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128 @@ -35,8 +35,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc fn classify_arg_ty(arg: &mut ArgType) { if arg.layout.is_aggregate() { - arg.make_indirect(); - arg.attrs.set(ArgAttribute::ByVal); + arg.make_indirect_byval(); } } diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index dc9f681af52f0..6fd0140c39901 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind}; +use abi::{ArgAttribute, FnType, LayoutExt, PassMode, Reg, RegKind}; use common::CrateContext; use rustc::ty::layout::{self, TyLayout}; @@ -82,8 +82,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, for arg in &mut fty.args { if arg.is_ignore() { continue; } if arg.layout.is_aggregate() { - arg.make_indirect(); - arg.attrs.set(ArgAttribute::ByVal); + arg.make_indirect_byval(); } else { arg.extend_integer_width_to(32); } @@ -102,7 +101,15 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let mut free_regs = 2; for arg in &mut fty.args { - if arg.is_ignore() || arg.is_indirect() { continue; } + let attrs = match arg.mode { + PassMode::Ignore | + PassMode::Indirect(_) => continue, + PassMode::Direct(ref mut attrs) => attrs, + PassMode::Pair(..) | + PassMode::Cast(_) => { + bug!("x86 shouldn't be passing arguments by {:?}", arg.mode) + } + }; // At this point we know this must be a primitive of sorts. 
let unit = arg.layout.homogeneous_aggregate(ccx).unwrap(); @@ -124,7 +131,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, free_regs -= size_in_regs; if arg.layout.size.bits() <= 32 && unit.kind == RegKind::Integer { - arg.attrs.set(ArgAttribute::InReg); + attrs.set(ArgAttribute::InReg); } if free_regs == 0 { diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index eeb69276500f5..81eb362ca46dc 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -11,7 +11,7 @@ // The classification code for the x86_64 ABI is taken from the clay language // https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp -use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind}; +use abi::{ArgType, CastTarget, FnType, LayoutExt, Reg, RegKind}; use context::CrateContext; use rustc::ty::layout::{self, TyLayout, Size}; @@ -214,11 +214,11 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType }; if in_mem { - arg.make_indirect(); if is_arg { - arg.attrs.set(ArgAttribute::ByVal); + arg.make_indirect_byval(); } else { // `sret` parameter thus one less integer register available + arg.make_indirect(); int_regs -= 1; } } else { diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 7d08090cd7e7c..5abc096407d5d 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -13,7 +13,7 @@ use intrinsics::{self, Intrinsic}; use llvm; use llvm::{ValueRef}; -use abi::{Abi, FnType}; +use abi::{Abi, FnType, PassMode}; use mir::lvalue::{LvalueRef, Alignment}; use mir::operand::{OperandRef, OperandValue}; use base::*; @@ -237,7 +237,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "volatile_load" => { let tp_ty = substs.type_at(0); let mut ptr = args[0].immediate(); - if let Some(ty) = fn_ty.ret.cast { + if let PassMode::Cast(ty) = fn_ty.ret.mode { ptr = bcx.pointercast(ptr, 
ty.llvm_type(ccx).ptr_to()); } let load = bcx.volatile_load(ptr); @@ -671,7 +671,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, }; if !fn_ty.ret.is_ignore() { - if let Some(ty) = fn_ty.ret.cast { + if let PassMode::Cast(ty) = fn_ty.ret.mode { let ptr = bcx.pointercast(llresult, ty.llvm_type(ccx).ptr_to()); bcx.store(llval, ptr, Some(ccx.align_of(ret_ty))); } else { diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 67e0f35b46ef9..f43eba36a8232 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -15,7 +15,7 @@ use rustc::ty::{self, TypeFoldable}; use rustc::ty::layout::{self, LayoutOf}; use rustc::traits; use rustc::mir; -use abi::{Abi, FnType, ArgType}; +use abi::{Abi, FnType, ArgType, PassMode}; use base; use callee; use builder::Builder; @@ -207,44 +207,47 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::TerminatorKind::Return => { - if self.fn_ty.ret.is_ignore() || self.fn_ty.ret.is_indirect() { - bcx.ret_void(); - return; - } + let llval = match self.fn_ty.ret.mode { + PassMode::Ignore | PassMode::Indirect(_) => { + bcx.ret_void(); + return; + } - let llval = if let Some(cast_ty) = self.fn_ty.ret.cast { - let op = match self.locals[mir::RETURN_POINTER] { - LocalRef::Operand(Some(op)) => op, - LocalRef::Operand(None) => bug!("use of return before def"), - LocalRef::Lvalue(tr_lvalue) => { - OperandRef { - val: Ref(tr_lvalue.llval, tr_lvalue.alignment), - layout: tr_lvalue.layout - } - } - }; - let llslot = match op.val { - Immediate(_) | Pair(..) => { - let scratch = LvalueRef::alloca(&bcx, self.fn_ty.ret.layout, "ret"); - op.val.store(&bcx, scratch); - scratch.llval - } - Ref(llval, align) => { - assert_eq!(align, Alignment::AbiAligned, - "return pointer is unaligned!"); - llval + PassMode::Direct(_) | PassMode::Pair(..) 
=> { + let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER)); + if let Ref(llval, align) = op.val { + bcx.load(llval, align.non_abi()) + } else { + op.immediate_or_packed_pair(&bcx) } - }; - let load = bcx.load( - bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()), - Some(self.fn_ty.ret.layout.align)); - load - } else { - let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER)); - if let Ref(llval, align) = op.val { - bcx.load(llval, align.non_abi()) - } else { - op.immediate_or_packed_pair(&bcx) + } + + PassMode::Cast(cast_ty) => { + let op = match self.locals[mir::RETURN_POINTER] { + LocalRef::Operand(Some(op)) => op, + LocalRef::Operand(None) => bug!("use of return before def"), + LocalRef::Lvalue(tr_lvalue) => { + OperandRef { + val: Ref(tr_lvalue.llval, tr_lvalue.alignment), + layout: tr_lvalue.layout + } + } + }; + let llslot = match op.val { + Immediate(_) | Pair(..) => { + let scratch = LvalueRef::alloca(&bcx, self.fn_ty.ret.layout, "ret"); + op.val.store(&bcx, scratch); + scratch.llval + } + Ref(llval, align) => { + assert_eq!(align, Alignment::AbiAligned, + "return pointer is unaligned!"); + llval + } + }; + bcx.load( + bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()), + Some(self.fn_ty.ret.layout.align)) } }; bcx.ret(llval); @@ -559,12 +562,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { for (i, arg) in first_args.iter().enumerate() { let mut op = self.trans_operand(&bcx, arg); - if i == 0 { - if let Pair(_, meta) = op.val { - if let Some(ty::InstanceDef::Virtual(_, idx)) = def { - llfn = Some(meth::VirtualIndex::from_index(idx) - .get_fn(&bcx, meta, &fn_ty)); - } + if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) { + if let Pair(data_ptr, meta) = op.val { + llfn = Some(meth::VirtualIndex::from_index(idx) + .get_fn(&bcx, meta, &fn_ty)); + llargs.push(data_ptr); + continue; } } @@ -604,21 +607,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { op: OperandRef<'tcx>, llargs: &mut Vec, arg: 
&ArgType<'tcx>) { - if let Pair(a, b) = op.val { - // Treat the values in a fat pointer separately. - if !arg.nested.is_empty() { - assert_eq!(arg.nested.len(), 2); - let imm_op = |x| OperandRef { - val: Immediate(x), - // We won't be checking the type again. - layout: bcx.ccx.layout_of(bcx.tcx().types.never) - }; - self.trans_argument(bcx, imm_op(a), llargs, &arg.nested[0]); - self.trans_argument(bcx, imm_op(b), llargs, &arg.nested[1]); - return; - } - } - // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { llargs.push(C_undef(ty.llvm_type(bcx.ccx))); @@ -628,15 +616,29 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return; } + if let PassMode::Pair(..) = arg.mode { + match op.val { + Pair(a, b) => { + llargs.push(a); + llargs.push(b); + return; + } + _ => bug!("trans_argument: {:?} invalid for pair arugment", op) + } + } + // Force by-ref if we have to load through a cast pointer. let (mut llval, align, by_ref) = match op.val { Immediate(_) | Pair(..) => { - if arg.is_indirect() || arg.cast.is_some() { - let scratch = LvalueRef::alloca(bcx, arg.layout, "arg"); - op.val.store(bcx, scratch); - (scratch.llval, Alignment::AbiAligned, true) - } else { - (op.immediate_or_packed_pair(bcx), Alignment::AbiAligned, false) + match arg.mode { + PassMode::Indirect(_) | PassMode::Cast(_) => { + let scratch = LvalueRef::alloca(bcx, arg.layout, "arg"); + op.val.store(bcx, scratch); + (scratch.llval, Alignment::AbiAligned, true) + } + _ => { + (op.immediate_or_packed_pair(bcx), Alignment::AbiAligned, false) + } } } Ref(llval, align @ Alignment::Packed(_)) if arg.is_indirect() => { @@ -653,7 +655,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. 
- if let Some(ty) = arg.cast { + if let PassMode::Cast(ty) = arg.mode { llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()), (align | Alignment::Packed(arg.layout.align)) .non_abi()); @@ -890,7 +892,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } DirectOperand(index) => { // If there is a cast, we have to store and reload. - let op = if ret_ty.cast.is_some() { + let op = if let PassMode::Cast(_) = ret_ty.mode { let tmp = LvalueRef::alloca(bcx, ret_ty.layout, "tmp_ret"); tmp.storage_live(bcx); ret_ty.store(bcx, llval, tmp); diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 6f9d32b1a37d6..7f3a430c418e9 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -22,7 +22,7 @@ use builder::Builder; use common::{CrateContext, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::Instance; -use abi::{ArgAttribute, FnType}; +use abi::{ArgAttribute, FnType, PassMode}; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -429,55 +429,52 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let arg = &mircx.fn_ty.args[idx]; idx += 1; - let lvalue = if arg.is_indirect() { - // Don't copy an indirect argument to an alloca, the caller - // already put it in a temporary alloca and gave it up - // FIXME: lifetimes - if arg.pad.is_some() { - llarg_idx += 1; - } - let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); - bcx.set_value_name(llarg, &name); - llarg_idx += 1; - LvalueRef::new_sized(llarg, arg.layout, Alignment::AbiAligned) - } else if !lvalue_locals.contains(local.index()) && - !arg.nested.is_empty() { - assert_eq!(arg.nested.len(), 2); - let (a, b) = (&arg.nested[0], &arg.nested[1]); - assert!(!a.is_ignore() && a.cast.is_none() && a.pad.is_none()); - assert!(!b.is_ignore() && b.cast.is_none() && b.pad.is_none()); - - let a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); - 
bcx.set_value_name(a, &(name.clone() + ".0")); - llarg_idx += 1; - - let b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); - bcx.set_value_name(b, &(name + ".1")); + if arg.pad.is_some() { llarg_idx += 1; + } - return LocalRef::Operand(Some(OperandRef { - val: OperandValue::Pair(a, b), - layout: arg.layout - })); - } else if !lvalue_locals.contains(local.index()) && - !arg.is_indirect() && arg.cast.is_none() && - arg_scope.is_none() { - if arg.is_ignore() { - return LocalRef::new_operand(bcx.ccx, arg.layout); - } - + if arg_scope.is_none() && !lvalue_locals.contains(local.index()) { // We don't have to cast or keep the argument in the alloca. // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead // of putting everything in allocas just so we can use llvm.dbg.declare. - if arg.pad.is_some() { - llarg_idx += 1; + let local = |op| LocalRef::Operand(Some(op)); + match arg.mode { + PassMode::Ignore => { + return local(OperandRef::new_zst(bcx.ccx, arg.layout)); + } + PassMode::Direct(_) => { + let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + bcx.set_value_name(llarg, &name); + llarg_idx += 1; + return local( + OperandRef::from_immediate_or_packed_pair(bcx, llarg, arg.layout)); + } + PassMode::Pair(..) => { + let a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + bcx.set_value_name(a, &(name.clone() + ".0")); + llarg_idx += 1; + + let b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + bcx.set_value_name(b, &(name + ".1")); + llarg_idx += 1; + + return local(OperandRef { + val: OperandValue::Pair(a, b), + layout: arg.layout + }); + } + _ => {} } + } + + let lvalue = if arg.is_indirect() { + // Don't copy an indirect argument to an alloca, the caller + // already put it in a temporary alloca and gave it up. 
+ // FIXME: lifetimes let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); bcx.set_value_name(llarg, &name); llarg_idx += 1; - return LocalRef::Operand(Some( - OperandRef::from_immediate_or_packed_pair(bcx, llarg, arg.layout) - )); + LvalueRef::new_sized(llarg, arg.layout, Alignment::AbiAligned) } else { let tmp = LvalueRef::alloca(bcx, arg.layout, &name); arg.store_fn_arg(bcx, &mut llarg_idx, tmp); @@ -489,16 +486,19 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // The Rust ABI passes indirect variables using a pointer and a manual copy, so we // need to insert a deref here, but the C ABI uses a pointer and a copy using the // byval attribute, for which LLVM does the deref itself, so we must not add it. - let variable_access = if arg.is_indirect() && - !arg.attrs.contains(ArgAttribute::ByVal) { - VariableAccess::IndirectVariable { - alloca: lvalue.llval, - address_operations: &deref_op, - } - } else { - VariableAccess::DirectVariable { alloca: lvalue.llval } + let mut variable_access = VariableAccess::DirectVariable { + alloca: lvalue.llval }; + if let PassMode::Indirect(ref attrs) = arg.mode { + if !attrs.contains(ArgAttribute::ByVal) { + variable_access = VariableAccess::IndirectVariable { + alloca: lvalue.llval, + address_operations: &deref_op, + }; + } + } + declare_local( bcx, &mircx.debug_context, From fa67abd12707c34a2def10247c22c336f82cd2c2 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 10 Oct 2017 20:55:21 +0300 Subject: [PATCH 59/69] rustc: don't special-case Box as having a pointer layout. 
--- src/liballoc/boxed.rs | 18 +++- src/librustc/ty/layout.rs | 168 +++++++++++++++------------------- src/librustc_trans/type_of.rs | 21 +++-- 3 files changed, 98 insertions(+), 109 deletions(-) diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index 79292d390e5d2..2226cee6e3684 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -151,7 +151,7 @@ impl Place for IntermediateBox { unsafe fn finalize(b: IntermediateBox) -> Box { let p = b.ptr as *mut T; mem::forget(b); - mem::transmute(p) + Box::from_raw(p) } fn make_place() -> IntermediateBox { @@ -300,7 +300,10 @@ impl Box { issue = "27730")] #[inline] pub unsafe fn from_unique(u: Unique) -> Self { - mem::transmute(u) + #[cfg(stage0)] + return mem::transmute(u); + #[cfg(not(stage0))] + return Box(u); } /// Consumes the `Box`, returning the wrapped raw pointer. @@ -362,7 +365,14 @@ impl Box { issue = "27730")] #[inline] pub fn into_unique(b: Box) -> Unique { - unsafe { mem::transmute(b) } + #[cfg(stage0)] + return unsafe { mem::transmute(b) }; + #[cfg(not(stage0))] + return { + let unique = b.0; + mem::forget(b); + unique + }; } } @@ -627,7 +637,7 @@ impl Box { pub fn downcast(self) -> Result, Box> { >::downcast(self).map_err(|s| unsafe { // reapply the Send marker - mem::transmute::, Box>(s) + Box::from_raw(Box::into_raw(s) as *mut (Any + Send)) }) } } diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index fc5d421394959..761897f626660 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1152,37 +1152,6 @@ impl<'a, 'tcx> CachedLayout { }; assert!(!ty.has_infer_types()); - let ptr_layout = |pointee: Ty<'tcx>| { - let mut data_ptr = scalar_unit(Pointer); - if !ty.is_unsafe_ptr() { - data_ptr.valid_range.start = 1; - } - - let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env); - if pointee.is_sized(tcx, param_env, DUMMY_SP) { - return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr))); - } - - let unsized_part = 
tcx.struct_tail(pointee); - let metadata = match unsized_part.sty { - ty::TyForeign(..) => { - return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr))); - } - ty::TySlice(_) | ty::TyStr => { - scalar_unit(Int(dl.ptr_sized_integer(), false)) - } - ty::TyDynamic(..) => { - let mut vtable = scalar_unit(Pointer); - vtable.valid_range.start = 1; - vtable - } - _ => return Err(LayoutError::Unknown(unsized_part)) - }; - - // Effectively a (ptr, meta) tuple. - Ok(tcx.intern_layout(scalar_pair(data_ptr, metadata))) - }; - Ok(match ty.sty { // Basic scalars. ty::TyBool => { @@ -1219,10 +1188,34 @@ impl<'a, 'tcx> CachedLayout { // Potentially-fat pointers. ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { - ptr_layout(pointee)? - } - ty::TyAdt(def, _) if def.is_box() => { - ptr_layout(ty.boxed_ty())? + let mut data_ptr = scalar_unit(Pointer); + if !ty.is_unsafe_ptr() { + data_ptr.valid_range.start = 1; + } + + let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env); + if pointee.is_sized(tcx, param_env, DUMMY_SP) { + return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr))); + } + + let unsized_part = tcx.struct_tail(pointee); + let metadata = match unsized_part.sty { + ty::TyForeign(..) => { + return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr))); + } + ty::TySlice(_) | ty::TyStr => { + scalar_unit(Int(dl.ptr_sized_integer(), false)) + } + ty::TyDynamic(..) => { + let mut vtable = scalar_unit(Pointer); + vtable.valid_range.start = 1; + vtable + } + _ => return Err(LayoutError::Unknown(unsized_part)) + }; + + // Effectively a (ptr, meta) tuple. + tcx.intern_layout(scalar_pair(data_ptr, metadata)) } // Arrays and slices. 
@@ -1861,32 +1854,25 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { Err(err) => err }; - let ptr_skeleton = |pointee: Ty<'tcx>| { - let non_zero = !ty.is_unsafe_ptr(); - let tail = tcx.struct_tail(pointee); - match tail.sty { - ty::TyParam(_) | ty::TyProjection(_) => { - assert!(tail.has_param_types() || tail.has_self_ty()); - Ok(SizeSkeleton::Pointer { - non_zero, - tail: tcx.erase_regions(&tail) - }) - } - _ => { - bug!("SizeSkeleton::compute({}): layout errored ({}), yet \ - tail `{}` is not a type parameter or a projection", - ty, err, tail) - } - } - }; - match ty.sty { ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { - ptr_skeleton(pointee) - } - ty::TyAdt(def, _) if def.is_box() => { - ptr_skeleton(ty.boxed_ty()) + let non_zero = !ty.is_unsafe_ptr(); + let tail = tcx.struct_tail(pointee); + match tail.sty { + ty::TyParam(_) | ty::TyProjection(_) => { + assert!(tail.has_param_types() || tail.has_self_ty()); + Ok(SizeSkeleton::Pointer { + non_zero, + tail: tcx.erase_regions(&tail) + }) + } + _ => { + bug!("SizeSkeleton::compute({}): layout errored ({}), yet \ + tail `{}` is not a type parameter or a projection", + ty, err, tail) + } + } } ty::TyAdt(def, substs) => { @@ -2148,39 +2134,6 @@ impl<'a, 'tcx> TyLayout<'tcx> { C::TyLayout: MaybeResult> { let tcx = cx.tcx(); - let ptr_field_layout = |pointee: Ty<'tcx>| { - assert!(i < 2); - - // Reuse the fat *T type as its own thin pointer data field. - // This provides information about e.g. DST struct pointees - // (which may have no non-DST form), and will work as long - // as the `Abi` or `FieldPlacement` is checked by users. 
- if i == 0 { - let nil = tcx.mk_nil(); - let ptr_ty = if self.ty.is_unsafe_ptr() { - tcx.mk_mut_ptr(nil) - } else { - tcx.mk_mut_ref(tcx.types.re_static, nil) - }; - return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| { - ptr_layout.ty = self.ty; - ptr_layout - }); - } - - let meta_ty = match tcx.struct_tail(pointee).sty { - ty::TySlice(_) | - ty::TyStr => tcx.types.usize, - ty::TyDynamic(..) => { - // FIXME(eddyb) use an usize/fn() array with - // the correct number of vtables slots. - tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil()) - } - _ => bug!("TyLayout::field_type({:?}): not applicable", self) - }; - cx.layout_of(meta_ty) - }; - cx.layout_of(match self.ty.sty { ty::TyBool | ty::TyChar | @@ -2198,10 +2151,35 @@ impl<'a, 'tcx> TyLayout<'tcx> { // Potentially-fat pointers. ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { - return ptr_field_layout(pointee); - } - ty::TyAdt(def, _) if def.is_box() => { - return ptr_field_layout(self.ty.boxed_ty()); + assert!(i < 2); + + // Reuse the fat *T type as its own thin pointer data field. + // This provides information about e.g. DST struct pointees + // (which may have no non-DST form), and will work as long + // as the `Abi` or `FieldPlacement` is checked by users. + if i == 0 { + let nil = tcx.mk_nil(); + let ptr_ty = if self.ty.is_unsafe_ptr() { + tcx.mk_mut_ptr(nil) + } else { + tcx.mk_mut_ref(tcx.types.re_static, nil) + }; + return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| { + ptr_layout.ty = self.ty; + ptr_layout + }); + } + + match tcx.struct_tail(pointee).sty { + ty::TySlice(_) | + ty::TyStr => tcx.types.usize, + ty::TyDynamic(..) => { + // FIXME(eddyb) use an usize/fn() array with + // the correct number of vtables slots. + tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil()) + } + _ => bug!("TyLayout::field_type({:?}): not applicable", self) + } } // Arrays and slices. 
diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 35da258cdb346..60e5e4ced2c30 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -321,7 +321,8 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { return self.field(ccx, index).llvm_type(ccx); } ty::TyAdt(def, _) if def.is_box() => { - return self.field(ccx, index).llvm_type(ccx); + let ptr_ty = ccx.tcx().mk_mut_ptr(self.ty.boxed_ty()); + return ccx.layout_of(ptr_ty).scalar_pair_element_llvm_type(ccx, index); } _ => {} } @@ -438,15 +439,6 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { }); } - ty::TyAdt(def, _) if def.is_box() && offset.bytes() == 0 => { - let (size, align) = ccx.size_and_align_of(self.ty.boxed_ty()); - result = Some(PointeeInfo { - size, - align, - safe: Some(PointerKind::UniqueOwned) - }); - } - _ => { let mut data_variant = match self.variants { layout::Variants::NicheFilling { dataful_variant, .. } => { @@ -491,6 +483,15 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } } } + + // FIXME(eddyb) This should be for `ptr::Unique`, not `Box`. + if let Some(ref mut pointee) = result { + if let ty::TyAdt(def, _) = self.ty.sty { + if def.is_box() && offset.bytes() == 0 { + pointee.safe = Some(PointerKind::UniqueOwned); + } + } + } } } From 801a1a0fc10706ac908e29d66a598ff121a923cd Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 10 Oct 2017 22:04:13 +0300 Subject: [PATCH 60/69] rustc_trans: remove type_is_fat_ptr and its uses. 
--- src/librustc_trans/common.rs | 13 --------- src/librustc_trans/mir/constant.rs | 17 ++++-------- src/librustc_trans/mir/rvalue.rs | 44 ++++++++++++++---------------- 3 files changed, 27 insertions(+), 47 deletions(-) diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 8a2c1ed2dc2f2..7bd8a0c81ee34 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -41,19 +41,6 @@ use syntax_pos::{Span, DUMMY_SP}; pub use context::{CrateContext, SharedCrateContext}; -pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - match ty.sty { - ty::TyRef(_, ty::TypeAndMut { ty, .. }) | - ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => { - !ccx.shared().type_is_sized(ty) - } - ty::TyAdt(def, _) if def.is_box() => { - !ccx.shared().type_is_sized(ty.boxed_ty()) - } - _ => false - } -} - pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { ty.needs_drop(tcx, ty::ParamEnv::empty(traits::Reveal::All)) } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 54907cb747c75..0423a684399c2 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -674,10 +674,6 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { operand.llval } mir::CastKind::Unsize => { - // unsize targets other than to a fat pointer currently - // can't be in constants. - assert!(common::type_is_fat_ptr(self.ccx, cast_ty)); - let pointee_ty = operand.ty.builtin_deref(true, ty::NoPreference) .expect("consts: unsizing got non-pointer type").ty; let (base, old_info) = if !self.ccx.shared().type_is_sized(pointee_ty) { @@ -760,19 +756,18 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } } mir::CastKind::Misc => { // Casts from a fat-ptr. 
- if common::type_is_fat_ptr(self.ccx, operand.ty) { + let l = self.ccx.layout_of(operand.ty); + let cast = self.ccx.layout_of(cast_ty); + if l.is_llvm_scalar_pair() { let (data_ptr, meta) = operand.get_fat_ptr(self.ccx); - if common::type_is_fat_ptr(self.ccx, cast_ty) { - let thin_ptr = self.ccx.layout_of(cast_ty) - .field(self.ccx, abi::FAT_PTR_ADDR); + if cast.is_llvm_scalar_pair() { let data_cast = consts::ptrcast(data_ptr, - thin_ptr.llvm_type(self.ccx)); + cast.scalar_pair_element_llvm_type(self.ccx, 0)); C_fat_ptr(self.ccx, data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - let llcast_ty = self.ccx.layout_of(cast_ty) - .immediate_llvm_type(self.ccx); + let llcast_ty = cast.immediate_llvm_type(self.ccx); consts::ptrcast(data_ptr, llcast_ty) } } else { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index bf44434cafea7..4781425f491f8 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -18,7 +18,6 @@ use rustc_apfloat::{ieee, Float, Status, Round}; use rustc_const_math::MAX_F32_PLUS_HALF_ULP; use std::{u128, i128}; -use abi; use base; use builder::Builder; use callee; @@ -54,10 +53,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx } - mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => { - let cast_ty = self.monomorphize(&cast_ty); - - if common::type_is_fat_ptr(bcx.ccx, cast_ty) { + mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => { + // The destination necessarily contains a fat pointer, so if + // it's a scalar pair, it's a fat pointer or newtype thereof. + if dest.layout.is_llvm_scalar_pair() { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. 
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); @@ -223,6 +222,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { operand.val } mir::CastKind::Unsize => { + assert!(cast.is_llvm_scalar_pair()); match operand.val { OperandValue::Pair(lldata, llextra) => { // unsize from a fat pointer - this is a @@ -248,12 +248,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } } - mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.layout.ty) => { + mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => { if let OperandValue::Pair(data_ptr, meta) = operand.val { - if common::type_is_fat_ptr(bcx.ccx, cast.ty) { - let thin_ptr = cast.field(bcx.ccx, abi::FAT_PTR_ADDR); + if cast.is_llvm_scalar_pair() { let data_cast = bcx.pointercast(data_ptr, - thin_ptr.llvm_type(bcx.ccx)); + cast.scalar_pair_element_llvm_type(bcx.ccx, 0)); OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and @@ -368,22 +367,21 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { let lhs = self.trans_operand(&bcx, lhs); let rhs = self.trans_operand(&bcx, rhs); - let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.layout.ty) { - match (lhs.val, rhs.val) { - (OperandValue::Pair(lhs_addr, lhs_extra), - OperandValue::Pair(rhs_addr, rhs_extra)) => { - self.trans_fat_ptr_binop(&bcx, op, - lhs_addr, lhs_extra, - rhs_addr, rhs_extra, - lhs.layout.ty) - } - _ => bug!() + let llresult = match (lhs.val, rhs.val) { + (OperandValue::Pair(lhs_addr, lhs_extra), + OperandValue::Pair(rhs_addr, rhs_extra)) => { + self.trans_fat_ptr_binop(&bcx, op, + lhs_addr, lhs_extra, + rhs_addr, rhs_extra, + lhs.layout.ty) } - } else { - self.trans_scalar_binop(&bcx, op, - lhs.immediate(), rhs.immediate(), - lhs.layout.ty) + (OperandValue::Immediate(lhs_val), + OperandValue::Immediate(rhs_val)) => { + self.trans_scalar_binop(&bcx, op, lhs_val, rhs_val, lhs.layout.ty) + } + + _ => bug!() }; let operand = OperandRef { 
val: OperandValue::Immediate(llresult), From d893285b657b39bc9703270179a50e3562dd38e8 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 10 Oct 2017 23:38:07 +0300 Subject: [PATCH 61/69] rustc: use layout::Abi::ScalarPair for structs in more cases. --- src/librustc/ty/layout.rs | 96 ++++++++++++++++++++++----------------- 1 file changed, 54 insertions(+), 42 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 761897f626660..81d25a772183e 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1078,19 +1078,28 @@ impl<'a, 'tcx> CachedLayout { packed }; - // Unpack newtype ABIs. - if sized && optimize && size.bytes() > 0 { - // All but one field must be ZSTs, and so they all start at 0. - if offsets.iter().all(|o| o.bytes() == 0) { - let mut non_zst_fields = fields.iter().filter(|f| !f.is_zst()); - - // We have exactly one non-ZST field. - match (non_zst_fields.next(), non_zst_fields.next()) { - (Some(field), None) => { - // Field size matches and it has a scalar or scalar pair ABI. - if size == field.size { + // Unpack newtype ABIs and find scalar pairs. + if sized && size.bytes() > 0 { + // All other fields must be ZSTs, and we need them to all start at 0. + let mut zst_offsets = + offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst()); + if zst_offsets.all(|(_, o)| o.bytes() == 0) { + let mut non_zst_fields = + fields.iter().enumerate().filter(|&(_, f)| !f.is_zst()); + + match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) { + // We have exactly one non-ZST field. + (Some((i, field)), None, None) => { + // Field fills the struct and it has a scalar or scalar pair ABI. + if offsets[i].bytes() == 0 && size == field.size { match field.abi { - Abi::Scalar(_) | + // For plain scalars we can't unpack newtypes + // for `#[repr(C)]`, as that affects C ABIs. 
+ Abi::Scalar(_) if optimize => { + abi = field.abi.clone(); + } + // But scalar pairs are Rust-specific and get + // treated as aggregates by C ABIs anyway. Abi::ScalarPair(..) => { abi = field.abi.clone(); } @@ -1098,40 +1107,43 @@ impl<'a, 'tcx> CachedLayout { } } } - _ => {} - } - } - } - // Look for a scalar pair, as an ABI optimization. - // FIXME(eddyb) ignore extra ZST fields and field ordering. - if sized && !packed && fields.len() == 2 { - match (&fields[0].abi, &fields[1].abi) { - (&Abi::Scalar(ref a), &Abi::Scalar(ref b)) => { - let pair = scalar_pair(a.clone(), b.clone()); - let pair_offsets = match pair.fields { - FieldPlacement::Arbitrary { - ref offsets, - ref memory_index - } => { - assert_eq!(memory_index, &[0, 1]); - offsets + // Two non-ZST fields, and they're both scalars. + (Some((i, &TyLayout { + cached: &CachedLayout { abi: Abi::Scalar(ref a), .. }, .. + })), Some((j, &TyLayout { + cached: &CachedLayout { abi: Abi::Scalar(ref b), .. }, .. + })), None) => { + // Order by the memory placement, not source order. + let ((i, a), (j, b)) = if offsets[i] < offsets[j] { + ((i, a), (j, b)) + } else { + ((j, b), (i, a)) + }; + let pair = scalar_pair(a.clone(), b.clone()); + let pair_offsets = match pair.fields { + FieldPlacement::Arbitrary { + ref offsets, + ref memory_index + } => { + assert_eq!(memory_index, &[0, 1]); + offsets + } + _ => bug!() + }; + if offsets[i] == pair_offsets[0] && + offsets[j] == pair_offsets[1] && + align == pair.align && + primitive_align == pair.primitive_align && + size == pair.size { + // We can use `ScalarPair` only when it matches our + // already computed layout (including `#[repr(C)]`). 
+ abi = pair.abi; } - _ => bug!() - }; - if offsets[0] == pair_offsets[0] && - offsets[1] == pair_offsets[1] && - memory_index[0] == 0 && - memory_index[1] == 1 && - align == pair.align && - primitive_align == pair.primitive_align && - size == pair.size { - // We can use `ScalarPair` only when it matches our - // already computed layout (including `#[repr(C)]`). - abi = pair.abi; } + + _ => {} } - _ => {} } } From 8437d7c0f13a1ebb4cb14d7174b11b44a1a45695 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Thu, 12 Oct 2017 03:55:49 +0300 Subject: [PATCH 62/69] rustc: extend the niche-filling enum optimization past 2 variants. --- src/librustc/ty/layout.rs | 231 ++++++++++-------- src/librustc_trans/debuginfo/metadata.rs | 6 +- src/librustc_trans/mir/constant.rs | 9 +- src/librustc_trans/mir/lvalue.rs | 47 ++-- src/test/ui/print_type_sizes/niche-filling.rs | 10 + .../ui/print_type_sizes/niche-filling.stdout | 27 ++ 6 files changed, 210 insertions(+), 120 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 81d25a772183e..c1ce6143d109a 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -805,19 +805,19 @@ pub enum Variants { variants: Vec, }, - /// Two cases distinguished by a niche (a value invalid for a type): + /// Multiple cases distinguished by a niche (values invalid for a type): /// the variant `dataful_variant` contains a niche at an arbitrary - /// offset (field 0 of the enum), which is set to `niche_value` - /// for the other variant. + /// offset (field 0 of the enum), which for a variant with discriminant + /// `d` is set to `(d - niche_variants.start).wrapping_add(niche_start)`. /// /// For example, `Option<(usize, &T)>` is represented such that /// `None` has a null pointer for the second tuple field, and /// `Some` is the identity function (with a non-null reference). 
NicheFilling { dataful_variant: usize, - niche_variant: usize, + niche_variants: RangeInclusive, niche: Scalar, - niche_value: u128, + niche_start: u128, variants: Vec, } } @@ -1372,11 +1372,11 @@ impl<'a, 'tcx> CachedLayout { }).collect::, _>>() }).collect::, _>>()?; - let (inh_first, inh_second, inh_third) = { + let (inh_first, inh_second) = { let mut inh_variants = (0..variants.len()).filter(|&v| { variants[v].iter().all(|f| f.abi != Abi::Uninhabited) }); - (inh_variants.next(), inh_variants.next(), inh_variants.next()) + (inh_variants.next(), inh_variants.next()) }; if inh_first.is_none() { // Uninhabited because it has no variants, or only uninhabited ones. @@ -1472,68 +1472,94 @@ impl<'a, 'tcx> CachedLayout { let no_explicit_discriminants = def.variants.iter().enumerate() .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i)); - if inh_second.is_some() && inh_third.is_none() && - !def.repr.inhibit_enum_layout_opt() && - no_explicit_discriminants { - // Nullable pointer optimization - let (a, b) = (inh_first.unwrap(), inh_second.unwrap()); - for &(i, other) in &[(a, b), (b, a)] { - if !variants[other].iter().all(|f| f.is_zst()) { - continue; + // Niche-filling enum optimization. + if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants { + let mut dataful_variant = None; + let mut niche_variants = usize::max_value()..=0; + + // Find one non-ZST variant. 
+ 'variants: for (v, fields) in variants.iter().enumerate() { + for f in fields { + if f.abi == Abi::Uninhabited { + continue 'variants; + } + if !f.is_zst() { + if dataful_variant.is_none() { + dataful_variant = Some(v); + continue 'variants; + } else { + dataful_variant = None; + break 'variants; + } + } + } + if niche_variants.start > v { + niche_variants.start = v; } + niche_variants.end = v; + } + + if niche_variants.start > niche_variants.end { + dataful_variant = None; + } + if let Some(i) = dataful_variant { + let count = (niche_variants.end - niche_variants.start + 1) as u128; for (field_index, field) in variants[i].iter().enumerate() { - if let Some((offset, niche, niche_value)) = field.find_niche(cx)? { - let st = variants.iter().enumerate().map(|(j, v)| { - let mut st = univariant_uninterned(v, - &def.repr, StructKind::AlwaysSized)?; - st.variants = Variants::Single { index: j }; - Ok(st) - }).collect::, _>>()?; - - let offset = st[i].fields.offset(field_index) + offset; - let CachedLayout { - size, - mut align, - mut primitive_align, - .. - } = st[i]; - - let mut niche_align = niche.value.align(dl); - let abi = if offset.bytes() == 0 && niche.value.size(dl) == size { - Abi::Scalar(niche.clone()) - } else { - let mut packed = st[i].abi.is_packed(); - if offset.abi_align(niche_align) != offset { - packed = true; - niche_align = dl.i8_align; - } - Abi::Aggregate { - sized: true, - packed - } + let (offset, niche, niche_start) = + match field.find_niche(cx, count)? 
{ + Some(niche) => niche, + None => continue }; - align = align.max(niche_align); - primitive_align = primitive_align.max(niche_align); - - return Ok(tcx.intern_layout(CachedLayout { - variants: Variants::NicheFilling { - dataful_variant: i, - niche_variant: other, - niche, - niche_value, - variants: st, - }, - fields: FieldPlacement::Arbitrary { - offsets: vec![offset], - memory_index: vec![0] - }, - abi, - size, - align, - primitive_align - })); - } + let st = variants.iter().enumerate().map(|(j, v)| { + let mut st = univariant_uninterned(v, + &def.repr, StructKind::AlwaysSized)?; + st.variants = Variants::Single { index: j }; + Ok(st) + }).collect::, _>>()?; + + let offset = st[i].fields.offset(field_index) + offset; + let CachedLayout { + size, + mut align, + mut primitive_align, + .. + } = st[i]; + + let mut niche_align = niche.value.align(dl); + let abi = if offset.bytes() == 0 && niche.value.size(dl) == size { + Abi::Scalar(niche.clone()) + } else { + let mut packed = st[i].abi.is_packed(); + if offset.abi_align(niche_align) != offset { + packed = true; + niche_align = dl.i8_align; + } + Abi::Aggregate { + sized: true, + packed + } + }; + align = align.max(niche_align); + primitive_align = primitive_align.max(niche_align); + + return Ok(tcx.intern_layout(CachedLayout { + variants: Variants::NicheFilling { + dataful_variant: i, + niche_variants, + niche, + niche_start, + variants: st, + }, + fields: FieldPlacement::Arbitrary { + offsets: vec![offset], + memory_index: vec![0] + }, + abi, + size, + align, + primitive_align + })); } } } @@ -2267,50 +2293,50 @@ impl<'a, 'tcx> TyLayout<'tcx> { } /// Find the offset of a niche leaf field, starting from - /// the given type and recursing through aggregates. + /// the given type and recursing through aggregates, which + /// has at least `count` consecutive invalid values. /// The tuple is `(offset, scalar, niche_value)`. // FIXME(eddyb) traverse already optimized enums. 
- fn find_niche(&self, cx: C) + fn find_niche(&self, cx: C, count: u128) -> Result, LayoutError<'tcx>> where C: LayoutOf, TyLayout = Result>> + HasTyCtxt<'tcx> { let scalar_component = |scalar: &Scalar, offset| { - // FIXME(eddyb) support negative/wrap-around discriminant ranges. - let Scalar { value, ref valid_range } = *scalar; - if valid_range.start < valid_range.end { - let bits = value.size(cx).bits(); - assert!(bits <= 128); - let max_value = !0u128 >> (128 - bits); - if valid_range.start > 0 { - let niche = valid_range.start - 1; - Ok(Some((offset, Scalar { - value, - valid_range: niche..=valid_range.end - }, niche))) - } else if valid_range.end < max_value { - let niche = valid_range.end + 1; - Ok(Some((offset, Scalar { - value, - valid_range: valid_range.start..=niche - }, niche))) - } else { - Ok(None) - } + let Scalar { value, valid_range: ref v } = *scalar; + + let bits = value.size(cx).bits(); + assert!(bits <= 128); + let max_value = !0u128 >> (128 - bits); + + // Find out how many values are outside the valid range. + let niches = if v.start <= v.end { + v.start + (max_value - v.end) } else { - Ok(None) + v.start - v.end - 1 + }; + + // Give up if we can't fit `count` consecutive niches. + if count > niches { + return None; } + + let niche_start = v.end.wrapping_add(1) & max_value; + let niche_end = v.end.wrapping_add(count) & max_value; + Some((offset, Scalar { + value, + valid_range: v.start..=niche_end + }, niche_start)) }; match self.abi { Abi::Scalar(ref scalar) => { - return scalar_component(scalar, Size::from_bytes(0)); + return Ok(scalar_component(scalar, Size::from_bytes(0))); } Abi::ScalarPair(ref a, ref b) => { - if let Some(result) = scalar_component(a, Size::from_bytes(0))? 
{ - return Ok(Some(result)); - } - return scalar_component(b, a.value.size(cx).abi_align(b.value.align(cx))); + return Ok(scalar_component(a, Size::from_bytes(0)).or_else(|| { + scalar_component(b, a.value.size(cx).abi_align(b.value.align(cx))) + })); } _ => {} } @@ -2323,13 +2349,13 @@ impl<'a, 'tcx> TyLayout<'tcx> { return Ok(None); } } - if let FieldPlacement::Array { count, .. } = self.fields { - if count > 0 { - return self.field(cx, 0)?.find_niche(cx); + if let FieldPlacement::Array { .. } = self.fields { + if self.fields.count() > 0 { + return self.field(cx, 0)?.find_niche(cx, count); } } for i in 0..self.fields.count() { - let r = self.field(cx, i)?.find_niche(cx)?; + let r = self.field(cx, i)?.find_niche(cx, count)?; if let Some((offset, scalar, niche_value)) = r { let offset = self.fields.offset(i) + offset; return Ok(Some((offset, scalar, niche_value))); @@ -2359,15 +2385,16 @@ impl<'gcx> HashStable> for Variants { } NicheFilling { dataful_variant, - niche_variant, + niche_variants: RangeInclusive { start, end }, ref niche, - niche_value, + niche_start, ref variants, } => { dataful_variant.hash_stable(hcx, hasher); - niche_variant.hash_stable(hcx, hasher); + start.hash_stable(hcx, hasher); + end.hash_stable(hcx, hasher); niche.hash_stable(hcx, hasher); - niche_value.hash_stable(hcx, hasher); + niche_start.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); } } diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 25a35274d3233..b2ad538a8ab29 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -1186,7 +1186,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } }).collect() } - layout::Variants::NicheFilling { dataful_variant, niche_variant, .. } => { + layout::Variants::NicheFilling { dataful_variant, ref niche_variants, .. 
} => { let variant = self.layout.for_variant(cx, dataful_variant); // Create a description of the non-null variant let (variant_type_metadata, member_description_factory) = @@ -1209,6 +1209,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { let mut name = String::from("RUST$ENCODED$ENUM$"); // HACK(eddyb) the debuggers should just handle offset+size // of discriminant instead of us having to recover its path. + // Right now it's not even going to work for `niche_start > 0`, + // and for multiple niche variants it only supports the first. fn compute_field_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &mut String, layout: TyLayout<'tcx>, @@ -1231,7 +1233,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { self.layout, self.layout.fields.offset(0), self.layout.field(cx, 0).size); - name.push_str(&adt.variants[niche_variant].name.as_str()); + name.push_str(&adt.variants[niche_variants.start].name.as_str()); // Create the (singleton) list of descriptions of union members. vec![ diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 0423a684399c2..8c013330e5bcb 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -1163,12 +1163,19 @@ fn trans_const_adt<'a, 'tcx>( build_const_struct(ccx, l.for_variant(ccx, variant_index), vals, Some(discr)) } } - layout::Variants::NicheFilling { dataful_variant, niche_value, .. } => { + layout::Variants::NicheFilling { + dataful_variant, + ref niche_variants, + niche_start, + .. + } => { if variant_index == dataful_variant { build_const_struct(ccx, l.for_variant(ccx, dataful_variant), vals, None) } else { let niche = l.field(ccx, 0); let niche_llty = niche.llvm_type(ccx); + let niche_value = ((variant_index - niche_variants.start) as u128) + .wrapping_add(niche_start); // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_value == 0 { // HACK(eddyb) Using `C_null` as it works on all types. 
diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 71d2fda09c9d2..6478bd2c84880 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -328,21 +328,31 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } layout::Variants::NicheFilling { dataful_variant, - niche_variant, - niche_value, + ref niche_variants, + niche_start, .. } => { - let niche_llty = discr.layout.llvm_type(bcx.ccx); - // FIXME(eddyb) Check the actual primitive type here. - let niche_llval = if niche_value == 0 { - // HACK(eddyb) Using `C_null` as it works on all types. - C_null(niche_llty) + let niche_llty = discr.layout.immediate_llvm_type(bcx.ccx); + if niche_variants.start == niche_variants.end { + // FIXME(eddyb) Check the actual primitive type here. + let niche_llval = if niche_start == 0 { + // HACK(eddyb) Using `C_null` as it works on all types. + C_null(niche_llty) + } else { + C_uint_big(niche_llty, niche_start) + }; + bcx.select(bcx.icmp(llvm::IntEQ, lldiscr, niche_llval), + C_uint(cast_to, niche_variants.start as u64), + C_uint(cast_to, dataful_variant as u64)) } else { - C_uint_big(niche_llty, niche_value) - }; - bcx.select(bcx.icmp(llvm::IntEQ, lldiscr, niche_llval), - C_uint(cast_to, niche_variant as u64), - C_uint(cast_to, dataful_variant as u64)) + // Rebase from niche values to discriminant values. + let delta = niche_start.wrapping_sub(niche_variants.start as u128); + let lldiscr = bcx.sub(lldiscr, C_uint_big(niche_llty, delta)); + let lldiscr_max = C_uint(niche_llty, niche_variants.end as u64); + bcx.select(bcx.icmp(llvm::IntULE, lldiscr, lldiscr_max), + bcx.intcast(lldiscr, cast_to, false), + C_uint(cast_to, dataful_variant as u64)) + } } } } @@ -367,7 +377,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> { bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64), ptr.llval, ptr.alignment.non_abi()); } - layout::Variants::NicheFilling { dataful_variant, niche_value, .. 
} => { + layout::Variants::NicheFilling { + dataful_variant, + ref niche_variants, + niche_start, + .. + } => { if variant_index != dataful_variant { if bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" { @@ -382,7 +397,9 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } let niche = self.project_field(bcx, 0); - let niche_llty = niche.layout.llvm_type(bcx.ccx); + let niche_llty = niche.layout.immediate_llvm_type(bcx.ccx); + let niche_value = ((variant_index - niche_variants.start) as u128) + .wrapping_add(niche_start); // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_value == 0 { // HACK(eddyb) Using `C_null` as it works on all types. @@ -390,7 +407,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } else { C_uint_big(niche_llty, niche_value) }; - bcx.store(niche_llval, niche.llval, niche.alignment.non_abi()); + OperandValue::Immediate(niche_llval).store(bcx, niche); } } } diff --git a/src/test/ui/print_type_sizes/niche-filling.rs b/src/test/ui/print_type_sizes/niche-filling.rs index dfa7b8aae31eb..f1c419d889556 100644 --- a/src/test/ui/print_type_sizes/niche-filling.rs +++ b/src/test/ui/print_type_sizes/niche-filling.rs @@ -68,6 +68,13 @@ impl One for u32 { fn one() -> Self { 1 } } +pub enum Enum4 { + One(A), + Two(B), + Three(C), + Four(D) +} + pub fn main() { let _x: MyOption> = Default::default(); let _y: EmbeddedDiscr = Default::default(); @@ -76,4 +83,7 @@ pub fn main() { let _b: MyOption = Default::default(); let _c: MyOption = Default::default(); let _b: MyOption> = Default::default(); + let _e: Enum4<(), char, (), ()> = Enum4::One(()); + let _f: Enum4<(), (), bool, ()> = Enum4::One(()); + let _g: Enum4<(), (), (), MyOption> = Enum4::One(()); } diff --git a/src/test/ui/print_type_sizes/niche-filling.stdout b/src/test/ui/print_type_sizes/niche-filling.stdout index 668b31e413f64..af3e89a936ee0 100644 --- a/src/test/ui/print_type_sizes/niche-filling.stdout +++ b/src/test/ui/print_type_sizes/niche-filling.stdout @@ 
-19,6 +19,15 @@ print-type-size field `.val`: 4 bytes print-type-size field `.post`: 2 bytes print-type-size field `.pre`: 1 bytes print-type-size end padding: 1 bytes +print-type-size type: `Enum4<(), char, (), ()>`: 4 bytes, alignment: 4 bytes +print-type-size variant `One`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size variant `Two`: 4 bytes +print-type-size field `.0`: 4 bytes +print-type-size variant `Three`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size variant `Four`: 0 bytes +print-type-size field `.0`: 0 bytes print-type-size type: `MyOption`: 4 bytes, alignment: 4 bytes print-type-size variant `None`: 0 bytes print-type-size variant `Some`: 4 bytes @@ -29,6 +38,15 @@ print-type-size variant `Some`: 4 bytes print-type-size field `.0`: 4 bytes print-type-size type: `core::nonzero::NonZero`: 4 bytes, alignment: 4 bytes print-type-size field `.0`: 4 bytes +print-type-size type: `Enum4<(), (), (), MyOption>`: 2 bytes, alignment: 1 bytes +print-type-size variant `One`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size variant `Two`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size variant `Three`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size variant `Four`: 2 bytes +print-type-size field `.0`: 2 bytes print-type-size type: `MyOption>`: 2 bytes, alignment: 1 bytes print-type-size variant `None`: 0 bytes print-type-size variant `Some`: 2 bytes @@ -38,6 +56,15 @@ print-type-size discriminant: 1 bytes print-type-size variant `None`: 0 bytes print-type-size variant `Some`: 1 bytes print-type-size field `.0`: 1 bytes +print-type-size type: `Enum4<(), (), bool, ()>`: 1 bytes, alignment: 1 bytes +print-type-size variant `One`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size variant `Two`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size variant `Three`: 1 bytes +print-type-size field `.0`: 1 bytes +print-type-size variant `Four`: 0 bytes +print-type-size field `.0`: 0 
bytes print-type-size type: `MyOption`: 1 bytes, alignment: 1 bytes print-type-size variant `None`: 0 bytes print-type-size variant `Some`: 1 bytes From 753d582f62a99b51381ae4cc6523b5b62392ae24 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sat, 28 Oct 2017 16:52:41 +0300 Subject: [PATCH 63/69] rustc: rename CachedLayout to LayoutDetails. --- src/librustc/ty/context.rs | 8 +-- src/librustc/ty/layout.rs | 102 ++++++++++++++++++------------------ src/librustc/ty/maps/mod.rs | 2 +- 3 files changed, 56 insertions(+), 56 deletions(-) diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index f69e714a99724..904f9a0912522 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -41,7 +41,7 @@ use ty::{PolyFnSig, InferTy, ParamTy, ProjectionTy, ExistentialPredicate, Predic use ty::RegionKind; use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid}; use ty::TypeVariants::*; -use ty::layout::{CachedLayout, TargetDataLayout}; +use ty::layout::{LayoutDetails, TargetDataLayout}; use ty::maps; use ty::steal::Steal; use ty::BindingMode; @@ -78,7 +78,7 @@ use hir; /// Internal storage pub struct GlobalArenas<'tcx> { // internings - layout: TypedArena, + layout: TypedArena, // references generics: TypedArena, @@ -918,7 +918,7 @@ pub struct GlobalCtxt<'tcx> { stability_interner: RefCell>, - layout_interner: RefCell>, + layout_interner: RefCell>, /// A vector of every trait accessible in the whole crate /// (i.e. including those from subcrates). 
This is used only for @@ -1016,7 +1016,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { interned } - pub fn intern_layout(self, layout: CachedLayout) -> &'gcx CachedLayout { + pub fn intern_layout(self, layout: LayoutDetails) -> &'gcx LayoutDetails { if let Some(layout) = self.layout_interner.borrow().get(&layout) { return layout; } diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index c1ce6143d109a..28036d90217a7 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -802,7 +802,7 @@ pub enum Variants { /// at a non-0 offset, after where the discriminant would go. Tagged { discr: Scalar, - variants: Vec, + variants: Vec, }, /// Multiple cases distinguished by a niche (values invalid for a type): @@ -818,7 +818,7 @@ pub enum Variants { niche_variants: RangeInclusive, niche: Scalar, niche_start: u128, - variants: Vec, + variants: Vec, } } @@ -842,7 +842,7 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { } #[derive(PartialEq, Eq, Hash, Debug)] -pub struct CachedLayout { +pub struct LayoutDetails { pub variants: Variants, pub fields: FieldPlacement, pub abi: Abi, @@ -851,11 +851,11 @@ pub struct CachedLayout { pub size: Size } -impl CachedLayout { +impl LayoutDetails { fn scalar(cx: C, scalar: Scalar) -> Self { let size = scalar.value.size(cx); let align = scalar.value.align(cx); - CachedLayout { + LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Union(0), abi: Abi::Scalar(scalar), @@ -867,7 +867,7 @@ impl CachedLayout { fn uninhabited(field_count: usize) -> Self { let align = Align::from_bytes(1, 1).unwrap(); - CachedLayout { + LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Union(field_count), abi: Abi::Uninhabited, @@ -880,7 +880,7 @@ impl CachedLayout { fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Result<&'tcx CachedLayout, LayoutError<'tcx>> + -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> { let 
(param_env, ty) = query.into_parts(); @@ -892,7 +892,7 @@ fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } tcx.layout_depth.set(depth+1); - let layout = CachedLayout::compute_uncached(tcx, param_env, ty); + let layout = LayoutDetails::compute_uncached(tcx, param_env, ty); tcx.layout_depth.set(depth); layout @@ -905,7 +905,7 @@ pub fn provide(providers: &mut ty::maps::Providers) { }; } -impl<'a, 'tcx> CachedLayout { +impl<'a, 'tcx> LayoutDetails { fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) @@ -921,13 +921,13 @@ impl<'a, 'tcx> CachedLayout { } }; let scalar = |value: Primitive| { - tcx.intern_layout(CachedLayout::scalar(cx, scalar_unit(value))) + tcx.intern_layout(LayoutDetails::scalar(cx, scalar_unit(value))) }; let scalar_pair = |a: Scalar, b: Scalar| { let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align); let b_offset = a.value.size(dl).abi_align(b.value.align(dl)); let size = (b_offset + b.value.size(dl)).abi_align(align); - CachedLayout { + LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Arbitrary { offsets: vec![Size::from_bytes(0), b_offset], @@ -1024,7 +1024,7 @@ impl<'a, 'tcx> CachedLayout { } if field.abi == Abi::Uninhabited { - return Ok(CachedLayout::uninhabited(fields.len())); + return Ok(LayoutDetails::uninhabited(fields.len())); } if field.is_unsized() { @@ -1110,9 +1110,9 @@ impl<'a, 'tcx> CachedLayout { // Two non-ZST fields, and they're both scalars. (Some((i, &TyLayout { - cached: &CachedLayout { abi: Abi::Scalar(ref a), .. }, .. + details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, .. })), Some((j, &TyLayout { - cached: &CachedLayout { abi: Abi::Scalar(ref b), .. }, .. + details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, .. })), None) => { // Order by the memory placement, not source order. 
let ((i, a), (j, b)) = if offsets[i] < offsets[j] { @@ -1147,7 +1147,7 @@ impl<'a, 'tcx> CachedLayout { } } - Ok(CachedLayout { + Ok(LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Arbitrary { offsets, @@ -1167,13 +1167,13 @@ impl<'a, 'tcx> CachedLayout { Ok(match ty.sty { // Basic scalars. ty::TyBool => { - tcx.intern_layout(CachedLayout::scalar(cx, Scalar { + tcx.intern_layout(LayoutDetails::scalar(cx, Scalar { value: Int(I8, false), valid_range: 0..=1 })) } ty::TyChar => { - tcx.intern_layout(CachedLayout::scalar(cx, Scalar { + tcx.intern_layout(LayoutDetails::scalar(cx, Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF })) @@ -1189,12 +1189,12 @@ impl<'a, 'tcx> CachedLayout { ty::TyFnPtr(_) => { let mut ptr = scalar_unit(Pointer); ptr.valid_range.start = 1; - tcx.intern_layout(CachedLayout::scalar(cx, ptr)) + tcx.intern_layout(LayoutDetails::scalar(cx, ptr)) } // The never type. ty::TyNever => { - tcx.intern_layout(CachedLayout::uninhabited(0)) + tcx.intern_layout(LayoutDetails::uninhabited(0)) } // Potentially-fat pointers. @@ -1207,13 +1207,13 @@ impl<'a, 'tcx> CachedLayout { let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env); if pointee.is_sized(tcx, param_env, DUMMY_SP) { - return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr))); + return Ok(tcx.intern_layout(LayoutDetails::scalar(cx, data_ptr))); } let unsized_part = tcx.struct_tail(pointee); let metadata = match unsized_part.sty { ty::TyForeign(..) 
=> { - return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr))); + return Ok(tcx.intern_layout(LayoutDetails::scalar(cx, data_ptr))); } ty::TySlice(_) | ty::TyStr => { scalar_unit(Int(dl.ptr_sized_integer(), false)) @@ -1244,7 +1244,7 @@ impl<'a, 'tcx> CachedLayout { let size = element.size.checked_mul(count, dl) .ok_or(LayoutError::SizeOverflow(ty))?; - tcx.intern_layout(CachedLayout { + tcx.intern_layout(LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Array { stride: element.size, @@ -1261,7 +1261,7 @@ impl<'a, 'tcx> CachedLayout { } ty::TySlice(element) => { let element = cx.layout_of(element)?; - tcx.intern_layout(CachedLayout { + tcx.intern_layout(LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Array { stride: element.size, @@ -1277,7 +1277,7 @@ impl<'a, 'tcx> CachedLayout { }) } ty::TyStr => { - tcx.intern_layout(CachedLayout { + tcx.intern_layout(LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Array { stride: Size::from_bytes(1), @@ -1350,7 +1350,7 @@ impl<'a, 'tcx> CachedLayout { let align = dl.vector_align(size); let size = size.abi_align(align); - tcx.intern_layout(CachedLayout { + tcx.intern_layout(LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Array { stride: element.size, @@ -1380,7 +1380,7 @@ impl<'a, 'tcx> CachedLayout { }; if inh_first.is_none() { // Uninhabited because it has no variants, or only uninhabited ones. 
- return Ok(tcx.intern_layout(CachedLayout::uninhabited(0))); + return Ok(tcx.intern_layout(LayoutDetails::uninhabited(0))); } if def.is_union() { @@ -1414,7 +1414,7 @@ impl<'a, 'tcx> CachedLayout { size = cmp::max(size, field.size); } - return Ok(tcx.intern_layout(CachedLayout { + return Ok(tcx.intern_layout(LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Union(variants[0].len()), abi: Abi::Aggregate { @@ -1519,7 +1519,7 @@ impl<'a, 'tcx> CachedLayout { }).collect::, _>>()?; let offset = st[i].fields.offset(field_index) + offset; - let CachedLayout { + let LayoutDetails { size, mut align, mut primitive_align, @@ -1543,7 +1543,7 @@ impl<'a, 'tcx> CachedLayout { align = align.max(niche_align); primitive_align = primitive_align.max(niche_align); - return Ok(tcx.intern_layout(CachedLayout { + return Ok(tcx.intern_layout(LayoutDetails { variants: Variants::NicheFilling { dataful_variant: i, niche_variants, @@ -1681,7 +1681,7 @@ impl<'a, 'tcx> CachedLayout { packed: false } }; - tcx.intern_layout(CachedLayout { + tcx.intern_layout(LayoutDetails { variants: Variants::Tagged { discr, variants @@ -1709,7 +1709,7 @@ impl<'a, 'tcx> CachedLayout { return Err(LayoutError::Unknown(ty)); } ty::TyInfer(_) | ty::TyError => { - bug!("CachedLayout::compute: unexpected type `{}`", ty) + bug!("LayoutDetails::compute: unexpected type `{}`", ty) } }) } @@ -2005,13 +2005,13 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { #[derive(Copy, Clone, Debug)] pub struct TyLayout<'tcx> { pub ty: Ty<'tcx>, - cached: &'tcx CachedLayout + details: &'tcx LayoutDetails } impl<'tcx> Deref for TyLayout<'tcx> { - type Target = &'tcx CachedLayout; - fn deref(&self) -> &&'tcx CachedLayout { - &self.cached + type Target = &'tcx LayoutDetails; + fn deref(&self) -> &&'tcx LayoutDetails { + &self.details } } @@ -2082,10 +2082,10 @@ impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx let (tcx, param_env) = self; let ty = tcx.normalize_associated_type_in_env(&ty, 
param_env.reveal_all()); - let cached = tcx.layout_raw(param_env.reveal_all().and(ty))?; + let details = tcx.layout_raw(param_env.reveal_all().and(ty))?; let layout = TyLayout { ty, - cached + details }; // NB: This recording is normally disabled; when enabled, it @@ -2094,7 +2094,7 @@ impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx // completed, to avoid problems around recursive structures // and the like. (Admitedly, I wasn't able to reproduce a problem // here, but it seems like the right thing to do. -nmatsakis) - CachedLayout::record_layout_for_printing(tcx, ty, param_env, layout); + LayoutDetails::record_layout_for_printing(tcx, ty, param_env, layout); Ok(layout) } @@ -2111,10 +2111,10 @@ impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, let (tcx_at, param_env) = self; let ty = tcx_at.tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all()); - let cached = tcx_at.layout_raw(param_env.reveal_all().and(ty))?; + let details = tcx_at.layout_raw(param_env.reveal_all().and(ty))?; let layout = TyLayout { ty, - cached + details }; // NB: This recording is normally disabled; when enabled, it @@ -2123,7 +2123,7 @@ impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, // completed, to avoid problems around recursive structures // and the like. (Admitedly, I wasn't able to reproduce a problem // here, but it seems like the right thing to do. 
-nmatsakis) - CachedLayout::record_layout_for_printing(tcx_at.tcx, ty, param_env, layout); + LayoutDetails::record_layout_for_printing(tcx_at.tcx, ty, param_env, layout); Ok(layout) } @@ -2134,8 +2134,8 @@ impl<'a, 'tcx> TyLayout<'tcx> { where C: LayoutOf> + HasTyCtxt<'tcx>, C::TyLayout: MaybeResult> { - let cached = match self.variants { - Variants::Single { index } if index == variant_index => self.cached, + let details = match self.variants { + Variants::Single { index } if index == variant_index => self.details, Variants::Single { index } => { // Deny calling for_variant more than once for non-Single enums. @@ -2148,9 +2148,9 @@ impl<'a, 'tcx> TyLayout<'tcx> { ty::TyAdt(def, _) => def.variants[variant_index].fields.len(), _ => bug!() }; - let mut cached = CachedLayout::uninhabited(fields); - cached.variants = Variants::Single { index: variant_index }; - cx.tcx().intern_layout(cached) + let mut details = LayoutDetails::uninhabited(fields); + details.variants = Variants::Single { index: variant_index }; + cx.tcx().intern_layout(details) } Variants::NicheFilling { ref variants, .. } | @@ -2159,11 +2159,11 @@ impl<'a, 'tcx> TyLayout<'tcx> { } }; - assert_eq!(cached.variants, Variants::Single { index: variant_index }); + assert_eq!(details.variants, Variants::Single { index: variant_index }); TyLayout { ty: self.ty, - cached + details } } @@ -2252,9 +2252,9 @@ impl<'a, 'tcx> TyLayout<'tcx> { Variants::Tagged { ref discr, .. } | Variants::NicheFilling { niche: ref discr, .. 
} => { assert_eq!(i, 0); - let layout = CachedLayout::scalar(tcx, discr.clone()); + let layout = LayoutDetails::scalar(tcx, discr.clone()); return MaybeResult::from_ok(TyLayout { - cached: tcx.intern_layout(layout), + details: tcx.intern_layout(layout), ty: discr.value.to_ty(tcx) }); } @@ -2460,7 +2460,7 @@ impl<'gcx> HashStable> for Scalar { } } -impl_stable_hash_for!(struct ::ty::layout::CachedLayout { +impl_stable_hash_for!(struct ::ty::layout::LayoutDetails { variants, fields, abi, diff --git a/src/librustc/ty/maps/mod.rs b/src/librustc/ty/maps/mod.rs index ebd17ebabe79f..2f648e8d3ff82 100644 --- a/src/librustc/ty/maps/mod.rs +++ b/src/librustc/ty/maps/mod.rs @@ -264,7 +264,7 @@ define_maps! { <'tcx> [] fn is_freeze_raw: is_freeze_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, [] fn needs_drop_raw: needs_drop_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, [] fn layout_raw: layout_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Result<&'tcx ty::layout::CachedLayout, + -> Result<&'tcx ty::layout::LayoutDetails, ty::layout::LayoutError<'tcx>>, [] fn dylib_dependency_formats: DylibDepFormats(CrateNum) From 95687bfe27d9db705adfa9b7ae9d8e960b813b1b Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 19 Nov 2017 03:56:37 +0200 Subject: [PATCH 64/69] rustc_trans: (hack) use preferred alignment for atomic loads/stores. --- src/librustc_trans/builder.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 9da3a479f0c28..592a6d914758f 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -544,7 +544,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.count_insn("load.atomic"); unsafe { let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order); - llvm::LLVMSetAlignment(load, align.abi() as c_uint); + // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? 
+ // However, 64-bit atomic loads on `i686-apple-darwin` appear to + // require `___atomic_load` with ABI-alignment, so it's staying. + llvm::LLVMSetAlignment(load, align.pref() as c_uint); load } } @@ -605,7 +608,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let ptr = self.check_store(val, ptr); unsafe { let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order); - llvm::LLVMSetAlignment(store, align.abi() as c_uint); + // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? + // Also see `atomic_load` for more context. + llvm::LLVMSetAlignment(store, align.pref() as c_uint); } } From fb832833e2da8c5734ce62d43988327a176af38a Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sat, 18 Nov 2017 15:41:07 +0200 Subject: [PATCH 65/69] Don't glob-import overlapping variant names in test/codegen/match-optimizes-away.rs. --- src/test/codegen/match-optimizes-away.rs | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/src/test/codegen/match-optimizes-away.rs b/src/test/codegen/match-optimizes-away.rs index c0f2f64f82c8d..d7b779374314d 100644 --- a/src/test/codegen/match-optimizes-away.rs +++ b/src/test/codegen/match-optimizes-away.rs @@ -12,11 +12,9 @@ // compile-flags: -O #![crate_type="lib"] -pub enum Three { First, Second, Third } -use Three::*; +pub enum Three { A, B, C } -pub enum Four { First, Second, Third, Fourth } -use Four::*; +pub enum Four { A, B, C, D } #[no_mangle] pub fn three_valued(x: Three) -> Three { @@ -24,9 +22,9 @@ pub fn three_valued(x: Three) -> Three { // CHECK-NEXT: {{^.*:$}} // CHECK-NEXT: ret i8 %0 match x { - First => First, - Second => Second, - Third => Third, + Three::A => Three::A, + Three::B => Three::B, + Three::C => Three::C, } } @@ -36,9 +34,9 @@ pub fn four_valued(x: Four) -> Four { // CHECK-NEXT: {{^.*:$}} // CHECK-NEXT: ret i8 %0 match x { - First => First, - Second => Second, - Third => Third, - Fourth => Fourth, + Four::A => Four::A, + Four::B => Four::B, + Four::C => 
Four::C, + Four::D => Four::D, } } From b0812de556d81c17fcbd2613c6752fc4d04927bb Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 19 Nov 2017 11:48:12 +0200 Subject: [PATCH 66/69] cargotest: temporarily use eddyb/servo to include servo/servo#19285. --- src/tools/cargotest/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tools/cargotest/main.rs b/src/tools/cargotest/main.rs index a6c56a1307629..b1122f401feb9 100644 --- a/src/tools/cargotest/main.rs +++ b/src/tools/cargotest/main.rs @@ -60,8 +60,8 @@ const TEST_REPOS: &'static [Test] = &[ }, Test { name: "servo", - repo: "https://github.com/servo/servo", - sha: "38fe9533b93e985657f99a29772bf3d3c8694822", + repo: "https://github.com/eddyb/servo", + sha: "6031de9a397e2feba4ff98725991825f62b68518", lock: None, // Only test Stylo a.k.a. Quantum CSS, the parts of Servo going into Firefox. // This takes much less time to build than all of Servo and supports stable Rust. From 88e4d2c2918428d55e34cd57c11279ea839c8822 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 19 Nov 2017 12:13:24 +0200 Subject: [PATCH 67/69] rustc_trans: work around i686-pc-windows-msvc byval align LLVM bug. --- src/librustc_trans/abi.rs | 4 +++- src/test/codegen/function-arguments.rs | 4 ++-- src/test/codegen/packed.rs | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 7ef89597b11ca..0bf6e84337b22 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -491,7 +491,9 @@ impl<'a, 'tcx> ArgType<'tcx> { .set(ArgAttribute::NoCapture) .set(ArgAttribute::NonNull); attrs.pointee_size = self.layout.size; - attrs.pointee_align = Some(self.layout.align); + // FIXME(eddyb) We should be doing this, but at least on + // i686-pc-windows-msvc, it results in wrong stack offsets. 
+ // attrs.pointee_align = Some(self.layout.align); self.mode = PassMode::Indirect(attrs); } diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index f96c104b265f9..f8945a6ee8d93 100644 --- a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -66,7 +66,7 @@ pub fn mutable_unsafe_borrow(_: &mut UnsafeInner) { pub fn mutable_borrow(_: &mut i32) { } -// CHECK: @indirect_struct(%S* noalias nocapture align 4 dereferenceable(32) %arg0) +// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32) %arg0) #[no_mangle] pub fn indirect_struct(_: S) { } @@ -83,7 +83,7 @@ pub fn _box(x: Box) -> Box { x } -// CHECK: @struct_return(%S* noalias nocapture sret align 4 dereferenceable(32)) +// CHECK: @struct_return(%S* noalias nocapture sret dereferenceable(32)) #[no_mangle] pub fn struct_return() -> S { S { diff --git a/src/test/codegen/packed.rs b/src/test/codegen/packed.rs index 64e842b026e24..dd530cf03cd41 100644 --- a/src/test/codegen/packed.rs +++ b/src/test/codegen/packed.rs @@ -39,7 +39,7 @@ pub struct BigPacked { #[no_mangle] pub fn call_pkd(f: fn() -> Array) -> BigPacked { // CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array -// CHECK: call void %{{.*}}(%Array* noalias nocapture sret align 4 dereferenceable(32) [[ALLOCA]]) +// CHECK: call void %{{.*}}(%Array* noalias nocapture sret dereferenceable(32) [[ALLOCA]]) // CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* %{{.*}}, i{{[0-9]+}} 32, i32 1, i1 false) // check that calls whose destination is a field of a packed struct // go through an alloca rather than calling the function with an From 89e437354adac3acc50e94012e675b8c6fffcaa4 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 19 Nov 2017 20:28:52 +0200 Subject: [PATCH 68/69] rustc_trans: remove primitive_align optimization. 
--- src/librustc/ty/layout.rs | 45 ++++++-------------------------- src/librustc_trans/abi.rs | 2 +- src/librustc_trans/builder.rs | 8 +++--- src/librustc_trans/intrinsic.rs | 2 +- src/librustc_trans/mir/lvalue.rs | 3 +-- src/librustc_trans/type_of.rs | 9 ------- 6 files changed, 14 insertions(+), 55 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 28036d90217a7..71bf333a8c612 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -847,7 +847,6 @@ pub struct LayoutDetails { pub fields: FieldPlacement, pub abi: Abi, pub align: Align, - pub primitive_align: Align, pub size: Size } @@ -861,7 +860,6 @@ impl LayoutDetails { abi: Abi::Scalar(scalar), size, align, - primitive_align: align } } @@ -872,7 +870,6 @@ impl LayoutDetails { fields: FieldPlacement::Union(field_count), abi: Abi::Uninhabited, align, - primitive_align: align, size: Size::from_bytes(0) } } @@ -935,7 +932,6 @@ impl<'a, 'tcx> LayoutDetails { }, abi: Abi::ScalarPair(a, b), align, - primitive_align: align, size } }; @@ -955,14 +951,12 @@ impl<'a, 'tcx> LayoutDetails { bug!("struct cannot be packed and aligned"); } - let base_align = if packed { + let mut align = if packed { dl.i8_align } else { dl.aggregate_align }; - let mut align = base_align; - let mut primitive_align = base_align; let mut sized = true; let mut offsets = vec![Size::from_bytes(0); fields.len()]; let mut inverse_memory_index: Vec = (0..fields.len() as u32).collect(); @@ -1012,7 +1006,6 @@ impl<'a, 'tcx> LayoutDetails { if !packed { let discr_align = discr.align(dl); align = align.max(discr_align); - primitive_align = primitive_align.max(discr_align); } } @@ -1035,7 +1028,6 @@ impl<'a, 'tcx> LayoutDetails { if !packed { offset = offset.abi_align(field.align); align = align.max(field.align); - primitive_align = primitive_align.max(field.primitive_align); } debug!("univariant offset: {:?} field: {:#?}", offset, field); @@ -1134,7 +1126,6 @@ impl<'a, 'tcx> LayoutDetails { if offsets[i] 
== pair_offsets[0] && offsets[j] == pair_offsets[1] && align == pair.align && - primitive_align == pair.primitive_align && size == pair.size { // We can use `ScalarPair` only when it matches our // already computed layout (including `#[repr(C)]`). @@ -1155,7 +1146,6 @@ impl<'a, 'tcx> LayoutDetails { }, abi, align, - primitive_align, size }) }; @@ -1255,7 +1245,6 @@ impl<'a, 'tcx> LayoutDetails { packed: false }, align: element.align, - primitive_align: element.primitive_align, size }) } @@ -1272,7 +1261,6 @@ impl<'a, 'tcx> LayoutDetails { packed: false }, align: element.align, - primitive_align: element.primitive_align, size: Size::from_bytes(0) }) } @@ -1288,7 +1276,6 @@ impl<'a, 'tcx> LayoutDetails { packed: false }, align: dl.i8_align, - primitive_align: dl.i8_align, size: Size::from_bytes(0) }) } @@ -1359,7 +1346,6 @@ impl<'a, 'tcx> LayoutDetails { abi: Abi::Vector, size, align, - primitive_align: align }) } @@ -1389,19 +1375,17 @@ impl<'a, 'tcx> LayoutDetails { bug!("Union cannot be packed and aligned"); } - let mut primitive_align = if def.repr.packed() { + let mut align = if def.repr.packed() { dl.i8_align } else { dl.aggregate_align }; - let mut align = if def.repr.align > 0 { + if def.repr.align > 0 { let repr_align = def.repr.align as u64; - primitive_align.max( - Align::from_bytes(repr_align, repr_align).unwrap()) - } else { - primitive_align - }; + align = align.max( + Align::from_bytes(repr_align, repr_align).unwrap()); + } let mut size = Size::from_bytes(0); for field in &variants[0] { @@ -1409,7 +1393,6 @@ impl<'a, 'tcx> LayoutDetails { if !packed { align = align.max(field.align); - primitive_align = primitive_align.max(field.primitive_align); } size = cmp::max(size, field.size); } @@ -1422,7 +1405,6 @@ impl<'a, 'tcx> LayoutDetails { packed }, align, - primitive_align, size: size.abi_align(align) })); } @@ -1519,12 +1501,7 @@ impl<'a, 'tcx> LayoutDetails { }).collect::, _>>()?; let offset = st[i].fields.offset(field_index) + offset; - let 
LayoutDetails { - size, - mut align, - mut primitive_align, - .. - } = st[i]; + let LayoutDetails { size, mut align, .. } = st[i]; let mut niche_align = niche.value.align(dl); let abi = if offset.bytes() == 0 && niche.value.size(dl) == size { @@ -1541,7 +1518,6 @@ impl<'a, 'tcx> LayoutDetails { } }; align = align.max(niche_align); - primitive_align = primitive_align.max(niche_align); return Ok(tcx.intern_layout(LayoutDetails { variants: Variants::NicheFilling { @@ -1558,7 +1534,6 @@ impl<'a, 'tcx> LayoutDetails { abi, size, align, - primitive_align })); } } @@ -1577,7 +1552,6 @@ impl<'a, 'tcx> LayoutDetails { let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); let mut align = dl.aggregate_align; - let mut primitive_align = dl.aggregate_align; let mut size = Size::from_bytes(0); // We're interested in the smallest alignment, so start large. @@ -1599,7 +1573,6 @@ impl<'a, 'tcx> LayoutDetails { } size = cmp::max(size, st.size); align = align.max(st.align); - primitive_align = primitive_align.max(st.primitive_align); Ok(st) }).collect::, _>>()?; @@ -1692,7 +1665,6 @@ impl<'a, 'tcx> LayoutDetails { fields: FieldPlacement::Union(1), abi, align, - primitive_align, size }) } @@ -2465,8 +2437,7 @@ impl_stable_hash_for!(struct ::ty::layout::LayoutDetails { fields, abi, size, - align, - primitive_align + align }); impl_stable_hash_for!(enum ::ty::layout::Integer { diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 0bf6e84337b22..54828044de670 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -585,7 +585,7 @@ impl<'a, 'tcx> ArgType<'tcx> { // bitcasting to the struct type yields invalid cast errors. // We instead thus allocate some scratch space... 
- let llscratch = bcx.alloca(cast.llvm_type(ccx), "abi_cast", None); + let llscratch = bcx.alloca(cast.llvm_type(ccx), "abi_cast", cast.align(ccx)); let scratch_size = cast.size(ccx); bcx.lifetime_start(llscratch, scratch_size); diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 592a6d914758f..50e673bdbfdd7 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -488,7 +488,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn alloca(&self, ty: Type, name: &str, align: Option) -> ValueRef { + pub fn alloca(&self, ty: Type, name: &str, align: Align) -> ValueRef { let builder = Builder::with_ccx(self.ccx); builder.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) @@ -496,7 +496,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { builder.dynamic_alloca(ty, name, align) } - pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Option) -> ValueRef { + pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Align) -> ValueRef { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -506,9 +506,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(), name.as_ptr()) }; - if let Some(align) = align { - llvm::LLVMSetAlignment(alloca, align.abi() as c_uint); - } + llvm::LLVMSetAlignment(alloca, align.abi() as c_uint); alloca } } diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 5abc096407d5d..adbb45f893b08 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -817,7 +817,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // // More information can be found in libstd's seh.rs implementation. 
let i64p = Type::i64(ccx).ptr_to(); - let slot = bcx.alloca(i64p, "slot", None); + let slot = bcx.alloca(i64p, "slot", ccx.data_layout().pointer_align); bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 6478bd2c84880..891d52045c217 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -103,8 +103,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str) -> LvalueRef<'tcx> { debug!("alloca({:?}: {:?})", name, layout); - let tmp = bcx.alloca( - layout.llvm_type(bcx.ccx), name, layout.over_align()); + let tmp = bcx.alloca(layout.llvm_type(bcx.ccx), name, layout.align); Self::new_sized(tmp, layout, Alignment::AbiAligned) } diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 60e5e4ced2c30..9b32c825117ee 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -185,7 +185,6 @@ pub trait LayoutLlvmExt<'tcx> { fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>, index: usize) -> Type; - fn over_align(&self) -> Option; fn llvm_field_index(&self, index: usize) -> u64; fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size) -> Option; @@ -365,14 +364,6 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } } - fn over_align(&self) -> Option { - if self.align != self.primitive_align { - Some(self.align) - } else { - None - } - } - fn llvm_field_index(&self, index: usize) -> u64 { match self.abi { layout::Abi::Scalar(_) | From f9f5ab98b0aae2a5ef8e41df2277ca3a2cd6e89a Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 19 Nov 2017 23:38:48 +0200 Subject: [PATCH 69/69] Revert "tests: Update run-make/issue-25581 to reflect how fat pointers are passed." This reverts commit b12dcdef4fae5e3856e6911fd6cfbeedadcf3821. 
--- src/test/run-make/issue-25581/test.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/test/run-make/issue-25581/test.c b/src/test/run-make/issue-25581/test.c index ab85d2bb13fb1..5736b1730216d 100644 --- a/src/test/run-make/issue-25581/test.c +++ b/src/test/run-make/issue-25581/test.c @@ -2,10 +2,15 @@ #include #include -size_t slice_len(uint8_t *data, size_t len) { - return len; +struct ByteSlice { + uint8_t *data; + size_t len; +}; + +size_t slice_len(struct ByteSlice bs) { + return bs.len; } -uint8_t slice_elem(uint8_t *data, size_t len, size_t idx) { - return data[idx]; +uint8_t slice_elem(struct ByteSlice bs, size_t idx) { + return bs.data[idx]; }