diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 6b53702a1b55d..f8f6956c47e4d 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -1,6 +1,7 @@
+use crate::attributes;
 use crate::builder::Builder;
 use crate::context::CodegenCx;
-use crate::llvm::{self, AttributePlace};
+use crate::llvm::{self, Attribute, AttributePlace};
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
@@ -20,6 +21,7 @@ use rustc_target::abi::{self, HasDataLayout, Int};
 pub use rustc_target::spec::abi::Abi;
 
 use libc::c_uint;
+use smallvec::SmallVec;
 
 pub trait ArgAttributesExt {
     fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
@@ -38,57 +40,65 @@ fn should_use_mutable_noalias(cx: &CodegenCx<'_, '_>) -> bool {
     cx.tcx.sess.opts.debugging_opts.mutable_noalias.unwrap_or(true)
 }
 
-const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::Attribute); 1] =
-    [(ArgAttribute::InReg, llvm::Attribute::InReg)];
+const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
+    [(ArgAttribute::InReg, llvm::AttributeKind::InReg)];
 
-const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::Attribute); 5] = [
-    (ArgAttribute::NoAlias, llvm::Attribute::NoAlias),
-    (ArgAttribute::NoCapture, llvm::Attribute::NoCapture),
-    (ArgAttribute::NonNull, llvm::Attribute::NonNull),
-    (ArgAttribute::ReadOnly, llvm::Attribute::ReadOnly),
-    (ArgAttribute::NoUndef, llvm::Attribute::NoUndef),
+const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 5] = [
+    (ArgAttribute::NoAlias, llvm::AttributeKind::NoAlias),
+    (ArgAttribute::NoCapture, llvm::AttributeKind::NoCapture),
+    (ArgAttribute::NonNull, llvm::AttributeKind::NonNull),
+    (ArgAttribute::ReadOnly, llvm::AttributeKind::ReadOnly),
+    (ArgAttribute::NoUndef, llvm::AttributeKind::NoUndef),
 ];
 
-impl ArgAttributesExt for ArgAttributes {
-    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
-        let mut regular = self.regular;
-        unsafe {
-            // ABI-affecting attributes must always be applied
-            for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
-                if regular.contains(attr) {
-                    llattr.apply_llfn(idx, llfn);
-                }
-            }
-            if let Some(align) = self.pointee_align {
-                llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.bytes() as u32);
-            }
-            match self.arg_ext {
-                ArgExtension::None => {}
-                ArgExtension::Zext => llvm::Attribute::ZExt.apply_llfn(idx, llfn),
-                ArgExtension::Sext => llvm::Attribute::SExt.apply_llfn(idx, llfn),
-            }
-            // Only apply remaining attributes when optimizing
-            if cx.sess().opts.optimize == config::OptLevel::No {
-                return;
-            }
-            let deref = self.pointee_size.bytes();
-            if deref != 0 {
-                if regular.contains(ArgAttribute::NonNull) {
-                    llvm::LLVMRustAddDereferenceableAttr(llfn, idx.as_uint(), deref);
-                } else {
-                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, idx.as_uint(), deref);
-                }
-                regular -= ArgAttribute::NonNull;
-            }
-            for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
-                if regular.contains(attr) {
-                    llattr.apply_llfn(idx, llfn);
-                }
+fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 8]> {
+    let mut regular = this.regular;
+
+    let mut attrs = SmallVec::new();
+
+    // ABI-affecting attributes must always be applied
+    for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
+        if regular.contains(attr) {
+            attrs.push(llattr.create_attr(cx.llcx));
+        }
+    }
+    if let Some(align) = this.pointee_align {
+        attrs.push(llvm::CreateAlignmentAttr(cx.llcx, align.bytes()));
+    }
+    match this.arg_ext {
+        ArgExtension::None => {}
+        ArgExtension::Zext => attrs.push(llvm::AttributeKind::ZExt.create_attr(cx.llcx)),
+        ArgExtension::Sext => attrs.push(llvm::AttributeKind::SExt.create_attr(cx.llcx)),
+    }
+
+    // Only apply remaining attributes when optimizing
+    if cx.sess().opts.optimize != config::OptLevel::No {
+        let deref = this.pointee_size.bytes();
+        if deref != 0 {
+            if regular.contains(ArgAttribute::NonNull) {
+                attrs.push(llvm::CreateDereferenceableAttr(cx.llcx, deref));
+            } else {
+                attrs.push(llvm::CreateDereferenceableOrNullAttr(cx.llcx, deref));
             }
-            if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
-                llvm::Attribute::NoAlias.apply_llfn(idx, llfn);
+            regular -= ArgAttribute::NonNull;
+        }
+        for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
+            if regular.contains(attr) {
+                attrs.push(llattr.create_attr(cx.llcx));
             }
         }
+        if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
+            attrs.push(llvm::AttributeKind::NoAlias.create_attr(cx.llcx));
+        }
+    }
+
+    attrs
+}
+
+impl ArgAttributesExt for ArgAttributes {
+    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
+        let attrs = get_attrs(self, cx);
+        attributes::apply_to_llfn(llfn, idx, &attrs);
     }
 
     fn apply_attrs_to_callsite(
@@ -97,52 +107,8 @@ impl ArgAttributesExt for ArgAttributes {
         cx: &CodegenCx<'_, '_>,
         callsite: &Value,
     ) {
-        let mut regular = self.regular;
-        unsafe {
-            // ABI-affecting attributes must always be applied
-            for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
-                if regular.contains(attr) {
-                    llattr.apply_callsite(idx, callsite);
-                }
-            }
-            if let Some(align) = self.pointee_align {
-                llvm::LLVMRustAddAlignmentCallSiteAttr(
-                    callsite,
-                    idx.as_uint(),
-                    align.bytes() as u32,
-                );
-            }
-            match self.arg_ext {
-                ArgExtension::None => {}
-                ArgExtension::Zext => llvm::Attribute::ZExt.apply_callsite(idx, callsite),
-                ArgExtension::Sext => llvm::Attribute::SExt.apply_callsite(idx, callsite),
-            }
-            // Only apply remaining attributes when optimizing
-            if cx.sess().opts.optimize == config::OptLevel::No {
-                return;
-            }
-            let deref = self.pointee_size.bytes();
-            if deref != 0 {
-                if regular.contains(ArgAttribute::NonNull) {
-                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, idx.as_uint(), deref);
-                } else {
-                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(
-                        callsite,
-                        idx.as_uint(),
-                        deref,
-                    );
-                }
-                regular -= ArgAttribute::NonNull;
-            }
-            for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
-                if regular.contains(attr) {
-                    llattr.apply_callsite(idx, callsite);
-                }
-            }
-            if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
-                llvm::Attribute::NoAlias.apply_callsite(idx, callsite);
-            }
-        }
+        let attrs = get_attrs(self, cx);
+        attributes::apply_to_callsite(callsite, idx, &attrs);
     }
 }
 
@@ -444,15 +410,14 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
     }
 
     fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
-        // FIXME(eddyb) can this also be applied to callsites?
+        let mut func_attrs = SmallVec::<[_; 2]>::new();
         if self.ret.layout.abi.is_uninhabited() {
-            llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
+            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
         }
-
-        // FIXME(eddyb, wesleywiser): apply this to callsites as well?
         if !self.can_unwind {
-            llvm::Attribute::NoUnwind.apply_llfn(llvm::AttributePlace::Function, llfn);
+            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
         }
+        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });
 
         let mut i = 0;
         let mut apply = |attrs: &ArgAttributes| {
@@ -467,13 +432,8 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                 assert!(!on_stack);
                 let i = apply(attrs);
-                unsafe {
-                    llvm::LLVMRustAddStructRetAttr(
-                        llfn,
-                        llvm::AttributePlace::Argument(i).as_uint(),
-                        self.ret.layout.llvm_type(cx),
-                    );
-                }
+                let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
+                attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
             }
             PassMode::Cast(cast) => {
                 cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
@@ -488,13 +448,8 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 PassMode::Ignore => {}
                 PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
                     let i = apply(attrs);
-                    unsafe {
-                        llvm::LLVMRustAddByValAttr(
-                            llfn,
-                            llvm::AttributePlace::Argument(i).as_uint(),
-                            arg.layout.llvm_type(cx),
-                        );
-                    }
+                    let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
+                    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
                 }
                 PassMode::Direct(ref attrs)
                 | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
@@ -517,12 +472,14 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
     }
 
     fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
+        let mut func_attrs = SmallVec::<[_; 2]>::new();
         if self.ret.layout.abi.is_uninhabited() {
-            llvm::Attribute::NoReturn.apply_callsite(llvm::AttributePlace::Function, callsite);
+            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
         }
         if !self.can_unwind {
-            llvm::Attribute::NoUnwind.apply_callsite(llvm::AttributePlace::Function, callsite);
+            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(bx.cx.llcx));
         }
+        attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &{ func_attrs });
 
         let mut i = 0;
         let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
@@ -537,13 +494,8 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                 assert!(!on_stack);
                 let i = apply(bx.cx, attrs);
-                unsafe {
-                    llvm::LLVMRustAddStructRetCallSiteAttr(
-                        callsite,
-                        llvm::AttributePlace::Argument(i).as_uint(),
-                        self.ret.layout.llvm_type(bx),
-                    );
-                }
+                let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
+                attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
             }
             PassMode::Cast(cast) => {
                 cast.attrs.apply_attrs_to_callsite(
@@ -572,13 +524,12 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 PassMode::Ignore => {}
                 PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
                     let i = apply(bx.cx, attrs);
-                    unsafe {
-                        llvm::LLVMRustAddByValCallSiteAttr(
-                            callsite,
-                            llvm::AttributePlace::Argument(i).as_uint(),
-                            arg.layout.llvm_type(bx),
-                        );
-                    }
+                    let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
+                    attributes::apply_to_callsite(
+                        callsite,
+                        llvm::AttributePlace::Argument(i),
+                        &[byval],
+                    );
                 }
                 PassMode::Direct(ref attrs)
                 | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
@@ -610,10 +561,12 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         if self.conv == Conv::CCmseNonSecureCall {
             // This will probably get ignored on all targets but those supporting the TrustZone-M
             // extension (thumbv8m targets).
-            llvm::AddCallSiteAttrString(
+            let cmse_nonsecure_call =
+                llvm::CreateAttrString(bx.cx.llcx, cstr::cstr!("cmse_nonsecure_call"));
+            attributes::apply_to_callsite(
                 callsite,
                 llvm::AttributePlace::Function,
-                cstr::cstr!("cmse_nonsecure_call"),
+                &[cmse_nonsecure_call],
             );
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index 7680d4fd233be..eb19e42721701 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -64,7 +64,8 @@ pub(crate) unsafe fn codegen(
         llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
     }
     if tcx.sess.must_emit_unwind_tables() {
-        attributes::emit_uwtable(llfn);
+        let uwtable = attributes::uwtable_attr(llcx);
+        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
     }
 
     let callee = kind.fn_name(method.name);
@@ -105,20 +106,22 @@ pub(crate) unsafe fn codegen(
     let name = "__rust_alloc_error_handler";
     let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
     // -> ! DIFlagNoReturn
-    llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
+    let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx);
+    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]);
 
     if tcx.sess.target.default_hidden_visibility {
         llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
     }
     if tcx.sess.must_emit_unwind_tables() {
-        attributes::emit_uwtable(llfn);
+        let uwtable = attributes::uwtable_attr(llcx);
+        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
     }
 
     let kind = if has_alloc_error_handler { AllocatorKind::Global } else { AllocatorKind::Default };
     let callee = kind.fn_name(sym::oom);
     let callee = llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
     // -> ! DIFlagNoReturn
-    llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, callee);
+    attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
     llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
 
     let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index e22bec249513d..96c7d884b7b20 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -1,3 +1,4 @@
+use crate::attributes;
 use crate::builder::Builder;
 use crate::common::Funclet;
 use crate::context::CodegenCx;
@@ -18,6 +19,7 @@ use rustc_target::abi::*;
 use rustc_target::asm::*;
 
 use libc::{c_char, c_uint};
+use smallvec::SmallVec;
 use tracing::debug;
 
 impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
@@ -273,19 +275,20 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         )
         .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));
 
+        let mut attrs = SmallVec::<[_; 2]>::new();
         if options.contains(InlineAsmOptions::PURE) {
             if options.contains(InlineAsmOptions::NOMEM) {
-                llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result);
+                attrs.push(llvm::AttributeKind::ReadNone.create_attr(self.cx.llcx));
             } else if options.contains(InlineAsmOptions::READONLY) {
-                llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
+                attrs.push(llvm::AttributeKind::ReadOnly.create_attr(self.cx.llcx));
             }
-            llvm::Attribute::WillReturn.apply_callsite(llvm::AttributePlace::Function, result);
+            attrs.push(llvm::AttributeKind::WillReturn.create_attr(self.cx.llcx));
         } else if options.contains(InlineAsmOptions::NOMEM) {
-            llvm::Attribute::InaccessibleMemOnly
-                .apply_callsite(llvm::AttributePlace::Function, result);
+            attrs.push(llvm::AttributeKind::InaccessibleMemOnly.create_attr(self.cx.llcx));
         } else {
             // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
         }
+        attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });
 
         // Write results to outputs
         for (idx, op) in operands.iter().enumerate() {
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index f6d7221d4e9e8..13a41388f5e2f 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -7,53 +7,75 @@ use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_session::config::OptLevel;
-use rustc_session::Session;
 use rustc_target::spec::abi::Abi;
 use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector};
+use smallvec::SmallVec;
 
 use crate::attributes;
 use crate::llvm::AttributePlace::Function;
-use crate::llvm::{self, Attribute};
+use crate::llvm::{self, Attribute, AttributeKind, AttributePlace};
 use crate::llvm_util;
 pub use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr};
 
 use crate::context::CodegenCx;
 use crate::value::Value;
 
-/// Mark LLVM function to use provided inline heuristic.
+pub fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
+    if !attrs.is_empty() {
+        llvm::AddFunctionAttributes(llfn, idx, attrs);
+    }
+}
+
+pub fn remove_from_llfn(llfn: &Value, idx: AttributePlace, attrs: &[AttributeKind]) {
+    if !attrs.is_empty() {
+        llvm::RemoveFunctionAttributes(llfn, idx, attrs);
+    }
+}
+
+pub fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
+    if !attrs.is_empty() {
+        llvm::AddCallSiteAttributes(callsite, idx, attrs);
+    }
+}
+
+/// Get LLVM attribute for the provided inline heuristic.
 #[inline]
-fn inline<'ll>(cx: &CodegenCx<'ll, '_>, val: &'ll Value, inline: InlineAttr) {
-    use self::InlineAttr::*;
+fn inline_attr<'ll>(cx: &CodegenCx<'ll, '_>, inline: InlineAttr) -> Option<&'ll Attribute> {
     match inline {
-        Hint => Attribute::InlineHint.apply_llfn(Function, val),
-        Always => Attribute::AlwaysInline.apply_llfn(Function, val),
-        Never => {
-            if cx.tcx().sess.target.arch != "amdgpu" {
-                Attribute::NoInline.apply_llfn(Function, val);
+        InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
+        InlineAttr::Always => Some(AttributeKind::AlwaysInline.create_attr(cx.llcx)),
+        InlineAttr::Never => {
+            if cx.sess().target.arch != "amdgpu" {
+                Some(AttributeKind::NoInline.create_attr(cx.llcx))
+            } else {
+                None
             }
         }
-        None => {}
-    };
+        InlineAttr::None => None,
+    }
 }
 
-/// Apply LLVM sanitize attributes.
+/// Get LLVM sanitize attributes.
 #[inline]
-pub fn sanitize<'ll>(cx: &CodegenCx<'ll, '_>, no_sanitize: SanitizerSet, llfn: &'ll Value) {
+pub fn sanitize_attrs<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+    no_sanitize: SanitizerSet,
+) -> SmallVec<[&'ll Attribute; 4]> {
+    let mut attrs = SmallVec::new();
     let enabled = cx.tcx.sess.opts.debugging_opts.sanitizer - no_sanitize;
     if enabled.contains(SanitizerSet::ADDRESS) {
-        llvm::Attribute::SanitizeAddress.apply_llfn(Function, llfn);
+        attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
     }
     if enabled.contains(SanitizerSet::MEMORY) {
-        llvm::Attribute::SanitizeMemory.apply_llfn(Function, llfn);
+        attrs.push(llvm::AttributeKind::SanitizeMemory.create_attr(cx.llcx));
     }
     if enabled.contains(SanitizerSet::THREAD) {
-        llvm::Attribute::SanitizeThread.apply_llfn(Function, llfn);
+        attrs.push(llvm::AttributeKind::SanitizeThread.create_attr(cx.llcx));
     }
     if enabled.contains(SanitizerSet::HWADDRESS) {
-        llvm::Attribute::SanitizeHWAddress.apply_llfn(Function, llfn);
+        attrs.push(llvm::AttributeKind::SanitizeHWAddress.create_attr(cx.llcx));
    }
     if enabled.contains(SanitizerSet::MEMTAG) {
         // Check to make sure the mte target feature is actually enabled.
@@ -66,26 +88,21 @@ pub fn sanitize<'ll>(cx: &CodegenCx<'ll, '_>, no_sanitize: SanitizerSet, llfn: &
             sess.err("`-Zsanitizer=memtag` requires `-Ctarget-feature=+mte`");
         }
 
-        llvm::Attribute::SanitizeMemTag.apply_llfn(Function, llfn);
+        attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
     }
+    attrs
 }
 
 /// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
 #[inline]
-pub fn emit_uwtable(val: &Value) {
+pub fn uwtable_attr(llcx: &llvm::Context) -> &Attribute {
     // NOTE: We should determine if we even need async unwind tables, as they
     // take have more overhead and if we can use sync unwind tables we
     // probably should.
-    llvm::EmitUWTableAttr(val, true);
-}
-
-/// Tell LLVM if this function should be 'naked', i.e., skip the epilogue and prologue.
-#[inline]
-fn naked(val: &Value, is_naked: bool) {
-    Attribute::Naked.toggle_llfn(Function, val, is_naked);
+    llvm::CreateUWTableAttr(llcx, true)
 }
 
-pub fn set_frame_pointer_type<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+pub fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
     let mut fp = cx.sess().target.frame_pointer;
     // "mcount" function relies on stack pointer.
     // See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
@@ -96,19 +113,14 @@ pub fn set_frame_pointer_type<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
     let attr_value = match fp {
         FramePointer::Always => cstr!("all"),
         FramePointer::NonLeaf => cstr!("non-leaf"),
-        FramePointer::MayOmit => return,
+        FramePointer::MayOmit => return None,
     };
-    llvm::AddFunctionAttrStringValue(
-        llfn,
-        llvm::AttributePlace::Function,
-        cstr!("frame-pointer"),
-        attr_value,
-    );
+    Some(llvm::CreateAttrStringValue(cx.llcx, cstr!("frame-pointer"), attr_value))
 }
 
 /// Tell LLVM what instrument function to insert.
 #[inline]
-fn set_instrument_function<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
     if cx.sess().instrument_mcount() {
         // Similar to `clang -pg` behavior. Handled by the
         // `post-inline-ee-instrument` LLVM pass.
@@ -117,16 +129,17 @@ fn set_instrument_function<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
         // See test/CodeGen/mcount.c in clang.
         let mcount_name = CString::new(cx.sess().target.mcount.as_str().as_bytes()).unwrap();
 
-        llvm::AddFunctionAttrStringValue(
-            llfn,
-            llvm::AttributePlace::Function,
+        Some(llvm::CreateAttrStringValue(
+            cx.llcx,
             cstr!("instrument-function-entry-inlined"),
             &mcount_name,
-        );
+        ))
+    } else {
+        None
     }
 }
 
-fn set_probestack<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+fn probestack_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
     // Currently stack probes seem somewhat incompatible with the address
     // sanitizer and thread sanitizer. With asan we're already protected from
     // stack overflow anyway so we don't really need stack probes regardless.
@@ -137,107 +150,105 @@ fn set_probestack<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
         .sanitizer
         .intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD)
     {
-        return;
+        return None;
     }
 
     // probestack doesn't play nice either with `-C profile-generate`.
     if cx.sess().opts.cg.profile_generate.enabled() {
-        return;
+        return None;
     }
 
     // probestack doesn't play nice either with gcov profiling.
     if cx.sess().opts.debugging_opts.profile {
-        return;
+        return None;
     }
 
     let attr_value = match cx.sess().target.stack_probes {
-        StackProbeType::None => None,
+        StackProbeType::None => return None,
         // Request LLVM to generate the probes inline. If the given LLVM version does not support
         // this, no probe is generated at all (even if the attribute is specified).
-        StackProbeType::Inline => Some(cstr!("inline-asm")),
+        StackProbeType::Inline => cstr!("inline-asm"),
        // Flag our internal `__rust_probestack` function as the stack probe symbol.
        // This is defined in the `compiler-builtins` crate for each architecture.
-        StackProbeType::Call => Some(cstr!("__rust_probestack")),
+        StackProbeType::Call => cstr!("__rust_probestack"),
         // Pick from the two above based on the LLVM version.
         StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
             if llvm_util::get_version() < min_llvm_version_for_inline {
-                Some(cstr!("__rust_probestack"))
+                cstr!("__rust_probestack")
             } else {
-                Some(cstr!("inline-asm"))
+                cstr!("inline-asm")
             }
         }
     };
-    if let Some(attr_value) = attr_value {
-        llvm::AddFunctionAttrStringValue(
-            llfn,
-            llvm::AttributePlace::Function,
-            cstr!("probe-stack"),
-            attr_value,
-        );
-    }
+    Some(llvm::CreateAttrStringValue(cx.llcx, cstr!("probe-stack"), attr_value))
 }
 
-fn set_stackprotector<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+fn stackprotector_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
     let sspattr = match cx.sess().stack_protector() {
-        StackProtector::None => return,
-        StackProtector::All => Attribute::StackProtectReq,
-        StackProtector::Strong => Attribute::StackProtectStrong,
-        StackProtector::Basic => Attribute::StackProtect,
+        StackProtector::None => return None,
+        StackProtector::All => AttributeKind::StackProtectReq,
+        StackProtector::Strong => AttributeKind::StackProtectStrong,
+        StackProtector::Basic => AttributeKind::StackProtect,
     };
 
-    sspattr.apply_llfn(Function, llfn)
+    Some(sspattr.create_attr(cx.llcx))
 }
 
-pub fn apply_target_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+pub fn target_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Attribute {
     let target_cpu = SmallCStr::new(llvm_util::target_cpu(cx.tcx.sess));
-    llvm::AddFunctionAttrStringValue(
-        llfn,
-        llvm::AttributePlace::Function,
-        cstr!("target-cpu"),
-        target_cpu.as_c_str(),
-    );
+    llvm::CreateAttrStringValue(cx.llcx, cstr!("target-cpu"), target_cpu.as_c_str())
 }
 
-pub fn apply_tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
-    if let Some(tune) = llvm_util::tune_cpu(cx.tcx.sess) {
+pub fn tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+    llvm_util::tune_cpu(cx.tcx.sess).map(|tune| {
        let tune_cpu = SmallCStr::new(tune);
-        llvm::AddFunctionAttrStringValue(
-            llfn,
-            llvm::AttributePlace::Function,
-            cstr!("tune-cpu"),
-            tune_cpu.as_c_str(),
-        );
-    }
+        llvm::CreateAttrStringValue(cx.llcx, cstr!("tune-cpu"), tune_cpu.as_c_str())
+    })
 }
 
-/// Sets the `NonLazyBind` LLVM attribute on a given function,
-/// assuming the codegen options allow skipping the PLT.
-pub fn non_lazy_bind<'ll>(sess: &Session, llfn: &'ll Value) {
+/// Get the `NonLazyBind` LLVM attribute,
+/// if the codegen options allow skipping the PLT.
+pub fn non_lazy_bind_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
     // Don't generate calls through PLT if it's not necessary
-    if !sess.needs_plt() {
-        Attribute::NonLazyBind.apply_llfn(Function, llfn);
+    if !cx.sess().needs_plt() {
+        Some(AttributeKind::NonLazyBind.create_attr(cx.llcx))
+    } else {
+        None
     }
 }
 
-pub(crate) fn default_optimisation_attrs<'ll>(sess: &Session, llfn: &'ll Value) {
-    match sess.opts.optimize {
+/// Returns attributes to remove and to add, respectively,
+/// to set the default optimizations attrs on a function.
+#[inline]
+pub(crate) fn default_optimisation_attrs<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+) -> (
+    // Attributes to remove
+    SmallVec<[AttributeKind; 3]>,
+    // Attributes to add
+    SmallVec<[&'ll Attribute; 2]>,
+) {
+    let mut to_remove = SmallVec::new();
+    let mut to_add = SmallVec::new();
+    match cx.sess().opts.optimize {
         OptLevel::Size => {
-            llvm::Attribute::MinSize.unapply_llfn(Function, llfn);
-            llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
-            llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+            to_remove.push(llvm::AttributeKind::MinSize);
+            to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
+            to_remove.push(llvm::AttributeKind::OptimizeNone);
         }
         OptLevel::SizeMin => {
-            llvm::Attribute::MinSize.apply_llfn(Function, llfn);
-            llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
-            llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+            to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
+            to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
+            to_remove.push(llvm::AttributeKind::OptimizeNone);
        }
         OptLevel::No => {
-            llvm::Attribute::MinSize.unapply_llfn(Function, llfn);
-            llvm::Attribute::OptimizeForSize.unapply_llfn(Function, llfn);
-            llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+            to_remove.push(llvm::AttributeKind::MinSize);
+            to_remove.push(llvm::AttributeKind::OptimizeForSize);
+            to_remove.push(llvm::AttributeKind::OptimizeNone);
         }
         _ => {}
     }
+    (to_remove, to_add)
 }
 
 /// Composite function which sets LLVM attributes for function depending on its AST (`#[attribute]`)
@@ -249,30 +260,35 @@ pub fn from_fn_attrs<'ll, 'tcx>(
 ) {
     let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
 
+    let mut to_remove = SmallVec::<[_; 4]>::new();
+    let mut to_add = SmallVec::<[_; 16]>::new();
+
     match codegen_fn_attrs.optimize {
         OptimizeAttr::None => {
-            default_optimisation_attrs(cx.tcx.sess, llfn);
+            let (to_remove_opt, to_add_opt) = default_optimisation_attrs(cx);
+            to_remove.extend(to_remove_opt);
+            to_add.extend(to_add_opt);
         }
         OptimizeAttr::Speed => {
-            llvm::Attribute::MinSize.unapply_llfn(Function, llfn);
-            llvm::Attribute::OptimizeForSize.unapply_llfn(Function, llfn);
-            llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+            to_remove.push(llvm::AttributeKind::MinSize);
+            to_remove.push(llvm::AttributeKind::OptimizeForSize);
+            to_remove.push(llvm::AttributeKind::OptimizeNone);
        }
         OptimizeAttr::Size => {
-            llvm::Attribute::MinSize.apply_llfn(Function, llfn);
-            llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
-            llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+            to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
+            to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
+            to_remove.push(llvm::AttributeKind::OptimizeNone);
         }
     }
 
-    let inline_attr = if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+    let inline = if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
         InlineAttr::Never
     } else if codegen_fn_attrs.inline == InlineAttr::None && instance.def.requires_inline(cx.tcx) {
         InlineAttr::Hint
     } else {
         codegen_fn_attrs.inline
     };
-    inline(cx, llfn, inline_attr);
+    to_add.extend(inline_attr(cx, inline));
 
     // The `uwtable` attribute according to LLVM is:
     //
@@ -291,52 +307,54 @@ pub fn from_fn_attrs<'ll, 'tcx>(
     // You can also find more info on why Windows always requires uwtables here:
     // https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
     if cx.sess().must_emit_unwind_tables() {
-        attributes::emit_uwtable(llfn);
+        to_add.push(uwtable_attr(cx.llcx));
     }
 
     if cx.sess().opts.debugging_opts.profile_sample_use.is_some() {
-        llvm::AddFunctionAttrString(llfn, Function, cstr!("use-sample-profile"));
+        to_add.push(llvm::CreateAttrString(cx.llcx, cstr!("use-sample-profile")));
     }
 
     // FIXME: none of these three functions interact with source level attributes.
-    set_frame_pointer_type(cx, llfn);
-    set_instrument_function(cx, llfn);
-    set_probestack(cx, llfn);
-    set_stackprotector(cx, llfn);
+    to_add.extend(frame_pointer_type_attr(cx));
+    to_add.extend(instrument_function_attr(cx));
+    to_add.extend(probestack_attr(cx));
+    to_add.extend(stackprotector_attr(cx));
 
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
-        Attribute::Cold.apply_llfn(Function, llfn);
+        to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
     }
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_RETURNS_TWICE) {
-        Attribute::ReturnsTwice.apply_llfn(Function, llfn);
+        to_add.push(AttributeKind::ReturnsTwice.create_attr(cx.llcx));
     }
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
-        Attribute::ReadOnly.apply_llfn(Function, llfn);
+        to_add.push(AttributeKind::ReadOnly.create_attr(cx.llcx));
     }
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
-        Attribute::ReadNone.apply_llfn(Function, llfn);
+        to_add.push(AttributeKind::ReadNone.create_attr(cx.llcx));
     }
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
-        naked(llfn, true);
+        to_add.push(AttributeKind::Naked.create_attr(cx.llcx));
     }
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
-        Attribute::NoAlias.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
+        // apply to return place instead of function (unlike all other attributes applied in this function)
+        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
     }
     if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY) {
-        llvm::AddFunctionAttrString(llfn, Function, cstr!("cmse_nonsecure_entry"));
+        to_add.push(llvm::CreateAttrString(cx.llcx, cstr!("cmse_nonsecure_entry")));
     }
     if let Some(align) = codegen_fn_attrs.alignment {
         llvm::set_alignment(llfn, align as usize);
     }
-    sanitize(cx, codegen_fn_attrs.no_sanitize, llfn);
+    to_add.extend(sanitize_attrs(cx, codegen_fn_attrs.no_sanitize));
 
     // Always annotate functions with the target-cpu they are compiled for.
     // Without this, ThinLTO won't inline Rust functions into Clang generated
     // functions (because Clang annotates functions this way too).
-    apply_target_cpu_attr(cx, llfn);
+    to_add.push(target_cpu_attr(cx));
     // tune-cpu is only conveyed through the attribute for our purpose.
     // The target doesn't care; the subtarget reads our attribute.
-    apply_tune_cpu_attr(cx, llfn);
+    to_add.extend(tune_cpu_attr(cx));
 
     let function_features =
         codegen_fn_attrs.target_features.iter().map(|f| f.as_str()).collect::<Vec<&str>>();
@@ -379,22 +397,12 @@ pub fn from_fn_attrs<'ll, 'tcx>(
     // If this function is an import from the environment but the wasm
     // import has a specific module/name, apply them here.
     if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
-        llvm::AddFunctionAttrStringValue(
-            llfn,
-            llvm::AttributePlace::Function,
-            cstr!("wasm-import-module"),
-            &module,
-        );
+        to_add.push(llvm::CreateAttrStringValue(cx.llcx, cstr!("wasm-import-module"), &module));
 
         let name =
             codegen_fn_attrs.link_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id()));
         let name = CString::new(name.as_str()).unwrap();
-        llvm::AddFunctionAttrStringValue(
-            llfn,
-            llvm::AttributePlace::Function,
-            cstr!("wasm-import-name"),
-            &name,
-        );
+        to_add.push(llvm::CreateAttrStringValue(cx.llcx, cstr!("wasm-import-name"), &name));
     }
 
     // The `"wasm"` abi on wasm targets automatically enables the
@@ -414,13 +422,11 @@ pub fn from_fn_attrs<'ll, 'tcx>(
         global_features.extend(function_features.into_iter());
         let features = global_features.join(",");
         let val = CString::new(features).unwrap();
-        llvm::AddFunctionAttrStringValue(
-            llfn,
-            llvm::AttributePlace::Function,
-            cstr!("target-features"),
-            &val,
-        );
+        to_add.push(llvm::CreateAttrStringValue(cx.llcx, cstr!("target-features"), &val));
     }
+
+    attributes::remove_from_llfn(llfn, Function, &to_remove);
+    attributes::apply_to_llfn(llfn, Function, &to_add);
 }
 
 fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<CString> {
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index e15b86aa84fa4..dd3ada443895f 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -95,7 +95,8 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
         // If this codegen unit contains the main function, also create the
         // wrapper here
         if let Some(entry) = maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx) {
-            attributes::sanitize(&cx, SanitizerSet::empty(), entry);
+            let attrs = attributes::sanitize_attrs(&cx, SanitizerSet::empty());
+            attributes::apply_to_llfn(entry, llvm::AttributePlace::Function, &attrs);
         }
 
         // Run replace-all-uses-with for statics that need it
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 780af5bc2af8b..5e78d6fc851a5 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -1,3 +1,4 @@
+use crate::attributes;
 use crate::common::Funclet;
 use crate::context::CodegenCx;
 use crate::llvm::{self, BasicBlock, False};
@@ -22,6 +23,7 @@ use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::Span;
 use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
 use rustc_target::spec::{HasTargetSpec, Target};
+use smallvec::SmallVec;
 use std::borrow::Cow;
 use std::ffi::CStr;
 use std::iter;
@@ -1174,14 +1176,18 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }
 
     fn apply_attrs_to_cleanup_callsite(&mut self, llret: &'ll Value) {
+        let mut attrs = SmallVec::<[_; 2]>::new();
+
         // Cleanup is always the cold path.
-        llvm::Attribute::Cold.apply_callsite(llvm::AttributePlace::Function, llret);
+        attrs.push(llvm::AttributeKind::Cold.create_attr(self.llcx));
 
         // In LLVM versions with deferred inlining (currently, system LLVM < 14),
         // inlining drop glue can lead to exponential size blowup, see #41696 and #92110.
         if !llvm_util::is_rust_llvm() && llvm_util::get_version() < (14, 0, 0) {
-            llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
+            attrs.push(llvm::AttributeKind::NoInline.create_attr(self.llcx));
         }
+
+        attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &attrs);
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index ddc8d72e9bf87..f102becf2bdba 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -520,7 +520,8 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         } else {
             let fty = self.type_variadic_func(&[], self.type_i32());
             let llfn = self.declare_cfn(name, llvm::UnnamedAddr::Global, fty);
-            attributes::apply_target_cpu_attr(self, llfn);
+            let target_cpu = attributes::target_cpu_attr(self);
+            attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[target_cpu]);
             llfn
         }
     }
@@ -550,12 +551,16 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     }
 
     fn set_frame_pointer_type(&self, llfn: &'ll Value) {
-        attributes::set_frame_pointer_type(self, llfn)
+        if let Some(attr) = attributes::frame_pointer_type_attr(self) {
+            attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]);
+        }
     }
 
     fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
-        attributes::apply_target_cpu_attr(self, llfn);
-        attributes::apply_tune_cpu_attr(self, llfn);
+        let mut attrs = SmallVec::<[_; 2]>::new();
+        attrs.push(attributes::target_cpu_attr(self));
+        attrs.extend(attributes::tune_cpu_attr(self));
+        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs);
     }
 
     fn create_used_variable(&self) {
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index a6e06ffa819ca..43d1a1f238968 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -18,8 +18,8 @@ use crate::llvm;
 use crate::llvm::AttributePlace::Function;
 use crate::type_::Type;
 use crate::value::Value;
-use rustc_codegen_ssa::traits::*;
 use rustc_middle::ty::Ty;
+use smallvec::SmallVec;
 use tracing::debug;
 
 /// Declare a function.
@@ -41,12 +41,21 @@ fn declare_raw_fn<'ll>(
     llvm::SetFunctionCallConv(llfn, callconv);
     llvm::SetUnnamedAddress(llfn, unnamed);
 
+    let mut attrs_to_remove = SmallVec::<[_; 4]>::new();
+    let mut attrs_to_add = SmallVec::<[_; 4]>::new();
+
     if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.disable_redzone) {
-        llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
+        attrs_to_add.push(llvm::AttributeKind::NoRedZone.create_attr(cx.llcx));
     }
 
-    attributes::default_optimisation_attrs(cx.tcx.sess, llfn);
-    attributes::non_lazy_bind(cx.sess(), llfn);
+    let (to_remove, to_add) = attributes::default_optimisation_attrs(cx);
+    attrs_to_remove.extend(to_remove);
+    attrs_to_add.extend(to_add);
+
+    attrs_to_add.extend(attributes::non_lazy_bind_attr(cx));
+
+    attributes::remove_from_llfn(llfn, Function, &attrs_to_remove);
+    attributes::apply_to_llfn(llfn, Function, &attrs_to_add);
 
     llfn
 }
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 657f1fcf31e83..31d1460e178ba 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -155,7 +155,7 @@ pub enum DLLStorageClass {
 /// though it is not ABI compatible (since it's a C++ enum)
 #[repr(C)]
 #[derive(Copy, Clone, Debug)]
-pub enum Attribute {
+pub enum AttributeKind {
     AlwaysInline = 0,
     ByVal = 1,
     Cold = 2,
@@ -643,6 +643,9 @@ extern "C" {
     pub type ConstantInt;
 }
+extern "C" {
+    pub type Attribute;
+}
 extern "C" {
     pub type Metadata;
 }
@@ -1169,6 +1172,21 @@
     ) -> Option<&Value>;
     pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool);
 
+    // Operations on attributes
+    pub fn LLVMRustCreateAttrNoValue(C: &Context, attr: AttributeKind) -> &Attribute;
+    pub fn LLVMRustCreateAttrString(C: &Context, Name: *const c_char) -> &Attribute;
+    pub fn LLVMRustCreateAttrStringValue(
+        C: &Context,
+        Name: *const c_char,
+        Value: *const c_char,
+    ) -> &Attribute;
+    pub fn LLVMRustCreateAlignmentAttr(C: &Context, bytes: u64) -> &Attribute;
+    pub fn LLVMRustCreateDereferenceableAttr(C: &Context, bytes: u64) -> &Attribute;
+    pub fn LLVMRustCreateDereferenceableOrNullAttr(C: &Context, bytes: u64) -> &Attribute;
+    pub fn LLVMRustCreateByValAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
+    pub fn LLVMRustCreateStructRetAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
+    pub fn LLVMRustCreateUWTableAttr(C: &Context, async_: bool) -> &Attribute;
+
     // Operations on functions
     pub fn LLVMRustGetOrInsertFunction<'a>(
         M: &'a Module,
@@ -1177,20 +1195,18 @@
         FunctionTy: &'a Type,
     ) -> &'a Value;
     pub fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint);
-    pub fn LLVMRustAddAlignmentAttr(Fn: &Value, index: c_uint, bytes: u32);
-    pub fn LLVMRustAddDereferenceableAttr(Fn: &Value, index: c_uint, bytes: u64);
-    pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: &Value, index: c_uint, bytes: u64);
-    pub fn LLVMRustAddByValAttr(Fn: &Value, index: c_uint, ty: &Type);
-    pub fn LLVMRustAddStructRetAttr(Fn: &Value, index: c_uint, ty: &Type);
-    pub fn LLVMRustAddFunctionAttribute(Fn: &Value, index: c_uint, attr: Attribute);
-    pub fn LLVMRustEmitUWTableAttr(Fn: &Value, async_: bool);
-    pub fn LLVMRustAddFunctionAttrStringValue(
+    pub fn LLVMRustAddFunctionAttributes<'a>(
+        Fn: &'a Value,
+        index: c_uint,
+        Attrs: *const &'a Attribute,
+        AttrsLen: size_t,
+    );
+    pub fn LLVMRustRemoveFunctionAttributes(
         Fn: &Value,
         index: c_uint,
-        Name: *const c_char,
-        Value: *const c_char,
+        Attrs: *const AttributeKind,
+        AttrsLen: size_t,
     );
-    pub fn LLVMRustRemoveFunctionAttributes(Fn: &Value, index: c_uint, attr: Attribute);
 
     // Operations on parameters
     pub fn LLVMIsAArgument(Val: &Value) -> Option<&Value>;
@@ -1211,13 +1227,12 @@
 
     // Operations on call sites
     pub fn LLVMSetInstructionCallConv(Instr: &Value, CC: c_uint);
-    pub fn LLVMRustAddCallSiteAttribute(Instr: &Value, index: c_uint, attr: Attribute);
-    pub fn LLVMRustAddCallSiteAttrString(Instr: &Value, index: c_uint, Name: *const c_char);
-    pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: &Value, index: c_uint, bytes: u32);
-    pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: &Value, index: c_uint, bytes: u64);
-    pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: &Value, index: c_uint, bytes: u64);
-    pub fn LLVMRustAddByValCallSiteAttr(Instr: &Value, index: c_uint, ty: &Type);
-    pub fn LLVMRustAddStructRetCallSiteAttr(Instr: &Value, index: c_uint, ty: &Type);
+    pub fn LLVMRustAddCallSiteAttributes<'a>(
+        Instr: &'a Value,
+        index: c_uint,
+        Attrs: *const &'a Attribute,
+        AttrsLen: size_t,
+    );
 
     // Operations on load/store instructions (only)
     pub fn LLVMSetVolatile(MemoryAccessInst: &Value, volatile: Bool);
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
index 8586b0466c8d0..1c1c4e0a15946 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -31,24 +31,58 @@ impl LLVMRustResult {
     }
 }
 
-pub fn EmitUWTableAttr(llfn: &Value, async_: bool) {
-    unsafe { LLVMRustEmitUWTableAttr(llfn, async_) }
+pub fn AddFunctionAttributes<'ll>(llfn: &'ll Value, idx: AttributePlace, attrs: &[&'ll Attribute]) {
+    unsafe {
+        LLVMRustAddFunctionAttributes(llfn, idx.as_uint(), attrs.as_ptr(), attrs.len());
+    }
 }
 
-pub fn AddFunctionAttrStringValue(llfn: &Value, idx: AttributePlace, attr: &CStr, value: &CStr) {
+pub fn RemoveFunctionAttributes(llfn: &Value, idx: AttributePlace, attrs: &[AttributeKind]) {
     unsafe {
-        LLVMRustAddFunctionAttrStringValue(llfn, idx.as_uint(), attr.as_ptr(), value.as_ptr())
+        LLVMRustRemoveFunctionAttributes(llfn, idx.as_uint(), attrs.as_ptr(), attrs.len());
     }
 }
 
-pub fn AddFunctionAttrString(llfn: &Value, idx: AttributePlace, attr: &CStr) {
+pub fn AddCallSiteAttributes<'ll>(
+    callsite: &'ll Value,
+    idx: AttributePlace,
+    attrs: &[&'ll Attribute],
+) {
     unsafe {
-        LLVMRustAddFunctionAttrStringValue(llfn, idx.as_uint(), attr.as_ptr(), std::ptr::null())
+        LLVMRustAddCallSiteAttributes(callsite, idx.as_uint(), attrs.as_ptr(), attrs.len());
     }
 }
 
-pub fn AddCallSiteAttrString(callsite: &Value, idx: AttributePlace, attr: &CStr) {
-    unsafe { LLVMRustAddCallSiteAttrString(callsite, idx.as_uint(), attr.as_ptr()) }
+pub fn CreateAttrStringValue<'ll>(llcx: &'ll Context, attr: &CStr, value: &CStr) -> &'ll Attribute {
+    unsafe { LLVMRustCreateAttrStringValue(llcx, attr.as_ptr(), value.as_ptr()) }
+}
+
+pub fn CreateAttrString<'ll>(llcx: &'ll Context, attr: &CStr) -> &'ll Attribute {
+    unsafe { LLVMRustCreateAttrStringValue(llcx, attr.as_ptr(), std::ptr::null()) }
+}
+
+pub fn CreateAlignmentAttr(llcx: &Context, bytes: u64) -> &Attribute {
+    unsafe { LLVMRustCreateAlignmentAttr(llcx, bytes) }
+}
+
+pub fn CreateDereferenceableAttr(llcx: &Context, bytes: u64) -> &Attribute {
+    unsafe { LLVMRustCreateDereferenceableAttr(llcx, bytes) }
+}
+
+pub fn CreateDereferenceableOrNullAttr(llcx: &Context, bytes: u64) -> &Attribute {
+    unsafe { LLVMRustCreateDereferenceableOrNullAttr(llcx, bytes) }
+}
+
+pub fn CreateByValAttr<'ll>(llcx: &'ll Context, ty: &'ll Type) -> &'ll Attribute {
+    unsafe { LLVMRustCreateByValAttr(llcx, ty) }
+}
+
+pub fn CreateStructRetAttr<'ll>(llcx: &'ll Context, ty: &'ll Type) -> &'ll Attribute {
+    unsafe { LLVMRustCreateStructRetAttr(llcx, ty) }
+}
+
+pub fn CreateUWTableAttr(llcx: &Context, async_: bool) -> &Attribute {
+    unsafe { LLVMRustCreateUWTableAttr(llcx, async_) }
 }
 
 #[derive(Copy, Clone)]
@@ -132,25 +166,10 @@ pub fn set_thread_local_mode(global: &Value, mode: ThreadLocalMode) {
     }
 }
 
-impl Attribute {
-    pub fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
-        unsafe { LLVMRustAddFunctionAttribute(llfn, idx.as_uint(), *self) }
-    }
-
-    pub fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
-        unsafe { LLVMRustAddCallSiteAttribute(callsite, idx.as_uint(), *self) }
-    }
-
-    pub fn unapply_llfn(&self, idx: AttributePlace, llfn: &Value) {
-        unsafe { LLVMRustRemoveFunctionAttributes(llfn, idx.as_uint(), *self) }
-    }
-
-    pub fn toggle_llfn(&self, idx: AttributePlace, llfn: &Value, set: bool) {
-        if set {
-            self.apply_llfn(idx, llfn);
-        } else {
-            self.unapply_llfn(idx, llfn);
-        }
+impl AttributeKind {
+    /// Create an LLVM Attribute with no associated value.
+    pub fn create_attr(self, llcx: &Context) -> &Attribute {
+        unsafe { LLVMRustCreateAttrNoValue(llcx, self) }
     }
 }
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index c8f31adbfd9ff..d627af48ba58e 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -232,142 +232,103 @@ static Attribute::AttrKind fromRust(LLVMRustAttribute Kind) {
   report_fatal_error("bad AttributeKind");
 }
 
-template<typename T> static inline void AddAttribute(T *t, unsigned Index, Attribute Attr) {
+template<typename T> static inline void AddAttributes(T *t, unsigned Index,
+                                                      LLVMAttributeRef *Attrs, size_t AttrsLen) {
+  AttributeList PAL = t->getAttributes();
+  AttributeList PALNew;
 #if LLVM_VERSION_LT(14, 0)
-  t->addAttribute(Index, Attr);
+  AttrBuilder B;
+  for (LLVMAttributeRef Attr : makeArrayRef(Attrs, AttrsLen))
+    B.addAttribute(unwrap(Attr));
+  PALNew = PAL.addAttributes(t->getContext(), Index, B);
 #else
-  t->addAttributeAtIndex(Index, Attr);
+  AttrBuilder B(t->getContext());
+  for (LLVMAttributeRef Attr : makeArrayRef(Attrs, AttrsLen))
+    B.addAttribute(unwrap(Attr));
+  PALNew = PAL.addAttributesAtIndex(t->getContext(), Index, B);
 #endif
+  t->setAttributes(PALNew);
 }
 
-extern "C" void LLVMRustAddCallSiteAttribute(LLVMValueRef Instr, unsigned Index,
-                                             LLVMRustAttribute RustAttr) {
-  CallBase *Call = unwrap<CallBase>(Instr);
-  Attribute Attr = Attribute::get(Call->getContext(), fromRust(RustAttr));
-  AddAttribute(Call, Index, Attr);
-}
-
-extern "C" void LLVMRustAddCallSiteAttrString(LLVMValueRef Instr, unsigned Index,
-                                              const char *Name) {
-  CallBase *Call = unwrap<CallBase>(Instr);
-  Attribute Attr = Attribute::get(Call->getContext(), Name);
-  AddAttribute(Call, Index, Attr);
-}
-
-extern "C" void LLVMRustAddAlignmentCallSiteAttr(LLVMValueRef Instr,
-                                                 unsigned Index,
-                                                 uint32_t Bytes) {
-  CallBase *Call = unwrap<CallBase>(Instr);
-  Attribute Attr = Attribute::getWithAlignment(Call->getContext(), Align(Bytes));
-  AddAttribute(Call, Index, Attr);
+template<typename T> static inline void RemoveAttributes(T *t, unsigned Index,
+                                                         LLVMRustAttribute *RustAttrs,
+                                                         size_t RustAttrsLen) {
+  AttributeList PAL = t->getAttributes();
+  AttributeList PALNew;
+#if LLVM_VERSION_LT(14, 0)
+  AttrBuilder B;
+  for (LLVMRustAttribute RustAttr : makeArrayRef(RustAttrs, RustAttrsLen))
+    B.addAttribute(fromRust(RustAttr));
+  PALNew = PAL.removeAttributes(t->getContext(), Index, B);
+#else
+  AttributeMask Mask;
+  for (LLVMRustAttribute RustAttr : makeArrayRef(RustAttrs, RustAttrsLen))
+    Mask.addAttribute(fromRust(RustAttr));
+  PALNew = PAL.removeAttributesAtIndex(t->getContext(), Index, Mask);
+#endif
+  t->setAttributes(PALNew);
 }
 
-extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr,
-                                                       unsigned Index,
-                                                       uint64_t Bytes) {
-  CallBase *Call = unwrap<CallBase>(Instr);
-  Attribute Attr = Attribute::getWithDereferenceableBytes(Call->getContext(), Bytes);
-  AddAttribute(Call, Index, Attr);
+extern "C" void LLVMRustAddFunctionAttributes(LLVMValueRef Fn, unsigned Index,
+                                              LLVMAttributeRef *Attrs, size_t AttrsLen) {
+  Function *F = unwrap<Function>(Fn);
+  AddAttributes(F, Index, Attrs, AttrsLen);
 }
 
-extern "C" void LLVMRustAddDereferenceableOrNullCallSiteAttr(LLVMValueRef Instr,
-                                                             unsigned Index,
-                                                             uint64_t Bytes) {
-  CallBase *Call = unwrap<CallBase>(Instr);
-  Attribute Attr = Attribute::getWithDereferenceableOrNullBytes(Call->getContext(), Bytes);
-  AddAttribute(Call, Index, Attr);
+extern "C" void LLVMRustRemoveFunctionAttributes(LLVMValueRef Fn, unsigned Index,
+                                                 LLVMRustAttribute *RustAttrs,
+                                                 size_t RustAttrsLen) {
+  Function *F = unwrap<Function>(Fn);
+  RemoveAttributes(F, Index, RustAttrs, RustAttrsLen);
 }
 
-extern "C" void LLVMRustAddByValCallSiteAttr(LLVMValueRef Instr, unsigned Index,
-                                             LLVMTypeRef Ty) {
+extern "C" void LLVMRustAddCallSiteAttributes(LLVMValueRef Instr, unsigned Index,
+                                              LLVMAttributeRef *Attrs, size_t AttrsLen) {
   CallBase *Call = unwrap<CallBase>(Instr);
-  Attribute Attr = Attribute::getWithByValType(Call->getContext(), unwrap(Ty));
-  AddAttribute(Call, Index, Attr);
+  AddAttributes(Call, Index, Attrs, AttrsLen);
 }
 
-extern "C" void LLVMRustAddStructRetCallSiteAttr(LLVMValueRef Instr, unsigned Index,
-                                                 LLVMTypeRef Ty) {
-  CallBase *Call = unwrap<CallBase>(Instr);
-  Attribute Attr = Attribute::getWithStructRetType(Call->getContext(), unwrap(Ty));
-  AddAttribute(Call, Index, Attr);
+extern "C" LLVMAttributeRef LLVMRustCreateAttrNoValue(LLVMContextRef C,
+                                                      LLVMRustAttribute RustAttr) {
+  return wrap(Attribute::get(*unwrap(C), fromRust(RustAttr)));
 }
 
-extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index,
-                                             LLVMRustAttribute RustAttr) {
-  Function *A = unwrap<Function>(Fn);
-  Attribute Attr = Attribute::get(A->getContext(), fromRust(RustAttr));
-  AddAttribute(A, Index, Attr);
+extern "C" LLVMAttributeRef LLVMRustCreateAttrStringValue(LLVMContextRef C,
                                                          const char *Name,
                                                          const char *Value) {
+  return wrap(Attribute::get(*unwrap(C), StringRef(Name), StringRef(Value)));
 }
 
-extern "C" void LLVMRustAddAlignmentAttr(LLVMValueRef Fn,
-                                         unsigned Index,
-                                         uint32_t Bytes) {
-  Function *A = unwrap<Function>(Fn);
-  AddAttribute(A, Index, Attribute::getWithAlignment(
-      A->getContext(), llvm::Align(Bytes)));
+extern "C" LLVMAttributeRef LLVMRustCreateAlignmentAttr(LLVMContextRef C,
+                                                        uint64_t Bytes) {
+  return wrap(Attribute::getWithAlignment(*unwrap(C), llvm::Align(Bytes)));
 }
 
-extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index,
-                                               uint64_t Bytes) {
-  Function *A = unwrap<Function>(Fn);
-  AddAttribute(A, Index, Attribute::getWithDereferenceableBytes(A->getContext(),
-                                                                Bytes));
+extern "C" LLVMAttributeRef LLVMRustCreateDereferenceableAttr(LLVMContextRef C,
                                                              uint64_t Bytes) {
+  return wrap(Attribute::getWithDereferenceableBytes(*unwrap(C), Bytes));
 }
 
-extern "C" void LLVMRustAddDereferenceableOrNullAttr(LLVMValueRef Fn,
-                                                     unsigned Index,
-                                                     uint64_t Bytes) {
-  Function *A = unwrap<Function>(Fn);
-  AddAttribute(A, Index, Attribute::getWithDereferenceableOrNullBytes(
-                             A->getContext(), Bytes));
+extern "C" LLVMAttributeRef LLVMRustCreateDereferenceableOrNullAttr(LLVMContextRef C,
+                                                                    uint64_t Bytes) {
+  return wrap(Attribute::getWithDereferenceableOrNullBytes(*unwrap(C), Bytes));
 }
 
-extern "C" void LLVMRustAddByValAttr(LLVMValueRef Fn, unsigned Index,
-                                     LLVMTypeRef Ty) {
-  Function *F = unwrap<Function>(Fn);
-  Attribute Attr = Attribute::getWithByValType(F->getContext(), unwrap(Ty));
-  AddAttribute(F, Index, Attr);
+extern "C" LLVMAttributeRef LLVMRustCreateByValAttr(LLVMContextRef C, LLVMTypeRef Ty) {
+  return wrap(Attribute::getWithByValType(*unwrap(C), unwrap(Ty)));
 }
 
-extern "C" void LLVMRustAddStructRetAttr(LLVMValueRef Fn, unsigned Index,
-                                         LLVMTypeRef Ty) {
-  Function *F = unwrap<Function>(Fn);
-  Attribute Attr = Attribute::getWithStructRetType(F->getContext(), unwrap(Ty));
-  AddAttribute(F, Index, Attr);
+extern "C" LLVMAttributeRef LLVMRustCreateStructRetAttr(LLVMContextRef C, LLVMTypeRef Ty) {
+  return wrap(Attribute::getWithStructRetType(*unwrap(C), unwrap(Ty)));
 }
 
-extern "C" void LLVMRustEmitUWTableAttr(LLVMValueRef Fn, bool Async) {
-  Function *F = unwrap<Function>(Fn);
+extern "C" LLVMAttributeRef LLVMRustCreateUWTableAttr(LLVMContextRef C, bool Async) {
 #if LLVM_VERSION_LT(15, 0)
-  Attribute Attr = Attribute::get(F->getContext(), Attribute::UWTable);
-#else
-  Attribute Attr = Attribute::getWithUWTableKind(
-      F->getContext(), Async ? UWTableKind::Async : UWTableKind::Sync);
-#endif
-  AddAttribute(F, AttributeList::AttrIndex::FunctionIndex, Attr);
-}
-
-extern "C" void LLVMRustAddFunctionAttrStringValue(LLVMValueRef Fn,
-                                                   unsigned Index,
-                                                   const char *Name,
-                                                   const char *Value) {
-  Function *F = unwrap<Function>(Fn);
-  AddAttribute(F, Index, Attribute::get(
-      F->getContext(), StringRef(Name), StringRef(Value)));
-}
-
-extern "C" void LLVMRustRemoveFunctionAttributes(LLVMValueRef Fn,
-                                                 unsigned Index,
-                                                 LLVMRustAttribute RustAttr) {
-  Function *F = unwrap<Function>(Fn);
-  AttributeList PAL = F->getAttributes();
-  AttributeList PALNew;
-#if LLVM_VERSION_LT(14, 0)
-  PALNew = PAL.removeAttribute(F->getContext(), Index, fromRust(RustAttr));
+  return wrap(Attribute::get(*unwrap(C), Attribute::UWTable));
 #else
-  PALNew = PAL.removeAttributeAtIndex(F->getContext(), Index, fromRust(RustAttr));
+  return wrap(Attribute::getWithUWTableKind(
+      *unwrap(C), Async ? UWTableKind::Async : UWTableKind::Sync));
 #endif
-  F->setAttributes(PALNew);
 }
 
 // Enable a fast-math flag