From e612bf4d0b6f08623092902c34a504e932388664 Mon Sep 17 00:00:00 2001 From: Filip Navara Date: Thu, 28 Mar 2024 13:23:10 +0100 Subject: [PATCH 001/132] Remove TargetAbi.CppCodegen and TargetAbi.Jit, they are dead code (#100393) --- src/coreclr/nativeaot/Runtime/Portable/CMakeLists.txt | 2 +- .../tools/Common/TypeSystem/Common/TargetDetails.cs | 10 +--------- .../Compiler/DependencyAnalysis/CanonicalEETypeNode.cs | 2 +- .../Compiler/DependencyAnalysis/GCStaticEETypeNode.cs | 1 - .../DependencyAnalysis/NecessaryCanonicalEETypeNode.cs | 2 +- .../DependencyAnalysis/ReflectionFieldMapNode.cs | 4 ---- .../JitInterface/CorInfoImpl.RyuJit.cs | 3 --- 7 files changed, 4 insertions(+), 20 deletions(-) diff --git a/src/coreclr/nativeaot/Runtime/Portable/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/Portable/CMakeLists.txt index 5d19a4eb9eda04..b9d2c3f5c3c146 100644 --- a/src/coreclr/nativeaot/Runtime/Portable/CMakeLists.txt +++ b/src/coreclr/nativeaot/Runtime/Portable/CMakeLists.txt @@ -1,6 +1,6 @@ project(PortableRuntime) -# Portable version of the runtime is designed to be used with CppCodeGen or WASM. +# Portable version of the runtime is designed to be used with WASM. # It should be written in pure C/C++, with no assembly code. 
add_definitions(-DUSE_PORTABLE_HELPERS) diff --git a/src/coreclr/tools/Common/TypeSystem/Common/TargetDetails.cs b/src/coreclr/tools/Common/TypeSystem/Common/TargetDetails.cs index 16b0d9e6a3c757..42ccc8994e64a9 100644 --- a/src/coreclr/tools/Common/TypeSystem/Common/TargetDetails.cs +++ b/src/coreclr/tools/Common/TypeSystem/Common/TargetDetails.cs @@ -37,14 +37,6 @@ public enum TargetAbi /// model for armel execution model /// NativeAotArmel, - /// - /// Jit runtime ABI - /// - Jit, - /// - /// Cross-platform portable C++ codegen - /// - CppCodegen, } /// @@ -102,7 +94,7 @@ public bool SupportsRelativePointers { get { - return (Abi != TargetAbi.CppCodegen) && (Architecture != TargetArchitecture.Wasm32); + return Architecture != TargetArchitecture.Wasm32; } } diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/CanonicalEETypeNode.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/CanonicalEETypeNode.cs index c86e85a3be546a..615d07e932be95 100644 --- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/CanonicalEETypeNode.cs +++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/CanonicalEETypeNode.cs @@ -24,7 +24,7 @@ public CanonicalEETypeNode(NodeFactory factory, TypeDesc type) : base(factory, t Debug.Assert(!type.IsCanonicalDefinitionType(CanonicalFormKind.Any)); Debug.Assert(type.IsCanonicalSubtype(CanonicalFormKind.Any)); Debug.Assert(type == type.ConvertToCanonForm(CanonicalFormKind.Specific)); - Debug.Assert(!type.IsMdArray || factory.Target.Abi == TargetAbi.CppCodegen); + Debug.Assert(!type.IsMdArray); } public override bool StaticDependenciesAreComputed => true; diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/GCStaticEETypeNode.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/GCStaticEETypeNode.cs index addb715ec6772b..f8217164be842c 100644 --- 
a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/GCStaticEETypeNode.cs +++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/GCStaticEETypeNode.cs @@ -64,7 +64,6 @@ protected override ObjectData GetDehydratableData(NodeFactory factory, bool relo dataBuilder.AddSymbol(this); // +1 for SyncBlock (static size already includes MethodTable) - Debug.Assert(factory.Target.Abi == TargetAbi.NativeAot || factory.Target.Abi == TargetAbi.CppCodegen); int totalSize = (_gcMap.Size + 1) * _target.PointerSize; // We only need to check for containsPointers because ThreadStatics are always allocated diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/NecessaryCanonicalEETypeNode.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/NecessaryCanonicalEETypeNode.cs index 7fda2963953282..1746741ecdcc41 100644 --- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/NecessaryCanonicalEETypeNode.cs +++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/NecessaryCanonicalEETypeNode.cs @@ -18,7 +18,7 @@ public NecessaryCanonicalEETypeNode(NodeFactory factory, TypeDesc type) : base(f Debug.Assert(!type.IsCanonicalDefinitionType(CanonicalFormKind.Any)); Debug.Assert(type.IsCanonicalSubtype(CanonicalFormKind.Any)); Debug.Assert(type == type.ConvertToCanonForm(CanonicalFormKind.Specific)); - Debug.Assert(!type.IsMdArray || factory.Target.Abi == TargetAbi.CppCodegen); + Debug.Assert(!type.IsMdArray); } protected override void OutputInterfaceMap(NodeFactory factory, ref ObjectDataBuilder objData) diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/ReflectionFieldMapNode.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/ReflectionFieldMapNode.cs index 8ea831f1efdb8f..c657327d547824 100644 --- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/ReflectionFieldMapNode.cs +++ 
b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/ReflectionFieldMapNode.cs @@ -59,10 +59,6 @@ public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) if (field.IsLiteral || field.HasRva) continue; - // CppCodegen: implement thread statics - if (factory.Target.Abi == TargetAbi.CppCodegen && field.IsThreadStatic) - continue; - FieldTableFlags flags; if (field.IsStatic) { diff --git a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs index 1eafeaf01db7a1..2be89e7374f59e 100644 --- a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs +++ b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs @@ -950,9 +950,6 @@ private ObjectNode.ObjectData EncodeEHInfo() RelocType.IMAGE_REL_BASED_ABSOLUTE : RelocType.IMAGE_REL_BASED_RELPTR32; - if (_compilation.NodeFactory.Target.Abi == TargetAbi.Jit) - rel = RelocType.IMAGE_REL_BASED_REL32; - builder.EmitReloc(typeSymbol, rel); } break; From e74f170e39b67c9392e6a9eca6f0c172b5fd84ef Mon Sep 17 00:00:00 2001 From: Egor Bogatov Date: Thu, 28 Mar 2024 17:21:01 +0100 Subject: [PATCH 002/132] revert xor before setcc (#100298) --- src/coreclr/jit/codegenxarch.cpp | 31 ++----------------------------- 1 file changed, 2 insertions(+), 29 deletions(-) diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 47ce1fea52d784..314346300b0056 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -6836,19 +6836,6 @@ void CodeGen::genCompareFloat(GenTree* treeNode) ins = (op1Type == TYP_FLOAT) ? 
INS_ucomiss : INS_ucomisd; cmpAttr = emitTypeSize(op1Type); - var_types targetType = treeNode->TypeGet(); - - // Clear target reg in advance via "xor reg,reg" to avoid movzx after SETCC - if ((targetReg != REG_NA) && (op1->GetRegNum() != targetReg) && (op2->GetRegNum() != targetReg) && - !varTypeIsByte(targetType)) - { - regMaskTP targetRegMask = genRegMask(targetReg); - if (((op1->gtGetContainedRegMask() | op2->gtGetContainedRegMask()) & targetRegMask) == 0) - { - instGen_Set_Reg_To_Zero(emitTypeSize(TYP_I_IMPL), targetReg); - targetType = TYP_UBYTE; // just a tip for inst_SETCC that movzx is not needed - } - } GetEmitter()->emitInsBinary(ins, cmpAttr, op1, op2); // Are we evaluating this into a register? @@ -6865,7 +6852,7 @@ void CodeGen::genCompareFloat(GenTree* treeNode) condition = GenCondition(GenCondition::P); } - inst_SETCC(condition, targetType, targetReg); + inst_SETCC(condition, treeNode->TypeGet(), targetReg); genProduceReg(tree); } } @@ -7005,22 +6992,8 @@ void CodeGen::genCompareInt(GenTree* treeNode) // TYP_UINT and TYP_ULONG should not appear here, only small types can be unsigned assert(!varTypeIsUnsigned(type) || varTypeIsSmall(type)); - var_types targetType = tree->TypeGet(); - if (!canReuseFlags || !genCanAvoidEmittingCompareAgainstZero(tree, type)) { - // Clear target reg in advance via "xor reg,reg" to avoid movzx after SETCC - if ((targetReg != REG_NA) && (op1->GetRegNum() != targetReg) && (op2->GetRegNum() != targetReg) && - !varTypeIsByte(targetType)) - { - regMaskTP targetRegMask = genRegMask(targetReg); - if (((op1->gtGetContainedRegMask() | op2->gtGetContainedRegMask()) & targetRegMask) == 0) - { - instGen_Set_Reg_To_Zero(emitTypeSize(TYP_I_IMPL), targetReg); - targetType = TYP_UBYTE; // just a tip for inst_SETCC that movzx is not needed - } - } - emitAttr size = emitTypeSize(type); bool canSkip = compiler->opts.OptimizationEnabled() && (ins == INS_cmp) && !op1->isUsedFromMemory() && !op2->isUsedFromMemory() && 
emit->IsRedundantCmp(size, op1->GetRegNum(), op2->GetRegNum()); @@ -7034,7 +7007,7 @@ void CodeGen::genCompareInt(GenTree* treeNode) // Are we evaluating this into a register? if (targetReg != REG_NA) { - inst_SETCC(GenCondition::FromIntegralRelop(tree), targetType, targetReg); + inst_SETCC(GenCondition::FromIntegralRelop(tree), tree->TypeGet(), targetReg); genProduceReg(tree); } } From a5b9a6a124cabe42cfa971d9cb72337eadd9e744 Mon Sep 17 00:00:00 2001 From: Jan Vorlicek Date: Thu, 28 Mar 2024 17:21:43 +0100 Subject: [PATCH 003/132] Fix exception interception on ARM64 with new EH (#100349) The interception stack frame was originally set to the caller SP on arm64, but the checks in CallCatchFunclet and ExInfo::PopExInfos were using the current frame SP instead. This change sets the interception frame to the current frame SP on arm/arm64 too to fix the issue. --- src/coreclr/debug/ee/debugger.cpp | 21 +++++++++++++++------ src/coreclr/vm/exceptionhandling.cpp | 2 +- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/src/coreclr/debug/ee/debugger.cpp b/src/coreclr/debug/ee/debugger.cpp index b97f76c4a03c62..62b9f3c99c9c29 100644 --- a/src/coreclr/debug/ee/debugger.cpp +++ b/src/coreclr/debug/ee/debugger.cpp @@ -11566,17 +11566,26 @@ HRESULT Debugger::GetAndSendInterceptCommand(DebuggerIPCEvent *event) // // Set up the VM side of intercepting. 
// + StackFrame sfInterceptFramePointer; + if (g_isNewExceptionHandlingEnabled) + { + sfInterceptFramePointer = StackFrame::FromRegDisplay(&(csi.m_activeFrame.registers)); + } + else + { +#if defined (TARGET_ARM )|| defined (TARGET_ARM64 ) + // ARM requires the caller stack pointer, not the current stack pointer + sfInterceptFramePointer = CallerStackFrame::FromRegDisplay(&(csi.m_activeFrame.registers)); +#else + sfInterceptFramePointer = StackFrame::FromRegDisplay(&(csi.m_activeFrame.registers)); +#endif + } if (pExState->GetDebuggerState()->SetDebuggerInterceptInfo(csi.m_activeFrame.pIJM, pThread, csi.m_activeFrame.MethodToken, csi.m_activeFrame.md, foundOffset, -#if defined (TARGET_ARM )|| defined (TARGET_ARM64 ) - // ARM requires the caller stack pointer, not the current stack pointer - CallerStackFrame::FromRegDisplay(&(csi.m_activeFrame.registers)), -#else - StackFrame::FromRegDisplay(&(csi.m_activeFrame.registers)), -#endif + sfInterceptFramePointer, pExState->GetFlags() )) { diff --git a/src/coreclr/vm/exceptionhandling.cpp b/src/coreclr/vm/exceptionhandling.cpp index c9ae699a517077..d206fb419fd751 100644 --- a/src/coreclr/vm/exceptionhandling.cpp +++ b/src/coreclr/vm/exceptionhandling.cpp @@ -8090,7 +8090,7 @@ static BOOL CheckExceptionInterception(StackFrameIterator* pStackFrameIterator, reinterpret_cast(&(sfInterceptStackFrame.SP)), NULL, NULL); - TADDR spForDebugger = GetSpForDiagnosticReporting(pStackFrameIterator->m_crawl.GetRegisterSet()); + TADDR spForDebugger = GetRegdisplaySP(pStackFrameIterator->m_crawl.GetRegisterSet()); if ((pExInfo->m_passNumber == 1) || ((pInterceptMD == pMD) && (sfInterceptStackFrame == spForDebugger))) From d80a09ff6b7418dd219e114c604feca3398fea1e Mon Sep 17 00:00:00 2001 From: Eirik Tsarpalis Date: Thu, 28 Mar 2024 17:03:05 +0000 Subject: [PATCH 004/132] Fix a linker warning with the JsonSerializerOptionsUpdateHandler (#100362) * * Fixes a linker warning with JsonSerializerOptionsUpdateHandler. 
* Makes a few cleanups in the MemberAccessor clases. * Ensures that the MemberAccessor being used is a singleton. * Fix a number of trimmability warnings. --- .../JsonSerializerOptionsUpdateHandler.cs | 8 +- .../DefaultJsonTypeInfoResolver.Helpers.cs | 22 +++-- .../Serialization/Metadata/MemberAccessor.cs | 13 +-- .../ReflectionEmitCachingMemberAccessor.cs | 86 +++++++++++-------- .../Metadata/ReflectionEmitMemberAccessor.cs | 37 +++++--- .../Metadata/ReflectionMemberAccessor.cs | 24 ++++-- 6 files changed, 115 insertions(+), 75 deletions(-) diff --git a/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/JsonSerializerOptionsUpdateHandler.cs b/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/JsonSerializerOptionsUpdateHandler.cs index a2beec2eae3b76..3675f269d8e2fd 100644 --- a/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/JsonSerializerOptionsUpdateHandler.cs +++ b/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/JsonSerializerOptionsUpdateHandler.cs @@ -2,9 +2,7 @@ // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; -using System.Diagnostics.CodeAnalysis; using System.Reflection.Metadata; -using System.Runtime.CompilerServices; using System.Text.Json; using System.Text.Json.Serialization.Metadata; @@ -25,11 +23,7 @@ public static void ClearCache(Type[]? 
types) options.Key.ClearCaches(); } - if (RuntimeFeature.IsDynamicCodeSupported) - { - // Flush the dynamic method cache - ReflectionEmitCachingMemberAccessor.Clear(); - } + DefaultJsonTypeInfoResolver.ClearMemberAccessorCaches(); } } } diff --git a/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/DefaultJsonTypeInfoResolver.Helpers.cs b/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/DefaultJsonTypeInfoResolver.Helpers.cs index 75552ccf382fbd..3a39315eba51c0 100644 --- a/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/DefaultJsonTypeInfoResolver.Helpers.cs +++ b/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/DefaultJsonTypeInfoResolver.Helpers.cs @@ -7,6 +7,7 @@ using System.Reflection; using System.Runtime.CompilerServices; using System.Text.Json.Reflection; +using System.Threading; namespace System.Text.Json.Serialization.Metadata { @@ -14,23 +15,30 @@ public partial class DefaultJsonTypeInfoResolver { internal static MemberAccessor MemberAccessor { + [RequiresUnreferencedCode(JsonSerializer.SerializationRequiresDynamicCodeMessage)] [RequiresDynamicCode(JsonSerializer.SerializationRequiresDynamicCodeMessage)] get { - return s_memberAccessor ??= + return s_memberAccessor ?? Initialize(); + static MemberAccessor Initialize() + { + MemberAccessor value = #if NETCOREAPP - // if dynamic code isn't supported, fallback to reflection - RuntimeFeature.IsDynamicCodeSupported ? - new ReflectionEmitCachingMemberAccessor() : - new ReflectionMemberAccessor(); + // if dynamic code isn't supported, fallback to reflection + RuntimeFeature.IsDynamicCodeSupported ? 
+ new ReflectionEmitCachingMemberAccessor() : + new ReflectionMemberAccessor(); #elif NETFRAMEWORK - new ReflectionEmitCachingMemberAccessor(); + new ReflectionEmitCachingMemberAccessor(); #else - new ReflectionMemberAccessor(); + new ReflectionMemberAccessor(); #endif + return Interlocked.CompareExchange(ref s_memberAccessor, value, null) ?? value; + } } } + internal static void ClearMemberAccessorCaches() => s_memberAccessor?.Clear(); private static MemberAccessor? s_memberAccessor; [RequiresUnreferencedCode(JsonSerializer.SerializationUnreferencedCodeMessage)] diff --git a/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/MemberAccessor.cs b/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/MemberAccessor.cs index ff6c442fa488cb..39605a2cff4069 100644 --- a/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/MemberAccessor.cs +++ b/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/MemberAccessor.cs @@ -9,23 +9,16 @@ namespace System.Text.Json.Serialization.Metadata { internal abstract class MemberAccessor { - public abstract Func? CreateParameterlessConstructor( - [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] Type type, - ConstructorInfo? constructorInfo); + public abstract Func? CreateParameterlessConstructor(Type type, ConstructorInfo? constructorInfo); public abstract Func CreateParameterizedConstructor(ConstructorInfo constructor); - public abstract JsonTypeInfo.ParameterizedConstructorDelegate? - CreateParameterizedConstructor(ConstructorInfo constructor); + public abstract JsonTypeInfo.ParameterizedConstructorDelegate? 
CreateParameterizedConstructor(ConstructorInfo constructor); public abstract Action CreateAddMethodDelegate<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods)] TCollection>(); - [RequiresUnreferencedCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] - [RequiresDynamicCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] public abstract Func, TCollection> CreateImmutableEnumerableCreateRangeDelegate(); - [RequiresUnreferencedCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] - [RequiresDynamicCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] public abstract Func>, TCollection> CreateImmutableDictionaryCreateRangeDelegate(); public abstract Func CreatePropertyGetter(PropertyInfo propertyInfo); @@ -35,5 +28,7 @@ public abstract JsonTypeInfo.ParameterizedConstructorDelegate CreateFieldGetter(FieldInfo fieldInfo); public abstract Action CreateFieldSetter(FieldInfo fieldInfo); + + public virtual void Clear() { } } } diff --git a/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/ReflectionEmitCachingMemberAccessor.cs b/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/ReflectionEmitCachingMemberAccessor.cs index e30f87d76da9f1..4efe2a3af47f12 100644 --- a/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/ReflectionEmitCachingMemberAccessor.cs +++ b/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/ReflectionEmitCachingMemberAccessor.cs @@ -8,54 +8,70 @@ namespace System.Text.Json.Serialization.Metadata { - [RequiresDynamicCode(JsonSerializer.SerializationRequiresDynamicCodeMessage)] internal sealed partial class ReflectionEmitCachingMemberAccessor : MemberAccessor { - private static readonly ReflectionEmitMemberAccessor s_sourceAccessor = new(); - private static readonly Cache<(string id, Type declaringType, MemberInfo? 
member)> s_cache = - new(slidingExpiration: TimeSpan.FromMilliseconds(1000), evictionInterval: TimeSpan.FromMilliseconds(200)); + private readonly ReflectionEmitMemberAccessor _sourceAccessor; + private readonly Cache<(string id, Type declaringType, MemberInfo? member)> _cache; - public static void Clear() => s_cache.Clear(); + [RequiresDynamicCode(JsonSerializer.SerializationRequiresDynamicCodeMessage)] + [RequiresUnreferencedCode(JsonSerializer.SerializationRequiresDynamicCodeMessage)] + public ReflectionEmitCachingMemberAccessor() + { + _sourceAccessor = new ReflectionEmitMemberAccessor(); + _cache = new(slidingExpiration: TimeSpan.FromMilliseconds(1000), evictionInterval: TimeSpan.FromMilliseconds(200)); + } - public override Action CreateAddMethodDelegate<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods)] TCollection>() - => s_cache.GetOrAdd((nameof(CreateAddMethodDelegate), typeof(TCollection), null), - static (_) => s_sourceAccessor.CreateAddMethodDelegate()); + public override void Clear() => _cache.Clear(); - public override Func? CreateParameterlessConstructor([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] Type type, ConstructorInfo? 
ctorInfo) - => s_cache.GetOrAdd((nameof(CreateParameterlessConstructor), type, ctorInfo), - [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2077:UnrecognizedReflectionPattern", - Justification = "Cannot apply DynamicallyAccessedMembersAttribute to tuple properties.")] -#pragma warning disable IL2077 // The suppression doesn't work for the trim analyzer: https://github.com/dotnet/roslyn/issues/59746 - static (key) => s_sourceAccessor.CreateParameterlessConstructor(key.declaringType, (ConstructorInfo?)key.member)); -#pragma warning restore IL2077 + public override Action CreateAddMethodDelegate<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods)] TCollection>() => + _cache.GetOrAdd( + key: (nameof(CreateAddMethodDelegate), typeof(TCollection), null), + _ => _sourceAccessor.CreateAddMethodDelegate()); - public override Func CreateFieldGetter(FieldInfo fieldInfo) - => s_cache.GetOrAdd((nameof(CreateFieldGetter), typeof(TProperty), fieldInfo), static key => s_sourceAccessor.CreateFieldGetter((FieldInfo)key.member!)); + public override Func? CreateParameterlessConstructor(Type type, ConstructorInfo? 
ctorInfo) => + _cache.GetOrAdd( + key: (nameof(CreateParameterlessConstructor), type, ctorInfo), + valueFactory: key => _sourceAccessor.CreateParameterlessConstructor(key.declaringType, (ConstructorInfo?)key.member)); - public override Action CreateFieldSetter(FieldInfo fieldInfo) - => s_cache.GetOrAdd((nameof(CreateFieldSetter), typeof(TProperty), fieldInfo), static key => s_sourceAccessor.CreateFieldSetter((FieldInfo)key.member!)); + public override Func CreateFieldGetter(FieldInfo fieldInfo) => + _cache.GetOrAdd( + key: (nameof(CreateFieldGetter), typeof(TProperty), fieldInfo), + valueFactory: key => _sourceAccessor.CreateFieldGetter((FieldInfo)key.member!)); - [RequiresUnreferencedCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] - public override Func>, TCollection> CreateImmutableDictionaryCreateRangeDelegate() - => s_cache.GetOrAdd((nameof(CreateImmutableDictionaryCreateRangeDelegate), typeof((TCollection, TKey, TValue)), null), - static (_) => s_sourceAccessor.CreateImmutableDictionaryCreateRangeDelegate()); + public override Action CreateFieldSetter(FieldInfo fieldInfo) => + _cache.GetOrAdd( + key: (nameof(CreateFieldSetter), typeof(TProperty), fieldInfo), + valueFactory: key => _sourceAccessor.CreateFieldSetter((FieldInfo)key.member!)); - [RequiresUnreferencedCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] - public override Func, TCollection> CreateImmutableEnumerableCreateRangeDelegate() - => s_cache.GetOrAdd((nameof(CreateImmutableEnumerableCreateRangeDelegate), typeof((TCollection, TElement)), null), - static (_) => s_sourceAccessor.CreateImmutableEnumerableCreateRangeDelegate()); + public override Func>, TCollection> CreateImmutableDictionaryCreateRangeDelegate() => + _cache.GetOrAdd( + key: (nameof(CreateImmutableDictionaryCreateRangeDelegate), typeof((TCollection, TKey, TValue)), null), + valueFactory: _ => _sourceAccessor.CreateImmutableDictionaryCreateRangeDelegate()); - 
public override Func CreateParameterizedConstructor(ConstructorInfo constructor) - => s_cache.GetOrAdd((nameof(CreateParameterizedConstructor), typeof(T), constructor), static key => s_sourceAccessor.CreateParameterizedConstructor((ConstructorInfo)key.member!)); + public override Func, TCollection> CreateImmutableEnumerableCreateRangeDelegate() => + _cache.GetOrAdd( + key: (nameof(CreateImmutableEnumerableCreateRangeDelegate), typeof((TCollection, TElement)), null), + valueFactory: _ => _sourceAccessor.CreateImmutableEnumerableCreateRangeDelegate()); - public override JsonTypeInfo.ParameterizedConstructorDelegate? CreateParameterizedConstructor(ConstructorInfo constructor) - => s_cache.GetOrAdd((nameof(CreateParameterizedConstructor), typeof(T), constructor), static key => s_sourceAccessor.CreateParameterizedConstructor((ConstructorInfo)key.member!)); + public override Func CreateParameterizedConstructor(ConstructorInfo constructor) => + _cache.GetOrAdd( + key: (nameof(CreateParameterizedConstructor), typeof(T), constructor), + valueFactory: key => _sourceAccessor.CreateParameterizedConstructor((ConstructorInfo)key.member!)); - public override Func CreatePropertyGetter(PropertyInfo propertyInfo) - => s_cache.GetOrAdd((nameof(CreatePropertyGetter), typeof(TProperty), propertyInfo), static key => s_sourceAccessor.CreatePropertyGetter((PropertyInfo)key.member!)); + public override JsonTypeInfo.ParameterizedConstructorDelegate? 
CreateParameterizedConstructor(ConstructorInfo constructor) => + _cache.GetOrAdd( + key: (nameof(CreateParameterizedConstructor), typeof(T), constructor), + valueFactory: key => _sourceAccessor.CreateParameterizedConstructor((ConstructorInfo)key.member!)); - public override Action CreatePropertySetter(PropertyInfo propertyInfo) - => s_cache.GetOrAdd((nameof(CreatePropertySetter), typeof(TProperty), propertyInfo), static key => s_sourceAccessor.CreatePropertySetter((PropertyInfo)key.member!)); + public override Func CreatePropertyGetter(PropertyInfo propertyInfo) => + _cache.GetOrAdd( + key: (nameof(CreatePropertyGetter), typeof(TProperty), propertyInfo), + valueFactory: key => _sourceAccessor.CreatePropertyGetter((PropertyInfo)key.member!)); + + public override Action CreatePropertySetter(PropertyInfo propertyInfo) => + _cache.GetOrAdd( + key: (nameof(CreatePropertySetter), typeof(TProperty), propertyInfo), + valueFactory: key => _sourceAccessor.CreatePropertySetter((PropertyInfo)key.member!)); } } #endif diff --git a/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/ReflectionEmitMemberAccessor.cs b/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/ReflectionEmitMemberAccessor.cs index 4b0b426bdaa9ef..5e6f6986539e10 100644 --- a/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/ReflectionEmitMemberAccessor.cs +++ b/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/ReflectionEmitMemberAccessor.cs @@ -10,12 +10,17 @@ namespace System.Text.Json.Serialization.Metadata { - [RequiresDynamicCode(JsonSerializer.SerializationRequiresDynamicCodeMessage)] internal sealed class ReflectionEmitMemberAccessor : MemberAccessor { - public override Func? CreateParameterlessConstructor( - [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] Type type, - ConstructorInfo? 
constructorInfo) + [RequiresDynamicCode(JsonSerializer.SerializationRequiresDynamicCodeMessage)] + [RequiresUnreferencedCode(JsonSerializer.SerializationRequiresDynamicCodeMessage)] + public ReflectionEmitMemberAccessor() + { + } + + [SuppressMessage("AOT", "IL3050:Calling members annotated with 'RequiresDynamicCodeAttribute' may break functionality when AOT compiling.", + Justification = "The constructor has been marked RequiresDynamicCode")] + public override Func? CreateParameterlessConstructor(Type type, ConstructorInfo? constructorInfo) { Debug.Assert(type != null); Debug.Assert(constructorInfo is null || constructorInfo.GetParameters().Length == 0); @@ -68,6 +73,8 @@ internal sealed class ReflectionEmitMemberAccessor : MemberAccessor public override Func CreateParameterizedConstructor(ConstructorInfo constructor) => CreateDelegate>(CreateParameterizedConstructor(constructor)); + [UnconditionalSuppressMessage("AOT", "IL3050:Calling members annotated with 'RequiresDynamicCodeAttribute' may break functionality when AOT compiling.", + Justification = "The constructor has been marked RequiresDynamicCode")] private static DynamicMethod CreateParameterizedConstructor(ConstructorInfo constructor) { Type? type = constructor.DeclaringType; @@ -109,6 +116,8 @@ public override JsonTypeInfo.ParameterizedConstructorDelegate>( CreateParameterizedConstructor(constructor, typeof(TArg0), typeof(TArg1), typeof(TArg2), typeof(TArg3))); + [UnconditionalSuppressMessage("AOT", "IL3050:Calling members annotated with 'RequiresDynamicCodeAttribute' may break functionality when AOT compiling.", + Justification = "The constructor has been marked RequiresDynamicCode")] private static DynamicMethod? CreateParameterizedConstructor(ConstructorInfo constructor, Type parameterType1, Type parameterType2, Type parameterType3, Type parameterType4) { Type? 
type = constructor.DeclaringType; @@ -153,6 +162,8 @@ public override JsonTypeInfo.ParameterizedConstructorDelegate CreateAddMethodDelegate<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods)] TCollection>() => CreateDelegate>(CreateAddMethodDelegate(typeof(TCollection))); + [UnconditionalSuppressMessage("AOT", "IL3050:Calling members annotated with 'RequiresDynamicCodeAttribute' may break functionality when AOT compiling.", + Justification = "The constructor has been marked RequiresDynamicCode")] private static DynamicMethod CreateAddMethodDelegate( [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods)] Type collectionType) { @@ -176,13 +187,14 @@ private static DynamicMethod CreateAddMethodDelegate( return dynamicMethod; } - [RequiresUnreferencedCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] - [RequiresDynamicCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] public override Func, TCollection> CreateImmutableEnumerableCreateRangeDelegate() => CreateDelegate, TCollection>>( CreateImmutableEnumerableCreateRangeDelegate(typeof(TCollection), typeof(TElement), typeof(IEnumerable))); - [RequiresUnreferencedCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] + [UnconditionalSuppressMessage("AOT", "IL3050:Calling members annotated with 'RequiresDynamicCodeAttribute' may break functionality when AOT compiling.", + Justification = "The constructor has been marked RequiresDynamicCode")] + [UnconditionalSuppressMessage("Trimming", "IL2026:Members annotated with 'RequiresUnreferencedCodeAttribute' require dynamic access otherwise can break functionality when trimming application code", + Justification = "The constructor has been marked RequiresUnreferencedCode")] private static DynamicMethod CreateImmutableEnumerableCreateRangeDelegate(Type collectionType, Type elementType, Type enumerableType) { MethodInfo realMethod = 
collectionType.GetImmutableEnumerableCreateRangeMethod(elementType); @@ -203,13 +215,14 @@ private static DynamicMethod CreateImmutableEnumerableCreateRangeDelegate(Type c return dynamicMethod; } - [RequiresUnreferencedCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] - [RequiresDynamicCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] public override Func>, TCollection> CreateImmutableDictionaryCreateRangeDelegate() => CreateDelegate>, TCollection>>( CreateImmutableDictionaryCreateRangeDelegate(typeof(TCollection), typeof(TKey), typeof(TValue), typeof(IEnumerable>))); - [RequiresUnreferencedCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] + [UnconditionalSuppressMessage("AOT", "IL3050:Calling members annotated with 'RequiresDynamicCodeAttribute' may break functionality when AOT compiling.", + Justification = "The constructor has been marked RequiresDynamicCode")] + [UnconditionalSuppressMessage("Trimming", "IL2026:Members annotated with 'RequiresUnreferencedCodeAttribute' require dynamic access otherwise can break functionality when trimming application code", + Justification = "The constructor has been marked RequiresUnreferencedCode")] private static DynamicMethod CreateImmutableDictionaryCreateRangeDelegate(Type collectionType, Type keyType, Type valueType, Type enumerableType) { MethodInfo realMethod = collectionType.GetImmutableDictionaryCreateRangeMethod(keyType, valueType); @@ -379,6 +392,8 @@ private static DynamicMethod CreateFieldSetter(FieldInfo fieldInfo, Type runtime return dynamicMethod; } + [UnconditionalSuppressMessage("AOT", "IL3050:Calling members annotated with 'RequiresDynamicCodeAttribute' may break functionality when AOT compiling.", + Justification = "The constructor has been marked RequiresDynamicCode")] private static DynamicMethod CreateGetterMethod(string memberName, Type memberType) => new DynamicMethod( memberName + "Getter", @@ 
-387,6 +402,8 @@ private static DynamicMethod CreateGetterMethod(string memberName, Type memberTy typeof(ReflectionEmitMemberAccessor).Module, skipVisibility: true); + [UnconditionalSuppressMessage("AOT", "IL3050:Calling members annotated with 'RequiresDynamicCodeAttribute' may break functionality when AOT compiling.", + Justification = "The constructor has been marked RequiresDynamicCode")] private static DynamicMethod CreateSetterMethod(string memberName, Type memberType) => new DynamicMethod( memberName + "Setter", diff --git a/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/ReflectionMemberAccessor.cs b/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/ReflectionMemberAccessor.cs index 8627a24f3f4926..cfc94496480a8f 100644 --- a/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/ReflectionMemberAccessor.cs +++ b/src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/ReflectionMemberAccessor.cs @@ -10,9 +10,15 @@ namespace System.Text.Json.Serialization.Metadata { internal sealed class ReflectionMemberAccessor : MemberAccessor { - public override Func? CreateParameterlessConstructor( - [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] Type type, - ConstructorInfo? ctorInfo) + [RequiresDynamicCode(JsonSerializer.SerializationRequiresDynamicCodeMessage)] + [RequiresUnreferencedCode(JsonSerializer.SerializationRequiresDynamicCodeMessage)] + public ReflectionMemberAccessor() + { + } + + [UnconditionalSuppressMessage("Trimming", "IL2067:Target parameter argument does not satisfy 'DynamicallyAccessedMembersAttribute' in call to target method. The parameter of method does not have matching annotations.", + Justification = "The constructor has been marked RequiresUnreferencedCode")] + public override Func? CreateParameterlessConstructor(Type type, ConstructorInfo? 
ctorInfo) { Debug.Assert(type != null); Debug.Assert(ctorInfo is null || ctorInfo.GetParameters().Length == 0); @@ -122,8 +128,10 @@ public override JsonTypeInfo.ParameterizedConstructorDelegate, TCollection> CreateImmutableEnumerableCreateRangeDelegate() { MethodInfo createRange = typeof(TCollection).GetImmutableEnumerableCreateRangeMethod(typeof(TElement)); @@ -131,8 +139,10 @@ public override Func, TCollection> CreateImmutableEnumerab typeof(Func, TCollection>)); } - [RequiresUnreferencedCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] - [RequiresDynamicCode(IEnumerableConverterFactoryHelpers.ImmutableConvertersUnreferencedCodeMessage)] + [UnconditionalSuppressMessage("Trimming", "IL2026:Members annotated with 'RequiresUnreferencedCodeAttribute' require dynamic access otherwise can break functionality when trimming application code", + Justification = "The constructor has been marked RequiresUnreferencedCode")] + [UnconditionalSuppressMessage("AOT", "IL3050:Calling members annotated with 'RequiresDynamicCodeAttribute' may break functionality when AOT compiling.", + Justification = "The constructor has been marked RequiresDynamicCode")] public override Func>, TCollection> CreateImmutableDictionaryCreateRangeDelegate() { MethodInfo createRange = typeof(TCollection).GetImmutableDictionaryCreateRangeMethod(typeof(TKey), typeof(TValue)); From 1f00cda1340653baf53d8161058554e639c0af2b Mon Sep 17 00:00:00 2001 From: Meri Khamoyan <96171496+mkhamoyan@users.noreply.github.com> Date: Thu, 28 Mar 2024 18:49:00 +0100 Subject: [PATCH 005/132] [mono][wasm] Force interpreter to initialize the pointers (#100288) Force interpreter to initialize the pointers --------- Co-authored-by: Larry Ewing --- .../scenarios/BuildWasiAppsJobsList.txt | 1 + src/mono/browser/runtime/runtime.c | 23 +++++ src/mono/browser/runtime/runtime.h | 2 + src/mono/sample/wasi/native/Program.cs | 5 -- .../PInvokeTableGeneratorTests.cs | 87 +++++++++++++++++++ 
.../WasmAppBuilder/PInvokeTableGenerator.cs | 19 +++- 6 files changed, 131 insertions(+), 6 deletions(-) create mode 100644 src/mono/wasi/Wasi.Build.Tests/PInvokeTableGeneratorTests.cs diff --git a/eng/testing/scenarios/BuildWasiAppsJobsList.txt b/eng/testing/scenarios/BuildWasiAppsJobsList.txt index b68f7fe3d30ed0..bdb9ecf6e5f074 100644 --- a/eng/testing/scenarios/BuildWasiAppsJobsList.txt +++ b/eng/testing/scenarios/BuildWasiAppsJobsList.txt @@ -3,3 +3,4 @@ Wasi.Build.Tests.ILStripTests Wasi.Build.Tests.SdkMissingTests Wasi.Build.Tests.RuntimeConfigTests Wasi.Build.Tests.WasiTemplateTests +Wasi.Build.Tests.PInvokeTableGeneratorTests diff --git a/src/mono/browser/runtime/runtime.c b/src/mono/browser/runtime/runtime.c index bbd645e21e821c..7dbdc02c259996 100644 --- a/src/mono/browser/runtime/runtime.c +++ b/src/mono/browser/runtime/runtime.c @@ -76,6 +76,7 @@ int monoeg_g_setenv(const char *variable, const char *value, int overwrite); int32_t mini_parse_debug_option (const char *option); char *mono_method_get_full_name (MonoMethod *method); void mono_trace_init (void); +MonoMethod *mono_marshal_get_managed_wrapper (MonoMethod *method, MonoClass *delegate_klass, MonoGCHandle target_handle, MonoError *error); /* Not part of public headers */ #define MONO_ICALL_TABLE_CALLBACKS_VERSION 3 @@ -356,3 +357,25 @@ mono_wasm_assembly_find_method (MonoClass *klass, const char *name, int argument MONO_EXIT_GC_UNSAFE; return result; } + +/* + * mono_wasm_marshal_get_managed_wrapper: + * Creates a wrapper for a function pointer to a method marked with + * UnamangedCallersOnlyAttribute. + * This wrapper ensures that the interpreter initializes the pointers. 
+ */ +void +mono_wasm_marshal_get_managed_wrapper (const char* assemblyName, const char* typeName, const char* methodName, int num_params) +{ + MonoError error; + mono_error_init (&error); + MonoAssembly* assembly = mono_wasm_assembly_load (assemblyName); + assert (assembly); + MonoClass* class = mono_wasm_assembly_find_class (assembly, "", typeName); + assert (class); + MonoMethod* method = mono_wasm_assembly_find_method (class, methodName, num_params); + assert (method); + MonoMethod *managedWrapper = mono_marshal_get_managed_wrapper (method, NULL, 0, &error); + assert (managedWrapper); + mono_compile_method (managedWrapper); +} diff --git a/src/mono/browser/runtime/runtime.h b/src/mono/browser/runtime/runtime.h index 8718fdb596720f..0ad4d2abd451a7 100644 --- a/src/mono/browser/runtime/runtime.h +++ b/src/mono/browser/runtime/runtime.h @@ -18,5 +18,7 @@ extern int mono_wasm_enable_gc; MonoDomain *mono_wasm_load_runtime_common (int debug_level, MonoLogCallback log_callback, const char *interp_opts); MonoAssembly *mono_wasm_assembly_load (const char *name); MonoClass *mono_wasm_assembly_find_class (MonoAssembly *assembly, const char *namespace, const char *name); +MonoMethod *mono_wasm_assembly_find_method (MonoClass *klass, const char *name, int arguments); +void mono_wasm_marshal_get_managed_wrapper (const char* assemblyName, const char* typeName, const char* methodName, int num_params); #endif diff --git a/src/mono/sample/wasi/native/Program.cs b/src/mono/sample/wasi/native/Program.cs index cb2fd0f36caf5a..d8d480869a8de5 100644 --- a/src/mono/sample/wasi/native/Program.cs +++ b/src/mono/sample/wasi/native/Program.cs @@ -20,11 +20,6 @@ public static int MyExport(int number) public unsafe static int Main(string[] args) { Console.WriteLine($"main: {args.Length}"); - // workaround to force the interpreter to initialize wasm_native_to_interp_ftndesc for MyExport - if (args.Length > 10000) { - ((IntPtr)(delegate* unmanaged)&MyExport).ToString(); - } - MyImport(); 
return 0; } diff --git a/src/mono/wasi/Wasi.Build.Tests/PInvokeTableGeneratorTests.cs b/src/mono/wasi/Wasi.Build.Tests/PInvokeTableGeneratorTests.cs new file mode 100644 index 00000000000000..6a47c0364658d1 --- /dev/null +++ b/src/mono/wasi/Wasi.Build.Tests/PInvokeTableGeneratorTests.cs @@ -0,0 +1,87 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.IO; +using Xunit; +using Xunit.Abstractions; +using Wasm.Build.Tests; + +#nullable enable + +namespace Wasi.Build.Tests; + +public class PInvokeTableGeneratorTests : BuildTestBase +{ + public PInvokeTableGeneratorTests(ITestOutputHelper output, SharedBuildPerTestClassFixture buildContext) + : base(output, buildContext) + { + } + + [Fact] + public void InteropSupportForUnmanagedEntryPointWithoutDelegate() + { + string config = "Release"; + string id = $"{config}_{GetRandomId()}"; + string projectFile = CreateWasmTemplateProject(id, "wasiconsole"); + string code = + """ + using System; + using System.Runtime.InteropServices; + public unsafe class Test + { + [UnmanagedCallersOnly(EntryPoint = "ManagedFunc")] + public static int MyExport(int number) + { + // called from MyImport aka UnmanagedFunc + Console.WriteLine($"MyExport({number}) -> 42"); + return 42; + } + + [DllImport("*", EntryPoint = "UnmanagedFunc")] + public static extern void MyImport(); // calls ManagedFunc aka MyExport + + public unsafe static int Main(string[] args) + { + Console.WriteLine($"main: {args.Length}"); + MyImport(); + return 0; + } + } + """; + string cCode = + """ + #include + + int ManagedFunc(int number); + + void UnmanagedFunc() + { + int ret = 0; + printf("UnmanagedFunc calling ManagedFunc\n"); + ret = ManagedFunc(123); + printf("ManagedFunc returned %d\n", ret); + } + """; + File.WriteAllText(Path.Combine(_projectDir!, "Program.cs"), code); + File.WriteAllText(Path.Combine(_projectDir!, "local.c"), cCode); + string extraProperties = 
@"false + true"; + AddItemsPropertiesToProject(projectFile, extraProperties: extraProperties, extraItems: @""); + string projectName = Path.GetFileNameWithoutExtension(projectFile); + var buildArgs = new BuildArgs(projectName, config, AOT: true, ProjectFileContents: id, ExtraBuildArgs: null); + buildArgs = ExpandBuildArgs(buildArgs); + BuildProject(buildArgs, + id: id, + new BuildProjectOptions( + DotnetWasmFromRuntimePack: false, + CreateProject: false, + Publish: true + )); + + CommandResult res = new RunCommand(s_buildEnv, _testOutput) + .WithWorkingDirectory(_projectDir!) + .ExecuteWithCapturedOutput($"run --no-silent --no-build -c {config}") + .EnsureSuccessful(); + Assert.Contains("MyExport(123) -> 42", res.Output); + } +} diff --git a/src/tasks/WasmAppBuilder/PInvokeTableGenerator.cs b/src/tasks/WasmAppBuilder/PInvokeTableGenerator.cs index e4aa070d88ea38..402325b1275ff3 100644 --- a/src/tasks/WasmAppBuilder/PInvokeTableGenerator.cs +++ b/src/tasks/WasmAppBuilder/PInvokeTableGenerator.cs @@ -323,6 +323,14 @@ private void EmitNativeToInterp(StreamWriter w, List callbacks) // Only blittable parameter/return types are supposed. int cb_index = 0; + w.Write(@"#include + #include + #include + #include + #include + #include ""runtime.h"" + "); + // Arguments to interp entry functions in the runtime w.WriteLine($"InterpFtnDesc wasm_native_to_interp_ftndescs[{callbacks.Count}] = {{}};"); @@ -371,7 +379,16 @@ private void EmitNativeToInterp(StreamWriter w, List callbacks) if (!is_void) sb.Append($" {MapType(method.ReturnType)} res;\n"); - //sb.Append($" printf(\"{entry_name} called\\n\");\n"); + // In case when null force interpreter to initialize the pointers + sb.Append($" if (!(WasmInterpEntrySig_{cb_index})wasm_native_to_interp_ftndescs [{cb_index}].func) {{\n"); + var assemblyFullName = cb.Method.DeclaringType == null ? "" : cb.Method.DeclaringType.Assembly.FullName; + var assemblyName = assemblyFullName != null && assemblyFullName.Split(',').Length > 0 ? 
assemblyFullName.Split(',')[0].Trim() : ""; + var typeName = cb.Method.DeclaringType == null || cb.Method.DeclaringType.FullName == null ? "" : cb.Method.DeclaringType.FullName; + var methodName = cb.Method.Name; + int numParams = method.GetParameters().Length; + sb.Append($" mono_wasm_marshal_get_managed_wrapper (\"{assemblyName}\", \"{typeName}\", \"{methodName}\", {numParams});\n"); + sb.Append($" }}\n"); + sb.Append($" ((WasmInterpEntrySig_{cb_index})wasm_native_to_interp_ftndescs [{cb_index}].func) ("); if (!is_void) { From cb63e0233a64c48bc84c729778efa8a8b7a90b72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Fi=C5=A1era?= Date: Thu, 28 Mar 2024 19:43:37 +0100 Subject: [PATCH 006/132] [wasi] Basic usage docs (#100354) --- src/mono/wasi/README.md | 42 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/src/mono/wasi/README.md b/src/mono/wasi/README.md index 106c65322dc3a1..9cf3f8757afaf9 100644 --- a/src/mono/wasi/README.md +++ b/src/mono/wasi/README.md @@ -2,6 +2,46 @@ This directory contains a build configuration for WASI support, plus a basic sample. This is not intended for production use, nor is it currently supported. This is a step towards possible future support. +## Try it out + +Here is a quick overview of how to consume published artifacts. Assuming .NET SDK is already installed, you should run: + +``` +dotnet workload install wasi-experimental +``` + +This will install workload for building .NET based WASI apps + basic template. +Now you can create a new .NET application that targets WASI + +``` +dotnet new wasiconsole +``` + +And run it with + +``` +dotnet run +``` + +The `runtimeconfig.template.json` contains `perHostConfig` section where wasm hosts can be configured + +### Wasi SDK + +The workload for the time being doesn't include Wasi SDK, which is responsible for native compilation. +If you don't need to modify runtime configuration, you can omit this step. 
In case you get: + +``` +error : Could not find wasi-sdk. Either set $(WASI_SDK_PATH), or use workloads to get the sdk. SDK is required for building native files. +``` + +you will need to separately download a WASI SDK from https://github.com/WebAssembly/wasi-sdk and point an environment variable `WASI_SDK_PATH` or MSBuild property `WasiSdkRoot` to a location where you extract it. + +### Optional build flags + +- `WasmSingleFileBundle` - bundle all assets into the `.wasm`. The output file name will match the project name. +- `InvariantGlobalization` - remove globalization support, decrease the publish size. +- More details can be found at https://github.com/dotnet/runtime/blob/main/src/mono/wasm/build/WasmApp.Common.targets and https://github.com/dotnet/runtime/blob/main/src/mono/wasi/build/WasiApp.targets + ## How it works The mechanism for executing .NET code in a WASI runtime environment is equivalent to how `dotnet.wasm` executes .NET code in a browser environment. That is, it runs the Mono interpreter to execute .NET bytecode that has been built in the normal way. It should also work with AOT but this is not yet attempted. @@ -65,4 +105,4 @@ Download the Mono Debug extension and configure a launch.json like this: } ] } -``` \ No newline at end of file +``` From 71177d95d142a1341af9d7b5e8b4548889a42c70 Mon Sep 17 00:00:00 2001 From: Milos Kotlar Date: Thu, 28 Mar 2024 19:49:27 +0100 Subject: [PATCH 007/132] [infra] Enable linux-x64 Mono fullAOT mini job on extra platforms (#96332) This PR enables the linux-x64 full AOT mini job on extra platforms, utilizing the CBL-Mariner docker image. The parameters as_name and as_options, along with ld_name and ld_options are added to allow parameterization of the toolchain configuration. 
--- .../build-runtime-tests-and-send-to-helix.yml | 55 +++++++----- .../runtime-extra-platforms-other.yml | 32 +++++++ src/mono/mono/metadata/marshal.c | 13 ++- src/mono/mono/mini/aot-compiler.c | 35 ++++++-- .../CoreCLRTestLibrary/PlatformDetection.cs | 2 + src/tests/Common/helixpublishwitharcade.proj | 4 +- src/tests/Common/testenvironment.proj | 2 +- .../DllImportSearchPathsTest.cs | 2 +- src/tests/Interop/Interop.csproj | 2 + .../NativeLibrary/API/NativeLibraryTests.cs | 2 +- .../AnsiBSTR/AnsiBStrTest.csproj | 2 + .../StringMarshalling/BSTR/BSTRTest.csproj | 2 + .../LPTSTR/LPTSTRTest.csproj | 2 + .../VBByRefStr/VBByRefStrTest.csproj | 2 + .../X86/X86Base.X64/Program.X86Base.X64.cs | 2 +- .../X86/X86Base/Program.X86Base.cs | 2 +- .../BoxPatternMatchAndSideEffects.csproj | 2 + .../JIT/Methodical/Methodical_others.csproj | 4 + .../JitBlue/GitHub_26491/GitHub_26491.ilproj | 4 + src/tests/JIT/Regression/Regression_3.csproj | 4 + src/tests/build.proj | 13 ++- src/tests/issues.targets | 84 +++++++++++++++++-- .../readytorun_coreroot_determinism.csproj | 4 + src/tests/readytorun/readytorun.csproj | 4 + 24 files changed, 236 insertions(+), 44 deletions(-) diff --git a/eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml b/eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml index 4769f35a9c8276..127ddbcfeb0284 100644 --- a/eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml +++ b/eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml @@ -38,39 +38,48 @@ steps: archType: ${{ parameters.archType }} buildConfig: ${{ parameters.buildConfig }} testBuildArgs: ${{ parameters.testBuildArgs }} - - # Build a Mono LLVM AOT cross-compiler for non-amd64 targets (in this case, just arm64) - - ${{ if and(eq(parameters.runtimeFlavor, 'mono'), or(eq(parameters.runtimeVariant, 'llvmaot'), eq(parameters.runtimeVariant, 'llvmfullaot'))) }}: + # Build a Mono AOT cross-compiler 
for non-amd64 targets (in this case, just arm64) + - ${{ if and(eq(parameters.runtimeFlavor, 'mono'), in(parameters.runtimeVariant, 'llvmaot', 'llvmfullaot', 'minifullaot')) }}: - ${{ if eq(parameters.archType, 'arm64') }}: - - script: ./build.sh - -subset mono - -c ${{ parameters.buildConfig }} - -arch ${{ parameters.archType }} - /p:BuildMonoAotCrossCompiler=true - /p:BuildMonoAotCrossCompilerOnly=true - /p:MonoLibClang="/usr/local/lib/libclang.so.16" - /p:MonoAOTEnableLLVM=true - /p:CrossBuild=true - displayName: "Build Mono LLVM AOT cross compiler" + - ${{ if eq(parameters.runtimeVariant, 'minifullaot') }}: + - script: ./build.sh + -subset mono + -c ${{ parameters.buildConfig }} + -arch ${{ parameters.archType }} + /p:BuildMonoAotCrossCompiler=true + /p:BuildMonoAotCrossCompilerOnly=true + /p:CrossBuild=true + displayName: "Build Mono Mini AOT cross compiler" + - ${{ else }}: + - script: ./build.sh + -subset mono + -c ${{ parameters.buildConfig }} + -arch ${{ parameters.archType }} + /p:BuildMonoAotCrossCompiler=true + /p:BuildMonoAotCrossCompilerOnly=true + /p:MonoLibClang="/usr/local/lib/libclang.so.16" + /p:MonoAOTEnableLLVM=true + /p:CrossBuild=true + displayName: "Build Mono LLVM AOT cross compiler" - ${{ if eq(parameters.archType, 'x64') }}: - ${{ if eq(parameters.runtimeVariant, 'llvmaot') }}: - - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot mono_aot ${{ parameters.buildConfig }} ${{ parameters.archType }} - displayName: "LLVM AOT compile CoreCLR tests" + - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot mono_aot ${{ parameters.buildConfig }} ${{ parameters.archType }} /p:RuntimeVariant=${{ parameters.runtimeVariant }} + displayName: "AOT compile CoreCLR tests" target: ${{ coalesce(parameters.llvmAotStepContainer, parameters.container) }} - - ${{ if eq(parameters.runtimeVariant, 'llvmfullaot') }}: - - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) 
$(logRootNameArg)MonoAot mono_fullaot ${{ parameters.buildConfig }} ${{ parameters.archType }} - displayName: "LLVM AOT compile CoreCLR tests" + - ${{ if in(parameters.runtimeVariant, 'llvmfullaot', 'minifullaot') }}: + - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot mono_fullaot ${{ parameters.buildConfig }} ${{ parameters.archType }} /p:RuntimeVariant=${{ parameters.runtimeVariant }} + displayName: "AOT compile CoreCLR tests" target: ${{ coalesce(parameters.llvmAotStepContainer, parameters.container) }} - ${{ if eq(parameters.archType, 'arm64') }}: - ${{ if eq(parameters.runtimeVariant, 'llvmaot') }}: - - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot mono_aot ${{ parameters.buildConfig }} ${{ parameters.archType }} cross /p:RuntimeVariant=llvmfullaot -maxcpucount:2 - displayName: "LLVM AOT cross-compile CoreCLR tests" + - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot mono_aot ${{ parameters.buildConfig }} ${{ parameters.archType }} cross /p:RuntimeVariant=${{ parameters.runtimeVariant }} -maxcpucount:2 + displayName: "AOT cross-compile CoreCLR tests" env: __MonoToolPrefix: aarch64-linux-gnu- - - ${{ if eq(parameters.runtimeVariant, 'llvmfullaot') }}: - - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot mono_fullaot ${{ parameters.buildConfig }} ${{ parameters.archType }} cross /p:RuntimeVariant=llvmfullaot -maxcpucount:2 - displayName: "LLVM AOT cross-compile CoreCLR tests" + - ${{ if in(parameters.runtimeVariant, 'llvmfullaot', 'minifullaot') }}: + - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot mono_fullaot ${{ parameters.buildConfig }} ${{ parameters.archType }} cross /p:RuntimeVariant=${{ parameters.runtimeVariant }} -maxcpucount:2 + displayName: "AOT cross-compile CoreCLR tests" env: __MonoToolPrefix: aarch64-linux-gnu- diff --git 
a/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml index 6728a4ebb4100c..c1a3e2cd1f4965 100644 --- a/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml +++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml @@ -149,6 +149,38 @@ jobs: # extraVariablesTemplates: # - template: /eng/pipelines/common/templates/runtimes/test-variables.yml +# +# Mono CoreCLR runtime test executions using live libraries and mini Full AOT +# Only when Mono is changed +# +- template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/common/global-build-job.yml + helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml + buildConfig: Release + runtimeFlavor: mono + platforms: + - linux_x64 + variables: + - name: timeoutPerTestInMinutes + value: 60 + - name: timeoutPerTestCollectionInMinutes + value: 180 + jobParameters: + testGroup: innerloop + nameSuffix: AllSubsets_Mono_MiniFullAot_RuntimeTests + runtimeVariant: minifullaot + buildArgs: -s mono+libs+clr.hosts -c Release + timeoutInMinutes: 300 + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + llvmAotStepContainer: linux_x64 + testRunNamePrefixSuffix: Mono_Release + extraVariablesTemplates: + - template: /eng/pipelines/common/templates/runtimes/test-variables.yml + # # Mono CoreCLR runtime Test executions using live libraries in interpreter mode # Only when Mono is changed diff --git a/src/mono/mono/metadata/marshal.c b/src/mono/mono/metadata/marshal.c index a86ed532c1571f..09fddd573c0fdb 100644 --- a/src/mono/mono/metadata/marshal.c +++ b/src/mono/mono/metadata/marshal.c @@ -5834,8 +5834,19 @@ mono_marshal_load_type_info (MonoClass* klass) if (m_class_is_inlinearray (klass)) { // Limit the max size of array instance to 1MiB const int struct_max_size = 1024 * 1024; + guint32 
initial_size = size; size *= m_class_inlinearray_value (klass); - g_assert ((size > 0) && (size <= struct_max_size)); + if(size == 0 || size > struct_max_size) { + if (mono_get_runtime_callbacks ()->mono_class_set_deferred_type_load_failure_callback) { + if (mono_get_runtime_callbacks ()->mono_class_set_deferred_type_load_failure_callback (klass, "Inline array struct size out of bounds, abnormally large.")) + break; + else + size = initial_size; // failure occured during AOT compilation, continue execution + } else { + mono_class_set_type_load_failure (klass, "Inline array struct size out of bounds, abnormally large."); + break; + } + } } switch (layout) { diff --git a/src/mono/mono/mini/aot-compiler.c b/src/mono/mono/mini/aot-compiler.c index c4e318b4f56882..bda04415e2d682 100644 --- a/src/mono/mono/mini/aot-compiler.c +++ b/src/mono/mono/mini/aot-compiler.c @@ -242,8 +242,11 @@ typedef struct MonoAotOptions { gboolean child; char *tool_prefix; char *as_prefix; + char *as_name; + char *as_options; char *ld_flags; char *ld_name; + char *ld_options; char *mtriple; char *llvm_path; char *temp_path; @@ -8940,10 +8943,16 @@ mono_aot_parse_options (const char *aot_options, MonoAotOptions *opts) opts->tool_prefix = g_strdup (arg + strlen ("tool-prefix=")); } else if (str_begins_with (arg, "as-prefix=")) { opts->as_prefix = g_strdup (arg + strlen ("as-prefix=")); + } else if (str_begins_with (arg, "as-name=")) { + opts->as_name = g_strdup (arg + strlen ("as-name=")); + } else if (str_begins_with (arg, "as-options=")) { + opts->as_options = g_strdup (arg + strlen ("as-options=")); } else if (str_begins_with (arg, "ld-flags=")) { opts->ld_flags = g_strdup (arg + strlen ("ld-flags=")); } else if (str_begins_with (arg, "ld-name=")) { opts->ld_name = g_strdup (arg + strlen ("ld-name=")); + } else if (str_begins_with (arg, "ld-options=")) { + opts->ld_options = g_strdup (arg + strlen ("ld-options=")); } else if (str_begins_with (arg, "soft-debug")) { opts->soft_debug = TRUE; // 
Intentionally undocumented x2-- deprecated @@ -13222,8 +13231,16 @@ compile_asm (MonoAotCompile *acfg) #ifdef TARGET_OSX g_string_append (acfg->as_args, "-c -x assembler "); #endif + const char *as_binary_name = acfg->aot_opts.as_name; + if (as_binary_name == NULL) { + as_binary_name = AS_NAME; + } + const char *as_options = acfg->aot_opts.as_options; + if (as_options == NULL) { + as_options = AS_OPTIONS; + } - command = g_strdup_printf ("\"%s%s\" %s %s -o %s %s", as_prefix, AS_NAME, AS_OPTIONS, + command = g_strdup_printf ("\"%s%s\" %s %s -o %s %s", as_prefix, as_binary_name, as_options, acfg->as_args ? acfg->as_args->str : "", wrap_path (objfile), wrap_path (acfg->asm_fname)); aot_printf (acfg, "Executing the native assembler: %s\n", command); @@ -13234,7 +13251,7 @@ compile_asm (MonoAotCompile *acfg) } if (acfg->llvm && !acfg->llvm_owriter) { - command = g_strdup_printf ("\"%s%s\" %s %s -o %s %s", as_prefix, AS_NAME, AS_OPTIONS, + command = g_strdup_printf ("\"%s%s\" %s %s -o %s %s", as_prefix, as_binary_name, as_options, acfg->as_args ? 
acfg->as_args->str : "", wrap_path (acfg->llvm_ofile), wrap_path (acfg->llvm_sfile)); aot_printf (acfg, "Executing the native assembler: %s\n", command); @@ -13283,16 +13300,21 @@ compile_asm (MonoAotCompile *acfg) str = g_string_new (""); const char *ld_binary_name = acfg->aot_opts.ld_name; + + const char *ld_options = acfg->aot_opts.ld_options; + if (ld_options == NULL) { + ld_options = LD_OPTIONS; + } #if defined(LD_NAME) if (ld_binary_name == NULL) { ld_binary_name = LD_NAME; } if (acfg->aot_opts.tool_prefix) - g_string_append_printf (str, "\"%s%s\" %s", tool_prefix, ld_binary_name, LD_OPTIONS); + g_string_append_printf (str, "\"%s%s\" %s", tool_prefix, ld_binary_name, ld_options); else if (acfg->aot_opts.llvm_only) g_string_append_printf (str, "%s", acfg->aot_opts.clangxx); else - g_string_append_printf (str, "\"%s%s\" %s", tool_prefix, ld_binary_name, LD_OPTIONS); + g_string_append_printf (str, "\"%s%s\" %s", tool_prefix, ld_binary_name, ld_options); #else if (ld_binary_name == NULL) { ld_binary_name = "ld"; @@ -13301,7 +13323,7 @@ compile_asm (MonoAotCompile *acfg) // Default (linux) if (acfg->aot_opts.tool_prefix) /* Cross compiling */ - g_string_append_printf (str, "\"%s%s\" %s", tool_prefix, ld_binary_name, LD_OPTIONS); + g_string_append_printf (str, "\"%s%s\" %s", tool_prefix, ld_binary_name, ld_options); else if (acfg->aot_opts.llvm_only) g_string_append_printf (str, "%s", acfg->aot_opts.clangxx); else @@ -14232,8 +14254,11 @@ aot_opts_free (MonoAotOptions *aot_opts) g_free (aot_opts->dedup_include); g_free (aot_opts->tool_prefix); g_free (aot_opts->as_prefix); + g_free (aot_opts->as_name); + g_free (aot_opts->as_options); g_free (aot_opts->ld_flags); g_free (aot_opts->ld_name); + g_free (aot_opts->ld_options); g_free (aot_opts->mtriple); g_free (aot_opts->llvm_path); g_free (aot_opts->temp_path); diff --git a/src/tests/Common/CoreCLRTestLibrary/PlatformDetection.cs b/src/tests/Common/CoreCLRTestLibrary/PlatformDetection.cs index 
56581dc9ea7723..d03327e5fddeda 100644 --- a/src/tests/Common/CoreCLRTestLibrary/PlatformDetection.cs +++ b/src/tests/Common/CoreCLRTestLibrary/PlatformDetection.cs @@ -50,6 +50,8 @@ public static bool IsNonZeroLowerBoundArraySupported public static bool IsMonoLLVMAOT => _variant == "llvmaot"; public static bool IsMonoLLVMFULLAOT => _variant == "llvmfullaot"; + public static bool IsMonoMINIFULLAOT => _variant == "minifullaot"; + public static bool IsMonoFULLAOT => IsMonoLLVMFULLAOT || IsMonoMINIFULLAOT; public static bool IsMonoInterpreter => _variant == "monointerpreter"; // These platforms have not had their infrastructure updated to support native test assets. diff --git a/src/tests/Common/helixpublishwitharcade.proj b/src/tests/Common/helixpublishwitharcade.proj index 557e5e55beea90..b5d5153343897f 100644 --- a/src/tests/Common/helixpublishwitharcade.proj +++ b/src/tests/Common/helixpublishwitharcade.proj @@ -413,8 +413,8 @@ - - + + - <_TestEnvFileLine Condition="'$(RuntimeVariant)' == 'llvmfullaot'" Include="export MONO_ENV_OPTIONS=--full-aot" /> + <_TestEnvFileLine Condition="'$(RuntimeVariant)' == 'llvmfullaot' or '$(RuntimeVariant)' == 'minifullaot'" Include="export MONO_ENV_OPTIONS=--full-aot" /> <_TestEnvFileLine Condition="'$(RuntimeVariant)' != ''" Include="export DOTNET_RUNTIME_VARIANT=$(RuntimeVariant)" /> diff --git a/src/tests/Interop/DllImportSearchPaths/DllImportSearchPathsTest.cs b/src/tests/Interop/DllImportSearchPaths/DllImportSearchPathsTest.cs index b86a77cbc521e1..00c7b375a7ed77 100644 --- a/src/tests/Interop/DllImportSearchPaths/DllImportSearchPathsTest.cs +++ b/src/tests/Interop/DllImportSearchPaths/DllImportSearchPathsTest.cs @@ -21,7 +21,7 @@ public static void AssemblyDirectory_NotFound() public static bool CanLoadAssemblyInSubdirectory => !TestLibrary.Utilities.IsNativeAot && - !TestLibrary.PlatformDetection.IsMonoLLVMFULLAOT && + !TestLibrary.PlatformDetection.IsMonoFULLAOT && !OperatingSystem.IsAndroid() && !OperatingSystem.IsIOS() && 
!OperatingSystem.IsTvOS() && diff --git a/src/tests/Interop/Interop.csproj b/src/tests/Interop/Interop.csproj index 6fc22f7bf4f273..ccbdc2f5e1e7fd 100644 --- a/src/tests/Interop/Interop.csproj +++ b/src/tests/Interop/Interop.csproj @@ -2,6 +2,8 @@ true Debug;Release;Checked + + true diff --git a/src/tests/Interop/NativeLibrary/API/NativeLibraryTests.cs b/src/tests/Interop/NativeLibrary/API/NativeLibraryTests.cs index 2c43feaa3be971..09a3d6c41b9005 100644 --- a/src/tests/Interop/NativeLibrary/API/NativeLibraryTests.cs +++ b/src/tests/Interop/NativeLibrary/API/NativeLibraryTests.cs @@ -182,7 +182,7 @@ public void LoadLibrary_AssemblyDirectory() string subdirectory = Path.Combine(testBinDir, "subdirectory"); - if (!TestLibrary.Utilities.IsNativeAot && !TestLibrary.PlatformDetection.IsMonoLLVMFULLAOT) + if (!TestLibrary.Utilities.IsNativeAot && !TestLibrary.PlatformDetection.IsMonoFULLAOT) { // Library should be found in the assembly directory Assembly assemblyInSubdirectory = Assembly.LoadFile(Path.Combine(subdirectory, $"{assembly.GetName().Name}{suffix}.dll")); diff --git a/src/tests/Interop/StringMarshalling/AnsiBSTR/AnsiBStrTest.csproj b/src/tests/Interop/StringMarshalling/AnsiBSTR/AnsiBStrTest.csproj index f476d5f41f21f4..ae4b7c519db820 100644 --- a/src/tests/Interop/StringMarshalling/AnsiBSTR/AnsiBStrTest.csproj +++ b/src/tests/Interop/StringMarshalling/AnsiBSTR/AnsiBStrTest.csproj @@ -2,6 +2,8 @@ true $(DefineConstants);ANSIBSTR + + true diff --git a/src/tests/Interop/StringMarshalling/BSTR/BSTRTest.csproj b/src/tests/Interop/StringMarshalling/BSTR/BSTRTest.csproj index 7aab41716ae908..539f34ced8c87f 100644 --- a/src/tests/Interop/StringMarshalling/BSTR/BSTRTest.csproj +++ b/src/tests/Interop/StringMarshalling/BSTR/BSTRTest.csproj @@ -2,6 +2,8 @@ true $(DefineConstants);BSTR + + true diff --git a/src/tests/Interop/StringMarshalling/LPTSTR/LPTSTRTest.csproj b/src/tests/Interop/StringMarshalling/LPTSTR/LPTSTRTest.csproj index 52006bf63c1d16..e436167e972f0a 100644 
--- a/src/tests/Interop/StringMarshalling/LPTSTR/LPTSTRTest.csproj +++ b/src/tests/Interop/StringMarshalling/LPTSTR/LPTSTRTest.csproj @@ -2,6 +2,8 @@ true $(DefineConstants);LPTSTR + + true diff --git a/src/tests/Interop/StringMarshalling/VBByRefStr/VBByRefStrTest.csproj b/src/tests/Interop/StringMarshalling/VBByRefStr/VBByRefStrTest.csproj index 621a9f162f3bf5..9154f936b74255 100644 --- a/src/tests/Interop/StringMarshalling/VBByRefStr/VBByRefStrTest.csproj +++ b/src/tests/Interop/StringMarshalling/VBByRefStr/VBByRefStrTest.csproj @@ -1,6 +1,8 @@ true + + true diff --git a/src/tests/JIT/HardwareIntrinsics/X86/X86Base.X64/Program.X86Base.X64.cs b/src/tests/JIT/HardwareIntrinsics/X86/X86Base.X64/Program.X86Base.X64.cs index 40dc13b975f476..98901cf3dfe899 100644 --- a/src/tests/JIT/HardwareIntrinsics/X86/X86Base.X64/Program.X86Base.X64.cs +++ b/src/tests/JIT/HardwareIntrinsics/X86/X86Base.X64/Program.X86Base.X64.cs @@ -4,7 +4,7 @@ using System.Collections.Generic; [assembly:Xunit.ActiveIssue("https://github.com/dotnet/runtime/issues/75767", typeof(TestLibrary.PlatformDetection), nameof(TestLibrary.PlatformDetection.IsMonoLLVMAOT))] -[assembly:Xunit.ActiveIssue("https://github.com/dotnet/runtime/issues/75767", typeof(TestLibrary.PlatformDetection), nameof(TestLibrary.PlatformDetection.IsMonoLLVMFULLAOT))] +[assembly:Xunit.ActiveIssue("https://github.com/dotnet/runtime/issues/75767", typeof(TestLibrary.PlatformDetection), nameof(TestLibrary.PlatformDetection.IsMonoFULLAOT))] namespace JIT.HardwareIntrinsics.X86._X86Base.X64 { public static partial class Program diff --git a/src/tests/JIT/HardwareIntrinsics/X86/X86Base/Program.X86Base.cs b/src/tests/JIT/HardwareIntrinsics/X86/X86Base/Program.X86Base.cs index 364a8aee4b047f..b0001aeeb3e96e 100644 --- a/src/tests/JIT/HardwareIntrinsics/X86/X86Base/Program.X86Base.cs +++ b/src/tests/JIT/HardwareIntrinsics/X86/X86Base/Program.X86Base.cs @@ -4,7 +4,7 @@ using System.Collections.Generic; 
[assembly:Xunit.ActiveIssue("https://github.com/dotnet/runtime/issues/75767", typeof(TestLibrary.PlatformDetection), nameof(TestLibrary.PlatformDetection.IsMonoLLVMAOT))] -[assembly:Xunit.ActiveIssue("https://github.com/dotnet/runtime/issues/75767", typeof(TestLibrary.PlatformDetection), nameof(TestLibrary.PlatformDetection.IsMonoLLVMFULLAOT))] +[assembly:Xunit.ActiveIssue("https://github.com/dotnet/runtime/issues/75767", typeof(TestLibrary.PlatformDetection), nameof(TestLibrary.PlatformDetection.IsMonoFULLAOT))] namespace JIT.HardwareIntrinsics.X86._X86Base { public static partial class Program diff --git a/src/tests/JIT/Methodical/Boxing/boxunbox/BoxPatternMatchAndSideEffects.csproj b/src/tests/JIT/Methodical/Boxing/boxunbox/BoxPatternMatchAndSideEffects.csproj index 17ce8036e6a829..e1e12460ae03bc 100644 --- a/src/tests/JIT/Methodical/Boxing/boxunbox/BoxPatternMatchAndSideEffects.csproj +++ b/src/tests/JIT/Methodical/Boxing/boxunbox/BoxPatternMatchAndSideEffects.csproj @@ -1,6 +1,8 @@ PdbOnly + + true diff --git a/src/tests/JIT/Methodical/Methodical_others.csproj b/src/tests/JIT/Methodical/Methodical_others.csproj index 4021ca89922827..998101aaa4553b 100644 --- a/src/tests/JIT/Methodical/Methodical_others.csproj +++ b/src/tests/JIT/Methodical/Methodical_others.csproj @@ -1,4 +1,8 @@ + + + true + diff --git a/src/tests/JIT/Regression/JitBlue/GitHub_26491/GitHub_26491.ilproj b/src/tests/JIT/Regression/JitBlue/GitHub_26491/GitHub_26491.ilproj index 5ddfd280bde666..439e29a31673e1 100644 --- a/src/tests/JIT/Regression/JitBlue/GitHub_26491/GitHub_26491.ilproj +++ b/src/tests/JIT/Regression/JitBlue/GitHub_26491/GitHub_26491.ilproj @@ -1,4 +1,8 @@ + + + true + diff --git a/src/tests/JIT/Regression/Regression_3.csproj b/src/tests/JIT/Regression/Regression_3.csproj index 4eafe2b2450fbe..0006ba5709b72f 100644 --- a/src/tests/JIT/Regression/Regression_3.csproj +++ b/src/tests/JIT/Regression/Regression_3.csproj @@ -1,4 +1,8 @@ + + + true + diff --git a/src/tests/build.proj 
b/src/tests/build.proj index d41ed76b98276a..e7886f37d77037 100644 --- a/src/tests/build.proj +++ b/src/tests/build.proj @@ -144,15 +144,22 @@ - + - - + + + + + + + + + diff --git a/src/tests/issues.targets b/src/tests/issues.targets index 69cf84f6fdd466..8515eecf4d8e93 100644 --- a/src/tests/issues.targets +++ b/src/tests/issues.targets @@ -2276,7 +2276,7 @@ - + expected failure: overlapped structs fail at AOT compile time, not runtime @@ -2439,7 +2439,7 @@ - + https://github.com/dotnet/runtime/issues/48914 @@ -2511,9 +2511,6 @@ - - - https://github.com/dotnet/runtime/issues/82859 @@ -3740,6 +3737,83 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true + diff --git a/src/tests/readytorun/readytorun.csproj b/src/tests/readytorun/readytorun.csproj index d815ac504e6375..585b0be3fcdfe9 100644 --- a/src/tests/readytorun/readytorun.csproj +++ b/src/tests/readytorun/readytorun.csproj @@ -1,4 +1,8 @@ + + + true + From 7c14f7101083189affdb377b030237b89750196e Mon Sep 17 00:00:00 2001 From: David Wrighton Date: Thu, 28 Mar 2024 12:42:30 -0700 Subject: [PATCH 008/132] Add support for the ISOSDacInterface14 api (#100364) * Add support for the ISOSDacInterface14 api - This is a split out of the diagnostics changes associated with PR #99183 - This will allow the removal of the useability of the DomainLocalModule apis from ISOSDacInterface without changing the functionality of the interface by allowing consumers to move to the new apis - NOTE, the SOS_BREAKING_CHANGE number is expected to move from 4 to 5 at that point, so users of ISOSDacInterface which understand ISOSDacInterface14 should indicate that they understand version 5. 
--- src/coreclr/debug/daccess/daccess.cpp | 4 + src/coreclr/debug/daccess/dacdbiimpl.cpp | 12 +-- src/coreclr/debug/daccess/dacimpl.h | 8 +- src/coreclr/debug/daccess/request.cpp | 116 +++++++++++++++++++++ src/coreclr/inc/sospriv.idl | 14 +++ src/coreclr/pal/prebuilt/idl/sospriv_i.cpp | 9 +- src/coreclr/pal/prebuilt/inc/sospriv.h | 114 ++++++++++++++++++++ src/coreclr/vm/methodtable.cpp | 6 +- src/coreclr/vm/methodtable.h | 7 +- src/coreclr/vm/methodtable.inl | 4 +- 10 files changed, 270 insertions(+), 24 deletions(-) diff --git a/src/coreclr/debug/daccess/daccess.cpp b/src/coreclr/debug/daccess/daccess.cpp index e79dab808def3f..eb5127cdfa4f3c 100644 --- a/src/coreclr/debug/daccess/daccess.cpp +++ b/src/coreclr/debug/daccess/daccess.cpp @@ -3231,6 +3231,10 @@ ClrDataAccess::QueryInterface(THIS_ { ifaceRet = static_cast(this); } + else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface14))) + { + ifaceRet = static_cast(this); + } else { *iface = NULL; diff --git a/src/coreclr/debug/daccess/dacdbiimpl.cpp b/src/coreclr/debug/daccess/dacdbiimpl.cpp index 0a1a43ab4a1b10..d1a0a1ada64d49 100644 --- a/src/coreclr/debug/daccess/dacdbiimpl.cpp +++ b/src/coreclr/debug/daccess/dacdbiimpl.cpp @@ -1577,16 +1577,8 @@ void DacDbiInterfaceImpl::GetStaticsBases(TypeHandle thExact, PTR_BYTE * ppNonGCStaticsBase) { MethodTable * pMT = thExact.GetMethodTable(); - Module * pModuleForStatics = pMT->GetModuleForStatics(); - if (pModuleForStatics != NULL) - { - PTR_DomainLocalModule pLocalModule = pModuleForStatics->GetDomainLocalModule(); - if (pLocalModule != NULL) - { - *ppGCStaticsBase = pLocalModule->GetGCStaticsBasePointer(pMT); - *ppNonGCStaticsBase = pLocalModule->GetNonGCStaticsBasePointer(pMT); - } - } + *ppGCStaticsBase = pMT->GetGCStaticsBasePointer(); + *ppNonGCStaticsBase = pMT->GetNonGCStaticsBasePointer(); } // DacDbiInterfaceImpl::GetStaticsBases //----------------------------------------------------------------------------- diff --git 
a/src/coreclr/debug/daccess/dacimpl.h b/src/coreclr/debug/daccess/dacimpl.h index 03756c6716574c..e698eed4c1803b 100644 --- a/src/coreclr/debug/daccess/dacimpl.h +++ b/src/coreclr/debug/daccess/dacimpl.h @@ -816,7 +816,8 @@ class ClrDataAccess public ISOSDacInterface10, public ISOSDacInterface11, public ISOSDacInterface12, - public ISOSDacInterface13 + public ISOSDacInterface13, + public ISOSDacInterface14 { public: ClrDataAccess(ICorDebugDataTarget * pTarget, ICLRDataTarget * pLegacyTarget=0); @@ -1216,6 +1217,11 @@ class ClrDataAccess virtual HRESULT STDMETHODCALLTYPE GetGCFreeRegions(ISOSMemoryEnum **ppEnum); virtual HRESULT STDMETHODCALLTYPE LockedFlush(); + // ISOSDacInterface14 + virtual HRESULT STDMETHODCALLTYPE GetStaticBaseAddress(CLRDATA_ADDRESS methodTable, CLRDATA_ADDRESS *nonGCStaticsAddress, CLRDATA_ADDRESS *GCStaticsAddress); + virtual HRESULT STDMETHODCALLTYPE GetThreadStaticBaseAddress(CLRDATA_ADDRESS methodTable, CLRDATA_ADDRESS thread, CLRDATA_ADDRESS *nonGCStaticsAddress, CLRDATA_ADDRESS *GCStaticsAddress); + virtual HRESULT STDMETHODCALLTYPE GetMethodTableInitializationFlags(CLRDATA_ADDRESS methodTable, MethodTableInitializationFlags *initializationStatus); + // // ClrDataAccess. 
// diff --git a/src/coreclr/debug/daccess/request.cpp b/src/coreclr/debug/daccess/request.cpp index 1f11ac61f7217d..dea3b8544ab93a 100644 --- a/src/coreclr/debug/daccess/request.cpp +++ b/src/coreclr/debug/daccess/request.cpp @@ -5401,3 +5401,119 @@ HRESULT ClrDataAccess::LockedFlush() SOSDacLeave(); return hr; } + +HRESULT STDMETHODCALLTYPE ClrDataAccess::GetStaticBaseAddress(CLRDATA_ADDRESS methodTable, CLRDATA_ADDRESS *nonGCStaticsAddress, CLRDATA_ADDRESS *GCStaticsAddress) +{ + if (!nonGCStaticsAddress && !GCStaticsAddress) + return E_POINTER; + + if (!methodTable) + return E_INVALIDARG; + + SOSDacEnter(); + + PTR_MethodTable mTable = PTR_MethodTable(TO_TADDR(methodTable)); + + BOOL bIsFree = FALSE; + if (!DacValidateMethodTable(mTable, bIsFree)) + { + hr = E_INVALIDARG; + } + else + { + if (GCStaticsAddress != NULL) + { + *GCStaticsAddress = PTR_CDADDR(mTable->GetGCStaticsBasePointer()); + } + if (nonGCStaticsAddress != NULL) + { + *nonGCStaticsAddress = PTR_CDADDR(mTable->GetNonGCStaticsBasePointer()); + } + } + + SOSDacLeave(); + return hr; +} + + +HRESULT STDMETHODCALLTYPE ClrDataAccess::GetThreadStaticBaseAddress(CLRDATA_ADDRESS methodTable, CLRDATA_ADDRESS threadPtr, CLRDATA_ADDRESS *nonGCStaticsAddress, CLRDATA_ADDRESS *GCStaticsAddress) +{ + if (!nonGCStaticsAddress && !GCStaticsAddress) + return E_POINTER; + + if (!methodTable) + return E_INVALIDARG; + + if (!threadPtr) + return E_INVALIDARG; + + SOSDacEnter(); + + PTR_MethodTable mTable = PTR_MethodTable(TO_TADDR(methodTable)); + PTR_Thread thread = PTR_Thread(TO_TADDR(threadPtr)); + + + BOOL bIsFree = FALSE; + if (!DacValidateMethodTable(mTable, bIsFree)) + { + hr = E_INVALIDARG; + } + else + { + if (mTable->GetClass()->GetNumThreadStaticFields() == 0) + { + if (GCStaticsAddress != NULL) + { + *GCStaticsAddress = 0; + } + if (nonGCStaticsAddress != NULL) + { + *nonGCStaticsAddress = 0; + } + } + else + { + if (GCStaticsAddress != NULL) + { + *GCStaticsAddress = 
PTR_CDADDR(mTable->GetGCThreadStaticsBasePointer(thread)); + } + if (nonGCStaticsAddress != NULL) + { + *nonGCStaticsAddress = PTR_CDADDR(mTable->GetNonGCThreadStaticsBasePointer(thread)); + } + } + } + + SOSDacLeave(); + return hr; +} + +HRESULT STDMETHODCALLTYPE ClrDataAccess::GetMethodTableInitializationFlags(CLRDATA_ADDRESS methodTable, MethodTableInitializationFlags *initializationStatus) +{ + if (!methodTable) + return E_INVALIDARG; + + if (!initializationStatus) + return E_POINTER; + + SOSDacEnter(); + + *initializationStatus = (MethodTableInitializationFlags)0; + PTR_MethodTable mTable = PTR_MethodTable(TO_TADDR(methodTable)); + BOOL bIsFree = FALSE; + if (!DacValidateMethodTable(mTable, bIsFree)) + { + hr = E_INVALIDARG; + } + else + { + *initializationStatus = mTable->IsClassInited() ? MethodTableInitialized : (MethodTableInitializationFlags)0; + if (mTable->IsInitError()) + { + *initializationStatus = (MethodTableInitializationFlags)(*initializationStatus | MethodTableInitializationFailed); + } + } + + SOSDacLeave(); + return hr; +} diff --git a/src/coreclr/inc/sospriv.idl b/src/coreclr/inc/sospriv.idl index a13760f7732a49..c377df57a15307 100644 --- a/src/coreclr/inc/sospriv.idl +++ b/src/coreclr/inc/sospriv.idl @@ -43,12 +43,14 @@ typedef unsigned int size_t; typedef int ModuleMapType; typedef int VCSHeapType; typedef int LoaderHeapKind; +typedef int MethodTableInitializationFlags; cpp_quote("#endif") cpp_quote("typedef enum { TYPEDEFTOMETHODTABLE, TYPEREFTOMETHODTABLE } ModuleMapType;") cpp_quote("typedef enum {IndcellHeap, LookupHeap, ResolveHeap, DispatchHeap, CacheEntryHeap, VtableHeap} VCSHeapType;") cpp_quote("typedef enum {LoaderHeapKindNormal = 0, LoaderHeapKindExplicitControl = 1} LoaderHeapKind;") +cpp_quote("typedef enum {MethodTableInitialized = 1, MethodTableInitializationFailed = 2} MethodTableInitializationFlags;") cpp_quote("typedef enum {FreeUnknownRegion = 0, FreeGlobalHugeRegion = 1, FreeGlobalRegion = 2, FreeRegion = 3, 
FreeSohSegment = 4, FreeUohSegment = 5 } FreeRegionKind;") typedef void (*MODULEMAPTRAVERSE)(UINT index, CLRDATA_ADDRESS methodTable,LPVOID token); @@ -505,3 +507,15 @@ interface ISOSDacInterface13 : IUnknown HRESULT GetGCFreeRegions(ISOSMemoryEnum **ppEnum); HRESULT LockedFlush(); } + +[ + object, + local, + uuid(9aa22aca-6dc6-4a0c-b4e0-70d2416b9837) +] +interface ISOSDacInterface14 : IUnknown +{ + HRESULT GetStaticBaseAddress(CLRDATA_ADDRESS methodTable, CLRDATA_ADDRESS *nonGCStaticsAddress, CLRDATA_ADDRESS *GCStaticsAddress); + HRESULT GetThreadStaticBaseAddress(CLRDATA_ADDRESS methodTable, CLRDATA_ADDRESS thread, CLRDATA_ADDRESS *nonGCStaticsAddress, CLRDATA_ADDRESS *GCStaticsAddress); + HRESULT GetMethodTableInitializationFlags(CLRDATA_ADDRESS methodTable, MethodTableInitializationFlags *initializationStatus); +} diff --git a/src/coreclr/pal/prebuilt/idl/sospriv_i.cpp b/src/coreclr/pal/prebuilt/idl/sospriv_i.cpp index 141ec62612e48f..f070ae5816a8a8 100644 --- a/src/coreclr/pal/prebuilt/idl/sospriv_i.cpp +++ b/src/coreclr/pal/prebuilt/idl/sospriv_i.cpp @@ -5,11 +5,9 @@ /* link this file in with the server and any clients */ - /* File created by MIDL compiler version 8.01.0622 */ -/* at Mon Jan 18 19:14:07 2038 - */ + /* File created by MIDL compiler version 8.01.0628 */ /* Compiler settings for sospriv.idl: - Oicf, W1, Zp8, env=Win64 (32b run), target_arch=AMD64 8.01.0622 + Oicf, W1, Zp8, env=Win64 (32b run), target_arch=AMD64 8.01.0628 protocol : dce , ms_ext, c_ext, robust error checks: allocation ref bounds_check enum stub_data VC __declspec() decoration level: @@ -120,6 +118,9 @@ MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface12,0x1b93bacc,0x8ca4,0x432d,0x94,0x3a, MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface13,0x3176a8ed,0x597b,0x4f54,0xa7,0x1f,0x83,0x69,0x5c,0x6a,0x8c,0x5e); + +MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface14,0x9aa22aca,0x6dc6,0x4a0c,0xb4,0xe0,0x70,0xd2,0x41,0x6b,0x98,0x37); + #undef MIDL_DEFINE_GUID #ifdef __cplusplus diff --git 
a/src/coreclr/pal/prebuilt/inc/sospriv.h b/src/coreclr/pal/prebuilt/inc/sospriv.h index 4c86b39cb6f767..855696ef0ce4ef 100644 --- a/src/coreclr/pal/prebuilt/inc/sospriv.h +++ b/src/coreclr/pal/prebuilt/inc/sospriv.h @@ -205,6 +205,7 @@ typedef int VCSHeapType; typedef enum { TYPEDEFTOMETHODTABLE, TYPEREFTOMETHODTABLE } ModuleMapType; typedef enum {IndcellHeap, LookupHeap, ResolveHeap, DispatchHeap, CacheEntryHeap, VtableHeap} VCSHeapType; typedef enum {LoaderHeapKindNormal = 0, LoaderHeapKindExplicitControl = 1} LoaderHeapKind; +typedef enum {MethodTableInitialized = 1, MethodTableInitializationFailed = 2} MethodTableInitializationFlags; typedef enum {FreeUnknownRegion = 0, FreeGlobalHugeRegion = 1, FreeGlobalRegion = 2, FreeRegion = 3, FreeSohSegment = 4, FreeUohSegment = 5 } FreeRegionKind; typedef void ( *MODULEMAPTRAVERSE )( UINT index, @@ -3343,6 +3344,118 @@ EXTERN_C const IID IID_ISOSDacInterface13; #endif /* __ISOSDacInterface13_INTERFACE_DEFINED__ */ +#ifndef __ISOSDacInterface14_INTERFACE_DEFINED__ +#define __ISOSDacInterface14_INTERFACE_DEFINED__ + +/* interface ISOSDacInterface14 */ +/* [uuid][local][object] */ + + +EXTERN_C const IID IID_ISOSDacInterface14; + +#if defined(__cplusplus) && !defined(CINTERFACE) + + MIDL_INTERFACE("9aa22aca-6dc6-4a0c-b4e0-70d2416b9837") + ISOSDacInterface14 : public IUnknown + { + public: + virtual HRESULT STDMETHODCALLTYPE GetStaticBaseAddress( + CLRDATA_ADDRESS methodTable, + CLRDATA_ADDRESS *nonGCStaticsAddress, + CLRDATA_ADDRESS *GCStaticsAddress) = 0; + + virtual HRESULT STDMETHODCALLTYPE GetThreadStaticBaseAddress( + CLRDATA_ADDRESS methodTable, + CLRDATA_ADDRESS thread, + CLRDATA_ADDRESS *nonGCStaticsAddress, + CLRDATA_ADDRESS *GCStaticsAddress) = 0; + + virtual HRESULT STDMETHODCALLTYPE GetMethodTableInitializationFlags( + CLRDATA_ADDRESS methodTable, + MethodTableInitializationFlags *initializationStatus) = 0; + + }; + + +#else /* C style interface */ + + typedef struct ISOSDacInterface14Vtbl + { + BEGIN_INTERFACE 
+ + HRESULT ( STDMETHODCALLTYPE *QueryInterface )( + ISOSDacInterface14 * This, + /* [in] */ REFIID riid, + /* [annotation][iid_is][out] */ + _COM_Outptr_ void **ppvObject); + + ULONG ( STDMETHODCALLTYPE *AddRef )( + ISOSDacInterface14 * This); + + ULONG ( STDMETHODCALLTYPE *Release )( + ISOSDacInterface14 * This); + + HRESULT ( STDMETHODCALLTYPE *GetStaticBaseAddress )( + ISOSDacInterface14 * This, + CLRDATA_ADDRESS methodTable, + CLRDATA_ADDRESS *nonGCStaticsAddress, + CLRDATA_ADDRESS *GCStaticsAddress); + + HRESULT ( STDMETHODCALLTYPE *GetThreadStaticBaseAddress )( + ISOSDacInterface14 * This, + CLRDATA_ADDRESS methodTable, + CLRDATA_ADDRESS thread, + CLRDATA_ADDRESS *nonGCStaticsAddress, + CLRDATA_ADDRESS *GCStaticsAddress); + + HRESULT ( STDMETHODCALLTYPE *GetMethodTableInitializationFlags )( + ISOSDacInterface14 * This, + CLRDATA_ADDRESS methodTable, + MethodTableInitializationFlags *initializationStatus); + + END_INTERFACE + } ISOSDacInterface14Vtbl; + + interface ISOSDacInterface14 + { + CONST_VTBL struct ISOSDacInterface14Vtbl *lpVtbl; + }; + + + +#ifdef COBJMACROS + + +#define ISOSDacInterface14_QueryInterface(This,riid,ppvObject) \ + ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) + +#define ISOSDacInterface14_AddRef(This) \ + ( (This)->lpVtbl -> AddRef(This) ) + +#define ISOSDacInterface14_Release(This) \ + ( (This)->lpVtbl -> Release(This) ) + + +#define ISOSDacInterface14_GetStaticBaseAddress(This,methodTable,nonGCStaticsAddress,GCStaticsAddress) \ + ( (This)->lpVtbl -> GetStaticBaseAddress(This,methodTable,nonGCStaticsAddress,GCStaticsAddress) ) + +#define ISOSDacInterface14_GetThreadStaticBaseAddress(This,methodTable,thread,nonGCStaticsAddress,GCStaticsAddress) \ + ( (This)->lpVtbl -> GetThreadStaticBaseAddress(This,methodTable,thread,nonGCStaticsAddress,GCStaticsAddress) ) + +#define ISOSDacInterface14_GetMethodTableInitializationFlags(This,methodTable,initializationStatus) \ + ( (This)->lpVtbl -> 
GetMethodTableInitializationFlags(This,methodTable,initializationStatus) ) + +#endif /* COBJMACROS */ + + +#endif /* C style interface */ + + + + +#endif /* __ISOSDacInterface14_INTERFACE_DEFINED__ */ + + /* Additional Prototypes for ALL interfaces */ /* end of Additional Prototypes */ @@ -3353,3 +3466,4 @@ EXTERN_C const IID IID_ISOSDacInterface13; #endif + diff --git a/src/coreclr/vm/methodtable.cpp b/src/coreclr/vm/methodtable.cpp index 39f5d9b31e147c..007e5cd949d35a 100644 --- a/src/coreclr/vm/methodtable.cpp +++ b/src/coreclr/vm/methodtable.cpp @@ -366,7 +366,6 @@ BOOL MethodTable::ValidateWithPossibleAV() (pEEClass && (pEEClass->GetMethodTableWithPossibleAV()->GetClassWithPossibleAV() == pEEClass)))); } -#ifndef DACCESS_COMPILE //========================================================================================== BOOL MethodTable::IsClassInited() @@ -379,7 +378,7 @@ BOOL MethodTable::IsClassInited() if (IsSharedByGenericInstantiations()) return FALSE; - DomainLocalModule *pLocalModule = GetDomainLocalModule(); + PTR_DomainLocalModule pLocalModule = GetDomainLocalModule(); _ASSERTE(pLocalModule != NULL); @@ -391,12 +390,13 @@ BOOL MethodTable::IsInitError() { WRAPPER_NO_CONTRACT; - DomainLocalModule *pLocalModule = GetDomainLocalModule(); + PTR_DomainLocalModule pLocalModule = GetDomainLocalModule(); _ASSERTE(pLocalModule != NULL); return pLocalModule->IsClassInitError(this); } +#ifndef DACCESS_COMPILE //========================================================================================== // mark the class as having its .cctor run void MethodTable::SetClassInited() diff --git a/src/coreclr/vm/methodtable.h b/src/coreclr/vm/methodtable.h index 447f5028717815..703508e2126b50 100644 --- a/src/coreclr/vm/methodtable.h +++ b/src/coreclr/vm/methodtable.h @@ -859,11 +859,10 @@ class MethodTable // mark the class as having its cctor run. 
#ifndef DACCESS_COMPILE void SetClassInited(); - BOOL IsClassInited(); - - BOOL IsInitError(); void SetClassInitError(); #endif + BOOL IsClassInited(); + BOOL IsInitError(); inline BOOL IsGlobalClass() { @@ -2233,9 +2232,9 @@ class MethodTable DWORD GetOffsetOfFirstStaticHandle(); DWORD GetOffsetOfFirstStaticMT(); -#ifndef DACCESS_COMPILE inline PTR_BYTE GetNonGCStaticsBasePointer(); inline PTR_BYTE GetGCStaticsBasePointer(); +#ifndef DACCESS_COMPILE inline PTR_BYTE GetNonGCThreadStaticsBasePointer(); inline PTR_BYTE GetGCThreadStaticsBasePointer(); inline PTR_BYTE GetGCThreadStaticsBaseHandle(); diff --git a/src/coreclr/vm/methodtable.inl b/src/coreclr/vm/methodtable.inl index 1667912ed50116..b2816fccc2c09e 100644 --- a/src/coreclr/vm/methodtable.inl +++ b/src/coreclr/vm/methodtable.inl @@ -1078,8 +1078,6 @@ inline DWORD MethodTable::GetOptionalMembersSize() return GetEndOffsetOfOptionalMembers() - GetStartOffsetOfOptionalMembers(); } -#ifndef DACCESS_COMPILE - //========================================================================================== inline PTR_BYTE MethodTable::GetNonGCStaticsBasePointer() { @@ -1094,6 +1092,8 @@ inline PTR_BYTE MethodTable::GetGCStaticsBasePointer() return GetDomainLocalModule()->GetGCStaticsBasePointer(this); } +#ifndef DACCESS_COMPILE + //========================================================================================== inline PTR_BYTE MethodTable::GetNonGCThreadStaticsBasePointer() { From e75371251c9b78c619d5346ca014aa4631cadc72 Mon Sep 17 00:00:00 2001 From: Sven Boemer Date: Thu, 28 Mar 2024 14:24:06 -0700 Subject: [PATCH 009/132] Allow trimming FeatureGuard and FeatureSwitchDefinition attributes (#100263) Under AggressiveAttributeTrimming setting. Fixes https://github.com/dotnet/runtime/issues/100256. AggressiveAttributeTrimming was attempting to remove RequiresDynamicCode attributes, but the type was still referenced by FeatureGuardAttribute on IsDynamicCodeCompiled. 
Adding FeatureGuardAttribute to the set of attributes that get removed with AggressiveAttributeTrimming fixes this. Also adding FeatureSwitchDefinitionAttribute because that one can be removed as well. --- .../ILLink/ILLink.LinkAttributes.Shared.xml | 6 ++ .../AggressiveAttributeTrimmingTest.cs | 80 +++++++++++++++++++ .../System.Runtime.TrimmingTests.proj | 4 + 3 files changed, 90 insertions(+) create mode 100644 src/libraries/System.Runtime/tests/System.Runtime.Tests/TrimmingTests/AggressiveAttributeTrimmingTest.cs diff --git a/src/libraries/System.Private.CoreLib/src/ILLink/ILLink.LinkAttributes.Shared.xml b/src/libraries/System.Private.CoreLib/src/ILLink/ILLink.LinkAttributes.Shared.xml index adbcd6f00ee499..7706dd2ce18fec 100644 --- a/src/libraries/System.Private.CoreLib/src/ILLink/ILLink.LinkAttributes.Shared.xml +++ b/src/libraries/System.Private.CoreLib/src/ILLink/ILLink.LinkAttributes.Shared.xml @@ -250,6 +250,12 @@ + + + + + + diff --git a/src/libraries/System.Runtime/tests/System.Runtime.Tests/TrimmingTests/AggressiveAttributeTrimmingTest.cs b/src/libraries/System.Runtime/tests/System.Runtime.Tests/TrimmingTests/AggressiveAttributeTrimmingTest.cs new file mode 100644 index 00000000000000..dfdce520e1904d --- /dev/null +++ b/src/libraries/System.Runtime/tests/System.Runtime.Tests/TrimmingTests/AggressiveAttributeTrimmingTest.cs @@ -0,0 +1,80 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +#nullable enable + +using System; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Reflection; + +/// +/// Ensures setting _AggressiveAttributeTrimming = true causes various attributes to be trimmed +/// +class Program +{ + [UnconditionalSuppressMessage ("ReflectionAnalysis", "IL2111", Justification = "Expected trim warning for reflection over annotated members.")] + [UnconditionalSuppressMessage ("ReflectionAnalysis", "IL2026", Justification = "Expected trim warning for reflection over annotated members.")] + static int Main(string[] args) + { + // Reference to IsDynamicCodeSupported (which has FeatureGuard(typeof(RequiresDynamicCodeAttribute))) + // should not produce a warning because both RequiresDynamicCodeAttribute and FeatureGuardAttribute are removed. + if (RuntimeFeature.IsDynamicCodeSupported) + { + UseDynamicCode(); + } + + // Check that a few attribute instances are indeed removed + CheckRemovedAttributes(typeof(MembersWithRemovedAttributes)); + + return 100; + } + + [RequiresDynamicCode(nameof(UseDynamicCode))] + static void UseDynamicCode() { } + + class MembersWithRemovedAttributes + { + static void DynamicallyAccessedMembers([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods)] Type t) { } + + [FeatureGuard(typeof(RequiresUnreferencedCodeAttribute))] + static bool FeatureGuard => throw null!; + + [FeatureSwitchDefinition("Program.MembersWithRemovedAttributes.FeatureSwitchDefinition")] + static bool FeatureSwitchDefinition => throw null!; + + [RequiresDynamicCode(nameof(RequiresDynamicCode))] + static void RequiresDynamicCode() { } + + [RequiresUnreferencedCode(nameof(RequiresUnreferencedCode))] + static void RequiresUnreferencedCode() { } + } + + static void CheckRemovedAttributes([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.All)] Type type) + { + Console.WriteLine($"Validating {type}"); + foreach (var member in type.GetMembers(BindingFlags.Static | 
BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.DeclaredOnly)) + { + CheckRemovedAttributes(member); + + if (member is MethodInfo method) + { + foreach (var parameter in method.GetParameters()) + { + CheckRemovedAttributes(parameter); + } + } + } + } + + static void CheckRemovedAttributes(ICustomAttributeProvider provider) + { + foreach (var attribute in provider.GetCustomAttributes(false)) + { + if (attribute is NullableContextAttribute) + continue; + + throw new Exception($"Unexpected attribute {attribute.GetType()} on {provider}"); + } + } +} diff --git a/src/libraries/System.Runtime/tests/System.Runtime.Tests/TrimmingTests/System.Runtime.TrimmingTests.proj b/src/libraries/System.Runtime/tests/System.Runtime.Tests/TrimmingTests/System.Runtime.TrimmingTests.proj index c786baba78e379..c9c42b2c20234e 100644 --- a/src/libraries/System.Runtime/tests/System.Runtime.Tests/TrimmingTests/System.Runtime.TrimmingTests.proj +++ b/src/libraries/System.Runtime/tests/System.Runtime.Tests/TrimmingTests/System.Runtime.TrimmingTests.proj @@ -2,6 +2,10 @@ + + _AggressiveAttributeTrimming + SuppressTrimAnalysisWarnings;TrimmerSingleWarn + osx-x64;linux-x64;browser-wasm From 2567a9467daf9c429fedfc7df845f5f04316dcda Mon Sep 17 00:00:00 2001 From: Kevin Jones Date: Thu, 28 Mar 2024 17:46:45 -0400 Subject: [PATCH 010/132] Fix initialization OpenSSL by KMAC --- .../Interop.EVP.MacAlgs.cs | 15 +++++++++++++-- .../tests/KmacTestDriver.cs | 18 ++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/src/libraries/Common/src/Interop/Unix/System.Security.Cryptography.Native/Interop.EVP.MacAlgs.cs b/src/libraries/Common/src/Interop/Unix/System.Security.Cryptography.Native/Interop.EVP.MacAlgs.cs index e9a974433516f8..75e88e40aaa6fc 100644 --- a/src/libraries/Common/src/Interop/Unix/System.Security.Cryptography.Native/Interop.EVP.MacAlgs.cs +++ 
b/src/libraries/Common/src/Interop/Unix/System.Security.Cryptography.Native/Interop.EVP.MacAlgs.cs @@ -12,8 +12,19 @@ internal static partial class Crypto { internal static partial class EvpMacAlgs { - internal static SafeEvpMacHandle? Kmac128 { get; } = EvpMacFetch(HashAlgorithmNames.KMAC128); - internal static SafeEvpMacHandle? Kmac256 { get; } = EvpMacFetch(HashAlgorithmNames.KMAC256); + internal static SafeEvpMacHandle? Kmac128 { get; } + internal static SafeEvpMacHandle? Kmac256 { get; } + + static EvpMacAlgs() + { + CryptoInitializer.Initialize(); + + // Do not use property initializers for these because we need to ensure CryptoInitializer.Initialize + // is called first. Property initializers happen before cctors, so instead set the property after the + // initializer is run. + Kmac128 = EvpMacFetch(HashAlgorithmNames.KMAC128); + Kmac256 = EvpMacFetch(HashAlgorithmNames.KMAC256); + } [LibraryImport(Libraries.CryptoNative, EntryPoint = "CryptoNative_EvpMacFetch", StringMarshalling = StringMarshalling.Utf8)] private static partial SafeEvpMacHandle CryptoNative_EvpMacFetch(string algorithm, out int haveFeature); diff --git a/src/libraries/System.Security.Cryptography/tests/KmacTestDriver.cs b/src/libraries/System.Security.Cryptography/tests/KmacTestDriver.cs index 24df42b3d411a4..6a29bae1a17b7d 100644 --- a/src/libraries/System.Security.Cryptography/tests/KmacTestDriver.cs +++ b/src/libraries/System.Security.Cryptography/tests/KmacTestDriver.cs @@ -7,6 +7,7 @@ using System.Text; using System.Threading; using System.Threading.Tasks; +using Microsoft.DotNet.RemoteExecutor; using Microsoft.DotNet.XUnitExtensions; using Xunit; @@ -1067,6 +1068,23 @@ public void IsSupported_AgreesWithPlatform() Assert.Equal(TKmacTrait.IsSupported, PlatformSupportsKmac()); } + [ConditionalFact(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))] + public void IsSupported_InitializesCrypto() + { + if (!IsSupported) + { + throw new SkipTestException("Algorithm is not supported 
on current platform."); + } + + // This ensures that KMAC is the first cryptographic algorithm touched in the process, which kicks off + // the initialization of the crypto layer on some platforms. Running in a remote executor ensures no other + // test has pre-initialized anything. + RemoteExecutor.Invoke(static () => + { + return TKmacTrait.IsSupported ? RemoteExecutor.SuccessExitCode : 0; + }).Dispose(); + } + private static async Task AssertOneShotsThrowAnyAsync( int? keySize = null, int? customizationStringSize = null, From f51d705d840ab5859a95e66cb3aacc5f17e90b4c Mon Sep 17 00:00:00 2001 From: Filip Navara Date: Thu, 28 Mar 2024 23:04:27 +0100 Subject: [PATCH 011/132] NativeAOT/win-x86: Enable FEATURE_EH_CALLFINALLY_THUNKS (#99718) * NativeAOT/win-x86: Enable FEATURE_EH_CALLFINALLY_THUNKS * Update src/tests/JIT/jit64/eh/basics/throwinfinallyintryfilter2.ilproj --- src/coreclr/jit/targetx86.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/coreclr/jit/targetx86.h b/src/coreclr/jit/targetx86.h index 60b2f7793f435b..08a4ab996bd1bf 100644 --- a/src/coreclr/jit/targetx86.h +++ b/src/coreclr/jit/targetx86.h @@ -54,8 +54,13 @@ #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, // filter-handler, fault) and directly execute 'finally' clauses. +#ifdef FEATURE_EH_FUNCLETS + #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, + // protected by "cloned finally" clauses. +#else #define FEATURE_EH_CALLFINALLY_THUNKS 0 // Generate call-to-finally code in "thunks" in the enclosing EH region, // protected by "cloned finally" clauses. 
+#endif #define ETW_EBP_FRAMED 1 // if 1 we cannot use EBP as a scratch register and must create EBP based // frames for most methods #define CSE_CONSTS 1 // Enable if we want to CSE constants From 6561e7c88210ae2d1e4e672b6d7b3a1244e67022 Mon Sep 17 00:00:00 2001 From: Andy Ayers Date: Thu, 28 Mar 2024 15:53:02 -0700 Subject: [PATCH 012/132] JIT: fix count reconstruction problem (#100385) In large methods with lots of irreducible loops we may find reconstructed counts reaching very large values. Since profile counts in practice won't ever be much larger than say 10^12, detect when reconstructed counts exceed this value, and stop the algorithm. We may eventually decide to rerun in "hard blend" mode where we intentionally limit the edge likelihood ranges. But this should do for now. Closes #100350. --- src/coreclr/jit/fgprofilesynthesis.cpp | 11 +++++++++++ src/coreclr/jit/fgprofilesynthesis.h | 2 ++ 2 files changed, 13 insertions(+) diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index d76dcece8220da..2de615136cfeb4 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -1289,6 +1289,12 @@ void ProfileSynthesis::GaussSeidelSolver() residual = change; residualBlock = block; } + + if (newWeight >= maxCount) + { + JITDUMP("count overflow in " FMT_BB ": " FMT_WT "\n", block->bbNum, newWeight); + m_overflow = true; + } } // If there were no improper headers, we will have converged in one pass. @@ -1312,6 +1318,11 @@ void ProfileSynthesis::GaussSeidelSolver() break; } + if (m_overflow) + { + break; + } + // If we have been iterating for a bit, estimate the dominant GS // eigenvalue. (we might want to start with Jacobi iterations // to get the Jacobi eigenvalue instead). 
diff --git a/src/coreclr/jit/fgprofilesynthesis.h b/src/coreclr/jit/fgprofilesynthesis.h index c5e3883bdca000..216bd58297286a 100644 --- a/src/coreclr/jit/fgprofilesynthesis.h +++ b/src/coreclr/jit/fgprofilesynthesis.h @@ -51,6 +51,7 @@ class ProfileSynthesis static constexpr weight_t ilNextLikelihood = 0.52; static constexpr weight_t loopBackLikelihood = 0.9; static constexpr weight_t loopExitLikelihood = 0.9; + static constexpr weight_t maxCount = 1e12; void Run(ProfileSynthesisOption option); @@ -84,6 +85,7 @@ class ProfileSynthesis unsigned m_improperLoopHeaders = 0; unsigned m_cappedCyclicProbabilities = 0; bool m_approximate = false; + bool m_overflow = false; }; #endif // !_FGPROFILESYNTHESIS_H_ From 83b0d939bedadf7d782b0b26307c2d8c1d5b76f4 Mon Sep 17 00:00:00 2001 From: Mario Pistrich Date: Fri, 29 Mar 2024 01:05:34 +0100 Subject: [PATCH 013/132] Fix CA2022 warnings (Avoid inexact read with 'Stream.Read') (#100352) * Configure CA2022 severity * Fix CA2022 warnings * Check for NET7_0_OR_GREATER before using ReadExactly * Fix CS1503 * Formatting --------- Co-authored-by: Stephen Toub Co-authored-by: Buyaa Namnan --- eng/CodeAnalysis.src.globalconfig | 3 +++ eng/CodeAnalysis.test.globalconfig | 3 +++ .../src/System/IO/Ports/SerialPort.cs | 16 +++++++++++++++- .../src/System/ServiceModel/XmlBuffer.cs | 16 +++++++++++++++- .../src/Internal/Synthesis/AudioBase.cs | 17 ++++++++++++++++- .../src/Internal/Synthesis/EngineSite.cs | 17 ++++++++++++++++- 6 files changed, 68 insertions(+), 4 deletions(-) diff --git a/eng/CodeAnalysis.src.globalconfig b/eng/CodeAnalysis.src.globalconfig index abdfba711bcea2..21a53462cc5de3 100644 --- a/eng/CodeAnalysis.src.globalconfig +++ b/eng/CodeAnalysis.src.globalconfig @@ -561,6 +561,9 @@ dotnet_diagnostic.CA2020.severity = warning # CA2021: Do not call Enumerable.Cast or Enumerable.OfType with incompatible types dotnet_diagnostic.CA2021.severity = warning +# CA2022: Avoid inexact read with 'Stream.Read' 
+dotnet_diagnostic.CA2022.severity = warning + # CA2100: Review SQL queries for security vulnerabilities dotnet_diagnostic.CA2100.severity = none diff --git a/eng/CodeAnalysis.test.globalconfig b/eng/CodeAnalysis.test.globalconfig index dccb23a9e1a8f4..0d944fbd890fc9 100644 --- a/eng/CodeAnalysis.test.globalconfig +++ b/eng/CodeAnalysis.test.globalconfig @@ -558,6 +558,9 @@ dotnet_diagnostic.CA2020.severity = none # CA2021: Do not call Enumerable.Cast or Enumerable.OfType with incompatible types dotnet_diagnostic.CA2021.severity = none +# CA2022: Avoid inexact read with 'Stream.Read' +dotnet_diagnostic.CA2022.severity = none + # CA2100: Review SQL queries for security vulnerabilities dotnet_diagnostic.CA2100.severity = none diff --git a/src/libraries/System.IO.Ports/src/System/IO/Ports/SerialPort.cs b/src/libraries/System.IO.Ports/src/System/IO/Ports/SerialPort.cs index 83a9a02926bdd3..752163fd2bd379 100644 --- a/src/libraries/System.IO.Ports/src/System/IO/Ports/SerialPort.cs +++ b/src/libraries/System.IO.Ports/src/System/IO/Ports/SerialPort.cs @@ -963,7 +963,21 @@ public string ReadExisting() Buffer.BlockCopy(_inBuffer, _readPos, bytesReceived, 0, CachedBytesToRead); } - _internalSerialStream.Read(bytesReceived, CachedBytesToRead, bytesReceived.Length - (CachedBytesToRead)); // get everything +#if NET7_0_OR_GREATER + _internalSerialStream.ReadExactly(bytesReceived, CachedBytesToRead, bytesReceived.Length - CachedBytesToRead); // get everything +#else + int readCount = bytesReceived.Length - CachedBytesToRead; + int totalRead = 0; + while (totalRead < readCount) + { + int bytesRead = _internalSerialStream.Read(bytesReceived, CachedBytesToRead + totalRead, readCount - totalRead); + if (bytesRead <= 0) + { + throw new EndOfStreamException(); + } + totalRead += bytesRead; + } +#endif // Read full characters and leave partial input in the buffer. Encoding.GetCharCount doesn't work because // it returns fallback characters on partial input, meaning that it overcounts. 
Instead, we use diff --git a/src/libraries/System.ServiceModel.Syndication/src/System/ServiceModel/XmlBuffer.cs b/src/libraries/System.ServiceModel.Syndication/src/System/ServiceModel/XmlBuffer.cs index f57dec531c28e9..17f3cfa2e1e7ad 100644 --- a/src/libraries/System.ServiceModel.Syndication/src/System/ServiceModel/XmlBuffer.cs +++ b/src/libraries/System.ServiceModel.Syndication/src/System/ServiceModel/XmlBuffer.cs @@ -86,7 +86,21 @@ public void Close() _bufferState = BufferState.Reading; _buffer = new byte[_stream.Length]; _stream.Position = 0; - _stream.Read(_buffer, 0, _buffer.Length); + +#if NET7_0_OR_GREATER + _stream.ReadExactly(_buffer); +#else + int totalRead = 0; + while (totalRead < _buffer.Length) + { + int bytesRead = _stream.Read(_buffer, totalRead, _buffer.Length - totalRead); + if (bytesRead <= 0) + { + throw new EndOfStreamException(); + } + totalRead += bytesRead; + } +#endif _writer = null; _stream = null; diff --git a/src/libraries/System.Speech/src/Internal/Synthesis/AudioBase.cs b/src/libraries/System.Speech/src/Internal/Synthesis/AudioBase.cs index ff57d09872075e..782cd59fb6c34c 100644 --- a/src/libraries/System.Speech/src/Internal/Synthesis/AudioBase.cs +++ b/src/libraries/System.Speech/src/Internal/Synthesis/AudioBase.cs @@ -121,7 +121,22 @@ internal void PlayWaveFile(AudioData audio) try { byte[] data = new byte[(int)audio._stream.Length]; - audio._stream.Read(data, 0, data.Length); + +#if NET7_0_OR_GREATER + audio._stream.ReadExactly(data); +#else + int totalRead = 0; + while (totalRead < data.Length) + { + int bytesRead = audio._stream.Read(data, totalRead, data.Length - totalRead); + if (bytesRead <= 0) + { + throw new EndOfStreamException(); + } + totalRead += bytesRead; + } +#endif + Play(data); } finally diff --git a/src/libraries/System.Speech/src/Internal/Synthesis/EngineSite.cs b/src/libraries/System.Speech/src/Internal/Synthesis/EngineSite.cs index e37766fc656b92..a658f37ca6180b 100644 --- 
a/src/libraries/System.Speech/src/Internal/Synthesis/EngineSite.cs +++ b/src/libraries/System.Speech/src/Internal/Synthesis/EngineSite.cs @@ -174,7 +174,22 @@ public Stream LoadResource(Uri uri, string mediaType) int cLen = (int)stream.Length; MemoryStream memStream = new(cLen); byte[] ab = new byte[cLen]; - stream.Read(ab, 0, ab.Length); + +#if NET7_0_OR_GREATER + stream.ReadExactly(ab); +#else + int totalRead = 0; + while (totalRead < cLen) + { + int bytesRead = stream.Read(ab, totalRead, cLen - totalRead); + if (bytesRead <= 0) + { + throw new EndOfStreamException(); + } + totalRead += bytesRead; + } +#endif + _resourceLoader.UnloadFile(localPath); memStream.Write(ab, 0, cLen); memStream.Position = 0; From 85d60b75d1e5aa8e25268b00ebce23b8c68621cb Mon Sep 17 00:00:00 2001 From: Andrew Au Date: Fri, 29 Mar 2024 06:04:25 -0700 Subject: [PATCH 014/132] Dynamic event support (#99862) --- .../eventpipe/gen-eventing-event-inc.lst | 1 + .../nativeaot/Runtime/gctoclreventsink.cpp | 18 ++++++++++++++---- src/coreclr/scripts/genLttngProvider.py | 6 +++--- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/src/coreclr/nativeaot/Runtime/eventpipe/gen-eventing-event-inc.lst b/src/coreclr/nativeaot/Runtime/eventpipe/gen-eventing-event-inc.lst index 5abd0e38669262..901af659ff84b6 100644 --- a/src/coreclr/nativeaot/Runtime/eventpipe/gen-eventing-event-inc.lst +++ b/src/coreclr/nativeaot/Runtime/eventpipe/gen-eventing-event-inc.lst @@ -46,6 +46,7 @@ GCBulkSurvivingObjectRanges GCCreateConcurrentThread_V1 GCCreateSegment_V1 GCDecision_V1 +GCDynamicEvent GCEnd_V1 GCFinalizersBegin_V1 GCFinalizersEnd_V1 diff --git a/src/coreclr/nativeaot/Runtime/gctoclreventsink.cpp b/src/coreclr/nativeaot/Runtime/gctoclreventsink.cpp index 8ece828ba53d80..16ef566053046b 100644 --- a/src/coreclr/nativeaot/Runtime/gctoclreventsink.cpp +++ b/src/coreclr/nativeaot/Runtime/gctoclreventsink.cpp @@ -11,17 +11,27 @@ void GCToCLREventSink::FireDynamicEvent(const char* eventName, void* payload, 
ui { LIMITED_METHOD_CONTRACT; -#ifndef FEATURE_NATIVEAOT const size_t EventNameMaxSize = 255; WCHAR wideEventName[EventNameMaxSize]; - if (MultiByteToWideChar(CP_ACP, 0, eventName, -1, wideEventName, EventNameMaxSize) == 0) + int i = 0; + while (true) { - return; + if (i == (EventNameMaxSize - 1)) + { + wideEventName[i] = L'\0'; + assert(false); + break; + } + wideEventName[i] = (WCHAR)eventName[i]; + if (eventName[i] == '\0') + { + break; + } + i++; } FireEtwGCDynamicEvent(wideEventName, payloadSize, (const BYTE*)payload, GetClrInstanceId()); -#endif // !FEATURE_NATIVEAOT } void GCToCLREventSink::FireGCStart_V2(uint32_t count, uint32_t depth, uint32_t reason, uint32_t type) diff --git a/src/coreclr/scripts/genLttngProvider.py b/src/coreclr/scripts/genLttngProvider.py index 70affdbbfe01ac..d75a222a27b8a3 100644 --- a/src/coreclr/scripts/genLttngProvider.py +++ b/src/coreclr/scripts/genLttngProvider.py @@ -88,7 +88,7 @@ "win:Binary" :"const BYTE" } -monoLttngDataTypeMapping ={ +portableLttngDataTypeMapping ={ #constructed types "win:null" :" ", "win:Int64" :"const int64_t", @@ -113,8 +113,8 @@ def getLttngDataTypeMapping(runtimeFlavor): if runtimeFlavor.coreclr: return coreCLRLttngDataTypeMapping - elif runtimeFlavor.mono: - return monoLttngDataTypeMapping + else: + return portableLttngDataTypeMapping ctfDataTypeMapping ={ #constructed types From 8e95f22ef437c0b1fe5f769ba7b6d12c3ba7f913 Mon Sep 17 00:00:00 2001 From: Andrew Au Date: Fri, 29 Mar 2024 06:05:10 -0700 Subject: [PATCH 015/132] Make sure we compact the LOH when aggressive GC is requested (#98954) --- src/coreclr/gc/gc.cpp | 8 +++++++- src/coreclr/gc/gcrecord.h | 3 ++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index b422ae9b9175ff..40cb8694fd4d52 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -20825,6 +20825,12 @@ int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation, } } + if (settings.reason == 
reason_induced_aggressive) + { + gc_data_global.gen_to_condemn_reasons.set_condition (gen_joined_aggressive); + settings.loh_compaction = TRUE; + } + #ifdef BGC_SERVO_TUNING if (bgc_tuning::should_trigger_ngc2()) { @@ -31148,7 +31154,7 @@ BOOL gc_heap::plan_loh() void gc_heap::compact_loh() { - assert (loh_compaction_requested() || heap_hard_limit || conserve_mem_setting); + assert (loh_compaction_requested() || heap_hard_limit || conserve_mem_setting || (settings.reason == reason_induced_aggressive)); #ifdef FEATURE_EVENT_TRACE uint64_t start_time = 0, end_time; diff --git a/src/coreclr/gc/gcrecord.h b/src/coreclr/gc/gcrecord.h index 44641157e9f1bb..b611e7c0c1c4f8 100644 --- a/src/coreclr/gc/gcrecord.h +++ b/src/coreclr/gc/gcrecord.h @@ -73,7 +73,8 @@ enum gc_condemn_reason_condition gen_joined_servo_postpone = 27, gen_joined_stress_mix = 28, gen_joined_stress = 29, - gcrc_max = 30 + gen_joined_aggressive = 30, + gcrc_max = 31 }; #ifdef DT_LOG From 7486be885b298aef622833bec509732e6b3f1667 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aleksey=20Kliger=20=28=CE=BBgeek=29?= Date: Fri, 29 Mar 2024 09:19:47 -0400 Subject: [PATCH 016/132] [cdac] Data Descriptor Spec (#100253) Contributes to #100162 which is part of #99298 Follow-up to #99936 that removes "type layout" and "global value" contracts and instead replaces them with a "data descriptor" blob. Conceptually a particular target runtime provides a pair of a logical data descriptor together with a set of algorithmic contract versions. The logical data descriptor is just a single model that defines all the globals and type layouts relevant to the set of algorithmic contract versions. A logical data descriptor is realized by merging two physical data descriptors in a proscribed order. The physical data descriptors provide some subset of the type layouts or global values. 
The physical data descriptors come in two flavors: - baseline descriptors that are checked into the dotnet/runtime repo and have well -known names - in-proc descriptors that get embedded into a target runtime. Each in-proc descriptor may refer to a baseline and represents a delta applied on top of the baseline. The data contract model works on top of a flattened logical data descriptor. Co-authored-by: Aaron Robinson Co-authored-by: Jan Kotas Co-authored-by: Noah Falk --- docs/design/datacontracts/data_descriptor.md | 337 ++++++++++++++++++ .../datacontracts/datacontracts_design.md | 185 ++-------- 2 files changed, 375 insertions(+), 147 deletions(-) create mode 100644 docs/design/datacontracts/data_descriptor.md diff --git a/docs/design/datacontracts/data_descriptor.md b/docs/design/datacontracts/data_descriptor.md new file mode 100644 index 00000000000000..cd0d5ce92e82c5 --- /dev/null +++ b/docs/design/datacontracts/data_descriptor.md @@ -0,0 +1,337 @@ +# Data Descriptors + +The [data contract](datacontracts_design.md) specification for .NET depends on each target .NET +runtime describing a subset of its platform- and build-specific data structures to diagnostic +tooling. The information is given meaning by algorithmic contracts that describe how the low-level +layout of the memory of a .NET process corresponds to high-level abstract data structures that +represent the conceptual state of a .NET process. + +In this document we give a logical description of a data descriptor together with a physical +manifestation. + +The physical format is used for two purposes: + +1. To publish well-known data descriptors in the `dotnet/runtime` repository in a machine- and +human-readable form. This data may be used for visualization, diagnostics, etc. These data +descriptors may be written by hand or with the aid of tooling. + +2. To embed a data descriptor blob within a particular instance of a target runtime. 
The data +descriptor blob will be discovered by diagnostic tooling from the memory of a target process. + +## Logical descriptor + +Each logical descriptor exists within an implied *target architecture* consisting of: +* target architecture endianness (little endian or big endian) +* target architecture pointer size (4 bytes or 8 bytes) + +The following *primitive types* are assumed: int8, uint8, int16, uint16, int32, uint32, int64, +uint64, nint, nuint, pointer. The multi-byte types are in the target architecture +endianness. The types `nint`, `nuint` and `pointer` have target architecture pointer size. + +The data descriptor consists of: +* a collection of type structure descriptors +* a collection of global value descriptors + +## Types + +The types (both primitive types and structures described by structure descriptors) are classified as +having either determinate or indeterminate size. Types with a determinate size may be used for +pointer arithmetic, whereas types with an indeterminate size may not be. Note that some sizes may +be determinate, but *target specific*. For example pointer types have a fixed size that varies by +architecture. + +## Structure descriptors + +Each structure descriptor consists of: +* a name +* an optional size in bytes +* a collection of field descriptors + +If the size is not given, the type has indeterminate size. The size may also be given explicitly as +"indeterminate" to emphasize that the type has indeterminate size. + +The collection of field descriptors may be empty. In that case the type is opaque. The primitive +types may be thought of as opaque (for example: on ARM64 `nuint` is an opaque 8 byte type, `int64` +is another opaque 8 byte type. `string` is an opaque type of indeterminate size). + +Type names must be globally unique within a single logical descriptor. 
+ +### Field descriptors + +Each field descriptor consists of: +* a name +* a type +* an offset in bytes from the beginning of the struct + +The name of a field descriptor must be unique within the definition of a structure. + +Two or more fields may have the same offsets or imply that the underlying fields overlap. The field +offsets need not be aligned using any sort of target-specific alignment rules. + +Each field's type may refer to one of the primitive types or to any other type defined in the logical descriptor. + +If a structure descriptor contains at least one field of indeterminate size, the whole structure +must have indeterminate size. Tooling is not required to, but may, signal a warning if a descriptor +has a determinate size and contains indeterminate size fields. + +It is expected that tooling will signal a warning if a field specifies a type that does not appear +in the logical descriptor. + +## Global value descriptors + +Each global value descriptor consists of: +* a name +* a type +* a value + +The name of each global value must be unique within the logical descriptor. + +The type must be one of the determinate-size primitive types. + +The value must be an integral constant within the range of its type. Signed values use the target's +natural encoding. Pointer values need not be aligned and need not point to addressable target +memory. + + +## Physical descriptors + +The physical descriptors are meant to describe *subsets* of a logical descriptor and to compose. + +In the .NET runtime there are two physical descriptors: +* a "baseline" physical data descriptor with a well-known name, +* an in-memory physical data descriptor that resides in the target process' memory + +When constructing the logical descriptor, first the baseline physical descriptor is consumed: the +types and values from the baseline are added to the logical descriptor. 
Then the types of the +in-memory data descriptor are used to augment the baseline: fields are added or modified, sizes and +offsets are overwritten. The global values of the in-memory data descriptor are used to augment the +baseline: new globals are added, existing globals are modified by overwriting their types or values. + +Rationale: If a type appears in multiple physical descriptors, the later appearances may add more +fields or change the offsets or definite/indefinite sizes of prior definitions. If a value appears +multiple times, later definitions take precedence. + +## Physical JSON descriptor + +### Version + +This is version 0 of the physical descriptor. + +### Summary + +A data descriptor may be stored in the "JSON with comments" format. There are two formats: a +"regular" format and a "compact" format. The baseline data descriptor may be either regular or +compact. The in-memory descriptor will typically be compact. + +The toplevel dictionary will contain: + +* `"version": 0` +* optional `"baseline": "BASELINE_ID"` see below +* `"types": TYPES_DESCRIPTOR` see below +* `"globals": GLOBALS_DESCRIPTOR` see below + +### Baseline data descriptor identifier + +The in-memory descriptor may contain an optional string identifying a well-known baseline +descriptor. The identifier is an arbitrary string, that could be used, for example to tag a +collection of globals and data structure layouts present in a particular release of a .NET runtime +for a certain architecture (for example `net9.0/coreclr/linux-arm64`). Global values and data structure +layouts present in the data contract descriptor take precedence over the baseline contract. This +way variant builds can be specified as a delta over a baseline. 
For example, debug builds of +CoreCLR that include additional fields in a `MethodTable` data structure could be based on the same +baseline as Release builds, but with the in-memory data descriptor augmented with new `MethodTable` +fields and additional structure descriptors. + +It is not a requirement that the baseline is chosen so that additional "delta" is the smallest +possible size, although for practical purposes that may be desired. + +Data descriptors are registered as "well known" by checking them into the main branch of +`dotnet/runtime` in the `docs/design/datacontracts/data/` directory in the JSON format specified +in the [data descriptor spec](./data_descriptor.md#Physical_JSON_Descriptor). The relative path name (with `/` as the path separator, if any) of the descriptor without +any extension is the identifier. (for example: +`/docs/design/datacontracts/data/net9.0/coreclr/linux-arm64.json` is the filename for the data +descriptor with identifier `net9.0/coreclr/linux-arm64`) + +The baseline descriptors themselves must not have a baseline. + +### Types descriptor + +**Regular format**: + +The types will be in an array, with each type described by a dictionary containing keys: + +* `"name": "type name"` the name of each type +* optional `"size": int | "indeterminate"` if omitted the size is indeterminate +* optional `"fields": FIELD_ARRAY` if omitted same as a field array of length zero + +Each `FIELD_ARRAY` is an array of dictionaries each containing keys: + +* `"name": "field name"` the name of each field +* `"type": "type name"` the name of a primitive type or another type defined in the logical descriptor +* optional `"offset": int | "unknown"` the offset of the field or "unknown". If omitted, same as "unknown". + +**Compact format**: + +The types will be in a dictionary, with each type name being the key and a `FIELD_DICT` dictionary as a value. + +The `FIELD_DICT` will have a field name as a key, or the special name `"!"` as a key. 
+ +If a key is `!` the value is an `int` giving the total size of the struct. The key must be omitted +if the size is indeterminate. + +If the key is any other string, the value may be one of: + +* `[int, "type name"]` giving the type and offset of the field +* `int` giving just the offset of the field with the type left unspecified + +Unknown offsets are not supported in the compact format. + +Rationale: the compact format is expected to be used for the in-memory data descriptor. In the +common case the field type is known from the baseline descriptor. As a result, a field descriptor +like `"field_name": 36` is the minimum necessary information to be conveyed. If the field is not +present in the baseline, then `"field_name": [12, "uint16"]` must be used. + +**Both formats**: + +Note that the logical descriptor does not contain "unknown" offsets: it is expected that the +in-memory data descriptor will augment the baseline with a known offset for all fields in the +baseline. + +Rationale: "unknown" offsets may be used to document in the physical JSON descriptor that the +in-memory descriptor is expected to provide the offset of the field. + +### Global values + +**Regular format**: + +The global values will be in an array, with each value described by a dictionary containing keys: + +* `"name": "global value name"` the name of the global value +* `"type": "type name"` the type of the global value +* optional `"value": VALUE | [ int ] | "unknown"` the value of the global value, or an offset in an auxiliary array containing the value or "unknown". + +The `VALUE` may be a JSON numeric constant integer or a string containing a signed or unsigned +decimal or hex (with prefix `0x` or `0X`) integer constant. The constant must be within the range +of the type of the global value. 
+ +**Compact format**: + +The global values will be in a dictionary, with each key being the name of a global and the values being one of: + +* `[VALUE | [int], "type name"]` the type and value of a global +* `VALUE | [int]` just the value of a global + +As in the regular format, `VALUE` is a numeric constant or a string containing an integer constant. + +Note that a two element array is unambiguously "type and value", whereas a one-element array is +unambiguously "indirect value". + +**Both formats** + +For pointer and nuint globals, the value may be assumed to fit in a 64-bit unsigned integer. For +nint globals, the value may be assumed to fit in a 64-bit signed integer. + +Note that the logical descriptor does not contain "unknown" values: it is expected that the +in-memory data descriptor will augment the baseline with a known offset for all fields in the +baseline. + +If the value is given as a single-element array `[ int ]` then the value is stored in an auxiliary +array that is part of the data contract descriptor. Only in-memory data descriptors may have +indirect values; baseline data descriptors may not have indirect values. + +Rationale: This allows tooling to generate the in-memory data descriptor as a single constant +string. For pointers, the address can be stored at a known offset in an in-proc +array of pointers and the offset written into the constant JSON string. + +The indirection array is not part of the data descriptor spec. It is expected that the data +contract descriptor will include it. (The data contract descriptor must contain: the data +descriptor, the set of compatible algorithmic contracts, the aux array of globals). + + + +## Example + +This is an example of a baseline descriptor for a 64-bit architecture. Suppose it has the name `"example-64"` + +The baseline is given in the "regular" format. 
+ +```jsonc +{ + "version": 0, + "types": [ + { + "name": "GCHandle", + "size": 8, + "fields": [ + { "name": "Value", "type": "pointer", "offset": 0 } + ] + }, + { + "name": "Thread", + "size": "indeterminate", + "fields": [ + { "name": "ThreadId", "type": "uint32", "offset": "unknown" }, + { "name": "Next", "type": "pointer" }, // offset "unknown" is implied + { "name": "ThreadState", "type": "uint32" } + ] + }, + { + "name": "ThreadStore", + "fields": [ + { "name": "ThreadCount", "type": "int32" }, + { "name": "ThreadList", "type": "pointer" } + ] + } + ], + "globals": [ + { "name": "FEATURE_EH_FUNCLETS", "type": "uint8", "value": "0" }, // baseline defaults value to 0 + { "name": "FEATURE_COMINTEROP", "type", "uint8", "value": "1"}, + { "name": "s_pThreadStore", "type": "pointer" } // no baseline value + ] +} +``` + +The following is an example of an in-memory descriptor that references the above baseline. The in-memory descriptor is in the "compact" format: + +```jsonc +{ + "version": "0", + "baseline": "example-64", + "types": + { + "Thread": { "ThreadId": 32, "ThreadState": 0, "Next": 128 }, + "ThreadStore": { "ThreadCount": 32, "ThreadList": 8 } + }, + "globals": + { + "FEATURE_COMINTEROP": 0, + "s_pThreadStore": [ 0 ] // indirect from aux data offset 0 + } +} +``` + +If the indirect values table has the values `0x0100ffe0` in offset 0, then a possible logical descriptor with the above physical descriptors will have the following types: + +| Type | Size | Field Name | Field Type | Field Offset | +| ----------- | ------------- | ----------- | ---------- | ------------ | +| GCHandle | 8 | Value | pointer | 0 | +| Thread | indeterminate | ThreadState | uint32 | 0 | +| | | ThreadId | uint32 | 32 | +| | | Next | pointer | 128 | +| ThreadStore | indeterminate | ThreadList | pointer | 8 | +| | | ThreadCount | int32 | 32 | + + +And the globals will be: + +| Name | Type | Value | +| ------------------- | ------- | ---------- | +| FEATURE_COMINTEROP | uint8 | 0 | +| 
FEATURE_EH_FUNCLETS | uint8 | 0 | +| s_pThreadStore | pointer | 0x0100ffe0 | + +The `FEATURE_EH_FUNCLETS` global's value comes from the baseline - not the in-memory data +descriptor. By contrast, `FEATURE_COMINTEROP` comes from the in-memory data descriptor - with the +value embedded directly in the json since it is known at build time and does not vary. Finally the +value of the pointer `s_pThreadStore` comes from the auxiliary vector's offset 0 since it is an +execution-time value that is only known to the running process. diff --git a/docs/design/datacontracts/datacontracts_design.md b/docs/design/datacontracts/datacontracts_design.md index 8a52243fcdcb20..f88e0abfd06e5a 100644 --- a/docs/design/datacontracts/datacontracts_design.md +++ b/docs/design/datacontracts/datacontracts_design.md @@ -16,15 +16,35 @@ The physical layout of this data is not defined in this document, but its practi The Data Contract Descriptor has a set of records of the following forms. -### Global Values -Global values which can be of types (int8, uint8, int16, uint16, int32, uint32, int64, uint64, pointer, nint, nuint, string) -All global values have a string describing their name, and a value of one of the above types. +### Data descriptor + +The data descriptor is a logical entity that defines the layout of certain types relevant to one or +more algorithmic contracts, as well as global values known to the target runtime that may be +relevant to one or more algorithmic contracts. + +More details are provided in the [data descriptor spec](./data_descriptor.md). We highlight some important aspects below: + +#### Global Values + +Global values which can be either primitive integer constants or pointers. +All global values have a string describing their name, a type, and a value of one of the above types. + +#### Data Structure Layout + +Each data structure layout has a name for the type, followed by a list of fields. 
These fields can +be primitive integer types or pointers or another named data structure type. Each field descriptor +provides the offset of the field, the name of the field, and the type of the field. + +Data structures may have a determinate size, specified in the descriptor, or an indeterminate size. +Determinate sizes are used by contracts for pointer arithmetic such as for iterating over arrays. +The determinate size of a structure may be larger than the sum of the sizes of the fields specified +in the data descriptor (that is, the data descriptor does not include every field and may not +include padding bytes). ### Compatible Contract + Each compatible contract is described by a string naming the contract, and a uint32 version. It is an ERROR if multiple versions of a contract are specified in the contract descriptor. -### Data Structure Layout -Each data structure layout has a name for the type, followed by a list of fields. These fields can be of primitive types (int8, uint8, int16, uint16, int32, uint32, int64, uint64, nint, nuint, pointer) or of another named data structure type. Each field descriptor provides the offset of the field, the name of the field, and the type of the field. ## Versioning of contracts Contracts are described an integer version number. A higher version number is not more recent, it just means different. In order to avoid conflicts, all contracts should be documented in the main branch of the dotnet repository with a version number which does not conflict with any other. It is expected that every version of every contract describes the same functionality/data layout/set of global values. @@ -32,162 +52,33 @@ Contracts are described an integer version number. A higher version number is no ## Contract data model Logically a contract may refer to another contract. If it does so, it will typically refer to other contracts by names which do not include the contract version. This is to allow for version flexibility. 
Logically once the Data Contract Descriptor is fully processed, there is a single list of contracts that represents the set of contracts useable with whatever runtime instance is being processed. -## Types of contracts +## Algorithmic contracts -There are 3 different types of contracts each representing a different phase of execution of the data contract system. - -### Composition contracts -These contracts indicate the version numbers of other contracts. This is done to reduce the size of contract list needed in the Data Contract Descriptor. In general it is intended that as a runtime nears shipping, the product team can gather up all of the current versions of the contracts into a single magic value, which can be used to initialize most of the contract versions of the data contract system. A specific version number in the Data Contract Descriptor for a given contract will override any composition contracts specified in the Data Contract Descriptor. If there are multiple composition contracts in a Data Contract Descriptor which specify the same contract to have a different version, the first composition contract linearly in the Data Contract Descriptor wins. This is intended to allow for a composite contract for the architecture/os indepedent work, and a separate composite contract for the non independent work. If a contract is specified explicitly in the Data Contract Descriptor and a different version is specified via the composition contract mechanism, the explicitly specified contract takes precedence. - -### Fixed value contracts -These contracts represent data which is entirely determined by the contract version + contract name. There are 2 subtypes of this form of contract. - -#### Global Value Contract -A global value contract specifies numbers which can be referred to by other contracts. If a global value is specified directly in the Data Contract Descriptor, then the global value defintion in the Data Contract Descriptor takes precedence. 
The intention is that these global variable contracts represent magic numbers and values which are useful for the operation of algorithmic contracts. For instance, we will likely have a `TargetPointerSize` global value represented via a contract, and things like `FEATURE_SUPPORTS_COM` can also be a global value contract, with a value of 1. - -#### Data Structure Definition Contract -A data structure definition contract defines a single type's physical layout. It MUST be named "MyDataStructureType_layout". If a data structure layout is specified directly in the Data Contract Descriptor, then the data structure defintion in the Data Contract Descriptor takes precedence. These contracts are responsible for declaring the field layout of individual fields. While not all versions of a data structure are required to have the same fields/type of fields, algorithms may be built targetting the union of the set of field types defined in the version of a given data structure definition contract. Access to a field which isn't defined on the current runtime will produce an error. - -### Algorithmic contracts -Algorithmic contracts define how to process a given set of data structures to produce useful results. These are effectively code snippets which utilize the abstracted data structures provided by Data Structure Definition Contracts and Global Value Contract to produce useful output about a given program. Descriptions of these contracts may refer to functionality provided by other contracts to do their work. The algorithms provided in these contracts are designed to operate given the ability to read various primitive types and defined data structures from the process memory space, as well as perform general purpose computation. +Algorithmic contracts define how to process a given set of data structures to produce useful results. 
These are effectively code snippets which utilize the abstracted data structures and global values provided by data descriptor to produce useful output about a given program. Descriptions of these contracts may refer to functionality provided by other contracts to do their work. The algorithms provided in these contracts are designed to operate given the ability to read various primitive types and defined data structures from the process memory space, as well as perform general purpose computation. It is entirely reasonable for an algorithmic contract to have multiple entrypoints which take different inputs. For example imagine a contract which provides information about a `MethodTable`. It may provide the an api to get the `BaseSize` of a `MethodTable`, and an api to get the `DynamicTypeID` of a `MethodTable`. However, while the set of contracts which describe an older version of .NET may provide a means by which the `DynamicTypeID` may be acquired for a `MethodTable`, a newer runtime may not have that concept. In such a case, it is very reasonable to define that the `GetDynamicTypeID` api portion of that contract is defined to simply `throw new NotSupportedException();` -For simplicity, as it can be expected that all developers who work on the .NET runtime understand C# to a fair degree, it is preferred that the algorithms be defined in C#, or at least psuedocode that looks like C#. It is also condsidered entirely permissable to refer to other specifications if the algorithm is a general purpose one which is well defined by the OS or some other body. (For example, it is expected that the unwinding algorithms will be defined by references into either the DWARF spec, or various Windows Unwind specifications.) +For simplicity, as it can be expected that all developers who work on the .NET runtime understand C# to a fair degree, it is preferred that the algorithms be defined in C#, or at least psuedocode that looks like C#. 
It is also considered entirely permissible to refer to other specifications if the algorithm is a general purpose one which is well defined by the OS or some other body. (For example, it is expected that the unwinding algorithms will be defined by references into either the DWARF spec, or various Windows Unwind specifications.) For working with data from the target process/other contracts, the following C# interface is intended to be used within the algorithmic descriptions: Best practice is to either write the algorithm in C# like psuedocode working on top of the [C# style api](contract_csharp_api_design.cs) or by reference to specifications which are not co-developed with the runtime, such as OS/architecture specifications. Within the contract algorithm specification, the intention is that all interesting api work is done by using an instance of the `Target` class. -## Arrangement of contract specifications in the repo - -Specs shall be stored in the repo in a set of directories. `docs/design/datacontracts` Each one of them shall be a seperate markdown file named with the name of contract. `docs/design/datacontracts/datalayout/.md` Every version of each contract shall be located in the same file to facilitate understanding how variations between different contracts work. - -### Global Value Contracts -The format of each contract spec shall be - - -``` -# Contract - -Insert description of contract, and what its for here. 
- -## Version - -Insert description (if possible) about what is interesting about this particular version of the contract - -### Values -| Global Name | Type | Value | -| --- | --- | --- | -| SomeGlobal | Int32 | 1 | -| SomeOtherGlobal | Int8 | 0 | - -## Version - -Insert description (if possible) about what is interesting about this particular version of the contract - -### Values -| Global Name | Type | Value | -| --- | --- | --- | -| SomeGlobal | Int32 | 1 | -| SomeOtherGlobal | Int8 | 1 | -``` - -Which should format like: -# Contract - -Insert description of contract, and what its for here. +Algorithmic contracts may include specifications for numbers which can be referred to in the contract or by other contracts. The intention is that these global values represent magic numbers and values which are useful for the operation of algorithmic contracts. -## Version +While not all versions of a data structure are required to have the same fields/type of fields, +algorithms may be built targeting the union of the set of field types defined in the data structure +descriptors of possible target runtimes. Access to a field which isn't defined on the current +runtime will produce an error. -Insert description (if possible) about what is interesting about this particular version of the contract -### Values -| Global Name | Type | Value | -| --- | --- | --- | -| SomeGlobal | Int32 | 1 | -| SomeOtherGlobal | Int8 | 0 | - -## Version - -Insert description (if possible) about what is interesting about this particular version of the contract - -### Values -| Global Name | Type | Value | -| --- | --- | --- | -| SomeGlobal | Int32 | 1 | -| SomeOtherGlobal | Int8 | 1 | - - -### Data Structure Contracts -Data structure contracts describe the field layout of individual types in the that are referred to by algorithmic contracts. If one of the versions is marked as DEFAULT then that version exists if no specific version is specified in the Data Contract Descriptor. 
- -``` -# Contract _layout - -Insert description of type, and what its for here. - -## Version , DEFAULT - -Insert description (if possible) about what is interesting about this particular version of the contract - -### Structure Size -8 bytes - -### Fields -| Field Name | Type | Offset | -| --- | --- | --- | -| FirstField | Int32 | 0 | -| SecondField | Int64 | 4 | - -## Version - -Insert description (if possible) about what is interesting about this particular version of the contract - -### Structure Size -16 bytes - -### Fields -| Field Name | Type | Offset | -| --- | --- | --- | -| FirstField | Int32 | 0 | -| SecondField | Int64 | 8 | -``` - -Which should format like: -# Contract _layout - -Insert description of type, and what its for here. - -## Version , DEFAULT - -Insert description (if possible) about what is interesting about this particular version of the contract - -### Structure Size -8 bytes - -### Fields -| Field Name | Type | Offset | -| --- | --- | --- | -| FirstField | Int32 | 0 | -| SecondField | Int64 | 4 | - -## Version - -Insert description (if possible) about what is interesting about this particular version of the contract - -### Structure Size -16 bytes +## Arrangement of contract specifications in the repo -### Fields -| Field Name | Type | Offset | -| --- | --- | --- | -| FirstField | Int32 | 0 | -| SecondField | Int64 | 8 | +Specs shall be stored in the repo in a set of directories. `docs/design/datacontracts` Each one of them shall be a separate markdown file named with the name of contract. `docs/design/datacontracts/.md` Every version of each contract shall be located in the same file to facilitate understanding how variations between different contracts work. -### Algorthmic Contract +### Algorithmic Contract -Algorithmic contracts these describe how an algorithm that processes over data layouts work. Unlike all other contract forms, every version of an algorithmic contract presents a consistent api to consumers of the contract. 
+Algorithmic contracts describe how an algorithm that processes over data layouts work. Every version of an algorithmic contract presents a consistent api to consumers of the contract. There are several sections: 1. The header, where a description of what the contract can do is placed. @@ -326,4 +217,4 @@ int ComputeInterestingValue2(SomeStructUsedAsPartOfContractApi struct) else return struct.Value1; } -``` \ No newline at end of file +``` From 34d13b27a4f0d62367145bcc1f67f7044ad2328a Mon Sep 17 00:00:00 2001 From: Filip Navara Date: Fri, 29 Mar 2024 15:54:13 +0100 Subject: [PATCH 017/132] [NativeAOT/x86] Add SAFESEH support to assembly files and ObjectWriter (#100433) * Add SAFESEH support to assembly files and ObjectWriter * Specify SafeSEH only on x86, emit feat.00 symbol only if non-zero * Use /SAFESEH flag for NativeAOT/x86 * Minor cleanup --- .../Microsoft.NETCore.Native.Windows.targets | 1 + src/coreclr/nativeaot/Runtime/CMakeLists.txt | 3 +++ .../Compiler/ObjectWriter/CoffObjectWriter.cs | 18 +++++++++++++++++- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Windows.targets b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Windows.targets index 49a0d7cf9e165c..f9fb32ed669e7c 100644 --- a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Windows.targets +++ b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Windows.targets @@ -95,6 +95,7 @@ The .NET Foundation licenses this file to you under the MIT license. 
+ diff --git a/src/coreclr/nativeaot/Runtime/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/CMakeLists.txt index 2d163ea27d78b2..3d0dc1541af04b 100644 --- a/src/coreclr/nativeaot/Runtime/CMakeLists.txt +++ b/src/coreclr/nativeaot/Runtime/CMakeLists.txt @@ -264,6 +264,9 @@ if(CLR_CMAKE_TARGET_WIN32) if (CLR_CMAKE_TARGET_ARCH_AMD64) add_definitions(-DFEATURE_SPECIAL_USER_MODE_APC) endif() + if (CLR_CMAKE_TARGET_ARCH_I386) + add_compile_options($<$:/safeseh>) + endif() else() if(NOT CLR_CMAKE_TARGET_APPLE) add_definitions(-DFEATURE_READONLY_GS_COOKIE) diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/CoffObjectWriter.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/CoffObjectWriter.cs index 2f3a0c6cefe01b..f319eebfe6ce11 100644 --- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/CoffObjectWriter.cs +++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/CoffObjectWriter.cs @@ -213,6 +213,8 @@ private protected override void EmitSymbolTable( IDictionary definedSymbols, SortedSet undefinedSymbols) { + Feat00Flags feat00Flags = _machine is Machine.I386 ? 
Feat00Flags.SafeSEH : 0; + foreach (var (symbolName, symbolDefinition) in definedSymbols) { if (_symbolNameToIndex.TryGetValue(symbolName, out uint symbolIndex)) @@ -253,13 +255,18 @@ private protected override void EmitSymbolTable( gfidsSectionWriter.WriteLittleEndian(_symbolNameToIndex[symbolName]); } + feat00Flags |= Feat00Flags.ControlFlowGuard; + } + + if (feat00Flags != 0) + { // Emit the feat.00 symbol that controls various linker behaviors _symbols.Add(new CoffSymbol { Name = "@feat.00", StorageClass = CoffSymbolClass.IMAGE_SYM_CLASS_STATIC, SectionIndex = uint.MaxValue, // IMAGE_SYM_ABSOLUTE - Value = 0x800, // cfGuardCF flags this object as control flow guard aware + Value = (uint)feat00Flags, }); } } @@ -1118,5 +1125,14 @@ public static uint CalculateChecksum(Stream stream) return crc; } } + + private enum Feat00Flags : uint + { + SafeSEH = 1, + StackGuard = 0x100, + SoftwareDevelopmentLifecycle = 0x200, + ControlFlowGuard = 0x800, + ExceptionContinuationMetadata = 0x4000, + } } } From 032399502609e6ece320eab4c0e84ceb01ff88b6 Mon Sep 17 00:00:00 2001 From: Bruce Forstall Date: Fri, 29 Mar 2024 10:35:45 -0700 Subject: [PATCH 018/132] Add more checking to `GenTreeArrAddr::ParseArrayAddress()` (#100327) If the ARR_ADDR node doesn't make sense, namely if the array offset does not appear to be in the array, then bail. --- src/coreclr/jit/gentree.cpp | 41 ++++++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 5064abc5ba9a56..05f50f4f1132e0 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -19038,6 +19038,27 @@ GenTreeLclVarCommon* Compiler::gtCallGetDefinedRetBufLclAddr(GenTreeCall* call) // Return Value: // Will set "*pArr" to "nullptr" if this array address is not parseable. 
// +// Notes: +// Instead of (or in addition to) parsing the GenTree, maybe we should be parsing the VN +// "trees": if optimization has replaced the index expression with a CSE def, it's harder +// to parse, but the VN tree for the CSE def GT_COMMA has all the same info. For example: +// +// \--* ARR_ADDR byref System.Collections.Hashtable+Bucket[] $80 +// \--* ADD byref +// +--* LCL_VAR ref V01 arg1 u:1 +// \--* COMMA long +// +--* STORE_LCL_VAR long V21 cse2 d:1 +// | \--* ADD long +// | +--* MUL long +// | | +--* CAST long <- uint +// | | | \--* LCL_VAR int V07 loc2 u:2 +// | | \--* CNS_INT long 24 +// | \--* CNS_INT long 16 +// \--* LCL_VAR long V21 cse2 u:1 +// +// Here, the COMMA represents the index + offset VN, and we could pull out the index VN +// from the COMMA VN. +// void GenTreeArrAddr::ParseArrayAddress(Compiler* comp, GenTree** pArr, ValueNum* pInxVN) { *pArr = nullptr; @@ -19052,13 +19073,23 @@ void GenTreeArrAddr::ParseArrayAddress(Compiler* comp, GenTree** pArr, ValueNum* } // OK, new we have to figure out if any part of the "offset" is a constant contribution to the index. - target_ssize_t elemOffset = GetFirstElemOffset(); - unsigned elemSizeUn = (GetElemType() == TYP_STRUCT) ? comp->typGetObjLayout(GetElemClassHandle())->GetSize() + target_ssize_t firstElemOffset = GetFirstElemOffset(); + assert(firstElemOffset > 0); + + // If we didn't parse any offset, or the offset we parsed doesn't make sense, then give up on + // parsing the array address. (This can happen with JitOptRepeat.) + if (offset < firstElemOffset) + { + *pArr = nullptr; + return; + } + + unsigned elemSizeUn = (GetElemType() == TYP_STRUCT) ? comp->typGetObjLayout(GetElemClassHandle())->GetSize() : genTypeSize(GetElemType()); assert(FitsIn(elemSizeUn)); target_ssize_t elemSize = static_cast(elemSizeUn); - target_ssize_t constIndexOffset = offset - elemOffset; + target_ssize_t constIndexOffset = offset - firstElemOffset; // This should be divisible by the element size... 
assert((constIndexOffset % elemSize) == 0); @@ -19134,6 +19165,7 @@ void GenTreeArrAddr::ParseArrayAddress(Compiler* comp, GenTree** pArr, ValueNum* if (tree->TypeIs(TYP_REF)) { // This must be the array pointer. + assert(*pArr == nullptr); *pArr = tree; assert(inputMul == 1); // Can't multiply the array pointer by anything. } @@ -19229,7 +19261,10 @@ void GenTreeArrAddr::ParseArrayAddress(Compiler* comp, GenTree** pArr, ValueNum* default: break; } + // If we didn't return above, must be a contribution to the non-constant part of the index VN. + // We don't get here for GT_CNS_INT, GT_ADD, or GT_SUB, or for GT_MUL by constant, or GT_LSH of + // constant shift. Thus, the generated index VN does not include the parsed constant offset. ValueNum vn = comp->GetValueNumStore()->VNLiberalNormalValue(tree->gtVNPair); if (inputMul != 1) { From 316fc1615384d62f6ea28ff671d5c0a7c111b282 Mon Sep 17 00:00:00 2001 From: Elinor Fung Date: Fri, 29 Mar 2024 12:25:24 -0700 Subject: [PATCH 019/132] Remove requirement for building packs to run host tests and infrastructure around restoring test project assets (#100324) - Delete build infrastructure around test project asset restore - Remove requirement that packs must be built before running host tests - Building packs was only necessary to support directing the restore/build for the test project assets to the built packs --- docs/workflow/testing/host/testing.md | 21 +-- src/installer/Directory.Build.props | 1 - .../AppHost.Bundle.Tests.csproj | 2 - .../Assets/TestProjects/Directory.Build.props | 9 -- .../TestProjects/Directory.Build.targets | 18 --- .../tests/Assets/TestUtils/TestProjects.props | 17 --- .../Assets/TestUtils/TestProjects.targets | 7 - src/installer/tests/Directory.Build.props | 7 - src/installer/tests/Directory.Build.targets | 54 ++----- .../AdditionalProbingPath.cs | 4 +- ...mblyVersionResolutionMultipleFrameworks.cs | 2 +- .../HostActivation.Tests.csproj | 4 +- .../MultiArchInstallLocation.cs | 10 +- 
.../StandaloneAppActivation.cs | 2 +- .../Microsoft.NET.HostModel.Tests.csproj | 2 - .../PrepareTestAssets/PrepareTestAssets.proj | 133 ------------------ .../tests/TestUtils/DotNetBuilder.cs | 2 +- .../tests/TestUtils/SingleFileTestApp.cs | 4 +- src/installer/tests/TestUtils/TestApp.cs | 4 +- src/installer/tests/TestUtils/TestContext.cs | 4 +- 20 files changed, 37 insertions(+), 270 deletions(-) delete mode 100644 src/installer/tests/Assets/TestProjects/Directory.Build.props delete mode 100644 src/installer/tests/Assets/TestProjects/Directory.Build.targets delete mode 100644 src/installer/tests/Assets/TestUtils/TestProjects.props delete mode 100644 src/installer/tests/Assets/TestUtils/TestProjects.targets delete mode 100644 src/installer/tests/PrepareTestAssets/PrepareTestAssets.proj diff --git a/docs/workflow/testing/host/testing.md b/docs/workflow/testing/host/testing.md index bb45307ecf46f3..46f7761be1fa11 100644 --- a/docs/workflow/testing/host/testing.md +++ b/docs/workflow/testing/host/testing.md @@ -13,15 +13,15 @@ To build the host tests, first build the product: * [CoreCLR](../../building/coreclr/README.md) build instructions * [Libraries](../../building/libraries/README.md) build instructions -2. Build the host and packs: +2. Build the host: ``` - build.cmd/sh -subset host+packs.product -runtimeConfiguration Release -librariesConfiguration Release + build.cmd/sh -subset host -runtimeConfiguration Release -librariesConfiguration Release ``` If using a configuration other than Release for CoreCLR/libraries, specify the desired configuration in the `-runtimeConfiguration`/`-librariesConfiguration` arguments. ### Building all tests -The host tests are part of the `host` subset by default, so building the `host` subset also builds the host test. To build just the host tests: +The host tests are part of the `host` subset by default, so building the `host` subset also builds the host tests. 
To build just the host tests: ``` build.cmd/sh -subset host.tests -runtimeConfiguration Release -librariesConfiguration Release ``` @@ -36,16 +36,18 @@ dotnet build src\installer\tests\HostActivation.Tests ## Test context The host tests depend on: - 1. Product binaries in a directory layout matching that of a .NET install - 2. Restored [test projects](/src/installer/tests/Assets/TestProjects) which will be built and run by the tests + 1. Pre-built [test project](/src/installer/tests/Assets/Projects) output which will be copied and run by the tests. The `host.pretest` subset builds these projects. + 2. Product binaries in a directory layout matching that of a .NET install 3. TestContextVariables.txt file with property and value pairs which will be read by the tests When [running all tests](#running-all-tests), the build is configured such that these are created/performed before the start of the test run. -In order to create (or update) these dependencies without running all tests, the build targets that create them - RefreshProjectTestAssets and SetupTestContextVariables - can be directly run for the desired test project. For example: -``` -dotnet build src\installer\tests\HostActivation.Tests -t:RefreshProjectTestAssets;SetupTestContextVariables -p:RuntimeConfiguration=Release -p:LibrariesConfiguration=Release -``` +In order to create (or update) these dependencies without running all tests: + 1. Build the `host.pretest` subset. By default, this is included in the `host` subset. This corresponds to (1) above. + 2. Run the `SetUpSharedFrameworkPublish` and `SetupTestContextVariables` targets for the desired test project. This corresponds to (2) and (3) above. 
For example: + ``` + dotnet build src\installer\tests\HostActivation.Tests -t:SetUpSharedFrameworkPublish;SetupTestContextVariables -p:RuntimeConfiguration=Release -p:LibrariesConfiguration=Release + ``` ## Running tests @@ -77,6 +79,7 @@ The `category!=failing` is to respect the [filtering traits](../libraries/filter ### Visual Studio The [Microsoft.DotNet.CoreSetup.sln](/src/installer/Microsoft.DotNet.CoreSetup.sln) can be used to run and debug host tests through Visual Studio. When using the solution, the product should have already been [built](#building-tests) and the [test context](#test-context) set up. + If you built the runtime or libraries with a different configuration from the host, you have to specify this when starting visual studio: ```console diff --git a/src/installer/Directory.Build.props b/src/installer/Directory.Build.props index 33a094c082f790..45def02007831b 100644 --- a/src/installer/Directory.Build.props +++ b/src/installer/Directory.Build.props @@ -9,7 +9,6 @@ $(DefineConstants),DEBUG,TRACE $(DefineConstants),TRACE $(OutputRID) - $(OutputRID) diff --git a/src/installer/tests/AppHost.Bundle.Tests/AppHost.Bundle.Tests.csproj b/src/installer/tests/AppHost.Bundle.Tests/AppHost.Bundle.Tests.csproj index e70ca66a4f0be5..c564835ae9e71e 100644 --- a/src/installer/tests/AppHost.Bundle.Tests/AppHost.Bundle.Tests.csproj +++ b/src/installer/tests/AppHost.Bundle.Tests/AppHost.Bundle.Tests.csproj @@ -4,11 +4,9 @@ Apphost Bundle Tests $(TestInfraTargetFramework) AppHost.Bundle.Tests - AppHost.Bundle.Tests true ahb - true diff --git a/src/installer/tests/Assets/TestProjects/Directory.Build.props b/src/installer/tests/Assets/TestProjects/Directory.Build.props deleted file mode 100644 index 06b429ead4e00a..00000000000000 --- a/src/installer/tests/Assets/TestProjects/Directory.Build.props +++ /dev/null @@ -1,9 +0,0 @@ - - - - - $(TestRestorePackagesPath) - true - - - diff --git a/src/installer/tests/Assets/TestProjects/Directory.Build.targets 
b/src/installer/tests/Assets/TestProjects/Directory.Build.targets deleted file mode 100644 index f6350ac71558b3..00000000000000 --- a/src/installer/tests/Assets/TestProjects/Directory.Build.targets +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - - - - - - - - - diff --git a/src/installer/tests/Assets/TestUtils/TestProjects.props b/src/installer/tests/Assets/TestUtils/TestProjects.props deleted file mode 100644 index 1ace72f7f050e5..00000000000000 --- a/src/installer/tests/Assets/TestUtils/TestProjects.props +++ /dev/null @@ -1,17 +0,0 @@ - - - - - net9.0 - - false - - $(TestTargetRid) - - diff --git a/src/installer/tests/Assets/TestUtils/TestProjects.targets b/src/installer/tests/Assets/TestUtils/TestProjects.targets deleted file mode 100644 index 60faea7812009d..00000000000000 --- a/src/installer/tests/Assets/TestUtils/TestProjects.targets +++ /dev/null @@ -1,7 +0,0 @@ - - - - diff --git a/src/installer/tests/Directory.Build.props b/src/installer/tests/Directory.Build.props index 11472eb328a4da..e56691554aaae4 100644 --- a/src/installer/tests/Directory.Build.props +++ b/src/installer/tests/Directory.Build.props @@ -2,14 +2,7 @@ - $(InstallerProjectRoot)tests\ - $(TestDir)Assets\ $(ArtifactsDir)tests\host\$(TargetOS).$(TargetArchitecture).$(Configuration)\ - $(ArtifactsObjDir)TestStabilizedPackages\ - $(ArtifactsObjDir)TestPackageCache\ - $(ArtifactsObjDir)TestNuGetConfig\NuGet.config - $(ArtifactsObjDir)ExtraNupkgsForTestRestore\ - $(TargetArchitecture) $(NetCoreAppToolCurrent) category!=failing --filter $(TestCaseFilter) -v detailed diff --git a/src/installer/tests/Directory.Build.targets b/src/installer/tests/Directory.Build.targets index ecd79411081d4d..5e952873f92e4c 100644 --- a/src/installer/tests/Directory.Build.targets +++ b/src/installer/tests/Directory.Build.targets @@ -3,6 +3,14 @@ + + + - - - - - - - @@ -75,16 +60,12 @@ See https://github.com/dotnet/arcade/issues/3077. 
--> - - - + - - @@ -94,25 +75,8 @@ Lines="@(TestContextVariable)" /> - - - - - - - - $(PackageRID) $(MSBuildProjectName) $(TestArtifactsOutputRoot)$(TestsOutputName)/ diff --git a/src/installer/tests/HostActivation.Tests/DependencyResolution/AdditionalProbingPath.cs b/src/installer/tests/HostActivation.Tests/DependencyResolution/AdditionalProbingPath.cs index 5394032f4f13d4..4914f5b7b95a4d 100644 --- a/src/installer/tests/HostActivation.Tests/DependencyResolution/AdditionalProbingPath.cs +++ b/src/installer/tests/HostActivation.Tests/DependencyResolution/AdditionalProbingPath.cs @@ -114,12 +114,12 @@ public SharedTestState() .AddMicrosoftNETCoreAppFrameworkMockCoreClr(TestContext.MicrosoftNETCoreAppVersion) .Build(); - string nativeDependencyRelPath = $"{TestContext.TargetRID}/{Binaries.GetSharedLibraryFileNameForCurrentPlatform("native")}"; + string nativeDependencyRelPath = $"{TestContext.BuildRID}/{Binaries.GetSharedLibraryFileNameForCurrentPlatform("native")}"; FrameworkReferenceApp = CreateFrameworkReferenceApp(Constants.MicrosoftNETCoreApp, TestContext.MicrosoftNETCoreAppVersion, b => b .WithProject(DependencyName, DependencyVersion, p => p .WithAssemblyGroup(null, g => g .WithAsset($"{DependencyName}.dll", f => f.NotOnDisk())) - .WithNativeLibraryGroup(TestContext.TargetRID, g => g + .WithNativeLibraryGroup(TestContext.BuildRID, g => g .WithAsset(nativeDependencyRelPath, f => f.NotOnDisk())))); RuntimeConfig.FromFile(FrameworkReferenceApp.RuntimeConfigJson) .WithTfm(TestContext.Tfm) diff --git a/src/installer/tests/HostActivation.Tests/DependencyResolution/PerAssemblyVersionResolutionMultipleFrameworks.cs b/src/installer/tests/HostActivation.Tests/DependencyResolution/PerAssemblyVersionResolutionMultipleFrameworks.cs index a6ce47ea8064fb..d5e46c56aeff4e 100644 --- a/src/installer/tests/HostActivation.Tests/DependencyResolution/PerAssemblyVersionResolutionMultipleFrameworks.cs +++ 
b/src/installer/tests/HostActivation.Tests/DependencyResolution/PerAssemblyVersionResolutionMultipleFrameworks.cs @@ -104,7 +104,7 @@ protected override void CustomizeDotNetWithNetCoreApp(DotNetBuilder builder) HighWare, "1.1.1", runtimeConfig => runtimeConfig.WithFramework(Constants.MicrosoftNETCoreApp, "4.0.0"), - path => NetCoreAppBuilder.ForNETCoreApp(HighWare, TestContext.TargetRID) + path => NetCoreAppBuilder.ForNETCoreApp(HighWare, TestContext.BuildRID) .WithProject(HighWare, "1.1.1", p => p .WithAssemblyGroup(null, g => g .WithAsset(TestAssemblyWithNoVersions + ".dll") diff --git a/src/installer/tests/HostActivation.Tests/HostActivation.Tests.csproj b/src/installer/tests/HostActivation.Tests/HostActivation.Tests.csproj index f335c8c509ee5f..240237d85251f3 100644 --- a/src/installer/tests/HostActivation.Tests/HostActivation.Tests.csproj +++ b/src/installer/tests/HostActivation.Tests/HostActivation.Tests.csproj @@ -3,13 +3,11 @@ $(TestInfraTargetFramework) HostActivation.Tests - HostActivation.Tests true ha - true - + diff --git a/src/installer/tests/HostActivation.Tests/MultiArchInstallLocation.cs b/src/installer/tests/HostActivation.Tests/MultiArchInstallLocation.cs index 7bf92fe993ef7c..5d2b6b772d0f5d 100644 --- a/src/installer/tests/HostActivation.Tests/MultiArchInstallLocation.cs +++ b/src/installer/tests/HostActivation.Tests/MultiArchInstallLocation.cs @@ -29,7 +29,7 @@ public void EnvironmentVariable_CurrentArchitectureIsUsedIfEnvVarSet() .DotNetRoot(TestContext.BuiltDotNet.BinPath, arch) .Execute() .Should().Pass() - .And.HaveUsedDotNetRootInstallLocation(TestContext.BuiltDotNet.BinPath, TestContext.TargetRID, arch); + .And.HaveUsedDotNetRootInstallLocation(TestContext.BuiltDotNet.BinPath, TestContext.BuildRID, arch); } [Fact] @@ -41,7 +41,7 @@ public void EnvironmentVariable_IfNoArchSpecificEnvVarIsFoundDotnetRootIsUsed() .DotNetRoot(TestContext.BuiltDotNet.BinPath) .Execute() .Should().Pass() - 
.And.HaveUsedDotNetRootInstallLocation(TestContext.BuiltDotNet.BinPath, TestContext.TargetRID); + .And.HaveUsedDotNetRootInstallLocation(TestContext.BuiltDotNet.BinPath, TestContext.BuildRID); } [Fact] @@ -55,7 +55,7 @@ public void EnvironmentVariable_ArchSpecificDotnetRootIsUsedOverDotnetRoot() .DotNetRoot(dotnet, arch) .Execute() .Should().Pass() - .And.HaveUsedDotNetRootInstallLocation(dotnet, TestContext.TargetRID, arch) + .And.HaveUsedDotNetRootInstallLocation(dotnet, TestContext.BuildRID, arch) .And.NotHaveStdErrContaining("Using environment variable DOTNET_ROOT="); } @@ -77,7 +77,7 @@ public void EnvironmentVariable_DotNetRootIsUsedOverInstallLocationIfSet() .DotNetRoot(dotnet, arch) .Execute() .Should().Pass() - .And.HaveUsedDotNetRootInstallLocation(dotnet, TestContext.TargetRID, arch) + .And.HaveUsedDotNetRootInstallLocation(dotnet, TestContext.BuildRID, arch) .And.NotHaveStdErrContaining("Using global install location"); } } @@ -119,7 +119,7 @@ public void EnvironmentVariable_DotnetRootPathExistsButHasNoHost() TestContext.BuiltDotNet.BinPath) .Execute() .Should().Fail() - .And.HaveUsedDotNetRootInstallLocation(app.Location, TestContext.TargetRID) + .And.HaveUsedDotNetRootInstallLocation(app.Location, TestContext.BuildRID) // If DOTNET_ROOT points to a folder that exists we assume that there's a dotnet installation in it .And.HaveStdErrContaining($"The required library {Binaries.HostFxr.FileName} could not be found."); } diff --git a/src/installer/tests/HostActivation.Tests/StandaloneAppActivation.cs b/src/installer/tests/HostActivation.Tests/StandaloneAppActivation.cs index b4ea95307c01d6..7390a305ad1595 100644 --- a/src/installer/tests/HostActivation.Tests/StandaloneAppActivation.cs +++ b/src/installer/tests/HostActivation.Tests/StandaloneAppActivation.cs @@ -133,7 +133,7 @@ public void DotNetRoot_IncorrectLayout_Fails() .DotNetRoot(app.Location) .Execute(expectedToFail: true) .Should().Fail() - 
.And.HaveUsedDotNetRootInstallLocation(Path.GetFullPath(app.Location), TestContext.TargetRID) + .And.HaveUsedDotNetRootInstallLocation(Path.GetFullPath(app.Location), TestContext.BuildRID) .And.HaveStdErrContaining($"The required library {Binaries.HostFxr.FileName} could not be found."); } diff --git a/src/installer/tests/Microsoft.NET.HostModel.Tests/Microsoft.NET.HostModel.Tests.csproj b/src/installer/tests/Microsoft.NET.HostModel.Tests/Microsoft.NET.HostModel.Tests.csproj index e809026a29c46c..3172cfcf0099b3 100644 --- a/src/installer/tests/Microsoft.NET.HostModel.Tests/Microsoft.NET.HostModel.Tests.csproj +++ b/src/installer/tests/Microsoft.NET.HostModel.Tests/Microsoft.NET.HostModel.Tests.csproj @@ -1,13 +1,11 @@ - Microsoft.NET.HostModel.Tests $(TestInfraTargetFramework) Microsoft.NET.HostModel.Tests true hm - true diff --git a/src/installer/tests/PrepareTestAssets/PrepareTestAssets.proj b/src/installer/tests/PrepareTestAssets/PrepareTestAssets.proj deleted file mode 100644 index eb17dd4c391a29..00000000000000 --- a/src/installer/tests/PrepareTestAssets/PrepareTestAssets.proj +++ /dev/null @@ -1,133 +0,0 @@ - - - - - - - - - $(IntermediateOutputPath)temp\ - $(_HostRid) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - $(RepoRoot)NuGet.config - - @(RestoreTestSource -> '<add key="%(Key)" value="%(Identity)" />', '%0A ') - - $([System.IO.File]::ReadAllText('$(TemplateNuGetConfigFile)').Replace( - '<!-- TEST_RESTORE_SOURCES_INSERTION_LINE -->', - '$(RestoreTestSourceConfigLines)')) - - - $([System.Text.RegularExpressions.Regex]::Replace( - '$(TestRestoreNuGetConfigContent)', - '<add key=".+" value="https://pkgs.dev.azure.com/dnceng/internal/.+" />', - '')) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/src/installer/tests/TestUtils/DotNetBuilder.cs b/src/installer/tests/TestUtils/DotNetBuilder.cs index 98bd3f526ba80b..2ff602fd4b4f0b 100644 --- a/src/installer/tests/TestUtils/DotNetBuilder.cs +++ 
b/src/installer/tests/TestUtils/DotNetBuilder.cs @@ -120,7 +120,7 @@ public DotNetBuilder AddMicrosoftNETCoreAppFrameworkMockCoreClr(string version, // ./shared/Microsoft.NETCore.App/ - create a mock of the root framework string netCoreAppPath = AddFramework(Constants.MicrosoftNETCoreApp, version); - string currentRid = TestContext.TargetRID; + string currentRid = TestContext.BuildRID; NetCoreAppBuilder.ForNETCoreApp(Constants.MicrosoftNETCoreApp, currentRid) .WithStandardRuntimeFallbacks() diff --git a/src/installer/tests/TestUtils/SingleFileTestApp.cs b/src/installer/tests/TestUtils/SingleFileTestApp.cs index 69d61dd7f41926..bc65e2f6a89fed 100644 --- a/src/installer/tests/TestUtils/SingleFileTestApp.cs +++ b/src/installer/tests/TestUtils/SingleFileTestApp.cs @@ -144,7 +144,7 @@ private void PopulateBuiltAppDirectory() File.Delete(builtApp.DepsJson); var shortVersion = TestContext.Tfm[3..]; // trim "net" from beginning - var builder = NetCoreAppBuilder.ForNETCoreApp(AppName, TestContext.TargetRID, shortVersion); + var builder = NetCoreAppBuilder.ForNETCoreApp(AppName, TestContext.BuildRID, shortVersion); // Update the .runtimeconfig.json builder.WithRuntimeConfig(c => @@ -164,7 +164,7 @@ private void PopulateBuiltAppDirectory() .WithAsset(Path.GetFileName(builtApp.AppDll), f => f.NotOnDisk()))); if (selfContained) { - builder.WithRuntimePack($"{Constants.MicrosoftNETCoreApp}.Runtime.{TestContext.TargetRID}", TestContext.MicrosoftNETCoreAppVersion, l => l + builder.WithRuntimePack($"{Constants.MicrosoftNETCoreApp}.Runtime.{TestContext.BuildRID}", TestContext.MicrosoftNETCoreAppVersion, l => l .WithAssemblyGroup(string.Empty, g => { foreach (var file in Binaries.GetRuntimeFiles().Assemblies) diff --git a/src/installer/tests/TestUtils/TestApp.cs b/src/installer/tests/TestUtils/TestApp.cs index 37936d9506a108..cca9ef4c3740f7 100644 --- a/src/installer/tests/TestUtils/TestApp.cs +++ b/src/installer/tests/TestUtils/TestApp.cs @@ -103,7 +103,7 @@ public enum 
MockedComponent public void PopulateSelfContained(MockedComponent mock, Action customizer = null) { - var builder = NetCoreAppBuilder.ForNETCoreApp(Name, TestContext.TargetRID); + var builder = NetCoreAppBuilder.ForNETCoreApp(Name, TestContext.BuildRID); // Update the .runtimeconfig.json - add included framework and remove any existing NETCoreApp framework builder.WithRuntimeConfig(c => @@ -114,7 +114,7 @@ public void PopulateSelfContained(MockedComponent mock, Action p.WithAssemblyGroup(null, g => g.WithMainAssembly())); // Add runtime libraries and assets - builder.WithRuntimePack($"{Constants.MicrosoftNETCoreApp}.Runtime.{TestContext.TargetRID}", TestContext.MicrosoftNETCoreAppVersion, l => + builder.WithRuntimePack($"{Constants.MicrosoftNETCoreApp}.Runtime.{TestContext.BuildRID}", TestContext.MicrosoftNETCoreAppVersion, l => { if (mock == MockedComponent.None) { diff --git a/src/installer/tests/TestUtils/TestContext.cs b/src/installer/tests/TestUtils/TestContext.cs index 3c8ffe53f4711a..74bcf5c4f23928 100644 --- a/src/installer/tests/TestUtils/TestContext.cs +++ b/src/installer/tests/TestUtils/TestContext.cs @@ -10,7 +10,6 @@ public sealed class TestContext public static string BuildArchitecture { get; } public static string BuildRID { get; } public static string Configuration { get; } - public static string TargetRID { get; } public static string MicrosoftNETCoreAppVersion { get; } public static string Tfm { get; } @@ -36,9 +35,8 @@ static TestContext() StringComparer.OrdinalIgnoreCase); BuildArchitecture = GetTestContextVariable("BUILD_ARCHITECTURE"); - BuildRID = GetTestContextVariable("BUILDRID"); + BuildRID = GetTestContextVariable("BUILD_RID"); Configuration = GetTestContextVariable("BUILD_CONFIGURATION"); - TargetRID = GetTestContextVariable("TEST_TARGETRID"); MicrosoftNETCoreAppVersion = GetTestContextVariable("MNA_VERSION"); Tfm = GetTestContextVariable("MNA_TFM"); From 869dfb708eab208919fcd53fd6be0312c8f7edd1 Mon Sep 17 00:00:00 2001 From: Aaron 
Robinson Date: Fri, 29 Mar 2024 13:22:35 -0700 Subject: [PATCH 020/132] Reenable C4242 and C4244 warnings in libunwind (#100241) * Fix C4242 and C4244 warnings in libunwind * Add libunwind PR link --------- Co-authored-by: Filip Navara --- src/native/external/libunwind-version.txt | 1 + .../external/libunwind/include/dwarf_i.h | 6 +-- .../external/libunwind/include/libunwind_i.h | 7 +++- .../include/tdep-aarch64/libunwind_i.h | 16 ++++---- .../libunwind/include/tdep-arm/ex_tables.h | 2 +- .../libunwind/include/tdep-arm/libunwind_i.h | 12 +++--- .../libunwind/include/tdep-hppa/libunwind_i.h | 16 ++++---- .../include/tdep-loongarch64/libunwind_i.h | 4 +- .../libunwind/include/tdep-mips/libunwind_i.h | 24 ++++++------ .../include/tdep-ppc32/libunwind_i.h | 24 ++++++------ .../include/tdep-ppc64/libunwind_i.h | 26 ++++++------- .../include/tdep-riscv/libunwind_i.h | 16 ++++---- .../include/tdep-s390x/libunwind_i.h | 26 ++++++------- .../libunwind/include/tdep-sh/libunwind_i.h | 16 ++++---- .../libunwind/include/tdep-x86/libunwind_i.h | 16 ++++---- .../include/tdep-x86_64/libunwind_i.h | 8 ++-- .../libunwind/src/aarch64/Gget_save_loc.c | 4 +- .../libunwind/src/aarch64/Gstash_frame.c | 8 ++-- .../external/libunwind/src/arm/Gex_tables.c | 38 ++++++++++--------- .../libunwind/src/arm/Gget_save_loc.c | 4 +- .../external/libunwind/src/dwarf/Gexpr.c | 8 ++-- .../external/libunwind/src/dwarf/Gfde.c | 11 +++--- .../libunwind/src/dwarf/Gfind_proc_info-lsb.c | 4 +- .../external/libunwind/src/dwarf/Gparser.c | 13 ++++--- .../libunwind/src/hppa/Gget_save_loc.c | 4 +- .../libunwind/src/loongarch64/Gget_save_loc.c | 4 +- .../external/libunwind/src/mi/Gdyn-remote.c | 2 +- .../libunwind/src/mips/Gget_save_loc.c | 4 +- .../libunwind/src/riscv/Gget_save_loc.c | 4 +- .../libunwind/src/s390x/Gget_save_loc.c | 4 +- .../external/libunwind/src/sh/Gget_save_loc.c | 4 +- .../libunwind/src/x86/Gget_save_loc.c | 4 +- .../libunwind/src/x86_64/Gget_save_loc.c | 4 +- 
.../external/libunwind_extras/CMakeLists.txt | 2 - 34 files changed, 175 insertions(+), 171 deletions(-) diff --git a/src/native/external/libunwind-version.txt b/src/native/external/libunwind-version.txt index 776043575539fd..fd56a772b51c28 100644 --- a/src/native/external/libunwind-version.txt +++ b/src/native/external/libunwind-version.txt @@ -8,3 +8,4 @@ Apply https://github.com/libunwind/libunwind/pull/704 Revert https://github.com/libunwind/libunwind/pull/503 # issue: https://github.com/libunwind/libunwind/issues/702 Apply https://github.com/libunwind/libunwind/pull/714 Revert https://github.com/libunwind/libunwind/commit/ec03043244082b8f552881ba9fb790aa49c85468 and follow up changes in the same file # issue: https://github.com/libunwind/libunwind/issues/715 +Apply https://github.com/libunwind/libunwind/pull/734 diff --git a/src/native/external/libunwind/include/dwarf_i.h b/src/native/external/libunwind/include/dwarf_i.h index 0f47082adbb732..624021faed4a55 100644 --- a/src/native/external/libunwind/include/dwarf_i.h +++ b/src/native/external/libunwind/include/dwarf_i.h @@ -280,7 +280,7 @@ dwarf_readw (unw_addr_space_t as, unw_accessors_t *a, unw_word_t *addr, ret = dwarf_readu64 (as, a, addr, &u64, arg); if (ret < 0) return ret; - *val = u64; + *val = (unw_word_t) u64; return ret; default: @@ -398,7 +398,7 @@ dwarf_read_encoded_pointer_inlined (unw_addr_space_t as, unw_accessors_t *a, case DW_EH_PE_udata8: if ((ret = dwarf_readu64 (as, a, addr, &uval64, arg)) < 0) return ret; - val = uval64; + val = (unw_word_t) uval64; break; case DW_EH_PE_sleb128: @@ -421,7 +421,7 @@ dwarf_read_encoded_pointer_inlined (unw_addr_space_t as, unw_accessors_t *a, case DW_EH_PE_sdata8: if ((ret = dwarf_reads64 (as, a, addr, &sval64, arg)) < 0) return ret; - val = sval64; + val = (unw_word_t) sval64; break; default: diff --git a/src/native/external/libunwind/include/libunwind_i.h b/src/native/external/libunwind/include/libunwind_i.h index 1dbcb6a86d0f1b..4140d88a10c65d 100644 --- 
a/src/native/external/libunwind/include/libunwind_i.h +++ b/src/native/external/libunwind/include/libunwind_i.h @@ -333,7 +333,7 @@ static inline void _unw_debug(int level, char const * const fname, char const * if (level > 16) level = 16; int bcount = snprintf (buf, buf_size, "%*c>%s: ", level, ' ', fname); - int res = write(STDERR_FILENO, buf, bcount); + ssize_t res = write(STDERR_FILENO, buf, bcount); va_list ap; va_start(ap, fmt); @@ -350,7 +350,7 @@ static inline void _unw_debug(int level, char const * const fname, char const * # define Dprintf( /* format */ ...) #endif /* defined(UNW_DEBUG) */ -static ALWAYS_INLINE int +static ALWAYS_INLINE ssize_t print_error (const char *string) { return write (2, string, strlen (string)); @@ -419,6 +419,9 @@ static inline void invalidate_edi (struct elf_dyn_info *edi) # define PT_ARM_EXIDX 0x70000001 /* ARM unwind segment */ #endif /* !PT_ARM_EXIDX */ +#define DWARF_GET_MEM_LOC(l) DWARF_GET_LOC(l) +#define DWARF_GET_REG_LOC(l) ((unw_regnum_t) DWARF_GET_LOC(l)) + #include "tdep/libunwind_i.h" #ifndef TDEP_DWARF_SP diff --git a/src/native/external/libunwind/include/tdep-aarch64/libunwind_i.h b/src/native/external/libunwind/include/tdep-aarch64/libunwind_i.h index ec1a2e91afd643..fd5554946712d4 100644 --- a/src/native/external/libunwind/include/tdep-aarch64/libunwind_i.h +++ b/src/native/external/libunwind/include/tdep-aarch64/libunwind_i.h @@ -197,10 +197,10 @@ dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 0, c->as_arg)) < 0) return ret; @@ -220,10 +220,10 @@ dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) return -UNW_EBADREG; if 
(DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 1, c->as_arg)) < 0) return ret; @@ -245,10 +245,10 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); } @@ -265,10 +265,10 @@ dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), &val, 1, c->as_arg); } diff --git a/src/native/external/libunwind/include/tdep-arm/ex_tables.h b/src/native/external/libunwind/include/tdep-arm/ex_tables.h index 9df5e0a9fa4b9b..90a023d4955475 100644 --- a/src/native/external/libunwind/include/tdep-arm/ex_tables.h +++ b/src/native/external/libunwind/include/tdep-arm/ex_tables.h @@ -49,7 +49,7 @@ struct arm_exbuf_data #define arm_exidx_apply_cmd UNW_OBJ(arm_exidx_apply_cmd) int arm_exidx_extract (struct dwarf_cursor *c, uint8_t *buf); -int arm_exidx_decode (const uint8_t *buf, uint8_t len, struct dwarf_cursor *c); +int arm_exidx_decode (const uint8_t *buf, int len, struct dwarf_cursor *c); int arm_exidx_apply_cmd (struct arm_exbuf_data *edata, struct dwarf_cursor *c); 
#endif // ARM_EX_TABLES_H diff --git a/src/native/external/libunwind/include/tdep-arm/libunwind_i.h b/src/native/external/libunwind/include/tdep-arm/libunwind_i.h index 35b13c79fbaca0..0f55dd04ba0991 100644 --- a/src/native/external/libunwind/include/tdep-arm/libunwind_i.h +++ b/src/native/external/libunwind/include/tdep-arm/libunwind_i.h @@ -178,7 +178,7 @@ dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); addr = DWARF_GET_LOC (loc); @@ -201,7 +201,7 @@ dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); addr = DWARF_GET_LOC (loc); @@ -226,10 +226,10 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); } @@ -246,10 +246,10 @@ dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), &val, 1, c->as_arg); } diff --git 
a/src/native/external/libunwind/include/tdep-hppa/libunwind_i.h b/src/native/external/libunwind/include/tdep-hppa/libunwind_i.h index 1b6757fb13610c..ce60dcf14b67e1 100644 --- a/src/native/external/libunwind/include/tdep-hppa/libunwind_i.h +++ b/src/native/external/libunwind/include/tdep-hppa/libunwind_i.h @@ -146,10 +146,10 @@ dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 0, c->as_arg)) < 0) return ret; @@ -169,10 +169,10 @@ dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 1, c->as_arg)) < 0) return ret; @@ -194,10 +194,10 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); } @@ -214,10 +214,10 @@ dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), 
&val, 1, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), &val, 1, c->as_arg); } diff --git a/src/native/external/libunwind/include/tdep-loongarch64/libunwind_i.h b/src/native/external/libunwind/include/tdep-loongarch64/libunwind_i.h index d21c9229766e34..11fe95d6f11017 100644 --- a/src/native/external/libunwind/include/tdep-loongarch64/libunwind_i.h +++ b/src/native/external/libunwind/include/tdep-loongarch64/libunwind_i.h @@ -167,10 +167,10 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); } diff --git a/src/native/external/libunwind/include/tdep-mips/libunwind_i.h b/src/native/external/libunwind/include/tdep-mips/libunwind_i.h index b0e623499d053d..a3bd4479ae3933 100644 --- a/src/native/external/libunwind/include/tdep-mips/libunwind_i.h +++ b/src/native/external/libunwind/include/tdep-mips/libunwind_i.h @@ -195,10 +195,10 @@ dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 0, c->as_arg)) < 0) return ret; @@ -218,10 +218,10 @@ dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + 
return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 1, c->as_arg)) < 0) return ret; @@ -243,20 +243,20 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); else if (c->as->abi == UNW_MIPS_ABI_O32) - return read_s32 (c, DWARF_GET_LOC (loc), val); + return read_s32 (c, DWARF_GET_MEM_LOC (loc), val); else if (c->as->abi == UNW_MIPS_ABI_N32) { if (tdep_big_endian(c->as)) - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc) + 4, val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc) + 4, val, 0, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); } else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); } @@ -273,12 +273,12 @@ dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); else if (c->as->abi == UNW_MIPS_ABI_O32) - return write_s32 (c, DWARF_GET_LOC (loc), &val); + return write_s32 (c, DWARF_GET_MEM_LOC (loc), &val); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), &val, 1, c->as_arg); } diff --git a/src/native/external/libunwind/include/tdep-ppc32/libunwind_i.h 
b/src/native/external/libunwind/include/tdep-ppc32/libunwind_i.h index 46d4f5a8ed9d68..469e02f24b8cd0 100644 --- a/src/native/external/libunwind/include/tdep-ppc32/libunwind_i.h +++ b/src/native/external/libunwind/include/tdep-ppc32/libunwind_i.h @@ -130,10 +130,10 @@ dwarf_getvr (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t * val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, valp, 0, c->as_arg)) < 0) @@ -156,10 +156,10 @@ dwarf_putvr (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, valp, 1, c->as_arg)) < 0) return ret; @@ -180,10 +180,10 @@ dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t * val) assert (!DWARF_IS_V_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); return (*c->as->acc.access_mem) (c->as, addr + 0, valp, 0, c->as_arg); } @@ -201,10 +201,10 @@ dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) assert (!DWARF_IS_V_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); return 
(*c->as->acc.access_mem) (c->as, addr + 0, valp, 1, c->as_arg); } @@ -223,10 +223,10 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t * val) assert (!DWARF_IS_V_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); } @@ -244,10 +244,10 @@ dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) assert (!DWARF_IS_V_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), &val, 1, c->as_arg); } diff --git a/src/native/external/libunwind/include/tdep-ppc64/libunwind_i.h b/src/native/external/libunwind/include/tdep-ppc64/libunwind_i.h index a93d5693184334..0767706956b850 100644 --- a/src/native/external/libunwind/include/tdep-ppc64/libunwind_i.h +++ b/src/native/external/libunwind/include/tdep-ppc64/libunwind_i.h @@ -183,10 +183,10 @@ dwarf_getvr (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t * val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, valp, 0, c->as_arg)) < 0) @@ -209,10 +209,10 @@ dwarf_putvr (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC 
(loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, valp, 1, c->as_arg)) < 0) return ret; @@ -233,12 +233,11 @@ dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t * val) assert (!DWARF_IS_V_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); return (*c->as->acc.access_mem) (c->as, addr + 0, valp, 0, c->as_arg); - } static inline int @@ -254,11 +253,10 @@ dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) assert (!DWARF_IS_V_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); - addr = DWARF_GET_LOC (loc); - + addr = DWARF_GET_MEM_LOC (loc); return (*c->as->acc.access_mem) (c->as, addr + 0, valp, 1, c->as_arg); } @@ -276,10 +274,10 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t * val) assert (!DWARF_IS_V_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); } @@ -297,10 +295,10 @@ dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) assert (!DWARF_IS_V_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, 
DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), &val, 1, c->as_arg); } diff --git a/src/native/external/libunwind/include/tdep-riscv/libunwind_i.h b/src/native/external/libunwind/include/tdep-riscv/libunwind_i.h index 951de12a0bc942..b0aebc35801bff 100644 --- a/src/native/external/libunwind/include/tdep-riscv/libunwind_i.h +++ b/src/native/external/libunwind/include/tdep-riscv/libunwind_i.h @@ -169,11 +169,11 @@ dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); /* FIXME: unw_word_t may not be equal to FLEN */ - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); #if __riscv_xlen == __riscv_flen return (*c->as->acc.access_mem) (c->as, addr, (unw_word_t *) valp, 0, c->as_arg); @@ -192,11 +192,11 @@ dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); /* FIXME: unw_word_t may not be equal to FLEN */ - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); #if __riscv_xlen == __riscv_flen return (*c->as->acc.access_mem) (c->as, addr, (unw_word_t *) valp, 1, c->as_arg); @@ -218,10 +218,10 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); } @@ -238,10 +238,10 @@ dwarf_put (struct 
dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), &val, 1, c->as_arg); } diff --git a/src/native/external/libunwind/include/tdep-s390x/libunwind_i.h b/src/native/external/libunwind/include/tdep-s390x/libunwind_i.h index a6af60c9c61d5f..70605a3f8b5008 100644 --- a/src/native/external/libunwind/include/tdep-s390x/libunwind_i.h +++ b/src/native/external/libunwind/include/tdep-s390x/libunwind_i.h @@ -123,17 +123,17 @@ dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) return -UNW_EBADREG; if (DWARF_IS_FP_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); /* FPRs may be saved in GPRs */ if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), (unw_word_t*)val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), (unw_word_t*)val, 0, c->as_arg); if (DWARF_IS_MEM_LOC (loc)) - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), (unw_word_t*)val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), (unw_word_t*)val, 0, c->as_arg); assert(DWARF_IS_VAL_LOC (loc)); - *val = *(unw_fpreg_t*) DWARF_GET_LOC (loc); + *val = *(unw_fpreg_t*) DWARF_GET_MEM_LOC (loc); return 0; } @@ -147,15 +147,15 @@ dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) return -UNW_EBADREG; if (DWARF_IS_FP_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); /* FPRs may be saved in GPRs */ if 
(DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), (unw_word_t*) &val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), (unw_word_t*) &val, 1, c->as_arg); assert(DWARF_IS_MEM_LOC (loc)); - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), (unw_word_t*) &val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), (unw_word_t*) &val, 1, c->as_arg); } @@ -169,13 +169,13 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) /* GPRs may be saved in FPRs */ if (DWARF_IS_FP_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*)val, + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), (unw_fpreg_t*)val, 0, c->as_arg); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); if (DWARF_IS_MEM_LOC (loc)) - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); assert(DWARF_IS_VAL_LOC (loc)); *val = DWARF_GET_LOC (loc); @@ -193,14 +193,14 @@ dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) /* GPRs may be saved in FPRs */ if (DWARF_IS_FP_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*) &val, + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), (unw_fpreg_t*) &val, 1, c->as_arg); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); assert(DWARF_IS_MEM_LOC (loc)); - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), &val, 1, c->as_arg); } diff --git a/src/native/external/libunwind/include/tdep-sh/libunwind_i.h 
b/src/native/external/libunwind/include/tdep-sh/libunwind_i.h index 4f4a5cdd067517..e5b048235124d5 100644 --- a/src/native/external/libunwind/include/tdep-sh/libunwind_i.h +++ b/src/native/external/libunwind/include/tdep-sh/libunwind_i.h @@ -147,10 +147,10 @@ dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 0, c->as_arg)) < 0) return ret; @@ -170,10 +170,10 @@ dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 1, c->as_arg)) < 0) return ret; @@ -195,10 +195,10 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); } @@ -215,10 +215,10 @@ dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, 
DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), &val, 1, c->as_arg); } diff --git a/src/native/external/libunwind/include/tdep-x86/libunwind_i.h b/src/native/external/libunwind/include/tdep-x86/libunwind_i.h index 58e583c3b3f27a..1f4f07abf77966 100644 --- a/src/native/external/libunwind/include/tdep-x86/libunwind_i.h +++ b/src/native/external/libunwind/include/tdep-x86/libunwind_i.h @@ -144,10 +144,10 @@ dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 0, c->as_arg)) < 0) return ret; @@ -167,10 +167,10 @@ dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), + return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); - addr = DWARF_GET_LOC (loc); + addr = DWARF_GET_MEM_LOC (loc); if ((ret = (*c->as->acc.access_mem) (c->as, addr + 0, (unw_word_t *) valp, 1, c->as_arg)) < 0) return ret; @@ -188,10 +188,10 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); if (DWARF_IS_MEM_LOC (loc)) - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); assert(DWARF_IS_VAL_LOC (loc)); *val = DWARF_GET_LOC (loc); @@ -207,10 +207,10 @@ dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) 
return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), &val, 1, c->as_arg); } diff --git a/src/native/external/libunwind/include/tdep-x86_64/libunwind_i.h b/src/native/external/libunwind/include/tdep-x86_64/libunwind_i.h index 7ec16aafdcdc02..683b397f8bbb3b 100644 --- a/src/native/external/libunwind/include/tdep-x86_64/libunwind_i.h +++ b/src/native/external/libunwind/include/tdep-x86_64/libunwind_i.h @@ -199,10 +199,10 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), val, 0, c->as_arg); if (DWARF_IS_MEM_LOC (loc)) - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), val, 0, c->as_arg); assert(DWARF_IS_VAL_LOC (loc)); *val = DWARF_GET_LOC (loc); @@ -218,10 +218,10 @@ dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) return -UNW_EBADREG; if (DWARF_IS_REG_LOC (loc)) - return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_reg) (c->as, DWARF_GET_REG_LOC (loc), &val, 1, c->as_arg); else - return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, + return (*c->as->acc.access_mem) (c->as, DWARF_GET_MEM_LOC (loc), &val, 1, c->as_arg); } diff --git a/src/native/external/libunwind/src/aarch64/Gget_save_loc.c b/src/native/external/libunwind/src/aarch64/Gget_save_loc.c index 86bbbd03d11b3b..9fbef2488127c1 100644 --- a/src/native/external/libunwind/src/aarch64/Gget_save_loc.c +++ b/src/native/external/libunwind/src/aarch64/Gget_save_loc.c 
@@ -88,13 +88,13 @@ unw_get_save_loc (unw_cursor_t *cursor, int reg, unw_save_loc_t *sloc) if (DWARF_IS_REG_LOC (loc)) { sloc->type = UNW_SLT_REG; - sloc->u.regnum = DWARF_GET_LOC (loc); + sloc->u.regnum = DWARF_GET_REG_LOC (loc); } else #endif { sloc->type = UNW_SLT_MEMORY; - sloc->u.addr = DWARF_GET_LOC (loc); + sloc->u.addr = DWARF_GET_MEM_LOC (loc); } return 0; } diff --git a/src/native/external/libunwind/src/aarch64/Gstash_frame.c b/src/native/external/libunwind/src/aarch64/Gstash_frame.c index c6f370a442853f..7eb317d926901a 100644 --- a/src/native/external/libunwind/src/aarch64/Gstash_frame.c +++ b/src/native/external/libunwind/src/aarch64/Gstash_frame.c @@ -54,25 +54,25 @@ tdep_stash_frame (struct dwarf_cursor *d, struct dwarf_reg_state *rs) && (rs->reg.where[DWARF_CFA_REG_COLUMN] == DWARF_WHERE_REG) && (rs->reg.val[DWARF_CFA_REG_COLUMN] == FP || rs->reg.val[DWARF_CFA_REG_COLUMN] == SP) - && labs(rs->reg.val[DWARF_CFA_OFF_COLUMN]) < (1 << 29) + && labs((long)rs->reg.val[DWARF_CFA_OFF_COLUMN]) < (1 << 29) && rs->ret_addr_column == LR && (rs->reg.where[FP] == DWARF_WHERE_UNDEF || rs->reg.where[FP] == DWARF_WHERE_SAME || rs->reg.where[FP] == DWARF_WHERE_CFA || (rs->reg.where[FP] == DWARF_WHERE_CFAREL - && labs(rs->reg.val[FP]) < (1 << 29) + && labs((long)rs->reg.val[FP]) < (1 << 29) && rs->reg.val[FP]+1 != 0)) && (rs->reg.where[LR] == DWARF_WHERE_UNDEF || rs->reg.where[LR] == DWARF_WHERE_SAME || rs->reg.where[LR] == DWARF_WHERE_CFA || (rs->reg.where[LR] == DWARF_WHERE_CFAREL - && labs(rs->reg.val[LR]) < (1 << 29) + && labs((long)rs->reg.val[LR]) < (1 << 29) && rs->reg.val[LR]+1 != 0)) && (rs->reg.where[SP] == DWARF_WHERE_UNDEF || rs->reg.where[SP] == DWARF_WHERE_SAME || rs->reg.where[SP] == DWARF_WHERE_CFA || (rs->reg.where[SP] == DWARF_WHERE_CFAREL - && labs(rs->reg.val[SP]) < (1 << 29) + && labs((long)rs->reg.val[SP]) < (1 << 29) && rs->reg.val[SP]+1 != 0))) { /* Save information for a standard frame. 
*/ diff --git a/src/native/external/libunwind/src/arm/Gex_tables.c b/src/native/external/libunwind/src/arm/Gex_tables.c index 56bbd0d07666c5..1d93e1d8c9290e 100644 --- a/src/native/external/libunwind/src/arm/Gex_tables.c +++ b/src/native/external/libunwind/src/arm/Gex_tables.c @@ -26,7 +26,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ specific unwind information. Documentation about the exception handling ABI for the ARM architecture can be found at: http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038a/IHI0038A_ehabi.pdf -*/ +*/ #include "libunwind_i.h" @@ -151,7 +151,7 @@ arm_exidx_apply_cmd (struct arm_exbuf_data *edata, struct dwarf_cursor *c) * arm_exidx_apply_cmd that applies the command onto the dwarf_cursor. */ HIDDEN int -arm_exidx_decode (const uint8_t *buf, uint8_t len, struct dwarf_cursor *c) +arm_exidx_decode (const uint8_t *buf, int len, struct dwarf_cursor *c) { #define READ_OP() *buf++ assert(buf != NULL); @@ -284,7 +284,7 @@ arm_exidx_decode (const uint8_t *buf, uint8_t len, struct dwarf_cursor *c) /** * Reads the entry from the given cursor and extracts the unwind instructions - * into buf. Returns the number of the extracted unwind insns or + * into buf. Returns the number of the extracted unwind insns or * -UNW_ESTOPUNWIND if the special bit pattern ARM_EXIDX_CANT_UNWIND (0x1) was * found. 
*/ @@ -297,7 +297,7 @@ arm_exidx_extract (struct dwarf_cursor *c, uint8_t *buf) uint32_t data; /* An ARM unwind entry consists of a prel31 offset to the start of a - function followed by 31bits of data: + function followed by 31bits of data: * if set to 0x1: the function cannot be unwound (EXIDX_CANTUNWIND) * if bit 31 is one: this is a table entry itself (ARM_EXIDX_COMPACT) * if bit 31 is zero: this is a prel31 offset of the start of the @@ -317,9 +317,9 @@ arm_exidx_extract (struct dwarf_cursor *c, uint8_t *buf) { Debug (2, "%p compact model %d [%8.8x]\n", (void *)addr, (data >> 24) & 0x7f, data); - buf[nbuf++] = data >> 16; - buf[nbuf++] = data >> 8; - buf[nbuf++] = data; + buf[nbuf++] = (uint8_t) (data >> 16); + buf[nbuf++] = (uint8_t) (data >> 8); + buf[nbuf++] = (uint8_t) data; } else { @@ -342,9 +342,11 @@ arm_exidx_extract (struct dwarf_cursor *c, uint8_t *buf) extbl_data += 4; } else - buf[nbuf++] = data >> 16; - buf[nbuf++] = data >> 8; - buf[nbuf++] = data; + { + buf[nbuf++] = (uint8_t) (data >> 16); + } + buf[nbuf++] = (uint8_t) (data >> 8); + buf[nbuf++] = (uint8_t) data; } else { @@ -357,9 +359,9 @@ arm_exidx_extract (struct dwarf_cursor *c, uint8_t *buf) c->as_arg) < 0) return -UNW_EINVAL; n_table_words = data >> 24; - buf[nbuf++] = data >> 16; - buf[nbuf++] = data >> 8; - buf[nbuf++] = data; + buf[nbuf++] = (uint8_t) (data >> 16); + buf[nbuf++] = (uint8_t) (data >> 8); + buf[nbuf++] = (uint8_t) data; extbl_data += 8; } assert (n_table_words <= 5); @@ -370,10 +372,10 @@ arm_exidx_extract (struct dwarf_cursor *c, uint8_t *buf) c->as_arg) < 0) return -UNW_EINVAL; extbl_data += 4; - buf[nbuf++] = data >> 24; - buf[nbuf++] = data >> 16; - buf[nbuf++] = data >> 8; - buf[nbuf++] = data >> 0; + buf[nbuf++] = (uint8_t) (data >> 24); + buf[nbuf++] = (uint8_t) (data >> 16); + buf[nbuf++] = (uint8_t) (data >> 8); + buf[nbuf++] = (uint8_t) data; } } @@ -458,7 +460,7 @@ tdep_search_unwind_table (unw_addr_space_t as, unw_word_t ip, && di->format != 
UNW_INFO_FORMAT_ARM_EXIDX) return dwarf_search_unwind_table (as, ip, di, pi, need_unwind_info, arg); - return -UNW_ENOINFO; + return -UNW_ENOINFO; } #ifndef UNW_REMOTE_ONLY diff --git a/src/native/external/libunwind/src/arm/Gget_save_loc.c b/src/native/external/libunwind/src/arm/Gget_save_loc.c index 906c5b180d0de1..e9b43fc6dd2d32 100644 --- a/src/native/external/libunwind/src/arm/Gget_save_loc.c +++ b/src/native/external/libunwind/src/arm/Gget_save_loc.c @@ -88,13 +88,13 @@ unw_get_save_loc (unw_cursor_t *cursor, int reg, unw_save_loc_t *sloc) if (DWARF_IS_REG_LOC (loc)) { sloc->type = UNW_SLT_REG; - sloc->u.regnum = DWARF_GET_LOC (loc); + sloc->u.regnum = DWARF_GET_REG_LOC (loc); } else #endif { sloc->type = UNW_SLT_MEMORY; - sloc->u.addr = DWARF_GET_LOC (loc); + sloc->u.addr = DWARF_GET_MEM_LOC (loc); } return 0; } diff --git a/src/native/external/libunwind/src/dwarf/Gexpr.c b/src/native/external/libunwind/src/dwarf/Gexpr.c index 4a8da2ce1bb3d2..ddaeb7b0b7806a 100644 --- a/src/native/external/libunwind/src/dwarf/Gexpr.c +++ b/src/native/external/libunwind/src/dwarf/Gexpr.c @@ -122,7 +122,7 @@ sword (unw_addr_space_t as UNUSED, unw_word_t val) } } -static inline unw_word_t +static inline int read_operand (unw_addr_space_t as, unw_accessors_t *a, unw_word_t *addr, int operand_type, unw_word_t *val, void *arg) { @@ -169,7 +169,7 @@ read_operand (unw_addr_space_t as, unw_accessors_t *a, ret = dwarf_readu64 (as, a, addr, &u64, arg); if (ret < 0) return ret; - *val = u64; + *val = (unw_word_t) u64; break; case ULEB128: @@ -366,7 +366,7 @@ if (stackerror) \ Debug (15, "OP_bregx(r%d,0x%lx)\n", (int) operand1, (unsigned long) operand2); if ((ret = unw_get_reg (dwarf_to_cursor (c), - dwarf_to_unw_regnum (operand1), &tmp1)) < 0) + dwarf_to_unw_regnum ((int) operand1), &tmp1)) < 0) return ret; push (tmp1 + operand2); break; @@ -475,7 +475,7 @@ if (stackerror) \ case 8: if ((ret = dwarf_readu64 (as, a, &tmp1, &u64, arg)) < 0) return ret; - tmp2 = u64; + tmp2 = (unw_word_t) 
u64; if (operand1 != 8) { if (dwarf_is_big_endian (as)) diff --git a/src/native/external/libunwind/src/dwarf/Gfde.c b/src/native/external/libunwind/src/dwarf/Gfde.c index 3847d0a03c079d..3752e0a9d3eeac 100644 --- a/src/native/external/libunwind/src/dwarf/Gfde.c +++ b/src/native/external/libunwind/src/dwarf/Gfde.c @@ -102,7 +102,7 @@ parse_cie (unw_addr_space_t as, unw_accessors_t *a, unw_word_t addr, if ((ret = dwarf_readu64 (as, a, &addr, &u64val, arg)) < 0) return ret; - len = u64val; + len = (unw_word_t) u64val; cie_end_addr = addr + len; if ((ret = dwarf_readu64 (as, a, &addr, &cie_id, arg)) < 0) return ret; @@ -237,7 +237,8 @@ dwarf_extract_proc_info_from_fde (unw_addr_space_t as, unw_accessors_t *a, { unw_word_t fde_end_addr, cie_addr, cie_offset_addr, aug_end_addr = 0; unw_word_t start_ip, ip_range, aug_size, addr = *addrp; - int ret, ip_range_encoding; + int ret; + uint8_t ip_range_encoding; struct dwarf_cie_info dci; uint64_t u64val; uint32_t u32val; @@ -288,18 +289,18 @@ dwarf_extract_proc_info_from_fde (unw_addr_space_t as, unw_accessors_t *a, if ((ret = dwarf_readu64 (as, a, &addr, &u64val, arg)) < 0) return ret; - *addrp = fde_end_addr = addr + u64val; + *addrp = fde_end_addr = (unw_word_t) (addr + u64val); cie_offset_addr = addr; if ((ret = dwarf_reads64 (as, a, &addr, &cie_offset, arg)) < 0) return ret; - if (is_cie_id (cie_offset, is_debug_frame)) + if (is_cie_id ((unw_word_t) cie_offset, is_debug_frame)) /* ignore CIEs (happens during linear searches) */ return 0; if (is_debug_frame) - cie_addr = base + cie_offset; + cie_addr = (unw_word_t) (base + cie_offset); else /* DWARF says that the CIE_pointer in the FDE is a .debug_frame-relative offset, but the GCC-generated .eh_frame diff --git a/src/native/external/libunwind/src/dwarf/Gfind_proc_info-lsb.c b/src/native/external/libunwind/src/dwarf/Gfind_proc_info-lsb.c index c11345e88383f2..7e681477da48f3 100644 --- a/src/native/external/libunwind/src/dwarf/Gfind_proc_info-lsb.c +++ 
b/src/native/external/libunwind/src/dwarf/Gfind_proc_info-lsb.c @@ -975,10 +975,10 @@ dwarf_search_unwind_table (unw_addr_space_t as, unw_word_t ip, #endif { #ifndef UNW_LOCAL_ONLY - int32_t last_ip_offset = di->end_ip - ip_base - di->load_offset; + int32_t last_ip_offset = (int32_t) (di->end_ip - ip_base - di->load_offset); segbase = di->u.rti.segbase; if ((ret = remote_lookup (as, (uintptr_t) table, table_len, - ip - ip_base, &ent, &last_ip_offset, arg)) < 0) + (int32_t) (ip - ip_base), &ent, &last_ip_offset, arg)) < 0) return ret; if (ret) { diff --git a/src/native/external/libunwind/src/dwarf/Gparser.c b/src/native/external/libunwind/src/dwarf/Gparser.c index 7a5d7e1f0ff344..0616b2359a9b2e 100644 --- a/src/native/external/libunwind/src/dwarf/Gparser.c +++ b/src/native/external/libunwind/src/dwarf/Gparser.c @@ -666,7 +666,7 @@ hash (unw_word_t ip, unsigned short log_size) /* based on (sqrt(5)/2-1)*2^64 */ # define magic ((unw_word_t) 0x9e3779b97f4a7c16ULL) - return ip * magic >> ((sizeof(unw_word_t) * 8) - (log_size + 1)); + return (unw_hash_index_t) (ip * magic >> ((sizeof(unw_word_t) * 8) - (log_size + 1))); } static inline long @@ -730,7 +730,7 @@ rs_new (struct dwarf_rs_cache *cache, struct dwarf_cursor * c) cache->links[head].ip = c->ip; cache->links[head].valid = 1; - cache->links[head].signal_frame = tdep_cache_frame(c); + cache->links[head].signal_frame = tdep_cache_frame(c) ? 
1 : 0; return cache->buckets + head; } @@ -841,7 +841,8 @@ aarch64_get_ra_sign_state(struct dwarf_reg_state *rs) static int apply_reg_state (struct dwarf_cursor *c, struct dwarf_reg_state *rs) { - unw_word_t regnum, addr, cfa, ip; + unw_regnum_t regnum; + unw_word_t addr, cfa, ip; unw_word_t prev_ip, prev_cfa; unw_addr_space_t as; dwarf_loc_t cfa_loc; @@ -881,7 +882,7 @@ apply_reg_state (struct dwarf_cursor *c, struct dwarf_reg_state *rs) cfa = c->cfa; else { - regnum = dwarf_to_unw_regnum (rs->reg.val[DWARF_CFA_REG_COLUMN]); + regnum = dwarf_to_unw_regnum ((unw_regnum_t) rs->reg.val[DWARF_CFA_REG_COLUMN]); if ((ret = unw_get_reg (dwarf_to_cursor(c), regnum, &cfa)) < 0) return ret; } @@ -1015,7 +1016,7 @@ find_reg_state (struct dwarf_cursor *c, dwarf_state_record_t *sr) (rs = rs_lookup(cache, c))) { /* update hint; no locking needed: single-word writes are atomic */ - unsigned short index = rs - cache->buckets; + unsigned short index = (unsigned short) (rs - cache->buckets); c->use_prev_instr = ! 
cache->links[index].signal_frame; memcpy (&sr->rs_current, rs, sizeof (*rs)); } @@ -1047,7 +1048,7 @@ find_reg_state (struct dwarf_cursor *c, dwarf_state_record_t *sr) { if (rs) { - index = rs - cache->buckets; + index = (unsigned short) (rs - cache->buckets); c->hint = cache->links[index].hint; cache->links[c->prev_rs].hint = index + 1; c->prev_rs = index; diff --git a/src/native/external/libunwind/src/hppa/Gget_save_loc.c b/src/native/external/libunwind/src/hppa/Gget_save_loc.c index 02dfa3084f9117..fa4088da85b6c6 100644 --- a/src/native/external/libunwind/src/hppa/Gget_save_loc.c +++ b/src/native/external/libunwind/src/hppa/Gget_save_loc.c @@ -47,13 +47,13 @@ unw_get_save_loc (unw_cursor_t *cursor, int reg, unw_save_loc_t *sloc) if (DWARF_IS_REG_LOC (loc)) { sloc->type = UNW_SLT_REG; - sloc->u.regnum = DWARF_GET_LOC (loc); + sloc->u.regnum = DWARF_GET_REG_LOC (loc); } else #endif { sloc->type = UNW_SLT_MEMORY; - sloc->u.addr = DWARF_GET_LOC (loc); + sloc->u.addr = DWARF_GET_MEM_LOC (loc); } return 0; } diff --git a/src/native/external/libunwind/src/loongarch64/Gget_save_loc.c b/src/native/external/libunwind/src/loongarch64/Gget_save_loc.c index edc765744ad9f6..13ab43d42efb6e 100644 --- a/src/native/external/libunwind/src/loongarch64/Gget_save_loc.c +++ b/src/native/external/libunwind/src/loongarch64/Gget_save_loc.c @@ -89,13 +89,13 @@ unw_get_save_loc (unw_cursor_t *cursor, int reg, unw_save_loc_t *sloc) if (DWARF_IS_REG_LOC (loc)) { sloc->type = UNW_SLT_REG; - sloc->u.regnum = DWARF_GET_LOC (loc); + sloc->u.regnum = DWARF_GET_REG_LOC (loc); } else #endif { sloc->type = UNW_SLT_MEMORY; - sloc->u.addr = DWARF_GET_LOC (loc); + sloc->u.addr = DWARF_GET_MEM_LOC (loc); } return 0; } diff --git a/src/native/external/libunwind/src/mi/Gdyn-remote.c b/src/native/external/libunwind/src/mi/Gdyn-remote.c index 6d4ec1ecf869b3..ec2667e216d6ba 100644 --- a/src/native/external/libunwind/src/mi/Gdyn-remote.c +++ b/src/native/external/libunwind/src/mi/Gdyn-remote.c @@ -101,7 
+101,7 @@ intern_array (unw_addr_space_t as, unw_accessors_t *a, unw_word_t *addr, unw_word_t table_len, unw_word_t **table_data, void *arg) { - unw_word_t i, *data = calloc (table_len, WSIZE); + unw_word_t i, *data = calloc ((size_t) table_len, WSIZE); int ret = 0; if (!data) diff --git a/src/native/external/libunwind/src/mips/Gget_save_loc.c b/src/native/external/libunwind/src/mips/Gget_save_loc.c index c21f9b06d060a1..ca8adbd2a4c964 100644 --- a/src/native/external/libunwind/src/mips/Gget_save_loc.c +++ b/src/native/external/libunwind/src/mips/Gget_save_loc.c @@ -88,13 +88,13 @@ unw_get_save_loc (unw_cursor_t *cursor, int reg, unw_save_loc_t *sloc) if (DWARF_IS_REG_LOC (loc)) { sloc->type = UNW_SLT_REG; - sloc->u.regnum = DWARF_GET_LOC (loc); + sloc->u.regnum = DWARF_GET_REG_LOC (loc); } else #endif { sloc->type = UNW_SLT_MEMORY; - sloc->u.addr = DWARF_GET_LOC (loc); + sloc->u.addr = DWARF_GET_MEM_LOC (loc); } return 0; } diff --git a/src/native/external/libunwind/src/riscv/Gget_save_loc.c b/src/native/external/libunwind/src/riscv/Gget_save_loc.c index 342f8654fbc66b..11aed5c0044fe7 100644 --- a/src/native/external/libunwind/src/riscv/Gget_save_loc.c +++ b/src/native/external/libunwind/src/riscv/Gget_save_loc.c @@ -85,13 +85,13 @@ unw_get_save_loc (unw_cursor_t *cursor, int reg, unw_save_loc_t *sloc) if (DWARF_IS_REG_LOC (loc)) { sloc->type = UNW_SLT_REG; - sloc->u.regnum = DWARF_GET_LOC (loc); + sloc->u.regnum = DWARF_GET_REG_LOC (loc); } else #endif { sloc->type = UNW_SLT_MEMORY; - sloc->u.addr = DWARF_GET_LOC (loc); + sloc->u.addr = DWARF_GET_MEM_LOC (loc); } return 0; } diff --git a/src/native/external/libunwind/src/s390x/Gget_save_loc.c b/src/native/external/libunwind/src/s390x/Gget_save_loc.c index dc462c966e5673..40d2f0e54e4f84 100644 --- a/src/native/external/libunwind/src/s390x/Gget_save_loc.c +++ b/src/native/external/libunwind/src/s390x/Gget_save_loc.c @@ -74,13 +74,13 @@ unw_get_save_loc (unw_cursor_t *cursor, int reg, unw_save_loc_t *sloc) if 
(DWARF_IS_REG_LOC (loc)) { sloc->type = UNW_SLT_REG; - sloc->u.regnum = DWARF_GET_LOC (loc); + sloc->u.regnum = DWARF_GET_REG_LOC (loc); } else #endif { sloc->type = UNW_SLT_MEMORY; - sloc->u.addr = DWARF_GET_LOC (loc); + sloc->u.addr = DWARF_GET_MEM_LOC (loc); } return 0; } diff --git a/src/native/external/libunwind/src/sh/Gget_save_loc.c b/src/native/external/libunwind/src/sh/Gget_save_loc.c index 24d9f63bc329d7..a9a8845705146c 100644 --- a/src/native/external/libunwind/src/sh/Gget_save_loc.c +++ b/src/native/external/libunwind/src/sh/Gget_save_loc.c @@ -71,13 +71,13 @@ unw_get_save_loc (unw_cursor_t *cursor, int reg, unw_save_loc_t *sloc) if (DWARF_IS_REG_LOC (loc)) { sloc->type = UNW_SLT_REG; - sloc->u.regnum = DWARF_GET_LOC (loc); + sloc->u.regnum = DWARF_GET_REG_LOC (loc); } else #endif { sloc->type = UNW_SLT_MEMORY; - sloc->u.addr = DWARF_GET_LOC (loc); + sloc->u.addr = DWARF_GET_MEM_LOC (loc); } return 0; } diff --git a/src/native/external/libunwind/src/x86/Gget_save_loc.c b/src/native/external/libunwind/src/x86/Gget_save_loc.c index e459382f6d3cfc..849f1cd8bf8962 100644 --- a/src/native/external/libunwind/src/x86/Gget_save_loc.c +++ b/src/native/external/libunwind/src/x86/Gget_save_loc.c @@ -121,13 +121,13 @@ unw_get_save_loc (unw_cursor_t *cursor, int reg, unw_save_loc_t *sloc) if (DWARF_IS_REG_LOC (loc)) { sloc->type = UNW_SLT_REG; - sloc->u.regnum = DWARF_GET_LOC (loc); + sloc->u.regnum = DWARF_GET_REG_LOC (loc); } else #endif { sloc->type = UNW_SLT_MEMORY; - sloc->u.addr = DWARF_GET_LOC (loc); + sloc->u.addr = DWARF_GET_MEM_LOC (loc); } return 0; } diff --git a/src/native/external/libunwind/src/x86_64/Gget_save_loc.c b/src/native/external/libunwind/src/x86_64/Gget_save_loc.c index 40568700e0e401..9d51185220f554 100644 --- a/src/native/external/libunwind/src/x86_64/Gget_save_loc.c +++ b/src/native/external/libunwind/src/x86_64/Gget_save_loc.c @@ -62,13 +62,13 @@ unw_get_save_loc (unw_cursor_t *cursor, int reg, unw_save_loc_t *sloc) if (DWARF_IS_REG_LOC 
(loc)) { sloc->type = UNW_SLT_REG; - sloc->u.regnum = DWARF_GET_LOC (loc); + sloc->u.regnum = DWARF_GET_REG_LOC (loc); } else #endif { sloc->type = UNW_SLT_MEMORY; - sloc->u.addr = DWARF_GET_LOC (loc); + sloc->u.addr = DWARF_GET_MEM_LOC (loc); } return 0; } diff --git a/src/native/external/libunwind_extras/CMakeLists.txt b/src/native/external/libunwind_extras/CMakeLists.txt index 0b911094c767bb..2bfd2194c969e2 100644 --- a/src/native/external/libunwind_extras/CMakeLists.txt +++ b/src/native/external/libunwind_extras/CMakeLists.txt @@ -140,8 +140,6 @@ if(CLR_CMAKE_HOST_WIN32) # Warnings in release builds add_compile_options(-wd4068) # ignore unknown pragma warnings (gcc pragmas) - add_compile_options(-wd4242) # possible loss of data - add_compile_options(-wd4244) # possible loss of data add_compile_options(-wd4334) # 32-bit shift implicitly converted to 64 bits # Disable warning due to incorrect format specifier in debugging printf via the Debug macro From fb5e93a1a6b96b99fa4851866c9b4e713fdec408 Mon Sep 17 00:00:00 2001 From: Parker Bibus Date: Fri, 29 Mar 2024 13:37:00 -0700 Subject: [PATCH 021/132] [PERF] Update dependson for mono perf jobs (#100420) * Remove the dependson for coreclr when running mono * Add parameters to help with selecting which runs to run for testing. 
--- eng/pipelines/coreclr/perf_slow.yml | 15 +++++++++++---- eng/pipelines/coreclr/templates/perf-job.yml | 2 +- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/eng/pipelines/coreclr/perf_slow.yml b/eng/pipelines/coreclr/perf_slow.yml index 92bd74ab5c5133..4a6fa32a81f172 100644 --- a/eng/pipelines/coreclr/perf_slow.yml +++ b/eng/pipelines/coreclr/perf_slow.yml @@ -1,3 +1,11 @@ +parameters: +- name: runPrivateJobs + type: boolean + default: false +- name: runScheduledJobs + type: boolean + default: false + trigger: batch: true branches: @@ -34,14 +42,13 @@ extends: - stage: Build jobs: - - ${{ if and(ne(variables['System.TeamProject'], 'public'), in(variables['Build.Reason'], 'Schedule')) }}: - + - ${{ if and(ne(variables['System.TeamProject'], 'public'), or(in(variables['Build.Reason'], 'Schedule'), parameters.runScheduledJobs)) }}: + # build mono - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml buildConfig: release runtimeFlavor: mono - runtimeVariant: monointerpreter platforms: - linux_arm64 jobParameters: @@ -78,7 +85,7 @@ extends: logicalmachine: 'perfampere' timeoutInMinutes: 720 - - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'Schedule')) }}: + - ${{ if and(ne(variables['System.TeamProject'], 'public'), or(notin(variables['Build.Reason'], 'Schedule', 'Manual'), parameters.runPrivateJobs)) }}: # build coreclr and libraries - template: /eng/pipelines/common/platform-matrix.yml diff --git a/eng/pipelines/coreclr/templates/perf-job.yml b/eng/pipelines/coreclr/templates/perf-job.yml index 425b9926f973ed..0d3f5b46a75002 100644 --- a/eng/pipelines/coreclr/templates/perf-job.yml +++ b/eng/pipelines/coreclr/templates/perf-job.yml @@ -77,7 +77,7 @@ jobs: # Test job depends on the corresponding build job ${{ if eq(parameters.downloadSpecificBuild.buildId, '') }}: dependsOn: - - ${{ if not(in(parameters.runtimeType, 'AndroidMono', 
'iOSMono', 'iOSNativeAOT', 'wasm')) }}: + - ${{ if not(or(in(parameters.runtimeType, 'AndroidMono', 'iOSMono', 'iOSNativeAOT', 'wasm'), and(eq(parameters.runtimeType, 'mono'), ne(parameters.codeGenType, 'AOT')))) }}: - ${{ format('build_{0}{1}_{2}_{3}_{4}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig, 'coreclr') }} - ${{ if and(eq(parameters.runtimeType, 'mono'), ne(parameters.codeGenType, 'AOT')) }}: - ${{ format('build_{0}{1}_{2}_{3}_{4}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig, 'mono') }} From d1cc577eda302c25a29fdf12bf5ab23a5c419453 Mon Sep 17 00:00:00 2001 From: Andy Ayers Date: Fri, 29 Mar 2024 14:01:28 -0700 Subject: [PATCH 022/132] JIT: fix count reconstruction when a natural loop contains an improper loop (#100449) If a natural loop contains an improper loop, the cyclic probability computation for the natural loop will be an underestimate, as the cyclic probability computation assumes one pass convergence. In such cases count reconstruction may report convergence when it has not in fact converged, as any natural loop header ignores flow from its back edges, assuming their impact has been accounted for by the cyclic probability. So when a loop contains improper loops, fall back to normal iterative computation for the loop. We could use the cyclic probability initially as a convergence accelerator, but would need to switch over to not using it to guarantee full convergence. But that complicates the logic and these cases are rare. 
--- src/coreclr/jit/compiler.h | 8 ++++++++ src/coreclr/jit/fgprofilesynthesis.cpp | 6 ++++-- src/coreclr/jit/flowgraph.cpp | 9 +++++++++ 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index c0a60c66ad76bc..07431d8a14e803 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -2091,6 +2091,9 @@ class FlowGraphNaturalLoop // Can be used to store additional annotations for this loop on the side. unsigned m_index = 0; + // True if this loop contains an improper loop header + bool m_containsImproperHeader = false; + FlowGraphNaturalLoop(const FlowGraphDfsTree* dfsTree, BasicBlock* head); unsigned LoopBlockBitVecIndex(BasicBlock* block); @@ -2179,6 +2182,11 @@ class FlowGraphNaturalLoop bool ContainsBlock(BasicBlock* block); bool ContainsLoop(FlowGraphNaturalLoop* childLoop); + bool ContainsImproperHeader() const + { + return m_containsImproperHeader; + } + unsigned NumLoopBlocks(); template diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index 2de615136cfeb4..0ab8576cb24b86 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -1193,12 +1193,14 @@ void ProfileSynthesis::GaussSeidelSolver() // if (block->bbPreds != nullptr) { - // Leverage Cp for existing loop headers. + // Leverage Cp for existing loop headers, provided that + // all contained loops are proper. + // // This is an optimization to speed convergence. 
// FlowGraphNaturalLoop* const loop = m_loops->GetLoopByHeader(block); - if (loop != nullptr) + if ((loop != nullptr) && !loop->ContainsImproperHeader()) { // Sum all entry edges that aren't EH flow // diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 9b9f3a3f951418..e2f450a7cb194d 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -4386,6 +4386,15 @@ FlowGraphNaturalLoops* FlowGraphNaturalLoops::Find(const FlowGraphDfsTree* dfsTr if (!FindNaturalLoopBlocks(loop, worklist)) { loops->m_improperLoopHeaders++; + + for (FlowGraphNaturalLoop* const otherLoop : loops->InPostOrder()) + { + if (otherLoop->ContainsBlock(header)) + { + otherLoop->m_containsImproperHeader = true; + } + } + continue; } From b7d91f230846587d959fdeb711a4c24eeeca9349 Mon Sep 17 00:00:00 2001 From: Vladimir Sadov Date: Fri, 29 Mar 2024 14:52:26 -1000 Subject: [PATCH 023/132] Fix for stress failure when adjusting effective IP while stackwalking may put it on a wrong instruction. (#100376) * fix * same change for x86 and NativeAOT, get rid of ICodeManagerFlags::AbortingCall * Apply suggestions from code review Co-authored-by: Jan Kotas --------- Co-authored-by: Jan Kotas --- src/coreclr/inc/eetwain.h | 1 - src/coreclr/inc/gcinfodecoder.h | 1 - src/coreclr/jit/codegenlinear.cpp | 18 ++++++++++++++++- .../Runtime/unix/UnixNativeCodeManager.cpp | 13 +++++------- .../Runtime/windows/CoffNativeCodeManager.cpp | 13 +++++------- src/coreclr/vm/eetwain.cpp | 20 +++++++++---------- src/coreclr/vm/gc_unwind_x86.inl | 8 +------- src/coreclr/vm/stackwalk.h | 1 - 8 files changed, 37 insertions(+), 38 deletions(-) diff --git a/src/coreclr/inc/eetwain.h b/src/coreclr/inc/eetwain.h index deefe5b0ce8deb..bee2f658ee7c08 100644 --- a/src/coreclr/inc/eetwain.h +++ b/src/coreclr/inc/eetwain.h @@ -89,7 +89,6 @@ enum ICodeManagerFlags ExecutionAborted = 0x0002, // execution of this function has been aborted // (i.e. 
it will not continue execution at the // current location) - AbortingCall = 0x0004, // The current call will never return UpdateAllRegs = 0x0008, // update full register set CodeAltered = 0x0010, // code of that function might be altered // (e.g. by debugger), need to call EE diff --git a/src/coreclr/inc/gcinfodecoder.h b/src/coreclr/inc/gcinfodecoder.h index e3ddecae8fa7d5..b42f5aae8f6034 100644 --- a/src/coreclr/inc/gcinfodecoder.h +++ b/src/coreclr/inc/gcinfodecoder.h @@ -179,7 +179,6 @@ enum ICodeManagerFlags ExecutionAborted = 0x0002, // execution of this function has been aborted // (i.e. it will not continue execution at the // current location) - AbortingCall = 0x0004, // The current call will never return ParentOfFuncletStackFrame = 0x0040, // A funclet for this frame was previously reported diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index 78df10811a4c24..5aa961e3bcd077 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -712,7 +712,9 @@ void CodeGen::genCodeForBBlist() if ((call != nullptr) && (call->gtOper == GT_CALL)) { - if ((call->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0) + if ((call->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0 || + ((call->AsCall()->gtCallType == CT_HELPER) && + Compiler::s_helperCallProperties.AlwaysThrow(call->AsCall()->GetHelperNum()))) { instGen(INS_BREAKPOINT); // This should never get executed } @@ -756,6 +758,20 @@ void CodeGen::genCodeForBBlist() case BBJ_ALWAYS: { + GenTree* call = block->lastNode(); + if ((call != nullptr) && (call->gtOper == GT_CALL)) + { + if ((call->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0 || + ((call->AsCall()->gtCallType == CT_HELPER) && + Compiler::s_helperCallProperties.AlwaysThrow(call->AsCall()->GetHelperNum()))) + { + // NOTE: We should probably never see a BBJ_ALWAYS block ending with a throw in a first place. 
+ // If that is fixed, this condition can be just an assert. + // For the reasons why we insert a BP, see the similar code in "case BBJ_THROW:" above. + instGen(INS_BREAKPOINT); // This should never get executed + } + } + // If this block jumps to the next one, we might be able to skip emitting the jump if (block->CanRemoveJumpToNext(compiler)) { diff --git a/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp b/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp index c9e1adf6b5f759..6759662d5683a3 100644 --- a/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp +++ b/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp @@ -212,14 +212,11 @@ void UnixNativeCodeManager::EnumGcRefs(MethodInfo * pMethodInfo, ASSERT(((uintptr_t)codeOffset & 1) == 0); #endif - if (!isActiveStackFrame) + bool executionAborted = ((UnixNativeMethodInfo*)pMethodInfo)->executionAborted; + + if (!isActiveStackFrame && !executionAborted) { - // If we are not in the active method, we are currently pointing - // to the return address. That may not be reachable after a call (if call does not return) - // or reachable via a jump and thus have a different live set. - // Therefore we simply adjust the offset to inside of call instruction. 
- // NOTE: The GcInfoDecoder depends on this; if you change it, you must - // revisit the GcInfoEncoder/Decoder + // the reasons for this adjustment are explained in EECodeManager::EnumGcRefs codeOffset--; } @@ -230,7 +227,7 @@ void UnixNativeCodeManager::EnumGcRefs(MethodInfo * pMethodInfo, ); ICodeManagerFlags flags = (ICodeManagerFlags)0; - if (((UnixNativeMethodInfo*)pMethodInfo)->executionAborted) + if (executionAborted) flags = ICodeManagerFlags::ExecutionAborted; if (IsFilter(pMethodInfo)) diff --git a/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp b/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp index d3c2ef42241743..497a09ce815ff4 100644 --- a/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp +++ b/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp @@ -435,8 +435,10 @@ void CoffNativeCodeManager::EnumGcRefs(MethodInfo * pMethodInfo, PTR_uint8_t gcInfo; uint32_t codeOffset = GetCodeOffset(pMethodInfo, safePointAddress, &gcInfo); + bool executionAborted = ((CoffNativeMethodInfo *)pMethodInfo)->executionAborted; + ICodeManagerFlags flags = (ICodeManagerFlags)0; - if (((CoffNativeMethodInfo *)pMethodInfo)->executionAborted) + if (executionAborted) flags = ICodeManagerFlags::ExecutionAborted; if (IsFilter(pMethodInfo)) @@ -446,14 +448,9 @@ void CoffNativeCodeManager::EnumGcRefs(MethodInfo * pMethodInfo, flags = (ICodeManagerFlags)(flags | ICodeManagerFlags::ActiveStackFrame); #ifdef USE_GC_INFO_DECODER - if (!isActiveStackFrame) + if (!isActiveStackFrame && !executionAborted) { - // If we are not in the active method, we are currently pointing - // to the return address. That may not be reachable after a call (if call does not return) - // or reachable via a jump and thus have a different live set. - // Therefore we simply adjust the offset to inside of call instruction. 
- // NOTE: The GcInfoDecoder depends on this; if you change it, you must - // revisit the GcInfoEncoder/Decoder + // the reasons for this adjustment are explained in EECodeManager::EnumGcRefs codeOffset--; } diff --git a/src/coreclr/vm/eetwain.cpp b/src/coreclr/vm/eetwain.cpp index 545bdf7f721025..1665e1c86cf6f0 100644 --- a/src/coreclr/vm/eetwain.cpp +++ b/src/coreclr/vm/eetwain.cpp @@ -1466,17 +1466,15 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD, } else { - /* However if ExecutionAborted, then this must be one of the - * ExceptionFrames. Handle accordingly - */ - _ASSERTE(!(flags & AbortingCall) || !(flags & ActiveStackFrame)); - - if (flags & AbortingCall) - { - curOffs--; - LOG((LF_GCINFO, LL_INFO1000, "Adjusted GC reporting offset due to flags ExecutionAborted && AbortingCall. Now reporting GC refs for %s at offset %04x.\n", - methodName, curOffs)); - } + // Since we are aborting execution, we are either in a frame that actually faulted or in a throwing call. + // * We do not need to adjust in a leaf + // * A throwing call will have unreachable after it, thus GC info is the same as before the call. + // + // Either way we do not need to adjust. + + // NOTE: only fully interruptible methods may need to report anything here as without + // exception handling all current local variables are already unreachable. + // EnumerateLiveSlots will shortcircuit the partially interruptible case just a bit later. } // Check if we have been given an override value for relOffset diff --git a/src/coreclr/vm/gc_unwind_x86.inl b/src/coreclr/vm/gc_unwind_x86.inl index c5ba9593446e2d..28c7253f382676 100644 --- a/src/coreclr/vm/gc_unwind_x86.inl +++ b/src/coreclr/vm/gc_unwind_x86.inl @@ -3686,13 +3686,7 @@ bool EnumGcRefsX86(PREGDISPLAY pContext, } else { - /* However if ExecutionAborted, then this must be one of the - * ExceptionFrames. Handle accordingly - */ - _ASSERTE(!(flags & AbortingCall) || !(flags & ActiveStackFrame)); - - newCurOffs = (flags & AbortingCall) ? 
curOffs-1 // inside "call" - : curOffs; // at faulting instr, or start of "try" + newCurOffs = curOffs; } ptrOffs = 0; diff --git a/src/coreclr/vm/stackwalk.h b/src/coreclr/vm/stackwalk.h index ac37c6679e83c2..736ca2653ee77e 100644 --- a/src/coreclr/vm/stackwalk.h +++ b/src/coreclr/vm/stackwalk.h @@ -284,7 +284,6 @@ class CrawlFrame if (!HasFaulted() && !IsIPadjusted()) { _ASSERTE(!(flags & ActiveStackFrame)); - flags |= AbortingCall; } } From cc7bf831f02cad241547ebea5c56c82f12a50999 Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Sat, 30 Mar 2024 20:40:47 +0200 Subject: [PATCH 024/132] Define _TIME_BITS=64 globally (#100461) Contributes to #96460 --- eng/native/configurecompiler.cmake | 2 ++ 1 file changed, 2 insertions(+) diff --git a/eng/native/configurecompiler.cmake b/eng/native/configurecompiler.cmake index 57d3aed47158a8..f7144a028a80d2 100644 --- a/eng/native/configurecompiler.cmake +++ b/eng/native/configurecompiler.cmake @@ -443,6 +443,8 @@ endif(CLR_CMAKE_HOST_WIN32) # Unconditionally define _FILE_OFFSET_BITS as 64 on all platforms. add_definitions(-D_FILE_OFFSET_BITS=64) +# Unconditionally define _TIME_BITS as 64 on all platforms. 
+add_definitions(-D_TIME_BITS=64) # Architecture specific files folder name if (CLR_CMAKE_TARGET_ARCH_AMD64) From 1949bd2e017a0dc1e48b7cfed263051a6ed147a5 Mon Sep 17 00:00:00 2001 From: Dong-Heon Jung Date: Mon, 1 Apr 2024 03:52:53 +0900 Subject: [PATCH 025/132] [RISC-V] Fix intermittent failures due to unalignment access (#100431) * [RISC-V] Fix intermittent failures due to unalignment access * [RISC-V] Update for ALIGN_ACCESS * Apply suggestions from code review --------- Co-authored-by: Jan Kotas --- src/coreclr/gcinfo/CMakeLists.txt | 6 ++++-- src/coreclr/inc/stdmacros.h | 4 ++-- src/coreclr/pal/inc/pal_endian.h | 5 +++++ 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/coreclr/gcinfo/CMakeLists.txt b/src/coreclr/gcinfo/CMakeLists.txt index 3885cc14a0a4c3..cdc4ae794c8e85 100644 --- a/src/coreclr/gcinfo/CMakeLists.txt +++ b/src/coreclr/gcinfo/CMakeLists.txt @@ -77,8 +77,10 @@ if (CLR_CMAKE_TARGET_ARCH_RISCV64) create_gcinfo_lib(TARGET gcinfo_unix_riscv64 OS unix ARCH riscv64) endif (CLR_CMAKE_TARGET_ARCH_RISCV64) -create_gcinfo_lib(TARGET gcinfo_universal_arm OS universal ARCH arm) -create_gcinfo_lib(TARGET gcinfo_win_x86 OS win ARCH x86) +if (NOT CLR_CMAKE_TARGET_ARCH_RISCV64) + create_gcinfo_lib(TARGET gcinfo_universal_arm OS universal ARCH arm) + create_gcinfo_lib(TARGET gcinfo_win_x86 OS win ARCH x86) +endif (NOT CLR_CMAKE_TARGET_ARCH_RISCV64) if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX) create_gcinfo_lib(TARGET gcinfo_unix_x86 OS unix ARCH x86) diff --git a/src/coreclr/inc/stdmacros.h b/src/coreclr/inc/stdmacros.h index 7e4ae79c535c5f..79f9225321fe18 100644 --- a/src/coreclr/inc/stdmacros.h +++ b/src/coreclr/inc/stdmacros.h @@ -159,9 +159,9 @@ #define DBG_ADDR(ptr) (DWORD)((UINT_PTR)(ptr)) #endif // HOST_64BIT -#ifdef TARGET_ARM +#if defined(HOST_ARM) || defined(HOST_RISCV64) #define ALIGN_ACCESS ((1< Date: Sun, 31 Mar 2024 20:26:30 -0600 Subject: [PATCH 026/132] Add OptRepeat phase start/end indications (#100471) When 
OptRepeat is active, for phases that are being repeated, annotate the JitDump phase start/end output with the OptRepeat iteration number. --- src/coreclr/jit/compiler.cpp | 42 ++++++++++++++++++++++++------------ src/coreclr/jit/compiler.h | 9 ++++++-- src/coreclr/jit/phase.cpp | 20 +++++++++++++++-- 3 files changed, 53 insertions(+), 18 deletions(-) diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 64f793f1349b93..fe901ffdec299e 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -2903,10 +2903,13 @@ void Compiler::compInitOptions(JitFlags* jitFlags) opts.disCodeBytes = false; #ifdef OPT_CONFIG - opts.optRepeat = false; - opts.optRepeatCount = 1; + opts.optRepeat = false; #endif // OPT_CONFIG + opts.optRepeatIteration = 0; + opts.optRepeatCount = 1; + opts.optRepeatActive = false; + #ifdef DEBUG opts.dspInstrs = false; opts.dspLines = false; @@ -4993,7 +4996,6 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl bool doVNBasedIntrinExpansion = true; bool doRangeAnalysis = true; bool doVNBasedDeadStoreRemoval = true; - int iterations = 1; #if defined(OPT_CONFIG) doSsa = (JitConfig.JitDoSsa() != 0); @@ -5008,15 +5010,25 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl doVNBasedIntrinExpansion = doValueNum; doRangeAnalysis = doAssertionProp && (JitConfig.JitDoRangeAnalysis() != 0); doVNBasedDeadStoreRemoval = doValueNum && (JitConfig.JitDoVNBasedDeadStoreRemoval() != 0); +#endif // defined(OPT_CONFIG) +#ifdef DEBUG if (opts.optRepeat) { - iterations = opts.optRepeatCount; + opts.optRepeatActive = true; } -#endif // defined(OPT_CONFIG) +#endif // DEBUG - while (iterations > 0) + while (++opts.optRepeatIteration <= opts.optRepeatCount) { +#ifdef DEBUG + if (verbose && opts.optRepeat) + { + printf("\n*************** JitOptRepeat: iteration %d of %d\n\n", opts.optRepeatIteration, + opts.optRepeatCount); + } +#endif // DEBUG + fgModified = false; if 
(doSsa) @@ -5128,19 +5140,14 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS2, &Compiler::fgComputeEdgeWeights); } -#ifdef DEBUG - if (verbose && opts.optRepeat) - { - printf("\n*************** JitOptRepeat: iterations remaining: %d\n\n", iterations - 1); - } -#endif // DEBUG - // Iterate if requested, resetting annotations first. - if (--iterations == 0) + if (opts.optRepeatIteration == opts.optRepeatCount) { break; } + assert(opts.optRepeat); + // We may have optimized away the canonical entry BB that SSA // depends on above, so if we are going for another iteration then // make sure we still have a canonical entry. @@ -5158,6 +5165,13 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl } #endif // DEBUG } + +#ifdef DEBUG + if (opts.optRepeat) + { + opts.optRepeatActive = false; + } +#endif // DEBUG } optLoopsCanonical = false; diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 07431d8a14e803..5129b228a9c3b2 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -9875,10 +9875,15 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool altJit; // True if we are an altjit and are compiling this method #ifdef OPT_CONFIG - bool optRepeat; // Repeat optimizer phases k times - int optRepeatCount; // How many times to repeat. By default, comes from JitConfig.JitOptRepeatCount(). + bool optRepeat; // Repeat optimizer phases k times #endif + int optRepeatIteration; // The current optRepeat iteration: from 0 to optRepeatCount. optRepeatCount can be + // zero, in which case no optimizations in the set of repeated optimizations are + // performed. optRepeatIteration will only be zero if optRepeatCount is zero. + int optRepeatCount; // How many times to repeat. By default, comes from JitConfig.JitOptRepeatCount(). 
+ bool optRepeatActive; // `true` if we are in the range of phases being repeated. + bool disAsm; // Display native code as it is generated bool disTesting; // Display BEGIN METHOD/END METHOD anchors for disasm testing bool dspDiffable; // Makes the Jit Dump 'diff-able' (currently uses same DOTNET_* flag as disDiffable) diff --git a/src/coreclr/jit/phase.cpp b/src/coreclr/jit/phase.cpp index 78bf4eec09c752..199167d7d0c52b 100644 --- a/src/coreclr/jit/phase.cpp +++ b/src/coreclr/jit/phase.cpp @@ -80,7 +80,15 @@ void Phase::PrePhase() } else { - printf("\n*************** Starting PHASE %s\n", m_name); + if (comp->opts.optRepeatActive) + { + printf("\n*************** Starting PHASE %s (OptRepeat iteration %d of %d)\n", m_name, + comp->opts.optRepeatIteration, comp->opts.optRepeatCount); + } + else + { + printf("\n*************** Starting PHASE %s\n", m_name); + } } } #endif // DEBUG @@ -124,7 +132,15 @@ void Phase::PostPhase(PhaseStatus status) } else { - printf("\n*************** Finishing PHASE %s%s\n", m_name, statusMessage); + if (comp->opts.optRepeatActive) + { + printf("\n*************** Finishing PHASE %s%s (OptRepeat iteration %d of %d)\n", m_name, statusMessage, + comp->opts.optRepeatIteration, comp->opts.optRepeatCount); + } + else + { + printf("\n*************** Finishing PHASE %s%s\n", m_name, statusMessage); + } } if (doPostPhase && doPostPhaseDumps) From ec4437be46d8b90bc9fa6740c556bd860d9fe5ab Mon Sep 17 00:00:00 2001 From: Filip Navara Date: Mon, 1 Apr 2024 09:50:56 +0200 Subject: [PATCH 027/132] [NativeAOT] Use SoftFP ABI on linux-bionic-arm (#100392) Android uses the SoftFP ABI on ARM32 (https://android.googlesource.com/platform/ndk/+/ndk-r12-release/docs/HardFloatAbi.md). 
- Copy Crossgen2 logic for "armel" target into ILCompiler - Use "armel" for linux-bionic-arm ILC target --- ...icrosoft.DotNet.ILCompiler.SingleEntry.targets | 4 ++++ .../Microsoft.NETCore.Native.targets | 2 +- .../Compiler/ObjectWriter/ElfObjectWriter.cs | 4 +++- .../tools/aot/ILCompiler/ILCompilerRootCommand.cs | 15 ++++++++++++++- src/coreclr/tools/aot/ILCompiler/Program.cs | 2 +- 5 files changed, 23 insertions(+), 4 deletions(-) diff --git a/src/coreclr/nativeaot/BuildIntegration/Microsoft.DotNet.ILCompiler.SingleEntry.targets b/src/coreclr/nativeaot/BuildIntegration/Microsoft.DotNet.ILCompiler.SingleEntry.targets index 62f159c5dc5268..c2627502f9ddd6 100644 --- a/src/coreclr/nativeaot/BuildIntegration/Microsoft.DotNet.ILCompiler.SingleEntry.targets +++ b/src/coreclr/nativeaot/BuildIntegration/Microsoft.DotNet.ILCompiler.SingleEntry.targets @@ -30,6 +30,10 @@ <_linuxToken>linux- <_linuxLibcFlavor Condition="$(_targetOS.StartsWith($(_linuxToken)))">$(_targetOS.SubString($(_linuxToken.Length))) <_targetOS Condition="$(_targetOS.StartsWith($(_linuxToken)))">linux + + + <_targetArchitectureWithAbi>$(_targetArchitecture) + <_targetArchitectureWithAbi Condition="'$(_linuxLibcFlavor)' == 'bionic' and '$(_targetArchitecture)' == 'arm'">armel diff --git a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets index 75f579fc4a8617..988a02dd8792d8 100644 --- a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets +++ b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets @@ -227,7 +227,7 @@ The .NET Foundation licenses this file to you under the MIT license. 
- + diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/ElfObjectWriter.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/ElfObjectWriter.cs index 11303f8eb2759d..503985d92934e4 100644 --- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/ElfObjectWriter.cs +++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/ElfObjectWriter.cs @@ -35,6 +35,7 @@ internal sealed class ElfObjectWriter : UnixObjectWriter { private readonly bool _useInlineRelocationAddends; private readonly ushort _machine; + private readonly bool _useSoftFPAbi; private readonly List _sections = new(); private readonly List _symbols = new(); private uint _localSymbolCount; @@ -61,6 +62,7 @@ public ElfObjectWriter(NodeFactory factory, ObjectWritingOptions options) _ => throw new NotSupportedException("Unsupported architecture") }; _useInlineRelocationAddends = _machine is EM_386 or EM_ARM; + _useSoftFPAbi = _machine is EM_ARM && factory.Target.Abi == TargetAbi.NativeAotArmel; // By convention the symbol table starts with empty symbol _symbols.Add(new ElfSymbol {}); @@ -513,7 +515,7 @@ private protected override void EmitSectionsAndLayout() attributesBuilder.WriteAttribute(Tag_ABI_FP_number_model, 3); // IEEE 754 attributesBuilder.WriteAttribute(Tag_ABI_align_needed, 1); // 8-byte attributesBuilder.WriteAttribute(Tag_ABI_align_preserved, 1); // 8-byte - attributesBuilder.WriteAttribute(Tag_ABI_VFP_args, 1); // FP parameters passes in VFP registers + attributesBuilder.WriteAttribute(Tag_ABI_VFP_args, _useSoftFPAbi ? 
0ul : 1ul); // FP parameters passes in VFP registers attributesBuilder.WriteAttribute(Tag_CPU_unaligned_access, 0); // None attributesBuilder.EndSection(); } diff --git a/src/coreclr/tools/aot/ILCompiler/ILCompilerRootCommand.cs b/src/coreclr/tools/aot/ILCompiler/ILCompilerRootCommand.cs index 917f1b612942ed..f38811fd93d2d8 100644 --- a/src/coreclr/tools/aot/ILCompiler/ILCompilerRootCommand.cs +++ b/src/coreclr/tools/aot/ILCompiler/ILCompilerRootCommand.cs @@ -148,7 +148,7 @@ internal sealed class ILCompilerRootCommand : CliRootCommand public CliOption RootDefaultAssemblies { get; } = new("--defaultrooting") { Description = "Root assemblies that are not marked [IsTrimmable]" }; public CliOption TargetArchitecture { get; } = - new("--targetarch") { CustomParser = result => Helpers.GetTargetArchitecture(result.Tokens.Count > 0 ? result.Tokens[0].Value : null), DefaultValueFactory = result => Helpers.GetTargetArchitecture(result.Tokens.Count > 0 ? result.Tokens[0].Value : null), Description = "Target architecture for cross compilation", HelpName = "arg" }; + new("--targetarch") { CustomParser = MakeTargetArchitecture, DefaultValueFactory = MakeTargetArchitecture, Description = "Target architecture for cross compilation", HelpName = "arg" }; public CliOption TargetOS { get; } = new("--targetos") { CustomParser = result => Helpers.GetTargetOS(result.Tokens.Count > 0 ? result.Tokens[0].Value : null), DefaultValueFactory = result => Helpers.GetTargetOS(result.Tokens.Count > 0 ? 
result.Tokens[0].Value : null), Description = "Target OS for cross compilation", HelpName = "arg" }; public CliOption JitPath { get; } = @@ -170,6 +170,7 @@ internal sealed class ILCompilerRootCommand : CliRootCommand public OptimizationMode OptimizationMode { get; private set; } public ParseResult Result; + public static bool IsArmel { get; private set; } public ILCompilerRootCommand(string[] args) : base(".NET Native IL Compiler") { @@ -373,6 +374,18 @@ public static IEnumerable> GetExtendedHelp(HelpContext _ }; } + private static TargetArchitecture MakeTargetArchitecture(ArgumentResult result) + { + string firstToken = result.Tokens.Count > 0 ? result.Tokens[0].Value : null; + if (firstToken != null && firstToken.Equals("armel", StringComparison.OrdinalIgnoreCase)) + { + IsArmel = true; + return Internal.TypeSystem.TargetArchitecture.ARM; + } + + return Helpers.GetTargetArchitecture(firstToken); + } + private static int MakeParallelism(ArgumentResult result) { if (result.Tokens.Count > 0) diff --git a/src/coreclr/tools/aot/ILCompiler/Program.cs b/src/coreclr/tools/aot/ILCompiler/Program.cs index 2cf88e34a65e0f..baf589f6af4945 100644 --- a/src/coreclr/tools/aot/ILCompiler/Program.cs +++ b/src/coreclr/tools/aot/ILCompiler/Program.cs @@ -113,7 +113,7 @@ public int Run() SharedGenericsMode genericsMode = SharedGenericsMode.CanonicalReferenceTypes; var simdVectorLength = instructionSetSupport.GetVectorTSimdVector(); - var targetAbi = TargetAbi.NativeAot; + var targetAbi = ILCompilerRootCommand.IsArmel ? TargetAbi.NativeAotArmel : TargetAbi.NativeAot; var targetDetails = new TargetDetails(targetArchitecture, targetOS, targetAbi, simdVectorLength); CompilerTypeSystemContext typeSystemContext = new CompilerTypeSystemContext(targetDetails, genericsMode, supportsReflection ? 
DelegateFeature.All : 0, From c5200b65d4b0e7c2f5dd9421ac79eb892ce9b428 Mon Sep 17 00:00:00 2001 From: Filip Navara Date: Mon, 1 Apr 2024 13:46:13 +0200 Subject: [PATCH 028/132] Enable NativeAOT win-x86 runtime tests (#99688) --- eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml | 4 +++- .../Microsoft.NETCore.Native.Publish.targets | 6 +++--- .../Microsoft.DotNet.ILCompiler/ILCompilerRIDs.props | 1 + 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml b/eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml index de23519f9c62ed..e8bfd86cd81dd2 100644 --- a/eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml +++ b/eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml @@ -57,6 +57,7 @@ extends: platforms: - windows_x64 - windows_arm64 + - windows_x86 - osx_x64 - osx_arm64 - linux_x64 @@ -164,6 +165,7 @@ extends: buildConfig: Checked platforms: - windows_x64 + - windows_x86 - linux_x64 - linux_arm variables: @@ -172,7 +174,7 @@ extends: - name: timeoutPerTestCollectionInMinutes value: 180 jobParameters: - timeoutInMinutes: 240 + timeoutInMinutes: 300 # doesn't normally take this long, but we have had Helix queues backed up for over an hour nameSuffix: NativeAOT_Pri0 buildArgs: -s clr.aot+host.native+libs -rc $(_BuildConfig) -lc Release -hc Release /p:RunAnalyzers=false postBuildSteps: diff --git a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Publish.targets b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Publish.targets index f57c573057cec8..d8783480def1f7 100644 --- a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Publish.targets +++ b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Publish.targets @@ -64,11 +64,11 @@ - - + diff --git a/src/installer/pkg/projects/Microsoft.DotNet.ILCompiler/ILCompilerRIDs.props b/src/installer/pkg/projects/Microsoft.DotNet.ILCompiler/ILCompilerRIDs.props index 7c32807ea03ffb..f1f6193300baf4 100644 
--- a/src/installer/pkg/projects/Microsoft.DotNet.ILCompiler/ILCompilerRIDs.props +++ b/src/installer/pkg/projects/Microsoft.DotNet.ILCompiler/ILCompilerRIDs.props @@ -12,6 +12,7 @@ + From c7a3acb3e4e0b6768a730b75e0f53ac478527fb5 Mon Sep 17 00:00:00 2001 From: Filip Navara Date: Mon, 1 Apr 2024 14:22:46 +0200 Subject: [PATCH 029/132] Build win-x86 NativeAOT runtime pack (#100195) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Michal Strehovský --- eng/pipelines/runtime-official.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/eng/pipelines/runtime-official.yml b/eng/pipelines/runtime-official.yml index 0cc897455e2e06..d77030c9da9668 100644 --- a/eng/pipelines/runtime-official.yml +++ b/eng/pipelines/runtime-official.yml @@ -318,6 +318,7 @@ extends: - linux_bionic_x64 - linux_bionic_arm - linux_bionic_arm64 + - windows_x86 - windows_x64 - windows_arm64 jobParameters: From 0fd23aa2d5913df90c3c57ffef5c2dcdf119d043 Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Mon, 1 Apr 2024 07:23:26 -0700 Subject: [PATCH 030/132] Fix non-deterministic trigger of struct type initializers (#100469) * Add regression test * Skip cctor triggers for boxing * Delete unnecessary CheckRestore * Better test * Apply suggestions from code review Co-authored-by: Aaron Robinson --------- Co-authored-by: Aaron Robinson --- src/coreclr/vm/jithelpers.cpp | 20 ++++------ src/coreclr/vm/methodtable.cpp | 6 ++- src/coreclr/vm/object.cpp | 7 +++- .../JitBlue/Runtime_100466/Runtime_100466.cs | 37 +++++++++++++++++++ .../Runtime_100466/Runtime_100466.csproj | 5 +++ 5 files changed, 61 insertions(+), 14 deletions(-) create mode 100644 src/tests/JIT/Regression/JitBlue/Runtime_100466/Runtime_100466.cs create mode 100644 src/tests/JIT/Regression/JitBlue/Runtime_100466/Runtime_100466.csproj diff --git a/src/coreclr/vm/jithelpers.cpp b/src/coreclr/vm/jithelpers.cpp index 2c77b3c4d0d344..9f2ed013aa1db0 100644 --- a/src/coreclr/vm/jithelpers.cpp +++ 
b/src/coreclr/vm/jithelpers.cpp @@ -1183,7 +1183,7 @@ NOINLINE HCIMPL1(void, JIT_InitClass_Framed, MethodTable* pMT) // already have initialized the Global Class CONSISTENCY_CHECK(!pMT->IsGlobalClass()); - pMT->CheckRestore(); + _ASSERTE(pMT->IsFullyLoaded()); pMT->CheckRunClassInitThrowing(); HELPER_METHOD_FRAME_END(); @@ -1236,7 +1236,7 @@ HCIMPL2(void, JIT_InitInstantiatedClass, CORINFO_CLASS_HANDLE typeHnd_, CORINFO_ pMT = pTemplateMT; } - pMT->CheckRestore(); + _ASSERTE(pMT->IsFullyLoaded()); pMT->EnsureInstanceActive(); pMT->CheckRunClassInitThrowing(); HELPER_METHOD_FRAME_END(); @@ -1488,7 +1488,7 @@ NOINLINE HCIMPL1(void*, JIT_GetGenericsGCStaticBase_Framed, MethodTable *pMT) HELPER_METHOD_FRAME_BEGIN_RET_0(); - pMT->CheckRestore(); + _ASSERTE(pMT->IsFullyLoaded()); pMT->CheckRunClassInitThrowing(); @@ -1549,7 +1549,7 @@ NOINLINE HCIMPL1(void*, JIT_GetGenericsNonGCStaticBase_Framed, MethodTable *pMT) HELPER_METHOD_FRAME_BEGIN_RET_0(); - pMT->CheckRestore(); + _ASSERTE(pMT->IsFullyLoaded()); // If pMT refers to a method table that requires some initialization work, // then pMT cannot to a method table that is shared by generic instantiations, @@ -1629,9 +1629,7 @@ HCIMPL1(void*, JIT_GetNonGCThreadStaticBase_Helper, MethodTable * pMT) HELPER_METHOD_FRAME_BEGIN_RET_0(); - // For generics, we need to call CheckRestore() for some reason - if (pMT->HasGenericsStaticsInfo()) - pMT->CheckRestore(); + _ASSERTE(pMT->IsFullyLoaded()); // Get the TLM ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLM(pMT); @@ -1661,9 +1659,7 @@ HCIMPL1(void*, JIT_GetGCThreadStaticBase_Helper, MethodTable * pMT) HELPER_METHOD_FRAME_BEGIN_RET_0(); - // For generics, we need to call CheckRestore() for some reason - if (pMT->HasGenericsStaticsInfo()) - pMT->CheckRestore(); + _ASSERTE(pMT->IsFullyLoaded()); // Get the TLM ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLM(pMT); @@ -2763,7 +2759,7 @@ HCIMPL3(Object*, JIT_NewMDArr, CORINFO_CLASS_HANDLE classHnd, 
unsigned dwNumArgs HELPER_METHOD_FRAME_BEGIN_RET_1(ret); // Set up a frame TypeHandle typeHnd(classHnd); - typeHnd.CheckRestore(); + _ASSERTE(typeHnd.IsFullyLoaded()); _ASSERTE(typeHnd.GetMethodTable()->IsArray()); ret = AllocateArrayEx(typeHnd, pArgList, dwNumArgs); @@ -2826,7 +2822,7 @@ HCIMPL2(Object*, JIT_Box, CORINFO_CLASS_HANDLE type, void* unboxedData) MethodTable *pMT = clsHnd.AsMethodTable(); - pMT->CheckRestore(); + _ASSERTE(pMT->IsFullyLoaded()); _ASSERTE (pMT->IsValueType() && !pMT->IsByRefLike()); diff --git a/src/coreclr/vm/methodtable.cpp b/src/coreclr/vm/methodtable.cpp index 007e5cd949d35a..a59440400b6341 100644 --- a/src/coreclr/vm/methodtable.cpp +++ b/src/coreclr/vm/methodtable.cpp @@ -4963,7 +4963,11 @@ OBJECTREF MethodTable::FastBox(void** data) if (IsNullable()) return Nullable::Box(*data, this); - OBJECTREF ref = Allocate(); + // MethodTable::Allocate() triggers cctors, so to avoid that we + // allocate directly without triggering cctors - boxing should not trigger cctors. + EnsureInstanceActive(); + OBJECTREF ref = AllocateObject(this); + CopyValueClass(ref->UnBox(), *data, this); return ref; } diff --git a/src/coreclr/vm/object.cpp b/src/coreclr/vm/object.cpp index 213a9cde5925b3..2207b728ce1e7f 100644 --- a/src/coreclr/vm/object.cpp +++ b/src/coreclr/vm/object.cpp @@ -1636,7 +1636,12 @@ OBJECTREF Nullable::Box(void* srcPtr, MethodTable* nullableMT) OBJECTREF obj = 0; GCPROTECT_BEGININTERIOR (src); MethodTable* argMT = nullableMT->GetInstantiation()[0].AsMethodTable(); - obj = argMT->Allocate(); + + // MethodTable::Allocate() triggers cctors, so to avoid that we + // allocate directly without triggering cctors - boxing should not trigger cctors. 
+ argMT->EnsureInstanceActive(); + obj = AllocateObject(argMT); + CopyValueClass(obj->UnBox(), src->ValueAddr(nullableMT), argMT); GCPROTECT_END (); diff --git a/src/tests/JIT/Regression/JitBlue/Runtime_100466/Runtime_100466.cs b/src/tests/JIT/Regression/JitBlue/Runtime_100466/Runtime_100466.cs new file mode 100644 index 00000000000000..e7d9883c47bd2a --- /dev/null +++ b/src/tests/JIT/Regression/JitBlue/Runtime_100466/Runtime_100466.cs @@ -0,0 +1,37 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System; +using Xunit; + +public static class Runtime_100466 +{ + [Fact] + public static int TestBoxingDoesNotTriggerStaticTypeInitializers() + { + Foo foo = new Foo(); + ((object)foo).ToString(); + return s_cctorTriggered ? -1 : 100; + } + + [Fact] + public static int TestNullableBoxingDoesNotTriggerStaticTypeInitializers() + { + FooNullable? nullable = new FooNullable(); + ((object)nullable).ToString(); + return s_cctorTriggeredNullable ? 
-1 : 100; + } + + private static bool s_cctorTriggered; + private static bool s_cctorTriggeredNullable; + + private struct Foo + { + static Foo() => s_cctorTriggered = true; + } + + private struct FooNullable + { + static FooNullable() => s_cctorTriggeredNullable = true; + } +} diff --git a/src/tests/JIT/Regression/JitBlue/Runtime_100466/Runtime_100466.csproj b/src/tests/JIT/Regression/JitBlue/Runtime_100466/Runtime_100466.csproj new file mode 100644 index 00000000000000..6c8c63b83414ad --- /dev/null +++ b/src/tests/JIT/Regression/JitBlue/Runtime_100466/Runtime_100466.csproj @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file From a027afb198c787fbfbca0b1d272ce41d9ab7d27a Mon Sep 17 00:00:00 2001 From: "dotnet-maestro[bot]" <42748379+dotnet-maestro[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 11:39:00 -0700 Subject: [PATCH 031/132] [main] Update dependencies from dotnet/roslyn-analyzers (#100283) * Update dependencies from https://github.com/dotnet/roslyn-analyzers build Microsoft.CodeAnalysis.Analyzers , Microsoft.CodeAnalysis.NetAnalyzers From Version 3.11.0-beta1.24170.2 -> To Version 3.11.0-beta1.24175.3 * Update dependencies from https://github.com/dotnet/roslyn-analyzers build 20240326.7 Microsoft.CodeAnalysis.Analyzers , Microsoft.CodeAnalysis.NetAnalyzers From Version 3.11.0-beta1.24170.2 -> To Version 3.11.0-beta1.24176.7 * Update dependencies from https://github.com/dotnet/roslyn-analyzers build 20240327.1 Microsoft.CodeAnalysis.Analyzers , Microsoft.CodeAnalysis.NetAnalyzers From Version 3.11.0-beta1.24170.2 -> To Version 3.11.0-beta1.24177.1 * Update dependencies from https://github.com/dotnet/roslyn-analyzers build 20240327.1 Microsoft.CodeAnalysis.Analyzers , Microsoft.CodeAnalysis.NetAnalyzers From Version 3.11.0-beta1.24170.2 -> To Version 3.11.0-beta1.24177.1 * Update dependencies from https://github.com/dotnet/roslyn-analyzers build 20240327.1 Microsoft.CodeAnalysis.Analyzers , Microsoft.CodeAnalysis.NetAnalyzers From Version 
3.11.0-beta1.24170.2 -> To Version 3.11.0-beta1.24177.1 * Update dependencies from https://github.com/dotnet/roslyn-analyzers build 20240327.1 Microsoft.CodeAnalysis.Analyzers , Microsoft.CodeAnalysis.NetAnalyzers From Version 3.11.0-beta1.24170.2 -> To Version 3.11.0-beta1.24177.1 * Update dependencies from https://github.com/dotnet/roslyn-analyzers build 20240327.1 Microsoft.CodeAnalysis.Analyzers , Microsoft.CodeAnalysis.NetAnalyzers From Version 3.11.0-beta1.24170.2 -> To Version 3.11.0-beta1.24177.1 --------- Co-authored-by: dotnet-maestro[bot] --- eng/Version.Details.xml | 8 ++++---- eng/Versions.props | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml index 3aba855572a71c..308812eeb57d37 100644 --- a/eng/Version.Details.xml +++ b/eng/Version.Details.xml @@ -372,13 +372,13 @@ https://github.com/dotnet/roslyn 77372c66fd54927312b5b0a2e399e192f74445c9 - + https://github.com/dotnet/roslyn-analyzers - 29bdbf5df540dc13d4fe440a1ca7076c6ed65864 + ad732e236e7ffcb66de4b45a1b736aad4ccdcd83 - + https://github.com/dotnet/roslyn-analyzers - 29bdbf5df540dc13d4fe440a1ca7076c6ed65864 + ad732e236e7ffcb66de4b45a1b736aad4ccdcd83 diff --git a/eng/Versions.props b/eng/Versions.props index 8d784dc5c59361..2c4ee568cf864c 100644 --- a/eng/Versions.props +++ b/eng/Versions.props @@ -34,8 +34,8 @@ - 3.11.0-beta1.24170.2 - 9.0.0-preview.24170.2 + 3.11.0-beta1.24177.1 + 9.0.0-preview.24177.1 false diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System.Runtime.InteropServices.JavaScript.Tests.csproj b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System.Runtime.InteropServices.JavaScript.Tests.csproj index ef19eaa15fc203..e21c5d8c5e16dc 100644 --- 
a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System.Runtime.InteropServices.JavaScript.Tests.csproj +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System.Runtime.InteropServices.JavaScript.Tests.csproj @@ -17,6 +17,7 @@ + true false diff --git a/src/mono/sample/wasm/Directory.Build.props b/src/mono/sample/wasm/Directory.Build.props index 93717b5fbb4f50..ef6268ac4245ec 100644 --- a/src/mono/sample/wasm/Directory.Build.props +++ b/src/mono/sample/wasm/Directory.Build.props @@ -17,6 +17,7 @@ $(MSBuildProjectDirectory)\bin\$(Configuration)\AppBundle\ + true false diff --git a/src/mono/sample/wasm/browser-bench/Console/Wasm.Console.Bench.Sample.csproj b/src/mono/sample/wasm/browser-bench/Console/Wasm.Console.Bench.Sample.csproj index 6e36786bb6fb47..924e253ddeaad3 100644 --- a/src/mono/sample/wasm/browser-bench/Console/Wasm.Console.Bench.Sample.csproj +++ b/src/mono/sample/wasm/browser-bench/Console/Wasm.Console.Bench.Sample.csproj @@ -5,7 +5,6 @@ $(BrowserProjectRoot)\test-main.js true true - false diff --git a/src/mono/sample/wasm/browser-profile/Wasm.BrowserProfile.Sample.csproj b/src/mono/sample/wasm/browser-profile/Wasm.BrowserProfile.Sample.csproj index ddf26a06303657..81f03cdc9878f8 100644 --- a/src/mono/sample/wasm/browser-profile/Wasm.BrowserProfile.Sample.csproj +++ b/src/mono/sample/wasm/browser-profile/Wasm.BrowserProfile.Sample.csproj @@ -2,8 +2,6 @@ true aot; - true - false From f37a5c18bd18440c3e81e59c28b05163874bc1ea Mon Sep 17 00:00:00 2001 From: Bruce Forstall Date: Tue, 2 Apr 2024 09:22:38 -0700 Subject: [PATCH 040/132] Add JitOptRepeat to JIT experimental AzDO pipeline (#100326) Add one job that runs JitOptRepeat on all functions, 2 repetitions. 
--- eng/pipelines/common/templates/runtimes/run-test-job.yml | 2 ++ src/tests/Common/testenvironment.proj | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/eng/pipelines/common/templates/runtimes/run-test-job.yml b/eng/pipelines/common/templates/runtimes/run-test-job.yml index 8644351ff2a6db..8dc0e368dfc45e 100644 --- a/eng/pipelines/common/templates/runtimes/run-test-job.yml +++ b/eng/pipelines/common/templates/runtimes/run-test-job.yml @@ -535,6 +535,7 @@ jobs: scenarios: - jitosr_stress - jitpartialcompilation_pgo + - jitoptrepeat ${{ else }}: scenarios: - jitosr_stress @@ -546,6 +547,7 @@ jobs: - jitphysicalpromotion_only - jitphysicalpromotion_full - jitrlcse + - jitoptrepeat ${{ if in(parameters.testGroup, 'jit-cfg') }}: scenarios: - jitcfg diff --git a/src/tests/Common/testenvironment.proj b/src/tests/Common/testenvironment.proj index 099526099f999d..73c5bf8a151c4f 100644 --- a/src/tests/Common/testenvironment.proj +++ b/src/tests/Common/testenvironment.proj @@ -81,6 +81,9 @@ DOTNET_JitSynthesizeCounts; DOTNET_JitCheckSynthesizedCounts; DOTNET_JitRLCSEGreedy; + DOTNET_JitEnableOptRepeat; + DOTNET_JitOptRepeat; + DOTNET_JitOptRepeatCount; @@ -239,6 +242,7 @@ + From 93483bae181f12efebcd29a0cb0b4184950fb334 Mon Sep 17 00:00:00 2001 From: Pavel Savara Date: Tue, 2 Apr 2024 19:50:38 +0200 Subject: [PATCH 041/132] [browser][MT] fix error propagation when calling JSImport of missing JS function (#100408) --- .../JavaScript/JSImportTest.cs | 7 +++++ .../JavaScript/JavaScriptTestHelper.cs | 3 +++ src/mono/browser/runtime/invoke-js.ts | 27 ++++++++++++++----- src/mono/browser/runtime/marshal-to-cs.ts | 10 +++---- 4 files changed, 36 insertions(+), 11 deletions(-) diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/JSImportTest.cs 
b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/JSImportTest.cs index 18c989e5fb83af..b4c220519da097 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/JSImportTest.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/JSImportTest.cs @@ -43,6 +43,13 @@ public void MissingImport() Assert.Contains("intentionallyMissingImport must be a Function but was undefined", ex.Message); } + [Fact] + public async Task MissingImportAsync() + { + var ex = await Assert.ThrowsAsync(() => JavaScriptTestHelper.IntentionallyMissingImportAsync()); + Assert.Contains("intentionallyMissingImportAsync must be a Function but was undefined", ex.Message); + } + #if !FEATURE_WASM_MANAGED_THREADS // because in MT JSHost.ImportAsync is really async, it will finish before the caller could cancel it [Fact] public async Task CancelableImportAsync() diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/JavaScriptTestHelper.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/JavaScriptTestHelper.cs index 7a904c9bffa3bb..b2127558a15206 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/JavaScriptTestHelper.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/JavaScriptTestHelper.cs @@ -40,6 +40,9 @@ public static void 
ConsoleWriteLine([JSMarshalAs] string message) [JSImport("intentionallyMissingImport", "JavaScriptTestHelper")] public static partial void IntentionallyMissingImport(); + [JSImport("intentionallyMissingImportAsync", "JavaScriptTestHelper")] + public static partial Task IntentionallyMissingImportAsync(); + [JSImport("catch1toString", "JavaScriptTestHelper")] public static partial string catch1toString(string message, string functionName); diff --git a/src/mono/browser/runtime/invoke-js.ts b/src/mono/browser/runtime/invoke-js.ts index f42c66017acc7d..07c02c719d4851 100644 --- a/src/mono/browser/runtime/invoke-js.ts +++ b/src/mono/browser/runtime/invoke-js.ts @@ -4,8 +4,8 @@ import WasmEnableThreads from "consts:wasmEnableThreads"; import BuildConfiguration from "consts:configuration"; -import { marshal_exception_to_cs, bind_arg_marshal_to_cs } from "./marshal-to-cs"; -import { get_signature_argument_count, bound_js_function_symbol, get_sig, get_signature_version, get_signature_type, imported_js_function_symbol, get_signature_handle, get_signature_function_name, get_signature_module_name, is_receiver_should_free, get_caller_native_tid, get_sync_done_semaphore_ptr } from "./marshal"; +import { marshal_exception_to_cs, bind_arg_marshal_to_cs, marshal_task_to_cs } from "./marshal-to-cs"; +import { get_signature_argument_count, bound_js_function_symbol, get_sig, get_signature_version, get_signature_type, imported_js_function_symbol, get_signature_handle, get_signature_function_name, get_signature_module_name, is_receiver_should_free, get_caller_native_tid, get_sync_done_semaphore_ptr, get_arg } from "./marshal"; import { forceThreadMemoryViewRefresh } from "./memory"; import { JSFunctionSignature, JSMarshalerArguments, BoundMarshalerToJs, JSFnHandle, BoundMarshalerToCs, JSHandle, MarshalerType, VoidPtrNull } from "./types/internal"; import { VoidPtr } from "./types/emscripten"; @@ -28,7 +28,6 @@ export function mono_wasm_bind_js_import_ST (signature: JSFunctionSignature): 
Vo bind_js_import(signature); return VoidPtrNull; } catch (ex: any) { - Module.err(ex.toString()); return stringToUTF16Ptr(normalize_exception(ex)); } } @@ -45,9 +44,25 @@ export function mono_wasm_invoke_jsimport_MT (signature: JSFunctionSignature, ar try { bound_fn = bind_js_import(signature); } catch (ex: any) { - Module.err(ex.toString()); - marshal_exception_to_cs(args, ex); - return; + // propagate the exception back to caller, which could be on different thread. Handle both sync and async signatures. + try { + const res_sig = get_sig(signature, 1); + const res_type = get_signature_type(res_sig); + if (res_type === MarshalerType.Task) { + const res = get_arg(args, 1); + marshal_task_to_cs(res, Promise.reject(ex)); + } else { + marshal_exception_to_cs(args, ex); + if (monoThreadInfo.isUI) { + const done_semaphore = get_sync_done_semaphore_ptr(args); + tcwraps.mono_threads_wasm_sync_run_in_target_thread_done(done_semaphore); + } + } + return; + } catch (ex2: any) { + runtimeHelpers.nativeExit(ex2); + return; + } } } mono_assert(bound_fn, () => `Imported function handle expected ${function_handle}`); diff --git a/src/mono/browser/runtime/marshal-to-cs.ts b/src/mono/browser/runtime/marshal-to-cs.ts index 6e954ae983df39..d3c7117f2a7da1 100644 --- a/src/mono/browser/runtime/marshal-to-cs.ts +++ b/src/mono/browser/runtime/marshal-to-cs.ts @@ -48,9 +48,9 @@ export function initialize_marshalers_to_cs (): void { js_to_cs_marshalers.set(MarshalerType.JSException, marshal_exception_to_cs); js_to_cs_marshalers.set(MarshalerType.JSObject, marshal_js_object_to_cs); js_to_cs_marshalers.set(MarshalerType.Object, marshal_cs_object_to_cs); - js_to_cs_marshalers.set(MarshalerType.Task, _marshal_task_to_cs); - js_to_cs_marshalers.set(MarshalerType.TaskResolved, _marshal_task_to_cs); - js_to_cs_marshalers.set(MarshalerType.TaskRejected, _marshal_task_to_cs); + js_to_cs_marshalers.set(MarshalerType.Task, marshal_task_to_cs); + js_to_cs_marshalers.set(MarshalerType.TaskResolved, 
marshal_task_to_cs); + js_to_cs_marshalers.set(MarshalerType.TaskRejected, marshal_task_to_cs); js_to_cs_marshalers.set(MarshalerType.Action, _marshal_function_to_cs); js_to_cs_marshalers.set(MarshalerType.Function, _marshal_function_to_cs); js_to_cs_marshalers.set(MarshalerType.None, _marshal_null_to_cs);// also void @@ -295,7 +295,7 @@ function _marshal_function_to_cs (arg: JSMarshalerArgument, value: Function, _?: } -function _marshal_task_to_cs (arg: JSMarshalerArgument, value: Promise, _?: MarshalerType, res_converter?: MarshalerToCs) { +export function marshal_task_to_cs (arg: JSMarshalerArgument, value: Promise, _?: MarshalerType, res_converter?: MarshalerToCs) { const handleIsPreallocated = get_arg_type(arg) == MarshalerType.TaskPreCreated; if (value === null || value === undefined) { if (WasmEnableThreads && handleIsPreallocated) { @@ -415,7 +415,7 @@ export function marshal_cs_object_to_cs (arg: JSMarshalerArgument, value: any): ) { throw new Error("NotImplementedException: TypedArray"); } else if (isThenable(value)) { - _marshal_task_to_cs(arg, value); + marshal_task_to_cs(arg, value); } else if (value instanceof Span) { throw new Error("NotImplementedException: Span"); } else if (js_type == "object") { From 1ec64a7adffd5a038e9ff4c5f041279760d3b54d Mon Sep 17 00:00:00 2001 From: Buyaa Namnan Date: Tue, 2 Apr 2024 11:07:32 -0700 Subject: [PATCH 042/132] Fix more diagnostics with CA2263 (#100490) * Fix more diagnostics with CA2263 * Apply feedbacks --- .../ComInterop/ComRuntimeHelpers.cs | 8 ++++---- .../ComInterop/ComTypeEnumDesc.cs | 2 +- .../RuntimeBinder/ComInterop/ExcepInfo.cs | 2 +- .../ComInterop/IDispatchComObject.cs | 4 ++-- .../CSharp/RuntimeBinder/SymbolTable.cs | 2 +- ...ServiceCollectionServiceExtensions.Keyed.cs | 1 - .../Microsoft/VisualBasic/FileIO/FileSystem.vb | 16 ++++++++-------- .../src/System/ComponentModel/BindingList.cs | 2 +- .../AccountManagement/AD/SidList.cs | 6 +++--- .../AccountManagement/AuthZSet.cs | 8 ++++---- 
.../AccountManagement/SAM/SAMStoreCtx.cs | 2 +- .../AccountManagement/Utils.cs | 18 ++++++++---------- .../Protocols/common/BerConverter.cs | 2 +- .../Protocols/common/DirectoryControl.cs | 2 +- .../Protocols/ldap/LdapConnection.cs | 14 +++++++------- .../Protocols/ldap/LdapSessionOptions.cs | 4 ++-- .../src/Interop/EnumVariant.cs | 4 ++-- .../ActiveDirectoryReplicationMetaData.cs | 4 ++-- .../ActiveDirectory/ActiveDirectorySite.cs | 8 ++++---- .../ActiveDirectory/DirectoryServer.cs | 4 ++-- .../ForestTrustRelationshipInformation.cs | 14 +++++++------- .../ReplicationCursorCollection.cs | 6 +++--- .../ReplicationFailureCollection.cs | 4 ++-- .../ReplicationNeighborCollection.cs | 4 ++-- .../ReplicationOperationCollection.cs | 4 ++-- .../ActiveDirectory/TrustHelper.cs | 14 +++++++------- .../DirectoryServices/ActiveDirectory/Utils.cs | 16 +++++++--------- .../DirectoryServices/DirectorySearcher.cs | 8 ++++---- .../ComponentModel/DefaultValueAttribute.cs | 2 +- .../Runtime/Serialization/DataContract.cs | 6 +++--- .../ReflectionXmlSerializationReader.cs | 4 ++-- .../System/Xml/Xslt/XslCompiledTransform.cs | 2 +- .../Marshaling/JSMarshalerArgument.JSObject.cs | 2 +- .../Marshaling/JSMarshalerArgument.Object.cs | 2 +- .../Marshaling/JSMarshalerArgument.String.cs | 2 +- .../Formatters/Binary/BinaryObjectInfo.cs | 2 +- .../Security/Cryptography/LiteHash.Apple.cs | 4 ++-- .../src/System/Media/SoundPlayer.cs | 4 ++-- 38 files changed, 104 insertions(+), 109 deletions(-) diff --git a/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ComRuntimeHelpers.cs b/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ComRuntimeHelpers.cs index d090f3300cdb29..c1228bc1eade72 100644 --- a/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ComRuntimeHelpers.cs +++ b/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ComRuntimeHelpers.cs @@ -166,7 +166,7 @@ internal static 
ComTypes.ITypeInfo GetITypeInfoFromIDispatch(IDispatch dispatch) return typeInfo; } - internal static ComTypes.TYPEATTR GetTypeAttrForTypeInfo(ComTypes.ITypeInfo typeInfo) + internal static unsafe ComTypes.TYPEATTR GetTypeAttrForTypeInfo(ComTypes.ITypeInfo typeInfo) { IntPtr pAttrs; typeInfo.GetTypeAttr(out pAttrs); @@ -179,7 +179,7 @@ internal static ComTypes.TYPEATTR GetTypeAttrForTypeInfo(ComTypes.ITypeInfo type try { - return (ComTypes.TYPEATTR)Marshal.PtrToStructure(pAttrs, typeof(ComTypes.TYPEATTR)); + return *(ComTypes.TYPEATTR*)pAttrs; } finally { @@ -187,7 +187,7 @@ internal static ComTypes.TYPEATTR GetTypeAttrForTypeInfo(ComTypes.ITypeInfo type } } - internal static ComTypes.TYPELIBATTR GetTypeAttrForTypeLib(ComTypes.ITypeLib typeLib) + internal static unsafe ComTypes.TYPELIBATTR GetTypeAttrForTypeLib(ComTypes.ITypeLib typeLib) { IntPtr pAttrs; typeLib.GetLibAttr(out pAttrs); @@ -200,7 +200,7 @@ internal static ComTypes.TYPELIBATTR GetTypeAttrForTypeLib(ComTypes.ITypeLib typ try { - return (ComTypes.TYPELIBATTR)Marshal.PtrToStructure(pAttrs, typeof(ComTypes.TYPELIBATTR)); + return *(ComTypes.TYPELIBATTR*)pAttrs; } finally { diff --git a/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ComTypeEnumDesc.cs b/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ComTypeEnumDesc.cs index 0d9b7998872d41..26e526a28f3c36 100644 --- a/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ComTypeEnumDesc.cs +++ b/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ComTypeEnumDesc.cs @@ -36,7 +36,7 @@ internal ComTypeEnumDesc(ComTypes.ITypeInfo typeInfo, ComTypeLibDesc typeLibDesc try { - varDesc = (ComTypes.VARDESC)Marshal.PtrToStructure(p, typeof(ComTypes.VARDESC)); + varDesc = Marshal.PtrToStructure(p); if (varDesc.varkind == ComTypes.VARKIND.VAR_CONST) { diff --git a/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ExcepInfo.cs 
b/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ExcepInfo.cs index 7d65926d813c99..b839dbb7d21db8 100644 --- a/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ExcepInfo.cs +++ b/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ExcepInfo.cs @@ -29,7 +29,7 @@ internal struct ExcepInfo #if DEBUG static ExcepInfo() { - Debug.Assert(Marshal.SizeOf(typeof(ExcepInfo)) == Marshal.SizeOf(typeof(ComTypes.EXCEPINFO))); + Debug.Assert(Marshal.SizeOf() == Marshal.SizeOf()); } #endif diff --git a/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/IDispatchComObject.cs b/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/IDispatchComObject.cs index 5e1f62f83d7c00..f4ee622d505f0f 100644 --- a/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/IDispatchComObject.cs +++ b/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/IDispatchComObject.cs @@ -328,7 +328,7 @@ DynamicMetaObject IDynamicMetaObjectProvider.GetMetaObject(Expression parameter) return new IDispatchMetaObject(parameter, this); } - private static void GetFuncDescForDescIndex(ComTypes.ITypeInfo typeInfo, int funcIndex, out ComTypes.FUNCDESC funcDesc, out IntPtr funcDescHandle) + private static unsafe void GetFuncDescForDescIndex(ComTypes.ITypeInfo typeInfo, int funcIndex, out ComTypes.FUNCDESC funcDesc, out IntPtr funcDescHandle) { IntPtr pFuncDesc; typeInfo.GetFuncDesc(funcIndex, out pFuncDesc); @@ -339,7 +339,7 @@ private static void GetFuncDescForDescIndex(ComTypes.ITypeInfo typeInfo, int fun throw Error.CannotRetrieveTypeInformation(); } - funcDesc = (ComTypes.FUNCDESC)Marshal.PtrToStructure(pFuncDesc, typeof(ComTypes.FUNCDESC)); + funcDesc = *(ComTypes.FUNCDESC*)pFuncDesc; funcDescHandle = pFuncDesc; } diff --git a/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/SymbolTable.cs 
b/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/SymbolTable.cs index b2f31a5eaf56dd..451cdd5434697f 100644 --- a/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/SymbolTable.cs +++ b/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/SymbolTable.cs @@ -1439,7 +1439,7 @@ private static void SetParameterDataForMethProp(MethodOrPropertySymbol methProp, if (parameters.Length > 0) { // See if we have a param array. - if (parameters[parameters.Length - 1].GetCustomAttribute(typeof(ParamArrayAttribute), false) != null) + if (parameters[parameters.Length - 1].GetCustomAttribute(false) != null) { methProp.isParamArray = true; } diff --git a/src/libraries/Microsoft.Extensions.DependencyInjection.Abstractions/src/ServiceCollectionServiceExtensions.Keyed.cs b/src/libraries/Microsoft.Extensions.DependencyInjection.Abstractions/src/ServiceCollectionServiceExtensions.Keyed.cs index 788b20f5be1932..1f1f1cef1a4e8c 100644 --- a/src/libraries/Microsoft.Extensions.DependencyInjection.Abstractions/src/ServiceCollectionServiceExtensions.Keyed.cs +++ b/src/libraries/Microsoft.Extensions.DependencyInjection.Abstractions/src/ServiceCollectionServiceExtensions.Keyed.cs @@ -327,7 +327,6 @@ public static IServiceCollection AddKeyedScoped( return services.AddKeyedScoped(typeof(TService), serviceKey, implementationFactory); } - /// /// Adds a singleton service of the type specified in with an /// implementation of the type specified in to the diff --git a/src/libraries/Microsoft.VisualBasic.Core/src/Microsoft/VisualBasic/FileIO/FileSystem.vb b/src/libraries/Microsoft.VisualBasic.Core/src/Microsoft/VisualBasic/FileIO/FileSystem.vb index 45fe6e29b9a1ea..27ac1fec7e5970 100644 --- a/src/libraries/Microsoft.VisualBasic.Core/src/Microsoft/VisualBasic/FileIO/FileSystem.vb +++ b/src/libraries/Microsoft.VisualBasic.Core/src/Microsoft/VisualBasic/FileIO/FileSystem.vb @@ -892,7 +892,7 @@ Namespace Microsoft.VisualBasic.FileIO Private Shared Sub 
CopyOrMoveDirectory(ByVal operation As CopyOrMove, ByVal sourceDirectoryName As String, ByVal destinationDirectoryName As String, ByVal overwrite As Boolean, ByVal showUI As UIOptionInternal, ByVal onUserCancel As UICancelOption) - Debug.Assert(System.Enum.IsDefined(GetType(CopyOrMove), operation), "Invalid Operation") + Debug.Assert([Enum].IsDefined(operation), "Invalid Operation") ' Verify enums. VerifyUICancelOption("onUserCancel", onUserCancel) @@ -961,7 +961,7 @@ Namespace Microsoft.VisualBasic.FileIO Private Shared Sub FxCopyOrMoveDirectory(ByVal operation As CopyOrMove, ByVal sourceDirectoryPath As String, ByVal targetDirectoryPath As String, ByVal overwrite As Boolean) - Debug.Assert(System.Enum.IsDefined(GetType(CopyOrMove), operation), "Invalid Operation") + Debug.Assert([Enum].IsDefined(operation), "Invalid Operation") Debug.Assert(sourceDirectoryPath <> "" And IO.Path.IsPathRooted(sourceDirectoryPath), "Invalid Source") Debug.Assert(targetDirectoryPath <> "" And IO.Path.IsPathRooted(targetDirectoryPath), "Invalid Target") @@ -1010,7 +1010,7 @@ Namespace Microsoft.VisualBasic.FileIO Private Shared Sub CopyOrMoveDirectoryNode(ByVal Operation As CopyOrMove, ByVal SourceDirectoryNode As DirectoryNode, ByVal Overwrite As Boolean, ByVal Exceptions As ListDictionary) - Debug.Assert(System.Enum.IsDefined(GetType(CopyOrMove), Operation), "Invalid Operation") + Debug.Assert([Enum].IsDefined(Operation), "Invalid Operation") Debug.Assert(Exceptions IsNot Nothing, "Null exception list") Debug.Assert(SourceDirectoryNode IsNot Nothing, "Null source node") @@ -1092,7 +1092,7 @@ Namespace Microsoft.VisualBasic.FileIO ByVal sourceFileName As String, ByVal destinationFileName As String, ByVal overwrite As Boolean, ByVal showUI As UIOptionInternal, ByVal onUserCancel As UICancelOption ) - Debug.Assert(System.Enum.IsDefined(GetType(CopyOrMove), operation), "Invalid Operation") + Debug.Assert([Enum].IsDefined(operation), "Invalid Operation") ' Verify enums. 
VerifyUICancelOption("onUserCancel", onUserCancel) @@ -1597,8 +1597,8 @@ Namespace Microsoft.VisualBasic.FileIO ''' Private Shared Sub ShellCopyOrMove(ByVal Operation As CopyOrMove, ByVal TargetType As FileOrDirectory, ByVal FullSourcePath As String, ByVal FullTargetPath As String, ByVal ShowUI As UIOptionInternal, ByVal OnUserCancel As UICancelOption) - Debug.Assert(System.Enum.IsDefined(GetType(CopyOrMove), Operation)) - Debug.Assert(System.Enum.IsDefined(GetType(FileOrDirectory), TargetType)) + Debug.Assert([Enum].IsDefined(Operation)) + Debug.Assert([Enum].IsDefined(TargetType)) Debug.Assert(FullSourcePath <> "" And IO.Path.IsPathRooted(FullSourcePath), "Invalid FullSourcePath") Debug.Assert(FullTargetPath <> "" And IO.Path.IsPathRooted(FullTargetPath), "Invalid FullTargetPath") Debug.Assert(ShowUI <> UIOptionInternal.NoUI, "Why call ShellDelete if ShowUI is NoUI???") @@ -1693,7 +1693,7 @@ Namespace Microsoft.VisualBasic.FileIO Private Shared Sub ShellFileOperation(ByVal OperationType As SHFileOperationType, ByVal OperationFlags As ShFileOperationFlags, ByVal FullSource As String, ByVal FullTarget As String, ByVal OnUserCancel As UICancelOption, ByVal FileOrDirectory As FileOrDirectory) - Debug.Assert(System.Enum.IsDefined(GetType(SHFileOperationType), OperationType)) + Debug.Assert([Enum].IsDefined(OperationType)) Debug.Assert(OperationType <> SHFileOperationType.FO_RENAME, "Don't call Shell to rename") Debug.Assert(FullSource <> "" And IO.Path.IsPathRooted(FullSource), "Invalid FullSource path") Debug.Assert(OperationType = SHFileOperationType.FO_DELETE OrElse (FullTarget <> "" And IO.Path.IsPathRooted(FullTarget)), "Invalid FullTarget path") @@ -1750,7 +1750,7 @@ Namespace Microsoft.VisualBasic.FileIO Private Shared Function GetShellOperationInfo( ByVal OperationType As SHFileOperationType, ByVal OperationFlags As ShFileOperationFlags, ByVal SourcePaths() As String, Optional ByVal TargetPath As String = Nothing) As SHFILEOPSTRUCT - 
Debug.Assert(System.Enum.IsDefined(GetType(SHFileOperationType), OperationType), "Invalid OperationType") + Debug.Assert([Enum].IsDefined(OperationType), "Invalid OperationType") Debug.Assert(TargetPath = "" Or IO.Path.IsPathRooted(TargetPath), "Invalid TargetPath") Debug.Assert(SourcePaths IsNot Nothing AndAlso SourcePaths.Length > 0, "Invalid SourcePaths") diff --git a/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/BindingList.cs b/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/BindingList.cs index d87deab820434c..aa28f466deeb47 100644 --- a/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/BindingList.cs +++ b/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/BindingList.cs @@ -312,7 +312,7 @@ public virtual void EndNew(int itemIndex) { // Allow event handler to supply the new item for us // If event handler did not supply new item, create one ourselves - object? newItem = FireAddingNew() ?? Activator.CreateInstance(typeof(T)); + object? newItem = FireAddingNew() ?? Activator.CreateInstance(); // Add item to end of list. Note: If event handler returned an item not of type T, // the cast below will trigger an InvalidCastException. This is by design. 
diff --git a/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/AD/SidList.cs b/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/AD/SidList.cs index 2d6d63fee72970..d488ad57dc2bdb 100644 --- a/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/AD/SidList.cs +++ b/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/AD/SidList.cs @@ -62,7 +62,7 @@ internal SidList(Interop.SID_AND_ATTRIBUTES[] sidAndAttr) TranslateSids(null, pSids); } - private void TranslateSids(string target, IntPtr[] pSids) + private unsafe void TranslateSids(string target, IntPtr[] pSids) { GlobalDebug.WriteLineIf(GlobalDebug.Info, "AuthZSet", "SidList: processing {0} SIDs", pSids.Length); @@ -157,8 +157,8 @@ private void TranslateSids(string target, IntPtr[] pSids) for (int i = 0; i < domainCount; i++) { - domains[i] = (Interop.LSA_TRUST_INFORMATION)Marshal.PtrToStructure(pCurrentDomain, typeof(Interop.LSA_TRUST_INFORMATION)); - pCurrentDomain = new IntPtr(pCurrentDomain.ToInt64() + Marshal.SizeOf(typeof(Interop.LSA_TRUST_INFORMATION))); + domains[i] = *(Interop.LSA_TRUST_INFORMATION*)pCurrentDomain; + pCurrentDomain += sizeof(Interop.LSA_TRUST_INFORMATION); } GlobalDebug.WriteLineIf(GlobalDebug.Info, "AuthZSet", "SidList: got {0} groups in {1} domains", sidCount, domainCount); diff --git a/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/AuthZSet.cs b/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/AuthZSet.cs index b9423466e825da..73c71722cc2ca2 100644 --- a/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/AuthZSet.cs +++ 
b/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/AuthZSet.cs @@ -131,7 +131,7 @@ out pClientContext // Extract TOKEN_GROUPS.GroupCount - Interop.TOKEN_GROUPS tokenGroups = (Interop.TOKEN_GROUPS)Marshal.PtrToStructure(pBuffer, typeof(Interop.TOKEN_GROUPS)); + Interop.TOKEN_GROUPS tokenGroups = *(Interop.TOKEN_GROUPS*)pBuffer; uint groupCount = tokenGroups.GroupCount; @@ -141,13 +141,13 @@ out pClientContext // each native SID_AND_ATTRIBUTES into a managed SID_AND_ATTR. Interop.SID_AND_ATTRIBUTES[] groups = new Interop.SID_AND_ATTRIBUTES[groupCount]; - IntPtr currentItem = new IntPtr(pBuffer.ToInt64() + Marshal.SizeOf(typeof(Interop.TOKEN_GROUPS)) - sizeof(Interop.SID_AND_ATTRIBUTES)); + IntPtr currentItem = pBuffer + sizeof(Interop.TOKEN_GROUPS) - sizeof(Interop.SID_AND_ATTRIBUTES); for (int i = 0; i < groupCount; i++) { - groups[i] = (Interop.SID_AND_ATTRIBUTES)Marshal.PtrToStructure(currentItem, typeof(Interop.SID_AND_ATTRIBUTES)); + groups[i] = *(Interop.SID_AND_ATTRIBUTES*)currentItem; - currentItem = new IntPtr(currentItem.ToInt64() + Marshal.SizeOf(typeof(Interop.SID_AND_ATTRIBUTES))); + currentItem += sizeof(Interop.SID_AND_ATTRIBUTES); } _groupSidList = new SidList(groups); diff --git a/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/SAM/SAMStoreCtx.cs b/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/SAM/SAMStoreCtx.cs index 1f91a0e5daccd6..f31b35ca0c22b4 100644 --- a/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/SAM/SAMStoreCtx.cs +++ b/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/SAM/SAMStoreCtx.cs @@ -1053,7 +1053,7 @@ private void LoadComputerInfo() if (err == 0) { UnsafeNativeMethods.WKSTA_INFO_100 wkstaInfo = - 
(UnsafeNativeMethods.WKSTA_INFO_100)Marshal.PtrToStructure(buffer, typeof(UnsafeNativeMethods.WKSTA_INFO_100)); + Marshal.PtrToStructure(buffer); _machineFlatName = wkstaInfo.wki100_computername; GlobalDebug.WriteLineIf(GlobalDebug.Info, "SAMStoreCtx", "LoadComputerInfo: machineFlatName={0}", _machineFlatName); diff --git a/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/Utils.cs b/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/Utils.cs index 619c48191e264e..c4995b0a2c53e5 100644 --- a/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/Utils.cs +++ b/src/libraries/System.DirectoryServices.AccountManagement/src/System/DirectoryServices/AccountManagement/Utils.cs @@ -198,15 +198,14 @@ internal static SidType ClassifySID(byte[] sid) } - internal static SidType ClassifySID(IntPtr pSid) + internal static unsafe SidType ClassifySID(IntPtr pSid) { Debug.Assert(Interop.Advapi32.IsValidSid(pSid)); // Get the issuing authority and the first RID IntPtr pIdentAuth = Interop.Advapi32.GetSidIdentifierAuthority(pSid); - Interop.Advapi32.SID_IDENTIFIER_AUTHORITY identAuth = - (Interop.Advapi32.SID_IDENTIFIER_AUTHORITY)Marshal.PtrToStructure(pIdentAuth, typeof(Interop.Advapi32.SID_IDENTIFIER_AUTHORITY)); + Interop.Advapi32.SID_IDENTIFIER_AUTHORITY identAuth = *(Interop.Advapi32.SID_IDENTIFIER_AUTHORITY*)pIdentAuth; IntPtr pRid = Interop.Advapi32.GetSidSubAuthority(pSid, 0); int rid = Marshal.ReadInt32(pRid); @@ -333,7 +332,7 @@ internal static bool IsSamUser() } - internal static IntPtr GetCurrentUserSid() + internal static unsafe IntPtr GetCurrentUserSid() { SafeTokenHandle tokenHandle = null; IntPtr pBuffer = IntPtr.Zero; @@ -425,7 +424,7 @@ out tokenHandle } // Retrieve the user's SID from the user info - Interop.TOKEN_USER tokenUser = (Interop.TOKEN_USER)Marshal.PtrToStructure(pBuffer, typeof(Interop.TOKEN_USER)); 
+ Interop.TOKEN_USER tokenUser = *(Interop.TOKEN_USER*)pBuffer; IntPtr pUserSid = tokenUser.sidAndAttributes.Sid; // this is a reference into the NATIVE memory (into pBuffer) Debug.Assert(Interop.Advapi32.IsValidSid(pUserSid)); @@ -457,7 +456,7 @@ out tokenHandle } - internal static IntPtr GetMachineDomainSid() + internal static unsafe IntPtr GetMachineDomainSid() { SafeLsaPolicyHandle policyHandle = null; IntPtr pBuffer = IntPtr.Zero; @@ -496,8 +495,7 @@ internal static IntPtr GetMachineDomainSid() } Debug.Assert(pBuffer != IntPtr.Zero); - UnsafeNativeMethods.POLICY_ACCOUNT_DOMAIN_INFO info = (UnsafeNativeMethods.POLICY_ACCOUNT_DOMAIN_INFO) - Marshal.PtrToStructure(pBuffer, typeof(UnsafeNativeMethods.POLICY_ACCOUNT_DOMAIN_INFO)); + UnsafeNativeMethods.POLICY_ACCOUNT_DOMAIN_INFO info = *(UnsafeNativeMethods.POLICY_ACCOUNT_DOMAIN_INFO*)pBuffer; Debug.Assert(Interop.Advapi32.IsValidSid(info.DomainSid)); @@ -570,7 +568,7 @@ internal static UnsafeNativeMethods.DomainControllerInfo GetDcName(string comput } UnsafeNativeMethods.DomainControllerInfo domainControllerInfo = - (UnsafeNativeMethods.DomainControllerInfo)Marshal.PtrToStructure(domainControllerInfoPtr, typeof(UnsafeNativeMethods.DomainControllerInfo)); + Marshal.PtrToStructure(domainControllerInfoPtr); return domainControllerInfo; } @@ -802,7 +800,7 @@ internal static bool IsMachineDC(string computerName) } UnsafeNativeMethods.DSROLE_PRIMARY_DOMAIN_INFO_BASIC dsRolePrimaryDomainInfo = - (UnsafeNativeMethods.DSROLE_PRIMARY_DOMAIN_INFO_BASIC)Marshal.PtrToStructure(dsRoleInfoPtr, typeof(UnsafeNativeMethods.DSROLE_PRIMARY_DOMAIN_INFO_BASIC)); + Marshal.PtrToStructure(dsRoleInfoPtr); return (dsRolePrimaryDomainInfo.MachineRole == UnsafeNativeMethods.DSROLE_MACHINE_ROLE.DsRole_RoleBackupDomainController || dsRolePrimaryDomainInfo.MachineRole == UnsafeNativeMethods.DSROLE_MACHINE_ROLE.DsRole_RolePrimaryDomainController); diff --git 
a/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/common/BerConverter.cs b/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/common/BerConverter.cs index 7e37a4c15175c5..13f7b8b0963a78 100644 --- a/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/common/BerConverter.cs +++ b/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/common/BerConverter.cs @@ -527,7 +527,7 @@ private static unsafe int EncodingMultiByteArrayHelper(SafeBerHandle berElement, { int i = 0; berValArray = Utility.AllocHGlobalIntPtrArray(tempValue.Length + 1); - int structSize = Marshal.SizeOf(typeof(BerVal)); + int structSize = Marshal.SizeOf(); managedBervalArray = new BerVal[tempValue.Length]; void** pBerValArray = (void**)berValArray; diff --git a/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/common/DirectoryControl.cs b/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/common/DirectoryControl.cs index 31548f6b2fd639..f47bc50318b946 100644 --- a/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/common/DirectoryControl.cs +++ b/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/common/DirectoryControl.cs @@ -716,7 +716,7 @@ public override unsafe byte[] GetValue() } IntPtr control = IntPtr.Zero; - int structSize = Marshal.SizeOf(typeof(SortKeyInterop)); + int structSize = Marshal.SizeOf(); int keyCount = nativeSortKeys.Length; IntPtr memHandle = Utility.AllocHGlobalIntPtrArray(keyCount + 1); diff --git a/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/ldap/LdapConnection.cs b/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/ldap/LdapConnection.cs index 0d478c6167ca1f..4be0407a9eea9b 100644 --- 
a/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/ldap/LdapConnection.cs +++ b/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/ldap/LdapConnection.cs @@ -544,7 +544,7 @@ private unsafe int SendRequestHelper(DirectoryRequest request, ref int messageID { // Build server control. managedServerControls = BuildControlArray(request.Controls, true); - int structSize = Marshal.SizeOf(typeof(LdapControl)); + int structSize = Marshal.SizeOf(); if (managedServerControls != null) { @@ -658,7 +658,7 @@ private unsafe int SendRequestHelper(DirectoryRequest request, ref int messageID addModCount = (modifications == null ? 1 : modifications.Length + 1); modArray = Utility.AllocHGlobalIntPtrArray(addModCount); void** pModArray = (void**)modArray; - int modStructSize = Marshal.SizeOf(typeof(LdapMod)); + int modStructSize = Marshal.SizeOf(); int i = 0; for (i = 0; i < addModCount - 1; i++) { @@ -918,12 +918,12 @@ private unsafe Interop.BOOL ProcessClientCertificate(IntPtr ldapHandle, IntPtr C var list = new ArrayList(); if (CAs != IntPtr.Zero) { - SecPkgContext_IssuerListInfoEx trustedCAs = (SecPkgContext_IssuerListInfoEx)Marshal.PtrToStructure(CAs, typeof(SecPkgContext_IssuerListInfoEx)); + SecPkgContext_IssuerListInfoEx trustedCAs = *(SecPkgContext_IssuerListInfoEx*)CAs; int issuerNumber = trustedCAs.cIssuers; for (int i = 0; i < issuerNumber; i++) { - IntPtr tempPtr = (IntPtr)((byte*)trustedCAs.aIssuers + Marshal.SizeOf(typeof(CRYPTOAPI_BLOB)) * (nint)i); - CRYPTOAPI_BLOB info = (CRYPTOAPI_BLOB)Marshal.PtrToStructure(tempPtr, typeof(CRYPTOAPI_BLOB)); + IntPtr tempPtr = (IntPtr)((byte*)trustedCAs.aIssuers + sizeof(CRYPTOAPI_BLOB) * (nint)i); + CRYPTOAPI_BLOB info = *(CRYPTOAPI_BLOB*)tempPtr; int dataLength = info.cbData; byte[] context = new byte[dataLength]; @@ -1077,7 +1077,7 @@ private void BindHelper(NetworkCredential newCredential, bool needSetCredential) var cred = new SEC_WINNT_AUTH_IDENTITY_EX() 
{ version = Interop.SEC_WINNT_AUTH_IDENTITY_VERSION, - length = Marshal.SizeOf(typeof(SEC_WINNT_AUTH_IDENTITY_EX)), + length = Marshal.SizeOf(), flags = Interop.SEC_WINNT_AUTH_IDENTITY_UNICODE }; if (AuthType == AuthType.Kerberos) @@ -1342,7 +1342,7 @@ internal static unsafe LdapMod[] BuildAttributes(CollectionBase directoryAttribu attributes[i].values = Utility.AllocHGlobalIntPtrArray(valuesCount + 1); void** pAttributesValues = (void**)attributes[i].values; - int structSize = Marshal.SizeOf(typeof(BerVal)); + int structSize = Marshal.SizeOf(); IntPtr controlPtr; int m; diff --git a/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/ldap/LdapSessionOptions.cs b/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/ldap/LdapSessionOptions.cs index 06809037a8ba47..0fa241fc30113d 100644 --- a/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/ldap/LdapSessionOptions.cs +++ b/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/ldap/LdapSessionOptions.cs @@ -555,7 +555,7 @@ public unsafe void StartTransportLayerSecurity(DirectoryControlCollection contro { // build server control managedServerControls = LdapConnection.BuildControlArray(controls, true); - int structSize = Marshal.SizeOf(typeof(LdapControl)); + int structSize = Marshal.SizeOf(); if (managedServerControls != null) { serverControlArray = Utility.AllocHGlobalIntPtrArray(managedServerControls.Length + 1); @@ -848,7 +848,7 @@ private void ProcessCallBackRoutine(ReferralCallback tempCallback) { LdapReferralCallback value = new LdapReferralCallback() { - sizeofcallback = Marshal.SizeOf(typeof(LdapReferralCallback)), + sizeofcallback = Marshal.SizeOf(), query = tempCallback.QueryForConnection == null ? null : _queryDelegate, notify = tempCallback.NotifyNewConnection == null ? null : _notifiyDelegate, dereference = tempCallback.DereferenceConnection == null ? 
null : _dereferenceDelegate diff --git a/src/libraries/System.DirectoryServices/src/Interop/EnumVariant.cs b/src/libraries/System.DirectoryServices/src/Interop/EnumVariant.cs index eb1ac26a1d094e..b8f3f3227e7f3c 100644 --- a/src/libraries/System.DirectoryServices/src/Interop/EnumVariant.cs +++ b/src/libraries/System.DirectoryServices/src/Interop/EnumVariant.cs @@ -58,10 +58,10 @@ public void Reset() /// Moves the pointer to the next value In the contained IEnumVariant, and /// stores the current value In currentValue. /// - private void Advance() + private unsafe void Advance() { _currentValue = s_noMoreValues; - IntPtr addr = Marshal.AllocCoTaskMem(Marshal.SizeOf(typeof(Variant))); + IntPtr addr = Marshal.AllocCoTaskMem(sizeof(Variant)); try { int[] numRead = new int[] { 0 }; diff --git a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ActiveDirectoryReplicationMetaData.cs b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ActiveDirectoryReplicationMetaData.cs index baad123def5174..6dffd10fe38221 100644 --- a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ActiveDirectoryReplicationMetaData.cs +++ b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ActiveDirectoryReplicationMetaData.cs @@ -64,14 +64,14 @@ internal void AddHelper(int count, IntPtr info, bool advanced) { if (advanced) { - addr = IntPtr.Add(info, sizeof(int) * 2 + i * Marshal.SizeOf(typeof(DS_REPL_ATTR_META_DATA_2))); + addr = IntPtr.Add(info, sizeof(int) * 2 + i * Marshal.SizeOf()); AttributeMetadata managedMetaData = new AttributeMetadata(addr, true, _server, _nameTable); Add(managedMetaData.Name, managedMetaData); } else { - addr = IntPtr.Add(info, sizeof(int) * 2 + i * Marshal.SizeOf(typeof(DS_REPL_ATTR_META_DATA))); + addr = IntPtr.Add(info, sizeof(int) * 2 + i * Marshal.SizeOf()); AttributeMetadata managedMetaData = new AttributeMetadata(addr, false, 
_server, _nameTable); Add(managedMetaData.Name, managedMetaData); diff --git a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ActiveDirectorySite.cs b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ActiveDirectorySite.cs index d51219dafba672..1a50524bb1438e 100644 --- a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ActiveDirectorySite.cs +++ b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ActiveDirectorySite.cs @@ -1302,7 +1302,7 @@ private unsafe void GetDomains() DomainController dc = DomainController.GetDomainController(Utils.GetNewDirectoryContext(serverName, DirectoryContextType.DirectoryServer, context)); IntPtr handle = dc.Handle; - Debug.Assert(handle != (IntPtr)0); + Debug.Assert(handle != 0); void* pDomains = null; // call DsReplicaSyncAllW @@ -1327,11 +1327,11 @@ private unsafe void GetDomains() IntPtr val = names.rItems; if (count > 0) { - Debug.Assert(val != (IntPtr)0); - IntPtr tmpPtr = (IntPtr)0; + Debug.Assert(val != 0); + IntPtr tmpPtr = 0; for (int i = 0; i < count; i++) { - tmpPtr = IntPtr.Add(val, Marshal.SizeOf(typeof(DS_NAME_RESULT_ITEM)) * i); + tmpPtr = IntPtr.Add(val, Marshal.SizeOf() * i); DS_NAME_RESULT_ITEM nameResult = new DS_NAME_RESULT_ITEM(); Marshal.PtrToStructure(tmpPtr, nameResult); if (nameResult.status == DS_NAME_ERROR.DS_NAME_NO_ERROR || nameResult.status == DS_NAME_ERROR.DS_NAME_ERROR_DOMAIN_ONLY) diff --git a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/DirectoryServer.cs b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/DirectoryServer.cs index 6a742782d8f48a..4d7862649788d8 100644 --- a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/DirectoryServer.cs +++ b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/DirectoryServer.cs @@ 
-692,8 +692,8 @@ private unsafe void FreeReplicaInfo(DS_REPL_INFO_TYPE type, IntPtr value, SafeLi internal unsafe void SyncReplicaHelper(IntPtr dsHandle, bool isADAM, string partition, string? sourceServer, int option, SafeLibraryHandle libHandle) { - int structSize = Marshal.SizeOf(typeof(Guid)); - IntPtr unmanagedGuid = (IntPtr)0; + int structSize = sizeof(Guid); + IntPtr unmanagedGuid = 0; Guid guid = Guid.Empty; AdamInstance? adamServer = null; DomainController? dcServer = null; diff --git a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ForestTrustRelationshipInformation.cs b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ForestTrustRelationshipInformation.cs index c216689bf0a072..a606eaf17bdf93 100644 --- a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ForestTrustRelationshipInformation.cs +++ b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ForestTrustRelationshipInformation.cs @@ -114,8 +114,8 @@ public unsafe void Save() { try { - IntPtr ptr = (IntPtr)0; - fileTime = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(FileTime))); + IntPtr ptr = 0; + fileTime = Marshal.AllocHGlobal(Marshal.SizeOf()); Interop.Kernel32.GetSystemTimeAsFileTime(fileTime); // set the time @@ -134,7 +134,7 @@ public unsafe void Save() ptrList.Add(ptr); Interop.NtDll.RtlInitUnicodeString(out record.TopLevelName, ptr); - tmpPtr = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(LSA_FOREST_TRUST_RECORD))); + tmpPtr = Marshal.AllocHGlobal(Marshal.SizeOf()); ptrList.Add(tmpPtr); Marshal.StructureToPtr(record, tmpPtr, false); @@ -163,7 +163,7 @@ public unsafe void Save() ptr = Marshal.StringToHGlobalUni(_excludedNames[i]); ptrList.Add(ptr); Interop.NtDll.RtlInitUnicodeString(out record.TopLevelName, ptr); - tmpPtr = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(LSA_FOREST_TRUST_RECORD))); + tmpPtr = Marshal.AllocHGlobal(Marshal.SizeOf()); 
ptrList.Add(tmpPtr); Marshal.StructureToPtr(record, tmpPtr, false); @@ -196,7 +196,7 @@ public unsafe void Save() ptrList.Add(record.DomainInfo.NetBIOSNameBuffer); record.DomainInfo.NetBIOSNameLength = (short)(tmp.NetBiosName == null ? 0 : tmp.NetBiosName.Length * 2); record.DomainInfo.NetBIOSNameMaximumLength = (short)(tmp.NetBiosName == null ? 0 : tmp.NetBiosName.Length * 2); - tmpPtr = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(LSA_FOREST_TRUST_RECORD))); + tmpPtr = Marshal.AllocHGlobal(Marshal.SizeOf()); ptrList.Add(tmpPtr); Marshal.StructureToPtr(record, tmpPtr, false); @@ -222,7 +222,7 @@ public unsafe void Save() ptrList.Add(record.Data.Buffer); Marshal.Copy((byte[])_binaryData[i]!, 0, record.Data.Buffer, record.Data.Length); } - tmpPtr = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(LSA_FOREST_TRUST_RECORD))); + tmpPtr = Marshal.AllocHGlobal(Marshal.SizeOf()); ptrList.Add(tmpPtr); Marshal.StructureToPtr(record, tmpPtr, false); @@ -235,7 +235,7 @@ public unsafe void Save() LSA_FOREST_TRUST_INFORMATION trustInformation = new LSA_FOREST_TRUST_INFORMATION(); trustInformation.RecordCount = count; trustInformation.Entries = records; - forestInfo = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(LSA_FOREST_TRUST_INFORMATION))); + forestInfo = Marshal.AllocHGlobal(Marshal.SizeOf()); Marshal.StructureToPtr(trustInformation, forestInfo, false); // get policy server name diff --git a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationCursorCollection.cs b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationCursorCollection.cs index 52e2946a207db0..6b109f8663b4ef 100644 --- a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationCursorCollection.cs +++ b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationCursorCollection.cs @@ -49,13 +49,13 @@ internal void AddHelper(string partition, object cursors, bool 
advanced, IntPtr else count = ((DS_REPL_CURSORS)cursors).cNumCursors; - IntPtr addr = (IntPtr)0; + IntPtr addr = 0; for (int i = 0; i < count; i++) { if (advanced) { - addr = IntPtr.Add(info, sizeof(int) * 2 + i * Marshal.SizeOf(typeof(DS_REPL_CURSOR_3))); + addr = IntPtr.Add(info, sizeof(int) * 2 + i * Marshal.SizeOf()); DS_REPL_CURSOR_3 cursor = new DS_REPL_CURSOR_3(); Marshal.PtrToStructure(addr, cursor); @@ -69,7 +69,7 @@ internal void AddHelper(string partition, object cursors, bool advanced, IntPtr } else { - addr = IntPtr.Add(info, sizeof(int) * 2 + i * Marshal.SizeOf(typeof(DS_REPL_CURSOR))); + addr = IntPtr.Add(info, sizeof(int) * 2 + i * Marshal.SizeOf()); DS_REPL_CURSOR cursor = new DS_REPL_CURSOR(); Marshal.PtrToStructure(addr, cursor); diff --git a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationFailureCollection.cs b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationFailureCollection.cs index fcf99e14cf3d32..d67221932fc3d2 100644 --- a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationFailureCollection.cs +++ b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationFailureCollection.cs @@ -48,11 +48,11 @@ internal void AddHelper(DS_REPL_KCC_DSA_FAILURES failures, IntPtr info) // get the count int count = failures.cNumEntries; - IntPtr addr = (IntPtr)0; + IntPtr addr = 0; for (int i = 0; i < count; i++) { - addr = IntPtr.Add(info, sizeof(int) * 2 + i * Marshal.SizeOf(typeof(DS_REPL_KCC_DSA_FAILURE))); + addr = IntPtr.Add(info, sizeof(int) * 2 + i * Marshal.SizeOf()); ReplicationFailure managedFailure = new ReplicationFailure(addr, _server, _nameTable); diff --git a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationNeighborCollection.cs 
b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationNeighborCollection.cs index 34f7203e9f520a..476a7591ef75b1 100644 --- a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationNeighborCollection.cs +++ b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationNeighborCollection.cs @@ -48,11 +48,11 @@ internal void AddHelper(DS_REPL_NEIGHBORS neighbors, IntPtr info) // get the count int count = neighbors.cNumNeighbors; - IntPtr addr = (IntPtr)0; + IntPtr addr = 0; for (int i = 0; i < count; i++) { - addr = IntPtr.Add(info, sizeof(int) * 2 + i * Marshal.SizeOf(typeof(DS_REPL_NEIGHBOR))); + addr = IntPtr.Add(info, sizeof(int) * 2 + i * Marshal.SizeOf()); ReplicationNeighbor managedNeighbor = new ReplicationNeighbor(addr, _server, _nameTable); diff --git a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationOperationCollection.cs b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationOperationCollection.cs index 297691533afb67..99b639d6eb2627 100644 --- a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationOperationCollection.cs +++ b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/ReplicationOperationCollection.cs @@ -48,11 +48,11 @@ internal void AddHelper(DS_REPL_PENDING_OPS operations, IntPtr info) // get the count int count = operations.cNumPendingOps; - IntPtr addr = (IntPtr)0; + IntPtr addr = 0; for (int i = 0; i < count; i++) { - addr = IntPtr.Add(info, Marshal.SizeOf(typeof(DS_REPL_PENDING_OPS)) + i * Marshal.SizeOf(typeof(DS_REPL_OP))); + addr = IntPtr.Add(info, Marshal.SizeOf() + i * Marshal.SizeOf()); ReplicationOperation managedOperation = new ReplicationOperation(addr, _server, _nameTable); Add(managedOperation); diff --git 
a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/TrustHelper.cs b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/TrustHelper.cs index 3c1f11fec589aa..6591d54f956ea2 100644 --- a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/TrustHelper.cs +++ b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/TrustHelper.cs @@ -220,7 +220,7 @@ internal static unsafe void SetTrustedDomainInfoStatus(DirectoryContext context, } // reconstruct the unmanaged structure to set it back - newInfo = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(Interop.Advapi32.TRUSTED_DOMAIN_INFORMATION_EX))); + newInfo = Marshal.AllocHGlobal(sizeof(Interop.Advapi32.TRUSTED_DOMAIN_INFORMATION_EX)); Marshal.StructureToPtr(domainInfo, newInfo, false); result = Interop.Advapi32.LsaSetTrustedDomainInfoByName(handle, trustedDomainName, Interop.Advapi32.TRUSTED_INFORMATION_CLASS.TrustedDomainInformationEx, newInfo); @@ -462,7 +462,7 @@ internal static void CreateTrust(DirectoryContext sourceContext, string? sourceN Marshal.PtrToStructure(info, domainInfo); AuthData = new LSA_AUTH_INFORMATION(); - fileTime = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(FileTime))); + fileTime = Marshal.AllocHGlobal(Marshal.SizeOf()); Interop.Kernel32.GetSystemTimeAsFileTime(fileTime); // set the time @@ -477,7 +477,7 @@ internal static void CreateTrust(DirectoryContext sourceContext, string? sourceN AuthData.AuthInfo = unmanagedPassword; AuthData.AuthInfoLength = password.Length * 2; // sizeof(WCHAR) - unmanagedAuthData = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(LSA_AUTH_INFORMATION))); + unmanagedAuthData = Marshal.AllocHGlobal(Marshal.SizeOf()); Marshal.StructureToPtr(AuthData, unmanagedAuthData, false); Interop.Advapi32.TRUSTED_DOMAIN_AUTH_INFORMATION AuthInfoEx = default; @@ -616,7 +616,7 @@ internal static unsafe string UpdateTrust(DirectoryContext context, string? 
sour // change the attribute value properly AuthData = new LSA_AUTH_INFORMATION(); - fileTime = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(FileTime))); + fileTime = Marshal.AllocHGlobal(Marshal.SizeOf()); Interop.Kernel32.GetSystemTimeAsFileTime(fileTime); // set the time @@ -631,7 +631,7 @@ internal static unsafe string UpdateTrust(DirectoryContext context, string? sour AuthData.AuthInfo = unmanagedPassword; AuthData.AuthInfoLength = password.Length * 2; - unmanagedAuthData = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(LSA_AUTH_INFORMATION))); + unmanagedAuthData = Marshal.AllocHGlobal(Marshal.SizeOf()); Marshal.StructureToPtr(AuthData, unmanagedAuthData, false); Interop.Advapi32.TRUSTED_DOMAIN_AUTH_INFORMATION AuthInfoEx = default; @@ -743,7 +743,7 @@ internal static unsafe void UpdateTrustDirection(DirectoryContext context, strin // change the attribute value properly AuthData = new LSA_AUTH_INFORMATION(); - fileTime = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(FileTime))); + fileTime = Marshal.AllocHGlobal(Marshal.SizeOf()); Interop.Kernel32.GetSystemTimeAsFileTime(fileTime); // set the time @@ -758,7 +758,7 @@ internal static unsafe void UpdateTrustDirection(DirectoryContext context, strin AuthData.AuthInfo = unmanagedPassword; AuthData.AuthInfoLength = password.Length * 2; - unmanagedAuthData = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(LSA_AUTH_INFORMATION))); + unmanagedAuthData = Marshal.AllocHGlobal(Marshal.SizeOf()); Marshal.StructureToPtr(AuthData, unmanagedAuthData, false); Interop.Advapi32.TRUSTED_DOMAIN_AUTH_INFORMATION AuthInfoEx; diff --git a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/Utils.cs b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/Utils.cs index dfa3f1f1e614b6..56412ca15b0279 100644 --- a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/Utils.cs +++ 
b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/ActiveDirectory/Utils.cs @@ -2038,7 +2038,7 @@ internal static bool IsSamUser() } - internal static IntPtr GetCurrentUserSid() + internal static unsafe IntPtr GetCurrentUserSid() { SafeTokenHandle? tokenHandle = null; IntPtr pBuffer = IntPtr.Zero; @@ -2120,7 +2120,7 @@ out tokenHandle } // Retrieve the user's SID from the user info - global::Interop.TOKEN_USER tokenUser = (global::Interop.TOKEN_USER)Marshal.PtrToStructure(pBuffer, typeof(global::Interop.TOKEN_USER))!; + Interop.TOKEN_USER tokenUser = *(Interop.TOKEN_USER*)pBuffer; IntPtr pUserSid = tokenUser.sidAndAttributes.Sid; // this is a reference into the NATIVE memory (into pBuffer) Debug.Assert(global::Interop.Advapi32.IsValidSid(pUserSid)); @@ -2147,7 +2147,7 @@ out tokenHandle } } - internal static IntPtr GetMachineDomainSid() + internal static unsafe IntPtr GetMachineDomainSid() { SafeLsaPolicyHandle? policyHandle = null; IntPtr pBuffer = IntPtr.Zero; @@ -2178,8 +2178,7 @@ internal static IntPtr GetMachineDomainSid() } Debug.Assert(pBuffer != IntPtr.Zero); - POLICY_ACCOUNT_DOMAIN_INFO info = (POLICY_ACCOUNT_DOMAIN_INFO) - Marshal.PtrToStructure(pBuffer, typeof(POLICY_ACCOUNT_DOMAIN_INFO))!; + POLICY_ACCOUNT_DOMAIN_INFO info = *(POLICY_ACCOUNT_DOMAIN_INFO*)pBuffer; Debug.Assert(global::Interop.Advapi32.IsValidSid(info.DomainSid)); @@ -2226,7 +2225,7 @@ internal static bool IsMachineDC(string? computerName) } DSROLE_PRIMARY_DOMAIN_INFO_BASIC dsRolePrimaryDomainInfo = - (DSROLE_PRIMARY_DOMAIN_INFO_BASIC)Marshal.PtrToStructure(dsRoleInfoPtr, typeof(DSROLE_PRIMARY_DOMAIN_INFO_BASIC))!; + Marshal.PtrToStructure(dsRoleInfoPtr)!; return (dsRolePrimaryDomainInfo.MachineRole == DSROLE_MACHINE_ROLE.DsRole_RoleBackupDomainController || dsRolePrimaryDomainInfo.MachineRole == DSROLE_MACHINE_ROLE.DsRole_RolePrimaryDomainController); @@ -2238,15 +2237,14 @@ internal static bool IsMachineDC(string? 
computerName) } } - internal static SidType ClassifySID(IntPtr pSid) + internal static unsafe SidType ClassifySID(IntPtr pSid) { Debug.Assert(global::Interop.Advapi32.IsValidSid(pSid)); // Get the issuing authority and the first RID IntPtr pIdentAuth = global::Interop.Advapi32.GetSidIdentifierAuthority(pSid); - global::Interop.Advapi32.SID_IDENTIFIER_AUTHORITY identAuth = - (global::Interop.Advapi32.SID_IDENTIFIER_AUTHORITY)Marshal.PtrToStructure(pIdentAuth, typeof(global::Interop.Advapi32.SID_IDENTIFIER_AUTHORITY))!; + Interop.Advapi32.SID_IDENTIFIER_AUTHORITY identAuth = *(Interop.Advapi32.SID_IDENTIFIER_AUTHORITY*)pIdentAuth; IntPtr pRid = global::Interop.Advapi32.GetSidSubAuthority(pSid, 0); int rid = Marshal.ReadInt32(pRid); diff --git a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/DirectorySearcher.cs b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/DirectorySearcher.cs index 02f44bcb47654e..cdc2a4951234f4 100644 --- a/src/libraries/System.DirectoryServices/src/System/DirectoryServices/DirectorySearcher.cs +++ b/src/libraries/System.DirectoryServices/src/System/DirectoryServices/DirectorySearcher.cs @@ -844,7 +844,7 @@ private unsafe void SetSearchPreferences(UnsafeNativeMethods.IDirectorySearch ad ptrVLVContexToFree = vlvValue.contextID; Marshal.Copy(_vlv.DirectoryVirtualListViewContext._context, 0, vlvValue.contextID, vlvValue.contextIDlength); } - IntPtr vlvPtr = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(AdsVLV))); + IntPtr vlvPtr = Marshal.AllocHGlobal(Marshal.SizeOf()); byte[] vlvBytes = new byte[Marshal.SizeOf(vlvValue)]; try { @@ -892,10 +892,10 @@ private unsafe void SetSearchPreferences(UnsafeNativeMethods.IDirectorySearch ad } } - private static void DoSetSearchPrefs(UnsafeNativeMethods.IDirectorySearch adsSearch, AdsSearchPreferenceInfo[] prefs) + private static unsafe void DoSetSearchPrefs(UnsafeNativeMethods.IDirectorySearch adsSearch, AdsSearchPreferenceInfo[] prefs) { - int structSize = 
Marshal.SizeOf(typeof(AdsSearchPreferenceInfo)); - IntPtr ptr = Marshal.AllocHGlobal((IntPtr)(structSize * prefs.Length)); + int structSize = sizeof(AdsSearchPreferenceInfo); + IntPtr ptr = Marshal.AllocHGlobal(structSize * prefs.Length); try { IntPtr tempPtr = ptr; diff --git a/src/libraries/System.Private.CoreLib/src/System/ComponentModel/DefaultValueAttribute.cs b/src/libraries/System.Private.CoreLib/src/System/ComponentModel/DefaultValueAttribute.cs index e86b53d640d30f..9a9ad7ef521329 100644 --- a/src/libraries/System.Private.CoreLib/src/System/ComponentModel/DefaultValueAttribute.cs +++ b/src/libraries/System.Private.CoreLib/src/System/ComponentModel/DefaultValueAttribute.cs @@ -73,7 +73,7 @@ static bool TryConvertFromInvariantString( { Type? typeDescriptorType = Type.GetType("System.ComponentModel.TypeDescriptor, System.ComponentModel.TypeConverter", throwOnError: false); MethodInfo? mi = typeDescriptorType?.GetMethod("ConvertFromInvariantString", BindingFlags.NonPublic | BindingFlags.Static); - Volatile.Write(ref s_convertFromInvariantString, mi == null ? new object() : mi.CreateDelegate(typeof(Func))); + Volatile.Write(ref s_convertFromInvariantString, mi == null ? new object() : mi.CreateDelegate>()); } if (!(s_convertFromInvariantString is Func convertFromInvariantString)) diff --git a/src/libraries/System.Private.DataContractSerialization/src/System/Runtime/Serialization/DataContract.cs b/src/libraries/System.Private.DataContractSerialization/src/System/Runtime/Serialization/DataContract.cs index bcf5c1064e31c5..79bfc1a51f2a14 100644 --- a/src/libraries/System.Private.DataContractSerialization/src/System/Runtime/Serialization/DataContract.cs +++ b/src/libraries/System.Private.DataContractSerialization/src/System/Runtime/Serialization/DataContract.cs @@ -1516,8 +1516,8 @@ private static string GetDefaultDataContractNamespace(Type type) { string? clrNs = type.Namespace ?? string.Empty; string? 
ns = - GetGlobalDataContractNamespace(clrNs, type.Module.GetCustomAttributes(typeof(ContractNamespaceAttribute)).ToArray()) ?? - GetGlobalDataContractNamespace(clrNs, type.Assembly.GetCustomAttributes(typeof(ContractNamespaceAttribute)).ToArray()); + GetGlobalDataContractNamespace(clrNs, type.Module.GetCustomAttributes().ToArray()) ?? + GetGlobalDataContractNamespace(clrNs, type.Assembly.GetCustomAttributes().ToArray()); if (ns == null) { @@ -2228,7 +2228,7 @@ private static bool IsMemberVisibleInSerializationModule(MemberInfo member) /// internal static bool IsAssemblyFriendOfSerialization(Assembly assembly) { - InternalsVisibleToAttribute[] internalsVisibleAttributes = (InternalsVisibleToAttribute[])assembly.GetCustomAttributes(typeof(InternalsVisibleToAttribute)); + InternalsVisibleToAttribute[] internalsVisibleAttributes = (InternalsVisibleToAttribute[])assembly.GetCustomAttributes(); foreach (InternalsVisibleToAttribute internalsVisibleAttribute in internalsVisibleAttributes) { string internalsVisibleAttributeAssemblyName = internalsVisibleAttribute.AssemblyName; diff --git a/src/libraries/System.Private.Xml/src/System/Xml/Serialization/ReflectionXmlSerializationReader.cs b/src/libraries/System.Private.Xml/src/System/Xml/Serialization/ReflectionXmlSerializationReader.cs index e9e2fdfe39bcaf..ba4c82f89539f6 100644 --- a/src/libraries/System.Private.Xml/src/System/Xml/Serialization/ReflectionXmlSerializationReader.cs +++ b/src/libraries/System.Private.Xml/src/System/Xml/Serialization/ReflectionXmlSerializationReader.cs @@ -651,7 +651,7 @@ private static ReflectionXmlSerializationReaderHelper.SetMemberValueDelegate Get MethodInfo getSetMemberValueDelegateWithTypeGenericMi = typeof(ReflectionXmlSerializationReaderHelper).GetMethod("GetSetMemberValueDelegateWithType", BindingFlags.Static | BindingFlags.Public)!; MethodInfo getSetMemberValueDelegateWithTypeMi = getSetMemberValueDelegateWithTypeGenericMi.MakeGenericMethod(o.GetType(), memberType); - var 
getSetMemberValueDelegateWithType = (Func)getSetMemberValueDelegateWithTypeMi.CreateDelegate(typeof(Func)); + var getSetMemberValueDelegateWithType = getSetMemberValueDelegateWithTypeMi.CreateDelegate>(); result = getSetMemberValueDelegateWithType(memberInfo); delegateCacheForType[memberName] = result; } @@ -2121,7 +2121,7 @@ public static SetMemberValueDelegate GetSetMemberValueDelegateWithType)setMethod.CreateDelegate(typeof(Action)); + setTypedDelegate = setMethod.CreateDelegate>(); } else if (memberInfo is FieldInfo fieldInfo) { diff --git a/src/libraries/System.Private.Xml/src/System/Xml/Xslt/XslCompiledTransform.cs b/src/libraries/System.Private.Xml/src/System/Xml/Xslt/XslCompiledTransform.cs index 7c5ca1d4ca3502..9b2f875ddb6c1d 100644 --- a/src/libraries/System.Private.Xml/src/System/Xml/Xslt/XslCompiledTransform.cs +++ b/src/libraries/System.Private.Xml/src/System/Xml/Xslt/XslCompiledTransform.cs @@ -229,7 +229,7 @@ public void Load(MethodInfo executeMethod, byte[] queryData, Type[]? earlyBoundT Delegate delExec = executeMethod is DynamicMethod dm ? 
dm.CreateDelegate(typeof(ExecuteDelegate)) - : executeMethod.CreateDelegate(typeof(ExecuteDelegate)); + : executeMethod.CreateDelegate(); _command = new XmlILCommand((ExecuteDelegate)delExec, new XmlQueryStaticData(queryData, earlyBoundTypes)); OutputSettings = _command.StaticData.DefaultWriterSettings; diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Marshaling/JSMarshalerArgument.JSObject.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Marshaling/JSMarshalerArgument.JSObject.cs index bdaf228f6a7200..76f8ad8fd9c9d3 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Marshaling/JSMarshalerArgument.JSObject.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Marshaling/JSMarshalerArgument.JSObject.cs @@ -107,7 +107,7 @@ public unsafe void ToJS(JSObject?[] value) return; } slot.Length = value.Length; - int bytes = value.Length * Marshal.SizeOf(typeof(JSMarshalerArgument)); + int bytes = value.Length * sizeof(JSMarshalerArgument); slot.Type = MarshalerType.Array; slot.ElementType = MarshalerType.JSObject; JSMarshalerArgument* payload = (JSMarshalerArgument*)Marshal.AllocHGlobal(bytes); diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Marshaling/JSMarshalerArgument.Object.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Marshaling/JSMarshalerArgument.Object.cs index 8a5105cbce828a..d9ed7cd4285851 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Marshaling/JSMarshalerArgument.Object.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Marshaling/JSMarshalerArgument.Object.cs @@ -373,7 
+373,7 @@ public unsafe void ToJS(object?[] value) return; } slot.Length = value.Length; - int bytes = value.Length * Marshal.SizeOf(typeof(JSMarshalerArgument)); + int bytes = value.Length * sizeof(JSMarshalerArgument); slot.Type = MarshalerType.Array; JSMarshalerArgument* payload = (JSMarshalerArgument*)Marshal.AllocHGlobal(bytes); Unsafe.InitBlock(payload, 0, (uint)bytes); diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Marshaling/JSMarshalerArgument.String.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Marshaling/JSMarshalerArgument.String.cs index 247aad1ec613a6..d0e2d9cb7f9f71 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Marshaling/JSMarshalerArgument.String.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Marshaling/JSMarshalerArgument.String.cs @@ -115,7 +115,7 @@ public unsafe void ToJS(string?[] value) return; } slot.Length = value.Length; - int bytes = value.Length * Marshal.SizeOf(typeof(JSMarshalerArgument)); + int bytes = value.Length * sizeof(JSMarshalerArgument); slot.Type = MarshalerType.Array; JSMarshalerArgument* payload = (JSMarshalerArgument*)Marshal.AllocHGlobal(bytes); Unsafe.InitBlock(payload, 0, (uint)bytes); diff --git a/src/libraries/System.Runtime.Serialization.Formatters/src/System/Runtime/Serialization/Formatters/Binary/BinaryObjectInfo.cs b/src/libraries/System.Runtime.Serialization.Formatters/src/System/Runtime/Serialization/Formatters/Binary/BinaryObjectInfo.cs index 3a5476f069eb26..10449accd7ce9c 100644 --- a/src/libraries/System.Runtime.Serialization.Formatters/src/System/Runtime/Serialization/Formatters/Binary/BinaryObjectInfo.cs +++ b/src/libraries/System.Runtime.Serialization.Formatters/src/System/Runtime/Serialization/Formatters/Binary/BinaryObjectInfo.cs @@ -709,7 +709,7 
@@ private int Position(string? name) // A field on the type isn't found. See if the field has OptionalFieldAttribute. We only throw // when the assembly format is set appropriately. if (!_isSimpleAssembly && - _cache._memberInfos[i].GetCustomAttribute(typeof(OptionalFieldAttribute), inherit: false) == null) + _cache._memberInfos[i].GetCustomAttribute(inherit: false) == null) { Debug.Assert(_cache._memberNames != null); throw new SerializationException(SR.Format(SR.Serialization_MissingMember, _cache._memberNames[i], objectType, typeof(OptionalFieldAttribute).FullName)); diff --git a/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/LiteHash.Apple.cs b/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/LiteHash.Apple.cs index 57e63d8dc698c5..0b2fde720a20d4 100644 --- a/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/LiteHash.Apple.cs +++ b/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/LiteHash.Apple.cs @@ -64,7 +64,7 @@ internal LiteHash(PAL_HashAlgorithm algorithm) throw new PlatformNotSupportedException( SR.Format( SR.Cryptography_UnknownHashAlgorithm, - Enum.GetName(typeof(PAL_HashAlgorithm), algorithm))); + Enum.GetName(algorithm))); } if (_ctx.IsInvalid) @@ -159,7 +159,7 @@ internal LiteHmac(PAL_HashAlgorithm algorithm, ReadOnlySpan key, bool prei throw new PlatformNotSupportedException( SR.Format( SR.Cryptography_UnknownHashAlgorithm, - Enum.GetName(typeof(Interop.AppleCrypto.PAL_HashAlgorithm), algorithm))); + Enum.GetName(algorithm))); } if (_ctx.IsInvalid) diff --git a/src/libraries/System.Windows.Extensions/src/System/Media/SoundPlayer.cs b/src/libraries/System.Windows.Extensions/src/System/Media/SoundPlayer.cs index 6e39549fd0d604..d77919070c24df 100644 --- a/src/libraries/System.Windows.Extensions/src/System/Media/SoundPlayer.cs +++ b/src/libraries/System.Windows.Extensions/src/System/Media/SoundPlayer.cs @@ -582,9 +582,9 @@ private unsafe void 
ValidateSoundFile(string fileName) if (waveFormat == null) { int dw = ck.cksize; - if (dw < Marshal.SizeOf(typeof(Interop.WinMM.WAVEFORMATEX))) + if (dw < Marshal.SizeOf()) { - dw = Marshal.SizeOf(typeof(Interop.WinMM.WAVEFORMATEX)); + dw = Marshal.SizeOf(); } waveFormat = new Interop.WinMM.WAVEFORMATEX(); From 43dfccde968e52c455a551f9a3d9a042f1e0f663 Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Tue, 2 Apr 2024 11:54:42 -0700 Subject: [PATCH 043/132] Fix interop test log statement (#100511) --- .../ReversePInvoke/PassingByOut/PassingByOutTest.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/Interop/PInvoke/SizeParamIndex/ReversePInvoke/PassingByOut/PassingByOutTest.cs b/src/tests/Interop/PInvoke/SizeParamIndex/ReversePInvoke/PassingByOut/PassingByOutTest.cs index 5177ad15d5dc30..72cad72fc4fe61 100644 --- a/src/tests/Interop/PInvoke/SizeParamIndex/ReversePInvoke/PassingByOut/PassingByOutTest.cs +++ b/src/tests/Interop/PInvoke/SizeParamIndex/ReversePInvoke/PassingByOut/PassingByOutTest.cs @@ -159,7 +159,7 @@ public static void RunTestByOut() Console.WriteLine("\tScenario 3 : short ==> int16_t, Array_Size = -1, Return_Array_Size = 20"); Assert.True(DoCallBack_MarshalShortArray_AsParam_AsByOut(new DelShortArrByOutAsCdeclCaller(TestMethodForShortArray_AsReversePInvokeByOut_AsCdecl))); - Console.WriteLine("\t\tMarshalShortArray_AsReversePInvokeByOut_AsCdecl Failed!"); + Console.WriteLine("\t\tMarshalShortArray_AsReversePInvokeByOut_AsCdecl Passed!"); Console.WriteLine("\tScenario 4 : short ==> int16_t, Array_Size = 10, Return_Array_Size = -1"); Assert.True(DoCallBack_MarshalShortArrayReturnNegativeSize_AsParam_AsByOut(new DelShortArrByOutAsCdeclCaller(TestMethodForShortArrayReturnNegativeSize_AsReversePInvokeByOut_AsCdecl))); From a8daf55fa7dc51cc833f84765a653d5c6f774892 Mon Sep 17 00:00:00 2001 From: Jakob Botsch Nielsen Date: Tue, 2 Apr 2024 21:26:34 +0200 Subject: [PATCH 044/132] SPMI: Fix Python 3.12 warnings in superpmi.py (#100533) 
Python 3.12 prints some warnings when parsing superpmi.py due to some insufficiently escaped characters in some strings. --- src/coreclr/scripts/superpmi.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/coreclr/scripts/superpmi.py b/src/coreclr/scripts/superpmi.py index 00cbdfdf5ab390..99ea6e65e52bec 100644 --- a/src/coreclr/scripts/superpmi.py +++ b/src/coreclr/scripts/superpmi.py @@ -527,7 +527,7 @@ def decode_clrjit_build_string(clrjit_path): with open(clrjit_path, "rb") as fh: contents = fh.read() - match = re.search(b'RyuJIT built by ([^\0]+?) targeting ([^\0]+?)-([^\0]+?)(| \(with native PGO\)| \(without native PGO\)|)\0', contents) + match = re.search(b'RyuJIT built by ([^\0]+?) targeting ([^\0]+?)-([^\0]+?)(| \\(with native PGO\\)| \\(without native PGO\\)|)\0', contents) if match is None: return None @@ -1530,7 +1530,7 @@ def save_repro_mc_files(temp_location, coreclr_args, artifacts_base_name, repro_ def parse_replay_asserts(mch_file, replay_output): - """ Parse output from failed replay, looking for asserts and correlating them to provide the best + r""" Parse output from failed replay, looking for asserts and correlating them to provide the best repro scenarios. Look for lines like: @@ -3677,7 +3677,7 @@ def filter_local_path(path): def process_mch_files_arg(coreclr_args): - """ Process the -mch_files argument. If the argument is not specified, then download files + r""" Process the -mch_files argument. If the argument is not specified, then download files from Azure Storage and any specified private MCH stores. Any files on UNC (i.e., "\\server\share" paths on Windows) or Azure Storage stores, From 254012e84c289779b57d61bd65767f0989dab30d Mon Sep 17 00:00:00 2001 From: Parker Bibus Date: Tue, 2 Apr 2024 13:11:01 -0700 Subject: [PATCH 045/132] Allow for the manual setting of if the build is official when uploading intermediate artifacts. 
(#100528) --- eng/pipelines/common/upload-intermediate-artifacts-step.yml | 3 ++- eng/pipelines/coreclr/perf-non-wasm-jobs.yml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/eng/pipelines/common/upload-intermediate-artifacts-step.yml b/eng/pipelines/common/upload-intermediate-artifacts-step.yml index da9b1ef0b627fd..b22c60be9e3602 100644 --- a/eng/pipelines/common/upload-intermediate-artifacts-step.yml +++ b/eng/pipelines/common/upload-intermediate-artifacts-step.yml @@ -2,6 +2,7 @@ parameters: name: '' publishPackagesCondition: always() publishVSSetupCondition: false + isOfficialBuild: true steps: - task: CopyFiles@2 @@ -27,7 +28,7 @@ steps: - template: /eng/pipelines/common/templates/publish-build-artifacts.yml parameters: - isOfficialBuild: true + isOfficialBuild: ${{ parameters.isOfficialBuild }} displayName: Publish intermediate artifacts inputs: PathtoPublish: '$(Build.StagingDirectory)/IntermediateArtifacts' diff --git a/eng/pipelines/coreclr/perf-non-wasm-jobs.yml b/eng/pipelines/coreclr/perf-non-wasm-jobs.yml index 730b8895e3dbca..7a1a876ea6f6f7 100644 --- a/eng/pipelines/coreclr/perf-non-wasm-jobs.yml +++ b/eng/pipelines/coreclr/perf-non-wasm-jobs.yml @@ -288,7 +288,7 @@ jobs: runKind: micro runJobTemplate: /eng/pipelines/coreclr/templates/run-performance-job.yml logicalmachine: 'perfowl' - + # run coreclr perfviper microbenchmarks perf job - template: /eng/pipelines/common/platform-matrix.yml parameters: @@ -376,6 +376,7 @@ jobs: - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml parameters: name: MonoRuntimePacks + isOfficialBuild: false # build PerfBDN app - template: /eng/pipelines/common/platform-matrix.yml From 995989e8b33864648080c55e31116d9818b8760c Mon Sep 17 00:00:00 2001 From: Radek Zikmund <32671551+rzikm@users.noreply.github.com> Date: Tue, 2 Apr 2024 22:20:12 +0200 Subject: [PATCH 046/132] Disable System.Net.Quic tests on arm32 (#100514) --- 
.../tests/FunctionalTests/MsQuicCipherSuitesPolicyTests.cs | 2 +- .../tests/FunctionalTests/MsQuicRemoteExecutorTests.cs | 2 +- .../System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs | 2 +- .../tests/FunctionalTests/QuicConnectionTests.cs | 2 +- .../System.Net.Quic/tests/FunctionalTests/QuicListenerTests.cs | 2 +- .../QuicStreamConnectedStreamConformanceTests.cs | 2 +- .../System.Net.Quic/tests/FunctionalTests/QuicStreamTests.cs | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicCipherSuitesPolicyTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicCipherSuitesPolicyTests.cs index 45d65a0b7cdc91..8b15670ee88a23 100644 --- a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicCipherSuitesPolicyTests.cs +++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicCipherSuitesPolicyTests.cs @@ -11,7 +11,7 @@ namespace System.Net.Quic.Tests [Collection(nameof(QuicTestCollection))] [ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))] [SkipOnPlatform(TestPlatforms.Windows, "CipherSuitesPolicy is not supported on Windows")] - [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))] + [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsArmProcess))] public class MsQuicCipherSuitesPolicyTests : QuicTestBase { public MsQuicCipherSuitesPolicyTests(ITestOutputHelper output) : base(output) { } diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicRemoteExecutorTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicRemoteExecutorTests.cs index 882600acf6a2dd..f57f4aef8ec670 100644 --- a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicRemoteExecutorTests.cs +++ 
b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicRemoteExecutorTests.cs @@ -14,7 +14,7 @@ namespace System.Net.Quic.Tests { [Collection(nameof(QuicTestCollection))] [ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))] - [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))] + [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsArmProcess))] public class MsQuicRemoteExecutorTests : QuicTestBase { public MsQuicRemoteExecutorTests() diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs index b432cfc0aba28c..4b1f07a2188e83 100644 --- a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs +++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs @@ -48,7 +48,7 @@ public void Dispose() [Collection(nameof(QuicTestCollection))] [ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))] - [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))] + [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsArmProcess))] public class MsQuicTests : QuicTestBase, IClassFixture { private static byte[] s_data = "Hello world!"u8.ToArray(); diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicConnectionTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicConnectionTests.cs index 324a76f5e5693d..5125b33bec95b1 100644 --- a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicConnectionTests.cs +++ 
b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicConnectionTests.cs @@ -16,7 +16,7 @@ namespace System.Net.Quic.Tests [Collection(nameof(QuicTestCollection))] [ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))] - [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))] + [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsArmProcess))] public sealed class QuicConnectionTests : QuicTestBase { const int ExpectedErrorCode = 1234; diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicListenerTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicListenerTests.cs index c9c394fcfb19ad..6e3971764d18fb 100644 --- a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicListenerTests.cs +++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicListenerTests.cs @@ -15,7 +15,7 @@ namespace System.Net.Quic.Tests { [Collection(nameof(QuicTestCollection))] [ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))] - [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))] + [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsArmProcess))] public sealed class QuicListenerTests : QuicTestBase { public QuicListenerTests(ITestOutputHelper output) : base(output) { } diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamConnectedStreamConformanceTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamConnectedStreamConformanceTests.cs index 7b9257cdc26602..bb7285ff22d774 100644 --- 
a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamConnectedStreamConformanceTests.cs +++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamConnectedStreamConformanceTests.cs @@ -16,7 +16,7 @@ namespace System.Net.Quic.Tests { [Collection(nameof(QuicTestCollection))] [ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))] - [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))] + [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsArmProcess))] public sealed class QuicStreamConformanceTests : ConnectedStreamConformanceTests { protected override bool UsableAfterCanceledReads => false; diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamTests.cs index 9a2e421e75dc09..5bf718df7308a8 100644 --- a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamTests.cs +++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamTests.cs @@ -14,7 +14,7 @@ namespace System.Net.Quic.Tests { [Collection(nameof(QuicTestCollection))] [ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))] - [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))] + [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsArmProcess))] public sealed class QuicStreamTests : QuicTestBase { private static byte[] s_data = "Hello world!"u8.ToArray(); From e020a93284ba85903107258ab696e5e64debf70b Mon Sep 17 00:00:00 2001 From: Vladimir Vukicevic Date: Tue, 2 Apr 2024 17:24:13 -0700 Subject: [PATCH 
047/132] Enable FEATURE_PERFMAP on OSX, and update perfjitdump.cpp to work on OSX (#99986) * Enable FEATURE_PERFMAP on OSX, and update perfjitdump.cpp to work on OSX * Update PerfMapEnabled documentation * Enable only on OSX, and use PlatformGetCurrentThreadId * Manual mach_absolute_time calls * Use QueryPerformanceCounter * Cleaner QueryPerformanceFrequency verification * Use FEATURE_PERFMAP * Put back conditional, but all __APPLE__ * Fix logic error when disabling jitdump --- src/coreclr/clrdefinitions.cmake | 3 ++ src/coreclr/inc/clrconfigvalues.h | 2 +- src/coreclr/pal/src/misc/perfjitdump.cpp | 69 ++++++++++++------------ 3 files changed, 40 insertions(+), 34 deletions(-) diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake index 93cb436c42ec3d..e69da7ed4ac412 100644 --- a/src/coreclr/clrdefinitions.cmake +++ b/src/coreclr/clrdefinitions.cmake @@ -155,6 +155,9 @@ endif(CLR_CMAKE_TARGET_LINUX AND CLR_CMAKE_HOST_LINUX) if(CLR_CMAKE_TARGET_FREEBSD) add_compile_definitions(FEATURE_PERFMAP) endif(CLR_CMAKE_TARGET_FREEBSD) +if(CLR_CMAKE_TARGET_APPLE) + add_compile_definitions(FEATURE_PERFMAP) +endif(CLR_CMAKE_TARGET_APPLE) if(FEATURE_COMWRAPPERS) add_compile_definitions(FEATURE_COMWRAPPERS) diff --git a/src/coreclr/inc/clrconfigvalues.h b/src/coreclr/inc/clrconfigvalues.h index 6427f717dee947..ddc7c79506ad4e 100644 --- a/src/coreclr/inc/clrconfigvalues.h +++ b/src/coreclr/inc/clrconfigvalues.h @@ -480,7 +480,7 @@ RETAIL_CONFIG_STRING_INFO(UNSUPPORTED_ETW_ObjectAllocationEventsPerTypePerSec, W RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_ProfAPI_ValidateNGENInstrumentation, W("ProfAPI_ValidateNGENInstrumentation"), 0, "This flag enables additional validations when using the IMetaDataEmit APIs for NGEN'ed images to ensure only supported edits are made.") #ifdef FEATURE_PERFMAP -RETAIL_CONFIG_DWORD_INFO(EXTERNAL_PerfMapEnabled, W("PerfMapEnabled"), 0, "This flag is used on Linux to enable writing /tmp/perf-$pid.map. 
It is disabled by default") +RETAIL_CONFIG_DWORD_INFO(EXTERNAL_PerfMapEnabled, W("PerfMapEnabled"), 0, "This flag is used on Linux and macOS to enable writing /tmp/perf-$pid.map. It is disabled by default") RETAIL_CONFIG_STRING_INFO_EX(EXTERNAL_PerfMapJitDumpPath, W("PerfMapJitDumpPath"), "Specifies a path to write the perf jitdump file. Defaults to /tmp", CLRConfig::LookupOptions::TrimWhiteSpaceFromStringValue) RETAIL_CONFIG_DWORD_INFO(EXTERNAL_PerfMapIgnoreSignal, W("PerfMapIgnoreSignal"), 0, "When perf map is enabled, this option will configure the specified signal to be accepted and ignored as a marker in the perf logs. It is disabled by default") RETAIL_CONFIG_DWORD_INFO(EXTERNAL_PerfMapShowOptimizationTiers, W("PerfMapShowOptimizationTiers"), 1, "Shows optimization tiers in the perf map for methods, as part of the symbol name. Useful for seeing separate stack frames for different optimization tiers of each method.") diff --git a/src/coreclr/pal/src/misc/perfjitdump.cpp b/src/coreclr/pal/src/misc/perfjitdump.cpp index 50b0f2c6dadcf6..6223d533ac7f78 100644 --- a/src/coreclr/pal/src/misc/perfjitdump.cpp +++ b/src/coreclr/pal/src/misc/perfjitdump.cpp @@ -2,15 +2,15 @@ // The .NET Foundation licenses this file to you under the MIT license. 
// =========================================================================== -#if defined(__linux__) -#define JITDUMP_SUPPORTED -#endif - #include "pal/palinternal.h" #include "pal/dbgmsg.h" #include +#if defined(__linux__) || defined(__APPLE__) +#define JITDUMP_SUPPORTED +#endif + #ifdef JITDUMP_SUPPORTED #include @@ -61,24 +61,11 @@ namespace JIT_CODE_LOAD = 0, }; - uint64_t GetTimeStampNS() + static uint64_t GetTimeStampNS() { -#if HAVE_CLOCK_MONOTONIC - struct timespec ts; - int result = clock_gettime(CLOCK_MONOTONIC, &ts); - - if (result != 0) - { - ASSERT("clock_gettime(CLOCK_MONOTONIC) failed: %d\n", result); - return 0; - } - else - { - return ts.tv_sec * 1000000000ULL + ts.tv_nsec; - } -#else - #error "The PAL jitdump requires clock_gettime(CLOCK_MONOTONIC) to be supported." -#endif + LARGE_INTEGER result; + QueryPerformanceCounter(&result); + return result.QuadPart; } struct FileHeader @@ -115,7 +102,7 @@ namespace { JitCodeLoadRecord() : pid(getpid()), - tid(syscall(SYS_gettid)) + tid((uint32_t)PlatformGetCurrentThreadId()) { header.id = JIT_CODE_LOAD; header.timestamp = GetTimeStampNS(); @@ -170,6 +157,19 @@ struct PerfJitDumpState { int result = 0; + // On platforms where JITDUMP is used, the PAL QueryPerformanceFrequency + // returns tccSecondsToNanoSeconds, meaning QueryPerformanceCounter + // will return a direct nanosecond value. If this isn't true, + // then some other method will need to be used to implement GetTimeStampNS. + // Validate this is true once in Start here. + LARGE_INTEGER freq; + QueryPerformanceFrequency(&freq); + if (freq.QuadPart != tccSecondsToNanoSeconds) + { + _ASSERTE(!"QueryPerformanceFrequency does not return tccSecondsToNanoSeconds. 
Implement JITDUMP GetTimeStampNS directly for this platform.\n"); + FatalError(); + } + // Write file header FileHeader header; @@ -203,12 +203,18 @@ struct PerfJitDumpState if (result == -1) return FatalError(); +#if !defined(__APPLE__) // mmap jitdump file - // this is a marker for perf inject to find the jitdumpfile + // this is a marker for perf inject to find the jitdumpfile on linux. + // On OSX, samply and others hook open and mmap is not needed. It also fails on OSX, + // likely because of PROT_EXEC and hardened runtime mmapAddr = mmap(nullptr, sizeof(FileHeader), PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0); if (mmapAddr == MAP_FAILED) return FatalError(); +#else + mmapAddr = NULL; +#endif enabled = true; @@ -308,16 +314,13 @@ struct PerfJitDumpState { enabled = false; - if (result != 0) - return FatalError(); - - if (!enabled) - goto exit; - - result = munmap(mmapAddr, sizeof(FileHeader)); + if (mmapAddr != NULL) + { + result = munmap(mmapAddr, sizeof(FileHeader)); - if (result == -1) - return FatalError(); + if (result == -1) + return FatalError(); + } mmapAddr = MAP_FAILED; @@ -333,7 +336,7 @@ struct PerfJitDumpState fd = -1; } -exit: + return 0; } }; From 85e8f688ffaf02743da29adff95e98ebd22a6f53 Mon Sep 17 00:00:00 2001 From: Elinor Fung Date: Tue, 2 Apr 2024 19:13:30 -0700 Subject: [PATCH 048/132] Cleanup of `fx_resolver_t` and tests in `NativeHostApis` (#100542) Slight cleanup of `fx_resolver_t` and `NativeHostApis` tests in preparation for #99027: - Collapse `reconcile_fx_references_helper` into `reconcile_fx_references` - Make `NativeHostApis` tests / `HostApiInvokerApp` more consistent in how they log and validate results --- .../tests/AppHost.Bundle.Tests/BundleProbe.cs | 3 +- .../Projects/HostApiInvokerApp/HostFXR.cs | 76 +++---- .../HostApiInvokerApp/HostRuntimeContract.cs | 4 +- .../Projects/HostApiInvokerApp/Program.cs | 9 +- .../HostActivation.Tests/NativeHostApis.cs | 208 ++++++++---------- .../Assertions/CommandResultAssertions.cs | 4 +- 
src/installer/tests/TestUtils/Constants.cs | 1 + src/native/corehost/fxr/fx_resolver.cpp | 84 +++---- src/native/corehost/fxr/fx_resolver.h | 6 +- 9 files changed, 165 insertions(+), 230 deletions(-) diff --git a/src/installer/tests/AppHost.Bundle.Tests/BundleProbe.cs b/src/installer/tests/AppHost.Bundle.Tests/BundleProbe.cs index c74a9d211eea95..6c7fc9f3f43e97 100644 --- a/src/installer/tests/AppHost.Bundle.Tests/BundleProbe.cs +++ b/src/installer/tests/AppHost.Bundle.Tests/BundleProbe.cs @@ -35,8 +35,7 @@ private void SingleFileApp_ProbeFiles() }; var result = Command.Create(singleFile, $"host_runtime_contract.bundle_probe {string.Join(" ", itemsToProbe.Select(i => i.Path))}") - .CaptureStdErr() - .CaptureStdOut() + .EnableTracingAndCaptureOutputs() .Execute(); result.Should().Pass(); diff --git a/src/installer/tests/Assets/Projects/HostApiInvokerApp/HostFXR.cs b/src/installer/tests/Assets/Projects/HostApiInvokerApp/HostFXR.cs index 76dc6db1d65f2f..3dc91d5ea69f7b 100644 --- a/src/installer/tests/Assets/Projects/HostApiInvokerApp/HostFXR.cs +++ b/src/installer/tests/Assets/Projects/HostApiInvokerApp/HostFXR.cs @@ -106,62 +106,48 @@ internal static extern int hostfxr_get_dotnet_environment_info( /// /// Test invoking the native hostfxr api hostfxr_resolve_sdk2 /// - /// hostfxr_get_available_sdks - /// Directory of dotnet executable - /// Working directory where search for global.json begins - /// Flags + /// Directory of dotnet executable + /// Working directory where search for global.json begins + /// Flags static void Test_hostfxr_resolve_sdk2(string[] args) { - if (args.Length != 4) + if (args.Length != 3) { throw new ArgumentException("Invalid number of arguments passed"); } var data = new List<(hostfxr.hostfxr_resolve_sdk2_result_key_t, string)>(); int rc = hostfxr.hostfxr_resolve_sdk2( - exe_dir: args[1], - working_dir: args[2], - flags: Enum.Parse(args[3]), + exe_dir: args[0], + working_dir: args[1], + flags: Enum.Parse(args[2]), result: (key, value) => 
data.Add((key, value))); - if (rc == 0) - { - Console.WriteLine("hostfxr_resolve_sdk2:Success"); - } - else - { - Console.WriteLine($"hostfxr_resolve_sdk2:Fail[{rc}]"); - } - - Console.WriteLine($"hostfxr_resolve_sdk2 data:[{string.Join(';', data)}]"); + string api = nameof(hostfxr.hostfxr_resolve_sdk2); + LogResult(api, rc); + Console.WriteLine($"{api} data:[{string.Join(';', data)}]"); } /// /// Test invoking the native hostfxr api hostfxr_get_available_sdks /// - /// hostfxr_get_available_sdks - /// Directory of dotnet executable + /// Directory of dotnet executable static void Test_hostfxr_get_available_sdks(string[] args) { - if (args.Length != 2) + if (args.Length != 1) { throw new ArgumentException("Invalid number of arguments passed"); } string[] sdks = null; int rc = hostfxr.hostfxr_get_available_sdks( - exe_dir: args[1], + exe_dir: args[0], (sdk_count, sdk_dirs) => sdks = sdk_dirs); - if (rc == 0) - { - Console.WriteLine("hostfxr_get_available_sdks:Success"); - Console.WriteLine($"hostfxr_get_available_sdks sdks:[{string.Join(';', sdks)}]"); - } - else - { - Console.WriteLine($"hostfxr_get_available_sdks:Fail[{rc}]"); - } + string api = nameof(hostfxr.hostfxr_get_available_sdks); + LogResult(api, rc); + if (sdks != null) + Console.WriteLine($"{api} sdks:[{string.Join(';', sdks)}]"); } static void Test_hostfxr_set_error_writer(string[] args) @@ -193,13 +179,12 @@ static void Test_hostfxr_set_error_writer(string[] args) /// /// Test that invokes native api hostfxr_get_dotnet_environment_info. 
/// - /// hostfxr_get_dotnet_environment_info - /// (Optional) Path to the directory with dotnet.exe + /// (Optional) Path to the directory with dotnet.exe static void Test_hostfxr_get_dotnet_environment_info(string[] args) { string dotnetExeDir = null; - if (args.Length >= 2) - dotnetExeDir = args[1]; + if (args.Length >= 1) + dotnetExeDir = args[0]; string hostfxr_version; string hostfxr_commit_hash; @@ -254,21 +239,20 @@ static void Test_hostfxr_get_dotnet_environment_info(string[] args) result: result_fn, result_context: new IntPtr(42)); - if (rc != 0) - { - Console.WriteLine($"hostfxr_get_dotnet_environment_info:Fail[{rc}]"); - } - - Console.WriteLine($"hostfxr_get_dotnet_environment_info sdk versions:[{string.Join(";", sdks.Select(s => s.version).ToList())}]"); - Console.WriteLine($"hostfxr_get_dotnet_environment_info sdk paths:[{string.Join(";", sdks.Select(s => s.path).ToList())}]"); + string api = nameof(hostfxr.hostfxr_get_dotnet_environment_info); + LogResult(api, rc); - Console.WriteLine($"hostfxr_get_dotnet_environment_info framework names:[{string.Join(";", frameworks.Select(f => f.name).ToList())}]"); - Console.WriteLine($"hostfxr_get_dotnet_environment_info framework versions:[{string.Join(";", frameworks.Select(f => f.version).ToList())}]"); - Console.WriteLine($"hostfxr_get_dotnet_environment_info framework paths:[{string.Join(";", frameworks.Select(f => f.path).ToList())}]"); + Console.WriteLine($"{api} sdk versions:[{string.Join(";", sdks.Select(s => s.version).ToList())}]"); + Console.WriteLine($"{api} sdk paths:[{string.Join(";", sdks.Select(s => s.path).ToList())}]"); - Console.WriteLine("hostfxr_get_dotnet_environment_info:Success"); + Console.WriteLine($"{api} framework names:[{string.Join(";", frameworks.Select(f => f.name).ToList())}]"); + Console.WriteLine($"{api} framework versions:[{string.Join(";", frameworks.Select(f => f.version).ToList())}]"); + Console.WriteLine($"{api} framework paths:[{string.Join(";", frameworks.Select(f => 
f.path).ToList())}]"); } + private static void LogResult(string apiName, int rc) + => Console.WriteLine(rc == 0 ? $"{apiName}:Success" : $"{apiName}:Fail[0x{rc:x}]"); + public static bool RunTest(string apiToTest, string[] args) { switch (apiToTest) diff --git a/src/installer/tests/Assets/Projects/HostApiInvokerApp/HostRuntimeContract.cs b/src/installer/tests/Assets/Projects/HostApiInvokerApp/HostRuntimeContract.cs index 99ebe3adfe5d08..4ecf59b2761ace 100644 --- a/src/installer/tests/Assets/Projects/HostApiInvokerApp/HostRuntimeContract.cs +++ b/src/installer/tests/Assets/Projects/HostApiInvokerApp/HostRuntimeContract.cs @@ -113,10 +113,10 @@ public static bool RunTest(string apiToTest, string[] args) switch (apiToTest) { case $"{nameof(host_runtime_contract)}.{nameof(host_runtime_contract.get_runtime_property)}": - Test_get_runtime_property(args[1..]); + Test_get_runtime_property(args); break; case $"{nameof(host_runtime_contract)}.{nameof(host_runtime_contract.bundle_probe)}": - Test_bundle_probe(args[1..]); + Test_bundle_probe(args); break; default: return false; diff --git a/src/installer/tests/Assets/Projects/HostApiInvokerApp/Program.cs b/src/installer/tests/Assets/Projects/HostApiInvokerApp/Program.cs index 2831ed8c3d4878..f14003995e6a2c 100644 --- a/src/installer/tests/Assets/Projects/HostApiInvokerApp/Program.cs +++ b/src/installer/tests/Assets/Projects/HostApiInvokerApp/Program.cs @@ -31,9 +31,6 @@ public static void MainCore(string[] args) Console.WriteLine("Hello World!"); Console.WriteLine(string.Join(Environment.NewLine, args)); - // Enable tracing so that test assertion failures are easier to diagnose. - Environment.SetEnvironmentVariable("COREHOST_TRACE", "1"); - // If requested, test multilevel lookup using fake Global SDK directories: // 1. using a fake ProgramFiles location // 2. 
using a fake SDK Self-Registered location @@ -61,13 +58,13 @@ public static void MainCore(string[] args) } string apiToTest = args[0]; - if (HostFXR.RunTest(apiToTest, args)) + if (HostFXR.RunTest(apiToTest, args[1..])) return; - if (HostPolicy.RunTest(apiToTest, args)) + if (HostPolicy.RunTest(apiToTest, args[1..])) return; - if (HostRuntimeContract.RunTest(apiToTest, args)) + if (HostRuntimeContract.RunTest(apiToTest, args[1..])) return; throw new ArgumentException($"Invalid API to test passed as args[0]): {apiToTest}"); diff --git a/src/installer/tests/HostActivation.Tests/NativeHostApis.cs b/src/installer/tests/HostActivation.Tests/NativeHostApis.cs index 959239d08d0301..0c1ba3116d49a9 100644 --- a/src/installer/tests/HostActivation.Tests/NativeHostApis.cs +++ b/src/installer/tests/HostActivation.Tests/NativeHostApis.cs @@ -4,7 +4,7 @@ using System; using System.Collections.Generic; using System.IO; - +using FluentAssertions; using Microsoft.DotNet.Cli.Build; using Microsoft.DotNet.TestUtils; using Xunit; @@ -20,6 +20,13 @@ public NativeHostApis(SharedTestState fixture) sharedTestState = fixture; } + private class ApiNames + { + public const string hostfxr_get_available_sdks = nameof(hostfxr_get_available_sdks); + public const string hostfxr_resolve_sdk2 = nameof(hostfxr_resolve_sdk2); + public const string hostfxr_get_dotnet_environment_info = nameof(hostfxr_get_dotnet_environment_info); + } + private class SdkResolutionFixture { private readonly TestApp _app; @@ -124,17 +131,17 @@ public void Hostfxr_get_available_sdks_with_multilevel_lookup() Path.Combine(f.LocalSdkDir, "5.6.7-preview"), }); + string api = ApiNames.hostfxr_get_available_sdks; using (TestOnlyProductBehavior.Enable(f.Dotnet.GreatestVersionHostFxrFilePath)) { - f.Dotnet.Exec(f.AppDll, new[] { "hostfxr_get_available_sdks", f.ExeDir }) + f.Dotnet.Exec(f.AppDll, api, f.ExeDir) .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_PROGRAM_FILES", f.ProgramFiles) 
.EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_SELF_REGISTERED", f.SelfRegistered) - .CaptureStdOut() - .CaptureStdErr() + .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() - .And.HaveStdOutContaining("hostfxr_get_available_sdks:Success") - .And.HaveStdOutContaining($"hostfxr_get_available_sdks sdks:[{expectedList}]"); + .And.ReturnStatusCode(api, Constants.ErrorCode.Success) + .And.HaveStdOutContaining($"{api} sdks:[{expectedList}]"); } } @@ -152,13 +159,13 @@ public void Hostfxr_get_available_sdks_without_multilevel_lookup() Path.Combine(f.LocalSdkDir, "5.6.7-preview"), }); - f.Dotnet.Exec(f.AppDll, new[] { "hostfxr_get_available_sdks", f.ExeDir }) - .CaptureStdOut() - .CaptureStdErr() + string api = ApiNames.hostfxr_get_available_sdks; + f.Dotnet.Exec(f.AppDll, api, f.ExeDir) + .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() - .And.HaveStdOutContaining("hostfxr_get_available_sdks:Success") - .And.HaveStdOutContaining($"hostfxr_get_available_sdks sdks:[{expectedList}]"); + .And.ReturnStatusCode(api, Constants.ErrorCode.Success) + .And.HaveStdOutContaining($"{api} sdks:[{expectedList}]"); } [Fact] @@ -173,13 +180,13 @@ public void Hostfxr_resolve_sdk2_without_global_json_or_flags() ("resolved_sdk_dir", Path.Combine(f.LocalSdkDir, "5.6.7-preview")), }); - f.Dotnet.Exec(f.AppDll, new[] { "hostfxr_resolve_sdk2", f.ExeDir, f.WorkingDir, "0" }) - .CaptureStdOut() - .CaptureStdErr() + string api = ApiNames.hostfxr_resolve_sdk2; + f.Dotnet.Exec(f.AppDll, api, f.ExeDir, f.WorkingDir, "0") + .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() - .And.HaveStdOutContaining("hostfxr_resolve_sdk2:Success") - .And.HaveStdOutContaining($"hostfxr_resolve_sdk2 data:[{expectedData}]"); + .And.ReturnStatusCode(api, Constants.ErrorCode.Success) + .And.HaveStdOutContaining($"{api} data:[{expectedData}]"); } [Fact] @@ -194,13 +201,13 @@ public void Hostfxr_resolve_sdk2_without_global_json_and_disallowing_previews() ("resolved_sdk_dir", 
Path.Combine(f.LocalSdkDir, "1.2.3")) }); - f.Dotnet.Exec(f.AppDll, new[] { "hostfxr_resolve_sdk2", f.ExeDir, f.WorkingDir, "disallow_prerelease" }) - .CaptureStdOut() - .CaptureStdErr() + string api = ApiNames.hostfxr_resolve_sdk2; + f.Dotnet.Exec(f.AppDll, api, f.ExeDir, f.WorkingDir, "disallow_prerelease") + .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() - .And.HaveStdOutContaining("hostfxr_resolve_sdk2:Success") - .And.HaveStdOutContaining($"hostfxr_resolve_sdk2 data:[{expectedData}]"); + .And.ReturnStatusCode(api, Constants.ErrorCode.Success) + .And.HaveStdOutContaining($"{api} data:[{expectedData}]"); } [Fact] @@ -221,21 +228,20 @@ public void Hostfxr_resolve_sdk2_with_global_json_and_disallowing_previews() ("requested_version", requestedVersion), }); - f.Dotnet.Exec(f.AppDll, new[] { "hostfxr_resolve_sdk2", f.ExeDir, f.WorkingDir, "disallow_prerelease" }) - .CaptureStdOut() - .CaptureStdErr() + string api = ApiNames.hostfxr_resolve_sdk2; + f.Dotnet.Exec(f.AppDll, api, f.ExeDir, f.WorkingDir, "disallow_prerelease") + .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() - .And.HaveStdOutContaining("hostfxr_resolve_sdk2:Success") - .And.HaveStdOutContaining($"hostfxr_resolve_sdk2 data:[{expectedData}]"); + .And.ReturnStatusCode(api, Constants.ErrorCode.Success) + .And.HaveStdOutContaining($"{api} data:[{expectedData}]"); } [Fact] public void Hostfxr_corehost_set_error_writer_test() { TestContext.BuiltDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, "Test_hostfxr_set_error_writer") - .CaptureStdOut() - .CaptureStdErr() + .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass(); } @@ -279,17 +285,17 @@ public void Hostfxr_get_dotnet_environment_info_dotnet_root_only() Path.Combine(f.LocalFrameworksDir, "HostFxr.Test.C") }); - f.Dotnet.Exec(f.AppDll, new[] { "hostfxr_get_dotnet_environment_info", f.ExeDir }) - .CaptureStdOut() - .CaptureStdErr() - .Execute() - .Should().Pass() - 
.And.HaveStdOutContaining("hostfxr_get_dotnet_environment_info:Success") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info sdk versions:[{expectedSdkVersions}]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info sdk paths:[{expectedSdkPaths}]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info framework names:[{expectedFrameworkNames}]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info framework versions:[{expectedFrameworkVersions}]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info framework paths:[{expectedFrameworkPaths}]"); + string api = ApiNames.hostfxr_get_dotnet_environment_info; + f.Dotnet.Exec(f.AppDll, api, f.ExeDir) + .EnableTracingAndCaptureOutputs() + .Execute() + .Should().Pass() + .And.ReturnStatusCode(api, Constants.ErrorCode.Success) + .And.HaveStdOutContaining($"{api} sdk versions:[{expectedSdkVersions}]") + .And.HaveStdOutContaining($"{api} sdk paths:[{expectedSdkPaths}]") + .And.HaveStdOutContaining($"{api} framework names:[{expectedFrameworkNames}]") + .And.HaveStdOutContaining($"{api} framework versions:[{expectedFrameworkVersions}]") + .And.HaveStdOutContaining($"{api} framework paths:[{expectedFrameworkPaths}]"); } [Fact] @@ -334,19 +340,19 @@ public void Hostfxr_get_dotnet_environment_info_with_multilevel_lookup_with_dotn using (TestOnlyProductBehavior.Enable(f.Dotnet.GreatestVersionHostFxrFilePath)) { - f.Dotnet.Exec(f.AppDll, new[] { "hostfxr_get_dotnet_environment_info", f.ExeDir }) - .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_PROGRAM_FILES", f.ProgramFiles) - .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_SELF_REGISTERED", f.SelfRegistered) - .CaptureStdOut() - .CaptureStdErr() - .Execute() - .Should().Pass() - .And.HaveStdOutContaining("hostfxr_get_dotnet_environment_info:Success") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info sdk versions:[{expectedSdkVersions}]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info sdk 
paths:[{expectedSdkPaths}]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info framework names:[{expectedFrameworkNames}]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info framework versions:[{expectedFrameworkVersions}]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info framework paths:[{expectedFrameworkPaths}]"); + string api = ApiNames.hostfxr_get_dotnet_environment_info; + f.Dotnet.Exec(f.AppDll, new[] { api, f.ExeDir }) + .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_PROGRAM_FILES", f.ProgramFiles) + .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_SELF_REGISTERED", f.SelfRegistered) + .EnableTracingAndCaptureOutputs() + .Execute() + .Should().Pass() + .And.ReturnStatusCode(api, Constants.ErrorCode.Success) + .And.HaveStdOutContaining($"{api} sdk versions:[{expectedSdkVersions}]") + .And.HaveStdOutContaining($"{api} sdk paths:[{expectedSdkPaths}]") + .And.HaveStdOutContaining($"{api} framework names:[{expectedFrameworkNames}]") + .And.HaveStdOutContaining($"{api} framework versions:[{expectedFrameworkVersions}]") + .And.HaveStdOutContaining($"{api} framework paths:[{expectedFrameworkPaths}]"); } } @@ -362,94 +368,66 @@ public void Hostfxr_get_dotnet_environment_info_with_multilevel_lookup_only() { // We pass f.WorkingDir so that we don't resolve dotnet_dir to the global installation // in the native side. 
- f.Dotnet.Exec(f.AppDll, new[] { "hostfxr_get_dotnet_environment_info", f.WorkingDir }) - .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_PROGRAM_FILES", f.ProgramFiles) - .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_SELF_REGISTERED", f.SelfRegistered) - .CaptureStdOut() - .CaptureStdErr() - .Execute() - .Should().Pass() - .And.HaveStdOutContaining("hostfxr_get_dotnet_environment_info:Success") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info sdk versions:[]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info sdk paths:[]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info framework names:[]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info framework versions:[]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info framework paths:[]"); + string api = ApiNames.hostfxr_get_dotnet_environment_info; + f.Dotnet.Exec(f.AppDll, api, f.WorkingDir) + .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_PROGRAM_FILES", f.ProgramFiles) + .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_SELF_REGISTERED", f.SelfRegistered) + .EnableTracingAndCaptureOutputs() + .Execute() + .Should().Pass() + .And.ReturnStatusCode(api, Constants.ErrorCode.Success) + .And.HaveStdOutContaining($"{api} sdk versions:[]") + .And.HaveStdOutContaining($"{api} sdk paths:[]") + .And.HaveStdOutContaining($"{api} framework names:[]") + .And.HaveStdOutContaining($"{api} framework versions:[]") + .And.HaveStdOutContaining($"{api} framework paths:[]"); } } [Fact] - [PlatformSpecific(TestPlatforms.Windows)] // The test setup only works on Windows (and MLL was Windows-only anyway) - public void Hostfxr_get_dotnet_environment_info_with_multilevel_lookup_only_self_register_program_files() + public void Hostfxr_get_dotnet_environment_info_global_install_path() { + string api = ApiNames.hostfxr_get_dotnet_environment_info; var f = new SdkResolutionFixture(sharedTestState); - - using 
(TestOnlyProductBehavior.Enable(f.Dotnet.GreatestVersionHostFxrFilePath)) - { - // We pass f.WorkingDir so that we don't resolve dotnet_dir to the global installation - // in the native side. - f.Dotnet.Exec(f.AppDll, new[] { "hostfxr_get_dotnet_environment_info", f.WorkingDir }) - .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_PROGRAM_FILES", f.ProgramFiles) - // Test with a self-registered path the same as ProgramFiles, with a trailing slash. Expect this to be de-duped - .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_SELF_REGISTERED", Path.Combine(f.ProgramFiles, "dotnet") + Path.DirectorySeparatorChar) - .CaptureStdOut() - .CaptureStdErr() + f.Dotnet.Exec(f.AppDll, api) + .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() - .And.HaveStdOutContaining("hostfxr_get_dotnet_environment_info:Success") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info framework names:[]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info framework versions:[]") - .And.HaveStdOutContaining($"hostfxr_get_dotnet_environment_info framework paths:[]"); - } - } - - [Fact] - public void Hostfxr_get_dotnet_environment_info_global_install_path() - { - var f = new SdkResolutionFixture(sharedTestState); - - f.Dotnet.Exec(f.AppDll, new[] { "hostfxr_get_dotnet_environment_info" }) - .CaptureStdOut() - .CaptureStdErr() - .Execute() - .Should().Pass() - .And.HaveStdOutContaining("hostfxr_get_dotnet_environment_info:Success"); + .And.ReturnStatusCode(api, Constants.ErrorCode.Success); } [Fact] public void Hostfxr_get_dotnet_environment_info_result_is_nullptr_fails() { var f = new SdkResolutionFixture(sharedTestState); - - f.Dotnet.Exec(f.AppDll, new[] { "hostfxr_get_dotnet_environment_info", "test_invalid_result_ptr" }) + string api = ApiNames.hostfxr_get_dotnet_environment_info; + f.Dotnet.Exec(f.AppDll, api, "test_invalid_result_ptr") .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() - // 0x80008081 (InvalidArgFailure) - 
.And.HaveStdOutContaining("hostfxr_get_dotnet_environment_info:Fail[-2147450751]") - .And.HaveStdErrContaining("hostfxr_get_dotnet_environment_info received an invalid argument: result should not be null."); + .And.ReturnStatusCode(api, Constants.ErrorCode.InvalidArgFailure) + .And.HaveStdErrContaining($"{api} received an invalid argument: result should not be null."); } [Fact] public void Hostfxr_get_dotnet_environment_info_reserved_is_not_nullptr_fails() { var f = new SdkResolutionFixture(sharedTestState); - - f.Dotnet.Exec(f.AppDll, new[] { "hostfxr_get_dotnet_environment_info", "test_invalid_reserved_ptr" }) + string api = ApiNames.hostfxr_get_dotnet_environment_info; + f.Dotnet.Exec(f.AppDll, api, "test_invalid_reserved_ptr") .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() // 0x80008081 (InvalidArgFailure) - .And.HaveStdOutContaining("hostfxr_get_dotnet_environment_info:Fail[-2147450751]") - .And.HaveStdErrContaining("hostfxr_get_dotnet_environment_info received an invalid argument: reserved should be null."); + .And.ReturnStatusCode(api, Constants.ErrorCode.InvalidArgFailure) + .And.HaveStdErrContaining($"{api} received an invalid argument: reserved should be null."); } [Fact] public void Hostpolicy_corehost_set_error_writer_test() { TestContext.BuiltDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, "Test_corehost_set_error_writer") - .CaptureStdOut() - .CaptureStdErr() + .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass(); } @@ -459,8 +437,7 @@ public void HostRuntimeContract_get_runtime_property() { TestApp app = sharedTestState.HostApiInvokerApp; TestContext.BuiltDotNet.Exec(app.AppDll, "host_runtime_contract.get_runtime_property", "APP_CONTEXT_BASE_DIRECTORY", "RUNTIME_IDENTIFIER", "DOES_NOT_EXIST", "ENTRY_ASSEMBLY_NAME") - .CaptureStdOut() - .CaptureStdErr() + .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() .And.HaveStdOutContaining($"APP_CONTEXT_BASE_DIRECTORY = {Path.GetDirectoryName(app.AppDll)}") @@ -473,8 
+450,7 @@ public void HostRuntimeContract_get_runtime_property() public void HostRuntimeContract_bundle_probe() { TestContext.BuiltDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, "host_runtime_contract.bundle_probe", "APP_CONTEXT_BASE_DIRECTORY", "RUNTIME_IDENTIFIER", "DOES_NOT_EXIST", "ENTRY_ASSEMBLY_NAME") - .CaptureStdOut() - .CaptureStdErr() + .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() .And.HaveStdOutContaining("host_runtime_contract.bundle_probe is not set"); @@ -501,4 +477,14 @@ public void Dispose() } } } + + public static class HostApisCommandResultExtensions + { + public static AndConstraint ReturnStatusCode(this CommandResultAssertions assertion, string apiName, int statusCode) + { + return statusCode == Constants.ErrorCode.Success + ? assertion.HaveStdOutContaining($"{apiName}:Success") + : assertion.HaveStdOutContaining($"{apiName}:Fail[0x{statusCode:x}]"); + } + } } diff --git a/src/installer/tests/TestUtils/Assertions/CommandResultAssertions.cs b/src/installer/tests/TestUtils/Assertions/CommandResultAssertions.cs index f6958a1a5391b7..a43b1406189509 100644 --- a/src/installer/tests/TestUtils/Assertions/CommandResultAssertions.cs +++ b/src/installer/tests/TestUtils/Assertions/CommandResultAssertions.cs @@ -112,14 +112,14 @@ public AndConstraint HaveStdErrMatching(string pattern, public AndConstraint NotHaveStdOut() { Execute.Assertion.ForCondition(string.IsNullOrEmpty(Result.StdOut)) - .FailWith($"Expected command to not output to stdout but it was not:{GetDiagnosticsInfo()}"); + .FailWith($"Expected command to not output to stdout but it did:{GetDiagnosticsInfo()}"); return new AndConstraint(this); } public AndConstraint NotHaveStdErr() { Execute.Assertion.ForCondition(string.IsNullOrEmpty(Result.StdErr)) - .FailWith($"Expected command to not output to stderr but it was not:{GetDiagnosticsInfo()}"); + .FailWith($"Expected command to not output to stderr but it did:{GetDiagnosticsInfo()}"); return new AndConstraint(this); } 
diff --git a/src/installer/tests/TestUtils/Constants.cs b/src/installer/tests/TestUtils/Constants.cs index 61363f2eecc878..2a5ce2f53f5593 100644 --- a/src/installer/tests/TestUtils/Constants.cs +++ b/src/installer/tests/TestUtils/Constants.cs @@ -113,6 +113,7 @@ public static class DotnetRoot public static class ErrorCode { + public const int Success = 0; public const int InvalidArgFailure = unchecked((int)0x80008081); public const int CoreHostLibMissingFailure = unchecked((int)0x80008083); public const int ResolverInitFailure = unchecked((int)0x8000808b); diff --git a/src/native/corehost/fxr/fx_resolver.cpp b/src/native/corehost/fxr/fx_resolver.cpp index ec6e4f5ed16da5..1340af625423cb 100644 --- a/src/native/corehost/fxr/fx_resolver.cpp +++ b/src/native/corehost/fxr/fx_resolver.cpp @@ -307,25 +307,6 @@ namespace } } -StatusCode fx_resolver_t::reconcile_fx_references_helper( - const fx_reference_t& lower_fx_ref, - const fx_reference_t& higher_fx_ref, - /*out*/ fx_reference_t& effective_fx_ref) -{ - if (!lower_fx_ref.is_compatible_with_higher_version(higher_fx_ref.get_fx_version_number())) - { - // Error condition - not compatible with the other reference - display_incompatible_framework_error(higher_fx_ref.get_fx_version(), lower_fx_ref); - return StatusCode::FrameworkCompatFailure; - } - - effective_fx_ref = fx_reference_t(higher_fx_ref); // copy - effective_fx_ref.merge_roll_forward_settings_from(lower_fx_ref); - - display_compatible_framework_trace(higher_fx_ref.get_fx_version(), lower_fx_ref); - return StatusCode::Success; -} - // Reconciles two framework references into a new effective framework reference // This process is sometimes also called "soft roll forward" (soft as in no IO) // - fx_ref_a - one of the framework references to reconcile @@ -341,16 +322,24 @@ StatusCode fx_resolver_t::reconcile_fx_references( const fx_reference_t& fx_ref_b, /*out*/ fx_reference_t& effective_fx_ref) { - // The function is split into the helper because the various tracing 
messages + // Determine which framework reference is higher to do the compat check. The various tracing messages // make more sense if they're always written with higher/lower versions ordered in particular way. - if (fx_ref_a.get_fx_version_number() >= fx_ref_b.get_fx_version_number()) - { - return reconcile_fx_references_helper(fx_ref_b, fx_ref_a, effective_fx_ref); - } - else + bool is_a_higher_than_b = fx_ref_a.get_fx_version_number() >= fx_ref_b.get_fx_version_number(); + const fx_reference_t& lower_fx_ref = is_a_higher_than_b ? fx_ref_b : fx_ref_a; + const fx_reference_t& higher_fx_ref = is_a_higher_than_b ? fx_ref_a : fx_ref_b; + + if (!lower_fx_ref.is_compatible_with_higher_version(higher_fx_ref.get_fx_version_number())) { - return reconcile_fx_references_helper(fx_ref_a, fx_ref_b, effective_fx_ref); + // Error condition - not compatible with the other reference + display_incompatible_framework_error(higher_fx_ref.get_fx_version(), lower_fx_ref); + return StatusCode::FrameworkCompatFailure; } + + effective_fx_ref = fx_reference_t(higher_fx_ref); // copy + effective_fx_ref.merge_roll_forward_settings_from(lower_fx_ref); + + display_compatible_framework_trace(higher_fx_ref.get_fx_version(), lower_fx_ref); + return StatusCode::Success; } void fx_resolver_t::update_newest_references( @@ -415,7 +404,7 @@ StatusCode fx_resolver_t::read_framework( // This reconciles duplicate references to minimize the number of resolve retries. update_newest_references(config); - StatusCode rc = StatusCode::Success; + StatusCode rc; // Loop through each reference and resolve the framework for (const fx_reference_t& original_fx_ref : config.get_frameworks()) @@ -432,23 +421,20 @@ StatusCode fx_resolver_t::read_framework( const fx_reference_t& current_effective_fx_ref = m_effective_fx_references[fx_name]; fx_reference_t new_effective_fx_ref; + // Reconcile the framework reference with the most up to date so far we have for the framework. 
+ // This does not read any physical framework folders yet. + rc = reconcile_fx_references(fx_ref, current_effective_fx_ref, new_effective_fx_ref); + if (rc != StatusCode::Success) + return rc; + auto existing_framework = std::find_if( fx_definitions.begin(), fx_definitions.end(), [&](const std::unique_ptr & fx) { return fx_name == fx->get_name(); }); - if (existing_framework == fx_definitions.end()) { - // Reconcile the framework reference with the most up to date so far we have for the framework. - // This does not read any physical framework folders yet. // Since we didn't find the framework in the resolved list yet, it's OK to update the effective reference // as we haven't processed it yet. - rc = reconcile_fx_references(fx_ref, current_effective_fx_ref, new_effective_fx_ref); - if (rc) - { - break; // Error case - } - m_effective_fx_references[fx_name] = new_effective_fx_ref; // Resolve the effective framework reference against the existing physical framework folders @@ -463,7 +449,7 @@ StatusCode fx_resolver_t::read_framework( app_display_name != nullptr ? app_display_name : host_info.host_path.c_str(), get_current_arch_name()); display_missing_framework_error(fx_name, new_effective_fx_ref.get_fx_version(), pal::string_t(), host_info.dotnet_root, disable_multilevel_lookup); - return FrameworkMissingFailure; + return StatusCode::FrameworkMissingFailure; } // Do NOT update the effective reference to have the same version as the resolved framework. @@ -492,23 +478,13 @@ StatusCode fx_resolver_t::read_framework( } rc = read_framework(host_info, disable_multilevel_lookup, override_settings, new_config, &new_effective_fx_ref, fx_definitions, app_display_name); - if (rc) - { - break; // Error case - } + if (rc != StatusCode::Success) + return rc; } else { - // Reconcile the framework reference with the most up to date so far we have for the framework. 
- // Note that since we found the framework in the already resolved frameworks - // any update to the effective framework reference needs to restart the resolution process - // so that we re-resolve the framework against disk. - rc = reconcile_fx_references(fx_ref, current_effective_fx_ref, new_effective_fx_ref); - if (rc) - { - break; // Error case - } - + // Since we found the framework in the already resolved frameworks, any update to the effective framework + // reference needs to restart the resolution process so that we re-resolve the framework against disk. if (new_effective_fx_ref != current_effective_fx_ref) { display_retry_framework_trace(current_effective_fx_ref, fx_ref); @@ -522,11 +498,7 @@ StatusCode fx_resolver_t::read_framework( } } - return rc; -} - -fx_resolver_t::fx_resolver_t() -{ + return StatusCode::Success; } StatusCode fx_resolver_t::resolve_frameworks_for_app( diff --git a/src/native/corehost/fxr/fx_resolver.h b/src/native/corehost/fxr/fx_resolver.h index 35c6fd250af5ad..018294148f30d2 100644 --- a/src/native/corehost/fxr/fx_resolver.h +++ b/src/native/corehost/fxr/fx_resolver.h @@ -27,7 +27,7 @@ class fx_resolver_t const std::unordered_map &existing_framework_versions_by_name); private: - fx_resolver_t(); + fx_resolver_t() = default; void update_newest_references( const runtime_config_t& config); @@ -40,10 +40,6 @@ class fx_resolver_t fx_definition_vector_t& fx_definitions, const pal::char_t* app_display_name); - static StatusCode reconcile_fx_references_helper( - const fx_reference_t& lower_fx_ref, - const fx_reference_t& higher_fx_ref, - /*out*/ fx_reference_t& effective_fx_ref); static StatusCode reconcile_fx_references( const fx_reference_t& fx_ref_a, const fx_reference_t& fx_ref_b, From e0884ed6191c4e6839175be35d84f6ac2a2fc2da Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Tue, 2 Apr 2024 21:53:42 -0700 Subject: [PATCH 049/132] fix sve unit test (#100549) --- src/coreclr/jit/codegenarm64test.cpp | 6 +++--- 
src/coreclr/jit/emitarm64sve.cpp | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/coreclr/jit/codegenarm64test.cpp b/src/coreclr/jit/codegenarm64test.cpp index 946e38ff46707d..750daa569613ff 100644 --- a/src/coreclr/jit/codegenarm64test.cpp +++ b/src/coreclr/jit/codegenarm64test.cpp @@ -4748,7 +4748,7 @@ void CodeGen::genArm64EmitterUnitTestsSve() // IF_SVE_CE_2C theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_P0, REG_V31, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_TO_PREDICATE); // PMOV .H, [] - theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_V1, REG_P1, 0, INS_OPTS_SCALABLE_H, + theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_P1, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_TO_PREDICATE); // PMOV .H, [] // IF_SVE_CE_2D @@ -5396,7 +5396,7 @@ void CodeGen::genArm64EmitterUnitTestsSve() INS_OPTS_SCALABLE_D); // UXTW .D, /M, .D // IF_SVE_AR_4A - theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, + theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_V2, REG_V19, INS_OPTS_SCALABLE_B); // MLA ., /M, ., . theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H); // MLS ., /M, ., . 
@@ -5578,7 +5578,7 @@ void CodeGen::genArm64EmitterUnitTestsSve() INS_OPTS_SCALABLE_H); // CPY ., /Z, #{, } theEmitter->emitIns_R_R_I(INS_sve_cpy, EA_SCALABLE, REG_V1, REG_P0, -32768, INS_OPTS_SCALABLE_S); // CPY ., /Z, #{, } - theEmitter->emitIns_R_R_I(INS_sve_mov, EA_SCALABLE, REG_P0, REG_V0, 32512, + theEmitter->emitIns_R_R_I(INS_sve_mov, EA_SCALABLE, REG_V0, REG_P0, 32512, INS_OPTS_SCALABLE_D); // MOV ., /Z, #{, } // IF_SVE_BV_2A_A diff --git a/src/coreclr/jit/emitarm64sve.cpp b/src/coreclr/jit/emitarm64sve.cpp index 8a0894426cfb53..63fb80089c80b1 100644 --- a/src/coreclr/jit/emitarm64sve.cpp +++ b/src/coreclr/jit/emitarm64sve.cpp @@ -13350,8 +13350,8 @@ void emitter::emitInsSveSanityCheck(instrDesc* id) case IF_SVE_DP_2A: // ........xx...... .......MMMMddddd -- SVE saturating inc/dec vector by predicate count case IF_SVE_DN_2A: // ........xx...... .......MMMMddddd -- SVE inc/dec vector by predicate count assert(insOptsScalableAtLeastHalf(id->idInsOpt())); // xx - assert(isPredicateRegister(id->idReg1())); // MMMM - assert(isVectorRegister(id->idReg2())); // ddddd + assert(isVectorRegister(id->idReg1())); // ddddd + assert(isPredicateRegister(id->idReg2())); // MMMM assert(isScalableVectorSize(id->idOpSize())); break; From 0b542b960dc18883347e62e51d0840476a1ba966 Mon Sep 17 00:00:00 2001 From: Jakob Botsch Nielsen Date: Wed, 3 Apr 2024 07:28:51 +0200 Subject: [PATCH 050/132] JIT: Add ABI classifier for arm32 (#100526) --- src/coreclr/jit/abi.cpp | 33 +++++++ src/coreclr/jit/abi.h | 30 ++++++ src/coreclr/jit/lclvars.cpp | 26 ++++- src/coreclr/jit/targetarm.cpp | 181 ++++++++++++++++++++++++++++++++++ 4 files changed, 268 insertions(+), 2 deletions(-) diff --git a/src/coreclr/jit/abi.cpp b/src/coreclr/jit/abi.cpp index c52c13273c63cc..3dd8fcec32fcc9 100644 --- a/src/coreclr/jit/abi.cpp +++ b/src/coreclr/jit/abi.cpp @@ -140,6 +140,39 @@ ABIPassingInformation ABIPassingInformation::FromSegment(Compiler* comp, const A return info; } +#ifdef DEBUG 
+//----------------------------------------------------------------------------- +// Dump: +// Dump the ABIPassingInformation to stdout. +// +void ABIPassingInformation::Dump() const +{ + if (NumSegments != 1) + { + printf("%u segments\n", NumSegments); + } + + for (unsigned i = 0; i < NumSegments; i++) + { + if (NumSegments > 1) + { + printf(" [%u] ", i); + } + + const ABIPassingSegment& seg = Segments[i]; + + if (Segments[i].IsPassedInRegister()) + { + printf("[%02u..%02u) reg %s\n", seg.Offset, seg.Offset + seg.Size, getRegName(seg.GetRegister())); + } + else + { + printf("[%02u..%02u) stack @ +%02u\n", seg.Offset, seg.Offset + seg.Size, seg.GetStackOffset()); + } + } +} +#endif + //----------------------------------------------------------------------------- // RegisterQueue::Dequeue: // Dequeue a register from the queue. diff --git a/src/coreclr/jit/abi.h b/src/coreclr/jit/abi.h index f6303899b2509a..27e53c27efc7e3 100644 --- a/src/coreclr/jit/abi.h +++ b/src/coreclr/jit/abi.h @@ -50,6 +50,10 @@ struct ABIPassingInformation bool IsSplitAcrossRegistersAndStack() const; static ABIPassingInformation FromSegment(Compiler* comp, const ABIPassingSegment& segment); + +#ifdef DEBUG + void Dump() const; +#endif }; class RegisterQueue @@ -141,6 +145,30 @@ class Arm64Classifier WellKnownArg wellKnownParam); }; +class Arm32Classifier +{ + const ClassifierInfo& m_info; + // 4 int regs are available for parameters. This gives the index of the + // next one. + // A.k.a. "NCRN": Next Core Register Number + unsigned m_nextIntReg = 0; + // 16 float regs are available for parameters. We keep them as a mask as + // they can be backfilled. + unsigned m_floatRegs = 0xFFFF; + // A.k.a. 
"NSAA": Next Stack Argument Address + unsigned m_stackArgSize = 0; + + ABIPassingInformation ClassifyFloat(Compiler* comp, var_types type, unsigned elems); + +public: + Arm32Classifier(const ClassifierInfo& info); + + ABIPassingInformation Classify(Compiler* comp, + var_types type, + ClassLayout* structLayout, + WellKnownArg wellKnownParam); +}; + #if defined(TARGET_X86) typedef X86Classifier PlatformClassifier; #elif defined(WINDOWS_AMD64_ABI) @@ -149,6 +177,8 @@ typedef WinX64Classifier PlatformClassifier; typedef SysVX64Classifier PlatformClassifier; #elif defined(TARGET_ARM64) typedef Arm64Classifier PlatformClassifier; +#elif defined(TARGET_ARM) +typedef Arm32Classifier PlatformClassifier; #endif #ifdef SWIFT_SUPPORT diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 978fff461e146a..50997980ca7488 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -1647,6 +1647,14 @@ void Compiler::lvaClassifyParameterABI(Classifier& classifier) #endif lvaParameterPassingInfo[i] = classifier.Classify(this, dsc->TypeGet(), structLayout, wellKnownArg); + +#ifdef DEBUG + if (verbose) + { + printf("Parameter #%u ABI info: ", i); + lvaParameterPassingInfo[i].Dump(); + } +#endif } } @@ -1675,7 +1683,7 @@ void Compiler::lvaClassifyParameterABI() } else #endif -#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) +#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) { PlatformClassifier classifier(cInfo); lvaClassifyParameterABI(classifier); @@ -1698,11 +1706,25 @@ void Compiler::lvaClassifyParameterABI() unsigned numSegmentsToCompare = abiInfo.NumSegments; if (dsc->lvIsHfa()) { - assert(abiInfo.NumSegments >= 1); // LclVarDsc only has one register set for HFAs numSegmentsToCompare = 1; } +#ifdef TARGET_ARM + // On arm the old representation only represents the start register for + // struct multireg args. 
+ if (varTypeIsStruct(dsc)) + { + numSegmentsToCompare = 1; + } + + // And also for TYP_DOUBLE on soft FP + if (opts.compUseSoftFP && (dsc->TypeGet() == TYP_DOUBLE)) + { + numSegmentsToCompare = 1; + } +#endif + for (unsigned i = 0; i < numSegmentsToCompare; i++) { const ABIPassingSegment& expected = abiInfo.Segments[i]; diff --git a/src/coreclr/jit/targetarm.cpp b/src/coreclr/jit/targetarm.cpp index 8e117bae810270..14cb85adbcc16d 100644 --- a/src/coreclr/jit/targetarm.cpp +++ b/src/coreclr/jit/targetarm.cpp @@ -26,4 +26,185 @@ const regMaskTP fltArgMasks[] = {RBM_F0, RBM_F1, RBM_F2, RBM_F3, RBM_F4, RBM_F5, static_assert_no_msg(RBM_ALLDOUBLE == (RBM_ALLDOUBLE_HIGH >> 1)); +//----------------------------------------------------------------------------- +// Arm32Classifier: +// Construct a new instance of the arm32 ABI classifier. +// +// Parameters: +// info - Info about the method being classified. +// +Arm32Classifier::Arm32Classifier(const ClassifierInfo& info) : m_info(info) +{ +} + +//----------------------------------------------------------------------------- +// Classify: +// Classify a parameter for the arm32 ABI. +// +// Parameters: +// comp - Compiler instance +// type - The type of the parameter +// structLayout - The layout of the struct. Expected to be non-null if +// varTypeIsStruct(type) is true. +// wellKnownParam - Well known type of the parameter (if it may affect its ABI classification) +// +// Returns: +// Classification information for the parameter. 
+// +ABIPassingInformation Arm32Classifier::Classify(Compiler* comp, + var_types type, + ClassLayout* structLayout, + WellKnownArg wellKnownParam) +{ + if (!comp->opts.compUseSoftFP) + { + if (varTypeIsStruct(type)) + { + var_types hfaType = comp->GetHfaType(structLayout->GetClassHandle()); + + if (hfaType != TYP_UNDEF) + { + unsigned slots = structLayout->GetSize() / genTypeSize(hfaType); + return ClassifyFloat(comp, hfaType, slots); + } + } + + if (varTypeIsFloating(type)) + { + return ClassifyFloat(comp, type, 1); + } + } + + unsigned alignment = 4; + if ((type == TYP_LONG) || (type == TYP_DOUBLE) || + ((type == TYP_STRUCT) && + (comp->info.compCompHnd->getClassAlignmentRequirement(structLayout->GetClassHandle()) == 8))) + { + alignment = 8; + m_nextIntReg = roundUp(m_nextIntReg, 2); + } + + unsigned size = type == TYP_STRUCT ? structLayout->GetSize() : genTypeSize(type); + unsigned alignedSize = roundUp(size, alignment); + + unsigned numInRegs = min(alignedSize / 4, 4 - m_nextIntReg); + bool anyOnStack = numInRegs < (alignedSize / 4); + + // If we already passed anything on stack (due to float args) then we + // cannot split an arg. + if ((numInRegs > 0) && anyOnStack && (m_stackArgSize != 0)) + { + numInRegs = 0; + } + + ABIPassingInformation info; + info.NumSegments = numInRegs + (anyOnStack ? 
1 : 0); + info.Segments = new (comp, CMK_ABI) ABIPassingSegment[info.NumSegments]; + + for (unsigned i = 0; i < numInRegs; i++) + { + unsigned endOffs = min((i + 1) * 4, size); + info.Segments[i] = + ABIPassingSegment::InRegister(static_cast(static_cast(REG_R0) + m_nextIntReg + i), + i * 4, endOffs - (i * 4)); + } + + m_nextIntReg += numInRegs; + + if (anyOnStack) + { + m_stackArgSize = roundUp(m_stackArgSize, alignment); + unsigned stackSize = size - (numInRegs * 4); + info.Segments[numInRegs] = ABIPassingSegment::OnStack(m_stackArgSize, 0, stackSize); + m_stackArgSize += roundUp(stackSize, 4); + + // As soon as any int arg goes on stack we cannot put anything else in + // int registers. This situation can happen if an arg would normally be + // split but wasn't because a float arg was already passed on stack. + m_nextIntReg = 4; + } + + return info; +} + +//----------------------------------------------------------------------------- +// ClassifyFloat: +// Classify a parameter that uses float registers. +// +// Parameters: +// comp - Compiler instance +// type - The type of the parameter +// numElems - Number of elements for the parameter. +// +// Returns: +// Classification information for the parameter. +// +// Remarks: +// Float parameters can require multiple registers; the double registers are +// overlaid on top of the float registers so that d0 = s0, s1, d1 = s2, s3 +// etc. This means that allocating a double register automatically makes the +// two corresponding float registers unavailable. +// +// The ABI also supports HFAs that similarly require multiple registers for +// passing. When multiple registers are required for a single argument they +// must always be allocated into consecutive float registers. However, +// backfilling is allowed. For example, a signature like +// Foo(float x, double y, float z) allocates x in REG_F0 = s0, y in REG_F2 = +// d1, z in REG_F1 = s1. 
+// +ABIPassingInformation Arm32Classifier::ClassifyFloat(Compiler* comp, var_types type, unsigned numElems) +{ + assert((type == TYP_FLOAT) || (type == TYP_DOUBLE)); + + unsigned numConsecutive = type == TYP_FLOAT ? numElems : (numElems * 2); + + // Find the first start index that has a consecutive run of + // 'numConsecutive' bits set. + unsigned startRegMask = m_floatRegs; + for (unsigned i = 1; i < numConsecutive; i++) + { + startRegMask &= m_floatRegs >> i; + } + + // Doubles can only start at even indices. + if (type == TYP_DOUBLE) + { + startRegMask &= 0b0101010101010101; + } + + if (startRegMask != 0) + { + unsigned startRegIndex = BitOperations::TrailingZeroCount(startRegMask); + unsigned usedRegsMask = ((1 << numConsecutive) - 1) << startRegIndex; + // First consecutive run of numConsecutive bits start at startRegIndex + assert((m_floatRegs & usedRegsMask) == usedRegsMask); + + m_floatRegs ^= usedRegsMask; + ABIPassingInformation info; + info.NumSegments = numElems; + info.Segments = new (comp, CMK_ABI) ABIPassingSegment[numElems]; + unsigned numRegsPerElem = type == TYP_FLOAT ? 1 : 2; + for (unsigned i = 0; i < numElems; i++) + { + regNumber reg = static_cast(static_cast(REG_F0) + startRegIndex + i * numRegsPerElem); + info.Segments[i] = ABIPassingSegment::InRegister(reg, i * genTypeSize(type), genTypeSize(type)); + } + + return info; + } + else + { + // As soon as any float arg goes on stack no other float arg can go in a register. 
+ m_floatRegs = 0; + + m_stackArgSize = roundUp(m_stackArgSize, genTypeSize(type)); + ABIPassingInformation info = + ABIPassingInformation::FromSegment(comp, ABIPassingSegment::OnStack(m_stackArgSize, 0, + numElems * genTypeSize(type))); + m_stackArgSize += numElems * genTypeSize(type); + + return info; + } +} + #endif // TARGET_ARM From 0c0b8b21118a3c05e3d7eb269576eb51685f3661 Mon Sep 17 00:00:00 2001 From: "Mukund Raghav Sharma (Moko)" <68247673+mrsharm@users.noreply.github.com> Date: Tue, 2 Apr 2024 22:47:17 -0700 Subject: [PATCH 051/132] Guard against -1 Returned from sysconf for the Cache Sizes Causing Large Gen0 Sizes and Budgets for Certain Linux Distributions. (#100502) * Logging. * Fixed comparison check * Fix logical operations * Completely guard against the cacheSize as UINTMAX_MAX * Fix for right macro * Ensure we are guarded against all cases where cacheSize == SIZE_MAX * Added an extra guard and removed redundant case * Comment clean * Added some additional asserts * Removed unnecessary checks for cacheSize == SIZE_MAX * Cleaned up logic * Fix type casting comparison * Removed redundant comment * Removed one more unneccesary guard --- src/coreclr/gc/unix/gcenv.unix.cpp | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/src/coreclr/gc/unix/gcenv.unix.cpp b/src/coreclr/gc/unix/gcenv.unix.cpp index 6f1a254a0528c4..e8a92a831361dc 100644 --- a/src/coreclr/gc/unix/gcenv.unix.cpp +++ b/src/coreclr/gc/unix/gcenv.unix.cpp @@ -868,28 +868,30 @@ bool ReadMemoryValueFromFile(const char* filename, uint64_t* val) return result; } -#define UPDATE_CACHE_SIZE_AND_LEVEL(NEW_CACHE_SIZE, NEW_CACHE_LEVEL) if (NEW_CACHE_SIZE > cacheSize) { cacheSize = NEW_CACHE_SIZE; cacheLevel = NEW_CACHE_LEVEL; } +#define UPDATE_CACHE_SIZE_AND_LEVEL(NEW_CACHE_SIZE, NEW_CACHE_LEVEL) if (NEW_CACHE_SIZE > ((long)cacheSize)) { cacheSize = NEW_CACHE_SIZE; cacheLevel = NEW_CACHE_LEVEL; } static size_t GetLogicalProcessorCacheSizeFromOS() { size_t 
cacheLevel = 0; size_t cacheSize = 0; - size_t size; + long size; + // sysconf can return -1 if the cache size is unavailable in some distributions and 0 in others. + // UPDATE_CACHE_SIZE_AND_LEVEL should handle both the cases by not updating cacheSize if either of cases are met. #ifdef _SC_LEVEL1_DCACHE_SIZE - size = ( size_t) sysconf(_SC_LEVEL1_DCACHE_SIZE); + size = sysconf(_SC_LEVEL1_DCACHE_SIZE); UPDATE_CACHE_SIZE_AND_LEVEL(size, 1) #endif #ifdef _SC_LEVEL2_CACHE_SIZE - size = ( size_t) sysconf(_SC_LEVEL2_CACHE_SIZE); + size = sysconf(_SC_LEVEL2_CACHE_SIZE); UPDATE_CACHE_SIZE_AND_LEVEL(size, 2) #endif #ifdef _SC_LEVEL3_CACHE_SIZE - size = ( size_t) sysconf(_SC_LEVEL3_CACHE_SIZE); + size = sysconf(_SC_LEVEL3_CACHE_SIZE); UPDATE_CACHE_SIZE_AND_LEVEL(size, 3) #endif #ifdef _SC_LEVEL4_CACHE_SIZE - size = ( size_t) sysconf(_SC_LEVEL4_CACHE_SIZE); + size = sysconf(_SC_LEVEL4_CACHE_SIZE); UPDATE_CACHE_SIZE_AND_LEVEL(size, 4) #endif @@ -912,17 +914,22 @@ static size_t GetLogicalProcessorCacheSizeFromOS() { path_to_size_file[index] = (char)(48 + i); - if (ReadMemoryValueFromFile(path_to_size_file, &size)) + uint64_t cache_size_from_sys_file = 0; + + if (ReadMemoryValueFromFile(path_to_size_file, &cache_size_from_sys_file)) { + // uint64_t to long conversion as ReadMemoryValueFromFile takes a uint64_t* as an argument for the val argument. 
+ size = (long)cache_size_from_sys_file; path_to_level_file[index] = (char)(48 + i); if (ReadMemoryValueFromFile(path_to_level_file, &level)) { UPDATE_CACHE_SIZE_AND_LEVEL(size, level) } + else { - cacheSize = std::max(cacheSize, size); + cacheSize = std::max((long)cacheSize, size); } } } From b66d44a6a551daeca12888ac7bcac6a097882618 Mon Sep 17 00:00:00 2001 From: Ilona Tomkowicz <32700855+ilonatommy@users.noreply.github.com> Date: Wed, 3 Apr 2024 07:49:46 +0200 Subject: [PATCH 052/132] Enable System.Runtime.Serialization.Schema.Tests for HybridGlobalization (#100522) --- .../tests/System/Runtime/Serialization/Schema/RoundTripTest.cs | 1 - .../System/Diagnostics/StackTraceHiddenAttributeTests.cs | 1 - 2 files changed, 2 deletions(-) diff --git a/src/libraries/System.Runtime.Serialization.Schema/tests/System/Runtime/Serialization/Schema/RoundTripTest.cs b/src/libraries/System.Runtime.Serialization.Schema/tests/System/Runtime/Serialization/Schema/RoundTripTest.cs index eaad37ff1546b5..b8fe734b1a55a0 100644 --- a/src/libraries/System.Runtime.Serialization.Schema/tests/System/Runtime/Serialization/Schema/RoundTripTest.cs +++ b/src/libraries/System.Runtime.Serialization.Schema/tests/System/Runtime/Serialization/Schema/RoundTripTest.cs @@ -21,7 +21,6 @@ public RoundTripTest(ITestOutputHelper output) [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/73961", typeof(PlatformDetection), nameof(PlatformDetection.IsBuiltWithAggressiveTrimming), nameof(PlatformDetection.IsBrowser))] - [ActiveIssue("https://github.com/dotnet/runtime/issues/95981", typeof(PlatformDetection), nameof(PlatformDetection.IsHybridGlobalizationOnBrowser))] public void RountTripTest() { // AppContext SetSwitch seems to be unreliable in the unit test case. 
So let's not rely on it diff --git a/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Diagnostics/StackTraceHiddenAttributeTests.cs b/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Diagnostics/StackTraceHiddenAttributeTests.cs index 9e8a77a4feb9a0..90371c32bd2453 100644 --- a/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Diagnostics/StackTraceHiddenAttributeTests.cs +++ b/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Diagnostics/StackTraceHiddenAttributeTests.cs @@ -8,7 +8,6 @@ namespace System.Tests { [ActiveIssue("https://github.com/dotnet/runtime/issues/50957", typeof(PlatformDetection), nameof(PlatformDetection.IsBrowser), nameof(PlatformDetection.IsMonoAOT))] - [ActiveIssue("https://github.com/dotnet/runtime/issues/95981", typeof(PlatformDetection), nameof(PlatformDetection.IsHybridGlobalizationOnBrowser))] public class StackTraceHiddenAttributeTests { [Fact] From c5bead63f8386f716b8ddd909c93086b3546efed Mon Sep 17 00:00:00 2001 From: Jakob Botsch Nielsen Date: Wed, 3 Apr 2024 09:02:30 +0200 Subject: [PATCH 053/132] JIT: Disable more_tailcalls under GCStress (#100540) --- src/tests/JIT/Directed/tailcall/more_tailcalls.ilproj | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/tests/JIT/Directed/tailcall/more_tailcalls.ilproj b/src/tests/JIT/Directed/tailcall/more_tailcalls.ilproj index 3f6be099a84e6e..17f0cf113434f6 100644 --- a/src/tests/JIT/Directed/tailcall/more_tailcalls.ilproj +++ b/src/tests/JIT/Directed/tailcall/more_tailcalls.ilproj @@ -1,7 +1,8 @@ - + true + true PdbOnly From 8763f6d892a6957ac4ae9ab6de3b5ceac8270c0a Mon Sep 17 00:00:00 2001 From: Pavel Savara Date: Wed, 3 Apr 2024 09:52:32 +0200 Subject: [PATCH 054/132] [browser][MT] smaller thread pool (#100415) --- .../Interop/JavaScriptImports.Generated.cs | 5 ---- .../InteropServices/JavaScript/JSWebWorker.cs | 25 +------------------ .../System.Threading.Tasks.Tests.csproj | 1 + 
.../System.Threading.Thread.Tests.csproj | 4 +++ .../System.Threading.ThreadPool.Tests.csproj | 4 +++ .../tests/System.Threading.Tests.csproj | 1 + src/mono/browser/runtime/exports-internal.ts | 3 +-- src/mono/browser/runtime/loader/config.ts | 4 +-- src/mono/browser/runtime/pthreads/index.ts | 2 +- .../browser/runtime/pthreads/ui-thread.ts | 25 +++---------------- 10 files changed, 19 insertions(+), 55 deletions(-) diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Interop/JavaScriptImports.Generated.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Interop/JavaScriptImports.Generated.cs index 10f737f5a9c9ce..e7ba1f9aadd65c 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Interop/JavaScriptImports.Generated.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Interop/JavaScriptImports.Generated.cs @@ -48,11 +48,6 @@ internal static unsafe partial class JavaScriptImports [JSImport("INTERNAL.mono_wasm_bind_cs_function")] public static partial void BindCSFunction(IntPtr monoMethod, string assemblyName, string namespaceName, string shortClassName, string methodName, int signatureHash, IntPtr signature); -#if FEATURE_WASM_MANAGED_THREADS - [JSImport("INTERNAL.thread_available")] - public static partial Task ThreadAvailable(); -#endif - #if DEBUG [JSImport("globalThis.console.log")] [return: JSMarshalAs] // this means that the message will arrive out of order, especially across threads. 
diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSWebWorker.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSWebWorker.cs index 928339b8062061..776f6dc3d5dbe2 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSWebWorker.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSWebWorker.cs @@ -76,30 +76,7 @@ public JSWebWorkerInstance(Func> body, CancellationToken cancellationTok public Task Start() { - if (JSProxyContext.MainThreadContext.IsCurrentThread()) - { - // give browser chance to load more threads - // until there at least one thread loaded, it doesn't make sense to `Start` - // because that would also hang, but in a way blocking the UI thread, much worse. - JavaScriptImports.ThreadAvailable().ContinueWith(static (t, o) => - { - var self = (JSWebWorkerInstance)o!; - if (t.IsCompletedSuccessfully) - { - self._thread.Start(); - } - if (t.IsCanceled) - { - throw new OperationCanceledException("Cancelled while waiting for underlying WebWorker to become available.", self._cancellationToken); - } - throw t.Exception!; - // ideally this will execute on UI thread quickly: ExecuteSynchronously - }, this, _cancellationToken, TaskContinuationOptions.ExecuteSynchronously, TaskScheduler.FromCurrentSynchronizationContext()); - } - else - { - _thread.Start(); - } + _thread.Start(); return _taskCompletionSource.Task; } diff --git a/src/libraries/System.Runtime/tests/System.Threading.Tasks.Tests/System.Threading.Tasks.Tests.csproj b/src/libraries/System.Runtime/tests/System.Threading.Tasks.Tests/System.Threading.Tasks.Tests.csproj index 8f7a8cb6b51cf3..57ce44b8d41a12 100644 --- a/src/libraries/System.Runtime/tests/System.Threading.Tasks.Tests/System.Threading.Tasks.Tests.csproj +++ 
b/src/libraries/System.Runtime/tests/System.Threading.Tasks.Tests/System.Threading.Tasks.Tests.csproj @@ -6,6 +6,7 @@ true + <_WasmPThreadPoolUnusedSize>10 diff --git a/src/libraries/System.Threading.Thread/tests/System.Threading.Thread.Tests.csproj b/src/libraries/System.Threading.Thread/tests/System.Threading.Thread.Tests.csproj index 6a9977dc6e11b6..ed29b66576eea4 100644 --- a/src/libraries/System.Threading.Thread/tests/System.Threading.Thread.Tests.csproj +++ b/src/libraries/System.Threading.Thread/tests/System.Threading.Thread.Tests.csproj @@ -5,6 +5,10 @@ true $(NetCoreAppCurrent) + + true + <_WasmPThreadPoolUnusedSize>10 + diff --git a/src/libraries/System.Threading.ThreadPool/tests/System.Threading.ThreadPool.Tests.csproj b/src/libraries/System.Threading.ThreadPool/tests/System.Threading.ThreadPool.Tests.csproj index 0cb21c9d38492b..ad3f9814e0adfc 100644 --- a/src/libraries/System.Threading.ThreadPool/tests/System.Threading.ThreadPool.Tests.csproj +++ b/src/libraries/System.Threading.ThreadPool/tests/System.Threading.ThreadPool.Tests.csproj @@ -4,6 +4,10 @@ $(NetCoreAppCurrent) true + + true + <_WasmPThreadPoolUnusedSize>10 + diff --git a/src/libraries/System.Threading/tests/System.Threading.Tests.csproj b/src/libraries/System.Threading/tests/System.Threading.Tests.csproj index 54261d3a1fe665..768bb7b665d925 100644 --- a/src/libraries/System.Threading/tests/System.Threading.Tests.csproj +++ b/src/libraries/System.Threading/tests/System.Threading.Tests.csproj @@ -9,6 +9,7 @@ true + <_WasmPThreadPoolUnusedSize>10 diff --git a/src/mono/browser/runtime/exports-internal.ts b/src/mono/browser/runtime/exports-internal.ts index 074b54a18fcc7e..c2ef78c0a1f9e3 100644 --- a/src/mono/browser/runtime/exports-internal.ts +++ b/src/mono/browser/runtime/exports-internal.ts @@ -23,7 +23,7 @@ import { mono_wasm_get_func_id_to_name_mappings } from "./logging"; import { monoStringToStringUnsafe } from "./strings"; import { mono_wasm_bind_cs_function } from "./invoke-cs"; 
-import { mono_wasm_dump_threads, thread_available } from "./pthreads"; +import { mono_wasm_dump_threads } from "./pthreads"; export function export_internal (): any { return { @@ -63,7 +63,6 @@ export function export_internal (): any { get_global_this, get_dotnet_instance: () => exportedRuntimeAPI, dynamic_import, - thread_available: WasmEnableThreads ? thread_available : undefined, mono_wasm_bind_cs_function, // BrowserWebSocket diff --git a/src/mono/browser/runtime/loader/config.ts b/src/mono/browser/runtime/loader/config.ts index 1739bc09d1ef67..8fd7f00fe3152d 100644 --- a/src/mono/browser/runtime/loader/config.ts +++ b/src/mono/browser/runtime/loader/config.ts @@ -190,10 +190,10 @@ export function normalizeConfig () { if (WasmEnableThreads) { if (!Number.isInteger(config.pthreadPoolInitialSize)) { - config.pthreadPoolInitialSize = 7; + config.pthreadPoolInitialSize = 5; } if (!Number.isInteger(config.pthreadPoolUnusedSize)) { - config.pthreadPoolUnusedSize = 3; + config.pthreadPoolUnusedSize = 1; } if (!Number.isInteger(config.finalizerThreadStartDelayMs)) { config.finalizerThreadStartDelayMs = 200; diff --git a/src/mono/browser/runtime/pthreads/index.ts b/src/mono/browser/runtime/pthreads/index.ts index 0a5911605282d0..195df3e126ab65 100644 --- a/src/mono/browser/runtime/pthreads/index.ts +++ b/src/mono/browser/runtime/pthreads/index.ts @@ -6,7 +6,7 @@ export { mono_wasm_pthread_ptr, update_thread_info, isMonoThreadMessage, monoThreadInfo, } from "./shared"; export { - mono_wasm_dump_threads, thread_available, cancelThreads, is_thread_available, + mono_wasm_dump_threads, cancelThreads, is_thread_available, populateEmscriptenPool, mono_wasm_init_threads, init_finalizer_thread, waitForThread, replaceEmscriptenPThreadUI } from "./ui-thread"; diff --git a/src/mono/browser/runtime/pthreads/ui-thread.ts b/src/mono/browser/runtime/pthreads/ui-thread.ts index fcb010ae3a187c..14a7c9353b2fb9 100644 --- a/src/mono/browser/runtime/pthreads/ui-thread.ts +++ 
b/src/mono/browser/runtime/pthreads/ui-thread.ts @@ -5,11 +5,10 @@ import WasmEnableThreads from "consts:wasmEnableThreads"; import BuildConfiguration from "consts:configuration"; import { } from "../globals"; -import { mono_log_debug, mono_log_warn } from "../logging"; import { MonoWorkerToMainMessage, monoThreadInfo, mono_wasm_pthread_ptr, update_thread_info, worker_empty_prefix } from "./shared"; import { Module, ENVIRONMENT_IS_WORKER, createPromiseController, loaderHelpers, mono_assert, runtimeHelpers } from "../globals"; -import { PThreadLibrary, MainToWorkerMessageType, MonoThreadMessage, PThreadInfo, PThreadPtr, PThreadPtrNull, PThreadWorker, PromiseAndController, PromiseController, Thread, WorkerToMainMessageType, monoMessageSymbol } from "../types/internal"; -import { mono_log_error, mono_log_info } from "../logging"; +import { PThreadLibrary, MainToWorkerMessageType, MonoThreadMessage, PThreadInfo, PThreadPtr, PThreadPtrNull, PThreadWorker, PromiseController, Thread, WorkerToMainMessageType, monoMessageSymbol } from "../types/internal"; +import { mono_log_error, mono_log_info, mono_log_debug } from "../logging"; import { threads_c_functions as cwraps } from "../cwraps"; const threadPromises: Map[]> = new Map(); @@ -119,32 +118,16 @@ function monoWorkerMessageHandler (worker: PThreadWorker, ev: MessageEvent) } } -let pendingWorkerLoad: PromiseAndController | undefined; - /// Called by Emscripten internals on the browser thread when a new pthread worker is created and added to the pthread worker pool. /// At this point the worker doesn't have any pthread assigned to it, yet. 
export function onWorkerLoadInitiated (worker: PThreadWorker, loaded: Promise): void { if (!WasmEnableThreads) return; worker.addEventListener("message", (ev) => monoWorkerMessageHandler(worker, ev)); - if (pendingWorkerLoad == undefined) { - pendingWorkerLoad = createPromiseController(); - } loaded.then(() => { worker.info.isLoaded = true; - if (pendingWorkerLoad != undefined) { - pendingWorkerLoad.promise_control.resolve(); - pendingWorkerLoad = undefined; - } }); } -export function thread_available (): Promise { - if (!WasmEnableThreads) return null as any; - if (pendingWorkerLoad == undefined) { - return Promise.resolve(); - } - return pendingWorkerLoad.promise; -} export function populateEmscriptenPool (): void { if (!WasmEnableThreads) return; @@ -295,7 +278,7 @@ function getNewWorker (modulePThread: PThreadLibrary): PThreadWorker { if (!WasmEnableThreads) return null as any; if (modulePThread.unusedWorkers.length == 0) { - mono_log_warn(`Failed to find unused WebWorker, this may deadlock. Please increase the pthreadPoolReady. Running threads ${modulePThread.runningWorkers.length}. Loading workers: ${modulePThread.unusedWorkers.length}`); + mono_log_debug(`Failed to find unused WebWorker, this may deadlock. Please increase the pthreadPoolReady. Running threads ${modulePThread.runningWorkers.length}. Loading workers: ${modulePThread.unusedWorkers.length}`); const worker = allocateUnusedWorker(); modulePThread.loadWasmModuleToWorker(worker); availableThreadCount--; @@ -316,7 +299,7 @@ function getNewWorker (modulePThread: PThreadLibrary): PThreadWorker { return worker; } } - mono_log_warn(`Failed to find loaded WebWorker, this may deadlock. Please increase the pthreadPoolReady. Running threads ${modulePThread.runningWorkers.length}. Loading workers: ${modulePThread.unusedWorkers.length}`); + mono_log_debug(`Failed to find loaded WebWorker, this may deadlock. Please increase the pthreadPoolReady. Running threads ${modulePThread.runningWorkers.length}. 
Loading workers: ${modulePThread.unusedWorkers.length}`); availableThreadCount--; // negative value return modulePThread.unusedWorkers.pop()!; } From 3bae822e56590dec674ed25df8e5b7f23927af9a Mon Sep 17 00:00:00 2001 From: Pavel Savara Date: Wed, 3 Apr 2024 10:10:36 +0200 Subject: [PATCH 055/132] [browser][MT] mono_wasm_schedule_synchronization_context (#100251) --- .../src/Interop/Browser/Interop.Runtime.cs | 2 +- .../JavaScript/JSSynchronizationContext.cs | 11 +-- src/mono/browser/runtime/corebindings.c | 6 +- src/mono/browser/runtime/cwraps.ts | 2 + src/mono/browser/runtime/driver.c | 7 ++ src/mono/browser/runtime/exports-binding.ts | 2 + .../browser/runtime/pthreads/deputy-thread.ts | 2 + src/mono/browser/runtime/pthreads/shared.ts | 15 ++++ src/mono/browser/runtime/scheduling.ts | 36 +++------- src/mono/mono/mini/mini-wasm.c | 14 ++-- src/mono/mono/mini/mini-wasm.h | 7 ++ src/mono/mono/utils/mono-threads-wasm.c | 69 ++++--------------- src/mono/mono/utils/mono-threads-wasm.h | 2 +- src/mono/mono/utils/mono-threads.c | 12 ---- src/mono/mono/utils/mono-threads.h | 4 +- 15 files changed, 79 insertions(+), 112 deletions(-) diff --git a/src/libraries/Common/src/Interop/Browser/Interop.Runtime.cs b/src/libraries/Common/src/Interop/Browser/Interop.Runtime.cs index 7779005dce1629..518a4ff53bee20 100644 --- a/src/libraries/Common/src/Interop/Browser/Interop.Runtime.cs +++ b/src/libraries/Common/src/Interop/Browser/Interop.Runtime.cs @@ -42,7 +42,7 @@ internal static unsafe partial class Runtime #if FEATURE_WASM_MANAGED_THREADS [MethodImpl(MethodImplOptions.InternalCall)] - public static extern void InstallWebWorkerInterop(nint proxyContextGCHandle, void* beforeSyncJSImport, void* afterSyncJSImport); + public static extern void InstallWebWorkerInterop(nint proxyContextGCHandle, void* beforeSyncJSImport, void* afterSyncJSImport, void* pumpHandler); [MethodImpl(MethodImplOptions.InternalCall)] public static extern void UninstallWebWorkerInterop(); diff --git 
a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSSynchronizationContext.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSSynchronizationContext.cs index f967540d450793..2a2ff0f6f02226 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSSynchronizationContext.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSSynchronizationContext.cs @@ -68,7 +68,8 @@ public static unsafe JSSynchronizationContext InstallWebWorkerInterop(bool isMai Interop.Runtime.InstallWebWorkerInterop(proxyContext.ContextHandle, (delegate* unmanaged[Cdecl])&JavaScriptExports.BeforeSyncJSExport, - (delegate* unmanaged[Cdecl])&JavaScriptExports.AfterSyncJSExport); + (delegate* unmanaged[Cdecl])&JavaScriptExports.AfterSyncJSExport, + (delegate* unmanaged[Cdecl])&PumpHandler); return ctx; } @@ -170,7 +171,7 @@ private unsafe void ScheduleJSPump() { // While we COULD pump here, we don't want to. We want the pump to happen on the next event loop turn. // Otherwise we could get a chain where a pump generates a new work item and that makes us pump again, forever. - TargetThreadScheduleBackgroundJob(ProxyContext.NativeTID, (delegate* unmanaged[Cdecl])&BackgroundJobHandler); + ScheduleSynchronizationContext(ProxyContext.NativeTID); } public override void Post(SendOrPostCallback d, object? state) @@ -236,13 +237,13 @@ public override void Send(SendOrPostCallback d, object? 
state) } [MethodImplAttribute(MethodImplOptions.InternalCall)] - internal static extern unsafe void TargetThreadScheduleBackgroundJob(IntPtr targetTID, void* callback); + internal static extern unsafe void ScheduleSynchronizationContext(IntPtr targetTID); #pragma warning disable CS3016 // Arrays as attribute arguments is not CLS-compliant [UnmanagedCallersOnly(CallConvs = new[] { typeof(CallConvCdecl) })] #pragma warning restore CS3016 // this callback will arrive on the target thread, called from mono_background_exec - private static void BackgroundJobHandler() + private static void PumpHandler() { var ctx = JSProxyContext.AssertIsInteropThread(); ctx.SynchronizationContext.Pump(); @@ -286,7 +287,7 @@ private void Pump() } catch (Exception e) { - Environment.FailFast($"JSSynchronizationContext.BackgroundJobHandler failed, ManagedThreadId: {Environment.CurrentManagedThreadId}. {Environment.NewLine} {e.StackTrace}"); + Environment.FailFast($"JSSynchronizationContext.Pump failed, ManagedThreadId: {Environment.CurrentManagedThreadId}. 
{Environment.NewLine} {e.StackTrace}"); } } diff --git a/src/mono/browser/runtime/corebindings.c b/src/mono/browser/runtime/corebindings.c index 79b4427e316faf..37a8598ec6c4eb 100644 --- a/src/mono/browser/runtime/corebindings.c +++ b/src/mono/browser/runtime/corebindings.c @@ -42,7 +42,7 @@ void mono_wasm_resolve_or_reject_promise_post (pthread_t target_tid, void *args) void mono_wasm_cancel_promise_post (pthread_t target_tid, int task_holder_gc_handle); extern void mono_wasm_install_js_worker_interop (int context_gc_handle); -void mono_wasm_install_js_worker_interop_wrapper (int context_gc_handle, void* beforeSyncJSImport, void* afterSyncJSImport); +void mono_wasm_install_js_worker_interop_wrapper (int context_gc_handle, void* beforeSyncJSImport, void* afterSyncJSImport, void* pumpHandler); extern void mono_wasm_uninstall_js_worker_interop (); extern void mono_wasm_invoke_jsimport_MT (void* signature, void* args); void mono_wasm_invoke_jsimport_async_post (pthread_t target_tid, void* signature, void* args); @@ -258,11 +258,13 @@ void mono_wasm_get_assembly_export (char *assembly_name, char *namespace, char * void* before_sync_js_import; void* after_sync_js_import; +void* synchronization_context_pump_handler; -void mono_wasm_install_js_worker_interop_wrapper (int context_gc_handle, void* beforeSyncJSImport, void* afterSyncJSImport) +void mono_wasm_install_js_worker_interop_wrapper (int context_gc_handle, void* beforeSyncJSImport, void* afterSyncJSImport, void* pumpHandler) { before_sync_js_import = beforeSyncJSImport; after_sync_js_import = afterSyncJSImport; + synchronization_context_pump_handler = pumpHandler; mono_wasm_install_js_worker_interop (context_gc_handle); } diff --git a/src/mono/browser/runtime/cwraps.ts b/src/mono/browser/runtime/cwraps.ts index 7f4a05cbd87098..3ce3302e3b3514 100644 --- a/src/mono/browser/runtime/cwraps.ts +++ b/src/mono/browser/runtime/cwraps.ts @@ -33,6 +33,7 @@ const threading_cwraps: SigLine[] = WasmEnableThreads ? 
[ [true, "mono_wasm_register_ui_thread", "void", []], [true, "mono_wasm_register_io_thread", "void", []], [true, "mono_wasm_print_thread_dump", "void", []], + [true, "mono_wasm_synchronization_context_pump", "void", []], [true, "mono_threads_wasm_sync_run_in_target_thread_done", "void", ["number"]], ] : []; @@ -157,6 +158,7 @@ export interface t_ThreadingCwraps { mono_wasm_register_ui_thread(): void; mono_wasm_register_io_thread(): void; mono_wasm_print_thread_dump(): void; + mono_wasm_synchronization_context_pump(): void; mono_threads_wasm_sync_run_in_target_thread_done(sem: VoidPtr): void; } diff --git a/src/mono/browser/runtime/driver.c b/src/mono/browser/runtime/driver.c index 6816642a48b2a5..f231a86119e7e6 100644 --- a/src/mono/browser/runtime/driver.c +++ b/src/mono/browser/runtime/driver.c @@ -287,8 +287,10 @@ mono_wasm_invoke_jsexport_async_post (void* target_thread, MonoMethod *method, v typedef void (*js_interop_event)(void* args); +typedef void (*sync_context_pump)(void); extern js_interop_event before_sync_js_import; extern js_interop_event after_sync_js_import; +extern sync_context_pump synchronization_context_pump_handler; // this is running on the target thread EMSCRIPTEN_KEEPALIVE void @@ -306,6 +308,11 @@ mono_wasm_invoke_jsexport_sync_send (void* target_thread, MonoMethod *method, vo mono_threads_wasm_sync_run_in_target_thread_vii (target_thread, (void (*)(gpointer, gpointer))mono_wasm_invoke_jsexport_sync, method, args); } +EMSCRIPTEN_KEEPALIVE void mono_wasm_synchronization_context_pump (void) +{ + synchronization_context_pump_handler (); +} + #endif /* DISABLE_THREADS */ EMSCRIPTEN_KEEPALIVE void diff --git a/src/mono/browser/runtime/exports-binding.ts b/src/mono/browser/runtime/exports-binding.ts index 251869cfd60a60..bcecfec8b10ea4 100644 --- a/src/mono/browser/runtime/exports-binding.ts +++ b/src/mono/browser/runtime/exports-binding.ts @@ -32,6 +32,7 @@ import { mono_wasm_pthread_on_pthread_registered, mono_wasm_pthread_set_name, 
mono_wasm_install_js_worker_interop, mono_wasm_uninstall_js_worker_interop, mono_wasm_start_io_thread_async } from "./pthreads"; import { mono_wasm_dump_threads } from "./pthreads/ui-thread"; +import { mono_wasm_schedule_synchronization_context } from "./pthreads/shared"; // the JS methods would be visible to EMCC linker and become imports of the WASM module @@ -44,6 +45,7 @@ export const mono_wasm_threads_imports = !WasmEnableThreads ? [] : [ mono_wasm_pthread_set_name, mono_wasm_start_deputy_thread_async, mono_wasm_start_io_thread_async, + mono_wasm_schedule_synchronization_context, // mono-threads.c mono_wasm_dump_threads, diff --git a/src/mono/browser/runtime/pthreads/deputy-thread.ts b/src/mono/browser/runtime/pthreads/deputy-thread.ts index bb032c2479c0d6..4b514b28a4aed3 100644 --- a/src/mono/browser/runtime/pthreads/deputy-thread.ts +++ b/src/mono/browser/runtime/pthreads/deputy-thread.ts @@ -9,6 +9,7 @@ import { monoThreadInfo, postMessageToMain, update_thread_info } from "./shared" import { Module, loaderHelpers, runtimeHelpers } from "../globals"; import { start_runtime } from "../startup"; import { WorkerToMainMessageType } from "../types/internal"; +import { forceThreadMemoryViewRefresh } from "../memory"; export function mono_wasm_start_deputy_thread_async () { if (!WasmEnableThreads) return; @@ -28,6 +29,7 @@ export function mono_wasm_start_deputy_thread_async () { Module.runtimeKeepalivePush(); Module.safeSetTimeout(async () => { try { + forceThreadMemoryViewRefresh(); await start_runtime(); diff --git a/src/mono/browser/runtime/pthreads/shared.ts b/src/mono/browser/runtime/pthreads/shared.ts index efa3be35ceff04..f72804fbcf873c 100644 --- a/src/mono/browser/runtime/pthreads/shared.ts +++ b/src/mono/browser/runtime/pthreads/shared.ts @@ -11,6 +11,8 @@ import { set_thread_prefix } from "../logging"; import { bindings_init } from "../startup"; import { forceDisposeProxies } from "../gc-handles"; import { monoMessageSymbol, GCHandleNull, PThreadPtrNull, 
WorkerToMainMessageType } from "../types/internal"; +import { threads_c_functions as tcwraps } from "../cwraps"; +import { forceThreadMemoryViewRefresh } from "../memory"; // A duplicate in loader/assets.ts export const worker_empty_prefix = " - "; @@ -105,6 +107,19 @@ export function update_thread_info (): void { } } +export function exec_synchronization_context_pump (): void { + if (!loaderHelpers.is_runtime_running()) { + return; + } + forceThreadMemoryViewRefresh(); + tcwraps.mono_wasm_synchronization_context_pump(); +} + +export function mono_wasm_schedule_synchronization_context (): void { + if (!WasmEnableThreads) return; + Module.safeSetTimeout(exec_synchronization_context_pump, 0); +} + export function mono_wasm_pthread_ptr (): PThreadPtr { if (!WasmEnableThreads) return PThreadPtrNull; return (Module)["_pthread_self"](); diff --git a/src/mono/browser/runtime/scheduling.ts b/src/mono/browser/runtime/scheduling.ts index decbc6a2940bf6..35552123845ada 100644 --- a/src/mono/browser/runtime/scheduling.ts +++ b/src/mono/browser/runtime/scheduling.ts @@ -4,14 +4,14 @@ import WasmEnableThreads from "consts:wasmEnableThreads"; import cwraps from "./cwraps"; -import { ENVIRONMENT_IS_WORKER, Module, loaderHelpers } from "./globals"; +import { Module, loaderHelpers } from "./globals"; import { forceThreadMemoryViewRefresh } from "./memory"; -import { is_thread_available } from "./pthreads"; let spread_timers_maximum = 0; let pump_count = 0; export function prevent_timer_throttling (): void { + if (WasmEnableThreads) return; if (!loaderHelpers.isChromium) { return; } @@ -30,26 +30,22 @@ export function prevent_timer_throttling (): void { } function prevent_timer_throttling_tick () { + if (WasmEnableThreads) return; Module.maybeExit(); if (!loaderHelpers.is_runtime_running()) { return; } - if (WasmEnableThreads) { - forceThreadMemoryViewRefresh(); - } cwraps.mono_wasm_execute_timer(); pump_count++; mono_background_exec_until_done(); } function 
mono_background_exec_until_done () { + if (WasmEnableThreads) return; Module.maybeExit(); if (!loaderHelpers.is_runtime_running()) { return; } - if (WasmEnableThreads) { - forceThreadMemoryViewRefresh(); - } while (pump_count > 0) { --pump_count; cwraps.mono_background_exec(); @@ -57,39 +53,23 @@ function mono_background_exec_until_done () { } export function schedule_background_exec (): void { + if (WasmEnableThreads) return; ++pump_count; - let max_postpone_count = 10; - function postpone_schedule_background () { - if (max_postpone_count < 0 || is_thread_available()) { - Module.safeSetTimeout(mono_background_exec_until_done, 0); - } else { - max_postpone_count--; - Module.safeSetTimeout(postpone_schedule_background, 10); - } - } - - if (WasmEnableThreads && !ENVIRONMENT_IS_WORKER) { - // give threads chance to load before we run more synchronous code on UI thread - postpone_schedule_background(); - } else { - Module.safeSetTimeout(mono_background_exec_until_done, 0); - } + Module.safeSetTimeout(mono_background_exec_until_done, 0); } let lastScheduledTimeoutId: any = undefined; export function mono_wasm_schedule_timer (shortestDueTimeMs: number): void { + if (WasmEnableThreads) return; if (lastScheduledTimeoutId) { globalThis.clearTimeout(lastScheduledTimeoutId); lastScheduledTimeoutId = undefined; - // NOTE: Multi-threaded Module.safeSetTimeout() does the runtimeKeepalivePush() - // and non-Multi-threaded Module.safeSetTimeout does not runtimeKeepalivePush() - // but clearTimeout does not runtimeKeepalivePop() so we need to do it here in MT only. 
- if (WasmEnableThreads) Module.runtimeKeepalivePop(); } lastScheduledTimeoutId = Module.safeSetTimeout(mono_wasm_schedule_timer_tick, shortestDueTimeMs); } function mono_wasm_schedule_timer_tick () { + if (WasmEnableThreads) return; Module.maybeExit(); if (WasmEnableThreads) { forceThreadMemoryViewRefresh(); diff --git a/src/mono/mono/mini/mini-wasm.c b/src/mono/mono/mini/mini-wasm.c index 991135288f63de..db7c8b2de39a60 100644 --- a/src/mono/mono/mini/mini-wasm.c +++ b/src/mono/mono/mini/mini-wasm.c @@ -444,14 +444,17 @@ mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_targe //functions exported to be used by JS G_BEGIN_DECLS -EMSCRIPTEN_KEEPALIVE void mono_wasm_execute_timer (void); //JS functions imported that we use +#ifdef DISABLE_THREADS +EMSCRIPTEN_KEEPALIVE void mono_wasm_execute_timer (void); +EMSCRIPTEN_KEEPALIVE void mono_background_exec (void); extern void mono_wasm_schedule_timer (int shortestDueTimeMs); +#else +extern void mono_target_thread_schedule_synchronization_context(MonoNativeThreadId target_thread); +#endif // DISABLE_THREADS G_END_DECLS -void mono_background_exec (void); - #endif // HOST_BROWSER gpointer @@ -588,6 +591,8 @@ mono_thread_state_init_from_handle (MonoThreadUnwindState *tctx, MonoThreadInfo return FALSE; } +#ifdef DISABLE_THREADS + // this points to System.Threading.TimerQueue.TimerHandler C# method static void *timer_handler; @@ -605,7 +610,6 @@ mono_wasm_execute_timer (void) MONO_EXIT_GC_UNSAFE; } -#ifdef DISABLE_THREADS void mono_wasm_main_thread_schedule_timer (void *timerHandler, int shortestDueTimeMs) { @@ -626,7 +630,7 @@ mono_arch_register_icall (void) mono_add_internal_call_internal ("System.Threading.TimerQueue::MainThreadScheduleTimer", mono_wasm_main_thread_schedule_timer); mono_add_internal_call_internal ("System.Threading.ThreadPool::MainThreadScheduleBackgroundJob", mono_main_thread_schedule_background_job); #else - mono_add_internal_call_internal 
("System.Runtime.InteropServices.JavaScript.JSSynchronizationContext::TargetThreadScheduleBackgroundJob", mono_target_thread_schedule_background_job); + mono_add_internal_call_internal ("System.Runtime.InteropServices.JavaScript.JSSynchronizationContext::ScheduleSynchronizationContext", mono_target_thread_schedule_synchronization_context); #endif /* DISABLE_THREADS */ #endif /* HOST_BROWSER */ } diff --git a/src/mono/mono/mini/mini-wasm.h b/src/mono/mono/mini/mini-wasm.h index 77c7f3a78fa5cc..95bafda3336c04 100644 --- a/src/mono/mono/mini/mini-wasm.h +++ b/src/mono/mono/mini/mini-wasm.h @@ -100,11 +100,18 @@ typedef struct { // sdks/wasm/driver.c is C and uses this G_EXTERN_C void mono_wasm_enable_debugging (int log_level); +#ifdef HOST_BROWSER + +//JS functions imported that we use #ifdef DISABLE_THREADS +void mono_wasm_execute_timer (void); void mono_wasm_main_thread_schedule_timer (void *timerHandler, int shortestDueTimeMs); #endif // DISABLE_THREADS void mono_wasm_print_stack_trace (void); +#endif // HOST_BROWSER + + gboolean mini_wasm_is_scalar_vtype (MonoType *type, MonoType **etype); diff --git a/src/mono/mono/utils/mono-threads-wasm.c b/src/mono/mono/utils/mono-threads-wasm.c index 8ba904a5f6a0fa..597592a7966c66 100644 --- a/src/mono/mono/utils/mono-threads-wasm.c +++ b/src/mono/mono/utils/mono-threads-wasm.c @@ -346,7 +346,6 @@ G_EXTERN_C extern void schedule_background_exec (void); // when this is called from ThreadPool, the cb would be System.Threading.ThreadPool.BackgroundJobHandler -// when this is called from JSSynchronizationContext, the cb would be System.Runtime.InteropServices.JavaScript.JSSynchronizationContext.BackgroundJobHandler // when this is called from sgen it would be wrapper of sgen_perform_collection_inner // when this is called from gc, it would be mono_runtime_do_background_work #ifdef DISABLE_THREADS @@ -354,77 +353,24 @@ void mono_main_thread_schedule_background_job (background_job_cb cb) { g_assert (cb); - THREADS_DEBUG 
("mono_main_thread_schedule_background_job2: thread %p queued job %p to current thread\n", (gpointer)pthread_self(), (gpointer) cb); - mono_current_thread_schedule_background_job (cb); -} -#endif /*DISABLE_THREADS*/ - -#ifndef DISABLE_THREADS -MonoNativeTlsKey jobs_key; -#else /* DISABLE_THREADS */ -GSList *jobs; -#endif /* DISABLE_THREADS */ - -void -mono_current_thread_schedule_background_job (background_job_cb cb) -{ - g_assert (cb); -#ifdef DISABLE_THREADS + THREADS_DEBUG ("mono_main_thread_schedule_background_job: thread %p queued job %p to current thread\n", (gpointer)pthread_self(), (gpointer) cb); if (!jobs) schedule_background_exec (); if (!g_slist_find (jobs, (gconstpointer)cb)) jobs = g_slist_prepend (jobs, (gpointer)cb); - -#else /*DISABLE_THREADS*/ - - GSList *jobs = mono_native_tls_get_value (jobs_key); - THREADS_DEBUG ("mono_current_thread_schedule_background_job1: thread %p queuing job %p into %p\n", (gpointer)pthread_self(), (gpointer) cb, (gpointer) jobs); - if (!jobs) - { - THREADS_DEBUG ("mono_current_thread_schedule_background_job2: thread %p calling schedule_background_exec before job %p\n", (gpointer)pthread_self(), (gpointer) cb); - schedule_background_exec (); - } - - if (!g_slist_find (jobs, (gconstpointer)cb)) - { - jobs = g_slist_prepend (jobs, (gpointer)cb); - mono_native_tls_set_value (jobs_key, jobs); - THREADS_DEBUG ("mono_current_thread_schedule_background_job3: thread %p queued job %p\n", (gpointer)pthread_self(), (gpointer) cb); - } - -#endif /*DISABLE_THREADS*/ } -#ifndef DISABLE_THREADS -void -mono_target_thread_schedule_background_job (MonoNativeThreadId target_thread, background_job_cb cb) -{ - THREADS_DEBUG ("worker %p queued job %p to worker %p \n", (gpointer)pthread_self(), (gpointer) cb, (gpointer) target_thread); - // NOTE: here the cb is [UnmanagedCallersOnly] which wraps it with MONO_ENTER_GC_UNSAFE/MONO_EXIT_GC_UNSAFE - mono_threads_wasm_async_run_in_target_thread_vi ((pthread_t) target_thread, 
(void*)mono_current_thread_schedule_background_job, (gpointer)cb); -} -#endif /*DISABLE_THREADS*/ - -G_EXTERN_C -EMSCRIPTEN_KEEPALIVE void -mono_background_exec (void); +GSList *jobs; G_EXTERN_C EMSCRIPTEN_KEEPALIVE void mono_background_exec (void) { MONO_ENTER_GC_UNSAFE; -#ifdef DISABLE_THREADS GSList *j = jobs, *cur; jobs = NULL; -#else /* DISABLE_THREADS */ - THREADS_DEBUG ("mono_background_exec on thread %p started\n", (gpointer)pthread_self()); - GSList *jobs = mono_native_tls_get_value (jobs_key); - GSList *j = jobs, *cur; - mono_native_tls_set_value (jobs_key, NULL); -#endif /* DISABLE_THREADS */ for (cur = j; cur; cur = cur->next) { background_job_cb cb = (background_job_cb)cur->data; @@ -437,6 +383,17 @@ mono_background_exec (void) MONO_EXIT_GC_UNSAFE; } +#else /*DISABLE_THREADS*/ + +extern void mono_wasm_schedule_synchronization_context (); + +void mono_target_thread_schedule_synchronization_context(MonoNativeThreadId target_thread) +{ + emscripten_dispatch_to_thread_async ((pthread_t) target_thread, EM_FUNC_SIG_V, mono_wasm_schedule_synchronization_context, NULL); +} + +#endif /*DISABLE_THREADS*/ + gboolean mono_threads_platform_is_main_thread (void) { diff --git a/src/mono/mono/utils/mono-threads-wasm.h b/src/mono/mono/utils/mono-threads-wasm.h index 08cc690673df12..927c5b0eb0ea54 100644 --- a/src/mono/mono/utils/mono-threads-wasm.h +++ b/src/mono/mono/utils/mono-threads-wasm.h @@ -87,9 +87,9 @@ mono_wasm_atomic_wait_i32 (volatile int32_t *addr, int32_t expected, int32_t tim return __builtin_wasm_memory_atomic_wait32((int32_t*)addr, expected, timeout_ns); } -extern MonoNativeTlsKey jobs_key; #else /* DISABLE_THREADS */ extern GSList *jobs; +void mono_background_exec (void); #endif /* DISABLE_THREADS */ void diff --git a/src/mono/mono/utils/mono-threads.c b/src/mono/mono/utils/mono-threads.c index 14a00bc9154291..144feb38878770 100644 --- a/src/mono/mono/utils/mono-threads.c +++ b/src/mono/mono/utils/mono-threads.c @@ -523,12 +523,6 @@ register_thread 
(MonoThreadInfo *info) g_assert (staddr); #endif /* TARGET_WASM */ -#ifdef HOST_WASM -#ifndef DISABLE_THREADS - mono_native_tls_set_value (jobs_key, NULL); -#endif /* DISABLE_THREADS */ -#endif /* HOST_WASM */ - g_assert (stsize); info->stack_start_limit = staddr; info->stack_end = staddr + stsize; @@ -979,12 +973,6 @@ mono_thread_info_init (size_t info_size) mono_threads_suspend_policy_init (); -#ifdef HOST_WASM -#ifndef DISABLE_THREADS - res = mono_native_tls_alloc (&jobs_key, NULL); -#endif /* DISABLE_THREADS */ -#endif /* HOST_BROWSER */ - #ifdef HOST_WIN32 res = mono_native_tls_alloc (&thread_info_key, NULL); res = mono_native_tls_alloc (&thread_exited_key, NULL); diff --git a/src/mono/mono/utils/mono-threads.h b/src/mono/mono/utils/mono-threads.h index 08db699e1d6058..8410e43ef9301a 100644 --- a/src/mono/mono/utils/mono-threads.h +++ b/src/mono/mono/utils/mono-threads.h @@ -847,9 +847,9 @@ void mono_threads_join_unlock (void); typedef void (*background_job_cb)(void); #ifdef DISABLE_THREADS void mono_main_thread_schedule_background_job (background_job_cb cb); +#else +void mono_target_thread_schedule_synchronization_context(MonoNativeThreadId target_thread); #endif // DISABLE_THREADS -void mono_current_thread_schedule_background_job (background_job_cb cb); -void mono_target_thread_schedule_background_job (MonoNativeThreadId target_thread, background_job_cb cb); #endif #ifdef USE_WINDOWS_BACKEND From 0c2e8d2684f7f4123a731416b5711b54b85a99a5 Mon Sep 17 00:00:00 2001 From: Ilona Tomkowicz <32700855+ilonatommy@users.noreply.github.com> Date: Wed, 3 Apr 2024 12:51:14 +0200 Subject: [PATCH 056/132] [browser][mt] Block Blazor WBT that timeouts (#100577) --- .../wasm/Wasm.Build.Tests/Blazor/SimpleMultiThreadedTests.cs | 1 + .../wasm/Wasm.Build.Tests/TestAppScenarios/SignalRClientTests.cs | 1 + 2 files changed, 2 insertions(+) diff --git a/src/mono/wasm/Wasm.Build.Tests/Blazor/SimpleMultiThreadedTests.cs 
b/src/mono/wasm/Wasm.Build.Tests/Blazor/SimpleMultiThreadedTests.cs index c92fc5f35bf980..556d40d42a40ec 100644 --- a/src/mono/wasm/Wasm.Build.Tests/Blazor/SimpleMultiThreadedTests.cs +++ b/src/mono/wasm/Wasm.Build.Tests/Blazor/SimpleMultiThreadedTests.cs @@ -38,6 +38,7 @@ public SimpleMultiThreadedTests(ITestOutputHelper output, SharedBuildPerTestClas // } [ConditionalTheory(typeof(BuildTestBase), nameof(IsWorkloadWithMultiThreadingForDefaultFramework))] + [ActiveIssue("https://github.com/dotnet/runtime/issues/100373")] // to be fixed by: "https://github.com/dotnet/aspnetcore/issues/54365" // [InlineData("Debug", false)] // ActiveIssue https://github.com/dotnet/runtime/issues/98758 // [InlineData("Debug", true)] [InlineData("Release", false)] diff --git a/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/SignalRClientTests.cs b/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/SignalRClientTests.cs index 0c705079d4874b..1b09272b487931 100644 --- a/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/SignalRClientTests.cs +++ b/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/SignalRClientTests.cs @@ -24,6 +24,7 @@ public SignalRClientTests(ITestOutputHelper output, SharedBuildPerTestClassFixtu } [ConditionalTheory(typeof(BuildTestBase), nameof(IsWorkloadWithMultiThreadingForDefaultFramework))] + [ActiveIssue("https://github.com/dotnet/runtime/issues/100445")] // to be fixed by: "https://github.com/dotnet/aspnetcore/issues/54365" [InlineData("Debug", "LongPolling")] [InlineData("Release", "LongPolling")] [InlineData("Debug", "WebSockets")] From e5cf6905f6065b45f32f8780fe9645969e836ecf Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Wed, 3 Apr 2024 12:58:08 +0200 Subject: [PATCH 057/132] [wasm][bench] Fix startup measurements (#100579) --- .../browser-bench/Wasm.Browser.Bench.Sample.csproj | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/mono/sample/wasm/browser-bench/Wasm.Browser.Bench.Sample.csproj 
b/src/mono/sample/wasm/browser-bench/Wasm.Browser.Bench.Sample.csproj index 752927021914cb..b4d32416389ad0 100644 --- a/src/mono/sample/wasm/browser-bench/Wasm.Browser.Bench.Sample.csproj +++ b/src/mono/sample/wasm/browser-bench/Wasm.Browser.Bench.Sample.csproj @@ -40,6 +40,7 @@ </disabledPackageSources> </configuration> + $(MSBuildThisFileDirectory)nugetPackages @@ -50,6 +51,8 @@ + + @@ -68,12 +71,12 @@ Overwrite="true" Lines="$(NugetConfigContent)" /> - + - + @@ -104,13 +107,13 @@ Overwrite="true" Lines="$(NugetConfigContent)" /> - + - + From 86188140a8c619451591168e044845b757167202 Mon Sep 17 00:00:00 2001 From: Ilona Tomkowicz <32700855+ilonatommy@users.noreply.github.com> Date: Wed, 3 Apr 2024 15:25:46 +0200 Subject: [PATCH 058/132] [browser][ws] Do not duplicate code executed in `local_on_close` (#99685) * Do not duplicate code executed in "local_on_close". * Fix. --- .../System.Net.WebSockets.Client/tests/CloseTest.cs | 12 ++++++++---- src/mono/browser/runtime/web-socket.ts | 5 ++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/libraries/System.Net.WebSockets.Client/tests/CloseTest.cs b/src/libraries/System.Net.WebSockets.Client/tests/CloseTest.cs index f4690cf9a6a1c1..fb73485fc7fe1d 100644 --- a/src/libraries/System.Net.WebSockets.Client/tests/CloseTest.cs +++ b/src/libraries/System.Net.WebSockets.Client/tests/CloseTest.cs @@ -264,8 +264,8 @@ public async Task CloseOutputAsync_ClientInitiated_CanReceive_CanClose(Uri serve [ActiveIssue("https://github.com/dotnet/runtime/issues/28957", typeof(PlatformDetection), nameof(PlatformDetection.IsNotBrowser))] [OuterLoop("Uses external servers", typeof(PlatformDetection), nameof(PlatformDetection.LocalEchoServerIsNotAvailable))] - [ConditionalTheory(nameof(WebSocketsSupported)), MemberData(nameof(EchoServers))] - public async Task CloseOutputAsync_ServerInitiated_CanReceive(Uri server) + [ConditionalTheory(nameof(WebSocketsSupported)), MemberData(nameof(EchoServersWithSwitch))] + public async 
Task CloseOutputAsync_ServerInitiated_CanReceive(Uri server, bool delayReceiving) { var expectedCloseStatus = WebSocketCloseStatus.NormalClosure; var expectedCloseDescription = ".shutdownafter"; @@ -280,6 +280,10 @@ await cws.SendAsync( true, cts.Token); + // let server close the output before we request receiving + if (delayReceiving) + await Task.Delay(1000); + // Should be able to receive the message echoed by the server. var recvBuffer = new byte[100]; var segmentRecv = new ArraySegment(recvBuffer); @@ -363,7 +367,7 @@ await cws.SendAsync( } } - public static IEnumerable EchoServersSyncState => + public static IEnumerable EchoServersWithSwitch => EchoServers.SelectMany(server => new List { new object[] { server[0], true }, @@ -371,7 +375,7 @@ await cws.SendAsync( }); [ActiveIssue("https://github.com/dotnet/runtime/issues/28957", typeof(PlatformDetection), nameof(PlatformDetection.IsNotBrowser))] - [ConditionalTheory(nameof(WebSocketsSupported)), MemberData(nameof(EchoServersSyncState))] + [ConditionalTheory(nameof(WebSocketsSupported)), MemberData(nameof(EchoServersWithSwitch))] public async Task CloseOutputAsync_ServerInitiated_CanReceiveAfterClose(Uri server, bool syncState) { using (ClientWebSocket cws = await GetConnectedWebSocket(server, TimeOutMilliseconds, _output)) diff --git a/src/mono/browser/runtime/web-socket.ts b/src/mono/browser/runtime/web-socket.ts index 9246c1bbaf2af6..d97cc76cefe4a6 100644 --- a/src/mono/browser/runtime/web-socket.ts +++ b/src/mono/browser/runtime/web-socket.ts @@ -44,7 +44,7 @@ function verifyEnvironment () { } } -export function ws_get_state (ws: WebSocketExtension) : number { +export function ws_get_state (ws: WebSocketExtension): number { if (ws.readyState != WebSocket.CLOSED) return ws.readyState ?? 
-1; const receive_event_queue = ws[wasm_ws_pending_receive_event_queue]; @@ -228,8 +228,7 @@ export function ws_wasm_receive (ws: WebSocketExtension, buffer_ptr: VoidPtr, bu return resolvedPromise(); } - const readyState = ws.readyState; - if (readyState == WebSocket.CLOSED) { + if (ws[wasm_ws_close_received]) { const receive_status_ptr = ws[wasm_ws_receive_status_ptr]; setI32(receive_status_ptr, 0); // count setI32(receive_status_ptr + 4, 2); // type:close From d0023cf27ea754a851080e7046201633635a011b Mon Sep 17 00:00:00 2001 From: Pavel Savara Date: Wed, 3 Apr 2024 15:44:03 +0200 Subject: [PATCH 059/132] [browser][MT] assert no managed transition on UI thread after start (#100410) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Marek Fišera --- src/mono/browser/runtime/exports-internal.ts | 3 +++ src/mono/browser/runtime/gc-lock.ts | 3 +++ src/mono/browser/runtime/marshal-to-cs.ts | 6 +++--- src/mono/browser/runtime/marshal-to-js.ts | 6 +++--- src/mono/browser/runtime/roots.ts | 5 ++++- src/mono/browser/runtime/startup.ts | 4 +++- src/mono/browser/runtime/strings.ts | 7 +++++++ src/mono/browser/runtime/types/internal.ts | 1 + 8 files changed, 27 insertions(+), 8 deletions(-) diff --git a/src/mono/browser/runtime/exports-internal.ts b/src/mono/browser/runtime/exports-internal.ts index c2ef78c0a1f9e3..bd5cfacafad643 100644 --- a/src/mono/browser/runtime/exports-internal.ts +++ b/src/mono/browser/runtime/exports-internal.ts @@ -127,6 +127,9 @@ export function cwraps_internal (internal: any): void { /* @deprecated not GC safe, legacy support for Blazor */ export function monoObjectAsBoolOrNullUnsafe (obj: MonoObject): boolean | null { + // TODO https://github.com/dotnet/runtime/issues/100411 + // after Blazor stops using monoObjectAsBoolOrNullUnsafe + if (obj === MonoObjectNull) { return null; } diff --git a/src/mono/browser/runtime/gc-lock.ts b/src/mono/browser/runtime/gc-lock.ts index 
7a85513fd5ebde..f787c4710bab11 100644 --- a/src/mono/browser/runtime/gc-lock.ts +++ b/src/mono/browser/runtime/gc-lock.ts @@ -7,6 +7,9 @@ import cwraps from "./cwraps"; export let gc_locked = false; +// TODO https://github.com/dotnet/runtime/issues/100411 +// after Blazor stops using mono_wasm_gc_lock, mono_wasm_gc_unlock + export function mono_wasm_gc_lock (): void { if (gc_locked) { throw new Error("GC is already locked"); diff --git a/src/mono/browser/runtime/marshal-to-cs.ts b/src/mono/browser/runtime/marshal-to-cs.ts index d3c7117f2a7da1..360cefec509cfa 100644 --- a/src/mono/browser/runtime/marshal-to-cs.ts +++ b/src/mono/browser/runtime/marshal-to-cs.ts @@ -22,7 +22,6 @@ import { _zero_region, localHeapViewF64, localHeapViewI32, localHeapViewU8 } fro import { stringToMonoStringRoot, stringToUTF16 } from "./strings"; import { JSMarshalerArgument, JSMarshalerArguments, JSMarshalerType, MarshalerToCs, MarshalerToJs, BoundMarshalerToCs, MarshalerType } from "./types/internal"; import { TypedArray } from "./types/emscripten"; -import { gc_locked } from "./gc-lock"; export const jsinteropDoc = "For more information see https://aka.ms/dotnet-wasm-jsinterop"; @@ -224,6 +223,7 @@ function _marshal_string_to_cs_impl (arg: JSMarshalerArgument, value: string) { set_arg_intptr(arg, buffer); set_arg_length(arg, value.length); } else { + mono_assert(!WasmEnableThreads, "Marshaling strings by reference is not supported in multithreaded mode"); const root = get_string_root(arg); try { stringToMonoStringRoot(value, root); @@ -463,7 +463,7 @@ export function marshal_array_to_cs_impl (arg: JSMarshalerArgument, value: Array mono_check(Array.isArray(value), "Value is not an Array"); _zero_region(buffer_ptr, buffer_length); if (!WasmEnableJsInteropByValue) { - mono_assert(!WasmEnableThreads || !gc_locked, "GC must not be locked when creating a GC root"); + mono_assert(!WasmEnableThreads, "Marshaling strings by reference is not supported in multithreaded mode"); 
cwraps.mono_wasm_register_root(buffer_ptr, buffer_length, "marshal_array_to_cs"); } for (let index = 0; index < length; index++) { @@ -474,7 +474,7 @@ export function marshal_array_to_cs_impl (arg: JSMarshalerArgument, value: Array mono_check(Array.isArray(value), "Value is not an Array"); _zero_region(buffer_ptr, buffer_length); if (!WasmEnableJsInteropByValue) { - mono_assert(!WasmEnableThreads || !gc_locked, "GC must not be locked when creating a GC root"); + mono_assert(!WasmEnableThreads, "Marshaling objects by reference is not supported in multithreaded mode"); cwraps.mono_wasm_register_root(buffer_ptr, buffer_length, "marshal_array_to_cs"); } for (let index = 0; index < length; index++) { diff --git a/src/mono/browser/runtime/marshal-to-js.ts b/src/mono/browser/runtime/marshal-to-js.ts index 1cc4575bf55b87..e636aa5f52974b 100644 --- a/src/mono/browser/runtime/marshal-to-js.ts +++ b/src/mono/browser/runtime/marshal-to-js.ts @@ -22,7 +22,6 @@ import { TypedArray } from "./types/emscripten"; import { get_marshaler_to_cs_by_type, jsinteropDoc, marshal_exception_to_cs } from "./marshal-to-cs"; import { localHeapViewF64, localHeapViewI32, localHeapViewU8 } from "./memory"; import { call_delegate } from "./managed-exports"; -import { gc_locked } from "./gc-lock"; import { mono_log_debug } from "./logging"; import { invoke_later_when_on_ui_thread_async } from "./invoke-js"; @@ -390,6 +389,7 @@ export function marshal_string_to_js (arg: JSMarshalerArgument): string | null { Module._free(buffer as any); return value; } else { + mono_assert(!WasmEnableThreads, "Marshaling strings by reference is not supported in multithreaded mode"); const root = get_string_root(arg); try { const value = monoStringToString(root); @@ -504,7 +504,7 @@ function _marshal_array_to_js_impl (arg: JSMarshalerArgument, element_type: Mars result[index] = marshal_string_to_js(element_arg); } if (!WasmEnableJsInteropByValue) { - mono_assert(!WasmEnableThreads || !gc_locked, "GC must not be locked 
when disposing a GC root"); + mono_assert(!WasmEnableThreads, "Marshaling string by reference is not supported in multithreaded mode"); cwraps.mono_wasm_deregister_root(buffer_ptr); } } else if (element_type == MarshalerType.Object) { @@ -514,7 +514,7 @@ function _marshal_array_to_js_impl (arg: JSMarshalerArgument, element_type: Mars result[index] = _marshal_cs_object_to_js(element_arg); } if (!WasmEnableJsInteropByValue) { - mono_assert(!WasmEnableThreads || !gc_locked, "GC must not be locked when disposing a GC root"); + mono_assert(!WasmEnableThreads, "Marshaling objects by reference is not supported in multithreaded mode"); cwraps.mono_wasm_deregister_root(buffer_ptr); } } else if (element_type == MarshalerType.JSObject) { diff --git a/src/mono/browser/runtime/roots.ts b/src/mono/browser/runtime/roots.ts index cef2a17f0decbe..fdd4b2a5302f2a 100644 --- a/src/mono/browser/runtime/roots.ts +++ b/src/mono/browser/runtime/roots.ts @@ -4,7 +4,7 @@ import WasmEnableThreads from "consts:wasmEnableThreads"; import cwraps from "./cwraps"; -import { Module, mono_assert } from "./globals"; +import { Module, mono_assert, runtimeHelpers } from "./globals"; import { VoidPtr, ManagedPointer, NativePointer } from "./types/emscripten"; import { MonoObjectRef, MonoObjectRefNull, MonoObject, is_nullish, WasmRoot, WasmRootBuffer } from "./types/internal"; import { _zero_region, localHeapViewU32 } from "./memory"; @@ -24,6 +24,7 @@ const _external_root_free_instances: WasmExternalRoot[] = []; * For small numbers of roots, it is preferable to use the mono_wasm_new_root and mono_wasm_new_roots APIs instead. 
*/ export function mono_wasm_new_root_buffer (capacity: number, name?: string): WasmRootBuffer { + if (WasmEnableThreads && runtimeHelpers.disableManagedTransition) throw new Error("External roots are not supported when threads are enabled"); if (capacity <= 0) throw new Error("capacity >= 1"); @@ -44,6 +45,7 @@ export function mono_wasm_new_root_buffer (capacity: number, name?: string): Was * Releasing this root will not de-allocate the root space. You still need to call .release(). */ export function mono_wasm_new_external_root (address: VoidPtr | MonoObjectRef): WasmRoot { + if (WasmEnableThreads && runtimeHelpers.disableManagedTransition) throw new Error("External roots are not supported in multithreaded mode"); let result: WasmExternalRoot; if (!address) @@ -67,6 +69,7 @@ export function mono_wasm_new_external_root (address: Void * When you are done using the root you must call its .release() method. */ export function mono_wasm_new_root (value: T | undefined = undefined): WasmRoot { + if (WasmEnableThreads && runtimeHelpers.disableManagedTransition) throw new Error("External roots are not supported in multithreaded mode"); let result: WasmRoot; if (_scratch_root_free_instances.length > 0) { diff --git a/src/mono/browser/runtime/startup.ts b/src/mono/browser/runtime/startup.ts index 2ab5110ec46875..24fbb1d16a04f2 100644 --- a/src/mono/browser/runtime/startup.ts +++ b/src/mono/browser/runtime/startup.ts @@ -288,7 +288,7 @@ async function onRuntimeInitializedAsync (userOnRuntimeInitialized: () => void) runtimeHelpers.ioThreadTID = tcwraps.mono_wasm_create_io_thread(); } - // TODO make UI thread not managed + // TODO make UI thread not managed/attached https://github.com/dotnet/runtime/issues/100411 tcwraps.mono_wasm_register_ui_thread(); monoThreadInfo.isAttached = true; monoThreadInfo.isRegistered = true; @@ -296,6 +296,8 @@ async function onRuntimeInitializedAsync (userOnRuntimeInitialized: () => void) runtimeHelpers.runtimeReady = true; update_thread_info(); 
bindings_init(); + + runtimeHelpers.disableManagedTransition = true; } else { // load mono runtime and apply environment settings (if necessary) await start_runtime(); diff --git a/src/mono/browser/runtime/strings.ts b/src/mono/browser/runtime/strings.ts index c5cb3e7b321797..a596b4e35ef5a2 100644 --- a/src/mono/browser/runtime/strings.ts +++ b/src/mono/browser/runtime/strings.ts @@ -1,6 +1,8 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +import WasmEnableThreads from "consts:wasmEnableThreads"; + import { mono_wasm_new_root, mono_wasm_new_root_buffer } from "./roots"; import { MonoString, MonoStringNull, WasmRoot, WasmRootBuffer } from "./types/internal"; import { Module } from "./globals"; @@ -118,6 +120,10 @@ export function stringToUTF16Ptr (str: string): VoidPtr { } export function monoStringToString (root: WasmRoot): string | null { + // TODO https://github.com/dotnet/runtime/issues/100411 + // after Blazor stops using monoStringToStringUnsafe + // mono_assert(!WasmEnableThreads, "Marshaling strings by reference is not supported in multithreaded mode"); + if (root.value === MonoStringNull) return null; @@ -152,6 +158,7 @@ export function monoStringToString (root: WasmRoot): string | null { } export function stringToMonoStringRoot (string: string, result: WasmRoot): void { + if (WasmEnableThreads) return; result.clear(); if (string === null) diff --git a/src/mono/browser/runtime/types/internal.ts b/src/mono/browser/runtime/types/internal.ts index b333e254703b02..2017a86922423e 100644 --- a/src/mono/browser/runtime/types/internal.ts +++ b/src/mono/browser/runtime/types/internal.ts @@ -206,6 +206,7 @@ export type RuntimeHelpers = { getMemory(): WebAssembly.Memory, getWasmIndirectFunctionTable(): WebAssembly.Table, runtimeReady: boolean, + disableManagedTransition: boolean, monoThreadInfo: PThreadInfo, proxyGCHandle: GCHandle | undefined, managedThreadTID: PThreadPtr, 
From 5c3bfeb8477b34a15ef851faa54cae1968ab4237 Mon Sep 17 00:00:00 2001 From: Andy Ayers Date: Wed, 3 Apr 2024 07:15:48 -0700 Subject: [PATCH 060/132] JIT: fix early convergence in profile count solver (#100552) We have two stopping criteria for the iterative solver: relative and absolute. The absolute threshold was too high and allowed the solver to stop with high relative residuals. Remove the absolute check and just use the relative residual for stopping. Closes #100477. --- src/coreclr/jit/fgprofilesynthesis.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index 0ab8576cb24b86..77803454f0cfde 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -1104,7 +1104,6 @@ void ProfileSynthesis::GaussSeidelSolver() weight_t relResidual = 0; weight_t oldRelResidual = 0; weight_t eigenvalue = 0; - weight_t const stopResidual = 0.005; weight_t const stopRelResidual = 0.002; BasicBlock* residualBlock = nullptr; BasicBlock* relResidualBlock = nullptr; @@ -1312,9 +1311,9 @@ void ProfileSynthesis::GaussSeidelSolver() JITDUMP("iteration %u: max rel residual is at " FMT_BB " : " FMT_WT "\n", i, relResidualBlock->bbNum, relResidual); - // If max residual or relative residual is sufficiently small, then stop. + // If max relative residual is sufficiently small, then stop. 
// - if ((residual < stopResidual) || (relResidual < stopRelResidual)) + if (relResidual < stopRelResidual) { converged = true; break; From dd1b8b59ed732aaf0bccf4384d7ee5369a89ba6f Mon Sep 17 00:00:00 2001 From: "dotnet-maestro[bot]" <42748379+dotnet-maestro[bot]@users.noreply.github.com> Date: Wed, 3 Apr 2024 10:32:14 -0500 Subject: [PATCH 061/132] Update dependencies from https://github.com/dotnet/source-build-externals build 20240401.3 (#100586) Microsoft.SourceBuild.Intermediate.source-build-externals From Version 9.0.0-alpha.1.24175.4 -> To Version 9.0.0-alpha.1.24201.3 Co-authored-by: dotnet-maestro[bot] --- eng/Version.Details.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml index 308812eeb57d37..1dfdca6c9bca10 100644 --- a/eng/Version.Details.xml +++ b/eng/Version.Details.xml @@ -85,9 +85,9 @@ - + https://github.com/dotnet/source-build-externals - 52d6569d44f86b5d442017f4a9eb3cda4c766afb + bcd44732882bc2b81b30146c778eb6ccb7fea793 From b040ed6bdfb7ad6c73948cd9ff275aca8ba48c68 Mon Sep 17 00:00:00 2001 From: Stephen Toub Date: Wed, 3 Apr 2024 12:05:13 -0400 Subject: [PATCH 062/132] Several improvements / simplifications in Regex (#100315) * Several improvements / simplifications in Regex This started out as a small improvement for one thing and grew to be something else. Initially, my intent was just to improve how `SearchValues` applies to character classes with subtraction. Character class subtraction isn't frequently used, but it is a convenient way to express removing subsets of ranges, e.g. all ASCII other than digits `[\u0000-\u007F-[0-9]]`. Currently when we go to enumerate the characters in a char class, for perf reasons we only do the enumeration if we can enumerate sets and up to the max space provided, in order to keep the time down. 
We immediately give up if the char class has subtraction, but given that we've already limited how many values we're enumerating, if there is subtraction we can afford to query for just those chars that would otherwise pass in order to enable the subtraction. So, with this PR, we can now support using SearchValues in this manner: **this means that whereas previously we would have generated an IndexOfAny for any of the ASCII characters or anything non-ASCII, then with a fallback for if we hit something non-ASCII, now we'll just create an IndexOfAny for the full set**. However, that triggered a (then defunct) assert which led me to see that we have a bunch of duplicated logic around asserts: we'd frequently be checking to see if a set contained at most 5 chars (in support of a time when we didn't have SearchValues and only optimized IndexOfAny for up to 5 chars) and then subsequently would see if it contained only ASCII. We no longer need that separation, especially since SearchValues will now both vectorize probabilistic map searches and will first do a search for the ASCII portion (or anything non-ASCII). **This then means we can delete a variety of duplicated code while also expanding what we recognize for use with SearchValues.** This then lead to seeing that in a variety of places we compute the set of chars in a set and then check whether it could instead be satisfied just as a range but not if the set of chars is small. The former check is more expensive than the latter, but we were doing the first one first presumably in order to be able to do the set size check as part of the latter. However, we don't need it for that, as a single subtraction gives us the size of the range, **so we can just do the range check first and skip the more expensive set check if it's not needed.** That then led to seeing that we're not using range-based searching in the interpreter or non-backtracking engines. 
**This adds that support, such that the interpreter/non-backtracking engines will now search for the next starting location using IndexOfAny{Except}InRange if appropriate.**. * Update src/libraries/System.Text.RegularExpressions/gen/RegexGenerator.Emitter.cs Co-authored-by: Miha Zupan --------- Co-authored-by: Miha Zupan --- .../gen/RegexGenerator.Emitter.cs | 52 ++++--------- ...m.Text.RegularExpressions.Generator.csproj | 2 +- .../Text/RegularExpressions/RegexCharClass.cs | 51 +++++++------ .../Text/RegularExpressions/RegexCompiler.cs | 75 ++++--------------- .../RegexFindOptimizations.cs | 15 +++- .../Text/RegularExpressions/RegexNode.cs | 29 ++----- .../RegularExpressions/RegexPrefixAnalyzer.cs | 21 +++--- 7 files changed, 87 insertions(+), 158 deletions(-) diff --git a/src/libraries/System.Text.RegularExpressions/gen/RegexGenerator.Emitter.cs b/src/libraries/System.Text.RegularExpressions/gen/RegexGenerator.Emitter.cs index 2029a7fb9ae4f2..b2ab52c4c71fac 100644 --- a/src/libraries/System.Text.RegularExpressions/gen/RegexGenerator.Emitter.cs +++ b/src/libraries/System.Text.RegularExpressions/gen/RegexGenerator.Emitter.cs @@ -391,11 +391,13 @@ private static void AddIsECMABoundaryHelper(Dictionary require /// Adds a SearchValues instance declaration to the required helpers collection if the chars are ASCII. private static string EmitSearchValuesOrLiteral(ReadOnlySpan chars, Dictionary requiredHelpers) { - // SearchValues is faster than a regular IndexOfAny("abcd") for sets of 4/5 values iff they are ASCII. - // Only emit SearchValues instances when we know they'll be faster to avoid increasing the startup cost too much. - Debug.Assert(chars.Length is 4 or 5); + Debug.Assert(chars.Length > 3); - return RegexCharClass.IsAscii(chars) + // IndexOfAny(SearchValues) is faster than a regular IndexOfAny("abcd") if: + // - There are more than 5 characters in the needle, or + // - There are only 4 or 5 characters in the needle and they're all ASCII. 
+ + return chars.Length > 5 || RegexCharClass.IsAscii(chars) ? EmitSearchValues(chars.ToArray(), requiredHelpers) : Literal(chars.ToString()); } @@ -3510,11 +3512,10 @@ void EmitSingleCharLazy(RegexNode node, RegexNode? subsequent = null, bool emitL { if (iterationCount is null && node.Kind is RegexNodeKind.Notonelazy && - subsequent?.FindStartingLiteral(4) is RegexNode.StartingLiteralData literal && // 5 == max efficiently optimized by IndexOfAny, and we need to reserve 1 for node.Ch + subsequent?.FindStartingLiteral() is RegexNode.StartingLiteralData literal && !literal.Negated && // not negated; can't search for both the node.Ch and a negated subsequent char with an IndexOf* method (literal.String is not null || literal.SetChars is not null || - (literal.AsciiChars is not null && node.Ch < 128) || // for ASCII sets, only allow when the target can be efficiently included in the set literal.Range.LowInclusive == literal.Range.HighInclusive || (literal.Range.LowInclusive <= node.Ch && node.Ch <= literal.Range.HighInclusive))) // for ranges, only allow when the range overlaps with the target, since there's no accelerated way to search for the union { @@ -3546,18 +3547,6 @@ literal.SetChars is not null || (false, _) => $"{startingPos} = {sliceSpan}.IndexOfAny({EmitSearchValuesOrLiteral($"{node.Ch}{literal.SetChars}".AsSpan(), requiredHelpers)});", }); } - else if (literal.AsciiChars is not null) // set of only ASCII characters - { - char[] asciiChars = literal.AsciiChars; - overlap = asciiChars.Contains(node.Ch); - if (!overlap) - { - Debug.Assert(node.Ch < 128); - Array.Resize(ref asciiChars, asciiChars.Length + 1); - asciiChars[asciiChars.Length - 1] = node.Ch; - } - writer.WriteLine($"{startingPos} = {sliceSpan}.IndexOfAny({EmitSearchValues(asciiChars, requiredHelpers)});"); - } else if (literal.Range.LowInclusive == literal.Range.HighInclusive) // single char from a RegexNode.One { overlap = literal.Range.LowInclusive == node.Ch; @@ -4928,11 +4917,10 @@ private 
static bool TryEmitIndexOf( { bool negated = RegexCharClass.IsNegated(node.Str) ^ negate; - Span setChars = stackalloc char[5]; // current max that's vectorized - int setCharsCount = RegexCharClass.GetSetChars(node.Str, setChars); - - // Prefer IndexOfAnyInRange over IndexOfAny for sets of 3-5 values that fit in a single range. - if (setCharsCount is not (1 or 2) && RegexCharClass.TryGetSingleRange(node.Str, out char lowInclusive, out char highInclusive)) + // IndexOfAny{Except}InRange + // Prefer IndexOfAnyInRange over IndexOfAny, except for tiny ranges (1 or 2 items) that IndexOfAny handles more efficiently + if (RegexCharClass.TryGetSingleRange(node.Str, out char lowInclusive, out char highInclusive) && + (highInclusive - lowInclusive) > 1) { string indexOfAnyInRangeName = !negated ? "IndexOfAnyInRange" : @@ -4944,13 +4932,15 @@ private static bool TryEmitIndexOf( return true; } - if (setCharsCount > 0) + // IndexOfAny{Except}(ch1, ...) + Span setChars = stackalloc char[128]; + setChars = setChars.Slice(0, RegexCharClass.GetSetChars(node.Str, setChars)); + if (!setChars.IsEmpty) { (string indexOfName, string indexOfAnyName) = !negated ? ("IndexOf", "IndexOfAny") : ("IndexOfAnyExcept", "IndexOfAnyExcept"); - setChars = setChars.Slice(0, setCharsCount); indexOfExpr = setChars.Length switch { 1 => $"{last}{indexOfName}({Literal(setChars[0])})", @@ -4962,18 +4952,6 @@ private static bool TryEmitIndexOf( literalLength = 1; return true; } - - if (RegexCharClass.TryGetAsciiSetChars(node.Str, out char[]? asciiChars)) - { - string indexOfAnyName = !negated ? 
- "IndexOfAny" : - "IndexOfAnyExcept"; - - indexOfExpr = $"{last}{indexOfAnyName}({EmitSearchValues(asciiChars, requiredHelpers)})"; - - literalLength = 1; - return true; - } } indexOfExpr = null; diff --git a/src/libraries/System.Text.RegularExpressions/gen/System.Text.RegularExpressions.Generator.csproj b/src/libraries/System.Text.RegularExpressions/gen/System.Text.RegularExpressions.Generator.csproj index ec5b22d79229ba..7208f5a5185dca 100644 --- a/src/libraries/System.Text.RegularExpressions/gen/System.Text.RegularExpressions.Generator.csproj +++ b/src/libraries/System.Text.RegularExpressions/gen/System.Text.RegularExpressions.Generator.csproj @@ -11,7 +11,7 @@ false true false - $(NoWarn);CS0436;CS0649 + $(NoWarn);CS0436;CS0649;CA1872 true cs diff --git a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCharClass.cs b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCharClass.cs index ed67df6819023d..5666498347e468 100644 --- a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCharClass.cs +++ b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCharClass.cs @@ -815,17 +815,23 @@ public static bool TryGetDoubleRange( /// If 0 is returned, no assumptions can be made about the characters. /// /// - /// Only considers character classes that only contain sets (no categories) - /// and no subtraction... just simple sets containing starting/ending pairs. - /// The returned characters may be negated: if IsNegated(set) is false, then - /// the returned characters are the only ones that match; if it returns true, - /// then the returned characters are the only ones that don't match. 
+ /// Only considers character classes that only contain sets (no categories), + /// just simple sets containing starting/ending pairs (subtraction from those pairs + /// is factored in, however).The returned characters may be negated: if IsNegated(set) + /// is false, then the returned characters are the only ones that match; if it returns + /// true, then the returned characters are the only ones that don't match. /// public static int GetSetChars(string set, Span chars) { // We get the characters by enumerating the set portion, so we validate that it's // set up to enable that, e.g. no categories. - if (!CanEasilyEnumerateSetContents(set)) + if (!CanEasilyEnumerateSetContents(set, out bool hasSubtraction)) + { + return 0; + } + + // Negation with subtraction is too cumbersome to reason about efficiently. + if (hasSubtraction && IsNegated(set)) { return 0; } @@ -837,17 +843,30 @@ public static int GetSetChars(string set, Span chars) // based on it a) complicating things, and b) it being really unlikely to // be part of a small set. int setLength = set[SetLengthIndex]; - int count = 0; + int count = 0, evaluated = 0; for (int i = SetStartIndex; i < SetStartIndex + setLength; i += 2) { int curSetEnd = set[i + 1]; for (int c = set[i]; c < curSetEnd; c++) { - if (count >= chars.Length) + // Keep track of how many characters we've checked. This could work + // just comparing count rather than evaluated, but we also want to + // limit how much work is done here, which we can do by constraining + // the number of checks to the size of the storage provided. + if (++evaluated > chars.Length) { return 0; } + // If the set is all ranges but has a subtracted class, + // validate the char is actually in the set prior to storing it: + // it might be in the subtracted range. 
+ if (hasSubtraction && !CharInClass((char)c, set)) + { + continue; + } + + Debug.Assert(count <= evaluated); chars[count++] = (char)c; } } @@ -855,22 +874,6 @@ public static int GetSetChars(string set, Span chars) return count; } - public static bool TryGetAsciiSetChars(string set, [NotNullWhen(true)] out char[]? asciiChars) - { - Span chars = stackalloc char[128]; - - chars = chars.Slice(0, GetSetChars(set, chars)); - - if (chars.IsEmpty || !IsAscii(chars)) - { - asciiChars = null; - return false; - } - - asciiChars = chars.ToArray(); - return true; - } - /// /// Determines whether two sets may overlap. /// diff --git a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCompiler.cs b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCompiler.cs index dd2357183d4b3a..082087939bcd37 100644 --- a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCompiler.cs +++ b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCompiler.cs @@ -939,7 +939,7 @@ void EmitFixedSet_LeftToRight() default: // tmp = ...IndexOfAny(setChars); // tmp = ...IndexOfAny(s_searchValues); - EmitIndexOfAnyWithSearchValuesOrLiteral(new string(primarySet.Chars), except: primarySet.Negated); + EmitIndexOfAnyWithSearchValuesOrLiteral(primarySet.Chars, except: primarySet.Negated); break; } } @@ -3587,11 +3587,10 @@ void EmitSingleCharLazy(RegexNode node, RegexNode? 
subsequent = null, bool emitL if (!rtl && iterationCount is null && node.Kind is RegexNodeKind.Notonelazy && - subsequent?.FindStartingLiteral(4) is RegexNode.StartingLiteralData literal && // 5 == max optimized by IndexOfAny, and we need to reserve 1 for node.Ch + subsequent?.FindStartingLiteral() is RegexNode.StartingLiteralData literal && !literal.Negated && // not negated; can't search for both the node.Ch and a negated subsequent char with an IndexOf* method (literal.String is not null || literal.SetChars is not null || - (literal.AsciiChars is not null && node.Ch < 128) || // for ASCII sets, only allow when the target can be efficiently included in the set literal.Range.LowInclusive == literal.Range.HighInclusive || (literal.Range.LowInclusive <= node.Ch && node.Ch <= literal.Range.HighInclusive))) // for ranges, only allow when the range overlaps with the target, since there's no accelerated way to search for the union { @@ -3660,18 +3659,6 @@ literal.SetChars is not null || break; } } - else if (literal.AsciiChars is not null) // set of only ASCII characters - { - char[] asciiChars = literal.AsciiChars; - overlap = asciiChars.AsSpan().Contains(node.Ch); - if (!overlap) - { - Debug.Assert(node.Ch < 128); - Array.Resize(ref asciiChars, asciiChars.Length + 1); - asciiChars[^1] = node.Ch; - } - EmitIndexOfAnyWithSearchValuesOrLiteral(new string(asciiChars)); - } else if (literal.Range.LowInclusive == literal.Range.HighInclusive) // single char from a RegexNode.One { overlap = literal.Range.LowInclusive == node.Ch; @@ -5153,21 +5140,9 @@ bool CanEmitIndexOf(RegexNode node, out int literalLength) if (node.IsSetFamily) { - Span setChars = stackalloc char[5]; // current max that's vectorized - int setCharsCount; - if ((setCharsCount = RegexCharClass.GetSetChars(node.Str, setChars)) > 0) - { - literalLength = 1; - return true; - } - - if (RegexCharClass.TryGetSingleRange(node.Str, out char lowInclusive, out char highInclusive)) - { - literalLength = 1; - return 
true; - } - - if (RegexCharClass.TryGetAsciiSetChars(node.Str, out _)) + Span setChars = stackalloc char[128]; + if (RegexCharClass.TryGetSingleRange(node.Str, out _, out _) || + RegexCharClass.GetSetChars(node.Str, setChars) > 0) { literalLength = 1; return true; @@ -5218,26 +5193,11 @@ void EmitIndexOf(RegexNode node, bool useLast, bool negate) { bool negated = RegexCharClass.IsNegated(node.Str) ^ negate; - Span setChars = stackalloc char[5]; // current max that's vectorized - int setCharsCount = RegexCharClass.GetSetChars(node.Str, setChars); - // IndexOfAny{Except}InRange - // Prefer IndexOfAnyInRange over IndexOfAny for sets of 3-5 values that fit in a single range. - if (setCharsCount is not (1 or 2) && RegexCharClass.TryGetSingleRange(node.Str, out char lowInclusive, out char highInclusive)) + // Prefer IndexOfAnyInRange over IndexOfAny, except for tiny ranges (1 or 2 items) that IndexOfAny handles more efficiently + if (RegexCharClass.TryGetSingleRange(node.Str, out char lowInclusive, out char highInclusive) && + (highInclusive - lowInclusive) > 1) { - if (lowInclusive == highInclusive) - { - Ldc(lowInclusive); - Call((useLast, negated) switch - { - (false, false) => s_spanIndexOfChar, - (false, true) => s_spanIndexOfAnyExceptChar, - (true, false) => s_spanLastIndexOfChar, - (true, true) => s_spanLastIndexOfAnyExceptChar, - }); - return; - } - Ldc(lowInclusive); Ldc(highInclusive); Call((useLast, negated) switch @@ -5251,6 +5211,8 @@ void EmitIndexOf(RegexNode node, bool useLast, bool negate) } // IndexOfAny{Except}(ch1, ...) 
+ Span setChars = stackalloc char[128]; // arbitrary cut-off that accomodates all of ASCII and doesn't take too long to compute + int setCharsCount = RegexCharClass.GetSetChars(node.Str, setChars); if (setCharsCount > 0) { setChars = setChars.Slice(0, setCharsCount); @@ -5293,17 +5255,10 @@ void EmitIndexOf(RegexNode node, bool useLast, bool negate) return; default: - EmitIndexOfAnyWithSearchValuesOrLiteral(setChars.ToString(), last: useLast, except: negated); + EmitIndexOfAnyWithSearchValuesOrLiteral(setChars, last: useLast, except: negated); return; } } - - // IndexOfAny{Except}(SearchValues) - if (RegexCharClass.TryGetAsciiSetChars(node.Str, out char[]? asciiChars)) - { - EmitIndexOfAnyWithSearchValuesOrLiteral(new string(asciiChars), last: useLast, except: negated); - return; - } } Debug.Fail("We should never get here. This method should only be called if CanEmitIndexOf returned true, and all of the same cases should be covered."); @@ -6197,15 +6152,15 @@ private void EmitTimeoutCheckIfNeeded() } /// Emits a call to either IndexOfAny("abcd") or IndexOfAny(SearchValues) depending on the . - private void EmitIndexOfAnyWithSearchValuesOrLiteral(string chars, bool last = false, bool except = false) + private void EmitIndexOfAnyWithSearchValuesOrLiteral(ReadOnlySpan chars, bool last = false, bool except = false) { - Debug.Assert(chars.Length > 3); + Debug.Assert(chars.Length > 3, $"chars.Length == {chars.Length}"); // SearchValues is faster than a regular IndexOfAny("abcd") for sets of 4/5 values iff they are ASCII. // Only emit SearchValues instances when we know they'll be faster to avoid increasing the startup cost too much. 
if (chars.Length is 4 or 5 && !RegexCharClass.IsAscii(chars)) { - Ldstr(chars); + Ldstr(chars.ToString()); Call(s_stringAsSpanMethod); Call((last, except) switch { @@ -6217,7 +6172,7 @@ private void EmitIndexOfAnyWithSearchValuesOrLiteral(string chars, bool last = f } else { - LoadSearchValues(chars.ToCharArray()); + LoadSearchValues(chars.ToArray()); Call((last, except) switch { (false, false) => s_spanIndexOfAnySearchValues, diff --git a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexFindOptimizations.cs b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexFindOptimizations.cs index a8dc9f4fd0e581..fa90486e7407a3 100644 --- a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexFindOptimizations.cs +++ b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexFindOptimizations.cs @@ -94,7 +94,7 @@ public RegexFindOptimizations(RegexNode root, RegexOptions options) if (RegexPrefixAnalyzer.FindFirstCharClass(root) is string charClass) { // See if the set is limited to holding only a few characters. - Span scratch = stackalloc char[5]; // max efficiently optimized by IndexOfAny today + Span scratch = stackalloc char[5]; // max efficiently optimized by IndexOfAny today without SearchValues, which isn't used for RTL int scratchCount; char[]? chars = null; if (!RegexCharClass.IsNegated(charClass) && @@ -278,7 +278,6 @@ public RegexFindOptimizations(RegexNode root, RegexOptions options) /// Data about a character class at a fixed offset from the start of any match to a pattern. public struct FixedDistanceSet(char[]? chars, string set, int distance) { - /// The character class description. public string Set = set; /// Whether the is negated. 
@@ -606,9 +605,9 @@ public bool TryFindNextStartingPositionLeftToRight(ReadOnlySpan textSpan, case FindNextStartingPositionMode.LeadingSet_LeftToRight: { FixedDistanceSet primarySet = FixedDistanceSets![0]; - char[]? chars = primarySet.Chars; ReadOnlySpan span = textSpan.Slice(pos); + char[]? chars = primarySet.Chars; if (chars is { Length: <= 5 }) // 5 == currently the max length efficiently handled by IndexOfAny{Except} without SearchValues { int i = primarySet.Negated ? span.IndexOfAnyExcept(chars) : span.IndexOfAny(chars); @@ -618,6 +617,16 @@ public bool TryFindNextStartingPositionLeftToRight(ReadOnlySpan textSpan, return true; } } + else if (primarySet.Range is not null) + { + (char low, char high) = primarySet.Range.GetValueOrDefault(); + int i = primarySet.Negated ? span.IndexOfAnyExceptInRange(low, high) : span.IndexOfAnyInRange(low, high); + if (i >= 0) + { + pos += i; + return true; + } + } else { ref uint[]? startingAsciiLookup = ref _asciiLookups![0]; diff --git a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexNode.cs b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexNode.cs index 42cb10d85ed0f3..4d9b7a0efdabe8 100644 --- a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexNode.cs +++ b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexNode.cs @@ -1426,10 +1426,8 @@ public char FirstCharOfOneOrMulti() /// A tuple of data about the literal: only one of the Char/String/SetChars fields is relevant. /// The Negated value indicates whether the Char/SetChars should be considered exclusionary. /// - public StartingLiteralData? FindStartingLiteral(int maxSetCharacters = 5) // 5 is max efficiently optimized by IndexOfAny today + public StartingLiteralData? 
FindStartingLiteral() { - Debug.Assert(maxSetCharacters is >= 0 and <= 128, $"{nameof(maxSetCharacters)} == {maxSetCharacters} should be small enough to be stack allocated."); - if (FindStartingLiteralNode() is RegexNode node) { switch (node.Kind) @@ -1441,23 +1439,18 @@ public char FirstCharOfOneOrMulti() return new StartingLiteralData(range: (node.Ch, node.Ch), negated: true); case RegexNodeKind.Set or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy: - Span setChars = stackalloc char[maxSetCharacters]; - int numChars; - if ((numChars = RegexCharClass.GetSetChars(node.Str!, setChars)) != 0) - { - setChars = setChars.Slice(0, numChars); - return new StartingLiteralData(setChars: setChars.ToString(), negated: RegexCharClass.IsNegated(node.Str!)); - } - - if (RegexCharClass.TryGetSingleRange(node.Str!, out char lowInclusive, out char highInclusive)) + if (RegexCharClass.TryGetSingleRange(node.Str!, out char lowInclusive, out char highInclusive) && + (highInclusive - lowInclusive) > 1) // prefer IndexOfAny for 1 or 2 elements as an optimization { Debug.Assert(lowInclusive < highInclusive); return new StartingLiteralData(range: (lowInclusive, highInclusive), negated: RegexCharClass.IsNegated(node.Str!)); } - if (RegexCharClass.TryGetAsciiSetChars(node.Str!, out char[]? asciiChars)) + Span setChars = stackalloc char[128]; + int numChars; + if ((numChars = RegexCharClass.GetSetChars(node.Str!, setChars)) != 0) { - return new StartingLiteralData(asciiChars: asciiChars, negated: RegexCharClass.IsNegated(node.Str!)); + return new StartingLiteralData(setChars: setChars.Slice(0, numChars).ToString(), negated: RegexCharClass.IsNegated(node.Str!)); } break; @@ -1475,7 +1468,6 @@ public readonly struct StartingLiteralData public readonly (char LowInclusive, char HighInclusive) Range; public readonly string? String; public readonly string? SetChars; - public readonly char[]? 
AsciiChars; public readonly bool Negated; public StartingLiteralData((char LowInclusive, char HighInclusive) range, bool negated) @@ -1496,13 +1488,6 @@ public StartingLiteralData(string? setChars, bool negated) SetChars = setChars; Negated = negated; } - - public StartingLiteralData(char[]? asciiChars, bool negated) - { - Debug.Assert(asciiChars is not null); - AsciiChars = asciiChars; - Negated = negated; - } } /// diff --git a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexPrefixAnalyzer.cs b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexPrefixAnalyzer.cs index 97aba89b9804c5..926a28339162f6 100644 --- a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexPrefixAnalyzer.cs +++ b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexPrefixAnalyzer.cs @@ -535,25 +535,24 @@ static bool Process(RegexNode node, ref ValueStringBuilder vsb) // For every entry, try to get the chars that make up the set, if there are few enough. // For any for which we couldn't get the small chars list, see if we can get other useful info. - Span scratch = stackalloc char[128]; // limit based on what's currently efficiently handled by SearchValues + Span scratch = stackalloc char[128]; for (int i = 0; i < results.Count; i++) { RegexFindOptimizations.FixedDistanceSet result = results[i]; result.Negated = RegexCharClass.IsNegated(result.Set); - int count = RegexCharClass.GetSetChars(result.Set, scratch); - if (count > 0) + if (RegexCharClass.TryGetSingleRange(result.Set, out char lowInclusive, out char highInclusive) && + (highInclusive - lowInclusive) > 1) // prefer IndexOfAny for tiny sets of 1 or 2 elements { - result.Chars = scratch.Slice(0, count).ToArray(); + result.Range = (lowInclusive, highInclusive); } - - // Prefer IndexOfAnyInRange over IndexOfAny for sets of 3-5 values that fit in a single range. 
- if (thorough && - (result.Chars is null || result.Chars.Length > 2) && - RegexCharClass.TryGetSingleRange(result.Set, out char lowInclusive, out char highInclusive)) + else { - result.Chars = null; - result.Range = (lowInclusive, highInclusive); + int count = RegexCharClass.GetSetChars(result.Set, scratch); + if (count > 0) + { + result.Chars = scratch.Slice(0, count).ToArray(); + } } results[i] = result; From e21bdfe15f4ffbec1f6c56395a2f70433cdf455b Mon Sep 17 00:00:00 2001 From: Aaron Robinson Date: Wed, 3 Apr 2024 09:46:00 -0700 Subject: [PATCH 063/132] Fix use of uninitialized `bool` value (#100560) Fixes https://github.com/dotnet/runtime/issues/100559 --- src/coreclr/vm/methodtablebuilder.cpp | 2 +- src/coreclr/vm/readytoruninfo.cpp | 21 +++++++++------------ 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/src/coreclr/vm/methodtablebuilder.cpp b/src/coreclr/vm/methodtablebuilder.cpp index e52b0ced06aef0..5a9670b19f2da7 100644 --- a/src/coreclr/vm/methodtablebuilder.cpp +++ b/src/coreclr/vm/methodtablebuilder.cpp @@ -2783,7 +2783,7 @@ MethodTableBuilder::EnumerateClassMethods() } } - bool hasGenericMethodArgsComputed; + bool hasGenericMethodArgsComputed = false; bool hasGenericMethodArgs = this->GetModule()->m_pMethodIsGenericMap->IsGeneric(tok, &hasGenericMethodArgsComputed); if (!hasGenericMethodArgsComputed) { diff --git a/src/coreclr/vm/readytoruninfo.cpp b/src/coreclr/vm/readytoruninfo.cpp index 2a56362e9200bf..a047e17ffa3dfb 100644 --- a/src/coreclr/vm/readytoruninfo.cpp +++ b/src/coreclr/vm/readytoruninfo.cpp @@ -1929,20 +1929,17 @@ bool ReadyToRun_TypeGenericInfoMap::HasConstraints(mdTypeDef input, bool *foundR bool ReadyToRun_MethodIsGenericMap::IsGeneric(mdMethodDef input, bool *foundResult) const { -#ifdef DACCESS_COMPILE - *foundResult = false; - return false; -#else +#ifndef DACCESS_COMPILE uint32_t rid = RidFromToken(input); - if ((rid > MethodCount) || (rid == 0)) + if ((rid <= MethodCount) && (rid != 0)) { - *foundResult = 
false; - return false; + uint8_t chunk = ((uint8_t*)&MethodCount)[((rid - 1) / 8) + sizeof(uint32_t)]; + chunk >>= 7 - ((rid - 1) % 8); + *foundResult = true; + return !!(chunk & 1); } - - uint8_t chunk = ((uint8_t*)&MethodCount)[((rid - 1) / 8) + sizeof(uint32_t)]; - chunk >>= 7 - ((rid - 1) % 8); - return !!(chunk & 1); -#endif +#endif // !DACCESS_COMPILE + *foundResult = false; + return false; } From eb73369d61b5aa41ed85c5adf594a60a4afd3b26 Mon Sep 17 00:00:00 2001 From: Sven Boemer Date: Wed, 3 Apr 2024 10:19:29 -0700 Subject: [PATCH 064/132] Fix origin for warnings due to event methods (#100500) This fixes the warning origin to prevent warnings from being reported on events for annotated event methods. Instead the warning is shown at the location that introduces the dependency on the event. Doesn't fix the multiple warnings reported in some cases. This is due to MarkEvent also marking event methods, unlike MarkProperty. We might consider fixing this too, but it was looking like a larger change than I wanted to make here. This removes some spurious warnings that were showing up when bubbling up RUC in winforms. 
--- .../illink/src/linker/Linker.Steps/MarkStep.cs | 15 ++++++++------- .../Reflection/TypeHierarchyReflectionWarnings.cs | 3 ++- .../RequiresCapability/BasicRequires.cs | 2 -- .../RequiresCapability/RequiresOnClass.cs | 14 ++++---------- 4 files changed, 14 insertions(+), 20 deletions(-) diff --git a/src/tools/illink/src/linker/Linker.Steps/MarkStep.cs b/src/tools/illink/src/linker/Linker.Steps/MarkStep.cs index 9290b157920dd2..02fd1412caf845 100644 --- a/src/tools/illink/src/linker/Linker.Steps/MarkStep.cs +++ b/src/tools/illink/src/linker/Linker.Steps/MarkStep.cs @@ -405,7 +405,7 @@ internal void MarkEntireType (TypeDefinition type, in DependencyInfo reason) if (type.HasEvents) { foreach (var ev in type.Events) { - MarkEventVisibleToReflection (ev, new DependencyInfo (DependencyKind.MemberOfType, type), ScopeStack.CurrentScope.Origin); + MarkEventVisibleToReflection (ev, new DependencyInfo (DependencyKind.MemberOfType, ScopeStack.CurrentScope.Origin), ScopeStack.CurrentScope.Origin); } } } @@ -3325,6 +3325,7 @@ static DependencyKind PropagateDependencyKindToAccessors (DependencyKind parentD case DependencyKind.AlreadyMarked: case DependencyKind.TypePreserve: case DependencyKind.PreservedMethod: + case DependencyKind.DynamicallyAccessedMemberOnType: return parentDependencyKind; default: @@ -3583,15 +3584,15 @@ protected internal virtual void MarkEvent (EventDefinition evt, in DependencyInf if (!Annotations.MarkProcessed (evt, reason)) return; + var origin = reason.Source is IMemberDefinition member ? 
new MessageOrigin (member) : ScopeStack.CurrentScope.Origin; + DependencyKind dependencyKind = PropagateDependencyKindToAccessors (reason.Kind, DependencyKind.EventMethod); + MarkMethodIfNotNull (evt.AddMethod, new DependencyInfo (dependencyKind, evt), origin); + MarkMethodIfNotNull (evt.InvokeMethod, new DependencyInfo (dependencyKind, evt), origin); + MarkMethodIfNotNull (evt.RemoveMethod, new DependencyInfo (dependencyKind, evt), origin); + using var eventScope = ScopeStack.PushLocalScope (new MessageOrigin (evt)); MarkCustomAttributes (evt, new DependencyInfo (DependencyKind.CustomAttribute, evt)); - - DependencyKind dependencyKind = PropagateDependencyKindToAccessors (reason.Kind, DependencyKind.EventMethod); - MarkMethodIfNotNull (evt.AddMethod, new DependencyInfo (dependencyKind, evt), ScopeStack.CurrentScope.Origin); - MarkMethodIfNotNull (evt.InvokeMethod, new DependencyInfo (dependencyKind, evt), ScopeStack.CurrentScope.Origin); - MarkMethodIfNotNull (evt.RemoveMethod, new DependencyInfo (dependencyKind, evt), ScopeStack.CurrentScope.Origin); - DoAdditionalEventProcessing (evt); } diff --git a/src/tools/illink/test/Mono.Linker.Tests.Cases/Reflection/TypeHierarchyReflectionWarnings.cs b/src/tools/illink/test/Mono.Linker.Tests.Cases/Reflection/TypeHierarchyReflectionWarnings.cs index f1e2faf60c46e7..13ecc2ec29068c 100644 --- a/src/tools/illink/test/Mono.Linker.Tests.Cases/Reflection/TypeHierarchyReflectionWarnings.cs +++ b/src/tools/illink/test/Mono.Linker.Tests.Cases/Reflection/TypeHierarchyReflectionWarnings.cs @@ -246,10 +246,11 @@ class AnnotatedPublicEvents public delegate void MyEventHandler (object sender, int i); [Kept] - [ExpectedWarning ("IL2026", "--RUC on add_RUCEvent--", ProducedBy = Tool.Trimmer)] public event MyEventHandler RUCEvent { [Kept] [ExpectedWarning ("IL2112", nameof (AnnotatedPublicEvents), "--RUC on add_RUCEvent--")] + // https://github.com/dotnet/runtime/issues/100499 + [ExpectedWarning ("IL2112", nameof (AnnotatedPublicEvents), 
"--RUC on add_RUCEvent--", ProducedBy = Tool.Trimmer)] [KeptAttributeAttribute (typeof (RequiresUnreferencedCodeAttribute))] [RequiresUnreferencedCode ("--RUC on add_RUCEvent--")] add { } diff --git a/src/tools/illink/test/Mono.Linker.Tests.Cases/RequiresCapability/BasicRequires.cs b/src/tools/illink/test/Mono.Linker.Tests.Cases/RequiresCapability/BasicRequires.cs index d7643f8abe1cde..d9536b9358109a 100644 --- a/src/tools/illink/test/Mono.Linker.Tests.Cases/RequiresCapability/BasicRequires.cs +++ b/src/tools/illink/test/Mono.Linker.Tests.Cases/RequiresCapability/BasicRequires.cs @@ -137,7 +137,6 @@ static void TestRequiresFromNameOf () class OnEventMethod { - [ExpectedWarning ("IL2026", "--EventToTestRemove.remove--", ProducedBy = Tool.Trimmer)] static event EventHandler EventToTestRemove { add { } [RequiresUnreferencedCode ("Message for --EventToTestRemove.remove--")] @@ -146,7 +145,6 @@ static event EventHandler EventToTestRemove { remove { } } - [ExpectedWarning ("IL2026", "--EventToTestAdd.add--", ProducedBy = Tool.Trimmer)] static event EventHandler EventToTestAdd { [RequiresUnreferencedCode ("Message for --EventToTestAdd.add--")] [RequiresAssemblyFiles ("Message for --EventToTestAdd.add--")] diff --git a/src/tools/illink/test/Mono.Linker.Tests.Cases/RequiresCapability/RequiresOnClass.cs b/src/tools/illink/test/Mono.Linker.Tests.Cases/RequiresCapability/RequiresOnClass.cs index 1489f11526886b..f1faf7684750f3 100644 --- a/src/tools/illink/test/Mono.Linker.Tests.Cases/RequiresCapability/RequiresOnClass.cs +++ b/src/tools/illink/test/Mono.Linker.Tests.Cases/RequiresCapability/RequiresOnClass.cs @@ -492,9 +492,6 @@ class MemberTypesWithRequires public static int field; public static int Property { get; set; } - // These should not be reported https://github.com/mono/linker/issues/2218 - [ExpectedWarning ("IL2026", "MemberTypesWithRequires.Event.add", ProducedBy = Tool.Trimmer)] - [ExpectedWarning ("IL2026", "MemberTypesWithRequires.Event.remove", ProducedBy = 
Tool.Trimmer)] public static event EventHandler Event; } @@ -838,24 +835,21 @@ public static void Test () class ReflectionAccessOnEvents { - // Most of the tests in this run into https://github.com/dotnet/linker/issues/2218 + // Most of the tests in this run into https://github.com/dotnet/runtime/issues/100499 // So for now keeping just a very simple test [RequiresUnreferencedCode ("--WithRequires--")] [RequiresDynamicCode ("--WithRequires--")] class WithRequires { - // These should be reported only in TestDirectReflectionAccess - // https://github.com/mono/linker/issues/2218 - [ExpectedWarning ("IL2026", "StaticEvent.add", ProducedBy = Tool.Trimmer)] - [ExpectedWarning ("IL2026", "StaticEvent.remove", ProducedBy = Tool.Trimmer)] public static event EventHandler StaticEvent; } [ExpectedWarning ("IL2026", "StaticEvent.add")] [ExpectedWarning ("IL3050", "StaticEvent.add", ProducedBy = Tool.NativeAot)] - // https://github.com/mono/linker/issues/2218 - [ExpectedWarning ("IL2026", "StaticEvent.remove", ProducedBy = Tool.Analyzer | Tool.NativeAot)] + // https://github.com/dotnet/runtime/issues/100499 + [ExpectedWarning ("IL2026", "StaticEvent.add", ProducedBy = Tool.Trimmer)] + [ExpectedWarning ("IL2026", "StaticEvent.remove")] [ExpectedWarning ("IL3050", "StaticEvent.remove", ProducedBy = Tool.NativeAot)] static void TestDirectReflectionAccess () { From 7c030e8ef0d14c4cf6c473cff8d761f8040f0e88 Mon Sep 17 00:00:00 2001 From: SingleAccretion <62474226+SingleAccretion@users.noreply.github.com> Date: Wed, 3 Apr 2024 20:21:12 +0300 Subject: [PATCH 065/132] Delete dead code (#100551) * Delete TinyArray * Delete JitNoRegLoc * Delete sameRegAsDst * Delete gtIsStaticFieldPtrToBoxedStruct * Delete OperIsArithmetic * JitNoRangeChks -> JitNoRngChks * Delete bbFPinVars * Delete genTempLiveChg/genTempOldLife * Delete StressCOMCall * Delete isLclVarUsedFromMemory and isLclFldUsedFromMemory * Delete fgLclFldAssign * Delete CorInfoIndirectCallReason * Delete VNSimdTypeInfo * Delete 
genShouldRoundFP and related * Delete TRANSLATE_PDB decls * Delete lvStackAligned * Delete ifdef VERIFIER * Formatting --- src/coreclr/inc/corinfo.h | 30 ------------ src/coreclr/jit/CMakeLists.txt | 1 - src/coreclr/jit/assertionprop.cpp | 4 +- src/coreclr/jit/block.h | 13 +---- src/coreclr/jit/codegen.h | 2 - src/coreclr/jit/codegencommon.cpp | 27 ----------- src/coreclr/jit/codegeninterface.h | 5 -- src/coreclr/jit/codegenlinear.cpp | 39 --------------- src/coreclr/jit/compiler.h | 17 ------- src/coreclr/jit/compiler.hpp | 22 +-------- src/coreclr/jit/emitpub.h | 22 --------- src/coreclr/jit/flowgraph.cpp | 9 ---- src/coreclr/jit/gentree.cpp | 12 ----- src/coreclr/jit/gentree.h | 22 --------- src/coreclr/jit/jitconfigvalues.h | 14 +----- src/coreclr/jit/tinyarray.h | 78 ------------------------------ src/coreclr/jit/valuenum.h | 7 --- 17 files changed, 6 insertions(+), 318 deletions(-) delete mode 100644 src/coreclr/jit/tinyarray.h diff --git a/src/coreclr/inc/corinfo.h b/src/coreclr/inc/corinfo.h index 346feed74f324d..35f7d2f0bdf862 100644 --- a/src/coreclr/inc/corinfo.h +++ b/src/coreclr/inc/corinfo.h @@ -1008,36 +1008,6 @@ enum CorInfoInitClassResult // requirement around class initialization such as shared generics. 
}; -// Reason codes for making indirect calls -#define INDIRECT_CALL_REASONS() \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_UNKNOWN) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_EXOTIC) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_PINVOKE) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_GENERIC) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_NO_CODE) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_FIXUPS) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_STUB) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_REMOTING) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_CER) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_RESTORE_METHOD) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_RESTORE_FIRST_CALL) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_RESTORE_VALUE_TYPE) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_RESTORE) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_CANT_PATCH) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_PROFILING) \ - INDIRECT_CALL_REASON_FUNC(CORINFO_INDIRECT_CALL_OTHER_LOADER_MODULE) \ - -enum CorInfoIndirectCallReason -{ - #undef INDIRECT_CALL_REASON_FUNC - #define INDIRECT_CALL_REASON_FUNC(x) x, - INDIRECT_CALL_REASONS() - - #undef INDIRECT_CALL_REASON_FUNC - - CORINFO_INDIRECT_CALL_COUNT -}; - inline bool dontInline(CorInfoInline val) { return(val < 0); } diff --git a/src/coreclr/jit/CMakeLists.txt b/src/coreclr/jit/CMakeLists.txt index 806ce48dce9029..2f0b3659aa5ad3 100644 --- a/src/coreclr/jit/CMakeLists.txt +++ b/src/coreclr/jit/CMakeLists.txt @@ -385,7 +385,6 @@ set( JIT_HEADERS targetamd64.h targetarm.h targetarm64.h - tinyarray.h treelifeupdater.h typelist.h unwind.h diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index 1b3ed18f93b45a..459d816181e502 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -5273,12 +5273,12 @@ GenTree* Compiler::optAssertionProp_BndsChk(ASSERT_VALARG_TP 
assertions, GenTree assert(tree->OperIs(GT_BOUNDS_CHECK)); #ifdef FEATURE_ENABLE_NO_RANGE_CHECKS - if (JitConfig.JitNoRangeChks()) + if (JitConfig.JitNoRngChks()) { #ifdef DEBUG if (verbose) { - printf("\nFlagging check redundant due to JitNoRangeChks in " FMT_BB ":\n", compCurBB->bbNum); + printf("\nFlagging check redundant due to JitNoRngChks in " FMT_BB ":\n", compCurBB->bbNum); gtDispTree(tree, nullptr, nullptr, true); } #endif // DEBUG diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index dfaace466a6570..68f41e3610173a 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -1535,10 +1535,7 @@ struct BasicBlock : private LIR::Range #define handlerGetsXcptnObj(hndTyp) ((hndTyp) != BBCT_NONE && (hndTyp) != BBCT_FAULT && (hndTyp) != BBCT_FINALLY) // TODO-Cleanup: Get rid of bbStkDepth and use bbStackDepthOnEntry() instead - union { - unsigned short bbStkDepth; // stack depth on entry - unsigned short bbFPinVars; // number of inner enregistered FP vars - }; + unsigned short bbStkDepth; // stack depth on entry // Basic block predecessor lists. Predecessor lists are created by fgLinkBasicBlocks(), stored // in 'bbPreds', and then maintained throughout compilation. 
'fgPredsComputed' will be 'true' after the @@ -1671,14 +1668,6 @@ struct BasicBlock : private LIR::Range void* bbEmitCookie; -#ifdef VERIFIER - stackDesc bbStackIn; // stack descriptor for input - stackDesc bbStackOut; // stack descriptor for output - - verTypeVal* bbTypesIn; // list of variable types on input - verTypeVal* bbTypesOut; // list of variable types on output -#endif // VERIFIER - //------------------------------------------------------------------------- #if MEASURE_BLOCK_SIZE diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h index 7a2359c9fb5fc9..d63e0809b62693 100644 --- a/src/coreclr/jit/codegen.h +++ b/src/coreclr/jit/codegen.h @@ -101,8 +101,6 @@ class CodeGen final : public CodeGenInterface } } - static bool genShouldRoundFP(); - static GenTreeIndir indirForm(var_types type, GenTree* base); static GenTreeStoreInd storeIndirForm(var_types type, GenTree* base, GenTree* data); diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 24f5a922f18878..a62bcde9e556d0 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -120,7 +120,6 @@ CodeGen::CodeGen(Compiler* theCompiler) : CodeGenInterface(theCompiler) #endif #ifdef DEBUG - genTempLiveChg = true; genTrnslLocalVarCount = 0; // Shouldn't be used before it is set in genFnProlog() @@ -262,29 +261,6 @@ int CodeGenInterface::genCallerSPtoInitialSPdelta() const #endif // defined(TARGET_X86) || defined(TARGET_ARM) -/***************************************************************************** - * Should we round simple operations (assignments, arithmetic operations, etc.) 
- */ - -// inline -// static -bool CodeGen::genShouldRoundFP() -{ - RoundLevel roundLevel = getRoundFloatLevel(); - - switch (roundLevel) - { - case ROUND_NEVER: - case ROUND_CMP_CONST: - case ROUND_CMP: - return false; - - default: - assert(roundLevel == ROUND_ALWAYS); - return true; - } -} - /***************************************************************************** * * Initialize some global variables. @@ -1715,9 +1691,6 @@ void CodeGen::genGenerateMachineCode() /* Prepare the emitter */ GetEmitter()->Init(); -#ifdef DEBUG - VarSetOps::AssignNoCopy(compiler, genTempOldLife, VarSetOps::MakeEmpty(compiler)); -#endif #ifdef DEBUG if (compiler->opts.disAsmSpilled && regSet.rsNeededSpillReg) diff --git a/src/coreclr/jit/codegeninterface.h b/src/coreclr/jit/codegeninterface.h index a28a60b50ca8cd..63954adc6ffbb3 100644 --- a/src/coreclr/jit/codegeninterface.h +++ b/src/coreclr/jit/codegeninterface.h @@ -153,11 +153,6 @@ class CodeGenInterface void genUpdateVarReg(LclVarDsc* varDsc, GenTree* tree); protected: -#ifdef DEBUG - VARSET_TP genTempOldLife; - bool genTempLiveChg; -#endif - VARSET_TP genLastLiveSet; // A one element map (genLastLiveSet-> genLastLiveMask) regMaskTP genLastLiveMask; // these two are used in genLiveMask diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index 5aa961e3bcd077..5e05f1b0819829 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -1028,45 +1028,6 @@ void CodeGenInterface::genUpdateVarReg(LclVarDsc* varDsc, GenTree* tree) varDsc->SetRegNum(tree->GetRegNum()); } -//------------------------------------------------------------------------ -// sameRegAsDst: Return the child that has the same reg as the dst (if any) -// -// Arguments: -// tree - the node of interest -// other - an out parameter to return the other child -// -// Notes: -// If 'tree' has a child with the same assigned register as its target reg, -// that child will be returned, and 'other' will contain 
the non-matching child. -// Otherwise, both other and the return value will be nullptr. -// -GenTree* sameRegAsDst(GenTree* tree, GenTree*& other /*out*/) -{ - if (tree->GetRegNum() == REG_NA) - { - other = nullptr; - return nullptr; - } - - GenTree* op1 = tree->AsOp()->gtOp1; - GenTree* op2 = tree->AsOp()->gtOp2; - if (op1->GetRegNum() == tree->GetRegNum()) - { - other = op2; - return op1; - } - if (op2->GetRegNum() == tree->GetRegNum()) - { - other = op1; - return op2; - } - else - { - other = nullptr; - return nullptr; - } -} - //------------------------------------------------------------------------ // genUnspillLocal: Reload a register candidate local into a register, if needed. // diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 8615784b6ab1d8..ee89502ce9b62d 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -40,7 +40,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #include "arraystack.h" #include "hashbv.h" #include "jitexpandarray.h" -#include "tinyarray.h" #include "valuenum.h" #include "scev.h" #include "namedintrinsiclist.h" @@ -1116,11 +1115,6 @@ class LclVarDsc { return (var_types)lvType; } - bool lvStackAligned() const - { - assert(lvIsStructField); - return ((lvFldOffset % TARGET_POINTER_SIZE) == 0); - } // NormalizeOnLoad Rules: // 1. All small locals are actually TYP_INT locals. @@ -3572,12 +3566,6 @@ class Compiler bool gtSplitTree( BasicBlock* block, Statement* stmt, GenTree* splitPoint, Statement** firstNewStmt, GenTree*** splitPointUse); - // Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the - // static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but - // complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing - // the given "fldHnd", is such an object pointer. 
- bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd); - bool gtStoreDefinesField( LclVarDsc* fieldVarDsc, ssize_t offset, unsigned size, ssize_t* pFieldStoreOffset, unsigned* pFieldStoreSize); @@ -6726,11 +6714,6 @@ class Compiler bool fgForwardSubHasStoreInterference(Statement* defStmt, Statement* nextStmt, GenTree* nextStmtUse); void fgForwardSubUpdateLiveness(GenTree* newSubListFirst, GenTree* newSubListLast); - // The given local variable, required to be a struct variable, is being assigned via - // a "lclField", to make it masquerade as an integral type in the ABI. Make sure that - // the variable is not enregistered, and is therefore not promoted independently. - void fgLclFldAssign(unsigned lclNum); - enum TypeProducerKind { TPK_Unknown = 0, // May not be a RuntimeType diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index b6ba06ba7a1986..daccf6027efd10 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -36,7 +36,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX inline bool getInlinePInvokeEnabled() { #ifdef DEBUG - return JitConfig.JitPInvokeEnabled() && !JitConfig.StressCOMCall(); + return JitConfig.JitPInvokeEnabled(); #else return true; #endif @@ -65,26 +65,6 @@ inline UINT32 forceCastToUInt32(double d) return u; } -enum RoundLevel -{ - ROUND_NEVER = 0, // Never round - ROUND_CMP_CONST = 1, // Round values compared against constants - ROUND_CMP = 2, // Round comparands and return values - ROUND_ALWAYS = 3, // Round always - - COUNT_ROUND_LEVEL, - DEFAULT_ROUND_LEVEL = ROUND_NEVER -}; - -inline RoundLevel getRoundFloatLevel() -{ -#ifdef DEBUG - return (RoundLevel)JitConfig.JitRoundFloat(); -#else - return DEFAULT_ROUND_LEVEL; -#endif -} - /*****************************************************************************/ /***************************************************************************** * diff --git 
a/src/coreclr/jit/emitpub.h b/src/coreclr/jit/emitpub.h index 674b98a7f4bacd..c31d21153fd970 100644 --- a/src/coreclr/jit/emitpub.h +++ b/src/coreclr/jit/emitpub.h @@ -110,28 +110,6 @@ static regNumber inst3opImulReg(instruction ins); static instruction inst3opImulForReg(regNumber reg); #endif -/************************************************************************/ -/* Emit PDB offset translation information */ -/************************************************************************/ - -#ifdef TRANSLATE_PDB - -static void SetILBaseOfCode(BYTE* pTextBase); -static void SetILMethodBase(BYTE* pMethodEntry); -static void SetILMethodStart(BYTE* pMethodCode); -static void SetImgBaseOfCode(BYTE* pTextBase); - -void SetIDBaseToProlog(); -void SetIDBaseToOffset(int methodOffset); - -static void DisablePDBTranslation(); -static bool IsPDBEnabled(); - -static void InitTranslationMaps(int ilCodeSize); -static void DeleteTranslationMaps(); -static void InitTranslator(PDBRewriter* pPDB, int* rgSecMap, IMAGE_SECTION_HEADER** rgpHeader, int numSections); -#endif - /************************************************************************/ /* Interface for generating unwind information */ /************************************************************************/ diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index e2f450a7cb194d..691fa5ef349f27 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -3920,15 +3920,6 @@ void Compiler::fgSetBlockOrder(BasicBlock* block) return firstNode; } -void Compiler::fgLclFldAssign(unsigned lclNum) -{ - assert(varTypeIsStruct(lvaTable[lclNum].lvType)); - if (lvaTable[lclNum].lvPromoted && lvaTable[lclNum].lvFieldCnt > 1) - { - lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); - } -} - #ifdef DEBUG //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 
05f50f4f1132e0..dd77f8e6e1cece 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -18332,18 +18332,6 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeq** pFldSe return false; } -bool Compiler::gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd) -{ - if (fieldNodeType != TYP_REF) - { - return false; - } - noway_assert(fldHnd != nullptr); - CorInfoType cit = info.compCompHnd->getFieldType(fldHnd); - var_types fieldTyp = JITtype2varType(cit); - return fieldTyp != TYP_REF; -} - //------------------------------------------------------------------------ // gtStoreDefinesField: Does the given parent store modify the given field? // diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index 841b846d94bfd0..c1fc5b33a7175f 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -907,16 +907,6 @@ struct GenTree isUsedFromSpillTemp()); } - bool isLclVarUsedFromMemory() const - { - return (OperGet() == GT_LCL_VAR) && (isContained() || isUsedFromSpillTemp()); - } - - bool isLclFldUsedFromMemory() const - { - return isLclField() && (isContained() || isUsedFromSpillTemp()); - } - bool isUsedFromReg() const { return !isContained() && !isUsedFromSpillTemp(); @@ -1421,18 +1411,6 @@ struct GenTree return OperIsMul(gtOper); } - bool OperIsArithmetic() const - { - genTreeOps op = OperGet(); - return op == GT_ADD || op == GT_SUB || op == GT_MUL || op == GT_DIV || op == GT_MOD - - || op == GT_UDIV || op == GT_UMOD - - || op == GT_OR || op == GT_XOR || op == GT_AND - - || OperIsShiftOrRotate(op); - } - #ifdef TARGET_XARCH static bool OperIsRMWMemOp(genTreeOps gtOper) { diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h index e0ca766f97a6e8..e0a2d7cb16fcb8 100644 --- a/src/coreclr/jit/jitconfigvalues.h +++ b/src/coreclr/jit/jitconfigvalues.h @@ -125,9 +125,8 @@ CONFIG_INTEGER(JitNoForceFallback, W("JitNoForceFallback"), 0) // Set to non-zer // 
flags. CONFIG_INTEGER(JitNoForwardSub, W("JitNoForwardSub"), 0) // Disables forward sub CONFIG_INTEGER(JitNoHoist, W("JitNoHoist"), 0) -CONFIG_INTEGER(JitNoInline, W("JitNoInline"), 0) // Disables inlining of all methods -CONFIG_INTEGER(JitNoMemoryBarriers, W("JitNoMemoryBarriers"), 0) // If 1, don't generate memory barriers -CONFIG_INTEGER(JitNoRegLoc, W("JitNoRegLoc"), 0) +CONFIG_INTEGER(JitNoInline, W("JitNoInline"), 0) // Disables inlining of all methods +CONFIG_INTEGER(JitNoMemoryBarriers, W("JitNoMemoryBarriers"), 0) // If 1, don't generate memory barriers CONFIG_INTEGER(JitNoStructPromotion, W("JitNoStructPromotion"), 0) // Disables struct promotion 1 - for all, 2 - for // params. CONFIG_INTEGER(JitNoUnroll, W("JitNoUnroll"), 0) @@ -152,7 +151,6 @@ CONFIG_METHODSET(JitPrintDevirtualizedMethods, W("JitPrintDevirtualizedMethods") CONFIG_INTEGER(JitProfileChecks, W("JitProfileChecks"), -1) CONFIG_INTEGER(JitRequired, W("JITRequired"), -1) -CONFIG_INTEGER(JitRoundFloat, W("JITRoundFloat"), DEFAULT_ROUND_LEVEL) CONFIG_INTEGER(JitStackAllocToLocalSize, W("JitStackAllocToLocalSize"), DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE) CONFIG_INTEGER(JitSkipArrayBoundCheck, W("JitSkipArrayBoundCheck"), 0) CONFIG_INTEGER(JitSlowDebugChecksEnabled, W("JitSlowDebugChecksEnabled"), 1) // Turn on slow debug checks @@ -177,7 +175,6 @@ CONFIG_INTEGER(RunAltJitCode, W("RunAltJitCode"), 1) // If non-zero, and the com // code and fall back to the default compiler. 
CONFIG_INTEGER(RunComponentUnitTests, W("JitComponentUnitTests"), 0) // Run JIT component unit tests CONFIG_INTEGER(ShouldInjectFault, W("InjectFault"), 0) -CONFIG_INTEGER(StressCOMCall, W("StressCOMCall"), 0) CONFIG_INTEGER(TailcallStress, W("TailcallStress"), 0) CONFIG_INTEGER(TreesBeforeAfterMorph, W("JitDumpBeforeAfterMorph"), 0) // If 1, display each tree before/after morphing @@ -292,13 +289,6 @@ CONFIG_INTEGER(JitAlignLoops, W("JitAlignLoops"), 1) // If set, align inner loop CONFIG_INTEGER(JitAlignLoops, W("JitAlignLoops"), 0) #endif -/// -/// JIT -/// -#ifdef FEATURE_ENABLE_NO_RANGE_CHECKS -CONFIG_INTEGER(JitNoRangeChks, W("JitNoRngChks"), 0) // If 1, don't generate range checks -#endif - // AltJitAssertOnNYI should be 0 on targets where JIT is under development or bring up stage, so as to facilitate // fallback to main JIT on hitting a NYI. CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 1) // Controls the AltJit behavior of NYI stuff diff --git a/src/coreclr/jit/tinyarray.h b/src/coreclr/jit/tinyarray.h deleted file mode 100644 index 36cd462a786194..00000000000000 --- a/src/coreclr/jit/tinyarray.h +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -#ifndef TINYARRAY_H -#define TINYARRAY_H - -/*****************************************************************************/ - -// This is an array packed into some kind of integral data type -// storagetype is the type (integral) which your array is going to be packed into -// itemtype is the type of array elements -// bits_per_element is size of the elements in bits -template -class TinyArray -{ -public: - // operator[] returns a 'ref' (usually a ref to the element type) - // This presents a problem if you wanted to implement something like a - // bitvector via this packed array, because you cannot make a ref to - // the element type. 
- // The trick is you define something that acts like a ref (TinyArrayRef in this case) - // which for our purposes means you can assign to and from it and our chosen - // element type. - class TinyArrayRef - { - public: - // this is really the getter for the array. - operator itemType() - { - storageType mask = ((1 << bits_per_element) - 1); - int shift = bits_per_element * index; - - itemType result = (itemType)((*data >> shift) & mask); - return result; - } - - void operator=(const itemType b) - { - storageType mask = ((1 << bits_per_element) - 1); - assert(itemType(b & mask) == b); - - mask <<= bits_per_element * index; - - *data &= ~mask; - *data |= b << (bits_per_element * index); - } - friend class TinyArray; - - protected: - TinyArrayRef(storageType* d, int idx) : data(d), index(idx) - { - } - - storageType* data; - int index; - }; - - storageType data; - - void clear() - { - data = 0; - } - - TinyArrayRef operator[](unsigned int n) - { - assert((n + 1) * bits_per_element <= sizeof(storageType) * 8); - return TinyArrayRef(&data, n); - } - // only use this for clearing it - void operator=(void* rhs) - { - assert(rhs == nullptr); - data = 0; - } -}; - -#endif // TINYARRAY_H diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index a976893a06581f..7cd6c27aec206c 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -205,13 +205,6 @@ struct VNFuncApp } }; -// An instance of this struct represents the decoded information of a SIMD type from a value number. -struct VNSimdTypeInfo -{ - unsigned int m_simdSize; - CorInfoType m_simdBaseJitType; -}; - // We use a unique prefix character when printing value numbers in dumps: i.e. 
$1c0 // This define is used with string concatenation to put this in printf format strings #define FMT_VN "$%x" From 89bd91044e14ef0b4040dac086b30341dd1c831d Mon Sep 17 00:00:00 2001 From: Maoni Stephens Date: Wed, 3 Apr 2024 11:48:38 -0700 Subject: [PATCH 066/132] DATAS for small HCs (#100390) I adjusted the formula for determining a new HC and change how we calculate the gen0 budget based on gen2 size. changes included - + currently we have a very simplistic formula for actually adapting to the size and this basically just makes all the asp.net benchmarks with low surv rate adjust to the min 2.5 mb gen0 budget, while those run ok with such a small budget on a 28 core machine, it doesn't work if we limit the heap count to a small number, eg, 4. what happens is the % time in GC is very high, some benchmarks run with 20% to 40% time in GC. this is obviously not desirable. I reworked this to make it actually adapting to the size. and we'll take the min of this and what we calculated without DATAS. + the formula I had previously did not handle small HCs well so I also adjust that. + got rid of the adjusting to cache size in gc1 for DATAS, this just makes things unpredictable especially for small workloads. 
--- src/coreclr/gc/gc.cpp | 151 +++++++++++++++++++++++--------------- src/coreclr/gc/gcpriv.h | 58 +++++++++++++++ src/coreclr/inc/gcmsg.inl | 2 +- 3 files changed, 151 insertions(+), 60 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 40cb8694fd4d52..c8161a1e56d83f 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -22526,6 +22526,15 @@ void gc_heap::gc1() { limit = total_generation_count-1; } + + size_t total_max_gen_size = 0; + for (int i = 0; i < gc_heap::n_heaps; i++) + { + gc_heap* hp = gc_heap::g_heaps[i]; + dynamic_data* dd = hp->dynamic_data_of (max_generation); + total_max_gen_size += dd_current_size (dd) + dd_desired_allocation (dd); + } + for (int gen = 0; gen <= limit; gen++) { size_t total_desired = 0; @@ -22554,20 +22563,35 @@ void gc_heap::gc1() total_already_consumed = temp_total_already_consumed; } - size_t desired_per_heap = Align (total_desired/gc_heap::n_heaps, - get_alignment_constant (gen <= max_generation)); + size_t desired_per_heap = Align (total_desired/gc_heap::n_heaps, get_alignment_constant (gen <= max_generation)); size_t already_consumed_per_heap = total_already_consumed / gc_heap::n_heaps; if (gen == 0) { -#if 1 //subsumed by the linear allocation model +#ifdef DYNAMIC_HEAP_COUNT + if (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) + { + size_t new_allocation_datas = dynamic_heap_count_data.compute_gen0_new_allocation (total_max_gen_size); + new_allocation_datas = Align (new_allocation_datas, get_alignment_constant (gen <= max_generation)); + dprintf (6666, ("gen0 new_alloc %Id (%.3fmb), from datas: %Id (%.3fmb)", + desired_per_heap, ((double)desired_per_heap / 1000.0 / 1000.0), + new_allocation_datas, ((double)new_allocation_datas / 1000.0 / 1000.0))); + desired_per_heap = min (desired_per_heap, new_allocation_datas); + } +#endif //DYNAMIC_HEAP_COUNT + // to avoid spikes in mem usage due to short terms fluctuations in survivorship, // apply some smoothing. 
+ size_t desired_per_heap_before_smoothing = desired_per_heap; desired_per_heap = exponential_smoothing (gen, dd_collection_count (dynamic_data_of(gen)), desired_per_heap); -#endif //0 + size_t desired_per_heap_after_smoothing = desired_per_heap; - if (!heap_hard_limit) + if (!heap_hard_limit +#ifdef DYNAMIC_HEAP_COUNT + && (dynamic_adaptation_mode != dynamic_adaptation_to_application_sizes) +#endif //DYNAMIC_HEAP_COUNT + ) { // if desired_per_heap is close to min_gc_size, trim it // down to min_gc_size to stay in the cache @@ -22584,7 +22608,10 @@ void gc_heap::gc1() } #ifdef HOST_64BIT desired_per_heap = joined_youngest_desired (desired_per_heap); - dprintf (2, ("final gen0 new_alloc: %zd", desired_per_heap)); + + dprintf (6666, ("final gen0 new_alloc: total desired: %Id (%.3fmb/heap), before smooth %zd -> after smooth %zd -> after joined %zd", + total_desired, ((double)(total_desired / n_heaps)/ 1000.0 / 1000.0), + desired_per_heap_before_smoothing, desired_per_heap_after_smoothing, desired_per_heap)); #endif // HOST_64BIT gc_data_global.final_youngest_desired = desired_per_heap; } @@ -25347,9 +25374,10 @@ int gc_heap::calculate_new_heap_count () // on the way up, we essentially multiply the heap count by 1.5, so we go 1, 2, 3, 5, 8 ... // we don't go all the way to the number of CPUs, but stay 1 or 2 short int step_up = (n_heaps + 1) / 2; - int extra_heaps = 1 + (n_max_heaps >= 32); + int extra_heaps = (n_max_heaps >= 16) + (n_max_heaps >= 64); int actual_n_max_heaps = n_max_heaps - extra_heaps; - int max_growth = max ((n_max_heaps / 4), 2); + int max_growth = max ((n_max_heaps / 4), (1 + (actual_n_max_heaps > 3))); + step_up = min (step_up, (actual_n_max_heaps - n_heaps)); // on the way down, we essentially divide the heap count by 1.5 @@ -25392,13 +25420,15 @@ int gc_heap::calculate_new_heap_count () // target_tcp should be configurable. 
float target_tcp = 5.0; float target_gen2_tcp = 10.0; - float log_base = (float)1.1; + float log_base = (float)1.11; dynamic_heap_count_data.add_to_recorded_tcp (median_throughput_cost_percent); // This is the average of whatever is in the recorded tcp buffer. float avg_recorded_tcp = 0.0; + size_t num_gcs_since_last_change = current_gc_index - dynamic_heap_count_data.last_changed_gc_index; + if (process_eph_samples_p) { dynamic_heap_count_data.last_processed_stcp = smoothed_median_throughput_cost_percent; @@ -25407,22 +25437,21 @@ int gc_heap::calculate_new_heap_count () { // If median is high but stcp is lower than target, and if this situation continues, stcp will quickly be above target anyway; otherwise // we treat it as an outlier. - if (smoothed_median_throughput_cost_percent > target_tcp) + if (smoothed_median_throughput_cost_percent >= (target_tcp + 1.0)) { - float step_up_percent = log_with_base ((smoothed_median_throughput_cost_percent - target_tcp + log_base), log_base); - float step_up_float = (float)(step_up_percent / 100.0 * actual_n_max_heaps); + float step_up_float = (float)(1 + actual_n_max_heaps * log_with_base ((smoothed_median_throughput_cost_percent - target_tcp), log_base) / 100.0); int step_up_int = (int)step_up_float; dprintf (6666, ("[CHP0] inc %d(%.3f), last inc %d, %Id GCs elapsed, last stcp %.3f", step_up_int, step_up_float, (int)dynamic_heap_count_data.last_changed_count, - (current_gc_index - dynamic_heap_count_data.last_changed_gc_index), dynamic_heap_count_data.last_changed_stcp)); + num_gcs_since_last_change, dynamic_heap_count_data.last_changed_stcp)); // Don't adjust if we just adjusted last time we checked, unless we are in an extreme situation. 
if ((smoothed_median_throughput_cost_percent < 20.0f) && (avg_throughput_cost_percent < 20.0f) && - ((current_gc_index - dynamic_heap_count_data.last_changed_gc_index) < (2 * dynamic_heap_count_data_t::sample_size))) + (num_gcs_since_last_change < (2 * dynamic_heap_count_data_t::sample_size))) { - dprintf (6666, ("[CHP0] we just adjusted %Id GCs ago, skipping", (current_gc_index - dynamic_heap_count_data.last_changed_gc_index))); + dprintf (6666, ("[CHP0] we just adjusted %Id GCs ago, skipping", num_gcs_since_last_change)); } else { @@ -25435,9 +25464,9 @@ int gc_heap::calculate_new_heap_count () } if (((int)dynamic_heap_count_data.last_changed_count > 0) && (dynamic_heap_count_data.last_changed_gc_index > 0.0) && - ((current_gc_index - dynamic_heap_count_data.last_changed_gc_index) <= (3 * dynamic_heap_count_data_t::sample_size))) + (num_gcs_since_last_change <= (3 * dynamic_heap_count_data_t::sample_size))) { - dprintf (6666, ("[CHP0-0] just grew %d GCs ago, no change", (current_gc_index - dynamic_heap_count_data.last_changed_gc_index))); + dprintf (6666, ("[CHP0-0] just grew %d GCs ago, no change", num_gcs_since_last_change)); step_up_int = 0; } else @@ -25487,9 +25516,18 @@ int gc_heap::calculate_new_heap_count () { if (((int)dynamic_heap_count_data.last_changed_count > 0) && (dynamic_heap_count_data.last_changed_gc_index > 0.0)) { - (dynamic_heap_count_data.inc_failure_count)++; - dprintf (6666, ("[CHP0-4] just grew %d GCs ago, grow more aggressively from %d -> %d more heaps", - (current_gc_index - dynamic_heap_count_data.last_changed_gc_index), step_up_int, (step_up_int * (dynamic_heap_count_data.inc_failure_count + 1)))); + if (num_gcs_since_last_change > (16 * dynamic_heap_count_data_t::sample_size)) + { + dynamic_heap_count_data.inc_failure_count = 0; + dprintf (6666, ("[CHP0-4] grew %d GCs ago, too far in the past, set aggressive factor to 0, grow from %d -> %d more heaps", + num_gcs_since_last_change, dynamic_heap_count_data.inc_failure_count, 
step_up_int, (step_up_int * (dynamic_heap_count_data.inc_failure_count + 1)))); + } + else + { + (dynamic_heap_count_data.inc_failure_count)++; + dprintf (6666, ("[CHP0-4] grew %d GCs ago, aggressive factor is %d, grow more aggressively from %d -> %d more heaps", + num_gcs_since_last_change, dynamic_heap_count_data.inc_failure_count, step_up_int, (step_up_int * (dynamic_heap_count_data.inc_failure_count + 1)))); + } step_up_int *= dynamic_heap_count_data.inc_failure_count + 1; } } @@ -25514,9 +25552,9 @@ int gc_heap::calculate_new_heap_count () dynamic_heap_count_data.last_changed_stcp = smoothed_median_throughput_cost_percent; } - dprintf (6666, ("[CHP0] tcp %.3f, stcp %.3f -> (%d * %.3f%% = %.3f) -> %d + %d = %d -> %d", + dprintf (6666, ("[CHP0] tcp %.3f, stcp %.3f -> (%d -> %.3f) -> %d + %d = %d -> %d", median_throughput_cost_percent, smoothed_median_throughput_cost_percent, - actual_n_max_heaps, step_up_percent, step_up_float, step_up_int, n_heaps, (n_heaps + step_up_int), new_n_heaps)); + actual_n_max_heaps, step_up_float, step_up_int, n_heaps, (n_heaps + step_up_int), new_n_heaps)); } } } @@ -25533,7 +25571,7 @@ int gc_heap::calculate_new_heap_count () } dprintf (6666, ("[CHP1] last time adjusted %s by %d at GC#%Id (%Id GCs since), stcp was %.3f, now stcp is %.3f", ((dynamic_heap_count_data.last_changed_count > 0.0) ? 
"up" : "down"), (int)dynamic_heap_count_data.last_changed_count, - dynamic_heap_count_data.last_changed_gc_index, (current_gc_index - dynamic_heap_count_data.last_changed_gc_index), + dynamic_heap_count_data.last_changed_gc_index, num_gcs_since_last_change, dynamic_heap_count_data.last_changed_stcp, smoothed_median_throughput_cost_percent)); float below_target_diff = target_tcp - median_throughput_cost_percent; @@ -25546,10 +25584,16 @@ int gc_heap::calculate_new_heap_count () if (dynamic_heap_count_data.below_target_accumulation >= dynamic_heap_count_data.below_target_threshold) { int below_target_tcp_count = dynamic_heap_count_data.rearrange_recorded_tcp (); - float below_target_tcp_slope = slope (dynamic_heap_count_data.recorded_tcp, below_target_tcp_count, &avg_recorded_tcp); + float below_target_tcp_slope = slope (dynamic_heap_count_data.recorded_tcp_rearranged, below_target_tcp_count, &avg_recorded_tcp); float diff_pct = (target_tcp - smoothed_median_throughput_cost_percent) / target_tcp; int step_down_int = (int)(diff_pct / 2.0 * n_heaps); - dprintf (6666, ("[CHP1] observed %d tcp's <= or ~ target, avg %.3f, slope %.3f, stcp %.3f below target, shrink by %.3f * %d = %d heaps", + if ((step_down_int == 0) && dynamic_heap_count_data.is_tcp_far_below (diff_pct)) + { + dprintf (6666, ("[CHP1] we are far below target, reduce by 1 heap")); + step_down_int = 1; + } + + dprintf (6666, ("[CHP1] observed %d tcp's <= or ~ target, avg %.3f, slope %.3f, stcp %.3f%% below target, shrink by %.3f%% * %d = %d heaps", below_target_tcp_count, avg_recorded_tcp, below_target_tcp_slope, (diff_pct * 100.0), (diff_pct * 50.0), n_heaps, step_down_int)); bool shrink_p = false; @@ -25629,11 +25673,22 @@ int gc_heap::calculate_new_heap_count () if (shrink_p && step_down_int && (new_n_heaps > step_down_int)) { - // TODO - if we see that it wants to shrink by 1 heap too many times, we do want to shrink. 
if (step_down_int == 1) { - step_down_int = 0; - dprintf (6666, ("[CHP1-3] don't shrink if it's just one heap. not worth it")); + if (dynamic_heap_count_data.should_dec_by_one()) + { + dprintf (6666, ("[CHP1-3] shrink by one heap")); + } + else + { + step_down_int = 0; + dprintf (6666, ("[CHP1-3] don't shrink just yet if it's just one heap")); + } + } + else + { + dynamic_heap_count_data.reset_dec_by_one(); + dprintf (6666, ("[CHP1-3] shrink by %d heap(s), reset dec by one", step_down_int)); } new_n_heaps -= step_down_int; @@ -26265,7 +26320,7 @@ bool gc_heap::change_heap_count (int new_n_heaps) assert (gen_size >= dd_fragmentation (dd)); dd_current_size (dd) = gen_size - dd_fragmentation (dd); - dprintf (3, ("h%d g%d: budget: %zd, left in budget: %zd, %zd generation_size: %zd fragmentation: %zd current_size: %zd", + dprintf (3, ("h%d g%d: budget: %zd, left in budget: %zd, generation_size: %zd fragmentation: %zd current_size: %zd", i, gen_idx, desired_alloc_per_heap[gen_idx], @@ -43608,35 +43663,6 @@ size_t gc_heap::desired_new_allocation (dynamic_data* dd, new_allocation = min (new_allocation, max (min_gc_size, (max_size/3))); } - -#ifdef DYNAMIC_HEAP_COUNT - if (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) - { - // if this is set, limit gen 0 size to a small multiple of the older generations - float f_older_gen = ((10.0f / conserve_mem_setting) - 1) * 0.5f; - - // compute the total size of the older generations - size_t older_size = 0; - for (int gen_index_older = 1; gen_index_older < total_generation_count; gen_index_older++) - { - dynamic_data* dd_older = dynamic_data_of (gen_index_older); - older_size += dd_current_size (dd_older); - } - // derive a new allocation size from it - size_t new_allocation_from_older = (size_t)(older_size*f_older_gen); - - // limit the new allocation to this value - new_allocation = min (new_allocation, new_allocation_from_older); - - // but make sure it doesn't drop below the minimum size - new_allocation = 
max (new_allocation, min_gc_size); - - dprintf (2, ("f_older_gen: %d%% older_size: %zd new_allocation: %zd", - (int)(f_older_gen*100), - older_size, - new_allocation)); - } -#endif //DYNAMIC_HEAP_COUNT } } @@ -48782,7 +48808,8 @@ HRESULT GCHeap::Initialize() // start with only 1 heap gc_heap::smoothed_desired_total[0] /= gc_heap::n_heaps; int initial_n_heaps = 1; - dprintf (9999, ("gc_heap::n_heaps is %d, initial %d", gc_heap::n_heaps, initial_n_heaps)); + + dprintf (6666, ("n_heaps is %d, initial n_heaps is %d, %d cores", gc_heap::n_heaps, initial_n_heaps, g_num_processors)); { if (!gc_heap::prepare_to_change_heap_count (initial_n_heaps)) @@ -48810,6 +48837,12 @@ HRESULT GCHeap::Initialize() gc_heap::dynamic_heap_count_data.below_target_threshold = 10.0; gc_heap::dynamic_heap_count_data.inc_recheck_threshold = 5; gc_heap::dynamic_heap_count_data.dec_failure_recheck_threshold = 5; + // This should really be set as part of computing static data and should take conserve_mem_setting into consideration. + gc_heap::dynamic_heap_count_data.max_gen0_new_allocation = min (dd_max_size (gc_heap::g_heaps[0]->dynamic_data_of (0)), (64 * 1024 * 1024)); + gc_heap::dynamic_heap_count_data.min_gen0_new_allocation = dd_min_size (gc_heap::g_heaps[0]->dynamic_data_of (0)); + + dprintf (6666, ("datas max gen0 budget %Id, min %Id", + gc_heap::dynamic_heap_count_data.max_gen0_new_allocation, gc_heap::dynamic_heap_count_data.min_gen0_new_allocation)); } #endif //DYNAMIC_HEAP_COUNT GCScan::GcRuntimeStructuresValid (TRUE); diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h index 788cbff9f5e507..6a3b600f8633f0 100644 --- a/src/coreclr/gc/gcpriv.h +++ b/src/coreclr/gc/gcpriv.h @@ -4358,6 +4358,10 @@ class gc_heap float below_target_accumulation; float below_target_threshold; + // TODO: we should refactor this and the inc checks into a utility class. + bool dec_by_one_scheduled; + int dec_by_one_count; + // Currently only used for dprintf. 
size_t first_below_target_gc_index; @@ -4371,11 +4375,65 @@ class gc_heap return ((diff_pct <= 0.2) && (diff_pct >= -0.2) && (slope <= 0.1) && (slope >= -0.1)); } + bool is_tcp_far_below (float diff_pct) + { + return (diff_pct >= 0.4); + } + bool is_close_to_max (int new_n, int max) { return ((max - new_n) <= (max / 10)); } + bool should_dec_by_one() + { + if (!dec_by_one_scheduled) + { + dec_by_one_scheduled = true; + } + + if (dec_by_one_scheduled) + { + dec_by_one_count++; + dprintf (6666, ("scheduled to dec by 1 heap %d times", dec_by_one_count)); + } + + return (dec_by_one_count >= 5); + } + + void reset_dec_by_one() + { + dec_by_one_scheduled = false; + dec_by_one_count = 0; + } + + size_t max_gen0_new_allocation; + size_t min_gen0_new_allocation; + + size_t compute_gen0_new_allocation (size_t total_old_gen_size) + { + assert (total_old_gen_size > 0); + + // TODO: adjust these based on conserve_mem_setting. + double old_gen_growth_factor = 16.0 / sqrt ((double)total_old_gen_size / 1000.0 / 1000.0); + double saved_old_gen_growth_factor = old_gen_growth_factor; + old_gen_growth_factor = min (10.0, old_gen_growth_factor); + old_gen_growth_factor = max (0.1, old_gen_growth_factor); + + size_t total_new_allocation_old_gen = (size_t)(old_gen_growth_factor * (double)total_old_gen_size); + size_t new_allocation_old_gen = total_new_allocation_old_gen / n_heaps; + + dprintf (6666, ("total gen2 %Id (%.3fmb), factor %.3f=>%.3f -> total gen0 new_alloc %Id (%Id/heap, %.3fmb)", + total_old_gen_size, ((double)total_old_gen_size / 1000.0 / 1000.0), + saved_old_gen_growth_factor, old_gen_growth_factor, total_new_allocation_old_gen, + new_allocation_old_gen, ((double)new_allocation_old_gen / 1000.0 / 1000.0))); + + new_allocation_old_gen = min (max_gen0_new_allocation, new_allocation_old_gen); + new_allocation_old_gen = max (min_gen0_new_allocation, new_allocation_old_gen); + + return new_allocation_old_gen; + } + // // gen2 GCs are handled separately only as a backstop. 
// diff --git a/src/coreclr/inc/gcmsg.inl b/src/coreclr/inc/gcmsg.inl index 59951bae9ef716..4171b7e8fa91ad 100644 --- a/src/coreclr/inc/gcmsg.inl +++ b/src/coreclr/inc/gcmsg.inl @@ -50,7 +50,7 @@ static const char* gcDetailedEndMsg() { STATIC_CONTRACT_LEAF; - return "*EGC* %zd(gen0:%zd)(%zd)(%d)(%s)(%s)(%s)(ml: %d->%d)"; + return "*EGC* %zd(gen0:%zd)(%zd)(%d)(%s)(%s)(%s)(ml: %d->%d)\n"; } static const char* gcStartMarkMsg() From 95f68f5d0ef04c57392e1991cc8602d01dc1628b Mon Sep 17 00:00:00 2001 From: Andy Ayers Date: Wed, 3 Apr 2024 12:45:05 -0700 Subject: [PATCH 067/132] JIT: add note on profile reconstruction (#100601) I left a long note on the algorithm as a comment on #99992. Move it to the doc folder. --- .../jit/profile-count-reconstruction.md | 282 ++++++++++++++++++ 1 file changed, 282 insertions(+) create mode 100644 docs/design/coreclr/jit/profile-count-reconstruction.md diff --git a/docs/design/coreclr/jit/profile-count-reconstruction.md b/docs/design/coreclr/jit/profile-count-reconstruction.md new file mode 100644 index 00000000000000..94fb989a8f28bc --- /dev/null +++ b/docs/design/coreclr/jit/profile-count-reconstruction.md @@ -0,0 +1,282 @@ +## Numeric Solvers for Profile Count Reconstruction + +It may not be readily apparent how count reconstruction works. Perhaps these notes will shed some light on things. + +In our flowgraph model we assume that the edge likelihoods are trustworthy and well formed (meaning each edge's likelihood is in [0,1] and the sum of all likelihoods for a block's successor edges is 1). + +The appeal of edge well-formedness is easy to check and relatively easy to maintain during various optimizations. It is a *local* property. + +We will use $p_{i,j}$ to denote the likelihood that block $i$ transfers control to block $j$. 
Thus local consistency means: + +$$ 0 \le p_{i,j} \le 1 $$ +and, for blocks with successors: +$$ \sum_j p_{i,j} = 1 $$ + +By contrast, block weight consistency requires that the flow into a block be balanced by the flow out of a block. It is a *global* property and harder to maintain during optimizations. It may also not be true initially. + +We will use $w_j$ for the weight of block $j$. We will also assume there is an external source and sink of weight for some blocks (method entry and exit points), $e_j$. Then block consistency means: + +$$ e_j + \sum_i w_i p_{i,j} = \sum_k w_j p_{j,k} $$ + +where the LHS is flow in and the RHS is flow out of block $j$. But + +$$ \sum_k w_j p_{j,k} = w_j \sum_k p_{j,k} = w_j $$ + +so we can restate this as saying the external flow plus the flow into the block must equal the block weight: + +$$ e_j + \sum_i w_i p_{i,j} = w_j$$ + +The goal of this work is to explore methods for reconstructing a set of consistent block weights $w_j$ from the external weight sources and sinks $e_j$ and edge likelihoods $p_{i,j}$. + +### General Solution + +The above can be summarized in matrix-vector form as + +$$ \boldsymbol w = \boldsymbol e + \boldsymbol P \boldsymbol w $$ + +where to be able to express the sum of incoming flow as a standard matrix-vector product we have: + +$$ \boldsymbol P_{i,j} = { p_{j,i} } $$ + +(that is, in $\boldsymbol P$, the flow from block $i$ is described by the entries in the $i\text{th}$ column, and the flow into block $i$ by the $i\text{th}$ row). A bit of rearranging puts this into the standard linear equation form + +$$ (\boldsymbol I - \boldsymbol P) \boldsymbol w = \boldsymbol e$$ + +and this can be solved (in principle) for $\boldsymbol w$ by computing the inverse of $\boldsymbol I - \boldsymbol P$ (assuming this exists), giving + +$$ \boldsymbol w = {(\boldsymbol I - \boldsymbol P)}^{-1} \boldsymbol e $$ + +For example, given the following graph with edge likelihoods as shown: + +

+ +

+ +we have + +```math +\boldsymbol P = +\begin{bmatrix} + 0 & 0 & 0 & 0 \cr + 1 & 0 & 0.8 & 0 \cr + 0 & 0.5 & 0 & 0 \cr + 0 & 0.5 & 0.2 & 0 +\end{bmatrix} +``` + +Note each column save the last sums to 1.0, representing the fact that the outgoing likelihoods from each block must sum to 1.0, unless there are no successors. + +Thus +```math +(\boldsymbol I - \boldsymbol P) = +\begin{bmatrix} + 1 & 0 & 0 & 0 \\\ +-1 & 1 & -0.8 & 0 \\\ + 0 & -0.5 & 1 & 0 \\\ + 0 & -0.5 & -0.2 & 1 +\end{bmatrix} +``` +and so (details of computing the inverse left as exercise for the reader) +```math +{(\boldsymbol I - \boldsymbol P)}^{-1} = +\begin{bmatrix} +1 & 0 & 0 & 0 \\\ +1.67 & 1.67 & 1.33 & 0 \\\ +0.83 & 0.83 & 1.67 & 0 \\\ +1 & 1 & 1 & 1 +\end{bmatrix} +``` +Note the elements of ${(\boldsymbol I - \boldsymbol P)}^{-1}$ are all non-negative; intuitively, if we increase flow anywhere in the graph, it can only cause weights to increase or stay the same. + +If we feed 6 units of flow into A, we have +```math +\boldsymbol w = \begin{bmatrix} 6 \\\ 10 \\\ 5 \\\ 6 \end{bmatrix} +``` + +or graphically + +

+ +

+ +However, explicit computation of the inverse of a matrix is computationally expensive. + +Also note (though it's not fully obvious from such a small example) that the matrix $(\boldsymbol I - \boldsymbol P)$ is *sparse*: a typical block has only 1 or 2 successors, so the number of nonzero entries in each column will generally be either 2 or 3, no matter how many nodes we have. The inverse of a sparse matrix is typically not sparse, so computing it is not only costly in time but also in space. + +So solution techniques that can leverage sparseness are of particular interest. + +### A More Practical Solution + +Note the matrix $\boldsymbol I - \boldsymbol P$ has non-negative diagonal elements and negative non-diagonal elements, since all entries of $\boldsymbol P$ are in the range [0,1]. + +If we further restrict ourselves to the case where $p_{i,i} \lt 1$ (meaning there are are no infinite self-loops) then all the diagonal entries are positive and the matrix has an inverse with no negative elements. + +Such matrices are known as M-matrices. 
+ +It is well known that for an M-matrix $(\boldsymbol I - \boldsymbol P)$ the inverse can be computed as the limit of an infinite series + +$$ {(\boldsymbol I - \boldsymbol P)}^{-1} = \boldsymbol I + \boldsymbol P + \boldsymbol P^2 + \dots $$ + +This gives rise to a simple *iterative* procedure for computing an approximate value of $\boldsymbol w$ (here superscripts on $\boldsymbol w$ are successive iterates, not powers) + +$$ \boldsymbol w^{(0)} = \boldsymbol e $$ +$$ \boldsymbol w^{(1)} = (\boldsymbol I + \boldsymbol P) \boldsymbol e = \boldsymbol e + \boldsymbol P \boldsymbol w^{(0)} $$ +$$ \boldsymbol w^{(2)} = (\boldsymbol I + \boldsymbol P + \boldsymbol P^2) \boldsymbol e = \boldsymbol e + \boldsymbol P \boldsymbol w^{(1)}$$ +$$ \dots$$ +$$ \boldsymbol w^{(k + 1)} = \boldsymbol e + \boldsymbol P \boldsymbol w^{(k)} $$ + +where we can achieve any desired precision for $\boldsymbol w$ by iterating until the successive $\boldsymbol w$ differ by a small amount. + +Intuitively this should make sense, we are effectively pouring weight into the entry block(s) and letting the weights flow around in the graph until they reach a fixed point. If we do this for the example above, we get the following sequence of values for $\boldsymbol w^n$: + +```math +\boldsymbol w^{(0)} = \begin{bmatrix} 6 \\\ 0 \\\ 0 \\\ 0 \end{bmatrix}, +\boldsymbol w^{(1)} = \begin{bmatrix} 6 \\\ 6 \\\ 0 \\\ 0 \end{bmatrix}, +\boldsymbol w^{(2)} = \begin{bmatrix} 6 \\\ 6 \\\ 3 \\\ 3 \end{bmatrix}, +\boldsymbol w^{(3)} = \begin{bmatrix} 6 \\\ 8.4 \\\ 3 \\\ 3.6 \end{bmatrix}, +\boldsymbol w^{(4)} = \begin{bmatrix} 6 \\\ 8.4 \\\ 4.2 \\\ 3.6 \end{bmatrix}, +\boldsymbol w^{(5)} = \begin{bmatrix} 6 \\\ 9.36 \\\ 4.2 \\\ 3.6 \end{bmatrix}, +\dots, +\boldsymbol w^{(20)} = \begin{bmatrix} 6 \\\ 9.9990 \\\ 4.9995 \\\ 5.9992 \end{bmatrix}, +\dots +``` + +and the process converges to the weights found using the inverse. However convergence is fairly slow. 
+ +Classically this approach is known as *Jacobi's method*. At each iterative step, the new values are based only on the old values. + +### Jacobi's Method + +If you read the math literature on iterative solvers, Jacobi's method is often described as follows. Given a linear system $\boldsymbol A \boldsymbol x = \boldsymbol b$, a *splitting* of $\boldsymbol A$ is $\boldsymbol A = \boldsymbol M - \boldsymbol N$, where $\boldsymbol M^{-1}$ exists. Then the *iteration matrix* $\boldsymbol H$ is given by $\boldsymbol H = \boldsymbol M^{-1} \boldsymbol N$. Given some initial guess at an answer $\boldsymbol x^{(0)}$ the iteration scheme is: + +$$ \boldsymbol x^{(k+1)} = \boldsymbol H \boldsymbol x^{(k)} + \boldsymbol M^{-1}\boldsymbol b$$ + +And provided that $\rho(\boldsymbol H) \lt 1$, + +$$\lim_{k \to \infty} \boldsymbol x^{(k)}=\boldsymbol A^{-1} \boldsymbol b$$ + +In our case $\boldsymbol A = \boldsymbol I - \boldsymbol P$ and so the splitting is simply $\boldsymbol M = \boldsymbol I$ and $\boldsymbol N = \boldsymbol P$. Since $\boldsymbol M = \boldsymbol I$, $\boldsymbol M^{-1} = \boldsymbol I$ (the identity matrix is its own inverse), $\boldsymbol H = \boldsymbol P$, $\boldsymbol x = \boldsymbol w$ and $\boldsymbol b = \boldsymbol e$, we end up with + +$$ \boldsymbol w^{(k+1)} = \boldsymbol P \boldsymbol w^{(k)} + \boldsymbol e$$ + +as we derived above. + +As an alternative we could split $\boldsymbol A = (\boldsymbol I - \boldsymbol P)$ into diagonal part $\boldsymbol M = \boldsymbol D$ and remainder part $\boldsymbol N$. This only leads to differences from the splitting above when there are self loops, otherwise the diagonal of $\boldsymbol P$ is all zeros. + +With that splitting, + + +```math + \boldsymbol D^{-1}_{i,i} = 1/a_{i,i} = 1/(1 - p_{i,i}) +``` + +so as $p_{i,i}$ gets close to 1.0 the value can be quite large: these are the count amplifications caused by self-loops. 
If we write things out component-wise we get the classic formulation for Jacobi iteration: + +```math + x^{(k+1)}_i = \frac{1}{a_{i,i}} \left (b_i - \sum_{j \ne i} a_{i,j} x^{(k)}_j \right) +``` + +or in our block weight and edge likelihood notation + +```math + w^{(k+1)}_i = \frac{1}{(1 - p_{i,i})} \left (e_i + \sum_{j \ne i} p_{j,i} w^{(k)}_j \right) +``` + +Intuitively this reads: the new value of node $i$ is the sum of the external input (if any) plus the weights flowing in from (non-self) predecessors, with the sum scaled up by the self-loop factor. + +### On Convergence and Stability + +While the iterative method above is guaranteed to converge when $\boldsymbol A$ is an M-matrix, its rate of convergence is potentially problematic. For an iterative scheme, the asymptotic rate of convergence can be shown to be $R \approx -\log_{10} \rho(\boldsymbol H)$ digits / iteration. + +Here the spectral radius $\rho(\boldsymbol H)$ is the magnitude of the largest eigenvalue of $\boldsymbol H$. For the example above $\boldsymbol H = \boldsymbol P$ and $\rho(\boldsymbol P) \approx 0.63$, giving $R = 0.2$. So to converge to $4$ decimal places takes about $20$ iterations, as the table of data above indicates. + +It is also worth noting that for synthesis the matrix $(\boldsymbol I - \boldsymbol P)$ is often *ill-conditioned*, meaning that small changes in the input vector $\boldsymbol e$ (or small inaccuracies in the likelihoods $p_{i,j}$) can lead to large changes in the solution vector $\boldsymbol w$. In some sense this is a feature; we know that blocks in flow graphs can have widely varying weights, with some blocks rarely executed and others executed millions of times per call to the method. So it must be possible for $(\boldsymbol I - \boldsymbol P)$ to amplify the magnitude of a "small" input (say 1 call to the method) into large block counts. 
+ +### Accelerating Convergence I: Gauss-Seidel and Reverse Postorder + +It's also well-known that Gauss-Seidel iteration often converges faster than Jacobi iteration. Here instead of always using the old iteration values, we try and use the new iteration values that are available, where we presume each update happens in order of increasing $i$: + +```math + x^{(k+1)}_i = \frac{1}{a_{i,i}} \left(b_i - \sum_{j \lt i} a_{i,j} x^{(k+1)}_j - \sum_{j \gt i} a_{i,j} x^{(k)}_j \right) +``` + +or again in our notation + +```math + w^{(k+1)}_i = \frac{1}{(1 - p_{i,i})} \left(e_i + \sum_{j \lt i} p_{j,i} w^{(k + 1)}_j + \sum_{j \gt i} p_{j,i} w^{(k)}_j \right) +``` + +In the above scheme the order of visiting successive blocks is left unspecified, and (in principle) any order can be used. But by using a reverse postorder to index the blocks, we can ensure a maximal amount of forward propagation per iteration. Note that if a block has an incoming edge from a node that appears later in the reverse postorder, that block is a loop header. + +If we do that, the code above nicely corresponds to our notion of forward and backward edges in the RPO: + +```math + w^{(k+1)}_i = \frac{1}{\underbrace{(1 - p_{i,i})}_\text{self edge}} \left(e_i + \underbrace{\sum_{j \lt i} p_{j,i} w^{(k + 1)}_j}_\text{forward edges in RPO} + \underbrace{\sum_{j \gt i} p_{j,i} w^{(k)}_j}_\text{backward edges in RPO} \right) +``` + +Note that because of the order of reads and writes, $\boldsymbol w^{(k+1)}$ can share storage with $\boldsymbol w^{(k)}$. 
+ +On the example above this results in: + +$$ +\boldsymbol w^{(0)} = \begin{bmatrix} 6 \\\ 6 \\\ 3 \\\ 3 \end{bmatrix}, +\boldsymbol w^{(1)} = \begin{bmatrix} 6 \\\ 8.4 \\\ 4.2 \\\ 5.04 \end{bmatrix}, +\boldsymbol w^{(2)} = \begin{bmatrix} 6 \\\ 9.36 \\\ 4.68 \\\ 5.62 \end{bmatrix}, +\boldsymbol w^{(3)} = \begin{bmatrix} 6 \\\ 9.74 \\\ 4.87 \\\ 5.85 \end{bmatrix}, +\boldsymbol w^{(4)} = \begin{bmatrix} 6 \\\ 9.90 \\\ 4.95 \\\ 5.94 \end{bmatrix}, +\boldsymbol w^{(5)} = \begin{bmatrix} 6 \\\ 9.96 \\\ 4.98 \\\ 5.98 \end{bmatrix}, +\dots, +\boldsymbol w^{(9)} = \begin{bmatrix} 6 \\\ 9.9990 \\\ 4.9995 \\\ 5.9994 \end{bmatrix}, +\dots +$$ + +So it is converging about twice as fast. As with the Jacobi method one can re-express this as a splitting and determine an iteration matrix $\boldsymbol H$ and its dominant eigenvalue, and from this the rate of convergence, but we will not do so here. + +### Accelerating Convergence II: Cyclic Probabilities + +A flow graph is reducible (or is said to have reducible loops) if every cycle in the graph has a block in the cycle that dominates the other blocks in the cycle. We will call such cycles natural loops, distinguished by their entry blocks. + +For reducible loops we can compute the amount by which they amplify flow using a technique described by Wu and Larus: given a loop head $h$ we classify the predecessor edges into two sets: input edges that do not come from a block within the loop, and back edges that come from a block within the loop. We then inject one unit of flow into the block and propagate it through the loop, and compute the sum of the weights on the back edges. This value will be some $p$ where $0 \le p \le 1$. Then the *cyclic probability* $C_p$ for $h$ is $C_p(h) = 1 / (1 - p)$. To avoid dividing by zero we artificially cap $C_p$ at some maximum value (equivalently, we cap $p$ at some value less than $1$). 
+ +Note also that the technique above won't compute an accurate $C_p$ for loops that contain improper (irreducible) loops, as solving for $C_p$ in such cases would require iteration (the single-pass $C_p$ will be an underestimate). So we must also track which loops contain improper loops. + +If we add this refinement to our algorithm we end up with: + +```math + w^{(k+1)}_i = +\begin{cases} + C_p(i) \left(e_i + \sum_{j \lt i} p_{j,i} w^{(k + 1)}_j \right), \text{ block } i \text{ is a natural loop head, and does not contain an improper loop} \\\ + \frac{1}{(1 - p_{i,i})} \left(e_i + \sum_{j \lt i} p_{j,i} w^{(k + 1)}_j + \sum_{j \gt i} p_{j,i} w^{(k)}_j \right) +\end{cases} +``` + +The second clause includes blocks without any back edges, blocks with back edges that are not headers of natural loops, and blocks that are headers of natural loops where the loop contains an improper loop. + +On an example like the one above this converges in one pass. If any $C_p$ was capped then the solution will be approximate and we will have failed to achieve a global balance. But we will also (generally) have avoided creating infinite or very large counts. + +One can imagine that if we cap some $C_p$ we could also try to alter some of the $p_{j,i}$ to bring things back into balance, but this seems tricky if there are multiple paths through the loop. And we're basically deciding at that point that consistency is more important than accuracy. + +Since the remainder of the JIT is going to have to cope with lack of global balance anyway (recall it is hard to preserve) for now we are going to try and tolerate reconstruction inconsistencies. + +The algorithm described above is implemented in the code as the `GaussSeidel` solver. + +### Cycles That Are Not Natural Loops, More Sophisticated Solvers, and Deep Nests + +If the flow graph has cycles that are not natural loops (irreducible loops) the above computations will converge but again may converge very slowly. 
On a sample of about 500 graphs with irreducible loops the modified Gauss-Seidel approach above required more than 20 iterations in 120 cases and more than 50 iterations in 70 cases, with worst-case around 500 iterations. + +SOR is a classic convergence altering technique, but unfortunately, for M-Matrices SOR can only safely be used to slow down convergence. + +There does not seem to be a good analog of $C_p$ for such cases, though it's possible that "block diagonal" solvers may be tackling exactly that problem. + +It's possible that more sophisticated solution techniques like BiCGstab or CGS might be worth consideration. Or perhaps a least-squares solution, if we're forced to be approximate, to try and minimize the overall approximation error. + +In very deep loop nests even $C_p$ is not enough to prevent creation of large counts. We could try and adjust the cap level downwards as the loops get deeper, or distribute the $C_p$ "tax" across all the loops. This tends to only be a problem for stress test cases. + +### References + +Carl D. Meyer. *Matrix Analysis and Applied Linear Algebra*, in particular section 7.10. + +Nick Higham. [What is an M-Matrix?](https://nhigham.com/2021/03/16/what-is-an-m-matrix/) + +Youfeng Wu and James R. Larus. Static branch frequency and program profile analysis, Micro-27 (1994). 
+ From 71f45aaa6a394e5feef380477e0553a6fbee7b81 Mon Sep 17 00:00:00 2001 From: "dotnet-maestro[bot]" <42748379+dotnet-maestro[bot]@users.noreply.github.com> Date: Wed, 3 Apr 2024 15:12:05 -0500 Subject: [PATCH 068/132] [main] Update dependencies from dotnet/emsdk (#100402) * Update dependencies from https://github.com/dotnet/emsdk build 20240327.2 Microsoft.SourceBuild.Intermediate.emsdk , Microsoft.NET.Runtime.Emscripten.3.1.34.Python.win-x64 , Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport From Version 9.0.0-preview.4.24176.2 -> To Version 9.0.0-preview.4.24177.2 * Update dependencies from https://github.com/dotnet/emsdk build 20240328.3 Microsoft.SourceBuild.Intermediate.emsdk , Microsoft.NET.Runtime.Emscripten.3.1.34.Python.win-x64 , Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport From Version 9.0.0-preview.4.24176.2 -> To Version 9.0.0-preview.4.24178.3 * Update dependencies from https://github.com/dotnet/emsdk build 20240329.3 Microsoft.SourceBuild.Intermediate.emsdk , Microsoft.NET.Runtime.Emscripten.3.1.34.Python.win-x64 , Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport From Version 9.0.0-preview.4.24176.2 -> To Version 9.0.0-preview.4.24179.3 Dependency coherency updates 
runtime.linux-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.win-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.win-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.osx-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.osx-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.win-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.win-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.osx-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.osx-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.osx-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.osx-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools From Version 16.0.5-alpha.1.24172.10 -> To Version 16.0.5-alpha.1.24178.1 (parent: Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport * Update dependencies from https://github.com/dotnet/emsdk build 20240329.3 Microsoft.SourceBuild.Intermediate.emsdk , Microsoft.NET.Runtime.Emscripten.3.1.34.Python.win-x64 , Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport From Version 9.0.0-preview.4.24176.2 -> To Version 9.0.0-preview.4.24179.3 Dependency coherency updates 
runtime.linux-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.win-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.win-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.osx-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.osx-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.win-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.win-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.osx-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.osx-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.osx-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.osx-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools From Version 16.0.5-alpha.1.24172.10 -> To Version 16.0.5-alpha.1.24178.1 (parent: Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport * Update dependencies from https://github.com/dotnet/emsdk build 20240329.3 Microsoft.SourceBuild.Intermediate.emsdk , Microsoft.NET.Runtime.Emscripten.3.1.34.Python.win-x64 , Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport From Version 9.0.0-preview.4.24176.2 -> To Version 9.0.0-preview.4.24179.3 Dependency coherency updates 
runtime.linux-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.win-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.win-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.osx-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.osx-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.win-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.win-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.osx-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.osx-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.osx-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.osx-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools From Version 16.0.5-alpha.1.24172.10 -> To Version 16.0.5-alpha.1.24178.1 (parent: Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport * Update dependencies from https://github.com/dotnet/emsdk build 20240401.2 Microsoft.SourceBuild.Intermediate.emsdk , Microsoft.NET.Runtime.Emscripten.3.1.34.Python.win-x64 , Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport From Version 9.0.0-preview.4.24176.2 -> To Version 9.0.0-preview.4.24201.2 Dependency coherency updates 
runtime.linux-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.win-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.win-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.osx-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.osx-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.win-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.win-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.osx-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.osx-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.osx-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.osx-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools From Version 16.0.5-alpha.1.24172.10 -> To Version 16.0.5-alpha.1.24179.1 (parent: Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport * Update dependencies from https://github.com/dotnet/emsdk build 20240401.2 Microsoft.SourceBuild.Intermediate.emsdk , Microsoft.NET.Runtime.Emscripten.3.1.34.Python.win-x64 , Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport From Version 9.0.0-preview.4.24176.2 -> To Version 9.0.0-preview.4.24201.2 Dependency coherency updates 
runtime.linux-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.win-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.win-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.osx-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.osx-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.win-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.win-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.osx-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.osx-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.osx-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.osx-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools From Version 16.0.5-alpha.1.24172.10 -> To Version 16.0.5-alpha.1.24179.1 (parent: Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport --------- Co-authored-by: dotnet-maestro[bot] Co-authored-by: Larry Ewing --- eng/Version.Details.xml | 100 ++++++++++++++++++++-------------------- eng/Versions.props | 48 +++++++++---------- 2 files changed, 74 insertions(+), 74 deletions(-) diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml index 1dfdca6c9bca10..8d15853b82f753 100644 --- a/eng/Version.Details.xml +++ b/eng/Version.Details.xml @@ -12,41 +12,41 @@ https://github.com/dotnet/wcf 7f504aabb1988e9a093c1e74d8040bd52feb2f01 - + https://github.com/dotnet/emsdk - 
5b7beea1daa64d283d62d52a0027b13ee9484ff6 + bd79d3dd7ed2db36b3c3d4fa807c21a06d2024ec - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 https://github.com/dotnet/command-line-api @@ -68,14 +68,14 @@ 9c8ea966df62f764523b51772763e74e71040a92 - + https://github.com/dotnet/emsdk - 5b7beea1daa64d283d62d52a0027b13ee9484ff6 + bd79d3dd7ed2db36b3c3d4fa807c21a06d2024ec - + https://github.com/dotnet/emsdk - 5b7beea1daa64d283d62d52a0027b13ee9484ff6 + bd79d3dd7ed2db36b3c3d4fa807c21a06d2024ec @@ -226,61 +226,61 @@ https://github.com/dotnet/runtime-assets f282faa0ddd1b3672a3cba54518943fb1d0b4e36 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 
0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/llvm-project - 0b2a167a918c95d9129129e2f3bc33a2be2bdf48 + c1305278000772701230efa9353cc136e10a8717 https://github.com/dotnet/runtime diff --git a/eng/Versions.props b/eng/Versions.props index 2c4ee568cf864c..acc78b3a7b26bb 100644 --- a/eng/Versions.props +++ b/eng/Versions.props @@ -216,39 +216,39 @@ 2.3.5 9.0.0-alpha.1.24167.3 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 + 16.0.5-alpha.1.24179.1 + 
16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 - 9.0.0-preview.4.24176.2 + 9.0.0-preview.4.24201.2 $(MicrosoftNETWorkloadEmscriptenCurrentManifest90100TransportVersion) - 9.0.0-preview.4.24176.2 + 9.0.0-preview.4.24201.2 1.1.87-gba258badda 1.0.0-v3.14.0.5722 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 - 16.0.5-alpha.1.24172.10 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24179.1 3.1.7 1.0.406601 From c08bd7b666430bbd7bb932281f74c89a7d094204 Mon Sep 17 00:00:00 2001 From: Andrew Au Date: Wed, 3 Apr 2024 13:27:00 -0700 Subject: [PATCH 069/132] Getting vxsort working on Linux amd64 (#98712) Co-authored-by: Jan Vorlicek Co-authored-by: Adeel Mujahid <3840695+am11@users.noreply.github.com> --- .../dlls/mscoree/coreclr/CMakeLists.txt | 6 +++ src/coreclr/gc/CMakeLists.txt | 26 +++++----- src/coreclr/gc/gc.cpp | 6 +-- src/coreclr/gc/gcsvr.cpp | 2 +- src/coreclr/gc/gcwks.cpp | 2 +- src/coreclr/gc/unix/gcenv.unix.cpp | 6 --- src/coreclr/gc/vxsort/CMakeLists.txt | 29 +++++++++++ src/coreclr/gc/vxsort/defs.h | 31 +----------- src/coreclr/gc/vxsort/machine_traits.avx2.h | 4 +- src/coreclr/gc/vxsort/machine_traits.avx512.h | 4 +- src/coreclr/gc/vxsort/packer.h | 12 ++--- .../bitonic_sort.AVX2.int32_t.generated.h | 2 +- .../bitonic_sort.AVX2.int64_t.generated.h | 2 +- .../bitonic_sort.AVX512.int32_t.generated.h | 2 +- .../bitonic_sort.AVX512.int64_t.generated.h | 2 +- .../gc/vxsort/smallsort/codegen/avx2.py | 2 +- 
.../gc/vxsort/smallsort/codegen/avx512.py | 2 +- src/coreclr/gc/vxsort/vxsort.h | 49 ++++++++++++++----- src/coreclr/inc/palclr_win.h | 4 -- .../Microsoft.NETCore.Native.Unix.targets | 3 ++ src/coreclr/nativeaot/Runtime/CMakeLists.txt | 4 +- .../nativeaot/Runtime/Full/CMakeLists.txt | 16 +++--- src/coreclr/pal/inc/pal.h | 8 --- src/coreclr/pal/inc/rt/specstrings_strict.h | 1 - src/coreclr/pal/inc/rt/specstrings_undef.h | 1 - src/coreclr/pal/src/include/pal/palinternal.h | 6 --- src/coreclr/vm/CMakeLists.txt | 15 ------ .../Directory.Build.props | 2 + src/native/libs/Common/pal_io_common.h | 1 - src/native/libs/Common/pal_utilities.h | 12 +---- src/native/minipal/utils.h | 19 +++++++ .../SmokeTests/HardwareIntrinsics/Program.cs | 2 +- 32 files changed, 142 insertions(+), 141 deletions(-) create mode 100644 src/coreclr/gc/vxsort/CMakeLists.txt diff --git a/src/coreclr/dlls/mscoree/coreclr/CMakeLists.txt b/src/coreclr/dlls/mscoree/coreclr/CMakeLists.txt index 2e2a8bf87eccda..c600af1fb6aada 100644 --- a/src/coreclr/dlls/mscoree/coreclr/CMakeLists.txt +++ b/src/coreclr/dlls/mscoree/coreclr/CMakeLists.txt @@ -111,6 +111,12 @@ set(CORECLR_LIBRARIES gc_pal ) +if(CLR_CMAKE_TARGET_ARCH_AMD64) + list(APPEND CORECLR_LIBRARIES + gc_vxsort + ) +endif(CLR_CMAKE_TARGET_ARCH_AMD64) + if(CLR_CMAKE_TARGET_WIN32) list(APPEND CORECLR_LIBRARIES ${STATIC_MT_CRT_LIB} diff --git a/src/coreclr/gc/CMakeLists.txt b/src/coreclr/gc/CMakeLists.txt index a1509b9898b621..89937554c04177 100644 --- a/src/coreclr/gc/CMakeLists.txt +++ b/src/coreclr/gc/CMakeLists.txt @@ -36,20 +36,9 @@ else() windows/Native.rc) endif(CLR_CMAKE_HOST_UNIX) -if (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) - set (GC_SOURCES - ${GC_SOURCES} - vxsort/isa_detection.cpp - vxsort/do_vxsort_avx2.cpp - vxsort/do_vxsort_avx512.cpp - vxsort/machine_traits.avx2.cpp - vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp - vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp - 
vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp - vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp - vxsort/smallsort/avx2_load_mask_tables.cpp -) -endif (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) +if (CLR_CMAKE_TARGET_ARCH_AMD64) + add_subdirectory(vxsort) +endif (CLR_CMAKE_TARGET_ARCH_AMD64) if (CLR_CMAKE_TARGET_WIN32) set(GC_HEADERS @@ -87,7 +76,7 @@ if (CLR_CMAKE_TARGET_WIN32) handletablepriv.h objecthandle.h softwarewritewatch.h - vxsort/do_vxsort.h) + ) endif(CLR_CMAKE_TARGET_WIN32) if(CLR_CMAKE_HOST_WIN32) @@ -100,6 +89,13 @@ endif(CLR_CMAKE_HOST_WIN32) set (GC_LINK_LIBRARIES ${GC_LINK_LIBRARIES} gc_pal) +if(CLR_CMAKE_TARGET_ARCH_AMD64) + list(APPEND GC_LINK_LIBRARIES + gc_vxsort + ) +endif(CLR_CMAKE_TARGET_ARCH_AMD64) + + list(APPEND GC_SOURCES ${GC_HEADERS}) convert_to_absolute_path(GC_SOURCES ${GC_SOURCES}) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index c8161a1e56d83f..67d6fa75e051f5 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -18,7 +18,7 @@ #include "gcpriv.h" -#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) +#ifdef TARGET_AMD64 #define USE_VXSORT #else #define USE_INTROSORT @@ -10305,11 +10305,11 @@ static void do_vxsort (uint8_t** item_array, ptrdiff_t item_count, uint8_t* rang { // above this threshold, using AVX2 for sorting will likely pay off // despite possible downclocking on some devices - const size_t AVX2_THRESHOLD_SIZE = 8 * 1024; + const ptrdiff_t AVX2_THRESHOLD_SIZE = 8 * 1024; // above this threshold, using AVX512F for sorting will likely pay off // despite possible downclocking on current devices - const size_t AVX512F_THRESHOLD_SIZE = 128 * 1024; + const ptrdiff_t AVX512F_THRESHOLD_SIZE = 128 * 1024; if (item_count <= 1) return; diff --git a/src/coreclr/gc/gcsvr.cpp b/src/coreclr/gc/gcsvr.cpp index 9e4a784735302f..5dc848f40c3f7f 100644 --- a/src/coreclr/gc/gcsvr.cpp +++ b/src/coreclr/gc/gcsvr.cpp @@ -20,7 +20,7 @@ #define SERVER_GC 1 -#if 
defined(TARGET_AMD64) && defined(TARGET_WINDOWS) +#ifdef TARGET_AMD64 #include "vxsort/do_vxsort.h" #endif diff --git a/src/coreclr/gc/gcwks.cpp b/src/coreclr/gc/gcwks.cpp index 7d599e8d8e51ff..6b4cfe1681463b 100644 --- a/src/coreclr/gc/gcwks.cpp +++ b/src/coreclr/gc/gcwks.cpp @@ -20,7 +20,7 @@ #undef SERVER_GC #endif -#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) +#ifdef TARGET_AMD64 #include "vxsort/do_vxsort.h" #endif diff --git a/src/coreclr/gc/unix/gcenv.unix.cpp b/src/coreclr/gc/unix/gcenv.unix.cpp index e8a92a831361dc..b50297e25b25a6 100644 --- a/src/coreclr/gc/unix/gcenv.unix.cpp +++ b/src/coreclr/gc/unix/gcenv.unix.cpp @@ -35,12 +35,6 @@ #define __has_cpp_attribute(x) (0) #endif -#if __has_cpp_attribute(fallthrough) -#define FALLTHROUGH [[fallthrough]] -#else -#define FALLTHROUGH -#endif - #include #if HAVE_SYS_TIME_H diff --git a/src/coreclr/gc/vxsort/CMakeLists.txt b/src/coreclr/gc/vxsort/CMakeLists.txt new file mode 100644 index 00000000000000..fc55956832e3db --- /dev/null +++ b/src/coreclr/gc/vxsort/CMakeLists.txt @@ -0,0 +1,29 @@ +set(CMAKE_INCLUDE_CURRENT_DIR ON) +include_directories("../env") + +if(CLR_CMAKE_HOST_UNIX) + set_source_files_properties(isa_detection.cpp PROPERTIES COMPILE_FLAGS -mavx2) + set_source_files_properties(do_vxsort_avx2.cpp PROPERTIES COMPILE_FLAGS -mavx2) + set_source_files_properties(do_vxsort_avx512.cpp PROPERTIES COMPILE_FLAGS -mavx2) + set_source_files_properties(machine_traits.avx2.cpp PROPERTIES COMPILE_FLAGS -mavx2) + set_source_files_properties(smallsort/bitonic_sort.AVX2.int64_t.generated.cpp PROPERTIES COMPILE_FLAGS -mavx2) + set_source_files_properties(smallsort/bitonic_sort.AVX2.int32_t.generated.cpp PROPERTIES COMPILE_FLAGS -mavx2) + set_source_files_properties(smallsort/bitonic_sort.AVX512.int64_t.generated.cpp PROPERTIES COMPILE_FLAGS -mavx2) + set_source_files_properties(smallsort/bitonic_sort.AVX512.int32_t.generated.cpp PROPERTIES COMPILE_FLAGS -mavx2) + 
set_source_files_properties(smallsort/avx2_load_mask_tables.cpp PROPERTIES COMPILE_FLAGS -mavx2) +endif(CLR_CMAKE_HOST_UNIX) + +set (VXSORT_SOURCES + isa_detection.cpp + do_vxsort_avx2.cpp + do_vxsort_avx512.cpp + machine_traits.avx2.cpp + smallsort/bitonic_sort.AVX2.int64_t.generated.cpp + smallsort/bitonic_sort.AVX2.int32_t.generated.cpp + smallsort/bitonic_sort.AVX512.int64_t.generated.cpp + smallsort/bitonic_sort.AVX512.int32_t.generated.cpp + smallsort/avx2_load_mask_tables.cpp + do_vxsort.h +) + +add_library(gc_vxsort STATIC ${VXSORT_SOURCES}) diff --git a/src/coreclr/gc/vxsort/defs.h b/src/coreclr/gc/vxsort/defs.h index 0cc72b23fa24e1..d048185884770e 100644 --- a/src/coreclr/gc/vxsort/defs.h +++ b/src/coreclr/gc/vxsort/defs.h @@ -45,36 +45,7 @@ #define NOINLINE __attribute__((noinline)) #endif -namespace std { -template -class numeric_limits { - public: - static constexpr _Ty Max() { static_assert(sizeof(_Ty) != sizeof(_Ty), "func must be specialized!"); return _Ty(); } - static constexpr _Ty Min() { static_assert(sizeof(_Ty) != sizeof(_Ty), "func must be specialized!"); return _Ty(); } -}; - -template <> -class numeric_limits { -public: - static constexpr int32_t Max() { return 0x7fffffff; } - static constexpr int32_t Min() { return -0x7fffffff - 1; } -}; - -template <> -class numeric_limits { -public: - static constexpr uint32_t Max() { return 0xffffffff; } - static constexpr uint32_t Min() { return 0; } -}; - -template <> -class numeric_limits { - public: - static constexpr int64_t Max() { return 0x7fffffffffffffffi64; } - - static constexpr int64_t Min() { return -0x7fffffffffffffffi64 - 1; } -}; -} // namespace std +#include #ifndef max template diff --git a/src/coreclr/gc/vxsort/machine_traits.avx2.h b/src/coreclr/gc/vxsort/machine_traits.avx2.h index ccadc2a9a27a53..7aca281e288ea9 100644 --- a/src/coreclr/gc/vxsort/machine_traits.avx2.h +++ b/src/coreclr/gc/vxsort/machine_traits.avx2.h @@ -13,6 +13,7 @@ #include #include #include +#include #include 
"defs.h" #include "machine_traits.h" @@ -123,8 +124,7 @@ class vxsort_machine_traits { template static constexpr bool can_pack(T span) { - const auto PACK_LIMIT = (((TU) std::numeric_limits::Max() + 1)) << Shift; - return ((TU) span) < PACK_LIMIT; + return ((TU) span) < ((((TU) std::numeric_limits::max() + 1)) << Shift); } static INLINE TV load_vec(TV* p) { return _mm256_lddqu_si256(p); } diff --git a/src/coreclr/gc/vxsort/machine_traits.avx512.h b/src/coreclr/gc/vxsort/machine_traits.avx512.h index 8df8660aa13a76..78f59dee99a36d 100644 --- a/src/coreclr/gc/vxsort/machine_traits.avx512.h +++ b/src/coreclr/gc/vxsort/machine_traits.avx512.h @@ -11,6 +11,7 @@ #include "vxsort_targets_enable_avx512.h" #include +#include #include "defs.h" #include "machine_traits.h" @@ -92,8 +93,7 @@ class vxsort_machine_traits { template static constexpr bool can_pack(T span) { - const auto PACK_LIMIT = (((TU) std::numeric_limits::Max() + 1)) << Shift; - return ((TU) span) < PACK_LIMIT; + return ((TU) span) < ((((TU) std::numeric_limits::max() + 1)) << Shift); } static INLINE TV load_vec(TV* p) { return _mm512_loadu_si512(p); } diff --git a/src/coreclr/gc/vxsort/packer.h b/src/coreclr/gc/vxsort/packer.h index be50b7d5fb41b8..94f293dac71f7f 100644 --- a/src/coreclr/gc/vxsort/packer.h +++ b/src/coreclr/gc/vxsort/packer.h @@ -56,7 +56,7 @@ class packer { public: static void pack(TFrom *mem, size_t len, TFrom base) { - TFrom offset = MT::template shift_n_sub(base, (TFrom) std::numeric_limits::Min()); + TFrom offset = MT::template shift_n_sub(base, (TFrom) std::numeric_limits::min()); auto baseVec = MT::broadcast(offset); auto pre_aligned_mem = reinterpret_cast(reinterpret_cast(mem) & ~ALIGN_MASK); @@ -87,8 +87,8 @@ class packer { assert(AH::is_aligned(mem_read)); - auto memv_read = (TV *) mem_read; - auto memv_write = (TV *) mem_write; + TV * memv_read = (TV *) mem_read; + TV * memv_write = (TV *) mem_write; auto lenv = len / N; len -= (lenv * N); @@ -156,7 +156,7 @@ class packer { static 
void unpack(TTo *mem, size_t len, TFrom base) { - TFrom offset = MT::template shift_n_sub(base, (TFrom) std::numeric_limits::Min()); + TFrom offset = MT::template shift_n_sub(base, (TFrom) std::numeric_limits::min()); auto baseVec = MT::broadcast(offset); auto mem_read = mem + len; @@ -184,8 +184,8 @@ class packer { assert(AH::is_aligned(mem_read)); auto lenv = len / (N * 2); - auto memv_read = ((TV *) mem_read) - 1; - auto memv_write = ((TV *) mem_write) - 2; + TV * memv_read = ((TV *) mem_read) - 1; + TV * memv_write = ((TV *) mem_write) - 2; len -= lenv * N * 2; while (lenv >= Unroll) { diff --git a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h index c3f141c1046bb1..c805a425fbeaed 100644 --- a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h +++ b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h @@ -39,7 +39,7 @@ extern "C" const uint8_t mask_table_8[M8_SIZE]; template<> struct bitonic { static const int N = 8; - static constexpr int32_t MAX = std::numeric_limits::Max(); + static constexpr int32_t MAX = std::numeric_limits::max(); public: static INLINE void sort_01v_ascending(__m256i& d01) { diff --git a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h index a012161c99dd9c..c3403bbe31aaa4 100644 --- a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h +++ b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h @@ -39,7 +39,7 @@ extern "C" const uint8_t mask_table_8[M8_SIZE]; template<> struct bitonic { static const int N = 4; - static constexpr int64_t MAX = std::numeric_limits::Max(); + static constexpr int64_t MAX = std::numeric_limits::max(); public: static INLINE void sort_01v_ascending(__m256i& d01) { diff --git a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h 
b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h index 1326c8fee5e5c7..eb9ee4d275926d 100644 --- a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h +++ b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h @@ -36,7 +36,7 @@ namespace vxsort { namespace smallsort { template<> struct bitonic { static const int N = 16; - static constexpr int32_t MAX = std::numeric_limits::Max(); + static constexpr int32_t MAX = std::numeric_limits::max(); public: static INLINE void sort_01v_ascending(__m512i& d01) { diff --git a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h index ac44992fe23920..98fe507b734306 100644 --- a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h +++ b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h @@ -36,7 +36,7 @@ namespace vxsort { namespace smallsort { template<> struct bitonic { static const int N = 8; - static constexpr int64_t MAX = std::numeric_limits::Max(); + static constexpr int64_t MAX = std::numeric_limits::max(); public: static INLINE void sort_01v_ascending(__m512i& d01) { diff --git a/src/coreclr/gc/vxsort/smallsort/codegen/avx2.py b/src/coreclr/gc/vxsort/smallsort/codegen/avx2.py index 9944cbbc8968e6..b9c39770d549cb 100644 --- a/src/coreclr/gc/vxsort/smallsort/codegen/avx2.py +++ b/src/coreclr/gc/vxsort/smallsort/codegen/avx2.py @@ -303,7 +303,7 @@ def generate_prologue(self, f): template<> struct bitonic<{t}, AVX2> {{ static const int N = {self.vector_size()}; - static constexpr {t} MAX = std::numeric_limits<{t}>::Max(); + static constexpr {t} MAX = std::numeric_limits<{t}>::max(); public: """ print(s, file=f) diff --git a/src/coreclr/gc/vxsort/smallsort/codegen/avx512.py b/src/coreclr/gc/vxsort/smallsort/codegen/avx512.py index e259027c5636bb..9b417723c6e3b3 100644 --- a/src/coreclr/gc/vxsort/smallsort/codegen/avx512.py +++ 
b/src/coreclr/gc/vxsort/smallsort/codegen/avx512.py @@ -299,7 +299,7 @@ def generate_prologue(self, f): namespace smallsort {{ template<> struct bitonic<{t}, AVX512> {{ static const int N = {self.vector_size()}; - static constexpr {t} MAX = std::numeric_limits<{t}>::Max(); + static constexpr {t} MAX = std::numeric_limits<{t}>::max(); public: """ print(s, file=f) diff --git a/src/coreclr/gc/vxsort/vxsort.h b/src/coreclr/gc/vxsort/vxsort.h index b8eaac51f42131..ace20c10734fd9 100644 --- a/src/coreclr/gc/vxsort/vxsort.h +++ b/src/coreclr/gc/vxsort/vxsort.h @@ -13,10 +13,11 @@ #endif #endif - #include #include +#include + #include "defs.h" #include "alignment.h" #include "machine_traits.h" @@ -374,7 +375,7 @@ class vxsort { auto pivot = *right; // We do this here just in case we need to pre-align to the right // We end up - *right = std::numeric_limits::Max(); + *right = std::numeric_limits::max(); // Broadcast the selected pivot const TV P = MT::broadcast(pivot); @@ -421,16 +422,16 @@ class vxsort { // From now on, we are fully aligned // and all reading is done in full vector units - auto readLeftV = (TV*) readLeft; - auto readRightV = (TV*) readRight; + TV* readLeftV = (TV*) readLeft; + TV* readRightV = (TV*) readRight; #ifndef NDEBUG readLeft = nullptr; readRight = nullptr; #endif for (auto u = 0; u < InnerUnroll; u++) { - auto dl = MT::load_vec(readLeftV + u); - auto dr = MT::load_vec(readRightV - (u + 1)); + TV dl = MT::load_vec(readLeftV + u); + TV dr = MT::load_vec(readRightV - (u + 1)); partition_block(dl, P, tmpLeft, tmpRight); partition_block(dr, P, tmpLeft, tmpRight); } @@ -458,31 +459,53 @@ class vxsort { switch (InnerUnroll) { case 12: d12 = MT::load_vec(nextPtr + InnerUnroll - 12); + FALLTHROUGH; case 11: d11 = MT::load_vec(nextPtr + InnerUnroll - 11); + FALLTHROUGH; case 10: d10 = MT::load_vec(nextPtr + InnerUnroll - 10); + FALLTHROUGH; case 9: d09 = MT::load_vec(nextPtr + InnerUnroll - 9); + FALLTHROUGH; case 8: d08 = MT::load_vec(nextPtr + InnerUnroll 
- 8); + FALLTHROUGH; case 7: d07 = MT::load_vec(nextPtr + InnerUnroll - 7); + FALLTHROUGH; case 6: d06 = MT::load_vec(nextPtr + InnerUnroll - 6); + FALLTHROUGH; case 5: d05 = MT::load_vec(nextPtr + InnerUnroll - 5); + FALLTHROUGH; case 4: d04 = MT::load_vec(nextPtr + InnerUnroll - 4); + FALLTHROUGH; case 3: d03 = MT::load_vec(nextPtr + InnerUnroll - 3); + FALLTHROUGH; case 2: d02 = MT::load_vec(nextPtr + InnerUnroll - 2); + FALLTHROUGH; case 1: d01 = MT::load_vec(nextPtr + InnerUnroll - 1); } switch (InnerUnroll) { case 12: partition_block(d12, P, writeLeft, writeRight); + FALLTHROUGH; case 11: partition_block(d11, P, writeLeft, writeRight); + FALLTHROUGH; case 10: partition_block(d10, P, writeLeft, writeRight); + FALLTHROUGH; case 9: partition_block(d09, P, writeLeft, writeRight); + FALLTHROUGH; case 8: partition_block(d08, P, writeLeft, writeRight); + FALLTHROUGH; case 7: partition_block(d07, P, writeLeft, writeRight); + FALLTHROUGH; case 6: partition_block(d06, P, writeLeft, writeRight); + FALLTHROUGH; case 5: partition_block(d05, P, writeLeft, writeRight); + FALLTHROUGH; case 4: partition_block(d04, P, writeLeft, writeRight); + FALLTHROUGH; case 3: partition_block(d03, P, writeLeft, writeRight); + FALLTHROUGH; case 2: partition_block(d02, P, writeLeft, writeRight); + FALLTHROUGH; case 1: partition_block(d01, P, writeLeft, writeRight); } } @@ -499,7 +522,7 @@ class vxsort { readLeftV += 1; } - auto d = MT::load_vec(nextPtr); + TV d = MT::load_vec(nextPtr); partition_block(d, P, writeLeft, writeRight); //partition_block_without_compress(d, P, writeLeft, writeRight); } @@ -534,8 +557,8 @@ class vxsort { const auto rightAlign = hint.right_align; const auto rai = ~((rightAlign - 1) >> 31); const auto lai = leftAlign >> 31; - const auto preAlignedLeft = (TV*) (left + leftAlign); - const auto preAlignedRight = (TV*) (right + rightAlign - N); + TV* const preAlignedLeft = (TV*) (left + leftAlign); + TV* const preAlignedRight = (TV*) (right + rightAlign - N); #ifdef 
VXSORT_STATS vxsort_stats::bump_vec_loads(2); @@ -554,8 +577,8 @@ class vxsort { // were actually needed to be written to the right hand side // e) We write the right portion of the left vector to the right side // now that its write position has been updated - auto RT0 = MT::load_vec(preAlignedRight); - auto LT0 = MT::load_vec(preAlignedLeft); + TV RT0 = MT::load_vec(preAlignedRight); + TV LT0 = MT::load_vec(preAlignedLeft); auto rtMask = MT::get_cmpgt_mask(RT0, P); auto ltMask = MT::get_cmpgt_mask(LT0, P); const auto rtPopCountRightPart = max(_mm_popcnt_u32(rtMask), rightAlign); @@ -617,8 +640,8 @@ class vxsort { * larger-than than all values contained within the provided array. */ NOINLINE void sort(T* left, T* right, - T left_hint = std::numeric_limits::Min(), - T right_hint = std::numeric_limits::Max()) + T left_hint = std::numeric_limits::min(), + T right_hint = std::numeric_limits::max()) { // init_isa_detection(); diff --git a/src/coreclr/inc/palclr_win.h b/src/coreclr/inc/palclr_win.h index a9ee78e32f42f2..be0b725e1a6896 100644 --- a/src/coreclr/inc/palclr_win.h +++ b/src/coreclr/inc/palclr_win.h @@ -140,8 +140,4 @@ typedef HMODULE NATIVE_LIBRARY_HANDLE; #endif // HOST_WINDOWS -#ifndef FALLTHROUGH -#define FALLTHROUGH __fallthrough -#endif // FALLTHROUGH - #endif // __PALCLR_WIN_H__ diff --git a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets index 42a05293f7697a..d82f02fd7f17af 100644 --- a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets +++ b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets @@ -59,6 +59,8 @@ The .NET Foundation licenses this file to you under the MIT license. libeventpipe-enabled true + libRuntime.VxsortEnabled + libRuntime.VxsortDisabled libstandalonegc-disabled libstandalonegc-enabled @@ -117,6 +119,7 @@ The .NET Foundation licenses this file to you under the MIT license. 
+ diff --git a/src/coreclr/nativeaot/Runtime/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/CMakeLists.txt index 3d0dc1541af04b..c1bb58caa30b9a 100644 --- a/src/coreclr/nativeaot/Runtime/CMakeLists.txt +++ b/src/coreclr/nativeaot/Runtime/CMakeLists.txt @@ -185,7 +185,7 @@ if (CLR_CMAKE_TARGET_APPLE) ) endif (CLR_CMAKE_TARGET_APPLE) -if (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) +if (CLR_CMAKE_TARGET_ARCH_AMD64) set(VXSORT_SOURCES ${GC_DIR}/vxsort/isa_detection.cpp ${GC_DIR}/vxsort/do_vxsort_avx2.cpp @@ -201,7 +201,7 @@ if (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) set(DUMMY_VXSORT_SOURCES ${GC_DIR}/vxsort/dummy.cpp ) -endif (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) +endif (CLR_CMAKE_TARGET_ARCH_AMD64) list(APPEND RUNTIME_SOURCES_ARCH_ASM ${ARCH_SOURCES_DIR}/AllocFast.${ASM_SUFFIX} diff --git a/src/coreclr/nativeaot/Runtime/Full/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/Full/CMakeLists.txt index e665a6c88ee10b..f9b390e18d117a 100644 --- a/src/coreclr/nativeaot/Runtime/Full/CMakeLists.txt +++ b/src/coreclr/nativeaot/Runtime/Full/CMakeLists.txt @@ -41,10 +41,10 @@ if(CLR_CMAKE_TARGET_WIN32) add_dependencies(standalonegc-enabled aot_etw_headers) endif() -if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) +if (CLR_CMAKE_TARGET_ARCH_AMD64) add_library(Runtime.VxsortEnabled STATIC ${VXSORT_SOURCES}) add_library(Runtime.VxsortDisabled STATIC ${DUMMY_VXSORT_SOURCES}) -endif (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) +endif (CLR_CMAKE_TARGET_ARCH_AMD64) target_compile_definitions(Runtime.ServerGC PRIVATE -DFEATURE_SVR_GC) @@ -116,13 +116,15 @@ install_static_library(Runtime.ServerGC aotsdk nativeaot) install_static_library(standalonegc-disabled aotsdk nativeaot) install_static_library(standalonegc-enabled aotsdk nativeaot) if (CLR_CMAKE_TARGET_WIN32) - if (CLR_CMAKE_TARGET_ARCH_AMD64) - install_static_library(Runtime.VxsortEnabled aotsdk nativeaot) - install_static_library(Runtime.VxsortDisabled aotsdk 
nativeaot) - install_static_library(Runtime.VxsortEnabled.GuardCF aotsdk nativeaot) - endif (CLR_CMAKE_TARGET_ARCH_AMD64) install_static_library(Runtime.ServerGC.GuardCF aotsdk nativeaot) add_dependencies(Runtime.ServerGC.GuardCF aot_eventing_headers) install_static_library(standalonegc-disabled.GuardCF aotsdk nativeaot) install_static_library(standalonegc-enabled.GuardCF aotsdk nativeaot) endif (CLR_CMAKE_TARGET_WIN32) +if (CLR_CMAKE_TARGET_ARCH_AMD64) + install_static_library(Runtime.VxsortEnabled aotsdk nativeaot) + install_static_library(Runtime.VxsortDisabled aotsdk nativeaot) + if (CLR_CMAKE_TARGET_WIN32) + install_static_library(Runtime.VxsortEnabled.GuardCF aotsdk nativeaot) + endif (CLR_CMAKE_TARGET_WIN32) +endif (CLR_CMAKE_TARGET_ARCH_AMD64) \ No newline at end of file diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 00164249770069..7d27a1109b4507 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -175,14 +175,6 @@ extern bool g_arm64_atomics_present; #define __has_cpp_attribute(x) (0) #endif -#ifndef FALLTHROUGH -#if __has_cpp_attribute(fallthrough) -#define FALLTHROUGH [[fallthrough]] -#else // __has_cpp_attribute(fallthrough) -#define FALLTHROUGH -#endif // __has_cpp_attribute(fallthrough) -#endif // FALLTHROUGH - #ifndef PAL_STDCPP_COMPAT #if __GNUC__ diff --git a/src/coreclr/pal/inc/rt/specstrings_strict.h b/src/coreclr/pal/inc/rt/specstrings_strict.h index dadb49930ceb88..52ade79cde13c0 100644 --- a/src/coreclr/pal/inc/rt/specstrings_strict.h +++ b/src/coreclr/pal/inc/rt/specstrings_strict.h @@ -630,7 +630,6 @@ #define __callback __allowed(on_function) #define __format_string __allowed(on_parameter_or_return) #define __blocksOn(resource) __allowed(on_function) -#define __fallthrough __allowed(as_statement) #define __range(lb,ub) __allowed(on_return) #define __in_range(lb,ub) _SAL_VERSION_CHECK(__in_range) #define __out_range(lb,ub) _SAL_VERSION_CHECK(__out_range) diff --git 
a/src/coreclr/pal/inc/rt/specstrings_undef.h b/src/coreclr/pal/inc/rt/specstrings_undef.h index b0e1848c5eb86b..374b10069c1bf8 100644 --- a/src/coreclr/pal/inc/rt/specstrings_undef.h +++ b/src/coreclr/pal/inc/rt/specstrings_undef.h @@ -261,7 +261,6 @@ #undef __encoded_array #undef __encoded_pointer #undef __exceptthat -#undef __fallthrough #undef __field_bcount #undef __field_bcount_full #undef __field_bcount_full_opt diff --git a/src/coreclr/pal/src/include/pal/palinternal.h b/src/coreclr/pal/src/include/pal/palinternal.h index 041118d3916514..15887d03773822 100644 --- a/src/coreclr/pal/src/include/pal/palinternal.h +++ b/src/coreclr/pal/src/include/pal/palinternal.h @@ -769,12 +769,6 @@ const char StackOverflowMessage[] = "Stack overflow.\n"; #endif // __cplusplus -#if __has_cpp_attribute(fallthrough) -#define FALLTHROUGH [[fallthrough]] -#else -#define FALLTHROUGH -#endif - DWORD PALAPI GetCurrentSessionId(); #endif /* _PAL_INTERNAL_H_ */ diff --git a/src/coreclr/vm/CMakeLists.txt b/src/coreclr/vm/CMakeLists.txt index 844b8b4731ec81..345d5ac35f00e9 100644 --- a/src/coreclr/vm/CMakeLists.txt +++ b/src/coreclr/vm/CMakeLists.txt @@ -487,21 +487,6 @@ set(GC_SOURCES_WKS ../gc/softwarewritewatch.cpp ../gc/handletablecache.cpp) -if (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) - set ( GC_SOURCES_WKS - ${GC_SOURCES_WKS} - ../gc/vxsort/isa_detection.cpp - ../gc/vxsort/do_vxsort_avx2.cpp - ../gc/vxsort/do_vxsort_avx512.cpp - ../gc/vxsort/machine_traits.avx2.cpp - ../gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp - ../gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp - ../gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp - ../gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp - ../gc/vxsort/smallsort/avx2_load_mask_tables.cpp -) -endif (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) - set(GC_HEADERS_WKS ${GC_HEADERS_DAC_AND_WKS_COMMON} ../gc/gceventstatus.h diff --git 
a/src/installer/pkg/sfx/Microsoft.NETCore.App/Directory.Build.props b/src/installer/pkg/sfx/Microsoft.NETCore.App/Directory.Build.props index aedd89c6147dfa..e1e1ef7c08aef2 100644 --- a/src/installer/pkg/sfx/Microsoft.NETCore.App/Directory.Build.props +++ b/src/installer/pkg/sfx/Microsoft.NETCore.App/Directory.Build.props @@ -145,6 +145,8 @@ + + diff --git a/src/native/libs/Common/pal_io_common.h b/src/native/libs/Common/pal_io_common.h index 328b33f43022c7..27022e5c8fe6a2 100644 --- a/src/native/libs/Common/pal_io_common.h +++ b/src/native/libs/Common/pal_io_common.h @@ -8,7 +8,6 @@ #include #include #include -#include /** * Our intermediate pollfd struct to normalize the data types diff --git a/src/native/libs/Common/pal_utilities.h b/src/native/libs/Common/pal_utilities.h index 3fece3a08aa3e8..7b5fa63b6cac03 100644 --- a/src/native/libs/Common/pal_utilities.h +++ b/src/native/libs/Common/pal_utilities.h @@ -15,6 +15,8 @@ #include #include +#include + #ifdef DEBUG #define assert_err(cond, msg, err) do \ { \ @@ -43,16 +45,6 @@ #define CONST_CAST2(TOTYPE, FROMTYPE, X) ((union { FROMTYPE _q; TOTYPE _nq; }){ ._q = (X) }._nq) #define CONST_CAST(TYPE, X) CONST_CAST2(TYPE, const TYPE, (X)) -#ifndef __has_attribute -#define __has_attribute(x) (0) -#endif - -#if __has_attribute(fallthrough) -#define FALLTHROUGH __attribute__((fallthrough)) -#else -#define FALLTHROUGH -#endif - /** * Abstraction helper method to safely copy strings using strlcpy or strcpy_s * or a different safe copy method, depending on the current platform. 
diff --git a/src/native/minipal/utils.h b/src/native/minipal/utils.h index 644ed21f2714fb..ef840a529f48f7 100644 --- a/src/native/minipal/utils.h +++ b/src/native/minipal/utils.h @@ -13,6 +13,25 @@ #define __has_builtin(x) 0 #endif +#ifndef __has_attribute +#define __has_attribute(x) 0 +#endif + +#ifdef __cplusplus +# ifndef __has_cpp_attribute +# define __has_cpp_attribute(x) 0 +# endif +# if __has_cpp_attribute(fallthrough) +# define FALLTHROUGH [[fallthrough]] +# else +# define FALLTHROUGH +# endif +#elif __has_attribute(fallthrough) +# define FALLTHROUGH __attribute__((fallthrough)) +#else +# define FALLTHROUGH +#endif + #if defined(_MSC_VER) # if defined(__SANITIZE_ADDRESS__) # define HAS_ADDRESS_SANITIZER diff --git a/src/tests/nativeaot/SmokeTests/HardwareIntrinsics/Program.cs b/src/tests/nativeaot/SmokeTests/HardwareIntrinsics/Program.cs index e628938c57db82..28b678b5c2b227 100644 --- a/src/tests/nativeaot/SmokeTests/HardwareIntrinsics/Program.cs +++ b/src/tests/nativeaot/SmokeTests/HardwareIntrinsics/Program.cs @@ -22,7 +22,7 @@ static int Main() long lowerBound, upperBound; lowerBound = 1300 * 1024; // ~1.3 MB - upperBound = 1750 * 1024; // ~1.75 MB + upperBound = 1900 * 1024; // ~1.90 MB if (fileSize < lowerBound || fileSize > upperBound) { From 48ea62a8e7807a3bddb67c30903c42fc3862dd0e Mon Sep 17 00:00:00 2001 From: Badre BSAILA <54767641+pedrobsaila@users.noreply.github.com> Date: Wed, 3 Apr 2024 22:32:10 +0200 Subject: [PATCH 070/132] PEReader throws exception when using PEStreamOptions.PrefetchMetadata for some assemblies (#100472) --- .../System/Reflection/PortableExecutable/PEReader.cs | 6 +++++- .../tests/PortableExecutable/PEReaderTests.cs | 10 ++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/src/libraries/System.Reflection.Metadata/src/System/Reflection/PortableExecutable/PEReader.cs b/src/libraries/System.Reflection.Metadata/src/System/Reflection/PortableExecutable/PEReader.cs index 6b179a54c1cc09..37533d8c3c7c3d 100644 
--- a/src/libraries/System.Reflection.Metadata/src/System/Reflection/PortableExecutable/PEReader.cs +++ b/src/libraries/System.Reflection.Metadata/src/System/Reflection/PortableExecutable/PEReader.cs @@ -201,7 +201,11 @@ public unsafe PEReader(Stream peStream, PEStreamOptions options, int size) { // The peImage is left null, but the lazyMetadataBlock is initialized up front. _lazyPEHeaders = new PEHeaders(peStream, actualSize, IsLoadedImage); - _lazyMetadataBlock = StreamMemoryBlockProvider.ReadMemoryBlockNoLock(peStream, _lazyPEHeaders.MetadataStartOffset, _lazyPEHeaders.MetadataSize); + + if (_lazyPEHeaders.MetadataStartOffset != -1) + { + _lazyMetadataBlock = StreamMemoryBlockProvider.ReadMemoryBlockNoLock(peStream, _lazyPEHeaders.MetadataStartOffset, _lazyPEHeaders.MetadataSize); + } } // We read all we need, the stream is going to be closed. } diff --git a/src/libraries/System.Reflection.Metadata/tests/PortableExecutable/PEReaderTests.cs b/src/libraries/System.Reflection.Metadata/tests/PortableExecutable/PEReaderTests.cs index 20477b5a44b928..80312c66a44241 100644 --- a/src/libraries/System.Reflection.Metadata/tests/PortableExecutable/PEReaderTests.cs +++ b/src/libraries/System.Reflection.Metadata/tests/PortableExecutable/PEReaderTests.cs @@ -870,5 +870,15 @@ public unsafe void InvokeCtorWithIsLoadedImageAndPrefetchMetadataOptions2() } } } + + [Fact] + public void HasMetadataShouldReturnFalseWhenPrefetchingMetadataOfImageWithoutMetadata() + { + using (var fileStream = new MemoryStream(Misc.KeyPair)) + using (var peReader = new PEReader(fileStream, PEStreamOptions.PrefetchMetadata | PEStreamOptions.LeaveOpen)) + { + Assert.False(peReader.HasMetadata); + } + } } } From c19d68e52c6d743c8f81ed9049e8c172ae461c99 Mon Sep 17 00:00:00 2001 From: Alhad Deshpande <97085048+alhad-deshpande@users.noreply.github.com> Date: Thu, 4 Apr 2024 02:10:59 +0530 Subject: [PATCH 071/132] [ppc64le] Fixed insufficient memory exception issue (#100337) * [ppc64le] Fixed thunk address 8 
byte alignment issue * Fixed FSharp crash issue * [ppc64le] Implementation of mono_arch_get_delegate_virtual_invoke_impl method for ppc64le architecture * Fixed clang15 build issues and returning address of sc_sp instead of value * Added float32 support and implemented related opcodes * Correction in OP_RCONV_TO_R cases * Corrected code for few opcodes * [ppc64le] performance improvements while branching * [ppc64le] Fixed insufficient memory exception issue --- src/mono/mono/mini/method-to-ir.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/mono/mono/mini/method-to-ir.c b/src/mono/mono/mini/method-to-ir.c index 187e1a61d5c7cc..8d569eae92a35d 100644 --- a/src/mono/mono/mini/method-to-ir.c +++ b/src/mono/mono/mini/method-to-ir.c @@ -10561,7 +10561,6 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b context_used = mini_class_check_context_used (cfg, klass); -#ifndef TARGET_S390X if (sp [0]->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4) { MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4); ins->sreg1 = sp [0]->dreg; @@ -10570,7 +10569,8 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b MONO_ADD_INS (cfg->cbb, ins); *sp = mono_decompose_opcode (cfg, ins); } -#else + +#if defined(TARGET_S390X) || defined(TARGET_POWERPC64) /* The array allocator expects a 64-bit input, and we cannot rely on the high bits of a 32-bit result, so we have to extend. 
*/ if (sp [0]->type == STACK_I4 && TARGET_SIZEOF_VOID_P == 8) { From 44b1e87884c4d1c40054a4e699200a60cd70176d Mon Sep 17 00:00:00 2001 From: Bruce Forstall Date: Wed, 3 Apr 2024 14:18:48 -0700 Subject: [PATCH 072/132] Enable JitOptRepeat as a perf experiment (#100607) --- eng/pipelines/coreclr/perf-non-wasm-jobs.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/eng/pipelines/coreclr/perf-non-wasm-jobs.yml b/eng/pipelines/coreclr/perf-non-wasm-jobs.yml index 7a1a876ea6f6f7..c48103af929c81 100644 --- a/eng/pipelines/coreclr/perf-non-wasm-jobs.yml +++ b/eng/pipelines/coreclr/perf-non-wasm-jobs.yml @@ -342,6 +342,24 @@ jobs: logicalmachine: 'perfowl' experimentName: 'rlcse' + # run coreclr perfowl microbenchmarks perf jitoptrepeat jobs + - template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/coreclr/templates/perf-job.yml + buildConfig: release + runtimeFlavor: coreclr + platforms: + - linux_x64 + - windows_x64 + jobParameters: + testGroup: perf + liveLibrariesBuildConfig: Release + projectFile: microbenchmarks.proj + runKind: micro + runJobTemplate: /eng/pipelines/coreclr/templates/run-performance-job.yml + logicalmachine: 'perfowl' + experimentName: 'jitoptrepeat' + # run coreclr crossgen perf job - template: /eng/pipelines/common/platform-matrix.yml parameters: From f558631580f6939732e9049212c7f09ff868befb Mon Sep 17 00:00:00 2001 From: Andy Ayers Date: Wed, 3 Apr 2024 14:28:31 -0700 Subject: [PATCH 073/132] JIT: fix some display math issues in reconstruction doc (#100609) Github does not handle `$$..$$` blocks on consecutive lines. Add some blank lines in between. 
--- docs/design/coreclr/jit/profile-count-reconstruction.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/design/coreclr/jit/profile-count-reconstruction.md b/docs/design/coreclr/jit/profile-count-reconstruction.md index 94fb989a8f28bc..f5f4c8006eb5e1 100644 --- a/docs/design/coreclr/jit/profile-count-reconstruction.md +++ b/docs/design/coreclr/jit/profile-count-reconstruction.md @@ -9,7 +9,9 @@ The appeal of edge well-formedness is easy to check and relatively easy to maint We will use $p_{i,j}$ to denote the likelihood that block $i$ transfers control to block $j$. Thus local consistency means: $$ 0 \le p_{i,j} \le 1 $$ + and, for blocks with successors: + $$ \sum_i p_{i,j} = 1 $$ By contrast, block weight consistency requires that the flow into a block be balanced by the flow out of a block. It is a *global* property and harder to maintain during optimizations. It may also not be true initially. @@ -120,9 +122,13 @@ $$ {(\boldsymbol I - \boldsymbol P)}^{-1} = \boldsymbol I + \boldsymbol P + \bol This gives rise to a simple *iterative* procedure for computing an approximate value of $\boldsymbol w$ (here superscripts on $\boldsymbol w$ are successive iterates, not powers) $$ \boldsymbol w^{(0)} = \boldsymbol e $$ + $$ \boldsymbol w^{(1)} = (\boldsymbol I + \boldsymbol P) \boldsymbol e = \boldsymbol e + \boldsymbol P \boldsymbol w^{(0)} $$ + $$ \boldsymbol w^{(2)} = (\boldsymbol I + \boldsymbol P + \boldsymbol P^2) \boldsymbol e = \boldsymbol e + \boldsymbol P \boldsymbol w^{(1)}$$ + $$ \dots$$ + $$ \boldsymbol w^{(k + 1)} = \boldsymbol e + \boldsymbol P \boldsymbol w^{(k)} $$ where we can achieve any desired precision for $\boldsymbol w$ by iterating until the successive $\boldsymbol w$ differ by a small amount. 
From 862c82f0e99b0addacba0793627a9c0df53fc097 Mon Sep 17 00:00:00 2001 From: Bruce Forstall Date: Wed, 3 Apr 2024 14:43:36 -0700 Subject: [PATCH 074/132] Update JIT sources to clang-format/clang-tidy 17.0.6 (#100498) * Update JIT sources to clang-format/clang-tidy 17.0.6 * Reformat * Reformat x86 --- eng/formatting/download-tools.ps1 | 14 +- eng/formatting/download-tools.sh | 21 +- .../coreclr/templates/format-job.yml | 3 +- src/coreclr/jit/.clang-format | 119 ++- src/coreclr/jit/_typeinfo.h | 19 +- src/coreclr/jit/abi.h | 7 +- src/coreclr/jit/alloc.cpp | 5 +- src/coreclr/jit/alloc.h | 13 +- src/coreclr/jit/arraystack.h | 3 +- src/coreclr/jit/assertionprop.cpp | 24 +- src/coreclr/jit/bitset.h | 13 +- src/coreclr/jit/bitsetasshortlong.h | 54 +- src/coreclr/jit/bitsetasuint64.h | 4 +- src/coreclr/jit/bitsetasuint64inclass.h | 7 +- src/coreclr/jit/block.cpp | 8 +- src/coreclr/jit/block.h | 160 ++-- src/coreclr/jit/blockset.h | 9 +- src/coreclr/jit/buildstring.cpp | 4 +- src/coreclr/jit/codegen.h | 155 +-- src/coreclr/jit/codegenarm.cpp | 10 +- src/coreclr/jit/codegenarm64.cpp | 16 +- src/coreclr/jit/codegenarm64test.cpp | 8 +- src/coreclr/jit/codegenarmarch.cpp | 92 +- src/coreclr/jit/codegencommon.cpp | 40 +- src/coreclr/jit/codegeninterface.h | 16 +- src/coreclr/jit/codegenlinear.cpp | 11 +- src/coreclr/jit/codegenloongarch64.cpp | 46 +- src/coreclr/jit/codegenriscv64.cpp | 46 +- src/coreclr/jit/codegenxarch.cpp | 56 +- src/coreclr/jit/compiler.cpp | 228 ++--- src/coreclr/jit/compiler.h | 387 ++++---- src/coreclr/jit/compiler.hpp | 40 +- src/coreclr/jit/compilerbitsettraits.h | 4 +- src/coreclr/jit/copyprop.cpp | 4 +- src/coreclr/jit/debuginfo.h | 18 +- src/coreclr/jit/decomposelongs.h | 5 +- src/coreclr/jit/disasm.cpp | 34 +- src/coreclr/jit/ee_il_dll.cpp | 8 +- src/coreclr/jit/ee_il_dll.hpp | 4 +- src/coreclr/jit/eeinterface.cpp | 43 +- src/coreclr/jit/emit.cpp | 152 +-- src/coreclr/jit/emit.h | 399 ++++---- src/coreclr/jit/emitarm.cpp | 42 +- 
src/coreclr/jit/emitarm.h | 4 +- src/coreclr/jit/emitarm64.cpp | 128 +-- src/coreclr/jit/emitarm64.h | 158 ++-- src/coreclr/jit/emitarm64sve.cpp | 358 +++---- src/coreclr/jit/emitloongarch64.cpp | 34 +- src/coreclr/jit/emitloongarch64.h | 16 +- src/coreclr/jit/emitpub.h | 36 +- src/coreclr/jit/emitriscv64.cpp | 28 +- src/coreclr/jit/emitriscv64.h | 24 +- src/coreclr/jit/emitxarch.cpp | 118 +-- src/coreclr/jit/emitxarch.h | 50 +- src/coreclr/jit/error.cpp | 4 +- src/coreclr/jit/fgbasic.cpp | 27 +- src/coreclr/jit/fgdiagnostic.cpp | 49 +- src/coreclr/jit/fgehopt.cpp | 8 +- src/coreclr/jit/fginline.cpp | 127 +-- src/coreclr/jit/fgopt.cpp | 38 +- src/coreclr/jit/fgprofile.cpp | 76 +- src/coreclr/jit/fgprofilesynthesis.h | 3 +- src/coreclr/jit/flowgraph.cpp | 35 +- src/coreclr/jit/forwardsub.cpp | 8 +- src/coreclr/jit/gcencode.cpp | 41 +- src/coreclr/jit/gcinfo.cpp | 3 +- src/coreclr/jit/gentree.cpp | 165 ++-- src/coreclr/jit/gentree.h | 517 ++++++---- src/coreclr/jit/gschecks.cpp | 3 +- src/coreclr/jit/hashbv.cpp | 4 +- src/coreclr/jit/hashbv.h | 31 +- src/coreclr/jit/helperexpansion.cpp | 12 +- src/coreclr/jit/host.h | 6 +- src/coreclr/jit/hostallocator.h | 2 +- src/coreclr/jit/hwintrinsic.cpp | 2 +- src/coreclr/jit/hwintrinsic.h | 25 +- src/coreclr/jit/hwintrinsiccodegenarm64.cpp | 5 +- src/coreclr/jit/hwintrinsiccodegenxarch.cpp | 30 +- src/coreclr/jit/hwintrinsicxarch.cpp | 14 +- src/coreclr/jit/importer.cpp | 85 +- src/coreclr/jit/importercalls.cpp | 886 +++++++++--------- src/coreclr/jit/importervectorization.cpp | 12 +- src/coreclr/jit/indirectcalltransformer.cpp | 16 +- src/coreclr/jit/inductionvariableopts.cpp | 7 +- src/coreclr/jit/inline.cpp | 2 +- src/coreclr/jit/inline.h | 42 +- src/coreclr/jit/inlinepolicy.cpp | 25 +- src/coreclr/jit/inlinepolicy.h | 39 +- src/coreclr/jit/instr.cpp | 12 +- src/coreclr/jit/instrsarm.h | 2 +- src/coreclr/jit/instrsarm64.h | 2 +- src/coreclr/jit/instrsloongarch64.h | 2 +- src/coreclr/jit/instrsxarch.h | 2 +- 
src/coreclr/jit/jit.h | 101 +- src/coreclr/jit/jitconfig.cpp | 4 +- src/coreclr/jit/jitconfig.h | 8 +- src/coreclr/jit/jitconfigvalues.h | 6 +- src/coreclr/jit/jitee.h | 6 +- src/coreclr/jit/jiteh.cpp | 20 +- src/coreclr/jit/jiteh.h | 7 +- src/coreclr/jit/jitexpandarray.h | 9 +- src/coreclr/jit/jitgcinfo.h | 23 +- src/coreclr/jit/jithashtable.h | 40 +- src/coreclr/jit/layout.cpp | 8 +- src/coreclr/jit/layout.h | 5 +- src/coreclr/jit/lclmorph.cpp | 12 +- src/coreclr/jit/lclvars.cpp | 109 ++- src/coreclr/jit/likelyclass.cpp | 8 +- src/coreclr/jit/lir.cpp | 47 +- src/coreclr/jit/lir.h | 22 +- src/coreclr/jit/liveness.cpp | 32 +- src/coreclr/jit/loopcloning.cpp | 4 +- src/coreclr/jit/loopcloning.h | 75 +- src/coreclr/jit/lower.cpp | 28 +- src/coreclr/jit/lower.h | 136 +-- src/coreclr/jit/lowerarmarch.cpp | 2 +- src/coreclr/jit/lowerxarch.cpp | 40 +- src/coreclr/jit/lsra.cpp | 131 ++- src/coreclr/jit/lsra.h | 348 ++++--- src/coreclr/jit/lsraarmarch.cpp | 34 +- src/coreclr/jit/lsrabuild.cpp | 179 ++-- src/coreclr/jit/lsraxarch.cpp | 38 +- src/coreclr/jit/morph.cpp | 102 +- src/coreclr/jit/morphblock.cpp | 10 +- src/coreclr/jit/objectalloc.cpp | 10 +- src/coreclr/jit/objectalloc.h | 30 +- src/coreclr/jit/optcse.cpp | 34 +- src/coreclr/jit/optcse.h | 24 +- src/coreclr/jit/optimizebools.cpp | 8 +- src/coreclr/jit/optimizer.cpp | 37 +- src/coreclr/jit/patchpoint.cpp | 4 +- src/coreclr/jit/phase.h | 17 +- src/coreclr/jit/promotion.cpp | 24 +- src/coreclr/jit/promotion.h | 71 +- src/coreclr/jit/promotiondecomposition.cpp | 8 +- src/coreclr/jit/rangecheck.cpp | 11 +- src/coreclr/jit/rangecheck.h | 29 +- src/coreclr/jit/rationalize.cpp | 3 +- src/coreclr/jit/rationalize.h | 3 +- src/coreclr/jit/redundantbranchopts.cpp | 4 +- src/coreclr/jit/regset.cpp | 10 +- src/coreclr/jit/regset.h | 10 +- src/coreclr/jit/scev.cpp | 8 +- src/coreclr/jit/scev.h | 34 +- src/coreclr/jit/scopeinfo.cpp | 14 +- src/coreclr/jit/sideeffects.cpp | 24 +- src/coreclr/jit/sideeffects.h | 3 +- 
src/coreclr/jit/simd.h | 18 +- src/coreclr/jit/simdashwintrinsic.cpp | 6 +- src/coreclr/jit/sm.cpp | 4 +- src/coreclr/jit/smallhash.h | 26 +- src/coreclr/jit/smcommon.h | 2 +- src/coreclr/jit/smopenum.h | 3 +- src/coreclr/jit/ssabuilder.cpp | 5 +- src/coreclr/jit/ssabuilder.h | 2 +- src/coreclr/jit/ssarenamestate.cpp | 5 +- src/coreclr/jit/ssarenamestate.h | 7 +- src/coreclr/jit/stacklevelsetter.h | 4 +- src/coreclr/jit/switchrecognition.cpp | 2 +- src/coreclr/jit/target.h | 39 +- src/coreclr/jit/targetamd64.cpp | 6 +- src/coreclr/jit/targetarm.cpp | 3 +- src/coreclr/jit/targetarm64.cpp | 4 +- src/coreclr/jit/targetx86.cpp | 3 +- src/coreclr/jit/treelifeupdater.cpp | 2 +- src/coreclr/jit/typelist.h | 2 +- src/coreclr/jit/unwind.cpp | 2 +- src/coreclr/jit/unwind.h | 70 +- src/coreclr/jit/unwindamd64.cpp | 2 +- src/coreclr/jit/unwindarm64.cpp | 6 +- src/coreclr/jit/unwindarmarch.cpp | 37 +- src/coreclr/jit/unwindloongarch64.cpp | 12 +- src/coreclr/jit/unwindriscv64.cpp | 2 +- src/coreclr/jit/utils.cpp | 19 +- src/coreclr/jit/utils.h | 44 +- src/coreclr/jit/valuenum.cpp | 40 +- src/coreclr/jit/valuenum.h | 111 ++- src/coreclr/jit/valuenumtype.h | 4 +- src/coreclr/jit/varset.h | 2 +- src/coreclr/jit/vartype.h | 12 +- 180 files changed, 4572 insertions(+), 3622 deletions(-) diff --git a/eng/formatting/download-tools.ps1 b/eng/formatting/download-tools.ps1 index 603a015c5e5f44..62d518bb11a8b2 100644 --- a/eng/formatting/download-tools.ps1 +++ b/eng/formatting/download-tools.ps1 @@ -8,17 +8,25 @@ function DownloadClangTool { $downloadOutputPath ) - $baseUri = "https://clrjit.blob.core.windows.net/clang-tools/windows" + $clangVersion = "17.0.6" + $clangToolsRootUrl = "https://clrjit2.blob.core.windows.net/clang-tools" + $clangPlatform = "windows-x64" + + $toolUrl = "$clangToolsRootUrl/$clangVersion/$clangPlatform/$toolName.exe" + $targetPath = "$downloadOutputPath\$toolName.exe" if (-not $(ls $downloadOutputPath | Where-Object { $_.Name -eq "$toolName.exe" })) { Retry({ - 
Write-Output "Downloading '$baseUri/$toolName.exe'" + Write-Output "Downloading '$toolUrl' to '$targetPath'" # Pass -PassThru as otherwise Invoke-WebRequest leaves a corrupted file if the download fails. With -PassThru the download is buffered first. # -UseBasicParsing is necessary for older PowerShells when Internet Explorer might not be installed/configured - $null = Invoke-WebRequest -Uri "$baseUri/$toolName.exe" -OutFile $(Join-Path $downloadOutputPath -ChildPath "$toolName.exe") -PassThru -UseBasicParsing + $null = Invoke-WebRequest -Uri "$toolUrl" -OutFile $(Join-Path $downloadOutputPath -ChildPath "$toolName.exe") -PassThru -UseBasicParsing }) } + else { + Write-Output "Found '$targetPath'" + } } $downloadPathFolder = Split-Path $PSScriptRoot -Parent | Split-Path -Parent | Join-Path -ChildPath "artifacts" | Join-Path -ChildPath "tools" diff --git a/eng/formatting/download-tools.sh b/eng/formatting/download-tools.sh index 44459dbc885ba6..023ed55ed6e005 100755 --- a/eng/formatting/download-tools.sh +++ b/eng/formatting/download-tools.sh @@ -15,21 +15,22 @@ done scriptroot="$( cd -P "$( dirname "$source" )" && pwd )" function DownloadClangTool { - targetPlatform=$(dotnet --info |grep RID:) - targetPlatform=${targetPlatform##*RID:* } - echo "dotnet RID: ${targetPlatform}" + + clangVersion="17.0.6" + clangToolsRootUrl="https://clrjit2.blob.core.windows.net/clang-tools" + + clangPlatform="$(dotnet --info | grep 'RID:')" + clangPlatform="${clangPlatform##*RID:* }" + echo "dotnet RID: ${clangPlatform}" # override common RIDs with compatible version so we don't need to upload binaries for each RID - case $targetPlatform in - osx.*-x64) - targetPlatform=osx.10.15-x64 - ;; + case $clangPlatform in ubuntu.*-x64) - targetPlatform=ubuntu.18.04-x64 - ;; + clangPlatform=linux-x64 + ;; esac - toolUrl=https://clrjit.blob.core.windows.net/clang-tools/${targetPlatform}/$1 + toolUrl="${clangToolsRootUrl}/${clangVersion}/${clangPlatform}/$1" toolOutput=$2/$1 echo "Downloading $1 
from ${toolUrl} to ${toolOutput}" diff --git a/eng/pipelines/coreclr/templates/format-job.yml b/eng/pipelines/coreclr/templates/format-job.yml index 7850b9b43ec46e..a4d5181fd6b5b0 100644 --- a/eng/pipelines/coreclr/templates/format-job.yml +++ b/eng/pipelines/coreclr/templates/format-job.yml @@ -48,8 +48,7 @@ jobs: displayName: 'Install .NET SDK' inputs: packageType: 'sdk' - version: '6.x' - includePreviewVersions: true + version: '8.x' installationPath: $(Agent.ToolsDirectory)/dotnet - script: $(PythonSetupScript) diff --git a/src/coreclr/jit/.clang-format b/src/coreclr/jit/.clang-format index 1e3930f7379d13..307b1d7128bdfb 100644 --- a/src/coreclr/jit/.clang-format +++ b/src/coreclr/jit/.clang-format @@ -1,80 +1,131 @@ --- -Language: Cpp +Language: Cpp AccessModifierOffset: -4 AlignAfterOpenBracket: Align -AlignConsecutiveAssignments: true -AlignConsecutiveDeclarations: true -AlignEscapedNewlinesLeft: false -AlignOperands: true -AlignTrailingComments: true + +AlignConsecutiveAssignments: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + PadOperators: true + +AlignConsecutiveBitFields: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false + +AlignConsecutiveDeclarations: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false + +AlignConsecutiveMacros: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false + +AlignEscapedNewlines: Right +AlignOperands: true + +AlignTrailingComments: + Kind: Always + OverEmptyLines: 0 + +AllowAllArgumentsOnNextLine: true AllowAllParametersOfDeclarationOnNextLine: true -AllowShortBlocksOnASingleLine: false +AllowShortBlocksOnASingleLine: Never AllowShortCaseLabelsOnASingleLine: false -AllowShortFunctionsOnASingleLine: Empty -AllowShortIfStatementsOnASingleLine: false +AllowShortEnumsOnASingleLine: false +AllowShortFunctionsOnASingleLine: None +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: Empty AllowShortLoopsOnASingleLine: false 
AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakBeforeMultilineStrings: false AlwaysBreakTemplateDeclarations: true BinPackArguments: true BinPackParameters: false + +BreakBeforeBraces: Custom BraceWrapping: - AfterClass: true - AfterControlStatement: true - AfterEnum: false - AfterFunction: true - AfterNamespace: false - AfterObjCDeclaration: false - AfterStruct: true - AfterUnion: true - BeforeCatch: true - BeforeElse: true - IndentBraces: false + AfterCaseLabel: true + AfterClass: true + AfterControlStatement: Always + AfterEnum: true + AfterFunction: true + AfterNamespace: true + AfterStruct: true + AfterUnion: true + AfterExternBlock: true + BeforeCatch: true + BeforeElse: true + BeforeLambdaBody: false + BeforeWhile: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true + BreakBeforeBinaryOperators: None -BreakBeforeBraces: Allman BreakBeforeTernaryOperators: true -BreakConstructorInitializersBeforeComma: true -ColumnLimit: 120 -CommentPragmas: '^ IWYU pragma:' -ConstructorInitializerAllOnOneLineOrOnePerLine: true +BreakConstructorInitializers: BeforeComma +BreakInheritanceList: BeforeComma +BreakStringLiterals: false + +ColumnLimit: 120 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 4 Cpp11BracedListStyle: true DerivePointerAlignment: false -DisableFormat: false +DisableFormat: false + +EmptyLineAfterAccessModifier: Leave +EmptyLineBeforeAccessModifier: Leave ExperimentalAutoDetectBinPacking: false -ForEachMacros: [ ] +ForEachMacros: [ ] +IndentAccessModifiers: false +IndentCaseBlocks: false IndentCaseLabels: true -IndentWidth: 4 +IndentExternBlock: false +IndentGotoLabels: true +IndentPPDirectives: None +IndentWidth: 4 IndentWrappedFunctionNames: false + +InsertNewlineAtEOF: true KeepEmptyLinesAtTheStartOfBlocks: true +LambdaBodyIndentation: OuterScope MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 
NamespaceIndentation: None -ObjCBlockIndentWidth: 2 -ObjCSpaceAfterProperty: false -ObjCSpaceBeforeProtocolList: true + PenaltyBreakBeforeFirstCallParameter: 400 PenaltyBreakComment: 50 PenaltyBreakFirstLessLess: 500 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 + PointerAlignment: Left ReflowComments: true -SortIncludes: false +SortIncludes: Never + SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 1 -SpacesInAngles: false +SpacesInAngles: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false -Standard: Cpp11 + +Standard: Latest TabWidth: 4 UseTab: Never ... diff --git a/src/coreclr/jit/_typeinfo.h b/src/coreclr/jit/_typeinfo.h index 42526eeb8de4bd..9285535b5531c4 100644 --- a/src/coreclr/jit/_typeinfo.h +++ b/src/coreclr/jit/_typeinfo.h @@ -41,25 +41,34 @@ class typeInfo private: var_types m_type; - union { + union + { CORINFO_CLASS_HANDLE m_cls; // Valid, but not always available, for TYP_REFs. methodPointerInfo* m_methodPointerInfo; // Valid only for function pointers. 
}; public: - typeInfo() : m_type(TYP_UNDEF), m_cls(NO_CLASS_HANDLE) + typeInfo() + : m_type(TYP_UNDEF) + , m_cls(NO_CLASS_HANDLE) { } - typeInfo(var_types type) : m_type(type), m_cls(NO_CLASS_HANDLE) + typeInfo(var_types type) + : m_type(type) + , m_cls(NO_CLASS_HANDLE) { } - typeInfo(CORINFO_CLASS_HANDLE cls) : m_type(TYP_REF), m_cls(cls) + typeInfo(CORINFO_CLASS_HANDLE cls) + : m_type(TYP_REF) + , m_cls(cls) { } - typeInfo(methodPointerInfo* methodPointerInfo) : m_type(TYP_I_IMPL), m_methodPointerInfo(methodPointerInfo) + typeInfo(methodPointerInfo* methodPointerInfo) + : m_type(TYP_I_IMPL) + , m_methodPointerInfo(methodPointerInfo) { assert(methodPointerInfo != nullptr); assert(methodPointerInfo->m_token.hMethod != nullptr); diff --git a/src/coreclr/jit/abi.h b/src/coreclr/jit/abi.h index 27e53c27efc7e3..82ec58b5d807f0 100644 --- a/src/coreclr/jit/abi.h +++ b/src/coreclr/jit/abi.h @@ -63,7 +63,9 @@ class RegisterQueue unsigned int m_index = 0; public: - RegisterQueue(const regNumber* regs, unsigned int numRegs) : m_regs(regs), m_numRegs(numRegs) + RegisterQueue(const regNumber* regs, unsigned int numRegs) + : m_regs(regs) + , m_numRegs(numRegs) { } @@ -187,7 +189,8 @@ class SwiftABIClassifier PlatformClassifier m_classifier; public: - SwiftABIClassifier(const ClassifierInfo& info) : m_classifier(info) + SwiftABIClassifier(const ClassifierInfo& info) + : m_classifier(info) { } diff --git a/src/coreclr/jit/alloc.cpp b/src/coreclr/jit/alloc.cpp index 6300376beeb6d4..7178066ab584c5 100644 --- a/src/coreclr/jit/alloc.cpp +++ b/src/coreclr/jit/alloc.cpp @@ -42,7 +42,10 @@ size_t ArenaAllocator::getDefaultPageSize() // ArenaAllocator::ArenaAllocator: // Default-constructs an arena allocator. 
ArenaAllocator::ArenaAllocator() - : m_firstPage(nullptr), m_lastPage(nullptr), m_nextFreeByte(nullptr), m_lastFreeByte(nullptr) + : m_firstPage(nullptr) + , m_lastPage(nullptr) + , m_nextFreeByte(nullptr) + , m_lastFreeByte(nullptr) { #if MEASURE_MEM_ALLOC memset(&m_stats, 0, sizeof(m_stats)); diff --git a/src/coreclr/jit/alloc.h b/src/coreclr/jit/alloc.h index cb3da79232f8bb..8899b87ad35523 100644 --- a/src/coreclr/jit/alloc.h +++ b/src/coreclr/jit/alloc.h @@ -22,9 +22,9 @@ enum CompMemKind class ArenaAllocator { private: - ArenaAllocator(const ArenaAllocator& other) = delete; + ArenaAllocator(const ArenaAllocator& other) = delete; ArenaAllocator& operator=(const ArenaAllocator& other) = delete; - ArenaAllocator& operator=(ArenaAllocator&& other) = delete; + ArenaAllocator& operator=(ArenaAllocator&& other) = delete; struct PageDescriptor { @@ -52,7 +52,7 @@ class ArenaAllocator void* allocateNewPage(size_t size); static void* allocateHostMemory(size_t size, size_t* pActualSize); - static void freeHostMemory(void* block, size_t size); + static void freeHostMemory(void* block, size_t size); #if MEASURE_MEM_ALLOC struct MemStats @@ -125,8 +125,8 @@ class ArenaAllocator public: MemStatsAllocator* getMemStatsAllocator(CompMemKind kind); - void finishMemStats(); - void dumpMemStats(FILE* file); + void finishMemStats(); + void dumpMemStats(FILE* file); static void dumpMaxMemStats(FILE* file); static void dumpAggregateMemStats(FILE* file); @@ -276,7 +276,8 @@ class CompIAllocator : public IAllocator char m_zeroLenAllocTarg; public: - CompIAllocator(CompAllocator alloc) : m_alloc(alloc) + CompIAllocator(CompAllocator alloc) + : m_alloc(alloc) { } diff --git a/src/coreclr/jit/arraystack.h b/src/coreclr/jit/arraystack.h index 83a43c9432ba0e..5d8a697a3820d3 100644 --- a/src/coreclr/jit/arraystack.h +++ b/src/coreclr/jit/arraystack.h @@ -10,7 +10,8 @@ class ArrayStack static const int builtinSize = 8; public: - explicit ArrayStack(CompAllocator alloc, int initialCapacity = 
builtinSize) : m_alloc(alloc) + explicit ArrayStack(CompAllocator alloc, int initialCapacity = builtinSize) + : m_alloc(alloc) { if (initialCapacity > builtinSize) { diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index 459d816181e502..e4e73adc58c6c4 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -1524,9 +1524,8 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1, assertion.op1.lcl.ssaNum = op1->AsLclVarCommon()->GetSsaNum(); assert((assertion.op1.lcl.ssaNum == SsaConfig::RESERVED_SSA_NUM) || - (assertion.op1.vn == - vnStore->VNConservativeNormalValue( - lvaGetDesc(lclNum)->GetPerSsaData(assertion.op1.lcl.ssaNum)->m_vnPair))); + (assertion.op1.vn == vnStore->VNConservativeNormalValue( + lvaGetDesc(lclNum)->GetPerSsaData(assertion.op1.lcl.ssaNum)->m_vnPair))); ssize_t cnsValue = 0; GenTreeFlags iconFlags = GTF_EMPTY; @@ -2770,7 +2769,7 @@ GenTree* Compiler::optVNBasedFoldExpr(BasicBlock* block, GenTree* parent, GenTre case GT_CALL: return optVNBasedFoldExpr_Call(block, parent, tree->AsCall()); - // We can add more VN-based foldings here. + // We can add more VN-based foldings here. 
default: break; @@ -3325,7 +3324,7 @@ bool Compiler::optIsProfitableToSubstitute(GenTree* dest, BasicBlock* destBlock, // GenTree* Compiler::optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, - Statement* stmt DEBUGARG(AssertionIndex index)) + Statement* stmt DEBUGARG(AssertionIndex index)) { const unsigned lclNum = tree->GetLclNum(); @@ -3580,7 +3579,7 @@ bool Compiler::optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVar // GenTree* Compiler::optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, - Statement* stmt DEBUGARG(AssertionIndex index)) + Statement* stmt DEBUGARG(AssertionIndex index)) { const AssertionDsc::AssertionDscOp1& op1 = curAssertion->op1; const AssertionDsc::AssertionDscOp2& op2 = curAssertion->op2; @@ -4529,8 +4528,9 @@ GenTree* Compiler::optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, Gen { printf("\nVN relop based copy assertion prop in " FMT_BB ":\n", compCurBB->bbNum); printf("Assertion index=#%02u: V%02d.%02d %s V%02d.%02d\n", index, op1->AsLclVar()->GetLclNum(), - op1->AsLclVar()->GetSsaNum(), (curAssertion->assertionKind == OAK_EQUAL) ? "==" : "!=", - op2->AsLclVar()->GetLclNum(), op2->AsLclVar()->GetSsaNum()); + op1->AsLclVar()->GetSsaNum(), + (curAssertion->assertionKind == OAK_EQUAL) ? "==" : "!=", op2->AsLclVar()->GetLclNum(), + op2->AsLclVar()->GetSsaNum()); gtDispTree(tree, nullptr, nullptr, true); } #endif @@ -4824,7 +4824,7 @@ GenTree* Compiler::optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tr // If both VN and assertion table yield a matching assertion, "pVnBased" // is only set and the return value is "NO_ASSERTION_INDEX." 
// -bool Compiler::optAssertionIsNonNull(GenTree* op, +bool Compiler::optAssertionIsNonNull(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)) { @@ -4871,7 +4871,7 @@ bool Compiler::optAssertionIsNonNull(GenTree* op, // Return Value: // index of assertion, or NO_ASSERTION_INDEX // -AssertionIndex Compiler::optAssertionIsNonNullInternal(GenTree* op, +AssertionIndex Compiler::optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)) { @@ -6283,7 +6283,9 @@ struct VNAssertionPropVisitorInfo Statement* stmt; BasicBlock* block; VNAssertionPropVisitorInfo(Compiler* pThis, BasicBlock* block, Statement* stmt) - : pThis(pThis), stmt(stmt), block(block) + : pThis(pThis) + , stmt(stmt) + , block(block) { } }; diff --git a/src/coreclr/jit/bitset.h b/src/coreclr/jit/bitset.h index b34d1f04b85f1f..6f1e3d8dcd0db1 100644 --- a/src/coreclr/jit/bitset.h +++ b/src/coreclr/jit/bitset.h @@ -59,7 +59,10 @@ class BitSetSupport FILE* OpOutputFile; public: - BitSetOpCounter(const char* fileName) : TotalOps(0), m_fileName(fileName), OpOutputFile(nullptr) + BitSetOpCounter(const char* fileName) + : TotalOps(0) + , m_fileName(fileName) + , OpOutputFile(nullptr) { for (unsigned i = 0; i < BSOP_NUMOPS; i++) { @@ -435,7 +438,9 @@ class BitSetOpsWithCounter Env m_env; public: - Iter(Env env, BitSetValueArgType bs) : m_iter(env, bs), m_env(env) + Iter(Env env, BitSetValueArgType bs) + : m_iter(env, bs) + , m_env(env) { } @@ -449,8 +454,8 @@ class BitSetOpsWithCounter // We define symbolic names for the various bitset implementations available, to allow choices between them. 
-#define BSUInt64 0 -#define BSShortLong 1 +#define BSUInt64 0 +#define BSShortLong 1 #define BSUInt64Class 2 /*****************************************************************************/ diff --git a/src/coreclr/jit/bitsetasshortlong.h b/src/coreclr/jit/bitsetasshortlong.h index 2ef293820fd264..006f66fc178dcb 100644 --- a/src/coreclr/jit/bitsetasshortlong.h +++ b/src/coreclr/jit/bitsetasshortlong.h @@ -32,36 +32,36 @@ class BitSetOps m_bs; public: - BitSetUint64ValueRetType(const BitSetUint64& bs) : m_bs(bs) + BitSetUint64ValueRetType(const BitSetUint64& bs) + : m_bs(bs) { } }; @@ -451,7 +452,9 @@ class BitSetOps, unsigned m_bitNum; public: - Iter(Env env, const BitSetUint64& bs) : m_bits(bs.m_bits), m_bitNum(0) + Iter(Env env, const BitSetUint64& bs) + : m_bits(bs.m_bits) + , m_bitNum(0) { } diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 6eea265871c04c..6cde9e0e93d8b1 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -34,7 +34,7 @@ unsigned BasicBlock::s_nMaxTrees; FlowEdge* ShuffleHelper(unsigned hash, FlowEdge* res) { FlowEdge* head = res; - for (FlowEdge *prev = nullptr; res != nullptr; prev = res, res = res->getNextPredEdge()) + for (FlowEdge* prev = nullptr; res != nullptr; prev = res, res = res->getNextPredEdge()) { unsigned blkHash = (hash ^ (res->getSourceBlock()->bbNum << 16) ^ res->getSourceBlock()->bbNum); if (((blkHash % 1879) & 1) && prev != nullptr) @@ -140,7 +140,8 @@ void FlowEdge::addLikelihood(weight_t addedLikelihood) // comp - Compiler instance // block - The block whose successors are to be iterated // -AllSuccessorEnumerator::AllSuccessorEnumerator(Compiler* comp, BasicBlock* block) : m_block(block) +AllSuccessorEnumerator::AllSuccessorEnumerator(Compiler* comp, BasicBlock* block) + : m_block(block) { m_numSuccs = 0; block->VisitAllSuccs(comp, [this](BasicBlock* succ) { @@ -1891,7 +1892,8 @@ BBswtDesc::BBswtDesc(Compiler* comp, const BBswtDesc* other) // comp - compiler instance // other - 
existing descriptor to copy // -BBehfDesc::BBehfDesc(Compiler* comp, const BBehfDesc* other) : bbeCount(other->bbeCount) +BBehfDesc::BBehfDesc(Compiler* comp, const BBehfDesc* other) + : bbeCount(other->bbeCount) { // Allocate and fill in a new dst tab // diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 68f41e3610173a..16321157664a50 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -162,7 +162,8 @@ class MemoryKindIterator int value; public: - explicit inline MemoryKindIterator(int val) : value(val) + explicit inline MemoryKindIterator(int val) + : value(val) { } inline MemoryKindIterator& operator++() @@ -244,7 +245,8 @@ class PredEdgeList }; public: - PredEdgeList(FlowEdge* pred) : m_begin(pred) + PredEdgeList(FlowEdge* pred) + : m_begin(pred) { } @@ -297,7 +299,8 @@ class PredBlockList }; public: - PredBlockList(FlowEdge* pred) : m_begin(pred) + PredBlockList(FlowEdge* pred) + : m_begin(pred) { } @@ -322,7 +325,8 @@ class BBArrayIterator FlowEdge* const* m_edgeEntry; public: - BBArrayIterator(FlowEdge* const* edgeEntry) : m_edgeEntry(edgeEntry) + BBArrayIterator(FlowEdge* const* edgeEntry) + : m_edgeEntry(edgeEntry) { } @@ -351,7 +355,8 @@ class FlowEdgeArrayIterator FlowEdge* const* m_edgeEntry; public: - FlowEdgeArrayIterator(FlowEdge* const* edgeEntry) : m_edgeEntry(edgeEntry) + FlowEdgeArrayIterator(FlowEdge* const* edgeEntry) + : m_edgeEntry(edgeEntry) { } @@ -727,7 +732,8 @@ struct BasicBlock : private LIR::Range BBKinds bbKind; // jump (if any) at the end of this block /* The following union describes the jump target(s) of this block */ - union { + union + { unsigned bbTargetOffs; // PC offset (temporary only) FlowEdge* bbTargetEdge; // successor edge for block kinds with only one successor (BBJ_ALWAYS, etc) FlowEdge* bbTrueEdge; // BBJ_COND successor edge when its condition is true (alias for bbTargetEdge) @@ -1165,11 +1171,11 @@ struct BasicBlock : private LIR::Range } #ifdef DEBUG - void dspFlags() const; // 
Print the flags - unsigned dspPreds() const; // Print the predecessors (bbPreds) - void dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH - // regions are printed: see NumSucc() for details. - void dspKind() const; // Print the block jump kind (e.g., BBJ_ALWAYS, BBJ_COND, etc.). + void dspFlags() const; // Print the flags + unsigned dspPreds() const; // Print the predecessors (bbPreds) + void dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH + // regions are printed: see NumSucc() for details. + void dspKind() const; // Print the block jump kind (e.g., BBJ_ALWAYS, BBJ_COND, etc.). // Print a simple basic block header for various output, including a list of predecessors and successors. void dspBlockHeader(Compiler* compiler, bool showKind = true, bool showFlags = false, bool showPreds = true); @@ -1177,11 +1183,11 @@ struct BasicBlock : private LIR::Range const char* dspToString(int blockNumPadding = 0) const; #endif // DEBUG -#define BB_UNITY_WEIGHT 100.0 // how much a normal execute once block weighs -#define BB_UNITY_WEIGHT_UNSIGNED 100 // how much a normal execute once block weighs -#define BB_LOOP_WEIGHT_SCALE 8.0 // synthetic profile scale factor for loops -#define BB_ZERO_WEIGHT 0.0 -#define BB_MAX_WEIGHT FLT_MAX // maximum finite weight -- needs rethinking. +#define BB_UNITY_WEIGHT 100.0 // how much a normal execute once block weighs +#define BB_UNITY_WEIGHT_UNSIGNED 100 // how much a normal execute once block weighs +#define BB_LOOP_WEIGHT_SCALE 8.0 // synthetic profile scale factor for loops +#define BB_ZERO_WEIGHT 0.0 +#define BB_MAX_WEIGHT FLT_MAX // maximum finite weight -- needs rethinking. 
weight_t bbWeight; // The dynamic execution weight of this block @@ -1402,12 +1408,14 @@ struct BasicBlock : private LIR::Range #define NO_BASE_TMP UINT_MAX // base# to use when we have none - union { + union + { unsigned bbStkTempsIn; // base# for input stack temps int bbCountSchemaIndex; // schema index for count instrumentation }; - union { + union + { unsigned bbStkTempsOut; // base# for output stack temps int bbHistogramSchemaIndex; // schema index for histogram instrumentation }; @@ -1527,11 +1535,11 @@ struct BasicBlock : private LIR::Range bool hasEHBoundaryOut() const; // Some non-zero value that will not collide with real tokens for bbCatchTyp -#define BBCT_NONE 0x00000000 -#define BBCT_FAULT 0xFFFFFFFC -#define BBCT_FINALLY 0xFFFFFFFD -#define BBCT_FILTER 0xFFFFFFFE -#define BBCT_FILTER_HANDLER 0xFFFFFFFF +#define BBCT_NONE 0x00000000 +#define BBCT_FAULT 0xFFFFFFFC +#define BBCT_FINALLY 0xFFFFFFFD +#define BBCT_FILTER 0xFFFFFFFE +#define BBCT_FILTER_HANDLER 0xFFFFFFFF #define handlerGetsXcptnObj(hndTyp) ((hndTyp) != BBCT_NONE && (hndTyp) != BBCT_FAULT && (hndTyp) != BBCT_FINALLY) // TODO-Cleanup: Get rid of bbStkDepth and use bbStackDepthOnEntry() instead @@ -1574,7 +1582,8 @@ struct BasicBlock : private LIR::Range void ensurePredListOrder(Compiler* compiler); void reorderPredList(Compiler* compiler); - union { + union + { BasicBlock* bbIDom; // Represent the closest dominator to this block (called the Immediate // Dominator) used to compute the dominance tree. 
FlowEdge* bbLastPred; // Used early on by fgLinkBasicBlock/fgAddRefPred @@ -1623,7 +1632,9 @@ struct BasicBlock : private LIR::Range return m_ssaNum; } - MemoryPhiArg(unsigned ssaNum, MemoryPhiArg* nextArg = nullptr) : m_ssaNum(ssaNum), m_nextArg(nextArg) + MemoryPhiArg(unsigned ssaNum, MemoryPhiArg* nextArg = nullptr) + : m_ssaNum(ssaNum) + , m_nextArg(nextArg) { } @@ -1649,18 +1660,21 @@ struct BasicBlock : private LIR::Range * thus we can union them since the two operations are completely disjunct. */ - union { + union + { EXPSET_TP bbCseGen; // CSEs computed by block ASSERT_TP bbAssertionGen; // assertions created by block (global prop) ASSERT_TP bbAssertionOutIfTrue; // assertions available on exit along true/jump edge (BBJ_COND, local prop) }; - union { + union + { EXPSET_TP bbCseIn; // CSEs available on entry ASSERT_TP bbAssertionIn; // assertions available on entry (global prop) }; - union { + union + { EXPSET_TP bbCseOut; // CSEs available on exit ASSERT_TP bbAssertionOut; // assertions available on exit (global prop, local prop & !BBJ_COND) ASSERT_TP bbAssertionOutIfFalse; // assertions available on exit along false/next edge (BBJ_COND, local prop) @@ -1668,7 +1682,7 @@ struct BasicBlock : private LIR::Range void* bbEmitCookie; -//------------------------------------------------------------------------- + //------------------------------------------------------------------------- #if MEASURE_BLOCK_SIZE static size_t s_Size; @@ -1703,8 +1717,8 @@ struct BasicBlock : private LIR::Range unsigned bbID; #endif // DEBUG - unsigned bbStackDepthOnEntry() const; - void bbSetStack(StackEntry* stack); + unsigned bbStackDepthOnEntry() const; + void bbSetStack(StackEntry* stack); StackEntry* bbStackOnEntry() const; // "bbNum" is one-based (for unknown reasons); it is sometimes useful to have the corresponding @@ -1754,7 +1768,10 @@ struct BasicBlock : private LIR::Range Statement* FirstNonPhiDef() const; Statement* FirstNonPhiDefOrCatchArgStore() const; - BasicBlock() 
: bbStmtList(nullptr), bbLiveIn(VarSetOps::UninitVal()), bbLiveOut(VarSetOps::UninitVal()) + BasicBlock() + : bbStmtList(nullptr) + , bbLiveIn(VarSetOps::UninitVal()) + , bbLiveOut(VarSetOps::UninitVal()) { } @@ -1766,7 +1783,9 @@ struct BasicBlock : private LIR::Range BasicBlock* m_block; public: - Successors(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) + Successors(Compiler* comp, BasicBlock* block) + : m_comp(comp) + , m_block(block) { } @@ -1777,11 +1796,15 @@ struct BasicBlock : private LIR::Range TPosition m_pos; public: - iterator(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block), m_pos(comp, block) + iterator(Compiler* comp, BasicBlock* block) + : m_comp(comp) + , m_block(block) + , m_pos(comp, block) { } - iterator() : m_pos() + iterator() + : m_pos() { } @@ -1854,7 +1877,8 @@ struct BasicBlock : private LIR::Range class BBSuccList : private SuccList { public: - BBSuccList(const BasicBlock* block) : SuccList(block) + BBSuccList(const BasicBlock* block) + : SuccList(block) { } @@ -1876,7 +1900,8 @@ struct BasicBlock : private LIR::Range class BBSuccEdgeList : private SuccList { public: - BBSuccEdgeList(const BasicBlock* block) : SuccList(block) + BBSuccEdgeList(const BasicBlock* block) + : SuccList(block) { } @@ -1912,7 +1937,9 @@ struct BasicBlock : private LIR::Range public: iterator(Compiler* comp, BasicBlock* block, unsigned succNum) - : m_comp(comp), m_block(block), m_succNum(succNum) + : m_comp(comp) + , m_block(block) + , m_succNum(succNum) { } @@ -1937,7 +1964,9 @@ struct BasicBlock : private LIR::Range }; public: - BBCompilerSuccList(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) + BBCompilerSuccList(Compiler* comp, BasicBlock* block) + : m_comp(comp) + , m_block(block) { } @@ -1973,7 +2002,9 @@ struct BasicBlock : private LIR::Range public: iterator(Compiler* comp, BasicBlock* block, unsigned succNum) - : m_comp(comp), m_block(block), m_succNum(succNum) + : m_comp(comp) + , m_block(block) + , 
m_succNum(succNum) { } @@ -1998,7 +2029,9 @@ struct BasicBlock : private LIR::Range }; public: - BBCompilerSuccEdgeList(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) + BBCompilerSuccEdgeList(Compiler* comp, BasicBlock* block) + : m_comp(comp) + , m_block(block) { } @@ -2108,7 +2141,8 @@ class BasicBlockIterator BasicBlock* m_block; public: - BasicBlockIterator(BasicBlock* block) : m_block(block) + BasicBlockIterator(BasicBlock* block) + : m_block(block) { } @@ -2144,7 +2178,8 @@ class BasicBlockSimpleList BasicBlock* m_begin; public: - BasicBlockSimpleList(BasicBlock* begin) : m_begin(begin) + BasicBlockSimpleList(BasicBlock* begin) + : m_begin(begin) { } @@ -2174,7 +2209,9 @@ class BasicBlockRangeList BasicBlock* m_end; public: - BasicBlockRangeList(BasicBlock* begin, BasicBlock* end) : m_begin(begin), m_end(end) + BasicBlockRangeList(BasicBlock* begin, BasicBlock* end) + : m_begin(begin) + , m_end(end) { assert(begin != nullptr); assert(end != nullptr); @@ -2214,7 +2251,9 @@ struct BBswtDesc bool bbsHasDefault; // true if last switch case is a default case bool bbsHasDominantCase; // true if switch has a dominant case - BBswtDesc() : bbsHasDefault(true), bbsHasDominantCase(false) + BBswtDesc() + : bbsHasDefault(true) + , bbsHasDominantCase(false) { } @@ -2241,7 +2280,8 @@ struct BBswtDesc // BBSwitchTargetList out-of-class-declaration implementations (here due to C++ ordering requirements). 
// -inline BBSwitchTargetList::BBSwitchTargetList(BBswtDesc* bbsDesc) : m_bbsDesc(bbsDesc) +inline BBSwitchTargetList::BBSwitchTargetList(BBswtDesc* bbsDesc) + : m_bbsDesc(bbsDesc) { assert(m_bbsDesc != nullptr); assert(m_bbsDesc->bbsDstTab != nullptr); @@ -2264,7 +2304,9 @@ struct BBehfDesc FlowEdge** bbeSuccs; // array of `FlowEdge*` pointing to BBJ_EHFINALLYRET block successors unsigned bbeCount; // size of `bbeSuccs` array - BBehfDesc() : bbeSuccs(nullptr), bbeCount(0) + BBehfDesc() + : bbeSuccs(nullptr) + , bbeCount(0) { } @@ -2274,7 +2316,8 @@ struct BBehfDesc // BBEhfSuccList out-of-class-declaration implementations (here due to C++ ordering requirements). // -inline BBEhfSuccList::BBEhfSuccList(BBehfDesc* bbeDesc) : m_bbeDesc(bbeDesc) +inline BBEhfSuccList::BBEhfSuccList(BBehfDesc* bbeDesc) + : m_bbeDesc(bbeDesc) { assert(m_bbeDesc != nullptr); assert((m_bbeDesc->bbeSuccs != nullptr) || (m_bbeDesc->bbeCount == 0)); @@ -2373,11 +2416,15 @@ struct BasicBlockList BasicBlockList* next; // The next BasicBlock in the list, nullptr for end of list. BasicBlock* block; // The BasicBlock of interest. - BasicBlockList() : next(nullptr), block(nullptr) + BasicBlockList() + : next(nullptr) + , block(nullptr) { } - BasicBlockList(BasicBlock* blk, BasicBlockList* rest) : next(rest), block(blk) + BasicBlockList(BasicBlock* blk, BasicBlockList* rest) + : next(rest) + , block(blk) { } }; @@ -2403,7 +2450,8 @@ inline BasicBlock* BBArrayIterator::operator*() const // Pred list iterator implementations (that are required to be defined after the declaration of BasicBlock and FlowEdge) -inline PredEdgeList::iterator::iterator(FlowEdge* pred) : m_pred(pred) +inline PredEdgeList::iterator::iterator(FlowEdge* pred) + : m_pred(pred) { #ifdef DEBUG m_next = (m_pred == nullptr) ? 
nullptr : m_pred->getNextPredEdge(); @@ -2425,7 +2473,8 @@ inline PredEdgeList::iterator& PredEdgeList::iterator::operator++() } template -inline PredBlockList::iterator::iterator(FlowEdge* pred) : m_pred(pred) +inline PredBlockList::iterator::iterator(FlowEdge* pred) + : m_pred(pred) { bool initNextPointer = allowEdits; INDEBUG(initNextPointer = true); @@ -2435,13 +2484,13 @@ inline PredBlockList::iterator::iterator(FlowEdge* pred) : m_pred(pr } } -template +template inline BasicBlock* PredBlockList::iterator::operator*() const { return m_pred->getSourceBlock(); } -template +template inline typename PredBlockList::iterator& PredBlockList::iterator::operator++() { if (allowEdits) @@ -2480,7 +2529,8 @@ void* emitCodeGetCookie(const BasicBlock* block); class AllSuccessorEnumerator { BasicBlock* m_block; - union { + union + { // We store up to 4 successors inline in the enumerator. For ASP.NET // and libraries.pmi this is enough in 99.7% of cases. BasicBlock* m_successors[4]; diff --git a/src/coreclr/jit/blockset.h b/src/coreclr/jit/blockset.h index 83de7a5dad1e59..f69e1e59ace324 100644 --- a/src/coreclr/jit/blockset.h +++ b/src/coreclr/jit/blockset.h @@ -24,10 +24,11 @@ #include "compilerbitsettraits.h" #include "bitsetasshortlong.h" -class BlockSetOps : public BitSetOps +class BlockSetOps + : public BitSetOps { public: // Specialize BlockSetOps::MakeFull(). Since we number basic blocks from one, we remove bit zero from diff --git a/src/coreclr/jit/buildstring.cpp b/src/coreclr/jit/buildstring.cpp index f432fec47475ff..3f0222ad2649ac 100644 --- a/src/coreclr/jit/buildstring.cpp +++ b/src/coreclr/jit/buildstring.cpp @@ -1,9 +1,9 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. 
-#define STRINGIFY(L) #L +#define STRINGIFY(L) #L #define MAKESTRING(M, L) M(L) -#define STRINGIZE(X) MAKESTRING(STRINGIFY, X) +#define STRINGIZE(X) MAKESTRING(STRINGIFY, X) #if defined(__clang__) #define BUILD_COMPILER \ diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h index d63e0809b62693..0ab8a81d89ef93 100644 --- a/src/coreclr/jit/codegen.h +++ b/src/coreclr/jit/codegen.h @@ -101,7 +101,7 @@ class CodeGen final : public CodeGenInterface } } - static GenTreeIndir indirForm(var_types type, GenTree* base); + static GenTreeIndir indirForm(var_types type, GenTree* base); static GenTreeStoreInd storeIndirForm(var_types type, GenTree* base, GenTree* data); GenTreeIntCon intForm(var_types type, ssize_t value); @@ -177,8 +177,8 @@ class CodeGen final : public CodeGenInterface #ifdef JIT32_GCENCODER void* genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)); - void* genCreateAndStoreGCInfoJIT32(unsigned codeSize, - unsigned prologSize, + void* genCreateAndStoreGCInfoJIT32(unsigned codeSize, + unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)); #else // !JIT32_GCENCODER void genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)); @@ -206,7 +206,7 @@ class CodeGen final : public CodeGenInterface unsigned genCurDispOffset; static const char* genInsName(instruction ins); - const char* genInsDisplayName(emitter::instrDesc* id); + const char* genInsDisplayName(emitter::instrDesc* id); static const char* genSizeStr(emitAttr size); @@ -317,11 +317,17 @@ class CodeGen final : public CodeGenInterface regNumber reg2; bool useSaveNextPair; - RegPair(regNumber reg1) : reg1(reg1), reg2(REG_NA), useSaveNextPair(false) + RegPair(regNumber reg1) + : reg1(reg1) + , reg2(REG_NA) + , useSaveNextPair(false) { } - RegPair(regNumber reg1, regNumber reg2) : reg1(reg1), reg2(reg2), useSaveNextPair(false) + RegPair(regNumber reg1, regNumber reg2) + : 
reg1(reg1) + , reg2(reg2) + , useSaveNextPair(false) { assert(reg2 == REG_NEXT(reg1)); } @@ -364,8 +370,8 @@ class CodeGen final : public CodeGenInterface bool genStackPointerAdjustment(ssize_t spAdjustment, regNumber tmpReg); - void genPushFltRegs(regMaskTP regMask); - void genPopFltRegs(regMaskTP regMask); + void genPushFltRegs(regMaskTP regMask); + void genPopFltRegs(regMaskTP regMask); regMaskTP genStackAllocRegisterMask(unsigned frameSize, regMaskTP maskCalleeSavedFloat); regMaskTP genJmpCallArgMask(); @@ -679,17 +685,17 @@ class CodeGen final : public CodeGenInterface void genSinglePush(); void genSinglePop(); regMaskTP genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP* noRefRegs); - void genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs); - -/* -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -XX XX -XX Debugging Support XX -XX XX -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -*/ + void genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs); + + /* + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XX XX + XX Debugging Support XX + XX XX + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + */ #ifdef DEBUG void genIPmappingDisp(unsigned mappingNum, const IPmappingDsc* ipMapping); @@ -939,7 +945,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genCompareFloat(GenTree* treeNode); void genCompareInt(GenTree* treeNode); #ifdef TARGET_XARCH - bool genCanAvoidEmittingCompareAgainstZero(GenTree* tree, var_types opType); + bool 
genCanAvoidEmittingCompareAgainstZero(GenTree* tree, var_types opType); GenTree* genTryFindFlagsConsumer(GenTree* flagsProducer, GenCondition** condition); #endif @@ -1112,12 +1118,12 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genSpillLocal(unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum); void genUnspillLocal( unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum, bool reSpill, bool isLastUse); - void genUnspillRegIfNeeded(GenTree* tree); - void genUnspillRegIfNeeded(GenTree* tree, unsigned multiRegIndex); + void genUnspillRegIfNeeded(GenTree* tree); + void genUnspillRegIfNeeded(GenTree* tree, unsigned multiRegIndex); regNumber genConsumeReg(GenTree* tree); regNumber genConsumeReg(GenTree* tree, unsigned multiRegIndex); - void genCopyRegIfNeeded(GenTree* tree, regNumber needReg); - void genConsumeRegAndCopy(GenTree* tree, regNumber needReg); + void genCopyRegIfNeeded(GenTree* tree, regNumber needReg); + void genConsumeRegAndCopy(GenTree* tree, regNumber needReg); void genConsumeIfReg(GenTree* tree) { @@ -1127,15 +1133,15 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX } } - void genRegCopy(GenTree* tree); + void genRegCopy(GenTree* tree); regNumber genRegCopy(GenTree* tree, unsigned multiRegIndex); - void genTransferRegGCState(regNumber dst, regNumber src); - void genConsumeAddress(GenTree* addr); - void genConsumeAddrMode(GenTreeAddrMode* mode); - void genSetBlockSize(GenTreeBlk* blkNode, regNumber sizeReg); - void genConsumeBlockSrc(GenTreeBlk* blkNode); - void genSetBlockSrc(GenTreeBlk* blkNode, regNumber srcReg); - void genConsumeBlockOp(GenTreeBlk* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg); + void genTransferRegGCState(regNumber dst, regNumber src); + void genConsumeAddress(GenTree* addr); + void genConsumeAddrMode(GenTreeAddrMode* mode); + void genSetBlockSize(GenTreeBlk* blkNode, regNumber sizeReg); + void 
genConsumeBlockSrc(GenTreeBlk* blkNode); + void genSetBlockSrc(GenTreeBlk* blkNode, regNumber srcReg); + void genConsumeBlockOp(GenTreeBlk* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg); #ifdef FEATURE_PUT_STRUCT_ARG_STK void genConsumePutStructArgStk(GenTreePutArgStk* putArgStkNode, @@ -1243,10 +1249,10 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX unsigned genMove4IfNeeded(unsigned size, regNumber tmpReg, GenTree* src, unsigned offset); unsigned genMove2IfNeeded(unsigned size, regNumber tmpReg, GenTree* src, unsigned offset); unsigned genMove1IfNeeded(unsigned size, regNumber tmpReg, GenTree* src, unsigned offset); - void genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset); - void genStoreRegToStackArg(var_types type, regNumber reg, int offset); - void genStructPutArgRepMovs(GenTreePutArgStk* putArgStkNode); - void genStructPutArgUnroll(GenTreePutArgStk* putArgStkNode); + void genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset); + void genStoreRegToStackArg(var_types type, regNumber reg, int offset); + void genStructPutArgRepMovs(GenTreePutArgStk* putArgStkNode); + void genStructPutArgUnroll(GenTreePutArgStk* putArgStkNode); #ifdef TARGET_X86 void genStructPutArgPush(GenTreePutArgStk* putArgStkNode); #else @@ -1254,13 +1260,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif #endif // FEATURE_PUT_STRUCT_ARG_STK - void genCodeForStoreBlk(GenTreeBlk* storeBlkNode); - void genCodeForInitBlkLoop(GenTreeBlk* initBlkNode); - void genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode); - void genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode); + void genCodeForStoreBlk(GenTreeBlk* storeBlkNode); + void genCodeForInitBlkLoop(GenTreeBlk* initBlkNode); + void genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode); + void genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode); unsigned 
genEmitJumpTable(GenTree* treeNode, bool relativeAddr); - void genJumpTable(GenTree* tree); - void genTableBasedSwitch(GenTree* tree); + void genJumpTable(GenTree* tree); + void genTableBasedSwitch(GenTree* tree); #if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) instruction genGetInsForOper(GenTree* treeNode); #else @@ -1270,13 +1276,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX regNumber targetReg, GenTreeIndir* indir, bool* needsBarrier); - bool genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data); - GenTree* getCallTarget(const GenTreeCall* call, CORINFO_METHOD_HANDLE* methHnd); - regNumber getCallIndirectionCellReg(GenTreeCall* call); - void genCall(GenTreeCall* call); - void genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes)); - void genDefinePendingCallLabel(GenTreeCall* call); - void genJmpMethod(GenTree* jmp); + bool genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data); + GenTree* getCallTarget(const GenTreeCall* call, CORINFO_METHOD_HANDLE* methHnd); + regNumber getCallIndirectionCellReg(GenTreeCall* call); + void genCall(GenTreeCall* call); + void genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes)); + void genDefinePendingCallLabel(GenTreeCall* call); + void genJmpMethod(GenTree* jmp); BasicBlock* genCallFinally(BasicBlock* block); #if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // TODO: refactor for LA. 
@@ -1318,13 +1324,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genReturn(GenTree* treeNode); #ifdef TARGET_XARCH - void genStackPointerConstantAdjustment(ssize_t spDelta, bool trackSpAdjustments); - void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, bool trackSpAdjustments); + void genStackPointerConstantAdjustment(ssize_t spDelta, bool trackSpAdjustments); + void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, bool trackSpAdjustments); target_ssize_t genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, bool trackSpAdjustments); - void genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta); + void genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta); #else // !TARGET_XARCH - void genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp); - void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp); + void genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp); + void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp); target_ssize_t genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp); #endif // !TARGET_XARCH @@ -1358,8 +1364,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #ifdef DEBUG GenTree* lastConsumedNode; - void genNumberOperandUse(GenTree* const operand, int& useNum) const; - void genCheckConsumeNode(GenTree* const node); + void genNumberOperandUse(GenTree* const operand, int& useNum) const; + void genCheckConsumeNode(GenTree* const node); #else // !DEBUG inline void genCheckConsumeNode(GenTree* treeNode) { @@ -1437,7 +1443,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #if defined(TARGET_XARCH) - enum class OperandKind{ + enum class OperandKind + { ClsVar, // [CLS_VAR_ADDR] - "C" in the emitter. Local, // [Local or spill temp + offset] - "S" in the emitter. 
Indir, // [base+index*scale+disp] - "A" in the emitter. @@ -1448,7 +1455,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX class OperandDesc { OperandKind m_kind; - union { + union + { struct { CORINFO_FIELD_HANDLE m_fieldHnd; @@ -1476,30 +1484,45 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX }; public: - OperandDesc(CORINFO_FIELD_HANDLE fieldHnd) : m_kind(OperandKind::ClsVar), m_fieldHnd(fieldHnd) + OperandDesc(CORINFO_FIELD_HANDLE fieldHnd) + : m_kind(OperandKind::ClsVar) + , m_fieldHnd(fieldHnd) { } - OperandDesc(int varNum, uint16_t offset) : m_kind(OperandKind::Local), m_varNum(varNum), m_offset(offset) + OperandDesc(int varNum, uint16_t offset) + : m_kind(OperandKind::Local) + , m_varNum(varNum) + , m_offset(offset) { } OperandDesc(GenTreeIndir* indir) - : m_kind(OperandKind::Indir), m_addr(indir->Addr()), m_indir(indir), m_indirType(indir->TypeGet()) + : m_kind(OperandKind::Indir) + , m_addr(indir->Addr()) + , m_indir(indir) + , m_indirType(indir->TypeGet()) { } OperandDesc(var_types indirType, GenTree* addr) - : m_kind(OperandKind::Indir), m_addr(addr), m_indir(nullptr), m_indirType(indirType) + : m_kind(OperandKind::Indir) + , m_addr(addr) + , m_indir(nullptr) + , m_indirType(indirType) { } OperandDesc(ssize_t immediate, bool immediateNeedsReloc) - : m_kind(OperandKind::Imm), m_immediate(immediate), m_immediateNeedsReloc(immediateNeedsReloc) + : m_kind(OperandKind::Imm) + , m_immediate(immediate) + , m_immediateNeedsReloc(immediateNeedsReloc) { } - OperandDesc(regNumber reg) : m_kind(OperandKind::Reg), m_reg(reg) + OperandDesc(regNumber reg) + : m_kind(OperandKind::Reg) + , m_reg(reg) { } @@ -1689,7 +1712,9 @@ class CodeGenPhase final : public Phase { public: CodeGenPhase(CodeGen* _codeGen, Phases _phase, void (CodeGen::*_action)()) - : Phase(_codeGen->GetCompiler(), _phase), codeGen(_codeGen), action(_action) + : Phase(_codeGen->GetCompiler(), _phase) + , codeGen(_codeGen) + , 
action(_action) { } diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index 8cf3ac32b3a329..65ba1bf5913c69 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -173,9 +173,9 @@ void CodeGen::genEHCatchRet(BasicBlock* block) //------------------------------------------------------------------------ // instGen_Set_Reg_To_Imm: Move an immediate value into an integer register. // -void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, - regNumber reg, - ssize_t imm, +void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, + regNumber reg, + ssize_t imm, insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { // reg cannot be a FP register @@ -1651,7 +1651,7 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, callTargetReg, // ireg REG_NA, 0, 0, // xreg, xmul, disp false // isJump - ); + ); } else { @@ -1660,7 +1660,7 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, DebugInfo(), REG_NA, REG_NA, 0, 0, /* ilOffset, ireg, xreg, xmul, disp */ false /* isJump */ - ); + ); } regSet.verifyRegistersUsed(RBM_CALLEE_TRASH); diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 81370e6413835f..5447fc34724c21 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -1884,8 +1884,8 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo() if (compiler->lvaPSPSym != BAD_VAR_NUM) { - if (CallerSP_to_PSP_slot_delta != - compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)) // for debugging + if (CallerSP_to_PSP_slot_delta != compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)) // for + // debugging { printf("lvaGetCallerSPRelativeOffset(lvaPSPSym): %d\n", compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)); @@ -2216,9 +2216,9 @@ void CodeGen::genEHCatchRet(BasicBlock* block) // move an immediate value into an integer 
register -void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, - regNumber reg, - ssize_t imm, +void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, + regNumber reg, + ssize_t imm, insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { // reg cannot be a FP register @@ -5130,7 +5130,7 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, gcInfo.gcRegByrefSetCur, DebugInfo(), callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper); regSet.verifyRegistersUsed(killMask); @@ -5739,8 +5739,8 @@ void CodeGen::genCodeForBfiz(GenTreeOp* tree) GenTree* castOp = cast->CastOp(); genConsumeRegs(castOp); - unsigned srcBits = varTypeIsSmall(cast->CastToType()) ? genTypeSize(cast->CastToType()) * BITS_PER_BYTE - : genTypeSize(castOp) * BITS_PER_BYTE; + unsigned srcBits = varTypeIsSmall(cast->CastToType()) ? genTypeSize(cast->CastToType()) * BITS_PER_BYTE + : genTypeSize(castOp) * BITS_PER_BYTE; const bool isUnsigned = cast->IsUnsigned() || varTypeIsUnsigned(cast->CastToType()); GetEmitter()->emitIns_R_R_I_I(isUnsigned ? 
INS_ubfiz : INS_sbfiz, size, tree->GetRegNum(), castOp->GetRegNum(), (int)shiftByImm, (int)srcBits); diff --git a/src/coreclr/jit/codegenarm64test.cpp b/src/coreclr/jit/codegenarm64test.cpp index 750daa569613ff..52633ed6733e6a 100644 --- a/src/coreclr/jit/codegenarm64test.cpp +++ b/src/coreclr/jit/codegenarm64test.cpp @@ -4932,16 +4932,16 @@ void CodeGen::genArm64EmitterUnitTestsSve() INS_OPTS_SCALABLE_B); /* SEL .B, , .B, .B */ // IF_SVE_CZ_4A_A - theEmitter->emitIns_R_R(INS_sve_movs, EA_SCALABLE, REG_P0, REG_P15, - INS_OPTS_SCALABLE_B); /* MOVS .B, .B */ + theEmitter->emitIns_R_R(INS_sve_movs, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B); /* MOVS .B, .B + */ // IF_SVE_CZ_4A_K theEmitter->emitIns_R_R_R(INS_sve_mov, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE); /* MOV .B, /M, .B */ // IF_SVE_CZ_4A_L - theEmitter->emitIns_R_R(INS_sve_mov, EA_SCALABLE, REG_P0, REG_P15, - INS_OPTS_SCALABLE_B); /* MOV .B, .B */ + theEmitter->emitIns_R_R(INS_sve_mov, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B); /* MOV .B, .B + */ // IF_SVE_DA_4A theEmitter->emitIns_R_R_R_R(INS_sve_brkpa, EA_SCALABLE, REG_P0, REG_P1, REG_P10, REG_P15, diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index 1f550660b77984..965b72721aaaa0 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -727,8 +727,8 @@ void CodeGen::genIntrinsic(GenTreeIntrinsic* treeNode) break; #if defined(FEATURE_SIMD) - // The handling is a bit more complex so genSimdUpperSave/Restore - // handles genConsumeOperands and genProduceReg + // The handling is a bit more complex so genSimdUpperSave/Restore + // handles genConsumeOperands and genProduceReg case NI_SIMD_UpperRestore: { @@ -861,7 +861,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) emit->emitIns_S_R(storeIns, storeAttr, REG_ZR, varNumOut, argOffsetOut); #else // !TARGET_ARM64 - // There is no zero register on ARM32 + // There 
is no zero register on ARM32 unreached(); #endif // !TARGET_ARM64 } @@ -1018,9 +1018,9 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) nextIndex += 2; } #else // TARGET_ARM - // For a >= 4 byte sizes we will generate a ldr and str instruction each loop - // ldr r2, [r0] - // str r2, [sp, #16] + // For a >= 4 byte sizes we will generate a ldr and str instruction each loop + // ldr r2, [r0] + // str r2, [sp, #16] while (remainingSize >= TARGET_POINTER_SIZE) { var_types type = layout->GetGCPtrType(nextIndex); @@ -1812,7 +1812,7 @@ instruction CodeGen::genGetVolatileLdStIns(instruction currentIns, assert(!addrIsInReg); switch (currentIns) { - // Loads + // Loads case INS_ldrb: return INS_ldapurb; @@ -1823,7 +1823,7 @@ instruction CodeGen::genGetVolatileLdStIns(instruction currentIns, case INS_ldr: return INS_ldapur; - // Stores + // Stores case INS_strb: return INS_stlurb; @@ -1855,7 +1855,7 @@ instruction CodeGen::genGetVolatileLdStIns(instruction currentIns, const bool hasRcpc1 = compiler->compOpportunisticallyDependsOn(InstructionSet_Rcpc); switch (currentIns) { - // Loads + // Loads case INS_ldrb: return hasRcpc1 ? INS_ldaprb : INS_ldarb; @@ -1866,7 +1866,7 @@ instruction CodeGen::genGetVolatileLdStIns(instruction currentIns, case INS_ldr: return hasRcpc1 ? 
INS_ldapr : INS_ldar; - // Stores + // Stores case INS_strb: return INS_stlrb; @@ -2060,7 +2060,10 @@ class ProducingStreamBaseInstrs { public: ProducingStreamBaseInstrs(regNumber intReg1, regNumber intReg2, regNumber addrReg, emitter* emitter) - : intReg1(intReg1), intReg2(intReg2), addrReg(addrReg), emitter(emitter) + : intReg1(intReg1) + , intReg2(intReg2) + , addrReg(addrReg) + , emitter(emitter) { } @@ -2121,7 +2124,11 @@ class ProducingStream { public: ProducingStream(regNumber intReg1, regNumber simdReg1, regNumber simdReg2, regNumber addrReg, emitter* emitter) - : intReg1(intReg1), simdReg1(simdReg1), simdReg2(simdReg2), addrReg(addrReg), emitter(emitter) + : intReg1(intReg1) + , simdReg1(simdReg1) + , simdReg2(simdReg2) + , addrReg(addrReg) + , emitter(emitter) { } @@ -2244,7 +2251,9 @@ class BlockUnrollHelper class InitBlockUnrollHelper { public: - InitBlockUnrollHelper(int dstOffset, unsigned byteCount) : dstStartOffset(dstOffset), byteCount(byteCount) + InitBlockUnrollHelper(int dstOffset, unsigned byteCount) + : dstStartOffset(dstOffset) + , byteCount(byteCount) { } @@ -2373,7 +2382,9 @@ class CopyBlockUnrollHelper { public: CopyBlockUnrollHelper(int srcOffset, int dstOffset, unsigned byteCount) - : srcStartOffset(srcOffset), dstStartOffset(dstOffset), byteCount(byteCount) + : srcStartOffset(srcOffset) + , dstStartOffset(dstOffset) + , byteCount(byteCount) { } @@ -3432,13 +3443,13 @@ void CodeGen::genCall(GenTreeCall* call) else #endif // TARGET_ARM if (varTypeUsesFloatArgReg(returnType)) - { - returnReg = REG_FLOATRET; - } - else - { - returnReg = REG_INTRET; - } + { + returnReg = REG_FLOATRET; + } + else + { + returnReg = REG_INTRET; + } if (call->GetRegNum() != returnReg) { @@ -3694,19 +3705,19 @@ void CodeGen::genCallInstruction(GenTreeCall* call) else #endif // FEATURE_READYTORUN if (call->gtCallType == CT_HELPER) - { - CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd); - noway_assert(helperNum != CORINFO_HELP_UNDEF); + { + 
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd); + noway_assert(helperNum != CORINFO_HELP_UNDEF); - void* pAddr = nullptr; - addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr); - assert(pAddr == nullptr); - } - else - { - // Direct call to a non-virtual user function. - addr = call->gtDirectCallAddress; - } + void* pAddr = nullptr; + addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr); + assert(pAddr == nullptr); + } + else + { + // Direct call to a non-virtual user function. + addr = call->gtDirectCallAddress; + } assert(addr != nullptr); @@ -4372,8 +4383,8 @@ void CodeGen::genFloatToFloatCast(GenTree* treeNode) //------------------------------------------------------------------------ // genCreateAndStoreGCInfo: Create and record GC Info for the function. // -void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, - unsigned prologSize, +void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, + unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)) { IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC()); @@ -5529,9 +5540,8 @@ void CodeGen::genFnEpilog(BasicBlock* block) compiler->unwindSetFrameReg(REG_SAVED_LOCALLOC_SP, 0); } - if (jmpEpilog || - genStackAllocRegisterMask(compiler->compLclFrameSize, regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED) == - RBM_NONE) + if (jmpEpilog || genStackAllocRegisterMask(compiler->compLclFrameSize, + regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED) == RBM_NONE) { genFreeLclFrame(compiler->compLclFrameSize, &unwindStarted); } @@ -5603,9 +5613,9 @@ void CodeGen::genFnEpilog(BasicBlock* block) #if !FEATURE_FASTTAILCALL noway_assert(jmpNode->gtOper == GT_JMP); #else // FEATURE_FASTTAILCALL - // armarch - // If jmpNode is GT_JMP then gtNext must be null. - // If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts. + // armarch + // If jmpNode is GT_JMP then gtNext must be null. 
+ // If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts. noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr)); // Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index a62bcde9e556d0..62fe40ed3c5876 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -65,7 +65,10 @@ CodeGenInterface* getCodeGenerator(Compiler* comp) // CodeGen constructor CodeGenInterface::CodeGenInterface(Compiler* theCompiler) - : gcInfo(theCompiler), regSet(theCompiler, gcInfo), compiler(theCompiler), treeLifeUpdater(nullptr) + : gcInfo(theCompiler) + , regSet(theCompiler, gcInfo) + , compiler(theCompiler) + , treeLifeUpdater(nullptr) { } @@ -84,7 +87,8 @@ void CodeGenInterface::CopyRegisterInfo() /*****************************************************************************/ -CodeGen::CodeGen(Compiler* theCompiler) : CodeGenInterface(theCompiler) +CodeGen::CodeGen(Compiler* theCompiler) + : CodeGenInterface(theCompiler) { #if defined(TARGET_XARCH) negBitmaskFlt = nullptr; @@ -1873,7 +1877,7 @@ void CodeGen::genGenerateMachineCode() (compiler->compCodeOpt() != Compiler::SMALL_CODE) && !compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) #endif - ); + ); /* Now generate code for the function */ genCodeForBBlist(); @@ -3199,9 +3203,9 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere #ifdef TARGET_X86 noway_assert(varDsc->lvType == TYP_STRUCT); #else // !TARGET_X86 - // For LSRA, it may not be in regArgMaskLive if it has a zero - // refcnt. This is in contrast with the non-LSRA case in which all - // non-tracked args are assumed live on entry. + // For LSRA, it may not be in regArgMaskLive if it has a zero + // refcnt. This is in contrast with the non-LSRA case in which all + // non-tracked args are assumed live on entry. 
noway_assert((varDsc->lvRefCnt() == 0) || (varDsc->lvType == TYP_STRUCT) || (varDsc->IsAddressExposed() && compiler->info.compIsVarArgs) || (varDsc->IsAddressExposed() && compiler->opts.compUseSoftFP)); @@ -4131,8 +4135,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere int nextArgNum = argNum + i; LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i); regNumber nextRegNum = genMapRegArgNumToRegNum(nextArgNum, regArgTab[nextArgNum].type, - compiler->info.compCallConv); - destRegNum = fieldVarDsc->GetRegNum(); + compiler->info.compCallConv); + destRegNum = fieldVarDsc->GetRegNum(); noway_assert(regArgTab[nextArgNum].varNum == varNum); noway_assert(genIsValidFloatReg(nextRegNum)); noway_assert(genIsValidFloatReg(destRegNum)); @@ -4215,7 +4219,7 @@ void CodeGen::genEnregisterIncomingStackArgs() regNumber tmp_reg = REG_NA; #endif - for (LclVarDsc *varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++) + for (LclVarDsc* varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++) { /* Is this variable a parameter? 
*/ @@ -4289,7 +4293,7 @@ void CodeGen::genEnregisterIncomingStackArgs() } } } -#else // !TARGET_LOONGARCH64 +#else // !TARGET_LOONGARCH64 GetEmitter()->emitIns_R_S(ins_Load(regType), emitTypeSize(regType), regNum, varNum, 0); #endif // !TARGET_LOONGARCH64 @@ -5342,7 +5346,7 @@ void CodeGen::genFinalizeFrame() } noway_assert((regSet.rsGetModifiedRegsMask() & ~okRegs) == 0); #else // !TARGET_AMD64 && !TARGET_ARM64 - // On x86 we save all callee saved regs so the saved reg area size is consistent + // On x86 we save all callee saved regs so the saved reg area size is consistent regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED & ~RBM_FPBASE); #endif // !TARGET_AMD64 && !TARGET_ARM64 } @@ -6071,7 +6075,7 @@ void CodeGen::genFnProlog() } #endif // TARGET_AMD64 -//------------------------------------------------------------------------- + //------------------------------------------------------------------------- #ifdef TARGET_ARM if (compiler->compLocallocUsed) @@ -6097,11 +6101,11 @@ void CodeGen::genFnProlog() #endif // TARGET_AMD64 compiler->unwindEndProlog(); -//------------------------------------------------------------------------- -// -// This is the end of the OS-reported prolog for purposes of unwinding -// -//------------------------------------------------------------------------- + //------------------------------------------------------------------------- + // + // This is the end of the OS-reported prolog for purposes of unwinding + // + //------------------------------------------------------------------------- #ifdef TARGET_ARM if (needToEstablishFP) @@ -8488,7 +8492,7 @@ void CodeGen::genPoisonFrame(regMaskTP regLiveIn) bool fpBased; int addr = compiler->lvaFrameAddress((int)varNum, &fpBased); #else - int addr = 0; + int addr = 0; #endif int end = addr + (int)size; for (int offs = addr; offs < end;) diff --git a/src/coreclr/jit/codegeninterface.h b/src/coreclr/jit/codegeninterface.h index 63954adc6ffbb3..ef87ccca858702 100644 --- 
a/src/coreclr/jit/codegeninterface.h +++ b/src/coreclr/jit/codegeninterface.h @@ -165,8 +165,8 @@ class CodeGenInterface TreeLifeUpdater* treeLifeUpdater; public: - bool genUseOptimizedWriteBarriers(GCInfo::WriteBarrierForm wbf); - bool genUseOptimizedWriteBarriers(GenTreeStoreInd* store); + bool genUseOptimizedWriteBarriers(GCInfo::WriteBarrierForm wbf); + bool genUseOptimizedWriteBarriers(GenTreeStoreInd* store); CorInfoHelpFunc genWriteBarrierHelperForWriteBarrierForm(GCInfo::WriteBarrierForm wbf); #ifdef DEBUG @@ -442,7 +442,8 @@ class CodeGenInterface { siVarLocType vlType; - union { + union + { // VLT_REG/VLT_REG_FP -- Any pointer-sized enregistered value (TYP_INT, TYP_REF, etc) // eg. EAX // VLT_REG_BYREF -- the specified register contains the address of the variable @@ -627,7 +628,9 @@ class CodeGenInterface VariableLiveRange(CodeGenInterface::siVarLoc varLocation, emitLocation startEmitLocation, emitLocation endEmitLocation) - : m_StartEmitLocation(startEmitLocation), m_EndEmitLocation(endEmitLocation), m_VarLocation(varLocation) + : m_StartEmitLocation(startEmitLocation) + , m_EndEmitLocation(endEmitLocation) + , m_VarLocation(varLocation) { } @@ -675,7 +678,8 @@ class CodeGenInterface public: LiveRangeDumper(const LiveRangeList* liveRanges) - : m_startingLiveRange(liveRanges->end()), m_hasLiveRangesToDump(false){}; + : m_startingLiveRange(liveRanges->end()) + , m_hasLiveRangesToDump(false){}; // Make the dumper point to the last "VariableLiveRange" opened or nullptr if all are closed void resetDumper(const LiveRangeList* list); @@ -756,7 +760,7 @@ class CodeGenInterface LiveRangeList* getLiveRangesForVarForBody(unsigned int varNum) const; LiveRangeList* getLiveRangesForVarForProlog(unsigned int varNum) const; - size_t getLiveRangesCount() const; + size_t getLiveRangesCount() const; // For parameters locations on prolog void psiStartVariableLiveRange(CodeGenInterface::siVarLoc varLocation, unsigned int varNum); diff --git 
a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index 5e05f1b0819829..2d8a2093454f86 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -157,7 +157,8 @@ void CodeGen::genCodeForBBlist() genMarkLabelsForCodegen(); assert(!compiler->fgFirstBBScratch || - compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first. + compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch + // has to be first. /* Initialize structures used in the block list iteration */ genInitialize(); @@ -622,7 +623,7 @@ void CodeGen::genCodeForBBlist() case BBJ_THROW: case BBJ_CALLFINALLY: case BBJ_EHCATCHRET: - // We're going to generate more code below anyway, so no need for the NOP. + // We're going to generate more code below anyway, so no need for the NOP. case BBJ_RETURN: case BBJ_EHFINALLYRET: @@ -633,7 +634,7 @@ void CodeGen::genCodeForBBlist() case BBJ_COND: case BBJ_SWITCH: - // These can't have a call as the last instruction! + // These can't have a call as the last instruction! default: noway_assert(!"Unexpected bbKind"); @@ -1868,8 +1869,8 @@ void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk, unsigned outArg var_types type = use.GetType(); unsigned thisFieldOffset = argOffset + use.GetOffset(); -// Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing -// argument area. + // Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing + // argument area. 
#if defined(FEATURE_SIMD) if (type == TYP_SIMD12) diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index a99199aedc634c..ec27d2ff8ab4da 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1225,9 +1225,9 @@ void CodeGen::genFnEpilog(BasicBlock* block) #if !FEATURE_FASTTAILCALL noway_assert(jmpNode->gtOper == GT_JMP); #else // FEATURE_FASTTAILCALL - // armarch - // If jmpNode is GT_JMP then gtNext must be null. - // If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts. + // armarch + // If jmpNode is GT_JMP then gtNext must be null. + // If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts. noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr)); // Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp @@ -1573,9 +1573,9 @@ void CodeGen::genEHCatchRet(BasicBlock* block) } // move an immediate value into an integer register -void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, - regNumber reg, - ssize_t imm, +void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, + regNumber reg, + ssize_t imm, insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { emitter* emit = GetEmitter(); @@ -3334,7 +3334,7 @@ void CodeGen::genCodeForReturnTrap(GenTreeOp* tree) callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC); regSet.verifyRegistersUsed(killMask); @@ -4398,7 +4398,7 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper); regSet.verifyRegistersUsed(killMask); @@ -6648,19 +6648,19 @@ void CodeGen::genCallInstruction(GenTreeCall* call) else #endif 
// FEATURE_READYTORUN if (call->gtCallType == CT_HELPER) - { - CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd); - noway_assert(helperNum != CORINFO_HELP_UNDEF); + { + CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd); + noway_assert(helperNum != CORINFO_HELP_UNDEF); - void* pAddr = nullptr; - addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr); - assert(pAddr == nullptr); - } - else - { - // Direct call to a non-virtual user function. - addr = call->gtDirectCallAddress; - } + void* pAddr = nullptr; + addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr); + assert(pAddr == nullptr); + } + else + { + // Direct call to a non-virtual user function. + addr = call->gtDirectCallAddress; + } assert(addr != nullptr); @@ -7093,8 +7093,8 @@ void CodeGen::genFloatToFloatCast(GenTree* treeNode) //------------------------------------------------------------------------ // genCreateAndStoreGCInfo: Create and record GC Info for the function. // -void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, - unsigned prologSize, +void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, + unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)) { IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC()); @@ -7615,7 +7615,7 @@ inline void CodeGen::genJumpToThrowHlpBlk_la( callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)(compiler->acdHelper(codeKind))); regSet.verifyRegistersUsed(killMask); diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 87745fabe3e04b..1d48582c6c316e 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -1212,9 +1212,9 @@ void CodeGen::genFnEpilog(BasicBlock* block) #if !FEATURE_FASTTAILCALL noway_assert(jmpNode->gtOper == GT_JMP); #else // FEATURE_FASTTAILCALL - // armarch - // If jmpNode is 
GT_JMP then gtNext must be null. - // If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts. + // armarch + // If jmpNode is GT_JMP then gtNext must be null. + // If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts. noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr)); // Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp @@ -1534,9 +1534,9 @@ void CodeGen::genEHCatchRet(BasicBlock* block) } // move an immediate value into an integer register -void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, - regNumber reg, - ssize_t imm, +void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, + regNumber reg, + ssize_t imm, insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { emitter* emit = GetEmitter(); @@ -3338,7 +3338,7 @@ void CodeGen::genCodeForReturnTrap(GenTreeOp* tree) callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC); regSet.verifyRegistersUsed(killMask); @@ -4359,7 +4359,7 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper); regSet.verifyRegistersUsed(killMask); @@ -6724,19 +6724,19 @@ void CodeGen::genCallInstruction(GenTreeCall* call) else #endif // FEATURE_READYTORUN if (call->gtCallType == CT_HELPER) - { - CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd); - noway_assert(helperNum != CORINFO_HELP_UNDEF); + { + CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd); + noway_assert(helperNum != CORINFO_HELP_UNDEF); - void* pAddr = nullptr; - addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr); - assert(pAddr == nullptr); - } - else - { - // Direct call to a non-virtual user function. 
- addr = call->gtDirectCallAddress; - } + void* pAddr = nullptr; + addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr); + assert(pAddr == nullptr); + } + else + { + // Direct call to a non-virtual user function. + addr = call->gtDirectCallAddress; + } assert(addr != nullptr); @@ -7163,8 +7163,8 @@ void CodeGen::genFloatToFloatCast(GenTree* treeNode) //------------------------------------------------------------------------ // genCreateAndStoreGCInfo: Create and record GC Info for the function. // -void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, - unsigned prologSize, +void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, + unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)) { IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC()); @@ -7683,7 +7683,7 @@ void CodeGen::genJumpToThrowHlpBlk_la( callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)(compiler->acdHelper(codeKind))); regSet.verifyRegistersUsed(killMask); diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 314346300b0056..ede5df1bea39d1 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -156,9 +156,9 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) regGSCheck = REG_EAX; regMaskGSCheck = RBM_EAX; #else // !TARGET_X86 - // Jmp calls: specify method handle using which JIT queries VM for its entry point - // address and hence it can neither be a VSD call nor PInvoke calli with cookie - // parameter. Therefore, in case of jmp calls it is safe to use R11. + // Jmp calls: specify method handle using which JIT queries VM for its entry point + // address and hence it can neither be a VSD call nor PInvoke calli with cookie + // parameter. Therefore, in case of jmp calls it is safe to use R11. 
regGSCheck = REG_R11; #endif // !TARGET_X86 } @@ -387,9 +387,9 @@ void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block) // Move an immediate value into an integer register -void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, - regNumber reg, - ssize_t imm, +void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, + regNumber reg, + ssize_t imm, insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { // reg cannot be a FP register @@ -2158,8 +2158,8 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) unsigned filterEndOffsetSlotOffs; - PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) > - TARGET_POINTER_SIZE); // below doesn't underflow. + PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) > TARGET_POINTER_SIZE); // below doesn't + // underflow. filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE); @@ -6093,13 +6093,13 @@ void CodeGen::genCall(GenTreeCall* call) else #endif // TARGET_X86 if (varTypeIsFloating(returnType)) - { - returnReg = REG_FLOATRET; - } - else - { - returnReg = REG_INTRET; - } + { + returnReg = REG_FLOATRET; + } + else + { + returnReg = REG_INTRET; + } inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ true); } @@ -8040,8 +8040,8 @@ void CodeGen::genIntrinsic(GenTreeIntrinsic* treeNode) } #if defined(FEATURE_SIMD) - // The handling is a bit more complex so genSimdUpperSave/Restore - // handles genConsumeOperands and genProduceReg + // The handling is a bit more complex so genSimdUpperSave/Restore + // handles genConsumeOperands and genProduceReg case NI_SIMD_UpperRestore: { @@ -8111,7 +8111,7 @@ unsigned CodeGen::getBaseVarForPutArgStk(GenTree* treeNode) #ifdef UNIX_AMD64_ABI assert(!varDsc->lvIsRegArg && varDsc->GetArgReg() == REG_STK); #else // !UNIX_AMD64_ABI - // On Windows this assert is always true. 
The first argument will always be in REG_ARG_0 or REG_FLTARG_0. + // On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0. assert(varDsc->lvIsRegArg && (varDsc->GetArgReg() == REG_ARG_0 || varDsc->GetArgReg() == REG_FLTARG_0)); #endif // !UNIX_AMD64_ABI #endif // !DEBUG @@ -8584,7 +8584,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk) unsigned argOffset = putArgStk->getArgOffset(); #ifdef DEBUG - CallArg* callArg = putArgStk->gtCall->gtArgs.FindByNode(putArgStk); + CallArg* callArg = putArgStk->gtCall->gtArgs.FindByNode(putArgStk); assert(callArg != nullptr); assert(argOffset == callArg->AbiInfo.ByteOffset); #endif @@ -8837,8 +8837,8 @@ CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigne } #ifdef JIT32_GCENCODER -void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, - unsigned prologSize, +void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, + unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)) { BYTE headerBuf[64]; @@ -9240,8 +9240,8 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed) unsigned saveStackLvl2 = genStackLevel; -// Important note: when you change enter probe layout, you must also update SKIP_ENTER_PROF_CALLBACK() -// for x86 stack unwinding + // Important note: when you change enter probe layout, you must also update SKIP_ENTER_PROF_CALLBACK() + // for x86 stack unwinding #if defined(UNIX_X86_ABI) // Manually align the stack to be 16-byte aligned. This is similar to CodeGen::genAlignStackBeforeCall() @@ -10203,7 +10203,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) // do an LEA to "pop off" the frame allocation. needLea = true; #else // !TARGET_AMD64 - // We will just generate "mov esp, ebp" and be done with it. + // We will just generate "mov esp, ebp" and be done with it. 
needMovEspEbp = true; #endif // !TARGET_AMD64 } @@ -10949,8 +10949,8 @@ void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNu assert(i == alignmentLoBlkSize); } #else // !defined(TARGET_AMD64) - // While we aren't aligning the start, we still want to - // zero anything that is not in a 16 byte chunk at end + // While we aren't aligning the start, we still want to + // zero anything that is not in a 16 byte chunk at end int alignmentBlkSize = blkSize & -XMM_REGSIZE_BYTES; int alignmentHiBlkSize = blkSize - alignmentBlkSize; int alignedLclHi = untrLclLo + alignmentBlkSize; @@ -11129,8 +11129,8 @@ void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize) assert((offset % 16) == 0); instruction copyIns = ins_Copy(TYP_FLOAT); #else // !TARGET_AMD64 - unsigned offset = lclFrameSize - XMM_REGSIZE_BYTES; - instruction copyIns = INS_movupd; + unsigned offset = lclFrameSize - XMM_REGSIZE_BYTES; + instruction copyIns = INS_movupd; #endif // !TARGET_AMD64 for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg)) diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 0e75f2073dc5e0..ef45fa5ca81ab8 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -645,11 +645,11 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, // have a struct that is larger than that. 
// if (structSize <= MAX_PASS_SINGLEREG_BYTES) - { - // We set the "primitive" useType based upon the structSize - // and also examine the clsHnd to see if it is an HFA of count one - useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg); - } + { + // We set the "primitive" useType based upon the structSize + // and also examine the clsHnd to see if it is an HFA of count one + useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg); + } #else if (isTrivialPointerSizedStruct(clsHnd)) { @@ -1157,11 +1157,15 @@ struct FileLine unsigned m_line; char* m_condStr; - FileLine() : m_file(nullptr), m_line(0), m_condStr(nullptr) + FileLine() + : m_file(nullptr) + , m_line(0) + , m_condStr(nullptr) { } - FileLine(const char* file, unsigned line, const char* condStr) : m_line(line) + FileLine(const char* file, unsigned line, const char* condStr) + : m_line(line) { size_t newSize = (strlen(file) + 1) * sizeof(char); m_file = HostAllocator::getHostAllocator().allocate(newSize); @@ -1200,7 +1204,7 @@ struct FileLine }; typedef JitHashTable FileLineToCountMap; -FileLineToCountMap* NowayAssertMap; +FileLineToCountMap* NowayAssertMap; void Compiler::RecordNowayAssert(const char* filename, unsigned line, const char* condStr) { @@ -1233,7 +1237,8 @@ struct NowayAssertCountMap size_t count; FileLine fl; - NowayAssertCountMap() : count(0) + NowayAssertCountMap() + : count(0) { } @@ -2026,8 +2031,8 @@ void Compiler::compDone() #endif // LATE_DISASM } -void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ - void** ppIndirection) /* OUT */ +void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ + void** ppIndirection) /* OUT */ { void* addr; @@ -3406,11 +3411,10 @@ void Compiler::compInitOptions(JitFlags* jitFlags) printf("OPTIONS: OSR variant with entry point 0x%x\n", info.compILEntry); } - printf("OPTIONS: compCodeOpt = %s\n", - (opts.compCodeOpt == BLENDED_CODE) - ? "BLENDED_CODE" - : (opts.compCodeOpt == SMALL_CODE) ? 
"SMALL_CODE" - : (opts.compCodeOpt == FAST_CODE) ? "FAST_CODE" : "UNKNOWN_CODE"); + printf("OPTIONS: compCodeOpt = %s\n", (opts.compCodeOpt == BLENDED_CODE) ? "BLENDED_CODE" + : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE" + : (opts.compCodeOpt == FAST_CODE) ? "FAST_CODE" + : "UNKNOWN_CODE"); printf("OPTIONS: compDbgCode = %s\n", dspBool(opts.compDbgCode)); printf("OPTIONS: compDbgInfo = %s\n", dspBool(opts.compDbgInfo)); @@ -4011,8 +4015,9 @@ void Compiler::compSetOptimizationLevel() } if (theMinOptsValue == true) { - JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count " - "%3d,%3d for method %s\n", + JITLOG((LL_INFO10000, + "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count " + "%3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); if (JitConfig.JitBreakOnMinOpts() != 0) { @@ -4793,7 +4798,9 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl { // Tail merge // - DoPhase(this, PHASE_HEAD_TAIL_MERGE, [this]() { return fgHeadTailMerge(true); }); + DoPhase(this, PHASE_HEAD_TAIL_MERGE, [this]() { + return fgHeadTailMerge(true); + }); // Merge common throw blocks // @@ -4864,7 +4871,6 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl DoPhase(this, PHASE_MORPH_GLOBAL, &Compiler::fgMorphBlocks); auto postMorphPhase = [this]() { - // Fix any LclVar annotations on discarded struct promotion temps for implicit by-ref args fgMarkDemotedImplicitByRefArgs(); lvaRefCountState = RCS_INVALID; @@ -4919,7 +4925,9 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl // Second pass of tail merge // - DoPhase(this, PHASE_HEAD_TAIL_MERGE2, [this]() { return fgHeadTailMerge(false); }); + DoPhase(this, PHASE_HEAD_TAIL_MERGE2, [this]() { + return fgHeadTailMerge(false); + }); // Canonicalize entry to give a unique dominator tree root // @@ 
-5274,7 +5282,9 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl // Now that lowering is completed we can proceed to perform register allocation // - auto linearScanPhase = [this]() { m_pLinearScan->doLinearScan(); }; + auto linearScanPhase = [this]() { + m_pLinearScan->doLinearScan(); + }; DoPhase(this, PHASE_LINEAR_SCAN, linearScanPhase); // Copied from rpPredictRegUse() @@ -6171,12 +6181,12 @@ int Compiler::compCompile(CORINFO_MODULE_HANDLE classPtr, // We need to assume, by default, that all flags coming from the VM are invalid. instructionSetFlags.Reset(); -// We then add each available instruction set for the target architecture provided -// that the corresponding JitConfig switch hasn't explicitly asked for it to be -// disabled. This allows us to default to "everything" supported for altjit scenarios -// while also still allowing instruction set opt-out providing users with the ability -// to, for example, see and debug ARM64 codegen for any desired CPU configuration without -// needing to have the hardware in question. + // We then add each available instruction set for the target architecture provided + // that the corresponding JitConfig switch hasn't explicitly asked for it to be + // disabled. This allows us to default to "everything" supported for altjit scenarios + // while also still allowing instruction set opt-out providing users with the ability + // to, for example, see and debug ARM64 codegen for any desired CPU configuration without + // needing to have the hardware in question. 
#if defined(TARGET_ARM64) if (JitConfig.EnableHWIntrinsic() != 0) @@ -7949,112 +7959,105 @@ int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd, #endif param.result = result; - setErrorTrap(compHnd, Param*, pParamOuter, ¶m) - { - setErrorTrap(nullptr, Param*, pParam, pParamOuter) - { - if (pParam->inlineInfo) - { - // Lazily create the inlinee compiler object - if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == nullptr) - { - pParam->inlineInfo->InlinerCompiler->InlineeCompiler = - (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); - } + setErrorTrap(compHnd, Param*, pParamOuter, ¶m){setErrorTrap(nullptr, Param*, pParam, pParamOuter){ + if (pParam->inlineInfo){// Lazily create the inlinee compiler object + if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == nullptr){ + pParam->inlineInfo->InlinerCompiler->InlineeCompiler = + (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); +} - // Use the inlinee compiler object - pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler; +// Use the inlinee compiler object +pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler; #ifdef DEBUG // memset(pParam->pComp, 0xEE, sizeof(Compiler)); #endif - } - else - { - // Allocate create the inliner compiler object - pParam->pComp = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); - } +} +else +{ + // Allocate create the inliner compiler object + pParam->pComp = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); +} #if MEASURE_CLRAPI_CALLS - pParam->wrapCLR = WrapICorJitInfo::makeOne(pParam->pAlloc, pParam->pComp, pParam->compHnd); +pParam->wrapCLR = WrapICorJitInfo::makeOne(pParam->pAlloc, pParam->pComp, pParam->compHnd); #endif - // push this compiler on the stack (TLS) - pParam->pComp->prevCompiler = JitTls::GetCompiler(); - JitTls::SetCompiler(pParam->pComp); +// push this compiler on the stack (TLS) +pParam->pComp->prevCompiler = 
JitTls::GetCompiler(); +JitTls::SetCompiler(pParam->pComp); // PREFIX_ASSUME gets turned into ASSERT_CHECK and we cannot have it here #if defined(_PREFAST_) || defined(_PREFIX_) - PREFIX_ASSUME(pParam->pComp != NULL); +PREFIX_ASSUME(pParam->pComp != NULL); #else - assert(pParam->pComp != nullptr); +assert(pParam->pComp != nullptr); #endif - pParam->pComp->compInit(pParam->pAlloc, pParam->methodHnd, pParam->compHnd, pParam->methodInfo, - pParam->inlineInfo); +pParam->pComp->compInit(pParam->pAlloc, pParam->methodHnd, pParam->compHnd, pParam->methodInfo, pParam->inlineInfo); #ifdef DEBUG - pParam->pComp->jitFallbackCompile = pParam->jitFallbackCompile; +pParam->pComp->jitFallbackCompile = pParam->jitFallbackCompile; #endif - // Now generate the code - pParam->result = pParam->pComp->compCompile(pParam->classPtr, pParam->methodCodePtr, pParam->methodCodeSize, - pParam->compileFlags); - } - finallyErrorTrap() - { - Compiler* pCompiler = pParamOuter->pComp; +// Now generate the code +pParam->result = + pParam->pComp->compCompile(pParam->classPtr, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); +} +finallyErrorTrap() +{ + Compiler* pCompiler = pParamOuter->pComp; - // If OOM is thrown when allocating memory for a pComp, we will end up here. - // For this case, pComp and also pCompiler will be a nullptr - // - if (pCompiler != nullptr) - { - pCompiler->info.compCode = nullptr; + // If OOM is thrown when allocating memory for a pComp, we will end up here. 
+ // For this case, pComp and also pCompiler will be a nullptr + // + if (pCompiler != nullptr) + { + pCompiler->info.compCode = nullptr; - // pop the compiler off the TLS stack only if it was linked above - assert(JitTls::GetCompiler() == pCompiler); - JitTls::SetCompiler(pCompiler->prevCompiler); - } + // pop the compiler off the TLS stack only if it was linked above + assert(JitTls::GetCompiler() == pCompiler); + JitTls::SetCompiler(pCompiler->prevCompiler); + } - if (pParamOuter->inlineInfo == nullptr) - { - // Free up the allocator we were using - pParamOuter->pAlloc->destroy(); - } - } - endErrorTrap() + if (pParamOuter->inlineInfo == nullptr) + { + // Free up the allocator we were using + pParamOuter->pAlloc->destroy(); } - impJitErrorTrap() +} +endErrorTrap() +} +impJitErrorTrap() +{ + // If we were looking at an inlinee.... + if (inlineInfo != nullptr) { - // If we were looking at an inlinee.... - if (inlineInfo != nullptr) - { - // Note that we failed to compile the inlinee, and that - // there's no point trying to inline it again anywhere else. - inlineInfo->inlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); - } - param.result = __errc; + // Note that we failed to compile the inlinee, and that + // there's no point trying to inline it again anywhere else. + inlineInfo->inlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); } - endErrorTrap() + param.result = __errc; +} +endErrorTrap() - result = param.result; + result = param.result; - if (!inlineInfo && - (result == CORJIT_INTERNALERROR || result == CORJIT_RECOVERABLEERROR || result == CORJIT_IMPLLIMITATION) && - !jitFallbackCompile) - { - // If we failed the JIT, reattempt with debuggable code. - jitFallbackCompile = true; +if (!inlineInfo && + (result == CORJIT_INTERNALERROR || result == CORJIT_RECOVERABLEERROR || result == CORJIT_IMPLLIMITATION) && + !jitFallbackCompile) +{ + // If we failed the JIT, reattempt with debuggable code. 
+ jitFallbackCompile = true; - // Update the flags for 'safer' code generation. - compileFlags->Set(JitFlags::JIT_FLAG_MIN_OPT); - compileFlags->Clear(JitFlags::JIT_FLAG_SIZE_OPT); - compileFlags->Clear(JitFlags::JIT_FLAG_SPEED_OPT); + // Update the flags for 'safer' code generation. + compileFlags->Set(JitFlags::JIT_FLAG_MIN_OPT); + compileFlags->Clear(JitFlags::JIT_FLAG_SIZE_OPT); + compileFlags->Clear(JitFlags::JIT_FLAG_SPEED_OPT); - goto START; - } + goto START; +} - return result; +return result; } #if defined(UNIX_AMD64_ABI) @@ -8805,8 +8808,9 @@ void CompTimeSummaryInfo::Print(FILE* f) double pslop_pct = 100.0 * m_total.m_parentPhaseEndSlop * 1000.0 / countsPerSec / totTime_ms; if (pslop_pct >= 1.0) { - fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " - "%3.1f%% of total.\n\n", + fprintf(f, + "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " + "%3.1f%% of total.\n\n", m_total.m_parentPhaseEndSlop / 1000000.0, pslop_pct); } } @@ -8846,8 +8850,9 @@ void CompTimeSummaryInfo::Print(FILE* f) double fslop_ms = m_filtered.m_parentPhaseEndSlop * 1000.0 / countsPerSec; if (fslop_ms > 1.0) { - fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " - "%3.1f%% of total.\n\n", + fprintf(f, + "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " + "%3.1f%% of total.\n\n", m_filtered.m_parentPhaseEndSlop / 1000000.0, fslop_ms); } } @@ -8945,7 +8950,8 @@ void CompTimeSummaryInfo::Print(FILE* f) fprintf(f, "\n"); } -JitTimer::JitTimer(unsigned byteCodeSize) : m_info(byteCodeSize) +JitTimer::JitTimer(unsigned byteCodeSize) + : m_info(byteCodeSize) { #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; @@ -9197,7 +9203,7 @@ void JitTimer::PrintCsvMethodStats(Compiler* comp) // for a DEBUG build (presumably not for the time info), just re-use it. 
const char* methName = comp->info.compFullName; #else - const char* methName = comp->eeGetMethodFullName(comp->info.compMethodHnd); + const char* methName = comp->eeGetMethodFullName(comp->info.compMethodHnd); #endif // Try and access the SPMI index to report in the data set. diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index ee89502ce9b62d..539629086f747a 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -234,11 +234,13 @@ class LclSsaVarDsc { } - LclSsaVarDsc(BasicBlock* block) : m_block(block) + LclSsaVarDsc(BasicBlock* block) + : m_block(block) { } - LclSsaVarDsc(BasicBlock* block, GenTreeLclVarCommon* defNode) : m_block(block) + LclSsaVarDsc(BasicBlock* block, GenTreeLclVarCommon* defNode) + : m_block(block) { SetDefNode(defNode); } @@ -363,7 +365,10 @@ class SsaDefArray public: // Construct an empty SsaDefArray. - SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0) + SsaDefArray() + : m_array(nullptr) + , m_arraySize(0) + , m_count(0) { } @@ -503,11 +508,11 @@ class LclVarDsc // note this only packs because var_types is a typedef of unsigned char var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF - unsigned char lvIsParam : 1; // is this a parameter? - unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? + unsigned char lvIsParam : 1; // is this a parameter? + unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP) - unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame + unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the // variable is in the same register for the entire function. unsigned char lvTracked : 1; // is this a tracked variable? 
@@ -529,16 +534,16 @@ class LclVarDsc // We cannot reason reliably about the value of the variable. public: unsigned char lvDoNotEnregister : 1; // Do not enregister this variable. - unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects + unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects // struct promotion. unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must // be on the stack (at least at those boundaries.) - unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder) - unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable. + unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder) + unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable. unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local. - unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local + unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local unsigned char lvIsTemp : 1; // Short-lifetime compiler temp @@ -553,13 +558,13 @@ class LclVarDsc #if defined(TARGET_LOONGARCH64) unsigned char lvIs4Field1 : 1; // Set if the 1st field is int or float within struct for LA-ABI64. unsigned char lvIs4Field2 : 1; // Set if the 2nd field is int or float within struct for LA-ABI64. - unsigned char lvIsSplit : 1; // Set if the argument is splited. + unsigned char lvIsSplit : 1; // Set if the argument is splited. #endif // defined(TARGET_LOONGARCH64) #if defined(TARGET_RISCV64) unsigned char lvIs4Field1 : 1; // Set if the 1st field is int or float within struct for RISCV64. unsigned char lvIs4Field2 : 1; // Set if the 2nd field is int or float within struct for RISCV64. 
- unsigned char lvIsSplit : 1; // Set if the argument is splited. + unsigned char lvIsSplit : 1; // Set if the argument is splited. #endif // defined(TARGET_RISCV64) unsigned char lvSingleDef : 1; // variable has a single def. Used to identify ref type locals that can get type @@ -588,7 +593,7 @@ class LclVarDsc unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long #endif #ifdef DEBUG - unsigned char lvKeepType : 1; // Don't change the type of this variable + unsigned char lvKeepType : 1; // Don't change the type of this variable unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one #endif unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security @@ -643,8 +648,8 @@ class LclVarDsc #ifdef DEBUG unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness - unsigned char lvIsHoist : 1; // CSE temp for a hoisted tree - unsigned char lvIsMultiDefCSE : 1; // CSE temp for a multi-def CSE + unsigned char lvIsHoist : 1; // CSE temp for a hoisted tree + unsigned char lvIsMultiDefCSE : 1; // CSE temp for a multi-def CSE #endif unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc, @@ -669,7 +674,8 @@ class LclVarDsc unsigned char lvIsSpan : 1; // The local is a Span public: - union { + union + { unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct // local. 
For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the @@ -889,7 +895,7 @@ class LclVarDsc assert(_lvRegNum == reg); } -///////////////////// + ///////////////////// #if defined(TARGET_64BIT) @@ -1075,13 +1081,13 @@ class LclVarDsc public: unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const; - void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); - void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); - void incLvRefCntSaturating(unsigned short delta, RefCountState state = RCS_NORMAL); + void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); + void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); + void incLvRefCntSaturating(unsigned short delta, RefCountState state = RCS_NORMAL); weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const; - void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); - void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); + void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); + void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); private: int lvStkOffs; // stack offset of home in bytes. 
@@ -1334,7 +1340,8 @@ class IntegralRange IntegralRange() = default; IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound) - : m_lowerBound(lowerBound), m_upperBound(upperBound) + : m_lowerBound(lowerBound) + , m_upperBound(upperBound) { assert(lowerBound <= upperBound); } @@ -1366,7 +1373,7 @@ class IntegralRange return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound); } - static int64_t SymbolicToRealValue(SymbolicIntegerValue value); + static int64_t SymbolicToRealValue(SymbolicIntegerValue value); static SymbolicIntegerValue LowerBoundForType(var_types type); static SymbolicIntegerValue UpperBoundForType(var_types type); @@ -1422,7 +1429,10 @@ class TempDsc var_types tdType; public: - TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType) + TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) + : tdNum(_tdNum) + , tdSize((BYTE)_tdSize) + , tdType(_tdType) { #ifdef DEBUG // temps must have a negative number (so they have a different number from all local variables) @@ -1486,9 +1496,9 @@ enum class PhaseStatus : unsigned class LinearScanInterface { public: - virtual PhaseStatus doLinearScan() = 0; - virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; - virtual bool willEnregisterLocalVars() const = 0; + virtual PhaseStatus doLinearScan() = 0; + virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; + virtual bool willEnregisterLocalVars() const = 0; #if TRACK_LSRA_STATS virtual void dumpLsraStatsCsv(FILE* file) = 0; virtual void dumpLsraStatsSummary(FILE* file) = 0; @@ -7436,23 +7446,23 @@ class Compiler typedef JitHashTable, GenTree*> LocalNumberToNullCheckTreeMap; - GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); - GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); - GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind 
valueKind); - GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); - bool optDoEarlyPropForBlock(BasicBlock* block); + GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); + GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); + GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); + GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); + bool optDoEarlyPropForBlock(BasicBlock* block); bool optDoEarlyPropForFunc(); PhaseStatus optEarlyProp(); - bool optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); - GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); - bool optIsNullCheckFoldingLegal(GenTree* tree, - GenTree* nullCheckTree, - GenTree** nullCheckParent, - Statement** nullCheckStmt); - bool optCanMoveNullCheckPastTree(GenTree* tree, - unsigned nullCheckLclNum, - bool isInsideTry, - bool checkSideEffectSummary); + bool optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); + GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); + bool optIsNullCheckFoldingLegal(GenTree* tree, + GenTree* nullCheckTree, + GenTree** nullCheckParent, + Statement** nullCheckStmt); + bool optCanMoveNullCheckPastTree(GenTree* tree, + unsigned nullCheckLclNum, + bool isInsideTry, + bool checkSideEffectSummary); #if DEBUG void optCheckFlagsAreSet(unsigned methodFlag, const char* methodFlagStr, @@ -7463,30 +7473,30 @@ class Compiler #endif PhaseStatus optInductionVariables(); - bool optCanSinkWidenedIV(unsigned lclNum, FlowGraphNaturalLoop* loop); - bool optIsIVWideningProfitable(unsigned lclNum, - BasicBlock* initBlock, - bool initedToConstant, - FlowGraphNaturalLoop* loop, - ArrayStack& ivUses); - void optBestEffortReplaceNarrowIVUses( - unsigned lclNum, unsigned ssaNum, unsigned 
newLclNum, BasicBlock* block, Statement* firstStmt); + bool optCanSinkWidenedIV(unsigned lclNum, FlowGraphNaturalLoop* loop); + bool optIsIVWideningProfitable(unsigned lclNum, + BasicBlock* initBlock, + bool initedToConstant, + FlowGraphNaturalLoop* loop, + ArrayStack& ivUses); + void optBestEffortReplaceNarrowIVUses( + unsigned lclNum, unsigned ssaNum, unsigned newLclNum, BasicBlock* block, Statement* firstStmt); void optReplaceWidenedIV(unsigned lclNum, unsigned ssaNum, unsigned newLclNum, Statement* stmt); void optSinkWidenedIV(unsigned lclNum, unsigned newLclNum, FlowGraphNaturalLoop* loop); // Redundant branch opts // - PhaseStatus optRedundantBranches(); - bool optRedundantRelop(BasicBlock* const block); - bool optRedundantBranch(BasicBlock* const block); - bool optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); - bool optJumpThreadPhi(BasicBlock* const block, GenTree* tree, ValueNum treeNormVN); - bool optJumpThreadCheck(BasicBlock* const block, BasicBlock* const domBlock); - bool optJumpThreadCore(JumpThreadInfo& jti); - bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); + PhaseStatus optRedundantBranches(); + bool optRedundantRelop(BasicBlock* const block); + bool optRedundantBranch(BasicBlock* const block); + bool optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); + bool optJumpThreadPhi(BasicBlock* const block, GenTree* tree, ValueNum treeNormVN); + bool optJumpThreadCheck(BasicBlock* const block, BasicBlock* const domBlock); + bool optJumpThreadCore(JumpThreadInfo& jti); + bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); BitVecTraits* optReachableBitVecTraits; BitVec optReachableBitVec; - void optRelopImpliesRelop(RelopImplicationInfo* rii); + void optRelopImpliesRelop(RelopImplicationInfo* rii); 
/************************************************************************** * Value/Assertion propagation @@ -7553,7 +7563,8 @@ class Compiler { optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype ValueNum vn; - union { + union + { SsaVar lcl; ArrBnd bnd; }; @@ -7573,7 +7584,8 @@ class Compiler #endif FieldSeq* fieldSeq; }; - union { + union + { SsaVar lcl; IntVal u1; __int64 lconVal; @@ -7790,49 +7802,49 @@ class Compiler bool optCanPropSubRange; public: - void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); + void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); fgWalkResult optVNBasedFoldCurStmt(BasicBlock* block, Statement* stmt, GenTree* parent, GenTree* tree); - GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); - GenTree* optVNBasedFoldConstExpr(BasicBlock* block, GenTree* parent, GenTree* tree); - GenTree* optVNBasedFoldExpr(BasicBlock* block, GenTree* parent, GenTree* tree); - GenTree* optVNBasedFoldExpr_Call(BasicBlock* block, GenTree* parent, GenTreeCall* call); - GenTree* optExtractSideEffListFromConst(GenTree* tree); + GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); + GenTree* optVNBasedFoldConstExpr(BasicBlock* block, GenTree* parent, GenTree* tree); + GenTree* optVNBasedFoldExpr(BasicBlock* block, GenTree* parent, GenTree* tree); + GenTree* optVNBasedFoldExpr_Call(BasicBlock* block, GenTree* parent, GenTreeCall* call); + GenTree* optExtractSideEffListFromConst(GenTree* tree); AssertionIndex GetAssertionCount() { return optAssertionCount; } - ASSERT_TP* bbJtrueAssertionOut; + ASSERT_TP* bbJtrueAssertionOut; typedef JitHashTable, ASSERT_TP> ValueNumToAssertsMap; - ValueNumToAssertsMap* optValueNumToAsserts; + ValueNumToAssertsMap* optValueNumToAsserts; // Assertion prop helpers. 
- ASSERT_TP& GetAssertionDep(unsigned lclNum); + ASSERT_TP& GetAssertionDep(unsigned lclNum); AssertionDsc* optGetAssertion(AssertionIndex assertIndex); - void optAssertionInit(bool isLocalProp); - void optAssertionTraitsInit(AssertionIndex assertionCount); - void optAssertionReset(AssertionIndex limit); - void optAssertionRemove(AssertionIndex index); + void optAssertionInit(bool isLocalProp); + void optAssertionTraitsInit(AssertionIndex assertionCount); + void optAssertionReset(AssertionIndex limit); + void optAssertionRemove(AssertionIndex index); // Assertion prop data flow functions. PhaseStatus optAssertionPropMain(); - Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); - bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); - ASSERT_TP* optInitAssertionDataflowFlags(); - ASSERT_TP* optComputeAssertionGen(); + Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); + bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); + ASSERT_TP* optInitAssertionDataflowFlags(); + ASSERT_TP* optComputeAssertionGen(); // Assertion Gen functions. 
- void optAssertionGen(GenTree* tree); + void optAssertionGen(GenTree* tree); AssertionIndex optAssertionGenCast(GenTreeCast* cast); AssertionIndex optAssertionGenPhiDefn(GenTree* tree); - AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); - AssertionInfo optAssertionGenJtrue(GenTree* tree); + AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); + AssertionInfo optAssertionGenJtrue(GenTree* tree); AssertionIndex optCreateJtrueAssertions(GenTree* op1, GenTree* op2, Compiler::optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFindComplementary(AssertionIndex assertionIndex); - void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); + void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); ValueNum optConservativeNormalVN(GenTree* tree); @@ -7853,9 +7865,9 @@ class Compiler GenTree* op2, bool helperCallArgs = false); - bool optAssertionVnInvolvesNan(AssertionDsc* assertion); + bool optAssertionVnInvolvesNan(AssertionDsc* assertion); AssertionIndex optAddAssertion(AssertionDsc* assertion); - void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); + void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); #ifdef DEBUG void optPrintVnAssertionMapping(); #endif @@ -7865,8 +7877,8 @@ class Compiler AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)); - bool optAssertionIsNonNull(GenTree* op, - ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); + bool optAssertionIsNonNull(GenTree* op, + ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* 
op1, GenTree* op2); AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1); @@ -7874,15 +7886,15 @@ class Compiler optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions); // Assertion prop for lcl var functions. - bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc); + bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc); GenTree* optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, - Statement* stmt DEBUGARG(AssertionIndex index)); + Statement* stmt DEBUGARG(AssertionIndex index)); GenTree* optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, - Statement* stmt DEBUGARG(AssertionIndex index)); - bool optIsProfitableToSubstitute(GenTree* dest, BasicBlock* destBlock, GenTree* destParent, GenTree* value); - bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions); + Statement* stmt DEBUGARG(AssertionIndex index)); + bool optIsProfitableToSubstitute(GenTree* dest, BasicBlock* destBlock, GenTree* destParent, GenTree* value); + bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions); // Assertion propagation functions. 
GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block); @@ -7902,8 +7914,8 @@ class Compiler GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt); GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call); - bool optNonNullAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* indir); - bool optWriteBarrierAssertionProp_StoreInd(ASSERT_VALARG_TP assertions, GenTreeStoreInd* indir); + bool optNonNullAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* indir); + bool optWriteBarrierAssertionProp_StoreInd(ASSERT_VALARG_TP assertions, GenTreeStoreInd* indir); void optAssertionProp_RangeProperties(ASSERT_VALARG_TP assertions, GenTree* tree, @@ -7959,11 +7971,11 @@ class Compiler bool optReconstructArrIndex(GenTree* tree, ArrIndex* result); bool optIdentifyLoopOptInfo(FlowGraphNaturalLoop* loop, LoopCloneContext* context); static fgWalkPreFn optCanOptimizeByLoopCloningVisitor; - fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); - bool optObtainLoopCloningOpts(LoopCloneContext* context); - bool optIsLoopClonable(FlowGraphNaturalLoop* loop, LoopCloneContext* context); - bool optCheckLoopCloningGDVTestProfitable(GenTreeOp* guard, LoopCloneVisitorInfo* info); - bool optIsHandleOrIndirOfHandle(GenTree* tree, GenTreeFlags handleType); + fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); + bool optObtainLoopCloningOpts(LoopCloneContext* context); + bool optIsLoopClonable(FlowGraphNaturalLoop* loop, LoopCloneContext* context); + bool optCheckLoopCloningGDVTestProfitable(GenTreeOp* guard, LoopCloneVisitorInfo* info); + bool optIsHandleOrIndirOfHandle(GenTree* tree, GenTreeFlags handleType); static bool optLoopCloningEnabled(); @@ -8125,7 +8137,7 @@ class Compiler const char* 
eeGetClassName(CORINFO_CLASS_HANDLE clsHnd, char* buffer = nullptr, size_t bufferSize = 0); - void eePrintObjectDescription(const char* prefix, CORINFO_OBJECT_HANDLE handle); + void eePrintObjectDescription(const char* prefix, CORINFO_OBJECT_HANDLE handle); const char* eeGetShortClassName(CORINFO_CLASS_HANDLE clsHnd); #if defined(DEBUG) @@ -8134,12 +8146,12 @@ class Compiler unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle); - var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); - var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned); + var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); + var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned); CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list); CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context); - unsigned eeGetArgSize(CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd); - static unsigned eeGetArgSizeAlignment(var_types type, bool isFloatHfa); + unsigned eeGetArgSize(CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd); + static unsigned eeGetArgSizeAlignment(var_types type, bool isFloatHfa); // VOM info, method sigs @@ -8267,7 +8279,7 @@ class Compiler unsigned eeBoundariesCount; ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE - void eeSetLIcount(unsigned count); + void eeSetLIcount(unsigned count); void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc); void eeSetLIdone(); @@ -8275,7 +8287,7 @@ class Compiler static void eeDispILOffs(IL_OFFSET offs); static void eeDispSourceMappingOffs(uint32_t offs); static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line); - void eeDispLineInfos(); + void eeDispLineInfos(); #endif // DEBUG // Debugging support - Local var info @@ -8290,7 +8302,7 @@ class Compiler UNATIVE_OFFSET endOffset; DWORD 
varNumber; CodeGenInterface::siVarLoc loc; - } * eeVars; + }* eeVars; void eeSetLVcount(unsigned count); void eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, @@ -8324,7 +8336,7 @@ class Compiler WORD eeGetRelocTypeHint(void* target); -// ICorStaticInfo wrapper functions + // ICorStaticInfo wrapper functions #if defined(UNIX_AMD64_ABI) #ifdef DEBUG @@ -8353,7 +8365,11 @@ class Compiler template bool eeRunFunctorWithSPMIErrorTrap(Functor f) { - return eeRunWithSPMIErrorTrap([](Functor* pf) { (*pf)(); }, &f); + return eeRunWithSPMIErrorTrap( + [](Functor* pf) { + (*pf)(); + }, + &f); } bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param); @@ -8361,7 +8377,7 @@ class Compiler // Utility functions static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper); - static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); + static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); enum StaticHelperReturnValue { @@ -8412,7 +8428,7 @@ class Compiler // structure and IL offset is needed only when generating debuggable code. Therefore // it is desirable to avoid memory size penalty in retail scenarios. typedef JitHashTable, DebugInfo> CallSiteDebugInfoTable; - CallSiteDebugInfoTable* genCallSite2DebugInfoMap; + CallSiteDebugInfoTable* genCallSite2DebugInfoMap; unsigned genReturnLocal; // Local number for the return value when applicable. BasicBlock* genReturnBB; // jumped to when not optimizing for speed. 
@@ -8446,11 +8462,11 @@ class Compiler return codeGen->doDoubleAlign(); } DWORD getCanDoubleAlign(); - bool shouldDoubleAlign(unsigned refCntStk, - unsigned refCntReg, - weight_t refCntWtdReg, - unsigned refCntStkParam, - weight_t refCntWtdStkDbl); + bool shouldDoubleAlign(unsigned refCntStk, + unsigned refCntReg, + weight_t refCntWtdReg, + unsigned refCntStkParam, + weight_t refCntWtdStkDbl); #endif // DOUBLE_ALIGN bool IsFullPtrRegMapRequired() @@ -8462,7 +8478,7 @@ class Compiler codeGen->SetFullPtrRegMapRequired(value); } -// Things that MAY belong either in CodeGen or CodeGenContext + // Things that MAY belong either in CodeGen or CodeGenContext #if defined(FEATURE_EH_FUNCLETS) FuncInfoDsc* compFuncInfos; @@ -8495,7 +8511,7 @@ class Compiler #endif // !FEATURE_EH_FUNCLETS FuncInfoDsc* funCurrentFunc(); - void funSetCurrentFunc(unsigned funcIdx); + void funSetCurrentFunc(unsigned funcIdx); FuncInfoDsc* funGetFunc(unsigned funcIdx); unsigned int funGetFuncIdx(BasicBlock* block); @@ -8518,15 +8534,15 @@ class Compiler // not all JIT Helper calls follow the standard ABI on the target architecture. 
regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper); -/* -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -XX XX -XX UnwindInfo XX -XX XX -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -*/ + /* + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XX XX + XX UnwindInfo XX + XX XX + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + */ #if !defined(__GNUC__) #pragma region Unwind information @@ -8640,13 +8656,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #if defined(FEATURE_CFI_SUPPORT) short mapRegNumToDwarfReg(regNumber reg); - void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); - void unwindPushPopCFI(regNumber reg); - void unwindBegPrologCFI(); - void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); - void unwindAllocStackCFI(unsigned size); - void unwindSetFrameRegCFI(regNumber reg, unsigned offset); - void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); + void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); + void unwindPushPopCFI(regNumber reg); + void unwindBegPrologCFI(); + void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); + void unwindAllocStackCFI(unsigned size); + void unwindSetFrameRegCFI(regNumber reg, unsigned offset); + void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #ifdef DEBUG void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, @@ -8895,11 +8911,11 @@ 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX GenTree* impSIMDPopStack(); - void setLclRelatedToSIMDIntrinsic(GenTree* tree); - bool areFieldsContiguous(GenTreeIndir* op1, GenTreeIndir* op2); - bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second); - bool areArrayElementsContiguous(GenTree* op1, GenTree* op2); - bool areArgumentsContiguous(GenTree* op1, GenTree* op2); + void setLclRelatedToSIMDIntrinsic(GenTree* tree); + bool areFieldsContiguous(GenTreeIndir* op1, GenTreeIndir* op2); + bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second); + bool areArrayElementsContiguous(GenTree* op1, GenTree* op2); + bool areArgumentsContiguous(GenTree* op1, GenTree* op2); GenTree* CreateAddressNodeForSimdHWIntrinsicCreate(GenTree* tree, var_types simdBaseType, unsigned simdSize); // Get the size of the SIMD type in bytes @@ -9553,8 +9569,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set -// NOTE: These values are only reliable after -// the importing is completely finished. + // NOTE: These values are only reliable after + // the importing is completely finished. #ifdef DEBUG // State information - which phases have completed? @@ -9642,11 +9658,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX uint32_t preferredVectorByteLength; #endif // TARGET_XARCH -// optimize maximally and/or favor speed over size? + // optimize maximally and/or favor speed over size? 
-#define DEFAULT_MIN_OPTS_CODE_SIZE 60000 -#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000 -#define DEFAULT_MIN_OPTS_BB_COUNT 2000 +#define DEFAULT_MIN_OPTS_CODE_SIZE 60000 +#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000 +#define DEFAULT_MIN_OPTS_BB_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000 @@ -10067,7 +10083,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // DEBUG -// clang-format off + // clang-format off #define STRESS_MODES \ \ STRESS_MODE(NONE) \ @@ -10137,7 +10153,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX STRESS_MODES #undef STRESS_MODE }; -// clang-format on + // clang-format on #ifdef DEBUG static const LPCWSTR s_compStressModeNamesW[STRESS_COUNT + 1]; @@ -10147,8 +10163,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #define MAX_STRESS_WEIGHT 100 - bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); - bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); + bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); + bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); static unsigned compStressAreaHash(compStressArea area); #ifdef DEBUG @@ -10252,11 +10268,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // (2) the code is hot/cold split, and we issued less code than we expected // in the cold section (the hot section will always be padded out to compTotalHotCodeSize). - bool compIsStatic : 1; // Is the method static (no 'this' pointer)? - bool compIsVarArgs : 1; // Does the method have varargs parameters? - bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? 
- bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback - bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic + bool compIsStatic : 1; // Is the method static (no 'this' pointer)? + bool compIsVarArgs : 1; // Does the method have varargs parameters? + bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? + bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback + bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used. var_types compRetType; // Return type of the method as declared in IL (including SIMD normalization) @@ -10376,7 +10392,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX } } #endif // TARGET_ARM64 - // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. + // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. 
CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) @@ -10465,7 +10481,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX var_types TypeHandleToVarType(CORINFO_CLASS_HANDLE handle, ClassLayout** pLayout = nullptr); var_types TypeHandleToVarType(CorInfoType jitType, CORINFO_CLASS_HANDLE handle, ClassLayout** pLayout = nullptr); -//-------------------------- Global Compiler Data ------------------------------------ + //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG private: @@ -10573,8 +10589,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX //------------ Some utility functions -------------- - void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ - void** ppIndirection); /* OUT */ + void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ + void** ppIndirection); /* OUT */ // Several JIT/EE interface functions return a CorInfoType, and also return a // class handle as an out parameter if the type is a value class. 
Returns the @@ -10589,17 +10605,17 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void compDoComponentUnitTestsOnce(); #endif // DEBUG - int compCompile(CORINFO_MODULE_HANDLE classPtr, - void** methodCodePtr, - uint32_t* methodCodeSize, - JitFlags* compileFlags); + int compCompile(CORINFO_MODULE_HANDLE classPtr, + void** methodCodePtr, + uint32_t* methodCodeSize, + JitFlags* compileFlags); void compCompileFinish(); - int compCompileHelper(CORINFO_MODULE_HANDLE classPtr, - COMP_HANDLE compHnd, - CORINFO_METHOD_INFO* methodInfo, - void** methodCodePtr, - uint32_t* methodCodeSize, - JitFlags* compileFlag); + int compCompileHelper(CORINFO_MODULE_HANDLE classPtr, + COMP_HANDLE compHnd, + CORINFO_METHOD_INFO* methodInfo, + void** methodCodePtr, + uint32_t* methodCodeSize, + JitFlags* compileFlag); ArenaAllocator* compGetArenaAllocator(); @@ -10690,10 +10706,10 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX static unsigned char compGetJitDefaultFill(Compiler* comp); const char* compLocalVarName(unsigned varNum, unsigned offs); - VarName compVarName(regNumber reg, bool isFloatReg = false); + VarName compVarName(regNumber reg, bool isFloatReg = false); const char* compFPregVarName(unsigned fpReg, bool displayVar = false); - void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP); - void compDspSrcLinesByLineNum(unsigned line, bool seek = false); + void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP); + void compDspSrcLinesByLineNum(unsigned line, bool seek = false); #endif // DEBUG const char* compRegNameForSize(regNumber reg, size_t size); const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false); @@ -10865,8 +10881,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void verInitCurrentState(); void verResetCurrentState(BasicBlock* block, EntryState* currentState); - void verConvertBBToThrowVerificationException(BasicBlock* block 
DEBUGARG(bool logMsg)); - void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)); + void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)); + void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)); typeInfo verMakeTypeInfoForLocal(unsigned lclNum); typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo typeInfo verMakeTypeInfo(CorInfoType ciType, @@ -10963,8 +10979,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk -#define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined. - // This can be overwritten by setting DOTNET_JITInlineSize env variable. +#define DEFAULT_MAX_INLINE_SIZE \ + 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined. + // This can be overwritten by setting DOTNET_JITInlineSize env variable. 
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined @@ -11148,7 +11165,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // defined(UNIX_AMD64_ABI) - void fgMorphMultiregStructArgs(GenTreeCall* call); + void fgMorphMultiregStructArgs(GenTreeCall* call); GenTree* fgMorphMultiregStructArg(CallArg* arg); bool killGCRefs(GenTree* tree); @@ -11303,7 +11320,9 @@ class GenTreeVisitor Compiler* m_compiler; ArrayStack m_ancestors; - GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack)) + GenTreeVisitor(Compiler* compiler) + : m_compiler(compiler) + , m_ancestors(compiler->getAllocator(CMK_ArrayStack)) { assert(compiler != nullptr); @@ -11726,7 +11745,8 @@ class DomTreeVisitor protected: Compiler* m_compiler; - DomTreeVisitor(Compiler* compiler) : m_compiler(compiler) + DomTreeVisitor(Compiler* compiler) + : m_compiler(compiler) { } @@ -11815,7 +11835,8 @@ class EHClauses EHblkDsc* m_ehDsc; public: - iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc) + iterator(EHblkDsc* ehDsc) + : m_ehDsc(ehDsc) { } @@ -11837,7 +11858,9 @@ class EHClauses }; public: - EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount) + EHClauses(Compiler* comp) + : m_begin(comp->compHndBBtab) + , m_end(comp->compHndBBtab + comp->compHndBBtabCount) { assert((m_begin != nullptr) || (m_begin == m_end)); } @@ -11874,7 +11897,9 @@ class StringPrinter public: StringPrinter(CompAllocator alloc, char* buffer = nullptr, size_t bufferMax = 0) - : m_alloc(alloc), m_buffer(buffer), m_bufferMax(bufferMax) + : m_alloc(alloc) + , m_buffer(buffer) + , m_bufferMax(bufferMax) { if ((m_buffer == nullptr) || (m_bufferMax == 0)) { diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index daccf6027efd10..336c95f7527616 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -78,9 +78,9 @@ 
inline T genFindLowestBit(T value) } /***************************************************************************** -* -* Return true if the given value has exactly zero or one bits set. -*/ + * + * Return true if the given value has exactly zero or one bits set. + */ template inline bool genMaxOneBit(T value) @@ -89,9 +89,9 @@ inline bool genMaxOneBit(T value) } /***************************************************************************** -* -* Return true if the given value has exactly one bit set. -*/ + * + * Return true if the given value has exactly one bit set. + */ template inline bool genExactlyOneBit(T value) @@ -280,7 +280,8 @@ class Counter : public Dumpable public: int64_t Value; - Counter(int64_t initialValue = 0) : Value(initialValue) + Counter(int64_t initialValue = 0) + : Value(initialValue) { } @@ -332,7 +333,8 @@ class Histogram : public Dumpable class NodeCounts : public Dumpable { public: - NodeCounts() : m_counts() + NodeCounts() + : m_counts() { } @@ -544,7 +546,7 @@ BasicBlockVisit BasicBlock::VisitEHEnclosedHandlerSecondPassSuccs(Compiler* comp // 3. As part of two pass EH, control may bypass filters and flow directly to // filter-handlers // -template +template static BasicBlockVisit VisitEHSuccs(Compiler* comp, BasicBlock* block, TFunc func) { if (!block->HasPotentialEHSuccs(comp)) @@ -1273,8 +1275,8 @@ inline Statement* Compiler::gtNewStmt(GenTree* expr, const DebugInfo& di) inline GenTree* Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1) { assert((GenTree::OperKind(oper) & (GTK_UNOP | GTK_BINOP)) != 0); - assert((GenTree::OperKind(oper) & GTK_EXOP) == - 0); // Can't use this to construct any types that extend unary/binary operator. + assert((GenTree::OperKind(oper) & GTK_EXOP) == 0); // Can't use this to construct any types that extend unary/binary + // operator. 
assert(op1 != nullptr || oper == GT_RETFILT || (oper == GT_RETURN && type == TYP_VOID)); GenTree* node = new (this, oper) GenTreeOp(oper, type, op1, nullptr); @@ -1320,7 +1322,7 @@ inline GenTreeIntCon* Compiler::gtNewIconHandleNode(size_t value, GenTreeFlags f node = new (this, LargeOpOpcode()) GenTreeIntCon(gtGetTypeForIconFlags(flags), value, fields DEBUGARG(/*largeNode*/ true)); #else - node = new (this, GT_CNS_INT) GenTreeIntCon(gtGetTypeForIconFlags(flags), value, fields); + node = new (this, GT_CNS_INT) GenTreeIntCon(gtGetTypeForIconFlags(flags), value, fields); #endif node->gtFlags |= flags; return node; @@ -2520,8 +2522,8 @@ inline assert(varDsc->lvIsParam); #endif // UNIX_AMD64_ABI #else // !TARGET_AMD64 - // For other targets, a stack parameter that is enregistered or prespilled - // for profiling on ARM will have a stack location. + // For other targets, a stack parameter that is enregistered or prespilled + // for profiling on ARM will have a stack location. assert((varDsc->lvIsParam && !varDsc->lvIsRegArg) || isPrespilledArg); #endif // !TARGET_AMD64 } @@ -2608,7 +2610,7 @@ inline #ifdef TARGET_ARM varOffset = codeGen->genCallerSPtoInitialSPdelta() - codeGen->genCallerSPtoFPdelta(); #else - varOffset = -(codeGen->genTotalFrameSize()); + varOffset = -(codeGen->genTotalFrameSize()); #endif } } @@ -2662,7 +2664,7 @@ inline *pBaseReg = REG_SPBASE; } #else - *pFPbased = FPbased; + *pFPbased = FPbased; #endif return varOffset; @@ -4781,7 +4783,6 @@ unsigned Compiler::fgRunDfs(VisitPreorder visitPreorder, VisitPostorder visitPos ArrayStack blocks(getAllocator(CMK_DepthFirstSearch)); auto dfsFrom = [&](BasicBlock* firstBB) { - BitVecOps::AddElemD(&traits, visited, firstBB->bbNum); blocks.Emplace(this, firstBB); visitPreorder(firstBB, preOrderIndex++); @@ -4807,7 +4808,6 @@ unsigned Compiler::fgRunDfs(VisitPreorder visitPreorder, VisitPostorder visitPos visitPostorder(block, postOrderIndex++); } } - }; dfsFrom(fgFirstBB); @@ -4852,7 +4852,7 @@ template 
BasicBlockVisit FlowGraphNaturalLoop::VisitLoopBlocksReversePostOrder(TFunc func) { BitVecTraits traits(m_blocksSize, m_dfsTree->GetCompiler()); - bool result = BitVecOps::VisitBits(&traits, m_blocks, [=](unsigned index) { + bool result = BitVecOps::VisitBits(&traits, m_blocks, [=](unsigned index) { // head block rpo index = PostOrderCount - 1 - headPreOrderIndex // loop block rpo index = head block rpoIndex + index // loop block po index = PostOrderCount - 1 - loop block rpo index @@ -4884,7 +4884,7 @@ template BasicBlockVisit FlowGraphNaturalLoop::VisitLoopBlocksPostOrder(TFunc func) { BitVecTraits traits(m_blocksSize, m_dfsTree->GetCompiler()); - bool result = BitVecOps::VisitBitsReverse(&traits, m_blocks, [=](unsigned index) { + bool result = BitVecOps::VisitBitsReverse(&traits, m_blocks, [=](unsigned index) { unsigned poIndex = m_header->bbPostorderNum - index; assert(poIndex < m_dfsTree->GetPostOrderCount()); return func(m_dfsTree->GetPostOrder(poIndex)) == BasicBlockVisit::Continue; diff --git a/src/coreclr/jit/compilerbitsettraits.h b/src/coreclr/jit/compilerbitsettraits.h index 02223b1ecedfce..965ffac55465e1 100644 --- a/src/coreclr/jit/compilerbitsettraits.h +++ b/src/coreclr/jit/compilerbitsettraits.h @@ -107,7 +107,9 @@ struct BitVecTraits Compiler* comp; public: - BitVecTraits(unsigned size, Compiler* comp) : size(size), comp(comp) + BitVecTraits(unsigned size, Compiler* comp) + : size(size) + , comp(comp) { const unsigned elemBits = 8 * sizeof(size_t); arraySize = roundUp(size, elemBits) / elemBits; diff --git a/src/coreclr/jit/copyprop.cpp b/src/coreclr/jit/copyprop.cpp index 90a593ef65b2fe..142c745fc7c317 100644 --- a/src/coreclr/jit/copyprop.cpp +++ b/src/coreclr/jit/copyprop.cpp @@ -462,7 +462,9 @@ PhaseStatus Compiler::optVnCopyProp() public: CopyPropDomTreeVisitor(Compiler* compiler) - : DomTreeVisitor(compiler), m_curSsaName(compiler->getAllocator(CMK_CopyProp)), m_madeChanges(false) + : DomTreeVisitor(compiler) + , 
m_curSsaName(compiler->getAllocator(CMK_CopyProp)) + , m_madeChanges(false) { } diff --git a/src/coreclr/jit/debuginfo.h b/src/coreclr/jit/debuginfo.h index 3f628840765dc7..72119b905c948a 100644 --- a/src/coreclr/jit/debuginfo.h +++ b/src/coreclr/jit/debuginfo.h @@ -12,12 +12,17 @@ class InlineContext; class ILLocation { public: - ILLocation() : m_offset(BAD_IL_OFFSET), m_isStackEmpty(false), m_isCall(false) + ILLocation() + : m_offset(BAD_IL_OFFSET) + , m_isStackEmpty(false) + , m_isCall(false) { } ILLocation(IL_OFFSET offset, bool isStackEmpty, bool isCall) - : m_offset(offset), m_isStackEmpty(isStackEmpty), m_isCall(isCall) + : m_offset(offset) + , m_isStackEmpty(isStackEmpty) + , m_isCall(isCall) { } @@ -65,18 +70,21 @@ class ILLocation private: IL_OFFSET m_offset; bool m_isStackEmpty : 1; - bool m_isCall : 1; + bool m_isCall : 1; }; // Represents debug information about a statement. class DebugInfo { public: - DebugInfo() : m_inlineContext(nullptr) + DebugInfo() + : m_inlineContext(nullptr) { } - DebugInfo(InlineContext* inlineContext, ILLocation loc) : m_inlineContext(inlineContext), m_location(loc) + DebugInfo(InlineContext* inlineContext, ILLocation loc) + : m_inlineContext(inlineContext) + , m_location(loc) { } diff --git a/src/coreclr/jit/decomposelongs.h b/src/coreclr/jit/decomposelongs.h index b8ddc621079925..744061091e42be 100644 --- a/src/coreclr/jit/decomposelongs.h +++ b/src/coreclr/jit/decomposelongs.h @@ -18,7 +18,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX class DecomposeLongs { public: - DecomposeLongs(Compiler* compiler) : m_compiler(compiler) + DecomposeLongs(Compiler* compiler) + : m_compiler(compiler) { } @@ -72,7 +73,7 @@ class DecomposeLongs GenTree* RepresentOpAsLocalVar(GenTree* op, GenTree* user, GenTree** edge); GenTree* EnsureIntSized(GenTree* node, bool signExtend); - GenTree* StoreNodeToVar(LIR::Use& use); + GenTree* StoreNodeToVar(LIR::Use& use); static genTreeOps GetHiOper(genTreeOps oper); 
static genTreeOps GetLoOper(genTreeOps oper); diff --git a/src/coreclr/jit/disasm.cpp b/src/coreclr/jit/disasm.cpp index 2a49f9d8cb55c5..bff93c85150a68 100644 --- a/src/coreclr/jit/disasm.cpp +++ b/src/coreclr/jit/disasm.cpp @@ -1,12 +1,12 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*********************************************************************** -* -* File: disasm.cpp -* -* This file handles disassembly for the "late disassembler". -* -***********************************************************************/ + * + * File: disasm.cpp + * + * This file handles disassembly for the "late disassembler". + * + ***********************************************************************/ #include "jitpch.h" #ifdef _MSC_VER @@ -23,7 +23,7 @@ FILE* g_disAsmFileCorDisTools; #endif // USE_COREDISTOOLS // Define DISASM_DEBUG to get verbose output of late disassembler inner workings. -//#define DISASM_DEBUG +// #define DISASM_DEBUG #ifdef DISASM_DEBUG #ifdef DEBUG #define DISASM_DUMP(...) 
\ @@ -96,12 +96,12 @@ typedef struct codeFix { codeFix* cfNext; unsigned cfFixup; -} * codeFixPtr; +}* codeFixPtr; typedef struct codeBlk { codeFix* cbFixupLst; -} * codeBlkPtr; +}* codeBlkPtr; #ifdef USE_MSVCDIS @@ -139,7 +139,7 @@ size_t DisAssembler::disCchAddrMember( switch (terminationType) { - // int disCallSize; + // int disCallSize; case DISX86::trmtaJmpShort: case DISX86::trmtaJmpCcShort: @@ -228,7 +228,7 @@ size_t DisAssembler::disCchAddrMember( switch (terminationType) { - // int disCallSize; + // int disCallSize; case DISARM64::TRMTA::trmtaBra: case DISARM64::TRMTA::trmtaBraCase: @@ -620,7 +620,7 @@ size_t DisAssembler::disCchRegRelMember( case DISX86::trmtaFallThrough: - /* some instructions like division have a TRAP termination type - ignore it */ + /* some instructions like division have a TRAP termination type - ignore it */ case DISX86::trmtaTrap: case DISX86::trmtaTrapCc: @@ -715,7 +715,7 @@ size_t DisAssembler::disCchRegRelMember( case DISARM64::TRMTA::trmtaFallThrough: - /* some instructions like division have a TRAP termination type - ignore it */ + /* some instructions like division have a TRAP termination type - ignore it */ case DISARM64::TRMTA::trmtaTrap: case DISARM64::TRMTA::trmtaTrapCc: @@ -1261,7 +1261,7 @@ void DisAssembler::DisasmBuffer(FILE* pfile, bool printit) #elif defined(TARGET_AMD64) pdis = DIS::PdisNew(DIS::distX8664); #elif defined(TARGET_ARM64) - pdis = DIS::PdisNew(DIS::distArm64); + pdis = DIS::PdisNew(DIS::distArm64); #else // TARGET* #error Unsupported or unset target architecture #endif @@ -1340,7 +1340,7 @@ void DisAssembler::DisasmBuffer(FILE* pfile, bool printit) #else false // Display code bytes? #endif - ); + ); ibCur += (unsigned)cb; } @@ -1680,7 +1680,7 @@ bool DisAssembler::InitCoredistoolsLibrary() s_disCoreDisToolsLibraryLoadSuccessful = true; // We made it! 
-// done initializing + // done initializing FinishedInitializing: InterlockedExchange(&s_disCoreDisToolsLibraryInitializing, 0); // unlock initialization @@ -1703,7 +1703,7 @@ bool DisAssembler::InitCoredistoolsDisasm() #if defined(TARGET_ARM64) coreDisTargetArchitecture = Target_Arm64; #elif defined(TARGET_ARM) - coreDisTargetArchitecture = Target_Thumb; + coreDisTargetArchitecture = Target_Thumb; #elif defined(TARGET_X86) coreDisTargetArchitecture = Target_X86; #elif defined(TARGET_AMD64) diff --git a/src/coreclr/jit/ee_il_dll.cpp b/src/coreclr/jit/ee_il_dll.cpp index cfe50b492bb321..b33e6eed17bbc3 100644 --- a/src/coreclr/jit/ee_il_dll.cpp +++ b/src/coreclr/jit/ee_il_dll.cpp @@ -211,7 +211,9 @@ void SetJitTls(void* value) #if defined(DEBUG) -JitTls::JitTls(ICorJitInfo* jitInfo) : m_compiler(nullptr), m_logEnv(jitInfo) +JitTls::JitTls(ICorJitInfo* jitInfo) + : m_compiler(nullptr) + , m_logEnv(jitInfo) { m_next = reinterpret_cast(GetJitTls()); SetJitTls(this); @@ -1407,7 +1409,9 @@ bool Compiler::eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param) unsigned Compiler::eeTryGetClassSize(CORINFO_CLASS_HANDLE clsHnd) { unsigned classSize = UINT_MAX; - eeRunFunctorWithSPMIErrorTrap([&]() { classSize = info.compCompHnd->getClassSize(clsHnd); }); + eeRunFunctorWithSPMIErrorTrap([&]() { + classSize = info.compCompHnd->getClassSize(clsHnd); + }); return classSize; } diff --git a/src/coreclr/jit/ee_il_dll.hpp b/src/coreclr/jit/ee_il_dll.hpp index c3801d88292f5f..d676ba8caa479a 100644 --- a/src/coreclr/jit/ee_il_dll.hpp +++ b/src/coreclr/jit/ee_il_dll.hpp @@ -10,12 +10,12 @@ class CILJit : public ICorJitCompiler unsigned flags, /* IN */ uint8_t** nativeEntry, /* OUT */ uint32_t* nativeSizeOfCode /* OUT */ - ); + ); void ProcessShutdownWork(ICorStaticInfo* statInfo); void getVersionIdentifier(GUID* versionIdentifier /* OUT */ - ); + ); void setTargetOS(CORINFO_OS os); }; diff --git a/src/coreclr/jit/eeinterface.cpp b/src/coreclr/jit/eeinterface.cpp index 
0578dee4109ef1..a6552c2194294c 100644 --- a/src/coreclr/jit/eeinterface.cpp +++ b/src/coreclr/jit/eeinterface.cpp @@ -210,7 +210,7 @@ void Compiler::eePrintTypeOrJitAlias(StringPrinter* printer, CORINFO_CLASS_HANDL } static const char* s_jitHelperNames[CORINFO_HELP_COUNT] = { -#define JITHELPER(code, pfnHelper, sig) #code, +#define JITHELPER(code, pfnHelper, sig) #code, #define DYNAMICJITHELPER(code, pfnHelper, sig) #code, #include "jithelpers.h" }; @@ -403,10 +403,9 @@ const char* Compiler::eeGetMethodFullName( CORINFO_SIG_INFO sig; eeGetMethodSig(hnd, &sig); eePrintMethod(&p, clsHnd, hnd, &sig, - /* includeClassInstantiation */ true, - /* includeMethodInstantiation */ true, - /* includeSignature */ true, includeReturnType, includeThisSpecifier); - + /* includeClassInstantiation */ true, + /* includeMethodInstantiation */ true, + /* includeSignature */ true, includeReturnType, includeThisSpecifier); }); if (success) @@ -475,13 +474,12 @@ const char* Compiler::eeGetMethodName(CORINFO_METHOD_HANDLE methHnd, char* buffe StringPrinter p(getAllocator(CMK_DebugOnly), buffer, bufferSize); bool success = eeRunFunctorWithSPMIErrorTrap([&]() { eePrintMethod(&p, NO_CLASS_HANDLE, methHnd, - /* sig */ nullptr, - /* includeClassInstantiation */ false, - /* includeMethodInstantiation */ false, - /* includeSignature */ false, - /* includeReturnType */ false, - /* includeThisSpecifier */ false); - + /* sig */ nullptr, + /* includeClassInstantiation */ false, + /* includeMethodInstantiation */ false, + /* includeSignature */ false, + /* includeReturnType */ false, + /* includeThisSpecifier */ false); }); if (!success) @@ -512,7 +510,9 @@ const char* Compiler::eeGetMethodName(CORINFO_METHOD_HANDLE methHnd, char* buffe const char* Compiler::eeGetFieldName(CORINFO_FIELD_HANDLE fldHnd, bool includeType, char* buffer, size_t bufferSize) { StringPrinter p(getAllocator(CMK_DebugOnly), buffer, bufferSize); - bool success = eeRunFunctorWithSPMIErrorTrap([&]() { eePrintField(&p, fldHnd, 
includeType); }); + bool success = eeRunFunctorWithSPMIErrorTrap([&]() { + eePrintField(&p, fldHnd, includeType); + }); if (success) { @@ -525,7 +525,9 @@ const char* Compiler::eeGetFieldName(CORINFO_FIELD_HANDLE fldHnd, bool includeTy { p.Append(":"); - success = eeRunFunctorWithSPMIErrorTrap([&]() { eePrintField(&p, fldHnd, false); }); + success = eeRunFunctorWithSPMIErrorTrap([&]() { + eePrintField(&p, fldHnd, false); + }); if (success) { @@ -560,7 +562,9 @@ const char* Compiler::eeGetFieldName(CORINFO_FIELD_HANDLE fldHnd, bool includeTy const char* Compiler::eeGetClassName(CORINFO_CLASS_HANDLE clsHnd, char* buffer, size_t bufferSize) { StringPrinter printer(getAllocator(CMK_DebugOnly), buffer, bufferSize); - if (!eeRunFunctorWithSPMIErrorTrap([&]() { eePrintType(&printer, clsHnd, true); })) + if (!eeRunFunctorWithSPMIErrorTrap([&]() { + eePrintType(&printer, clsHnd, true); + })) { printer.Truncate(0); printer.Append(""); @@ -581,7 +585,9 @@ const char* Compiler::eeGetClassName(CORINFO_CLASS_HANDLE clsHnd, char* buffer, const char* Compiler::eeGetShortClassName(CORINFO_CLASS_HANDLE clsHnd) { StringPrinter printer(getAllocator(CMK_DebugOnly)); - if (!eeRunFunctorWithSPMIErrorTrap([&]() { eePrintType(&printer, clsHnd, false); })) + if (!eeRunFunctorWithSPMIErrorTrap([&]() { + eePrintType(&printer, clsHnd, false); + })) { printer.Truncate(0); printer.Append(""); @@ -597,8 +603,9 @@ void Compiler::eePrintObjectDescription(const char* prefix, CORINFO_OBJECT_HANDL size_t actualLen = 0; // Ignore potential SPMI failures - bool success = eeRunFunctorWithSPMIErrorTrap( - [&]() { actualLen = this->info.compCompHnd->printObjectDescription(handle, str, maxStrSize); }); + bool success = eeRunFunctorWithSPMIErrorTrap([&]() { + actualLen = this->info.compCompHnd->printObjectDescription(handle, str, maxStrSize); + }); if (!success) { diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp index 778caa9b57e75e..6b6b96b0265460 100644 --- a/src/coreclr/jit/emit.cpp +++ 
b/src/coreclr/jit/emit.cpp @@ -787,7 +787,7 @@ void emitter::emitGenIG(insGroup* ig) IMPL_LIMITATION("Too many arguments pushed on stack"); } -// printf("Start IG #%02u [stk=%02u]\n", ig->igNum, emitCurStackLvl); + // printf("Start IG #%02u [stk=%02u]\n", ig->igNum, emitCurStackLvl); #endif @@ -1205,7 +1205,7 @@ void emitter::emitBegFN(bool hasFramePtr , bool chkAlign #endif - ) +) { insGroup* ig; @@ -1612,7 +1612,7 @@ void* emitter::emitAllocAnyInstr(size_t sz, emitAttr opsz) #if defined(FEATURE_EH_FUNCLETS) && !emitIGisInFuncletProlog(emitCurIG) && !emitIGisInFuncletEpilog(emitCurIG) #endif // FEATURE_EH_FUNCLETS - ) + ) { emitNxtIG(true); } @@ -1627,7 +1627,7 @@ void* emitter::emitAllocAnyInstr(size_t sz, emitAttr opsz) !emitIGisInProlog(emitCurIG) && // don't do this in prolog or epilog !emitIGisInEpilog(emitCurIG) && emitRandomNops // sometimes we turn off where exact codegen is needed (pinvoke inline) - ) + ) { if (emitNextNop == 0) { @@ -1761,7 +1761,7 @@ void* emitter::emitAllocAnyInstr(size_t sz, emitAttr opsz) #ifndef TARGET_AMD64 && emitComp->opts.compReloc #endif // TARGET_AMD64 - ) + ) { /* Mark idInfo()->idDspReloc to remember that the */ /* address mode has a displacement that is relocatable */ @@ -2074,7 +2074,7 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, #if defined(FEATURE_EH_FUNCLETS) || igType == IGPT_FUNCLET_EPILOG #endif // FEATURE_EH_FUNCLETS - ) + ) { #ifdef TARGET_AMD64 emitOutputPreEpilogNOP(); @@ -2202,7 +2202,7 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, #if defined(FEATURE_EH_FUNCLETS) || igType == IGPT_FUNCLET_EPILOG #endif // FEATURE_EH_FUNCLETS - ) + ) { // If this was an epilog, then assume this is the end of any currently in progress // no-GC region. 
If a block after the epilog needs to be no-GC, it needs to call @@ -2509,11 +2509,11 @@ void emitter::emitEndFnEpilog() // because the only instruction is the last one and thus a slight // underestimation of the epilog size is harmless (since the EIP // can not be between instructions). - assert(emitEpilogCnt == 1 || - (emitExitSeqSize - newSize) <= 5 // delta between size of various forms of jmp (size is either 6 or 5), - // and various forms of ret (size is either 1 or 3). The combination can - // be anything between 1 and 5. - ); + assert(emitEpilogCnt == 1 || (emitExitSeqSize - newSize) <= 5 // delta between size of various forms of jmp + // (size is either 6 or 5), and various forms of + // ret (size is either 1 or 3). The combination + // can be anything between 1 and 5. + ); emitExitSeqSize = newSize; } #endif // JIT32_GCENCODER @@ -2825,11 +2825,11 @@ bool emitter::emitNoGChelper(CorInfoHelpFunc helpFunc) case CORINFO_HELP_LRSH: case CORINFO_HELP_LRSZ: -// case CORINFO_HELP_LMUL: -// case CORINFO_HELP_LDIV: -// case CORINFO_HELP_LMOD: -// case CORINFO_HELP_ULDIV: -// case CORINFO_HELP_ULMOD: + // case CORINFO_HELP_LMUL: + // case CORINFO_HELP_LDIV: + // case CORINFO_HELP_LMOD: + // case CORINFO_HELP_ULDIV: + // case CORINFO_HELP_ULMOD: #ifdef TARGET_X86 case CORINFO_HELP_ASSIGN_REF_EAX: @@ -2890,8 +2890,8 @@ bool emitter::emitNoGChelper(CORINFO_METHOD_HANDLE methHnd) * Mark the current spot as having a label. */ -void* emitter::emitAddLabel(VARSET_VALARG_TP GCvars, - regMaskTP gcrefRegs, +void* emitter::emitAddLabel(VARSET_VALARG_TP GCvars, + regMaskTP gcrefRegs, regMaskTP byrefRegs DEBUG_ARG(BasicBlock* block)) { /* Create a new IG if the current one is non-empty */ @@ -3088,7 +3088,7 @@ void emitter::emitSplit(emitLocation* startLoc, return; } -// Report it! + // Report it! 
#ifdef DEBUG if (EMITVERBOSE) @@ -3605,7 +3605,7 @@ emitter::instrDesc* emitter::emitNewInstrCallInd(int argCnt, VARSET_VALARG_TP GCvars, regMaskTP gcrefRegs, regMaskTP byrefRegs, - emitAttr retSizeIn + emitAttr retSizeIn MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize)) { emitAttr retSize = (retSizeIn != EA_UNKNOWN) ? retSizeIn : EA_PTRSIZE; @@ -3688,7 +3688,7 @@ emitter::instrDesc* emitter::emitNewInstrCallDir(int argCnt, VARSET_VALARG_TP GCvars, regMaskTP gcrefRegs, regMaskTP byrefRegs, - emitAttr retSizeIn + emitAttr retSizeIn MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize)) { emitAttr retSize = (retSizeIn != EA_UNKNOWN) ? retSizeIn : EA_PTRSIZE; @@ -3912,8 +3912,8 @@ void emitter::emitDispRegPtrListDelta() // Dump any deltas in regPtrDsc's for outgoing args; these aren't captured in the other sets. if (debugPrevRegPtrDsc != codeGen->gcInfo.gcRegPtrLast) { - for (regPtrDsc* dsc = (debugPrevRegPtrDsc == nullptr) ? codeGen->gcInfo.gcRegPtrList - : debugPrevRegPtrDsc->rpdNext; + for (regPtrDsc* dsc = (debugPrevRegPtrDsc == nullptr) ? 
codeGen->gcInfo.gcRegPtrList + : debugPrevRegPtrDsc->rpdNext; dsc != nullptr; dsc = dsc->rpdNext) { // The non-arg regPtrDscs are reflected in the register sets debugPrevGCrefRegs/emitThisGCrefRegs @@ -4397,7 +4397,7 @@ size_t emitter::emitIssue1Instr(insGroup* ig, instrDesc* id, BYTE** dp) ig->igPerfScore += insPerfScore; #endif // defined(DEBUG) || defined(LATE_DISASM) -// printf("[S=%02u]\n", emitCurStackLvl); + // printf("[S=%02u]\n", emitCurStackLvl); #if EMIT_TRACK_STACK_DEPTH @@ -4559,7 +4559,7 @@ void emitter::emitDispCommentForHandle(size_t handle, size_t cookie, GenTreeFlag #ifdef DEBUG emitComp->eePrintObjectDescription(commentPrefix, (CORINFO_OBJECT_HANDLE)handle); #else - str = "frozen object handle"; + str = "frozen object handle"; #endif } else if (flag == GTF_ICON_CLASS_HDL) @@ -4870,9 +4870,9 @@ void emitter::emitJumpDistBind() int jmp_iteration = 1; -/*****************************************************************************/ -/* If we iterate to look for more jumps to shorten, we start again here. */ -/*****************************************************************************/ + /*****************************************************************************/ + /* If we iterate to look for more jumps to shorten, we start again here. */ + /*****************************************************************************/ AGAIN: @@ -4880,10 +4880,10 @@ void emitter::emitJumpDistBind() emitCheckIGList(); #endif -/* - In the following loop we convert all jump targets from "BasicBlock *" - to "insGroup *" values. We also estimate which jumps will be short. - */ + /* + In the following loop we convert all jump targets from "BasicBlock *" + to "insGroup *" values. We also estimate which jumps will be short. 
+ */ #ifdef DEBUG insGroup* lastIG = nullptr; @@ -5023,7 +5023,7 @@ void emitter::emitJumpDistBind() } #endif // TARGET_ARM64 -/* Make sure the jumps are properly ordered */ + /* Make sure the jumps are properly ordered */ #ifdef DEBUG assert(lastLJ == nullptr || lastIG != jmp->idjIG || lastLJ->idjOffs < jmp->idjOffs); @@ -5427,9 +5427,9 @@ void emitter::emitJumpDistBind() continue; - /*****************************************************************************/ - /* Handle conversion to short jump */ - /*****************************************************************************/ + /*****************************************************************************/ + /* Handle conversion to short jump */ + /*****************************************************************************/ SHORT_JMP: @@ -5469,9 +5469,9 @@ void emitter::emitJumpDistBind() #if defined(TARGET_ARM) - /*****************************************************************************/ - /* Handle conversion to medium jump */ - /*****************************************************************************/ + /*****************************************************************************/ + /* Handle conversion to medium jump */ + /*****************************************************************************/ MEDIUM_JMP: @@ -5496,7 +5496,7 @@ void emitter::emitJumpDistBind() #endif // TARGET_ARM - /*****************************************************************************/ + /*****************************************************************************/ NEXT_JMP: @@ -5572,7 +5572,7 @@ void emitter::emitJumpDistBind() #if defined(TARGET_ARM) || (minMediumExtra <= adjIG) #endif // TARGET_ARM - ) + ) { jmp_iteration++; @@ -5827,8 +5827,8 @@ bool emitter::emitEndsWithAlignInstr() // Returns: size of a loop in bytes. 
// unsigned emitter::getLoopSize(insGroup* igLoopHeader, - unsigned maxLoopSize // - DEBUG_ARG(bool isAlignAdjusted) // + unsigned maxLoopSize // + DEBUG_ARG(bool isAlignAdjusted) // DEBUG_ARG(UNATIVE_OFFSET containingIGNum) // DEBUG_ARG(UNATIVE_OFFSET loopHeadPredIGNum)) { @@ -6236,7 +6236,7 @@ void emitter::emitLoopAlignAdjustments() } #endif // TARGET_XARCH & TARGET_ARM64 #endif // DEBUG - // Adjust the padding amount in all align instructions in this IG + // Adjust the padding amount in all align instructions in this IG instrDescAlign *alignInstrToAdj = alignInstr, *prevAlignInstr = nullptr; for (; alignInstrToAdj != nullptr && alignInstrToAdj->idaIG == alignInstr->idaIG; alignInstrToAdj = alignInstrToAdj->idaNext) @@ -6332,7 +6332,7 @@ void emitter::emitLoopAlignAdjustments() // 3b. If the loop already fits in minimum alignmentBoundary blocks, then return 0. // already best aligned // 3c. return paddingNeeded. // -unsigned emitter::emitCalculatePaddingForLoopAlignment(insGroup* loopHeadIG, +unsigned emitter::emitCalculatePaddingForLoopAlignment(insGroup* loopHeadIG, size_t offset DEBUG_ARG(bool isAlignAdjusted) DEBUG_ARG(UNATIVE_OFFSET containingIGNum) DEBUG_ARG(UNATIVE_OFFSET loopHeadPredIGNum)) @@ -6673,18 +6673,18 @@ void emitter::emitComputeCodeSizes() // Returns: // size of the method code, in bytes // -unsigned emitter::emitEndCodeGen(Compiler* comp, - bool contTrkPtrLcls, - bool fullyInt, - bool fullPtrMap, - unsigned xcptnsCount, - unsigned* prologSize, - unsigned* epilogSize, - void** codeAddr, - void** codeAddrRW, - void** coldCodeAddr, - void** coldCodeAddrRW, - void** consAddr, +unsigned emitter::emitEndCodeGen(Compiler* comp, + bool contTrkPtrLcls, + bool fullyInt, + bool fullPtrMap, + unsigned xcptnsCount, + unsigned* prologSize, + unsigned* epilogSize, + void** codeAddr, + void** codeAddrRW, + void** coldCodeAddr, + void** coldCodeAddrRW, + void** consAddr, void** consAddrRW DEBUGARG(unsigned* instrCount)) { #ifdef DEBUG @@ -7116,7 +7116,7 @@ 
unsigned emitter::emitEndCodeGen(Compiler* comp, assert(indx < emitComp->lvaTrackedCount); -// printf("Variable #%2u/%2u is at stack offset %d\n", num, indx, offs); + // printf("Variable #%2u/%2u is at stack offset %d\n", num, indx, offs); #ifdef JIT32_GCENCODER #ifndef FEATURE_EH_FUNCLETS @@ -8445,7 +8445,7 @@ void emitter::emitDispDataSec(dataSecDsc* section, BYTE* dst) printf("\tdd\t%08Xh", (uint32_t)(size_t)emitOffsetToPtr(ig->igOffs)); } #else // TARGET_64BIT - // We have a 64-BIT target + // We have a 64-BIT target if (emitComp->opts.disDiffable) { printf("\tdq\t%s\n", blockLabel); @@ -9042,7 +9042,7 @@ void emitter::emitGCregDeadSet(GCtype gcType, regMaskTP regMask, BYTE* addr) unsigned char emitter::emitOutputByte(BYTE* dst, ssize_t val) { - BYTE* dstRW = dst + writeableOffset; + BYTE* dstRW = dst + writeableOffset; *castto(dstRW, unsigned char*) = (unsigned char)val; #ifdef DEBUG @@ -9808,13 +9808,13 @@ cnsval_ssize_t emitter::emitGetInsSC(const instrDesc* id) const else #endif // TARGET_ARM if (id->idIsLargeCns()) - { - return ((instrDescCns*)id)->idcCnsVal; - } - else - { - return id->idSmallCns(); - } + { + return ((instrDescCns*)id)->idcCnsVal; + } + else + { + return id->idSmallCns(); + } } #ifdef TARGET_ARM @@ -9925,7 +9925,7 @@ void emitter::emitStackPop(BYTE* addr, bool isCall, unsigned char callInstrSize, #ifndef JIT32_GCENCODER || (emitComp->IsFullPtrRegMapRequired() && !emitComp->GetInterruptible() && isCall) #endif // JIT32_GCENCODER - ) + ) { emitStackPopLargeStk(addr, isCall, callInstrSize, 0); } @@ -10202,17 +10202,17 @@ void emitter::emitStackKillArgs(BYTE* addr, unsigned count, unsigned char callIn #ifdef DEBUG -void emitter::emitRecordRelocationHelp(void* location, /* IN */ - void* target, /* IN */ - uint16_t fRelocType, /* IN */ - const char* relocTypeName, /* IN */ +void emitter::emitRecordRelocationHelp(void* location, /* IN */ + void* target, /* IN */ + uint16_t fRelocType, /* IN */ + const char* relocTypeName, /* IN */ int32_t 
addlDelta /* = 0 */) /* IN */ #else // !DEBUG -void emitter::emitRecordRelocation(void* location, /* IN */ - void* target, /* IN */ - uint16_t fRelocType, /* IN */ +void emitter::emitRecordRelocation(void* location, /* IN */ + void* target, /* IN */ + uint16_t fRelocType, /* IN */ int32_t addlDelta /* = 0 */) /* IN */ #endif // !DEBUG diff --git a/src/coreclr/jit/emit.h b/src/coreclr/jit/emit.h index 48f5edeef728b3..094720597ead70 100644 --- a/src/coreclr/jit/emit.h +++ b/src/coreclr/jit/emit.h @@ -118,7 +118,7 @@ inline const char* GCtypeStr(GCtype gcType) #if DEBUG_EMIT #define INTERESTING_JUMP_NUM -1 // set to 0 to see all jump info -//#define INTERESTING_JUMP_NUM 0 +// #define INTERESTING_JUMP_NUM 0 #endif /***************************************************************************** @@ -129,11 +129,15 @@ inline const char* GCtypeStr(GCtype gcType) class emitLocation { public: - emitLocation() : ig(nullptr), codePos(0) + emitLocation() + : ig(nullptr) + , codePos(0) { } - emitLocation(insGroup* _ig) : ig(_ig), codePos(0) + emitLocation(insGroup* _ig) + : ig(_ig) + , codePos(0) { } @@ -147,7 +151,9 @@ class emitLocation CaptureLocation(emit); } - emitLocation(void* emitCookie) : ig((insGroup*)emitCookie), codePos(0) + emitLocation(void* emitCookie) + : ig((insGroup*)emitCookie) + , codePos(0) { } @@ -286,20 +292,23 @@ struct insGroup insGroup* igLoopBackEdge; // "last" back-edge that branches back to an aligned loop head. #endif -#define IGF_GC_VARS 0x0001 // new set of live GC ref variables -#define IGF_BYREF_REGS 0x0002 // new set of live by-ref registers +#define IGF_GC_VARS 0x0001 // new set of live GC ref variables +#define IGF_BYREF_REGS 0x0002 // new set of live by-ref registers #define IGF_FUNCLET_PROLOG 0x0004 // this group belongs to a funclet prolog #define IGF_FUNCLET_EPILOG 0x0008 // this group belongs to a funclet epilog. 
-#define IGF_EPILOG 0x0010 // this group belongs to a main function epilog -#define IGF_NOGCINTERRUPT 0x0020 // this IG is in a no-interrupt region (prolog, epilog, etc.) -#define IGF_UPD_ISZ 0x0040 // some instruction sizes updated -#define IGF_PLACEHOLDER 0x0080 // this is a placeholder group, to be filled in later -#define IGF_EXTEND 0x0100 // this block is conceptually an extension of the previous block - // and the emitter should continue to track GC info as if there was no new block. -#define IGF_HAS_ALIGN 0x0200 // this group contains an alignment instruction(s) at the end to align either the next - // IG, or, if this IG contains with an unconditional branch, some subsequent IG. -#define IGF_REMOVED_ALIGN 0x0400 // IG was marked as having an alignment instruction(s), but was later unmarked - // without updating the IG's size/offsets. +#define IGF_EPILOG 0x0010 // this group belongs to a main function epilog +#define IGF_NOGCINTERRUPT 0x0020 // this IG is in a no-interrupt region (prolog, epilog, etc.) +#define IGF_UPD_ISZ 0x0040 // some instruction sizes updated +#define IGF_PLACEHOLDER 0x0080 // this is a placeholder group, to be filled in later +#define IGF_EXTEND \ + 0x0100 // this block is conceptually an extension of the previous block + // and the emitter should continue to track GC info as if there was no new block. +#define IGF_HAS_ALIGN \ + 0x0200 // this group contains an alignment instruction(s) at the end to align either the next + // IG, or, if this IG contains with an unconditional branch, some subsequent IG. +#define IGF_REMOVED_ALIGN \ + 0x0400 // IG was marked as having an alignment instruction(s), but was later unmarked + // without updating the IG's size/offsets. #define IGF_HAS_REMOVABLE_JMP 0x0800 // this group ends with an unconditional jump which is a candidate for removal #ifdef TARGET_ARM64 #define IGF_HAS_REMOVED_INSTR 0x1000 // this group has an instruction that was removed. 
@@ -325,7 +334,8 @@ struct insGroup regMaskSmall igGCregs; // set of registers with live GC refs #endif // !(REGMASK_BITS <= 32) - union { + union + { BYTE* igData; // addr of instruction descriptors insPlaceholderGroupData* igPhData; // when igFlags & IGF_PLACEHOLDER }; @@ -428,8 +438,8 @@ struct emitLclVarAddr // protected: unsigned _lvaVarNum : 15; // Usually the lvaVarNum - unsigned _lvaExtra : 15; // Usually the lvaOffset - unsigned _lvaTag : 2; // tag field to support larger varnums + unsigned _lvaExtra : 15; // Usually the lvaOffset + unsigned _lvaTag : 2; // tag field to support larger varnums }; enum idAddrUnionTag @@ -513,7 +523,7 @@ class emitter #ifdef TARGET_AMD64 OPSZP = OPSZ8, #else - OPSZP = OPSZ4, + OPSZP = OPSZ4, #endif }; @@ -522,7 +532,7 @@ class emitter static const emitAttr emitSizeDecode[]; static emitter::opSize emitEncodeSize(emitAttr size); - static emitAttr emitDecodeSize(emitter::opSize ensz); + static emitAttr emitDecodeSize(emitter::opSize ensz); // Currently, we only allow one IG for the prolog bool emitIGisInProlog(const insGroup* ig) @@ -570,10 +580,10 @@ class emitter #ifdef TARGET_XARCH -#define AM_DISP_BITS ((sizeof(unsigned) * 8) - 2 * (REGNUM_BITS + 1) - 2) +#define AM_DISP_BITS ((sizeof(unsigned) * 8) - 2 * (REGNUM_BITS + 1) - 2) #define AM_DISP_BIG_VAL (-(1 << (AM_DISP_BITS - 1))) -#define AM_DISP_MIN (-((1 << (AM_DISP_BITS - 1)) - 1)) -#define AM_DISP_MAX (+((1 << (AM_DISP_BITS - 1)) - 1)) +#define AM_DISP_MIN (-((1 << (AM_DISP_BITS - 1)) - 1)) +#define AM_DISP_MAX (+((1 << (AM_DISP_BITS - 1)) - 1)) struct emitAddrMode { @@ -643,9 +653,9 @@ class emitter static_assert_no_msg(IF_COUNT <= 128); insFormat _idInsFmt : 7; #elif defined(TARGET_LOONGARCH64) - unsigned _idCodeSize : 5; // the instruction(s) size of this instrDesc described. + unsigned _idCodeSize : 5; // the instruction(s) size of this instrDesc described. 
#elif defined(TARGET_RISCV64) - unsigned _idCodeSize : 6; // the instruction(s) size of this instrDesc described. + unsigned _idCodeSize : 6; // the instruction(s) size of this instrDesc described. #elif defined(TARGET_ARM64) static_assert_no_msg(IF_COUNT <= 1024); insFormat _idInsFmt : 10; @@ -685,7 +695,7 @@ class emitter { } #elif defined(TARGET_RISCV64) - insFormat idInsFmt() const + insFormat idInsFmt() const { NYI_RISCV64("idInsFmt-----unimplemented on RISCV64 yet----"); return (insFormat)0; @@ -695,7 +705,7 @@ class emitter NYI_RISCV64("idInsFmt-----unimplemented on RISCV64 yet----"); } #else - insFormat idInsFmt() const + insFormat idInsFmt() const { return _idInsFmt; } @@ -721,7 +731,7 @@ class emitter private: #if defined(TARGET_XARCH) unsigned _idCodeSize : 4; // size of instruction in bytes. Max size of an Intel instruction is 15 bytes. - opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16, 5=32 + opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16, 5=32 // At this point we have fully consumed first DWORD so that next field // doesn't cross a byte boundary. #elif defined(TARGET_ARM64) @@ -730,7 +740,7 @@ class emitter #elif defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) /* _idOpSize defined below. */ #else - opSize _idOpSize : 2; // operand size: 0=1 , 1=2 , 2=4 , 3=8 + opSize _idOpSize : 2; // operand size: 0=1 , 1=2 , 2=4 , 3=8 #endif // TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64 // On Amd64, this is where the second DWORD begins @@ -763,9 +773,9 @@ class emitter // loongarch64: 28 bits // risc-v: 28 bits - unsigned _idSmallDsc : 1; // is this a "small" descriptor? - unsigned _idLargeCns : 1; // does a large constant follow? - unsigned _idLargeDsp : 1; // does a large displacement follow? + unsigned _idSmallDsc : 1; // is this a "small" descriptor? + unsigned _idLargeCns : 1; // does a large constant follow? + unsigned _idLargeDsp : 1; // does a large displacement follow? 
unsigned _idLargeCall : 1; // large call descriptor used // We have several pieces of information we need to encode but which are only applicable @@ -776,15 +786,15 @@ class emitter unsigned _idCustom2 : 1; unsigned _idCustom3 : 1; -#define _idBound _idCustom1 /* jump target / frame offset bound */ -#define _idTlsGD _idCustom2 /* Used to store information related to TLS GD access on linux */ -#define _idNoGC _idCustom3 /* Some helpers don't get recorded in GC tables */ +#define _idBound _idCustom1 /* jump target / frame offset bound */ +#define _idTlsGD _idCustom2 /* Used to store information related to TLS GD access on linux */ +#define _idNoGC _idCustom3 /* Some helpers don't get recorded in GC tables */ #define _idEvexAaaContext (_idCustom3 << 2) | (_idCustom2 << 1) | _idCustom1 /* bits used for the EVEX.aaa context */ #if !defined(TARGET_ARMARCH) unsigned _idCustom4 : 1; -#define _idCallRegPtr _idCustom4 /* IL indirect calls : addr in reg */ +#define _idCallRegPtr _idCustom4 /* IL indirect calls : addr in reg */ #define _idEvexZContext _idCustom4 /* bits used for the EVEX.z context */ #endif // !TARGET_ARMARCH @@ -798,12 +808,12 @@ class emitter #ifdef TARGET_ARM64 - unsigned _idLclVar : 1; // access a local on stack - unsigned _idLclVarPair : 1 // carries information for 2 GC lcl vars. + unsigned _idLclVar : 1; // access a local on stack + unsigned _idLclVarPair : 1 // carries information for 2 GC lcl vars. #endif #ifdef TARGET_LOONGARCH64 - // TODO-LoongArch64: maybe delete on future. + // TODO-LoongArch64: maybe delete on future. opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16 insOpts _idInsOpt : 6; // loongarch options for special: placeholders. e.g emitIns_R_C, also identifying the // accessing a local on stack. 
@@ -818,11 +828,11 @@ class emitter #endif #ifdef TARGET_ARM - insSize _idInsSize : 2; // size of instruction: 16, 32 or 48 bits - insFlags _idInsFlags : 1; // will this instruction set the flags - unsigned _idLclVar : 1; // access a local on stack + insSize _idInsSize : 2; // size of instruction: 16, 32 or 48 bits + insFlags _idInsFlags : 1; // will this instruction set the flags + unsigned _idLclVar : 1; // access a local on stack unsigned _idLclFPBase : 1; // access a local on stack - SP based offset - insOpts _idInsOpt : 3; // options for Load/Store instructions + insOpts _idInsOpt : 3; // options for Load/Store instructions #endif //////////////////////////////////////////////////////////////////////// @@ -892,7 +902,7 @@ class emitter #define ID_EXTRA_BITS (ID_EXTRA_RELOC_BITS + ID_EXTRA_BITFIELD_BITS + ID_EXTRA_PREV_OFFSET_BITS) -/* Use whatever bits are left over for small constants */ + /* Use whatever bits are left over for small constants */ #define ID_BIT_SMALL_CNS (32 - ID_EXTRA_BITS) C_ASSERT(ID_BIT_SMALL_CNS > 0); @@ -951,9 +961,10 @@ class emitter void checkSizes(); - union idAddrUnion { -// TODO-Cleanup: We should really add a DEBUG-only tag to this union so we can add asserts -// about reading what we think is here, to avoid unexpected corruption issues. + union idAddrUnion + { + // TODO-Cleanup: We should really add a DEBUG-only tag to this union so we can add asserts + // about reading what we think is here, to avoid unexpected corruption issues. 
#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) emitLclVarAddr iiaLclVar; @@ -1857,137 +1868,137 @@ class emitter #define PERFSCORE_THROUGHPUT_1C 1.0f // Single Issue -#define PERFSCORE_THROUGHPUT_2C 2.0f // slower - 2 cycles -#define PERFSCORE_THROUGHPUT_3C 3.0f // slower - 3 cycles -#define PERFSCORE_THROUGHPUT_4C 4.0f // slower - 4 cycles -#define PERFSCORE_THROUGHPUT_5C 5.0f // slower - 5 cycles -#define PERFSCORE_THROUGHPUT_6C 6.0f // slower - 6 cycles -#define PERFSCORE_THROUGHPUT_7C 7.0f // slower - 7 cycles -#define PERFSCORE_THROUGHPUT_8C 8.0f // slower - 8 cycles -#define PERFSCORE_THROUGHPUT_9C 9.0f // slower - 9 cycles -#define PERFSCORE_THROUGHPUT_10C 10.0f // slower - 10 cycles -#define PERFSCORE_THROUGHPUT_11C 10.0f // slower - 10 cycles -#define PERFSCORE_THROUGHPUT_13C 13.0f // slower - 13 cycles -#define PERFSCORE_THROUGHPUT_14C 14.0f // slower - 13 cycles -#define PERFSCORE_THROUGHPUT_16C 16.0f // slower - 13 cycles -#define PERFSCORE_THROUGHPUT_19C 19.0f // slower - 19 cycles -#define PERFSCORE_THROUGHPUT_25C 25.0f // slower - 25 cycles -#define PERFSCORE_THROUGHPUT_33C 33.0f // slower - 33 cycles -#define PERFSCORE_THROUGHPUT_50C 50.0f // slower - 50 cycles -#define PERFSCORE_THROUGHPUT_52C 52.0f // slower - 52 cycles -#define PERFSCORE_THROUGHPUT_57C 57.0f // slower - 57 cycles +#define PERFSCORE_THROUGHPUT_2C 2.0f // slower - 2 cycles +#define PERFSCORE_THROUGHPUT_3C 3.0f // slower - 3 cycles +#define PERFSCORE_THROUGHPUT_4C 4.0f // slower - 4 cycles +#define PERFSCORE_THROUGHPUT_5C 5.0f // slower - 5 cycles +#define PERFSCORE_THROUGHPUT_6C 6.0f // slower - 6 cycles +#define PERFSCORE_THROUGHPUT_7C 7.0f // slower - 7 cycles +#define PERFSCORE_THROUGHPUT_8C 8.0f // slower - 8 cycles +#define PERFSCORE_THROUGHPUT_9C 9.0f // slower - 9 cycles +#define PERFSCORE_THROUGHPUT_10C 10.0f // slower - 10 cycles +#define PERFSCORE_THROUGHPUT_11C 10.0f // slower - 10 cycles +#define PERFSCORE_THROUGHPUT_13C 13.0f // slower - 13 cycles +#define 
PERFSCORE_THROUGHPUT_14C 14.0f // slower - 13 cycles +#define PERFSCORE_THROUGHPUT_16C 16.0f // slower - 13 cycles +#define PERFSCORE_THROUGHPUT_19C 19.0f // slower - 19 cycles +#define PERFSCORE_THROUGHPUT_25C 25.0f // slower - 25 cycles +#define PERFSCORE_THROUGHPUT_33C 33.0f // slower - 33 cycles +#define PERFSCORE_THROUGHPUT_50C 50.0f // slower - 50 cycles +#define PERFSCORE_THROUGHPUT_52C 52.0f // slower - 52 cycles +#define PERFSCORE_THROUGHPUT_57C 57.0f // slower - 57 cycles #define PERFSCORE_THROUGHPUT_140C 140.0f // slower - 140 cycles #define PERFSCORE_LATENCY_ILLEGAL -1024.0f #define PERFSCORE_LATENCY_ZERO 0.0f -#define PERFSCORE_LATENCY_1C 1.0f -#define PERFSCORE_LATENCY_2C 2.0f -#define PERFSCORE_LATENCY_3C 3.0f -#define PERFSCORE_LATENCY_4C 4.0f -#define PERFSCORE_LATENCY_5C 5.0f -#define PERFSCORE_LATENCY_6C 6.0f -#define PERFSCORE_LATENCY_7C 7.0f -#define PERFSCORE_LATENCY_8C 8.0f -#define PERFSCORE_LATENCY_9C 9.0f -#define PERFSCORE_LATENCY_10C 10.0f -#define PERFSCORE_LATENCY_11C 11.0f -#define PERFSCORE_LATENCY_12C 12.0f -#define PERFSCORE_LATENCY_13C 13.0f -#define PERFSCORE_LATENCY_14C 14.0f -#define PERFSCORE_LATENCY_15C 15.0f -#define PERFSCORE_LATENCY_16C 16.0f -#define PERFSCORE_LATENCY_18C 18.0f -#define PERFSCORE_LATENCY_20C 20.0f -#define PERFSCORE_LATENCY_22C 22.0f -#define PERFSCORE_LATENCY_23C 23.0f -#define PERFSCORE_LATENCY_26C 26.0f -#define PERFSCORE_LATENCY_62C 62.0f -#define PERFSCORE_LATENCY_69C 69.0f +#define PERFSCORE_LATENCY_1C 1.0f +#define PERFSCORE_LATENCY_2C 2.0f +#define PERFSCORE_LATENCY_3C 3.0f +#define PERFSCORE_LATENCY_4C 4.0f +#define PERFSCORE_LATENCY_5C 5.0f +#define PERFSCORE_LATENCY_6C 6.0f +#define PERFSCORE_LATENCY_7C 7.0f +#define PERFSCORE_LATENCY_8C 8.0f +#define PERFSCORE_LATENCY_9C 9.0f +#define PERFSCORE_LATENCY_10C 10.0f +#define PERFSCORE_LATENCY_11C 11.0f +#define PERFSCORE_LATENCY_12C 12.0f +#define PERFSCORE_LATENCY_13C 13.0f +#define PERFSCORE_LATENCY_14C 14.0f +#define PERFSCORE_LATENCY_15C 15.0f 
+#define PERFSCORE_LATENCY_16C 16.0f +#define PERFSCORE_LATENCY_18C 18.0f +#define PERFSCORE_LATENCY_20C 20.0f +#define PERFSCORE_LATENCY_22C 22.0f +#define PERFSCORE_LATENCY_23C 23.0f +#define PERFSCORE_LATENCY_26C 26.0f +#define PERFSCORE_LATENCY_62C 62.0f +#define PERFSCORE_LATENCY_69C 69.0f #define PERFSCORE_LATENCY_140C 140.0f #define PERFSCORE_LATENCY_400C 400.0f // Intel microcode issue with these instructions -#define PERFSCORE_LATENCY_BRANCH_DIRECT 1.0f // cost of an unconditional branch -#define PERFSCORE_LATENCY_BRANCH_COND 2.0f // includes cost of a possible misprediction +#define PERFSCORE_LATENCY_BRANCH_DIRECT 1.0f // cost of an unconditional branch +#define PERFSCORE_LATENCY_BRANCH_COND 2.0f // includes cost of a possible misprediction #define PERFSCORE_LATENCY_BRANCH_INDIRECT 2.0f // includes cost of a possible misprediction #if defined(TARGET_XARCH) // a read,write or modify from stack location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_2C -#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_2C +#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_2C +#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_2C #define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_5C // a read, write or modify from constant location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_2C -#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_2C +#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_2C +#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_2C #define PERFSCORE_LATENCY_RD_WR_CONST_ADDR PERFSCORE_LATENCY_5C // a read, write or modify from memory location, possible def to use latency from L0 or L1 cache // plus an extra cost (of 1.0) for a increased chance of a cache miss -#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_RD_GENERAL 
PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_3C #define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_6C #elif defined(TARGET_ARM64) || defined(TARGET_ARM) // a read,write or modify from stack location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_1C -#define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C // a read, write or modify from constant location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C #define PERFSCORE_LATENCY_RD_WR_CONST_ADDR PERFSCORE_LATENCY_3C // a read, write or modify from memory location, possible def to use latency from L0 or L1 cache // plus an extra cost (of 1.0) for a increased chance of a cache miss -#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C -#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C -#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C +#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C +#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C #elif defined(TARGET_LOONGARCH64) // a read,write or modify from stack location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_1C -#define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_1C +#define 
PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C // a read, write or modify from constant location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C #define PERFSCORE_LATENCY_RD_WR_CONST_ADDR PERFSCORE_LATENCY_3C // a read, write or modify from memory location, possible def to use latency from L0 or L1 cache // plus an extra cost (of 1.0) for a increased chance of a cache miss -#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C -#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C -#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C +#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C +#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C #elif defined(TARGET_RISCV64) // a read,write or modify from stack location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_1C -#define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C // a read, write or modify from constant location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C #define PERFSCORE_LATENCY_RD_WR_CONST_ADDR PERFSCORE_LATENCY_3C // a read, write or modify from memory location, possible def to use latency from L0 or L1 cache // plus an extra cost (of 1.0) for a increased 
chance of a cache miss -#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C -#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C -#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C +#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C +#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C #endif // TARGET_XXX // Make this an enum: // -#define PERFSCORE_MEMORY_NONE 0 -#define PERFSCORE_MEMORY_READ 1 -#define PERFSCORE_MEMORY_WRITE 2 +#define PERFSCORE_MEMORY_NONE 0 +#define PERFSCORE_MEMORY_READ 1 +#define PERFSCORE_MEMORY_WRITE 2 #define PERFSCORE_MEMORY_READ_WRITE 3 struct insExecutionCharacteristics @@ -2020,7 +2031,8 @@ class emitter instrDescJmp* idjNext; // next jump in the group/method insGroup* idjIG; // containing group - union { + union + { BYTE* idjAddr; // address of jump ins (for patching) } idjTemp; @@ -2043,7 +2055,7 @@ class emitter #else 30; #endif - unsigned idjShort : 1; // is the jump known to be a short one? + unsigned idjShort : 1; // is the jump known to be a short one? unsigned idjKeepLong : 1; // should the jump be kept long? 
(used for hot to cold and cold to hot jumps) }; @@ -2184,7 +2196,9 @@ class emitter alignas(alignof(T)) char idStorage[sizeof(T)]; public: - inlineInstrDesc() : idDebugInfo(nullptr), idStorage() + inlineInstrDesc() + : idDebugInfo(nullptr) + , idStorage() { static_assert_no_msg((offsetof(inlineInstrDesc, idStorage) - sizeof(instrDescDebugInfo*)) == offsetof(inlineInstrDesc, idDebugInfo)); @@ -2210,7 +2224,7 @@ class emitter #endif // TARGET_ARM insUpdateModes emitInsUpdateMode(instruction ins); - insFormat emitInsModeFormat(instruction ins, insFormat base); + insFormat emitInsModeFormat(instruction ins, insFormat base); static const BYTE emitInsModeFmtTab[]; #ifdef DEBUG @@ -2225,7 +2239,7 @@ class emitter ssize_t emitGetInsDsp(instrDesc* id); ssize_t emitGetInsAmd(instrDesc* id); - ssize_t emitGetInsCIdisp(instrDesc* id); + ssize_t emitGetInsCIdisp(instrDesc* id); unsigned emitGetInsCIargs(instrDesc* id); inline emitAttr emitGetMemOpSize(instrDesc* id) const; @@ -2238,7 +2252,7 @@ class emitter #endif // TARGET_XARCH cnsval_ssize_t emitGetInsSC(const instrDesc* id) const; - unsigned emitInsCount; + unsigned emitInsCount; /************************************************************************/ /* A few routines used for debug display purposes */ @@ -2264,11 +2278,11 @@ class emitter regMaskTP debugPrevGCrefRegs; regMaskTP debugPrevByrefRegs; void emitDispInsIndent(); - void emitDispGCDeltaTitle(const char* title); - void emitDispGCRegDelta(const char* title, regMaskTP prevRegs, regMaskTP curRegs); - void emitDispGCVarDelta(); - void emitDispRegPtrListDelta(); - void emitDispGCInfoDelta(); + void emitDispGCDeltaTitle(const char* title); + void emitDispGCRegDelta(const char* title, regMaskTP prevRegs, regMaskTP curRegs); + void emitDispGCVarDelta(); + void emitDispRegPtrListDelta(); + void emitDispGCInfoDelta(); void emitDispIGflags(unsigned flags); void emitDispIG(insGroup* ig, @@ -2325,7 +2339,9 @@ class emitter EpilogList* elNext; emitLocation elLoc; - 
EpilogList() : elNext(nullptr), elLoc() + EpilogList() + : elNext(nullptr) + , elLoc() { } }; @@ -2362,12 +2378,12 @@ class emitter /* Methods to record a code position and later convert to offset */ /************************************************************************/ - unsigned emitFindInsNum(const insGroup* ig, const instrDesc* id) const; + unsigned emitFindInsNum(const insGroup* ig, const instrDesc* id) const; UNATIVE_OFFSET emitFindOffset(const insGroup* ig, unsigned insNum) const; -/************************************************************************/ -/* Members and methods used to issue (encode) instructions. */ -/************************************************************************/ + /************************************************************************/ + /* Members and methods used to issue (encode) instructions. */ + /************************************************************************/ #ifdef DEBUG // If we have started issuing instructions from the list of instrDesc, this is set @@ -2458,9 +2474,9 @@ class emitter #endif // TARGET_LOONGARCH64 || TARGET_RISCV64 instrDesc* emitFirstInstrDesc(BYTE* idData) const; - void emitAdvanceInstrDesc(instrDesc** id, size_t idSize) const; - size_t emitIssue1Instr(insGroup* ig, instrDesc* id, BYTE** dp); - size_t emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp); + void emitAdvanceInstrDesc(instrDesc** id, size_t idSize) const; + size_t emitIssue1Instr(insGroup* ig, instrDesc* id, BYTE** dp); + size_t emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp); bool emitHasFramePtr; @@ -2511,13 +2527,13 @@ class emitter #endif // FEATURE_SIMD regNumber emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, GenTree* src); regNumber emitInsTernary(instruction ins, emitAttr attr, GenTree* dst, GenTree* src1, GenTree* src2); - void emitInsLoadInd(instruction ins, emitAttr attr, regNumber dstReg, GenTreeIndir* mem); - void emitInsStoreInd(instruction ins, emitAttr attr, GenTreeStoreInd* mem); - 
void emitInsStoreLcl(instruction ins, emitAttr attr, GenTreeLclVarCommon* varNode); + void emitInsLoadInd(instruction ins, emitAttr attr, regNumber dstReg, GenTreeIndir* mem); + void emitInsStoreInd(instruction ins, emitAttr attr, GenTreeStoreInd* mem); + void emitInsStoreLcl(instruction ins, emitAttr attr, GenTreeLclVarCommon* varNode); insFormat emitMapFmtForIns(insFormat fmt, instruction ins); insFormat emitMapFmtAtoM(insFormat fmt); - void emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt, instruction ins); - void spillIntArgRegsToShadowSlots(); + void emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt, instruction ins); + void spillIntArgRegsToShadowSlots(); #ifdef TARGET_XARCH bool emitIsInstrWritingToReg(instrDesc* id, regNumber reg); @@ -2604,22 +2620,22 @@ class emitter // non-adaptive alignment on xarch, this points to the first align instruction of the series of align instructions. instrDescAlign* emitAlignLastGroup; - unsigned getLoopSize(insGroup* igLoopHeader, + unsigned getLoopSize(insGroup* igLoopHeader, unsigned maxLoopSize DEBUG_ARG(bool isAlignAdjusted) DEBUG_ARG(UNATIVE_OFFSET containingIGNum) DEBUG_ARG(UNATIVE_OFFSET loopHeadPredIGNum)); // Get the smallest loop size - void emitLoopAlignment(DEBUG_ARG1(bool isPlacedBehindJmp)); - bool emitEndsWithAlignInstr(); // Validate if newLabel is appropriate - bool emitSetLoopBackEdge(const BasicBlock* loopTopBlock); + void emitLoopAlignment(DEBUG_ARG1(bool isPlacedBehindJmp)); + bool emitEndsWithAlignInstr(); // Validate if newLabel is appropriate + bool emitSetLoopBackEdge(const BasicBlock* loopTopBlock); void emitLoopAlignAdjustments(); // Predict if loop alignment is needed and make appropriate adjustments - unsigned emitCalculatePaddingForLoopAlignment(insGroup* ig, + unsigned emitCalculatePaddingForLoopAlignment(insGroup* ig, size_t offset DEBUG_ARG(bool isAlignAdjusted) DEBUG_ARG(UNATIVE_OFFSET containingIGNum) DEBUG_ARG(UNATIVE_OFFSET loopHeadPredIGNum)); - void 
emitLoopAlign(unsigned paddingBytes, bool isFirstAlign DEBUG_ARG(bool isPlacedBehindJmp)); - void emitLongLoopAlign(unsigned alignmentBoundary DEBUG_ARG(bool isPlacedBehindJmp)); + void emitLoopAlign(unsigned paddingBytes, bool isFirstAlign DEBUG_ARG(bool isPlacedBehindJmp)); + void emitLongLoopAlign(unsigned alignmentBoundary DEBUG_ARG(bool isPlacedBehindJmp)); instrDescAlign* emitAlignInNextIG(instrDescAlign* alignInstr); - void emitConnectAlignInstrWithCurIG(); + void emitConnectAlignInstrWithCurIG(); #endif @@ -2692,7 +2708,7 @@ class emitter void emitSetSecondRetRegGCType(instrDescCGCA* id, emitAttr secondRetSize); #endif // MULTIREG_HAS_SECOND_GC_RET - static void emitEncodeCallGCregs(regMaskTP regs, instrDesc* id); + static void emitEncodeCallGCregs(regMaskTP regs, instrDesc* id); static unsigned emitDecodeCallGCregs(instrDesc* id); unsigned emitNxtIGnum; @@ -2716,8 +2732,8 @@ class emitter insGroup* emitAllocAndLinkIG(); insGroup* emitAllocIG(); - void emitInitIG(insGroup* ig); - void emitInsertIGAfter(insGroup* insertAfterIG, insGroup* ig); + void emitInitIG(insGroup* ig); + void emitInsertIGAfter(insGroup* insertAfterIG, insGroup* ig); void emitNewIG(); @@ -2732,9 +2748,9 @@ class emitter static bool emitJmpInstHasNoCode(instrDesc* id); #endif - void emitGenIG(insGroup* ig); + void emitGenIG(insGroup* ig); insGroup* emitSavIG(bool emitAdd = false); - void emitNxtIG(bool extend = false); + void emitNxtIG(bool extend = false); #ifdef TARGET_ARM64 void emitRemoveLastInstruction(); @@ -2864,8 +2880,8 @@ class emitter // Mark this instruction group as having a label; return the new instruction group. // Sets the emitter's record of the currently live GC variables // and registers. 
- void* emitAddLabel(VARSET_VALARG_TP GCvars, - regMaskTP gcrefRegs, + void* emitAddLabel(VARSET_VALARG_TP GCvars, + regMaskTP gcrefRegs, regMaskTP byrefRegs DEBUG_ARG(BasicBlock* block = nullptr)); // Same as above, except the label is added and is conceptually "inline" in @@ -2873,7 +2889,7 @@ class emitter // continues to track GC info as if there was no label. void* emitAddInlineLabel(); - void emitPrintLabel(const insGroup* ig) const; + void emitPrintLabel(const insGroup* ig) const; const char* emitLabelString(const insGroup* ig) const; #if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) @@ -3096,7 +3112,7 @@ class emitter return (offs >= emitGCrFrameOffsMin) && (offs < emitGCrFrameOffsMax); } - static instruction emitJumpKindToIns(emitJumpKind jumpKind); + static instruction emitJumpKindToIns(emitJumpKind jumpKind); static emitJumpKind emitInsToJumpKind(instruction ins); static emitJumpKind emitReverseJumpKind(emitJumpKind jumpKind); @@ -3161,7 +3177,8 @@ class emitter bool emitSimpleStkUsed; // using the "simple" stack table? 
- union { + union + { struct // if emitSimpleStkUsed==true { @@ -3209,8 +3226,8 @@ class emitter #ifdef DEBUG const char* emitGetFrameReg(); - void emitDispRegSet(regMaskTP regs); - void emitDispVarSet(); + void emitDispRegSet(regMaskTP regs); + void emitDispVarSet(); #endif void emitGCregLiveUpd(GCtype gcType, regNumber reg, BYTE* addr); @@ -3275,7 +3292,11 @@ class emitter UNATIVE_OFFSET dsdOffs; UNATIVE_OFFSET alignment; // in bytes, defaults to 4 - dataSecDsc() : dsdList(nullptr), dsdLast(nullptr), dsdOffs(0), alignment(4) + dataSecDsc() + : dsdList(nullptr) + , dsdLast(nullptr) + , dsdOffs(0) + , alignment(4) { } }; @@ -3293,9 +3314,9 @@ class emitter COMP_HANDLE emitCmpHandle; -/************************************************************************/ -/* Helpers for interface to EE */ -/************************************************************************/ + /************************************************************************/ + /* Helpers for interface to EE */ + /************************************************************************/ #ifdef DEBUG @@ -3305,25 +3326,25 @@ class emitter #define emitRecordRelocationWithAddlDelta(location, target, fRelocType, addlDelta) \ emitRecordRelocationHelp(location, target, fRelocType, #fRelocType, addlDelta) - void emitRecordRelocationHelp(void* location, /* IN */ - void* target, /* IN */ - uint16_t fRelocType, /* IN */ - const char* relocTypeName, /* IN */ - int32_t addlDelta = 0); /* IN */ + void emitRecordRelocationHelp(void* location, /* IN */ + void* target, /* IN */ + uint16_t fRelocType, /* IN */ + const char* relocTypeName, /* IN */ + int32_t addlDelta = 0); /* IN */ #else // !DEBUG void emitRecordRelocationWithAddlDelta(void* location, /* IN */ void* target, /* IN */ uint16_t fRelocType, /* IN */ - int32_t addlDelta) /* IN */ + int32_t addlDelta) /* IN */ { emitRecordRelocation(location, target, fRelocType, addlDelta); } - void emitRecordRelocation(void* location, /* IN */ - void* target, /* IN */ - 
uint16_t fRelocType, /* IN */ + void emitRecordRelocation(void* location, /* IN */ + void* target, /* IN */ + uint16_t fRelocType, /* IN */ int32_t addlDelta = 0); /* IN */ #endif // !DEBUG @@ -3343,9 +3364,9 @@ class emitter CORINFO_SIG_INFO* emitScratchSigInfo; #endif // DEBUG -/************************************************************************/ -/* Logic to collect and display statistics */ -/************************************************************************/ + /************************************************************************/ + /* Logic to collect and display statistics */ + /************************************************************************/ #if EMITTER_STATS @@ -3482,10 +3503,10 @@ class emitter } #endif // EMITTER_STATS -/************************************************************************* - * - * Define any target-dependent emitter members. - */ + /************************************************************************* + * + * Define any target-dependent emitter members. + */ #include "emitdef.h" diff --git a/src/coreclr/jit/emitarm.cpp b/src/coreclr/jit/emitarm.cpp index 3fa92b60d0e5b5..4dd1d470887a20 100644 --- a/src/coreclr/jit/emitarm.cpp +++ b/src/coreclr/jit/emitarm.cpp @@ -700,8 +700,8 @@ emitter::insFormat emitter::emitInsFormat(instruction ins) } // INST_FP is 1 -#define LD 2 -#define ST 4 +#define LD 2 +#define ST 4 #define CMP 8 // clang-format off @@ -1708,10 +1708,10 @@ void emitter::emitIns_R(instruction ins, emitAttr attr, regNumber reg) * Add an instruction referencing a register and a constant. 
*/ -void emitter::emitIns_R_I(instruction ins, - emitAttr attr, - regNumber reg, - target_ssize_t imm, +void emitter::emitIns_R_I(instruction ins, + emitAttr attr, + regNumber reg, + target_ssize_t imm, insFlags flags /* = INS_FLAGS_DONT_CARE */ DEBUGARG(GenTreeFlags gtFlags)) { @@ -1738,7 +1738,7 @@ void emitter::emitIns_R_I(instruction ins, ins = INS_sub; else // ins == INS_sub ins = INS_add; - imm = -imm; + imm = -imm; } fmt = IF_T1_J0; sf = INS_FLAGS_SET; @@ -2607,7 +2607,7 @@ void emitter::emitIns_R_R_I(instruction ins, ins = INS_sub; else ins = INS_add; - imm = -imm; + imm = -imm; } fmt = IF_T1_G; sf = INS_FLAGS_SET; @@ -2621,7 +2621,7 @@ void emitter::emitIns_R_R_I(instruction ins, ins = INS_sub; else ins = INS_add; - imm = -imm; + imm = -imm; } // Use Thumb-1 encoding emitIns_R_I(ins, attr, reg1, imm, flags); @@ -2982,9 +2982,9 @@ void emitter::emitIns_R_R_I(instruction ins, } } } - // - // If we did not find a thumb-1 encoding above - // + // + // If we did not find a thumb-1 encoding above + // COMMON_THUMB2_LDST: assert(fmt == IF_NONE); @@ -3185,8 +3185,8 @@ void emitter::emitIns_R_R_R(instruction ins, case INS_mul: if (insMustSetFlags(flags)) { - assert(reg1 != - REG_PC); // VM debugging single stepper doesn't support PC register with this instruction. + assert(reg1 != REG_PC); // VM debugging single stepper doesn't support PC register with this + // instruction. 
assert(reg2 != REG_PC); assert(reg3 != REG_PC); @@ -4836,7 +4836,7 @@ void emitter::emitIns_Call(EmitCallType callType, if (m_debugInfoSize > 0) { INDEBUG(id->idDebugOnlyInfo()->idCallSig = sigInfo); - id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token + id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token } #ifdef LATE_DISASM @@ -5236,7 +5236,7 @@ unsigned emitter::emitOutput_Thumb1Instr(BYTE* dst, code_t code) unsigned emitter::emitOutput_Thumb2Instr(BYTE* dst, code_t code) { unsigned short word1 = (code >> 16) & 0xffff; - unsigned short word2 = (code)&0xffff; + unsigned short word2 = (code) & 0xffff; assert((code_t)((word1 << 16) | word2) == code); #ifdef DEBUG @@ -5342,7 +5342,7 @@ BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i) if (dstOffs <= srcOffs) { -/* This is a backward jump - distance is known at this point */ + /* This is a backward jump - distance is known at this point */ #if DEBUG_EMIT if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0) @@ -5731,7 +5731,7 @@ BYTE* emitter::emitOutputIT(BYTE* dst, instruction ins, insFormat fmt, code_t co #endif // FEATURE_ITINSTRUCTION /***************************************************************************** -* + * * Append the machine code corresponding to the given instruction descriptor * to the code block at '*dp'; the base of the code block is 'bp', and 'ig' * is the instruction group that contains the instruction. 
Updates '*dp' to @@ -6561,9 +6561,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) break; - /********************************************************************/ - /* oops */ - /********************************************************************/ + /********************************************************************/ + /* oops */ + /********************************************************************/ default: diff --git a/src/coreclr/jit/emitarm.h b/src/coreclr/jit/emitarm.h index 245196bfa18346..6ae0c57dea6d26 100644 --- a/src/coreclr/jit/emitarm.h +++ b/src/coreclr/jit/emitarm.h @@ -81,7 +81,7 @@ bool emitInsIsStore(instruction ins); bool emitInsIsLoadOrStore(instruction ins); emitter::insFormat emitInsFormat(instruction ins); -emitter::code_t emitInsCode(instruction ins, insFormat fmt); +emitter::code_t emitInsCode(instruction ins, insFormat fmt); // Generate code for a load or store operation and handle the case // of contained GT_LEA op1 with [base + index<idInsOpt())) { - assert((emitGetInsSC(id) > 0) || - (id->idReg2() == REG_ZR)); // REG_ZR encodes SP and we allow a shift of zero + assert((emitGetInsSC(id) > 0) || (id->idReg2() == REG_ZR)); // REG_ZR encodes SP and we allow a shift of + // zero } break; @@ -967,7 +967,7 @@ bool emitter::emitInsMayWriteToGCReg(instrDesc* id) switch (fmt) { - // These are the formats with "destination" registers: + // These are the formats with "destination" registers: case IF_DI_1B: // DI_1B X........hwiiiii iiiiiiiiiiiddddd Rd imm(i16,hw) case IF_DI_1D: // DI_1D X........Nrrrrrr ssssss.....ddddd Rd imm(N,r,s) @@ -1031,7 +1031,7 @@ bool emitter::emitInsMayWriteToGCReg(instrDesc* id) // Tracked GC pointers cannot be placed into the SIMD registers. 
return false; - // These are the load/store formats with "target" registers: + // These are the load/store formats with "target" registers: case IF_LS_1A: // LS_1A XX...V..iiiiiiii iiiiiiiiiiittttt Rt PC imm(1MB) case IF_LS_2A: // LS_2A .X.......X...... ......nnnnnttttt Rt Rn @@ -1471,8 +1471,8 @@ emitter::insFormat emitter::emitInsFormat(instruction ins) return insFormats[ins]; } -#define LD 1 -#define ST 2 +#define LD 1 +#define ST 2 #define CMP 4 #define RSH 8 #define WID 16 @@ -1733,8 +1733,8 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt) }; // clang-format on - const static insFormat formatEncode9[9] = {IF_DR_2E, IF_DR_2G, IF_DI_1B, IF_DI_1D, IF_DV_3C, - IF_DV_2B, IF_DV_2C, IF_DV_2E, IF_DV_2F}; + const static insFormat formatEncode9[9] = {IF_DR_2E, IF_DR_2G, IF_DI_1B, IF_DI_1D, IF_DV_3C, + IF_DV_2B, IF_DV_2C, IF_DV_2E, IF_DV_2F}; const static insFormat formatEncode6A[6] = {IF_DR_3A, IF_DR_3B, IF_DR_3C, IF_DI_2A, IF_DV_3A, IF_DV_3E}; const static insFormat formatEncode6B[6] = {IF_LS_2D, IF_LS_3F, IF_LS_2E, IF_LS_2F, IF_LS_3G, IF_LS_2G}; const static insFormat formatEncode5A[5] = {IF_LS_2A, IF_LS_2B, IF_LS_2C, IF_LS_3A, IF_LS_1A}; @@ -3748,13 +3748,13 @@ void emitter::emitIns_R(instruction ins, emitAttr attr, regNumber reg, insOpts o * Add an instruction referencing a register and a constant. 
*/ -void emitter::emitIns_R_I(instruction ins, - emitAttr attr, - regNumber reg, - ssize_t imm, - insOpts opt, /* = INS_OPTS_NONE */ +void emitter::emitIns_R_I(instruction ins, + emitAttr attr, + regNumber reg, + ssize_t imm, + insOpts opt, /* = INS_OPTS_NONE */ insScalableOpts sopt /* = INS_SCALABLE_OPTS_NONE */ - DEBUGARG(size_t targetHandle /* = 0 */) DEBUGARG(GenTreeFlags gtFlags /* = GTF_EMPTY */)) + DEBUGARG(size_t targetHandle /* = 0 */) DEBUGARG(GenTreeFlags gtFlags /* = GTF_EMPTY */)) { emitAttr size = EA_SIZE(attr); emitAttr elemsize = EA_UNKNOWN; @@ -3940,8 +3940,8 @@ void emitter::emitIns_R_I(instruction ins, // First try the standard 'byteShifted immediate' imm(i8,bySh) bsi.immBSVal = 0; canEncode = canEncodeByteShiftedImm(imm, elemsize, - (ins == INS_mvni), // mvni supports the ones shifting variant (aka MSL) - &bsi); + (ins == INS_mvni), // mvni supports the ones shifting variant (aka MSL) + &bsi); if (canEncode) { imm = bsi.immBSVal; @@ -4955,8 +4955,8 @@ void emitter::emitIns_R_I_I(instruction ins, regNumber reg, ssize_t imm1, ssize_t imm2, - insOpts opt /* = INS_OPTS_NONE */ - DEBUGARG(size_t targetHandle /* = 0 */) DEBUGARG(GenTreeFlags gtFlags /* = 0 */)) + insOpts opt /* = INS_OPTS_NONE */ + DEBUGARG(size_t targetHandle /* = 0 */) DEBUGARG(GenTreeFlags gtFlags /* = 0 */)) { emitAttr size = EA_SIZE(attr); insFormat fmt = IF_NONE; @@ -5792,15 +5792,15 @@ void emitter::emitIns_R_R_F( } /***************************************************************************** -* -* Add an instruction referencing two registers and a constant. -* Also checks for a large immediate that needs a second instruction -* and will load it in reg1 -* -* - Supports instructions: add, adds, sub, subs, and, ands, eor and orr -* - Requires that reg1 is a general register and not SP or ZR -* - Requires that reg1 != reg2 -*/ + * + * Add an instruction referencing two registers and a constant. 
+ * Also checks for a large immediate that needs a second instruction + * and will load it in reg1 + * + * - Supports instructions: add, adds, sub, subs, and, ands, eor and orr + * - Requires that reg1 is a general register and not SP or ZR + * - Requires that reg1 != reg2 + */ void emitter::emitIns_R_R_Imm(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm) { assert(isGeneralRegister(reg1)); @@ -6554,7 +6554,7 @@ void emitter::emitIns_R_R_R_I_LdStPair(instruction ins, int varx1, int varx2, int offs1, - int offs2 DEBUG_ARG(unsigned var1RefsOffs) DEBUG_ARG(unsigned var2RefsOffs)) + int offs2 DEBUG_ARG(unsigned var1RefsOffs) DEBUG_ARG(unsigned var2RefsOffs)) { assert((ins == INS_stp) || (ins == INS_ldp)); emitAttr size = EA_SIZE(attr); @@ -7147,7 +7147,7 @@ void emitter::emitIns_R_R_R_Ext(instruction ins, regNumber reg1, regNumber reg2, regNumber reg3, - insOpts opt, /* = INS_OPTS_NONE */ + insOpts opt, /* = INS_OPTS_NONE */ int shiftAmount) /* = -1 -- unset */ { emitAttr size = EA_SIZE(attr); @@ -8482,9 +8482,9 @@ void emitter::emitIns_R_AR(instruction ins, emitAttr attr, regNumber ireg, regNu } // This generates code to populate the access for TLS on linux -void emitter::emitIns_Adrp_Ldr_Add(emitAttr attr, - regNumber reg1, - regNumber reg2, +void emitter::emitIns_Adrp_Ldr_Add(emitAttr attr, + regNumber reg1, + regNumber reg2, ssize_t addr DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { assert(emitComp->IsTargetAbi(CORINFO_NATIVEAOT_ABI)); @@ -8537,9 +8537,9 @@ void emitter::emitIns_Adrp_Ldr_Add(emitAttr attr, } // This computes address from the immediate which is relocatable. 
-void emitter::emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber ireg, +void emitter::emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber ireg, ssize_t addr DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { assert(EA_IS_RELOC(attr)); @@ -9108,7 +9108,7 @@ void emitter::emitIns_Call(EmitCallType callType, if (m_debugInfoSize > 0) { INDEBUG(id->idDebugOnlyInfo()->idCallSig = sigInfo); - id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token + id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token } #ifdef LATE_DISASM @@ -10438,9 +10438,9 @@ BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i) } /***************************************************************************** -* -* Output a short branch instruction. -*/ + * + * Output a short branch instruction. + */ BYTE* emitter::emitOutputShortBranch(BYTE* dst, instruction ins, insFormat fmt, ssize_t distVal, instrDescJmp* id) { code_t code = emitInsCode(ins, fmt); @@ -10503,9 +10503,9 @@ BYTE* emitter::emitOutputShortBranch(BYTE* dst, instruction ins, insFormat fmt, } /***************************************************************************** -* -* Output a short address instruction. -*/ + * + * Output a short address instruction. + */ BYTE* emitter::emitOutputShortAddress(BYTE* dst, instruction ins, insFormat fmt, ssize_t distVal, regNumber reg) { ssize_t loBits = (distVal & 3); @@ -10533,9 +10533,9 @@ BYTE* emitter::emitOutputShortAddress(BYTE* dst, instruction ins, insFormat fmt, } /***************************************************************************** -* -* Output a short constant instruction. -*/ + * + * Output a short constant instruction. 
+ */ BYTE* emitter::emitOutputShortConstant( BYTE* dst, instruction ins, insFormat fmt, ssize_t imm, regNumber reg, emitAttr opSize) { @@ -10768,7 +10768,7 @@ unsigned emitter::emitOutput_Instr(BYTE* dst, code_t code) } /***************************************************************************** -* + * * Append the machine code corresponding to the given instruction descriptor * to the code block at '*dp'; the base of the code block is 'bp', and 'ig' * is the instruction group that contains the instruction. Updates '*dp' to @@ -11164,8 +11164,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) code = emitInsCode(ins, fmt); code |= insEncodeReg_Rd(id->idReg1()); // ddddd dst += emitOutput_Instr(dst, code); - emitRecordRelocation(odst, id->idAddr()->iiaAddr, id->idIsTlsGD() ? IMAGE_REL_AARCH64_TLSDESC_ADR_PAGE21 - : IMAGE_REL_ARM64_PAGEBASE_REL21); + emitRecordRelocation(odst, id->idAddr()->iiaAddr, + id->idIsTlsGD() ? IMAGE_REL_AARCH64_TLSDESC_ADR_PAGE21 + : IMAGE_REL_ARM64_PAGEBASE_REL21); } else { @@ -11208,8 +11209,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) { assert(sz == sizeof(instrDesc)); assert(id->idAddr()->iiaAddr != nullptr); - emitRecordRelocation(odst, id->idAddr()->iiaAddr, id->idIsTlsGD() ? IMAGE_REL_AARCH64_TLSDESC_ADD_LO12 - : IMAGE_REL_ARM64_PAGEOFFSET_12A); + emitRecordRelocation(odst, id->idAddr()->iiaAddr, + id->idIsTlsGD() ? 
IMAGE_REL_AARCH64_TLSDESC_ADD_LO12 + : IMAGE_REL_ARM64_PAGEOFFSET_12A); } break; @@ -12356,7 +12358,7 @@ void emitter::emitDispCond(insCond cond) { const static char* armCond[16] = {"eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc", "hi", "ls", "ge", "lt", "gt", "le", "AL", "NV"}; // The last two are invalid - unsigned imm = (unsigned)cond; + unsigned imm = (unsigned)cond; assert((0 <= imm) && (imm < ArrLen(armCond))); printf(armCond[imm]); } @@ -12369,7 +12371,7 @@ void emitter::emitDispFlags(insCflags flags) { const static char* armFlags[16] = {"0", "v", "c", "cv", "z", "zv", "zc", "zcv", "n", "nv", "nc", "ncv", "nz", "nzv", "nzc", "nzcv"}; - unsigned imm = (unsigned)flags; + unsigned imm = (unsigned)flags; assert((0 <= imm) && (imm < ArrLen(armFlags))); printf(armFlags[imm]); } @@ -12382,7 +12384,7 @@ void emitter::emitDispBarrier(insBarrier barrier) { const static char* armBarriers[16] = {"#0", "oshld", "oshst", "osh", "#4", "nshld", "nshst", "nsh", "#8", "ishld", "ishst", "ish", "#12", "ld", "st", "sy"}; - unsigned imm = (unsigned)barrier; + unsigned imm = (unsigned)barrier; assert((0 <= imm) && (imm < ArrLen(armBarriers))); printf(armBarriers[imm]); } @@ -14681,9 +14683,9 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins switch (insFmt) { - // - // Branch Instructions - // + // + // Branch Instructions + // case IF_BI_0A: // b, bl_local case IF_BI_0C: // bl, b_tail @@ -14936,9 +14938,9 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins break; } - // - // Load/Store Instructions - // + // + // Load/Store Instructions + // case IF_LS_1A: // ldr, ldrsw (literal, pc relative immediate) result.insThroughput = PERFSCORE_THROUGHPUT_1C; @@ -16617,7 +16619,7 @@ bool emitter::OptimizeLdrStr(instruction ins, insFormat fmt, bool localVar, int varx, - int offs DEBUG_ARG(bool useRsvdReg)) + int offs DEBUG_ARG(bool useRsvdReg)) { assert(ins == INS_ldr || ins == INS_str); diff --git a/src/coreclr/jit/emitarm64.h 
b/src/coreclr/jit/emitarm64.h index 62624fe50d68ef..cc3254c06810ab 100644 --- a/src/coreclr/jit/emitarm64.h +++ b/src/coreclr/jit/emitarm64.h @@ -124,21 +124,21 @@ enum RegisterOrder /************************************************************************/ private: -bool emitInsIsCompare(instruction ins); -bool emitInsIsLoad(instruction ins); -bool emitInsIsStore(instruction ins); -bool emitInsIsLoadOrStore(instruction ins); -bool emitInsIsVectorRightShift(instruction ins); -bool emitInsIsVectorLong(instruction ins); -bool emitInsIsVectorNarrow(instruction ins); -bool emitInsIsVectorWide(instruction ins); -bool emitInsDestIsOp2(instruction ins); +bool emitInsIsCompare(instruction ins); +bool emitInsIsLoad(instruction ins); +bool emitInsIsStore(instruction ins); +bool emitInsIsLoadOrStore(instruction ins); +bool emitInsIsVectorRightShift(instruction ins); +bool emitInsIsVectorLong(instruction ins); +bool emitInsIsVectorNarrow(instruction ins); +bool emitInsIsVectorWide(instruction ins); +bool emitInsDestIsOp2(instruction ins); emitAttr emitInsTargetRegSize(instrDesc* id); emitAttr emitInsLoadStoreSize(instrDesc* id); emitter::insFormat emitInsFormat(instruction ins); -emitter::code_t emitInsCode(instruction ins, insFormat fmt); -emitter::code_t emitInsCodeSve(instruction ins, insFormat fmt); +emitter::code_t emitInsCode(instruction ins, insFormat fmt); +emitter::code_t emitInsCodeSve(instruction ins, insFormat fmt); // Generate code for a load or store operation and handle the case of contained GT_LEA op1 with [base + index<(id->idReg1()); // ddddd - code |= insEncodeSimm<9, 5>(imm1); // iiiii - code |= insEncodeSimm<20, 16>(imm2); // iiiii - code |= insEncodeElemsize(optGetSveElemsize(id->idInsOpt())); // xx - dst += emitOutput_Instr(dst, code); - break; - } + { + ssize_t imm1; + ssize_t imm2; + insSveDecodeTwoSimm5(emitGetInsSC(id), &imm1, &imm2); + code = emitInsCodeSve(ins, fmt); + code |= insEncodeReg_V<4, 0>(id->idReg1()); // ddddd + code |= insEncodeSimm<9, 
5>(imm1); // iiiii + code |= insEncodeSimm<20, 16>(imm2); // iiiii + code |= insEncodeElemsize(optGetSveElemsize(id->idInsOpt())); // xx + dst += emitOutput_Instr(dst, code); + break; + } case IF_SVE_AY_2A: // ........xx.mmmmm ......iiiiiddddd -- SVE index generation (immediate start, register // increment) @@ -11451,16 +11451,16 @@ BYTE* emitter::emitOutput_InstrSve(BYTE* dst, instrDesc* id) case IF_SVE_HM_2A: // ........xx...... ...ggg....iddddd -- SVE floating-point arithmetic with immediate // (predicated) - { - imm = emitGetInsSC(id); - code = emitInsCodeSve(ins, fmt); - code |= insEncodeReg_V<4, 0>(id->idReg1()); // ddddd - code |= insEncodeReg_P<12, 10>(id->idReg2()); // ggg - code |= insEncodeSveSmallFloatImm(imm); // i - code |= insEncodeSveElemsize(optGetSveElemsize(id->idInsOpt())); // xx - dst += emitOutput_Instr(dst, code); - } - break; + { + imm = emitGetInsSC(id); + code = emitInsCodeSve(ins, fmt); + code |= insEncodeReg_V<4, 0>(id->idReg1()); // ddddd + code |= insEncodeReg_P<12, 10>(id->idReg2()); // ggg + code |= insEncodeSveSmallFloatImm(imm); // i + code |= insEncodeSveElemsize(optGetSveElemsize(id->idInsOpt())); // xx + dst += emitOutput_Instr(dst, code); + } + break; case IF_SVE_HN_2A: // ........xx...iii ......mmmmmddddd -- SVE floating-point trig multiply-add coefficient imm = emitGetInsSC(id); @@ -13443,17 +13443,17 @@ void emitter::emitInsSveSanityCheck(instrDesc* id) case IF_SVE_AX_1A: // ........xx.iiiii ......iiiiiddddd -- SVE index generation (immediate start, immediate // increment) - { - ssize_t imm1; - ssize_t imm2; - insSveDecodeTwoSimm5(emitGetInsSC(id), &imm1, &imm2); - assert(insOptsScalableStandard(id->idInsOpt())); - assert(isVectorRegister(id->idReg1())); // ddddd - assert(isValidSimm<5>(imm1)); // iiiii - assert(isValidSimm<5>(imm2)); // iiiii - assert(isValidVectorElemsize(optGetSveElemsize(id->idInsOpt()))); // xx - break; - } + { + ssize_t imm1; + ssize_t imm2; + insSveDecodeTwoSimm5(emitGetInsSC(id), &imm1, &imm2); + 
assert(insOptsScalableStandard(id->idInsOpt())); + assert(isVectorRegister(id->idReg1())); // ddddd + assert(isValidSimm<5>(imm1)); // iiiii + assert(isValidSimm<5>(imm2)); // iiiii + assert(isValidVectorElemsize(optGetSveElemsize(id->idInsOpt()))); // xx + break; + } case IF_SVE_AY_2A: // ........xx.mmmmm ......iiiiiddddd -- SVE index generation (immediate start, register // increment) @@ -14579,37 +14579,37 @@ void emitter::emitDispInsSveHelp(instrDesc* id) // ., #, # case IF_SVE_AX_1A: // ........xx.iiiii ......iiiiiddddd -- SVE index generation (immediate start, immediate // increment) - { - ssize_t imm1; - ssize_t imm2; - insSveDecodeTwoSimm5(emitGetInsSC(id), &imm1, &imm2); - emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd - emitDispImm(imm1, true); // iiiii - emitDispImm(imm2, false); // iiiii - break; - } + { + ssize_t imm1; + ssize_t imm2; + insSveDecodeTwoSimm5(emitGetInsSC(id), &imm1, &imm2); + emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd + emitDispImm(imm1, true); // iiiii + emitDispImm(imm2, false); // iiiii + break; + } // ., #, case IF_SVE_AY_2A: // ........xx.mmmmm ......iiiiiddddd -- SVE index generation (immediate start, register // increment) - { - const emitAttr intRegSize = (id->idInsOpt() == INS_OPTS_SCALABLE_D) ? EA_8BYTE : EA_4BYTE; - emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd - emitDispImm(emitGetInsSC(id), true); // iiiii - emitDispReg(id->idReg2(), intRegSize, false); // mmmmm - break; - } + { + const emitAttr intRegSize = (id->idInsOpt() == INS_OPTS_SCALABLE_D) ? EA_8BYTE : EA_4BYTE; + emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd + emitDispImm(emitGetInsSC(id), true); // iiiii + emitDispReg(id->idReg2(), intRegSize, false); // mmmmm + break; + } // ., , # case IF_SVE_AZ_2A: // ........xx.iiiii ......nnnnnddddd -- SVE index generation (register start, immediate // increment) - { - const emitAttr intRegSize = (id->idInsOpt() == INS_OPTS_SCALABLE_D) ? 
EA_8BYTE : EA_4BYTE; - emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd - emitDispReg(id->idReg2(), intRegSize, true); // mmmmm - emitDispImm(emitGetInsSC(id), false); // iiiii - break; - } + { + const emitAttr intRegSize = (id->idInsOpt() == INS_OPTS_SCALABLE_D) ? EA_8BYTE : EA_4BYTE; + emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd + emitDispReg(id->idReg2(), intRegSize, true); // mmmmm + emitDispImm(emitGetInsSC(id), false); // iiiii + break; + } // .H, .B, .B case IF_SVE_GN_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 FP8 multiply-add long diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp index 6aaf00a973c8a7..24b408b4b8db38 100644 --- a/src/coreclr/jit/emitloongarch64.cpp +++ b/src/coreclr/jit/emitloongarch64.cpp @@ -51,9 +51,9 @@ const emitJumpKind emitReverseJumpKinds[] = { } /***************************************************************************** -* Look up the jump kind for an instruction. It better be a conditional -* branch instruction with a jump kind! -*/ + * Look up the jump kind for an instruction. It better be a conditional + * branch instruction with a jump kind! + */ /*static*/ emitJumpKind emitter::emitInsToJumpKind(instruction ins) { @@ -2047,9 +2047,9 @@ void emitter::emitIns_R_AR(instruction ins, emitAttr attr, regNumber ireg, regNu } // This computes address from the immediate which is relocatable. 
-void emitter::emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber reg, +void emitter::emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber reg, ssize_t addr DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { assert(EA_IS_RELOC(attr)); // EA_PTR_DSP_RELOC @@ -2381,8 +2381,8 @@ void emitter::emitIns_I_la(emitAttr size, regNumber reg, ssize_t imm) void emitter::emitIns_Call(EmitCallType callType, CORINFO_METHOD_HANDLE methHnd, INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE - void* addr, - ssize_t argSize, + void* addr, + ssize_t argSize, emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize), VARSET_VALARG_TP ptrVars, regMaskTP gcrefRegs, @@ -2786,9 +2786,9 @@ void emitter::emitJumpDistBind() B_DIST_SMALL_MAX_POS - emitCounts_INS_OPTS_J * (3 << 2); // the max placeholder sizeof(INS_OPTS_JIRL) - sizeof(INS_OPTS_J). -/*****************************************************************************/ -/* If the default small encoding is not enough, we start again here. */ -/*****************************************************************************/ + /*****************************************************************************/ + /* If the default small encoding is not enough, we start again here. */ + /*****************************************************************************/ AGAIN: @@ -2819,7 +2819,7 @@ void emitter::emitJumpDistBind() UNATIVE_OFFSET dstOffs; NATIVE_OFFSET jmpDist; // the relative jump distance, as it will be encoded -/* Make sure the jumps are properly ordered */ + /* Make sure the jumps are properly ordered */ #ifdef DEBUG assert(lastSJ == nullptr || lastIG != jmp->idjIG || lastSJ->idjOffs < (jmp->idjOffs + adjSJ)); @@ -2997,8 +2997,8 @@ void emitter::emitJumpDistBind() instruction ins = jmp->idIns(); assert((INS_bceqz <= ins) && (ins <= INS_bl)); - if (ins < - INS_beqz) // bceqz/bcnez/beq/bne/blt/bltu/bge/bgeu < beqz < bnez // See instrsloongarch64.h. 
+ if (ins < INS_beqz) // bceqz/bcnez/beq/bne/blt/bltu/bge/bgeu < beqz < bnez // See + // instrsloongarch64.h. { if ((jmpDist + emitCounts_INS_OPTS_J * 4) < 0x8000000) { @@ -3085,8 +3085,8 @@ void emitter::emitJumpDistBind() instruction ins = jmp->idIns(); assert((INS_bceqz <= ins) && (ins <= INS_bl)); - if (ins < - INS_beqz) // bceqz/bcnez/beq/bne/blt/bltu/bge/bgeu < beqz < bnez // See instrsloongarch64.h. + if (ins < INS_beqz) // bceqz/bcnez/beq/bne/blt/bltu/bge/bgeu < beqz < bnez // See + // instrsloongarch64.h. { if ((jmpDist + emitCounts_INS_OPTS_J * 4) < 0x8000000) { @@ -3181,7 +3181,7 @@ void emitter::emitJumpDistBind() } /***************************************************************************** -* + * * Append the machine code corresponding to the given instruction descriptor * to the code block at '*dp'; the base of the code block is 'bp', and 'ig' * is the instruction group that contains the instruction. Updates '*dp' to diff --git a/src/coreclr/jit/emitloongarch64.h b/src/coreclr/jit/emitloongarch64.h index 11a2f9ee90710e..135f9cf4006735 100644 --- a/src/coreclr/jit/emitloongarch64.h +++ b/src/coreclr/jit/emitloongarch64.h @@ -104,10 +104,10 @@ enum insDisasmFmt #endif }; -code_t emitGetInsMask(int ins); +code_t emitGetInsMask(int ins); insDisasmFmt emitGetInsFmt(instruction ins); -void emitDispInst(instruction ins); -void emitDisInsName(code_t code, const BYTE* addr, instrDesc* id); +void emitDispInst(instruction ins); +void emitDisInsName(code_t code, const BYTE* addr, instrDesc* id); #endif // DEBUG void emitIns_J_cond_la(instruction ins, BasicBlock* dst, regNumber reg1 = REG_R0, regNumber reg2 = REG_R0); @@ -316,9 +316,9 @@ void emitIns_J_R(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg) void emitIns_R_AR(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, int offs); -void emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber reg, +void emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber reg, ssize_t disp 
DEBUGARG(size_t targetHandle = 0) DEBUGARG(GenTreeFlags gtFlags = GTF_EMPTY)); enum EmitCallType @@ -343,8 +343,8 @@ enum EmitCallType void emitIns_Call(EmitCallType callType, CORINFO_METHOD_HANDLE methHnd, INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE - void* addr, - ssize_t argSize, + void* addr, + ssize_t argSize, emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize), VARSET_VALARG_TP ptrVars, regMaskTP gcrefRegs, diff --git a/src/coreclr/jit/emitpub.h b/src/coreclr/jit/emitpub.h index c31d21153fd970..bf15ba33667cac 100644 --- a/src/coreclr/jit/emitpub.h +++ b/src/coreclr/jit/emitpub.h @@ -16,24 +16,24 @@ void emitBegFN(bool hasFramePtr , bool checkAlign #endif - ); +); void emitEndFN(); void emitComputeCodeSizes(); -unsigned emitEndCodeGen(Compiler* comp, - bool contTrkPtrLcls, - bool fullyInt, - bool fullPtrMap, - unsigned xcptnsCount, - unsigned* prologSize, - unsigned* epilogSize, - void** codeAddr, - void** codeAddrRW, - void** coldCodeAddr, - void** coldCodeAddrRW, - void** consAddr, +unsigned emitEndCodeGen(Compiler* comp, + bool contTrkPtrLcls, + bool fullyInt, + bool fullPtrMap, + unsigned xcptnsCount, + unsigned* prologSize, + unsigned* epilogSize, + void** codeAddr, + void** codeAddrRW, + void** coldCodeAddr, + void** coldCodeAddrRW, + void** consAddr, void** consAddrRW DEBUGARG(unsigned* instrCount)); /************************************************************************/ @@ -102,11 +102,11 @@ UNATIVE_OFFSET emitDataSize(); /************************************************************************/ #ifdef TARGET_XARCH -static bool instrIs3opImul(instruction ins); -static bool instrIsExtendedReg3opImul(instruction ins); -static bool instrHasImplicitRegPairDest(instruction ins); -static void check3opImulValues(); -static regNumber inst3opImulReg(instruction ins); +static bool instrIs3opImul(instruction ins); +static bool instrIsExtendedReg3opImul(instruction ins); +static bool 
instrHasImplicitRegPairDest(instruction ins); +static void check3opImulValues(); +static regNumber inst3opImulReg(instruction ins); static instruction inst3opImulForReg(regNumber reg); #endif diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp index 1c5be94198f8af..525d5e5274ba74 100644 --- a/src/coreclr/jit/emitriscv64.cpp +++ b/src/coreclr/jit/emitriscv64.cpp @@ -988,9 +988,9 @@ void emitter::emitIns_R_AR(instruction ins, emitAttr attr, regNumber ireg, regNu } // This computes address from the immediate which is relocatable. -void emitter::emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber reg, +void emitter::emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber reg, ssize_t addr DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { assert(EA_IS_RELOC(attr)); // EA_PTR_DSP_RELOC @@ -1290,8 +1290,8 @@ void emitter::emitLoadImmediate(emitAttr size, regNumber reg, ssize_t imm) void emitter::emitIns_Call(EmitCallType callType, CORINFO_METHOD_HANDLE methHnd, INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE - void* addr, - ssize_t argSize, + void* addr, + ssize_t argSize, emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize), VARSET_VALARG_TP ptrVars, regMaskTP gcrefRegs, @@ -1760,9 +1760,9 @@ void emitter::emitJumpDistBind() emitCounts_INS_OPTS_J * (6 << 2); // the max placeholder sizeof(INS_OPTS_JALR) - sizeof(INS_OPTS_J) NATIVE_OFFSET psd = B_DIST_SMALL_MAX_POS - maxPlaceholderSize; -/*****************************************************************************/ -/* If the default small encoding is not enough, we start again here. */ -/*****************************************************************************/ + /*****************************************************************************/ + /* If the default small encoding is not enough, we start again here. 
*/ + /*****************************************************************************/ AGAIN: @@ -1793,7 +1793,7 @@ void emitter::emitJumpDistBind() UNATIVE_OFFSET dstOffs; NATIVE_OFFSET jmpDist; // the relative jump distance, as it will be encoded -/* Make sure the jumps are properly ordered */ + /* Make sure the jumps are properly ordered */ #ifdef DEBUG assert(lastSJ == nullptr || lastIG != jmp->idjIG || lastSJ->idjOffs < (jmp->idjOffs + adjSJ)); @@ -1948,8 +1948,8 @@ void emitter::emitJumpDistBind() instruction ins = jmp->idIns(); assert((INS_jal <= ins) && (ins <= INS_bgeu)); - if (ins > INS_jalr || - (ins < INS_jalr && ins > INS_j)) // jal < beqz < bnez < jalr < beq/bne/blt/bltu/bge/bgeu + if (ins > INS_jalr || (ins < INS_jalr && ins > INS_j)) // jal < beqz < bnez < jalr < + // beq/bne/blt/bltu/bge/bgeu { if (isValidSimm13(jmpDist + maxPlaceholderSize)) { @@ -2022,8 +2022,8 @@ void emitter::emitJumpDistBind() instruction ins = jmp->idIns(); assert((INS_jal <= ins) && (ins <= INS_bgeu)); - if (ins > INS_jalr || - (ins < INS_jalr && ins > INS_j)) // jal < beqz < bnez < jalr < beq/bne/blt/bltu/bge/bgeu + if (ins > INS_jalr || (ins < INS_jalr && ins > INS_j)) // jal < beqz < bnez < jalr < + // beq/bne/blt/bltu/bge/bgeu { if (isValidSimm13(jmpDist + maxPlaceholderSize)) { @@ -2966,7 +2966,7 @@ BYTE* emitter::emitOutputInstr_OptsRcNoReloc(BYTE* dst, instruction* ins, unsign const regNumber rsvdReg = codeGen->rsGetRsvdReg(); const instruction lastIns = (*ins == INS_jal) ? 
(*ins = INS_addi) : *ins; - const ssize_t high = immediate >> 11; + const ssize_t high = immediate >> 11; dst += emitOutput_UTypeInstr(dst, INS_lui, rsvdReg, UpperNBitsOfWordSignExtend<20>(high)); dst += emitOutput_ITypeInstr(dst, INS_addi, rsvdReg, rsvdReg, LowerNBitsOfWord<12>(high)); diff --git a/src/coreclr/jit/emitriscv64.h b/src/coreclr/jit/emitriscv64.h index aef61a029cb576..07e603a70afb7c 100644 --- a/src/coreclr/jit/emitriscv64.h +++ b/src/coreclr/jit/emitriscv64.h @@ -82,17 +82,17 @@ void emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataReg, GenTr unsigned emitOutput_Instr(BYTE* dst, code_t code) const; ssize_t emitOutputInstrJumpDistance(const BYTE* src, const insGroup* ig, instrDescJmp* jmp); -void emitOutputInstrJumpDistanceHelper(const insGroup* ig, - instrDescJmp* jmp, - UNATIVE_OFFSET& dstOffs, - const BYTE*& dstAddr) const; +void emitOutputInstrJumpDistanceHelper(const insGroup* ig, + instrDescJmp* jmp, + UNATIVE_OFFSET& dstOffs, + const BYTE*& dstAddr) const; // Method to do check if mov is redundant with respect to the last instruction. // If yes, the caller of this method can choose to omit current mov instruction. static bool IsMovInstruction(instruction ins); -bool IsRedundantMov(instruction ins, emitAttr size, regNumber dst, regNumber src, bool canSkip); -bool IsRedundantLdStr( - instruction ins, regNumber reg1, regNumber reg2, ssize_t imm, emitAttr size, insFormat fmt); // New functions end. +bool IsRedundantMov(instruction ins, emitAttr size, regNumber dst, regNumber src, bool canSkip); +bool IsRedundantLdStr( + instruction ins, regNumber reg1, regNumber reg2, ssize_t imm, emitAttr size, insFormat fmt); // New functions end. 
static code_t insEncodeRTypeInstr( unsigned opcode, unsigned rd, unsigned funct3, unsigned rs1, unsigned rs2, unsigned funct7); @@ -293,9 +293,9 @@ void emitIns_J_R(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg) void emitIns_R_AR(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, int offs); -void emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber reg, +void emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber reg, ssize_t disp DEBUGARG(size_t targetHandle = 0) DEBUGARG(GenTreeFlags gtFlags = GTF_EMPTY)); enum EmitCallType @@ -324,8 +324,8 @@ enum EmitCallType void emitIns_Call(EmitCallType callType, CORINFO_METHOD_HANDLE methHnd, INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE - void* addr, - ssize_t argSize, + void* addr, + ssize_t argSize, emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize), VARSET_VALARG_TP ptrVars, regMaskTP gcrefRegs, diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index 1bafb6796d8075..e356ab8b3d1132 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -1287,10 +1287,10 @@ bool emitter::TakesEvexPrefix(const instrDesc* id) const #define DEFAULT_BYTE_EVEX_PREFIX 0x62F07C0800000000ULL #define DEFAULT_BYTE_EVEX_PREFIX_MASK 0xFFFFFFFF00000000ULL -#define BBIT_IN_BYTE_EVEX_PREFIX 0x0000001000000000ULL -#define LBIT_IN_BYTE_EVEX_PREFIX 0x0000002000000000ULL +#define BBIT_IN_BYTE_EVEX_PREFIX 0x0000001000000000ULL +#define LBIT_IN_BYTE_EVEX_PREFIX 0x0000002000000000ULL #define LPRIMEBIT_IN_BYTE_EVEX_PREFIX 0x0000004000000000ULL -#define ZBIT_IN_BYTE_EVEX_PREFIX 0x0000008000000000ULL +#define ZBIT_IN_BYTE_EVEX_PREFIX 0x0000008000000000ULL //------------------------------------------------------------------------ // AddEvexPrefix: Add default EVEX prefix with only LL' bits set. 
@@ -1460,9 +1460,9 @@ bool emitter::TakesVexPrefix(instruction ins) const // 01 - 66 (66 0F - packed double) // 10 - F3 (F3 0F - scalar float // 11 - F2 (F2 0F - scalar double) -#define DEFAULT_3BYTE_VEX_PREFIX 0xC4E07800000000ULL +#define DEFAULT_3BYTE_VEX_PREFIX 0xC4E07800000000ULL #define DEFAULT_3BYTE_VEX_PREFIX_MASK 0xFFFFFF00000000ULL -#define LBIT_IN_3BYTE_VEX_PREFIX 0x00000400000000ULL +#define LBIT_IN_3BYTE_VEX_PREFIX 0x00000400000000ULL emitter::code_t emitter::AddVexPrefix(instruction ins, code_t code, emitAttr attr) { // The 2-byte VEX encoding is preferred when possible, but actually emitting @@ -3597,7 +3597,7 @@ bool emitter::emitVerifyEncodable(instruction ins, emitAttr size, regNumber reg1 #ifdef FEATURE_HW_INTRINSICS && (ins != INS_crc32) #endif - ) + ) { // reg1 must be a byte-able register if ((genRegMask(reg1) & RBM_BYTE_REGS) == 0) @@ -4108,7 +4108,8 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, code_t code) assert((attrSize == EA_4BYTE) || (attrSize == EA_PTRSIZE) // Only for x64 || (attrSize == EA_16BYTE) || (attrSize == EA_32BYTE) || (attrSize == EA_64BYTE) // only for x64 - || (ins == INS_movzx) || (ins == INS_movsx) || (ins == INS_cmpxchg) + || (ins == INS_movzx) || (ins == INS_movsx) || + (ins == INS_cmpxchg) // The prefetch instructions are always 3 bytes and have part of their modr/m byte hardcoded || isPrefetch(ins)); @@ -4489,9 +4490,9 @@ emitter::instrDesc* emitter::emitNewInstrAmdCns(emitAttr size, ssize_t dsp, int } /***************************************************************************** -* -* Add a data16 instruction of the 1 byte. -*/ + * + * Add a data16 instruction of the 1 byte. 
+ */ void emitter::emitIns_Data16() { @@ -4539,7 +4540,8 @@ void emitter::emitIns(instruction ins) (ins == INS_cdq || ins == INS_int3 || ins == INS_lock || ins == INS_leave || ins == INS_movsb || ins == INS_movsd || ins == INS_movsp || ins == INS_nop || ins == INS_r_movsb || ins == INS_r_movsd || ins == INS_r_movsp || ins == INS_r_stosb || ins == INS_r_stosd || ins == INS_r_stosp || ins == INS_ret || - ins == INS_sahf || ins == INS_stosb || ins == INS_stosd || ins == INS_stosp + ins == INS_sahf || ins == INS_stosb || ins == INS_stosd || + ins == INS_stosp // These instructions take zero operands || ins == INS_vzeroupper || ins == INS_lfence || ins == INS_mfence || ins == INS_sfence || ins == INS_pause || ins == INS_serialize); @@ -6969,9 +6971,9 @@ void emitter::emitIns_R_R_C(instruction ins, } /***************************************************************************** -* -* Add an instruction with three register operands. -*/ + * + * Add an instruction with three register operands. + */ void emitter::emitIns_R_R_R( instruction ins, emitAttr attr, regNumber targetReg, regNumber reg1, regNumber reg2, insOpts instOptions) @@ -7102,16 +7104,16 @@ void emitter::emitIns_R_R_C_I( } /********************************************************************************** -* emitIns_R_R_R_I: Add an instruction with three register operands and an immediate. -* -* Arguments: -* ins - the instruction to add -* attr - the emitter attribute for instruction -* targetReg - the target (destination) register -* reg1 - the first source register -* reg2 - the second source register -* ival - the immediate value -*/ + * emitIns_R_R_R_I: Add an instruction with three register operands and an immediate. 
+ * + * Arguments: + * ins - the instruction to add + * attr - the emitter attribute for instruction + * targetReg - the target (destination) register + * reg1 - the first source register + * reg2 - the second source register + * ival - the immediate value + */ void emitter::emitIns_R_R_R_I( instruction ins, emitAttr attr, regNumber targetReg, regNumber reg1, regNumber reg2, int ival) @@ -7745,9 +7747,9 @@ void emitter::emitIns_R_AR(instruction ins, emitAttr attr, regNumber reg, regNum emitIns_R_ARX(ins, attr, reg, base, REG_NA, 1, disp); } -void emitter::emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber ireg, +void emitter::emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber ireg, ssize_t disp DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE) && (ireg != REG_NA)); @@ -9706,7 +9708,7 @@ void emitter::emitIns_Call(EmitCallType callType, if (m_debugInfoSize > 0) { INDEBUG(id->idDebugOnlyInfo()->idCallSig = sigInfo); - id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token + id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token } #ifdef LATE_DISASM @@ -11698,7 +11700,7 @@ void emitter::emitDispIns( #ifdef TARGET_AMD64 || ins == INS_shrx || ins == INS_shlx || ins == INS_sarx #endif - ) + ) { // BMI bextr,bzhi, shrx, shlx and sarx encode the reg2 in VEX.vvvv and reg3 in modRM, // which is different from most of other instructions @@ -12999,9 +13001,9 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) dst += emitOutputWord(dst, code | 0x0500); } #else // TARGET_AMD64 - // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. - // This addr mode should never be used while generating relocatable ngen code nor if - // the addr can be encoded as pc-relative address. + // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. 
+ // This addr mode should never be used while generating relocatable ngen code nor if + // the addr can be encoded as pc-relative address. noway_assert(!emitComp->opts.compReloc); noway_assert(codeGen->genAddrRelocTypeHint((size_t)dsp) != IMAGE_REL_BASED_REL32); noway_assert((int)dsp == dsp); @@ -13925,7 +13927,7 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) case IF_SRW_CNS: case IF_SRW_RRD: case IF_SRW_RRW: - // += -= of a byref, no change + // += -= of a byref, no change case IF_SRW: break; @@ -16437,9 +16439,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) break; } - /********************************************************************/ - /* Simple constant, local label, method */ - /********************************************************************/ + /********************************************************************/ + /* Simple constant, local label, method */ + /********************************************************************/ case IF_CNS: { @@ -16557,9 +16559,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) #ifdef TARGET_X86 dst += emitOutputWord(dst, code | 0x0500); #else // TARGET_AMD64 - // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. - // This addr mode should never be used while generating relocatable ngen code nor if - // the addr can be encoded as pc-relative address. + // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. + // This addr mode should never be used while generating relocatable ngen code nor if + // the addr can be encoded as pc-relative address. 
noway_assert(!emitComp->opts.compReloc); noway_assert(codeGen->genAddrRelocTypeHint((size_t)addr) != IMAGE_REL_BASED_REL32); noway_assert(static_cast(reinterpret_cast(addr)) == (ssize_t)addr); @@ -16712,9 +16714,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) break; } - /********************************************************************/ - /* One register operand */ - /********************************************************************/ + /********************************************************************/ + /* One register operand */ + /********************************************************************/ case IF_RRD: case IF_RWR: @@ -16725,9 +16727,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) break; } - /********************************************************************/ - /* Register and register/constant */ - /********************************************************************/ + /********************************************************************/ + /* Register and register/constant */ + /********************************************************************/ case IF_RRW_SHF: { @@ -16952,9 +16954,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) break; } - /********************************************************************/ - /* Address mode operand */ - /********************************************************************/ + /********************************************************************/ + /* Address mode operand */ + /********************************************************************/ case IF_ARD: case IF_AWR: @@ -17191,9 +17193,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) break; } - /********************************************************************/ - /* Stack-based operand */ - /********************************************************************/ + 
/********************************************************************/ + /* Stack-based operand */ + /********************************************************************/ case IF_SRD: case IF_SWR: @@ -17455,9 +17457,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) unreached(); } - /********************************************************************/ - /* Direct memory address */ - /********************************************************************/ + /********************************************************************/ + /* Direct memory address */ + /********************************************************************/ case IF_MRD: case IF_MRW: @@ -17757,9 +17759,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) unreached(); } - /********************************************************************/ - /* oops */ - /********************************************************************/ + /********************************************************************/ + /* oops */ + /********************************************************************/ default: @@ -18224,7 +18226,7 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins #ifdef TARGET_AMD64 || ins == INS_movsxd #endif - ) + ) { result.insLatency += PERFSCORE_LATENCY_2C; } diff --git a/src/coreclr/jit/emitxarch.h b/src/coreclr/jit/emitxarch.h index 4554a892201f95..e32cab66254fe8 100644 --- a/src/coreclr/jit/emitxarch.h +++ b/src/coreclr/jit/emitxarch.h @@ -93,7 +93,7 @@ code_t emitExtractEvexPrefix(instruction ins, code_t& code) const; unsigned insEncodeReg012(const instrDesc* id, regNumber reg, emitAttr size, code_t* code); unsigned insEncodeReg345(const instrDesc* id, regNumber reg, emitAttr size, code_t* code); -code_t insEncodeReg3456(const instrDesc* id, regNumber reg, emitAttr size, code_t code); +code_t insEncodeReg3456(const instrDesc* id, regNumber reg, emitAttr size, code_t code); unsigned insEncodeRegSIB(const 
instrDesc* id, regNumber reg, code_t* code); code_t insEncodeMRreg(const instrDesc* id, code_t code); @@ -116,11 +116,11 @@ static bool IsKInstruction(instruction ins); static regNumber getBmiRegNumber(instruction ins); static regNumber getSseShiftRegNumber(instruction ins); -bool HasVexEncoding(instruction ins) const; -bool HasEvexEncoding(instruction ins) const; -bool IsVexEncodableInstruction(instruction ins) const; -bool IsEvexEncodableInstruction(instruction ins) const; -bool IsVexOrEvexEncodableInstruction(instruction ins) const; +bool HasVexEncoding(instruction ins) const; +bool HasEvexEncoding(instruction ins) const; +bool IsVexEncodableInstruction(instruction ins) const; +bool IsEvexEncodableInstruction(instruction ins) const; +bool IsVexOrEvexEncodableInstruction(instruction ins) const; code_t insEncodeMIreg(const instrDesc* id, regNumber reg, emitAttr size, code_t code); @@ -130,15 +130,15 @@ code_t AddRexXPrefix(const instrDesc* id, code_t code); code_t AddRexBPrefix(const instrDesc* id, code_t code); code_t AddRexPrefix(instruction ins, code_t code); -bool EncodedBySSE38orSSE3A(instruction ins) const; -bool Is4ByteSSEInstruction(instruction ins) const; +bool EncodedBySSE38orSSE3A(instruction ins) const; +bool Is4ByteSSEInstruction(instruction ins) const; code_t AddEvexVPrimePrefix(code_t code); code_t AddEvexRPrimePrefix(code_t code); static bool IsMovInstruction(instruction ins); -bool HasSideEffect(instruction ins, emitAttr size); -bool IsRedundantMov( - instruction ins, insFormat fmt, emitAttr size, regNumber dst, regNumber src, bool canIgnoreSideEffects); +bool HasSideEffect(instruction ins, emitAttr size); +bool IsRedundantMov( + instruction ins, insFormat fmt, emitAttr size, regNumber dst, regNumber src, bool canIgnoreSideEffects); bool EmitMovsxAsCwde(instruction ins, emitAttr size, regNumber dst, regNumber src); bool IsRedundantStackMov(instruction ins, insFormat fmt, emitAttr size, regNumber ireg, int varx, int offs); @@ -478,15 +478,15 @@ 
void SetContainsCallNeedingVzeroupper(bool value) containsCallNeedingVzeroupper = value; } -bool IsDstDstSrcAVXInstruction(instruction ins) const; -bool IsDstSrcSrcAVXInstruction(instruction ins) const; -bool IsThreeOperandAVXInstruction(instruction ins) const; +bool IsDstDstSrcAVXInstruction(instruction ins) const; +bool IsDstSrcSrcAVXInstruction(instruction ins) const; +bool IsThreeOperandAVXInstruction(instruction ins) const; static bool HasRegularWideForm(instruction ins); static bool HasRegularWideImmediateForm(instruction ins); static bool DoesWriteZeroFlag(instruction ins); static bool DoesWriteSignFlag(instruction ins); static bool DoesResetOverflowAndCarryFlags(instruction ins); -bool IsFlagsAlwaysModified(instrDesc* id); +bool IsFlagsAlwaysModified(instrDesc* id); static bool IsRexW0Instruction(instruction ins); static bool IsRexW1Instruction(instruction ins); static bool IsRexWXInstruction(instruction ins); @@ -528,7 +528,7 @@ const char* emitZMMregName(unsigned reg) const; /************************************************************************/ private: -void emitSetAmdDisp(instrDescAmd* id, ssize_t dsp); +void emitSetAmdDisp(instrDescAmd* id, ssize_t dsp); instrDesc* emitNewInstrAmd(emitAttr attr, ssize_t dsp); instrDesc* emitNewInstrAmdCns(emitAttr attr, ssize_t dsp, int cns); @@ -545,9 +545,9 @@ instrDesc* emitNewInstrCallInd(int argCnt, regMaskTP byrefRegs, emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize)); -void emitGetInsCns(const instrDesc* id, CnsVal* cv) const; +void emitGetInsCns(const instrDesc* id, CnsVal* cv) const; ssize_t emitGetInsAmdCns(const instrDesc* id, CnsVal* cv) const; -void emitGetInsDcmCns(const instrDesc* id, CnsVal* cv) const; +void emitGetInsDcmCns(const instrDesc* id, CnsVal* cv) const; ssize_t emitGetInsAmdAny(const instrDesc* id) const; /************************************************************************/ @@ -580,10 +580,10 @@ size_t emitSizeOfInsDsc_NONE(instrDesc* id) const; size_t 
emitSizeOfInsDsc_SPEC(instrDesc* id) const; /***************************************************************************** -* -* Convert between an index scale in bytes to a smaller encoding used for -* storage in instruction descriptors. -*/ + * + * Convert between an index scale in bytes to a smaller encoding used for + * storage in instruction descriptors. + */ inline emitter::opSize emitEncodeScale(size_t scale) { @@ -752,9 +752,9 @@ void emitIns_I_AI(instruction ins, emitAttr attr, int val, ssize_t disp); void emitIns_R_AR(instruction ins, emitAttr attr, regNumber reg, regNumber base, int disp); -void emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber ireg, +void emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber ireg, ssize_t disp DEBUGARG(size_t targetHandle = 0) DEBUGARG(GenTreeFlags gtFlags = GTF_EMPTY)); void emitIns_AR_R(instruction ins, emitAttr attr, regNumber reg, regNumber base, cnsval_ssize_t disp); diff --git a/src/coreclr/jit/error.cpp b/src/coreclr/jit/error.cpp index a45ad7c7df0ef0..5ae6cea056efeb 100644 --- a/src/coreclr/jit/error.cpp +++ b/src/coreclr/jit/error.cpp @@ -250,7 +250,9 @@ void debugError(const char* msg, const char* file, unsigned line) } /*****************************************************************************/ -LogEnv::LogEnv(ICorJitInfo* aCompHnd) : compHnd(aCompHnd), compiler(nullptr) +LogEnv::LogEnv(ICorJitInfo* aCompHnd) + : compHnd(aCompHnd) + , compiler(nullptr) { } diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 5b4fcd33f8e214..a650ae437fccc6 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -899,7 +899,10 @@ BasicBlock* Compiler::fgLookupBB(unsigned addr) class FgStack { public: - FgStack() : slot0(SLOT_INVALID), slot1(SLOT_INVALID), depth(0) + FgStack() + : slot0(SLOT_INVALID) + , slot1(SLOT_INVALID) + , depth(0) { // Empty } @@ -3122,7 +3125,7 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F codeAddr += 
sizeof(__int8); goto DECODE_OPCODE; - /* Check to see if we have a jump/return opcode */ + /* Check to see if we have a jump/return opcode */ case CEE_BRFALSE: case CEE_BRFALSE_S: @@ -3305,7 +3308,7 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F // statement in the block. // Otherwise, we will assert at the following line in fgMorphCall() // noway_assert(fgMorphStmt->GetNextStmt() == NULL); - ) + ) { // Neither .tailcall prefix, no tailcall stress. So move on. break; @@ -4030,11 +4033,11 @@ void Compiler::fgFindBasicBlocks() #endif } -/* Init ebdHandlerNestingLevel of current clause, and bump up value for all - * enclosed clauses (which have to be before it in the table). - * Innermost try-finally blocks must precede outermost - * try-finally blocks. - */ + /* Init ebdHandlerNestingLevel of current clause, and bump up value for all + * enclosed clauses (which have to be before it in the table). + * Innermost try-finally blocks must precede outermost + * try-finally blocks. + */ #if !defined(FEATURE_EH_FUNCLETS) HBtab->ebdHandlerNestingLevel = 0; @@ -5917,8 +5920,8 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r } else { - assert(fgFirstFuncletBB != - insertAfterBlk->Next()); // We insert at the end, not at the beginning, of the funclet region. + assert(fgFirstFuncletBB != insertAfterBlk->Next()); // We insert at the end, not at the beginning, of the + // funclet region. 
} #ifdef DEBUG @@ -6244,8 +6247,8 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, noway_assert(startBlk != nullptr); noway_assert(startBlk != endBlk); noway_assert((regionIndex == 0 && putInTryRegion) || // Search in the main method - (putInTryRegion && regionIndex > 0 && - startBlk->bbTryIndex == regionIndex) || // Search in the specified try region + (putInTryRegion && regionIndex > 0 && startBlk->bbTryIndex == regionIndex) || // Search in the + // specified try region (!putInTryRegion && regionIndex > 0 && startBlk->bbHndIndex == regionIndex)); // Search in the specified handler region diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index f49f863b16dc57..e2af55f45ac820 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -389,7 +389,7 @@ const char* ConvertToUtf8(LPCWSTR wideString, CompAllocator& allocator) return alloc; } -} +} // namespace #endif //------------------------------------------------------------------------ @@ -546,7 +546,7 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePositi ONE_FILE_PER_METHOD:; -#define FILENAME_PATTERN "%s-%s-%s-%s.%s" +#define FILENAME_PATTERN "%s-%s-%s-%s.%s" #define FILENAME_PATTERN_WITH_NUMBER "%s-%s-%s-%s~%d.%s" const size_t MaxFileNameLength = MAX_PATH_FNAME - 20 /* give us some extra buffer */; @@ -1249,7 +1249,10 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) public: RegionGraph(Compiler* comp, unsigned* blkMap, unsigned blkMapSize) - : m_comp(comp), m_rgnRoot(nullptr), m_blkMap(blkMap), m_blkMapSize(blkMapSize) + : m_comp(comp) + , m_rgnRoot(nullptr) + , m_blkMap(blkMap) + , m_blkMapSize(blkMapSize) { // Create a root region that encompasses the whole function. 
m_rgnRoot = @@ -2642,7 +2645,8 @@ void Compiler::fgStress64RsltMul() class BBPredsChecker { public: - BBPredsChecker(Compiler* compiler) : comp(compiler) + BBPredsChecker(Compiler* compiler) + : comp(compiler) { } @@ -3240,7 +3244,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef #ifndef JIT32_GCENCODER copiedForGenericsCtxt = ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0); #else // JIT32_GCENCODER - copiedForGenericsCtxt = false; + copiedForGenericsCtxt = false; #endif // JIT32_GCENCODER // This if only in support of the noway_asserts it contains. @@ -3284,7 +3288,8 @@ void Compiler::fgDebugCheckTypes(GenTree* tree) DoPostOrder = true, }; - NodeTypeValidator(Compiler* comp) : GenTreeVisitor(comp) + NodeTypeValidator(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -3733,7 +3738,9 @@ void Compiler::fgDebugCheckLinkedLocals() UseExecutionOrder = true, }; - DebugLocalSequencer(Compiler* comp) : GenTreeVisitor(comp), m_locals(comp->getAllocator(CMK_DebugOnly)) + DebugLocalSequencer(Compiler* comp) + : GenTreeVisitor(comp) + , m_locals(comp->getAllocator(CMK_DebugOnly)) { } @@ -4014,7 +4021,9 @@ class UniquenessCheckWalker { public: UniquenessCheckWalker(Compiler* comp) - : comp(comp), nodesVecTraits(comp->compGenTreeID, comp), uniqueNodes(BitVecOps::MakeEmpty(&nodesVecTraits)) + : comp(comp) + , nodesVecTraits(comp->compGenTreeID, comp) + , uniqueNodes(BitVecOps::MakeEmpty(&nodesVecTraits)) { } @@ -4132,11 +4141,15 @@ class SsaCheckVisitor : public GenTreeVisitor unsigned m_ssaNum; public: - SsaKey() : m_lclNum(BAD_VAR_NUM), m_ssaNum(SsaConfig::RESERVED_SSA_NUM) + SsaKey() + : m_lclNum(BAD_VAR_NUM) + , m_ssaNum(SsaConfig::RESERVED_SSA_NUM) { } - SsaKey(unsigned lclNum, unsigned ssaNum) : m_lclNum(lclNum), m_ssaNum(ssaNum) + SsaKey(unsigned lclNum, unsigned ssaNum) + : m_lclNum(lclNum) + , m_ssaNum(ssaNum) { } @@ -4773,13 +4786,15 @@ void Compiler::fgDebugCheckFlowGraphAnnotations() return; } - unsigned count 
= - fgRunDfs([](BasicBlock* block, unsigned preorderNum) { assert(block->bbPreorderNum == preorderNum); }, - [=](BasicBlock* block, unsigned postorderNum) { - assert(block->bbPostorderNum == postorderNum); - assert(m_dfsTree->GetPostOrder(postorderNum) == block); - }, - [](BasicBlock* block, BasicBlock* succ) {}); + unsigned count = fgRunDfs( + [](BasicBlock* block, unsigned preorderNum) { + assert(block->bbPreorderNum == preorderNum); + }, + [=](BasicBlock* block, unsigned postorderNum) { + assert(block->bbPostorderNum == postorderNum); + assert(m_dfsTree->GetPostOrder(postorderNum) == block); + }, + [](BasicBlock* block, BasicBlock* succ) {}); assert(m_dfsTree->GetPostOrderCount() == count); diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index a437a3da128d4c..0e1ce24c39ed87 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -1867,11 +1867,15 @@ PhaseStatus Compiler::fgTailMergeThrows() BasicBlock* m_block; GenTreeCall* m_call; - ThrowHelper() : m_block(nullptr), m_call(nullptr) + ThrowHelper() + : m_block(nullptr) + , m_call(nullptr) { } - ThrowHelper(BasicBlock* block, GenTreeCall* call) : m_block(block), m_call(call) + ThrowHelper(BasicBlock* block, GenTreeCall* call) + : m_block(block) + , m_call(call) { } diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index 8ea17f2bff05a0..ba5ed96610dd32 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -214,7 +214,8 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitor( [](Param* pParam) { - // Init the local var info of the inlinee - pParam->pThis->impInlineInitVars(pParam->inlineInfo); + // Init the local var info of the inlinee + pParam->pThis->impInlineInitVars(pParam->inlineInfo); - if (pParam->inlineInfo->inlineResult->IsCandidate()) - { - /* Clear the temp table */ - memset(pParam->inlineInfo->lclTmpNum, -1, sizeof(pParam->inlineInfo->lclTmpNum)); + if 
(pParam->inlineInfo->inlineResult->IsCandidate()) + { + /* Clear the temp table */ + memset(pParam->inlineInfo->lclTmpNum, -1, sizeof(pParam->inlineInfo->lclTmpNum)); - // - // Prepare the call to jitNativeCode - // + // + // Prepare the call to jitNativeCode + // - pParam->inlineInfo->InlinerCompiler = pParam->pThis; - if (pParam->pThis->impInlineInfo == nullptr) - { - pParam->inlineInfo->InlineRoot = pParam->pThis; - } - else - { - pParam->inlineInfo->InlineRoot = pParam->pThis->impInlineInfo->InlineRoot; - } + pParam->inlineInfo->InlinerCompiler = pParam->pThis; + if (pParam->pThis->impInlineInfo == nullptr) + { + pParam->inlineInfo->InlineRoot = pParam->pThis; + } + else + { + pParam->inlineInfo->InlineRoot = pParam->pThis->impInlineInfo->InlineRoot; + } - // The inline context is part of debug info and must be created - // before we start creating statements; we lazily create it as - // late as possible, which is here. - pParam->inlineInfo->inlineContext = - pParam->inlineInfo->InlineRoot->m_inlineStrategy - ->NewContext(pParam->inlineInfo->inlineCandidateInfo->inlinersContext, - pParam->inlineInfo->iciStmt, pParam->inlineInfo->iciCall); - pParam->inlineInfo->argCnt = pParam->inlineCandidateInfo->methInfo.args.totalILArgs(); - pParam->inlineInfo->tokenLookupContextHandle = pParam->inlineCandidateInfo->exactContextHnd; - - JITLOG_THIS(pParam->pThis, - (LL_INFO100000, "INLINER: inlineInfo.tokenLookupContextHandle for %s set to 0x%p:\n", - pParam->pThis->eeGetMethodFullName(pParam->fncHandle), - pParam->pThis->dspPtr(pParam->inlineInfo->tokenLookupContextHandle))); - - JitFlags compileFlagsForInlinee = *pParam->pThis->opts.jitFlags; - - // The following flags are lost when inlining. - // (This is checked in Compiler::compInitOptions().) 
- compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR); - compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR_IF_LOOPS); - compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_PROF_ENTERLEAVE); - compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_DEBUG_EnC); - compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_REVERSE_PINVOKE); - compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_TRACK_TRANSITIONS); + // The inline context is part of debug info and must be created + // before we start creating statements; we lazily create it as + // late as possible, which is here. + pParam->inlineInfo->inlineContext = + pParam->inlineInfo->InlineRoot->m_inlineStrategy + ->NewContext(pParam->inlineInfo->inlineCandidateInfo->inlinersContext, pParam->inlineInfo->iciStmt, + pParam->inlineInfo->iciCall); + pParam->inlineInfo->argCnt = pParam->inlineCandidateInfo->methInfo.args.totalILArgs(); + pParam->inlineInfo->tokenLookupContextHandle = pParam->inlineCandidateInfo->exactContextHnd; + + JITLOG_THIS(pParam->pThis, + (LL_INFO100000, "INLINER: inlineInfo.tokenLookupContextHandle for %s set to 0x%p:\n", + pParam->pThis->eeGetMethodFullName(pParam->fncHandle), + pParam->pThis->dspPtr(pParam->inlineInfo->tokenLookupContextHandle))); + + JitFlags compileFlagsForInlinee = *pParam->pThis->opts.jitFlags; + + // The following flags are lost when inlining. + // (This is checked in Compiler::compInitOptions().) 
+ compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR); + compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR_IF_LOOPS); + compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_PROF_ENTERLEAVE); + compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_DEBUG_EnC); + compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_REVERSE_PINVOKE); + compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_TRACK_TRANSITIONS); #ifdef DEBUG - if (pParam->pThis->verbose) - { - printf("\nInvoking compiler for the inlinee method %s :\n", - pParam->pThis->eeGetMethodFullName(pParam->fncHandle)); - } + if (pParam->pThis->verbose) + { + printf("\nInvoking compiler for the inlinee method %s :\n", + pParam->pThis->eeGetMethodFullName(pParam->fncHandle)); + } #endif // DEBUG - int result = - jitNativeCode(pParam->fncHandle, pParam->inlineCandidateInfo->methInfo.scope, - pParam->pThis->info.compCompHnd, &pParam->inlineCandidateInfo->methInfo, - (void**)pParam->inlineInfo, nullptr, &compileFlagsForInlinee, pParam->inlineInfo); + int result = + jitNativeCode(pParam->fncHandle, pParam->inlineCandidateInfo->methInfo.scope, + pParam->pThis->info.compCompHnd, &pParam->inlineCandidateInfo->methInfo, + (void**)pParam->inlineInfo, nullptr, &compileFlagsForInlinee, pParam->inlineInfo); - if (result != CORJIT_OK) - { - // If we haven't yet determined why this inline fails, use - // a catch-all something bad happened observation. - InlineResult* innerInlineResult = pParam->inlineInfo->inlineResult; + if (result != CORJIT_OK) + { + // If we haven't yet determined why this inline fails, use + // a catch-all something bad happened observation. 
+ InlineResult* innerInlineResult = pParam->inlineInfo->inlineResult; - if (!innerInlineResult->IsFailure()) - { - innerInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_FAILURE); - } + if (!innerInlineResult->IsFailure()) + { + innerInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_FAILURE); } } - }, + } + }, ¶m); if (!success) { diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 0f20dccd2fef35..433a512469816a 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -731,7 +731,6 @@ PhaseStatus Compiler::fgPostImportationCleanup() // auto addConditionalFlow = [this, entryStateVar, &entryJumpTarget, &addedBlocks](BasicBlock* fromBlock, BasicBlock* toBlock) { - // We may have previously though this try entry was unreachable, but now we're going to // step through it on the way to the OSR entry. So ensure it has plausible profile weight. // @@ -2600,7 +2599,7 @@ void Compiler::fgRemoveConditionalJump(BasicBlock* block) assert(block->TargetIs(target)); /* Update bbRefs and bbNum - Conditional predecessors to the same - * block are counted twice so we have to remove one of them */ + * block are counted twice so we have to remove one of them */ noway_assert(target->countOfInEdges() > 1); fgRemoveRefPred(block->GetTargetEdge()); @@ -3969,8 +3968,8 @@ bool Compiler::fgReorderBlocks(bool useProfile) bNext = bEnd->Next(); bool connected_bDest = false; - if ((backwardBranch && !isRare) || - block->HasFlag(BBF_DONT_REMOVE)) // Don't choose option #1 when block is the start of a try region + if ((backwardBranch && !isRare) || block->HasFlag(BBF_DONT_REMOVE)) // Don't choose option #1 when block is the + // start of a try region { bStart = nullptr; bEnd = nullptr; @@ -4779,11 +4778,11 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh continue; } - /* We jump to the REPEAT label if we performed a change involving the current block - * This is in case there are other optimizations 
that can show up - * (e.g. - compact 3 blocks in a row) - * If nothing happens, we then finish the iteration and move to the next block - */ + /* We jump to the REPEAT label if we performed a change involving the current block + * This is in case there are other optimizations that can show up + * (e.g. - compact 3 blocks in a row) + * If nothing happens, we then finish the iteration and move to the next block + */ REPEAT:; @@ -5364,12 +5363,13 @@ unsigned Compiler::fgMeasureIR() { for (Statement* const stmt : block->Statements()) { - fgWalkTreePre(stmt->GetRootNodePointer(), - [](GenTree** slot, fgWalkData* data) -> Compiler::fgWalkResult { - (*reinterpret_cast(data->pCallbackData))++; - return Compiler::WALK_CONTINUE; - }, - &nodeCount); + fgWalkTreePre( + stmt->GetRootNodePointer(), + [](GenTree** slot, fgWalkData* data) -> Compiler::fgWalkResult { + (*reinterpret_cast(data->pCallbackData))++; + return Compiler::WALK_CONTINUE; + }, + &nodeCount); } } else @@ -5444,7 +5444,9 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) struct PredInfo { - PredInfo(BasicBlock* block, Statement* stmt) : m_block(block), m_stmt(stmt) + PredInfo(BasicBlock* block, Statement* stmt) + : m_block(block) + , m_stmt(stmt) { } BasicBlock* m_block; @@ -5750,7 +5752,6 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) }; auto iterateTailMerge = [&](BasicBlock* block) -> void { - int numOpts = 0; while (tailMerge(block)) @@ -5968,7 +5969,8 @@ bool Compiler::gtTreeContainsTailCall(GenTree* tree) DoPreOrder = true }; - HasTailCallCandidateVisitor(Compiler* comp) : GenTreeVisitor(comp) + HasTailCallCandidateVisitor(Compiler* comp) + : GenTreeVisitor(comp) { } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index d53abf356150c3..9fa4e7273862a2 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -309,7 +309,11 @@ class Instrumentor bool m_modifiedFlow; protected: - Instrumentor(Compiler* comp) : m_comp(comp), m_schemaCount(0), 
m_instrCount(0), m_modifiedFlow(false) + Instrumentor(Compiler* comp) + : m_comp(comp) + , m_schemaCount(0) + , m_instrCount(0) + , m_modifiedFlow(false) { } @@ -360,7 +364,8 @@ class Instrumentor class NonInstrumentor : public Instrumentor { public: - NonInstrumentor(Compiler* comp) : Instrumentor(comp) + NonInstrumentor(Compiler* comp) + : Instrumentor(comp) { } }; @@ -376,7 +381,9 @@ class BlockCountInstrumentor : public Instrumentor BasicBlock* m_entryBlock; public: - BlockCountInstrumentor(Compiler* comp) : Instrumentor(comp), m_entryBlock(nullptr) + BlockCountInstrumentor(Compiler* comp) + : Instrumentor(comp) + , m_entryBlock(nullptr) { } bool ShouldProcess(BasicBlock* block) override @@ -566,8 +573,8 @@ void BlockCountInstrumentor::BuildSchemaElements(BasicBlock* block, Schema& sche schemaElem.InstrumentationKind = m_comp->opts.compCollect64BitCounts ? ICorJitInfo::PgoInstrumentationKind::BasicBlockLongCount : ICorJitInfo::PgoInstrumentationKind::BasicBlockIntCount; - schemaElem.ILOffset = offset; - schemaElem.Offset = 0; + schemaElem.ILOffset = offset; + schemaElem.Offset = 0; schema.push_back(schemaElem); @@ -841,9 +848,9 @@ class SpanningTreeVisitor Duplicate }; - virtual void Badcode() = 0; - virtual void VisitBlock(BasicBlock* block) = 0; - virtual void VisitTreeEdge(BasicBlock* source, BasicBlock* target) = 0; + virtual void Badcode() = 0; + virtual void VisitBlock(BasicBlock* block) = 0; + virtual void VisitTreeEdge(BasicBlock* source, BasicBlock* target) = 0; virtual void VisitNonTreeEdge(BasicBlock* source, BasicBlock* target, EdgeKind kind) = 0; }; @@ -1239,7 +1246,9 @@ static int32_t EfficientEdgeCountBlockToKey(BasicBlock* block) // Based on "Optimally Profiling and Tracing Programs," // Ball and Larus PLDI '92. // -class EfficientEdgeCountInstrumentor : public Instrumentor, public SpanningTreeVisitor +class EfficientEdgeCountInstrumentor + : public Instrumentor + , public SpanningTreeVisitor { private: // A particular edge probe. 
These are linked @@ -1753,8 +1762,8 @@ void EfficientEdgeCountInstrumentor::BuildSchemaElements(BasicBlock* block, Sche schemaElem.InstrumentationKind = m_comp->opts.compCollect64BitCounts ? ICorJitInfo::PgoInstrumentationKind::EdgeLongCount : ICorJitInfo::PgoInstrumentationKind::EdgeIntCount; - schemaElem.ILOffset = sourceKey; - schemaElem.Offset = 0; + schemaElem.ILOffset = sourceKey; + schemaElem.Offset = 0; schema.push_back(schemaElem); @@ -1903,7 +1912,9 @@ class HandleHistogramProbeVisitor final : public GenTreeVisitor(compiler), m_functor(functor), m_compiler(compiler) + : GenTreeVisitor(compiler) + , m_functor(functor) + , m_compiler(compiler) { } Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) @@ -1935,7 +1946,9 @@ class ValueHistogramProbeVisitor final : public GenTreeVisitor(compiler), m_functor(functor), m_compiler(compiler) + : GenTreeVisitor(compiler) + , m_functor(functor) + , m_compiler(compiler) { } @@ -1965,7 +1978,8 @@ class BuildHandleHistogramProbeSchemaGen public: BuildHandleHistogramProbeSchemaGen(Schema& schema, unsigned& schemaCount) - : m_schema(schema), m_schemaCount(schemaCount) + : m_schema(schema) + , m_schemaCount(schemaCount) { } @@ -2003,8 +2017,8 @@ class BuildHandleHistogramProbeSchemaGen schemaElem.InstrumentationKind = compiler->opts.compCollect64BitCounts ? ICorJitInfo::PgoInstrumentationKind::HandleHistogramLongCount : ICorJitInfo::PgoInstrumentationKind::HandleHistogramIntCount; - schemaElem.ILOffset = (int32_t)call->gtHandleHistogramProfileCandidateInfo->ilOffset; - schemaElem.Offset = 0; + schemaElem.ILOffset = (int32_t)call->gtHandleHistogramProfileCandidateInfo->ilOffset; + schemaElem.Offset = 0; m_schema.push_back(schemaElem); @@ -2013,7 +2027,7 @@ class BuildHandleHistogramProbeSchemaGen // Re-using ILOffset and Other fields from schema item for TypeHandleHistogramCount schemaElem.InstrumentationKind = isTypeHistogram ? 
ICorJitInfo::PgoInstrumentationKind::HandleHistogramTypes : ICorJitInfo::PgoInstrumentationKind::HandleHistogramMethods; - schemaElem.Count = ICorJitInfo::HandleHistogram32::SIZE; + schemaElem.Count = ICorJitInfo::HandleHistogram32::SIZE; m_schema.push_back(schemaElem); m_schemaCount++; @@ -2027,7 +2041,8 @@ class BuildValueHistogramProbeSchemaGen public: BuildValueHistogramProbeSchemaGen(Schema& schema, unsigned& schemaCount) - : m_schema(schema), m_schemaCount(schemaCount) + : m_schema(schema) + , m_schemaCount(schemaCount) { } @@ -2036,8 +2051,8 @@ class BuildValueHistogramProbeSchemaGen ICorJitInfo::PgoInstrumentationSchema schemaElem = {}; schemaElem.Count = 1; schemaElem.InstrumentationKind = compiler->opts.compCollect64BitCounts - ? ICorJitInfo::PgoInstrumentationKind::ValueHistogramLongCount - : ICorJitInfo::PgoInstrumentationKind::ValueHistogramIntCount; + ? ICorJitInfo::PgoInstrumentationKind::ValueHistogramLongCount + : ICorJitInfo::PgoInstrumentationKind::ValueHistogramIntCount; schemaElem.ILOffset = (int32_t)call->AsCall()->gtHandleHistogramProfileCandidateInfo->ilOffset; m_schema.push_back(schemaElem); m_schemaCount++; @@ -2332,7 +2347,8 @@ class ValueHistogramProbeInserter class HandleHistogramProbeInstrumentor : public Instrumentor { public: - HandleHistogramProbeInstrumentor(Compiler* comp) : Instrumentor(comp) + HandleHistogramProbeInstrumentor(Compiler* comp) + : Instrumentor(comp) { } bool ShouldProcess(BasicBlock* block) override @@ -2350,7 +2366,8 @@ class HandleHistogramProbeInstrumentor : public Instrumentor class ValueInstrumentor : public Instrumentor { public: - ValueInstrumentor(Compiler* comp) : Instrumentor(comp) + ValueInstrumentor(Compiler* comp) + : Instrumentor(comp) { } bool ShouldProcess(BasicBlock* block) override @@ -2727,7 +2744,7 @@ PhaseStatus Compiler::fgInstrumentMethod() // uint8_t* profileMemory; HRESULT res = info.compCompHnd->allocPgoInstrumentationBySchema(info.compMethodHnd, schema.data(), - (UINT32)schema.size(), 
&profileMemory); + (UINT32)schema.size(), &profileMemory); // Deal with allocation failures. // @@ -3102,7 +3119,7 @@ class EfficientEdgeCountReconstructor : public SpanningTreeVisitor // Map correlating block keys to blocks. // typedef JitHashTable, BasicBlock*> KeyToBlockMap; - KeyToBlockMap m_keyToBlockMap; + KeyToBlockMap m_keyToBlockMap; // Key for finding an edge based on schema info. // @@ -3111,7 +3128,9 @@ class EfficientEdgeCountReconstructor : public SpanningTreeVisitor int32_t const m_sourceKey; int32_t const m_targetKey; - EdgeKey(int32_t sourceKey, int32_t targetKey) : m_sourceKey(sourceKey), m_targetKey(targetKey) + EdgeKey(int32_t sourceKey, int32_t targetKey) + : m_sourceKey(sourceKey) + , m_targetKey(targetKey) { } @@ -3159,7 +3178,7 @@ class EfficientEdgeCountReconstructor : public SpanningTreeVisitor // Map for correlating EdgeIntCount schema entries with edges // typedef JitHashTable EdgeKeyToEdgeMap; - EdgeKeyToEdgeMap m_edgeKeyToEdgeMap; + EdgeKeyToEdgeMap m_edgeKeyToEdgeMap; // Per block data // @@ -3519,8 +3538,9 @@ void EfficientEdgeCountReconstructor::Solve() // if (m_badcode || m_mismatch || m_allWeightsZero) { - JITDUMP("... not solving because of the %s\n", - m_badcode ? "badcode" : m_allWeightsZero ? "zero counts" : "mismatch"); + JITDUMP("... not solving because of the %s\n", m_badcode ? "badcode" + : m_allWeightsZero ? 
"zero counts" + : "mismatch"); return; } diff --git a/src/coreclr/jit/fgprofilesynthesis.h b/src/coreclr/jit/fgprofilesynthesis.h index 216bd58297286a..e2e7c58cbac4f2 100644 --- a/src/coreclr/jit/fgprofilesynthesis.h +++ b/src/coreclr/jit/fgprofilesynthesis.h @@ -40,7 +40,8 @@ class ProfileSynthesis static constexpr weight_t epsilon = 0.001; private: - ProfileSynthesis(Compiler* compiler) : m_comp(compiler) + ProfileSynthesis(Compiler* compiler) + : m_comp(compiler) { } diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 691fa5ef349f27..96e0a3e785f1fc 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -1063,7 +1063,7 @@ GenTree* Compiler::fgOptimizeDelegateConstructor(GenTreeCall* call, &genericLookup); GenTree* ctxTree = getRuntimeContextTree(pLookup.lookupKind.runtimeLookupKind); call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_DELEGATE_CTOR, TYP_VOID, thisPointer, - targetObjPointers, ctxTree); + targetObjPointers, ctxTree); call->setEntryPoint(genericLookup); } } @@ -1647,8 +1647,8 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) // try/finally, which must be the last EH region. EHblkDsc* ehDsc = ehGetDsc(tryIndex); - assert(ehDsc->ebdEnclosingTryIndex == - EHblkDsc::NO_ENCLOSING_INDEX); // There are no enclosing regions of the BBJ_RETURN block + assert(ehDsc->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX); // There are no enclosing regions of the + // BBJ_RETURN block assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB. 
@@ -1823,7 +1823,8 @@ class MergedReturns bool mergingReturns = false; public: - MergedReturns(Compiler* comp) : comp(comp) + MergedReturns(Compiler* comp) + : comp(comp) { comp->fgReturnCount = 0; } @@ -2266,7 +2267,7 @@ class MergedReturns return nullptr; } }; -} +} // namespace //------------------------------------------------------------------------ // fgAddInternal: add blocks and trees to express special method semantics @@ -2325,7 +2326,7 @@ PhaseStatus Compiler::fgAddInternal() #ifndef JIT32_GCENCODER lva0CopiedForGenericsCtxt = ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0); #else // JIT32_GCENCODER - lva0CopiedForGenericsCtxt = false; + lva0CopiedForGenericsCtxt = false; #endif // JIT32_GCENCODER noway_assert(lva0CopiedForGenericsCtxt || !lvaTable[info.compThisArg].IsAddressExposed()); noway_assert(!lvaTable[info.compThisArg].lvHasILStoreOp); @@ -3589,7 +3590,9 @@ GenTree* Compiler::fgSetTreeSeq(GenTree* tree, bool isLIR) }; SetTreeSeqVisitor(Compiler* compiler, GenTree* tree, bool isLIR) - : GenTreeVisitor(compiler), m_prevNode(tree), m_isLIR(isLIR) + : GenTreeVisitor(compiler) + , m_prevNode(tree) + , m_isLIR(isLIR) { INDEBUG(tree->gtSeqNum = 0); } @@ -3677,7 +3680,8 @@ PhaseStatus Compiler::fgSetBlockOrder() class GCSafePointSuccessorEnumerator { BasicBlock* m_block; - union { + union + { BasicBlock* m_successors[2]; BasicBlock** m_pSuccessors; }; @@ -3688,7 +3692,8 @@ class GCSafePointSuccessorEnumerator public: // Constructs an enumerator of successors to be used for checking for GC // safe point cycles. - GCSafePointSuccessorEnumerator(Compiler* comp, BasicBlock* block) : m_block(block) + GCSafePointSuccessorEnumerator(Compiler* comp, BasicBlock* block) + : m_block(block) { m_numSuccs = 0; block->VisitRegularSuccs(comp, [this](BasicBlock* succ) { @@ -4203,7 +4208,9 @@ unsigned FlowGraphNaturalLoop::NumLoopBlocks() // dfs - A DFS tree. 
// FlowGraphNaturalLoops::FlowGraphNaturalLoops(const FlowGraphDfsTree* dfsTree) - : m_dfsTree(dfsTree), m_loops(m_dfsTree->GetCompiler()->getAllocator(CMK_Loops)), m_improperLoopHeaders(0) + : m_dfsTree(dfsTree) + , m_loops(m_dfsTree->GetCompiler()->getAllocator(CMK_Loops)) + , m_improperLoopHeaders(0) { } @@ -4838,7 +4845,9 @@ bool FlowGraphNaturalLoop::VisitDefs(TFunc func) DoPreOrder = true, }; - VisitDefsVisitor(Compiler* comp, TFunc& func) : GenTreeVisitor(comp), m_func(func) + VisitDefsVisitor(Compiler* comp, TFunc& func) + : GenTreeVisitor(comp) + , m_func(func) { } @@ -6089,7 +6098,9 @@ FlowGraphDominatorTree* FlowGraphDominatorTree::Build(const FlowGraphDfsTree* df public: NumberDomTreeVisitor(Compiler* comp, unsigned* preorderNums, unsigned* postorderNums) - : DomTreeVisitor(comp), m_preorderNums(preorderNums), m_postorderNums(postorderNums) + : DomTreeVisitor(comp) + , m_preorderNums(preorderNums) + , m_postorderNums(postorderNums) { } diff --git a/src/coreclr/jit/forwardsub.cpp b/src/coreclr/jit/forwardsub.cpp index 8e450d7cbb35e1..de4ac5fe8a4758 100644 --- a/src/coreclr/jit/forwardsub.cpp +++ b/src/coreclr/jit/forwardsub.cpp @@ -191,7 +191,9 @@ class ForwardSubVisitor final : public GenTreeVisitor UseExecutionOrder = true }; - ForwardSubVisitor(Compiler* compiler, unsigned lclNum) : GenTreeVisitor(compiler), m_lclNum(lclNum) + ForwardSubVisitor(Compiler* compiler, unsigned lclNum) + : GenTreeVisitor(compiler) + , m_lclNum(lclNum) { LclVarDsc* dsc = compiler->lvaGetDesc(m_lclNum); if (dsc->lvIsStructField) @@ -399,7 +401,9 @@ class EffectsVisitor final : public GenTreeVisitor UseExecutionOrder = true }; - EffectsVisitor(Compiler* compiler) : GenTreeVisitor(compiler), m_flags(GTF_EMPTY) + EffectsVisitor(Compiler* compiler) + : GenTreeVisitor(compiler) + , m_flags(GTF_EMPTY) { } diff --git a/src/coreclr/jit/gcencode.cpp b/src/coreclr/jit/gcencode.cpp index d039cb3169379e..9d521ebef799cd 100644 --- a/src/coreclr/jit/gcencode.cpp +++ 
b/src/coreclr/jit/gcencode.cpp @@ -433,12 +433,13 @@ static void regenLog(unsigned encoding, InfoHdr* header, InfoHdr* state) EnterCriticalSection(&logFileLock); - fprintf(logFile, "InfoHdr( %2d, %2d, %1d, %1d, %1d," - " %1d, %1d, %1d, %1d, %1d," - " %1d, %1d, %1d, %1d, %1d, %1d," - " %1d, %1d, %1d," - " %1d, %2d, %2d," - " %2d, %2d, %2d, %2d, %2d, %2d), \n", + fprintf(logFile, + "InfoHdr( %2d, %2d, %1d, %1d, %1d," + " %1d, %1d, %1d, %1d, %1d," + " %1d, %1d, %1d, %1d, %1d, %1d," + " %1d, %1d, %1d," + " %1d, %2d, %2d," + " %2d, %2d, %2d, %2d, %2d, %2d), \n", state->prologSize, state->epilogSize, state->epilogCount, state->epilogAtEnd, state->ediSaved, state->esiSaved, state->ebxSaved, state->ebpSaved, state->ebpFrame, state->interruptible, state->doubleAlign, state->security, state->handlers, state->localloc, state->editNcontinue, state->varargs, @@ -1817,7 +1818,7 @@ static int (*zeroFunc)() = zeroFN; */ typedef unsigned pasMaskType; -#define BITS_IN_pasMask (BITS_PER_BYTE * sizeof(pasMaskType)) +#define BITS_IN_pasMask (BITS_PER_BYTE * sizeof(pasMaskType)) #define HIGHEST_pasMask_BIT (((pasMaskType)0x1) << (BITS_IN_pasMask - 1)) //----------------------------------------------------------------------------- @@ -1850,8 +1851,8 @@ class PendingArgsStack // Use these in the case where there actually are more ptrs than pasArgMask unsigned pasEnumGCoffsCount(); #define pasENUM_START ((unsigned)-1) -#define pasENUM_LAST ((unsigned)-2) -#define pasENUM_END ((unsigned)-3) +#define pasENUM_LAST ((unsigned)-2) +#define pasENUM_END ((unsigned)-3) unsigned pasEnumGCoffs(unsigned iter, unsigned* offs); protected: @@ -2331,7 +2332,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un unsigned varOffs = compiler->lvaTable[compiler->info.compThisArg].GetStackOffset(); /* For negative stack offsets we must reset the low bits, - * take abs and then set them back */ + * take abs and then set them back */ varOffs = abs(static_cast(varOffs)); varOffs |= 
this_OFFSET_FLAG; @@ -3285,7 +3286,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un assert(regMask || argMask || callArgCnt || pasStk.pasCurDepth()); -// Emit IPtrMask if needed + // Emit IPtrMask if needed #define CHK_NON_INTRPT_ESP_IPtrMask \ \ @@ -3571,7 +3572,7 @@ size_t GCInfo::gcInfoBlockHdrDump(const BYTE* table, InfoHdr* header, unsigned* #ifdef DEBUG gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM) #else - gcDump.gcPrintf = printf; + gcDump.gcPrintf = printf; #endif printf("Method info block:\n"); @@ -3590,7 +3591,7 @@ size_t GCInfo::gcDumpPtrTable(const BYTE* table, const InfoHdr& header, unsigned #ifdef DEBUG gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM) #else - gcDump.gcPrintf = printf; + gcDump.gcPrintf = printf; #endif return gcDump.DumpGCTable(table, header, methodSize, verifyGCTables); @@ -3608,7 +3609,7 @@ void GCInfo::gcFindPtrsInFrame(const void* infoBlock, const void* codeBlock, uns #ifdef DEBUG gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM) #else - gcDump.gcPrintf = printf; + gcDump.gcPrintf = printf; #endif gcDump.DumpPtrsInFrame((PTR_CBYTE)infoBlock, (const BYTE*)codeBlock, offs, verifyGCTables); @@ -3646,7 +3647,8 @@ class GcInfoEncoderWithLogging public: GcInfoEncoderWithLogging(GcInfoEncoder* gcInfoEncoder, bool verbose) - : m_gcInfoEncoder(gcInfoEncoder), m_doLogging(verbose INDEBUG(|| JitConfig.JitGCInfoLogging() != 0)) + : m_gcInfoEncoder(gcInfoEncoder) + , m_doLogging(verbose INDEBUG(|| JitConfig.JitGCInfoLogging() != 0)) { } @@ -4024,7 +4026,8 @@ struct InterruptibleRangeReporter Encoder* gcInfoEncoderWithLog; InterruptibleRangeReporter(unsigned _prevStart, Encoder* _gcInfo) - : prevStart(_prevStart), gcInfoEncoderWithLog(_gcInfo) + : prevStart(_prevStart) + , gcInfoEncoderWithLog(_gcInfo) { } @@ -4793,7 +4796,7 @@ void GCInfo::gcInfoRecordGCStackArgLive(GcInfoEncoder* gcInfoEncoder, MakeRegPtr StackSlotIdKey sskey(genStackPtr->rpdPtrArg, 
false, GcSlotFlags(genStackPtr->rpdGCtypeGet() == GCT_BYREF ? GC_SLOT_INTERIOR : GC_SLOT_BASE)); - GcSlotId varSlotId; + GcSlotId varSlotId; if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS) { if (!m_stackSlotMap->Lookup(sskey, &varSlotId)) @@ -4841,8 +4844,8 @@ void GCInfo::gcInfoRecordGCStackArgsDead(GcInfoEncoder* gcInfoEncoder, StackSlotIdKey sskey(genRegPtrTemp->rpdPtrArg, false, genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF ? GC_SLOT_INTERIOR : GC_SLOT_BASE); - GcSlotId varSlotId; - bool b = m_stackSlotMap->Lookup(sskey, &varSlotId); + GcSlotId varSlotId; + bool b = m_stackSlotMap->Lookup(sskey, &varSlotId); assert(b); // Should have been added in the first pass. // Live until the call. gcInfoEncoderWithLog->SetSlotState(instrOffset, varSlotId, GC_SLOT_DEAD); diff --git a/src/coreclr/jit/gcinfo.cpp b/src/coreclr/jit/gcinfo.cpp index cb72d3e82ddebb..8045cd873260ea 100644 --- a/src/coreclr/jit/gcinfo.cpp +++ b/src/coreclr/jit/gcinfo.cpp @@ -46,7 +46,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ -GCInfo::GCInfo(Compiler* theCompiler) : compiler(theCompiler) +GCInfo::GCInfo(Compiler* theCompiler) + : compiler(theCompiler) { regSet = nullptr; gcVarPtrList = nullptr; diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index dd77f8e6e1cece..ee41985df53bc8 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -21,13 +21,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX /*****************************************************************************/ const unsigned char GenTree::gtOperKindTable[] = { -#define GTNODE(en, st, cm, ivn, ok) ((ok)>K_MASK) + GTK_COMMUTE *cm, +#define GTNODE(en, st, cm, ivn, ok) ((ok) & GTK_MASK) + GTK_COMMUTE *cm, #include "gtlist.h" }; #ifdef DEBUG const GenTreeDebugOperKind GenTree::gtDebugOperKindTable[] = { -#define GTNODE(en, st, cm, ivn, ok) 
static_cast((ok)&DBK_MASK), +#define GTNODE(en, st, cm, ivn, ok) static_cast((ok) & DBK_MASK), #include "gtlist.h" }; #endif // DEBUG @@ -78,7 +78,8 @@ struct IndentStack const char** indents; // Constructor for IndentStack. Uses 'compiler' to determine the mode of printing. - IndentStack(Compiler* compiler) : stack(compiler->getAllocator(CMK_DebugOnly)) + IndentStack(Compiler* compiler) + : stack(compiler->getAllocator(CMK_DebugOnly)) { if (compiler->asciiTrees) { @@ -3233,7 +3234,8 @@ bool Compiler::gtHasLocalsWithAddrOp(GenTree* tree) DoLclVarsOnly = true, }; - LocalsWithAddrOpVisitor(Compiler* comp) : GenTreeVisitor(comp) + LocalsWithAddrOpVisitor(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -3274,7 +3276,8 @@ bool Compiler::gtHasAddressExposedLocals(GenTree* tree) DoLclVarsOnly = true, }; - Visitor(Compiler* comp) : GenTreeVisitor(comp) + Visitor(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -3363,7 +3366,7 @@ unsigned Compiler::gtHashValue(GenTree* tree) #ifdef HOST_64BIT add = bits; #else // 32-bit host - add = genTreeHashAdd(uhi32(bits), ulo32(bits)); + add = genTreeHashAdd(uhi32(bits), ulo32(bits)); #endif break; case GT_CNS_DBL: @@ -3373,7 +3376,7 @@ unsigned Compiler::gtHashValue(GenTree* tree) #ifdef HOST_64BIT add = bits; #else // 32-bit host - add = genTreeHashAdd(uhi32(bits), ulo32(bits)); + add = genTreeHashAdd(uhi32(bits), ulo32(bits)); #endif break; } @@ -6436,7 +6439,9 @@ bool Compiler::gtMayHaveStoreInterference(GenTree* treeWithStores, GenTree* tree DoPreOrder = true, }; - Visitor(Compiler* compiler, GenTree* readTree) : GenTreeVisitor(compiler), m_readTree(readTree) + Visitor(Compiler* compiler, GenTree* readTree) + : GenTreeVisitor(compiler) + , m_readTree(readTree) { } @@ -6497,7 +6502,9 @@ bool Compiler::gtTreeHasLocalRead(GenTree* tree, unsigned lclNum) unsigned m_lclNum; LclVarDsc* m_lclDsc; - Visitor(Compiler* compiler, unsigned lclNum) : GenTreeVisitor(compiler), m_lclNum(lclNum) + Visitor(Compiler* compiler, unsigned lclNum) + : 
GenTreeVisitor(compiler) + , m_lclNum(lclNum) { m_lclDsc = compiler->lvaGetDesc(lclNum); } @@ -7480,7 +7487,7 @@ GenTree::VtablePtr GenTree::GetVtableForOper(genTreeOps oper) switch (oper) { -// clang-format off + // clang-format off #define GTSTRUCT_0(nm, tag) /*handle explicitly*/ #define GTSTRUCT_1(nm, tag) \ @@ -7542,8 +7549,8 @@ GenTree::VtablePtr GenTree::GetVtableForOper(genTreeOps oper) } break; - // We don't need to handle GTSTRUCT_N for LclVarCommon, since all those allowed opers are specified - // in their proper subtype. Similarly for GenTreeIndir. + // We don't need to handle GTSTRUCT_N for LclVarCommon, since all those allowed opers are specified + // in their proper subtype. Similarly for GenTreeIndir. default: { @@ -9026,7 +9033,7 @@ GenTree* Compiler::gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg node->AsMultiRegOp()->gtOtherReg = REG_NEXT(argReg); } #else - node = gtNewOperNode(GT_PUTARG_REG, type, arg); + node = gtNewOperNode(GT_PUTARG_REG, type, arg); #endif node->SetRegNum(argReg); @@ -9057,7 +9064,7 @@ GenTree* Compiler::gtNewBitCastNode(var_types type, GenTree* arg) // A BITCAST could be a MultiRegOp on arm since we could move a double register to two int registers. 
node = new (this, GT_BITCAST) GenTreeMultiRegOp(GT_BITCAST, type, arg, nullptr); #else - node = gtNewOperNode(GT_BITCAST, type, arg); + node = gtNewOperNode(GT_BITCAST, type, arg); #endif return node; @@ -9462,7 +9469,7 @@ GenTree* Compiler::gtCloneExpr(GenTree* tree) tree->AsLclFld()->Data(), tree->AsLclFld()->GetLayout()); break; - /* These nodes sometimes get bashed to "fat" ones */ + /* These nodes sometimes get bashed to "fat" ones */ case GT_MUL: case GT_DIV: @@ -9844,7 +9851,9 @@ GenTreeCall* Compiler::gtCloneExprCallHelper(GenTreeCall* tree) copy->gtCallMoreFlags = tree->gtCallMoreFlags; INDEBUG(copy->gtCallDebugFlags = tree->gtCallDebugFlags); - copy->gtArgs.InternalCopyFrom(this, &tree->gtArgs, [=](GenTree* node) { return gtCloneExpr(node); }); + copy->gtArgs.InternalCopyFrom(this, &tree->gtArgs, [=](GenTree* node) { + return gtCloneExpr(node); + }); // The call sig comes from the EE and doesn't change throughout the compilation process, meaning // we only really need one physical copy of it. Therefore a shallow pointer copy will suffice. @@ -9994,7 +10003,8 @@ void Compiler::gtUpdateStmtSideEffects(Statement* stmt) DoPostOrder = true, }; - UpdateSideEffectsWalker(Compiler* comp) : GenTreeVisitor(comp) + UpdateSideEffectsWalker(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -10191,12 +10201,20 @@ bool GenTree::gtRequestSetFlags() } GenTreeUseEdgeIterator::GenTreeUseEdgeIterator() - : m_advance(nullptr), m_node(nullptr), m_edge(nullptr), m_statePtr(nullptr), m_state(-1) + : m_advance(nullptr) + , m_node(nullptr) + , m_edge(nullptr) + , m_statePtr(nullptr) + , m_state(-1) { } GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node) - : m_advance(nullptr), m_node(node), m_edge(nullptr), m_statePtr(nullptr), m_state(0) + : m_advance(nullptr) + , m_node(node) + , m_edge(nullptr) + , m_statePtr(nullptr) + , m_state(0) { assert(m_node != nullptr); @@ -10480,7 +10498,7 @@ void GenTreeUseEdgeIterator::AdvanceConditional() // `GTF_REVERSE_OPS` flag. 
// template -void GenTreeUseEdgeIterator::AdvanceBinOp() +void GenTreeUseEdgeIterator::AdvanceBinOp() { assert(ReverseOperands == ((m_node->gtFlags & GTF_REVERSE_OPS) != 0)); @@ -10603,7 +10621,7 @@ void GenTreeUseEdgeIterator::SetEntryStateForMultiOp() // component operands. // template -void GenTreeUseEdgeIterator::AdvanceCall() +void GenTreeUseEdgeIterator::AdvanceCall() { GenTreeCall* const call = m_node->AsCall(); @@ -10816,10 +10834,12 @@ bool GenTree::HandleKindDataIsInvariant(GenTreeFlags flags) printf("%c", (flags & GTF_EXCEPT) ? 'X' : '-'); printf("%c", (flags & GTF_GLOB_REF) ? 'G' : '-'); printf("%c", (debugFlags & GTF_DEBUG_NODE_MORPHED) ? '+' : // First print '+' if GTF_DEBUG_NODE_MORPHED is set - (flags & GTF_ORDER_SIDEEFF) ? 'O' : '-'); // otherwise print 'O' or '-' + (flags & GTF_ORDER_SIDEEFF) ? 'O' + : '-'); // otherwise print 'O' or '-' printf("%c", (flags & GTF_COLON_COND) ? '?' : '-'); - printf("%c", (flags & GTF_DONT_CSE) ? 'N' : // N is for No cse - (flags & GTF_MAKE_CSE) ? 'H' : '-'); // H is for Hoist this expr + printf("%c", (flags & GTF_DONT_CSE) ? 'N' : // N is for No cse + (flags & GTF_MAKE_CSE) ? 'H' + : '-'); // H is for Hoist this expr printf("%c", (flags & GTF_REVERSE_OPS) ? 'R' : '-'); printf("%c", (flags & GTF_UNSIGNED) ? 'U' : (flags & GTF_BOOLEAN) ? 'B' : '-'); #if FEATURE_SET_FLAGS @@ -11668,8 +11688,8 @@ void Compiler::gtDispRegVal(GenTree* tree) { switch (tree->GetRegTag()) { - // Don't display anything for the GT_REGTAG_NONE case; - // the absence of printed register values will imply this state. + // Don't display anything for the GT_REGTAG_NONE case; + // the absence of printed register values will imply this state. 
case GenTree::GT_REGTAG_REG: printf(" REG %s", compRegVarName(tree->GetRegNum())); @@ -11697,9 +11717,9 @@ void Compiler::gtDispRegVal(GenTree* tree) } // We usually/commonly don't expect to print anything longer than this string, -#define LONGEST_COMMON_LCL_VAR_DISPLAY "V99 PInvokeFrame" +#define LONGEST_COMMON_LCL_VAR_DISPLAY "V99 PInvokeFrame" #define LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH (sizeof(LONGEST_COMMON_LCL_VAR_DISPLAY)) -#define BUF_SIZE (LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH * 2) +#define BUF_SIZE (LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH * 2) void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut) { @@ -11934,7 +11954,7 @@ static const char* InsCflagsToString(insCflags flags) { const static char* s_table[16] = {"0", "v", "c", "cv", "z", "zv", "zc", "zcv", "n", "nv", "nc", "ncv", "nz", "nzv", "nzc", "nzcv"}; - unsigned index = (unsigned)flags; + unsigned index = (unsigned)flags; assert((0 <= index) && (index < ArrLen(s_table))); return s_table[index]; } @@ -12345,7 +12365,7 @@ void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack) break; #endif // !FEATURE_EH_FUNCLETS - // Vanilla leaves. No qualifying information available. So do nothing + // Vanilla leaves. No qualifying information available. 
So do nothing case GT_NOP: case GT_NO_OP: @@ -12497,8 +12517,8 @@ void Compiler::gtDispLocal(GenTreeLclVarCommon* tree, IndentStack* indentStack) void Compiler::gtDispChild(GenTree* child, IndentStack* indentStack, IndentInfo arcType, - _In_opt_ const char* msg, /* = nullptr */ - bool topOnly) /* = false */ + _In_opt_ const char* msg, /* = nullptr */ + bool topOnly) /* = false */ { indentStack->Push(arcType); gtDispTree(child, indentStack, msg, topOnly); @@ -12507,11 +12527,11 @@ void Compiler::gtDispChild(GenTree* child, /*****************************************************************************/ -void Compiler::gtDispTree(GenTree* tree, - IndentStack* indentStack, /* = nullptr */ - _In_ _In_opt_z_ const char* msg, /* = nullptr */ - bool topOnly, /* = false */ - bool isLIR) /* = false */ +void Compiler::gtDispTree(GenTree* tree, + IndentStack* indentStack, /* = nullptr */ + _In_ _In_opt_z_ const char* msg, /* = nullptr */ + bool topOnly, /* = false */ + bool isLIR) /* = false */ { if (tree == nullptr) { @@ -13008,9 +13028,10 @@ void Compiler::gtDispTree(GenTree* tree, case GT_HWINTRINSIC: if (tree->OperIs(GT_HWINTRINSIC)) { - printf(" %s %s", tree->AsHWIntrinsic()->GetSimdBaseType() == TYP_UNKNOWN - ? "" - : varTypeName(tree->AsHWIntrinsic()->GetSimdBaseType()), + printf(" %s %s", + tree->AsHWIntrinsic()->GetSimdBaseType() == TYP_UNKNOWN + ? "" + : varTypeName(tree->AsHWIntrinsic()->GetSimdBaseType()), HWIntrinsicInfo::lookupName(tree->AsHWIntrinsic()->GetHWIntrinsicId())); } @@ -15564,8 +15585,8 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) switch (switchType) { - // Fold constant REF of BYREF binary operator. - // These can only be comparisons or null pointers. + // Fold constant REF of BYREF binary operator. + // These can only be comparisons or null pointers. case TYP_REF: @@ -15634,7 +15655,7 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) return tree; - // Fold constant INT binary operator. + // Fold constant INT binary operator. 
case TYP_INT: @@ -15761,8 +15782,8 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) i1 = (i1 << ((32 - i2) & 0x1f)) | (UINT32(i1) >> (i2 & 0x1f)); break; - // DIV and MOD can throw an exception - if the division is by 0 - // or there is overflow - when dividing MIN by -1. + // DIV and MOD can throw an exception - if the division is by 0 + // or there is overflow - when dividing MIN by -1. case GT_DIV: case GT_MOD: @@ -15831,7 +15852,7 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) goto DONE; - // Fold constant LONG binary operator. + // Fold constant LONG binary operator. case TYP_LONG: @@ -16054,7 +16075,7 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) goto DONE; - // Fold constant FLOAT or DOUBLE binary operator + // Fold constant FLOAT or DOUBLE binary operator case TYP_FLOAT: case TYP_DOUBLE: @@ -17092,7 +17113,9 @@ void Compiler::gtExtractSideEffList(GenTree* expr, return m_result; } - SideEffectExtractor(Compiler* compiler, GenTreeFlags flags) : GenTreeVisitor(compiler), m_flags(flags) + SideEffectExtractor(Compiler* compiler, GenTreeFlags flags) + : GenTreeVisitor(compiler) + , m_flags(flags) { } @@ -17373,7 +17396,9 @@ Compiler::FindLinkData Compiler::gtFindLink(Statement* stmt, GenTree* node) DoPreOrder = true, }; - FindLinkWalker(Compiler* comp, GenTree* node) : GenTreeVisitor(comp), m_node(node) + FindLinkWalker(Compiler* comp, GenTree* node) + : GenTreeVisitor(comp) + , m_node(node) { } @@ -17559,7 +17584,9 @@ bool Compiler::gtTreeContainsOper(GenTree* tree, genTreeOps oper) genTreeOps m_oper; public: - Visitor(Compiler* comp, genTreeOps oper) : GenTreeVisitor(comp), m_oper(oper) + Visitor(Compiler* comp, genTreeOps oper) + : GenTreeVisitor(comp) + , m_oper(oper) { } @@ -17600,7 +17627,8 @@ ExceptionSetFlags Compiler::gtCollectExceptions(GenTree* tree) ExceptionSetFlags m_preciseExceptions = ExceptionSetFlags::None; public: - ExceptionsWalker(Compiler* comp) : GenTreeVisitor(comp) + ExceptionsWalker(Compiler* comp) + : 
GenTreeVisitor(comp) { } @@ -17658,7 +17686,9 @@ bool Compiler::gtComplexityExceeds(GenTree* tree, unsigned limit) DoPreOrder = true, }; - ComplexityVisitor(Compiler* comp, unsigned limit) : GenTreeVisitor(comp), m_limit(limit) + ComplexityVisitor(Compiler* comp, unsigned limit) + : GenTreeVisitor(comp) + , m_limit(limit) { } @@ -19415,7 +19445,8 @@ FieldSeq* FieldSeqStore::Append(FieldSeq* a, FieldSeq* b) return nullptr; } -FieldSeq::FieldSeq(CORINFO_FIELD_HANDLE fieldHnd, ssize_t offset, FieldKind fieldKind) : m_offset(offset) +FieldSeq::FieldSeq(CORINFO_FIELD_HANDLE fieldHnd, ssize_t offset, FieldKind fieldKind) + : m_offset(offset) { assert(fieldHnd != NO_FIELD_HANDLE); @@ -20631,8 +20662,8 @@ GenTree* Compiler::gtNewSimdBinOpNode( assert(!compIsaSupportedDebugOnly(InstructionSet_AVX512F_VL)); // Vector256 maskedProduct = Avx2.And(widenedProduct, vecCon1).AsInt16() - GenTree* maskedProduct = gtNewSimdBinOpNode(GT_AND, widenedType, widenedProduct, vecCon1, - widenedSimdBaseJitType, widenedSimdSize); + GenTree* maskedProduct = gtNewSimdBinOpNode(GT_AND, widenedType, widenedProduct, vecCon1, + widenedSimdBaseJitType, widenedSimdSize); GenTree* maskedProductDup = fgMakeMultiUse(&maskedProduct); // Vector256 packedProduct = Avx2.PackUnsignedSaturate(maskedProduct, @@ -21629,10 +21660,10 @@ GenTree* Compiler::gtNewSimdCmpOpNode( op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, CORINFO_TYPE_INT, simdSize); - u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize); - v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize); + u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, + CORINFO_TYPE_INT, simdSize); + v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle, + CORINFO_TYPE_INT, simdSize); // Validate we can't 
use AVX512F_VL_TernaryLogic here assert(!compIsaSupportedDebugOnly(InstructionSet_AVX512F_VL)); @@ -21884,10 +21915,10 @@ GenTree* Compiler::gtNewSimdCmpOpNode( op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, CORINFO_TYPE_INT, simdSize); - u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize); - v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize); + u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, + CORINFO_TYPE_INT, simdSize); + v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle, + CORINFO_TYPE_INT, simdSize); // Validate we can't use AVX512F_VL_TernaryLogic here assert(!compIsaSupportedDebugOnly(InstructionSet_AVX512F_VL)); @@ -25033,8 +25064,8 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si tmp = fgMakeMultiUse(&op1); opShifted = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode(shiftVal, TYP_INT), NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize); - op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, opShifted, tmp, simdBaseJitType, simdSize); - shiftVal = shiftVal / 2; + op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, opShifted, tmp, simdBaseJitType, simdSize); + shiftVal = shiftVal / 2; } return gtNewSimdToScalarNode(type, op1, simdBaseJitType, simdSize); @@ -26787,7 +26818,7 @@ genTreeOps GenTreeHWIntrinsic::HWOperGet() const return GT_AND_NOT; } #endif - // TODO: Handle other cases + // TODO: Handle other cases default: { @@ -26935,7 +26966,7 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, #else uint32_t floatFieldFlags = comp->info.compCompHnd->getRISCV64PassStructInRegisterFlags(retClsHnd); #endif - BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE}; + BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE}; 
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]); if (floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_TWO) diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index c1fc5b33a7175f..7fbcf5471103ea 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -156,10 +156,10 @@ inline ExceptionSetFlags& operator&=(ExceptionSetFlags& a, ExceptionSetFlags b) #ifdef DEBUG /***************************************************************************** -* -* TargetHandleTypes are used to determine the type of handle present inside GenTreeIntCon node. -* The values are such that they don't overlap with helper's or user function's handle. -*/ + * + * TargetHandleTypes are used to determine the type of handle present inside GenTreeIntCon node. + * The values are such that they don't overlap with helper's or user function's handle. + */ enum TargetHandleType : BYTE { THT_Unknown = 2, @@ -200,17 +200,20 @@ class AssertionInfo unsigned short m_assertionIndex : 15; AssertionInfo(bool assertionHoldsOnFalseEdge, AssertionIndex assertionIndex) - : m_assertionHoldsOnFalseEdge(assertionHoldsOnFalseEdge), m_assertionIndex(assertionIndex) + : m_assertionHoldsOnFalseEdge(assertionHoldsOnFalseEdge) + , m_assertionIndex(assertionIndex) { assert(m_assertionIndex == assertionIndex); } public: - AssertionInfo() : AssertionInfo(false, 0) + AssertionInfo() + : AssertionInfo(false, 0) { } - AssertionInfo(AssertionIndex assertionIndex) : AssertionInfo(false, assertionIndex) + AssertionInfo(AssertionIndex assertionIndex) + : AssertionInfo(false, assertionIndex) { } @@ -314,7 +317,8 @@ class FieldSeqStore JitHashTable, FieldSeq> m_map; public: - FieldSeqStore(CompAllocator alloc) : m_map(alloc) + FieldSeqStore(CompAllocator alloc) + : m_map(alloc) { } @@ -331,13 +335,13 @@ struct Statement; /*****************************************************************************/ // Forward declarations of the subtypes -#define GTSTRUCT_0(fn, en) struct GenTree##fn; 
-#define GTSTRUCT_1(fn, en) struct GenTree##fn; -#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn; -#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn; -#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn; -#define GTSTRUCT_N(fn, ...) struct GenTree##fn; -#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2) +#define GTSTRUCT_0(fn, en) struct GenTree##fn; +#define GTSTRUCT_1(fn, en) struct GenTree##fn; +#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn; +#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn; +#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn; +#define GTSTRUCT_N(fn, ...) struct GenTree##fn; +#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2) #define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3) #include "gtstructs.h" @@ -690,11 +694,11 @@ struct GenTree return *As##fn(); \ } -#define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en) -#define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2) -#define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3) -#define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4) -#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2) +#define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en) +#define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2) +#define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3) +#define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4) +#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2) #define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3) #include "gtstructs.h" @@ -719,11 +723,11 @@ struct GenTree #define NO_CSE (0) -#define IS_CSE_INDEX(x) ((x) != 0) -#define IS_CSE_USE(x) ((x) > 0) -#define IS_CSE_DEF(x) ((x) < 0) +#define IS_CSE_INDEX(x) ((x) != 0) +#define IS_CSE_USE(x) ((x) > 0) +#define IS_CSE_DEF(x) ((x) < 0) #define GET_CSE_INDEX(x) (((x) > 0) ? 
x : -(x)) -#define TO_CSE_DEF(x) (-(x)) +#define TO_CSE_DEF(x) (-(x)) signed char gtCSEnum; // 0 or the CSE index (negated if def) // valid only for CSE expressions @@ -766,7 +770,7 @@ struct GenTree bool gtCostsInitialized; #endif // DEBUG -#define MAX_COST UCHAR_MAX +#define MAX_COST UCHAR_MAX #define IND_COST_EX 3 // execution cost for an indirection unsigned char GetCostEx() const @@ -957,7 +961,7 @@ struct GenTree regMaskSmall gtRsvdRegs; // set of fixed trashed registers - unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const; + unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const; regNumber GetSingleTempReg(regMaskTP mask = (regMaskTP)-1); regNumber ExtractTempReg(regMaskTP mask = (regMaskTP)-1); @@ -1509,7 +1513,7 @@ struct GenTree #if !defined(TARGET_64BIT) || (gtOper == GT_ADD_HI) || (gtOper == GT_SUB_HI) #endif - ); + ); } bool OperMayOverflow() const @@ -1754,9 +1758,9 @@ struct GenTree return (DebugOperKind() & DBK_NOTLIR) == 0; } - bool OperSupportsReverseOpEvalOrder(Compiler* comp) const; + bool OperSupportsReverseOpEvalOrder(Compiler* comp) const; static bool RequiresNonNullOp2(genTreeOps oper); - bool IsValidCallArgument(); + bool IsValidCallArgument(); #endif // DEBUG inline bool IsIntegralConst(ssize_t constVal) const; @@ -1857,7 +1861,7 @@ struct GenTree bool OperRequiresCallFlag(Compiler* comp) const; ExceptionSetFlags OperExceptions(Compiler* comp); - bool OperMayThrow(Compiler* comp); + bool OperMayThrow(Compiler* comp); bool OperRequiresGlobRefFlag(Compiler* comp) const; @@ -1894,7 +1898,7 @@ struct GenTree static bool Compare(GenTree* op1, GenTree* op2, bool swapOK = false); -//--------------------------------------------------------------------- + //--------------------------------------------------------------------- #if defined(DEBUG) || CALL_ARG_STATS || COUNT_BASIC_BLOCKS || COUNT_LOOPS || EMITTER_STATS || MEASURE_MEM_ALLOC || \ NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS @@ 
-1938,8 +1942,8 @@ struct GenTree } template - void BashToConst(T value, var_types type = TYP_UNDEF); - void BashToZeroConst(var_types type); + void BashToConst(T value, var_types type = TYP_UNDEF); + void BashToZeroConst(var_types type); GenTreeLclVar* BashToLclVar(Compiler* comp, unsigned lclNum); #if NODEBASH_STATS @@ -1977,7 +1981,7 @@ struct GenTree unsigned* pSize = nullptr); GenTreeLclVarCommon* IsImplicitByrefParameterValuePreMorph(Compiler* compiler); - GenTreeLclVar* IsImplicitByrefParameterValuePostMorph(Compiler* compiler, GenTree** addr); + GenTreeLclVar* IsImplicitByrefParameterValuePostMorph(Compiler* compiler, GenTree** addr); // Determine whether this is an assignment tree of the form X = X (op) Y, // where Y is an arbitrary tree, and X is a lclVar. @@ -2256,7 +2260,7 @@ struct GenTree bool gtRequestSetFlags(); #ifdef DEBUG - static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags); + static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags); static const char* gtGetHandleKindString(GenTreeFlags flags); #endif @@ -2375,7 +2379,7 @@ struct GenTree typedef void* VtablePtr; VtablePtr GetVtableForOper(genTreeOps oper); - void SetVtableForOper(genTreeOps oper); + void SetVtableForOper(genTreeOps oper); static VtablePtr s_vtablesForOpers[GT_COUNT]; static VtablePtr s_vtableForOp; @@ -2409,7 +2413,9 @@ struct GenTreePhi final : public GenTree Use* m_next; public: - Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next) + Use(GenTree* node, Use* next = nullptr) + : m_node(node) + , m_next(next) { assert(node->OperIs(GT_PHI_ARG)); } @@ -2447,7 +2453,8 @@ struct GenTreePhi final : public GenTree Use* m_use; public: - UseIterator(Use* use) : m_use(use) + UseIterator(Use* use) + : m_use(use) { } @@ -2483,7 +2490,8 @@ struct GenTreePhi final : public GenTree Use* m_uses; public: - UseList(Use* uses) : m_uses(uses) + UseList(Use* uses) + : m_uses(uses) { } @@ -2500,7 +2508,9 @@ struct GenTreePhi final : public GenTree 
Use* gtUses; - GenTreePhi(var_types type) : GenTree(GT_PHI, type), gtUses(nullptr) + GenTreePhi(var_types type) + : GenTree(GT_PHI, type) + , gtUses(nullptr) { } @@ -2549,7 +2559,8 @@ struct GenTreePhi final : public GenTree } #if DEBUGGABLE_GENTREE - GenTreePhi() : GenTree() + GenTreePhi() + : GenTree() { } #endif @@ -2568,7 +2579,10 @@ struct GenTreeFieldList : public GenTree public: Use(GenTree* node, unsigned offset, var_types type) - : m_node(node), m_next(nullptr), m_offset(static_cast(offset)), m_type(type) + : m_node(node) + , m_next(nullptr) + , m_offset(static_cast(offset)) + , m_type(type) { // We can save space on 32 bit hosts by storing the offset as uint16_t. Struct promotion // only accepts structs which are much smaller than that - 128 bytes = max 4 fields * max @@ -2628,7 +2642,8 @@ struct GenTreeFieldList : public GenTree Use* use; public: - UseIterator(Use* use) : use(use) + UseIterator(Use* use) + : use(use) { } @@ -2664,7 +2679,9 @@ struct GenTreeFieldList : public GenTree Use* m_tail; public: - UseList() : m_head(nullptr), m_tail(nullptr) + UseList() + : m_head(nullptr) + , m_tail(nullptr) { } @@ -2744,7 +2761,8 @@ struct GenTreeFieldList : public GenTree UseList m_uses; public: - GenTreeFieldList() : GenTree(GT_FIELD_LIST, TYP_STRUCT) + GenTreeFieldList() + : GenTree(GT_FIELD_LIST, TYP_STRUCT) { SetContained(); } @@ -2848,12 +2866,12 @@ class GenTreeUseEdgeIterator final void AdvanceConditional(); template - void AdvanceBinOp(); - void SetEntryStateForBinOp(); + void AdvanceBinOp(); + void SetEntryStateForBinOp(); // The advance function for call nodes template - void AdvanceCall(); + void AdvanceCall(); #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) void AdvanceMultiOp(); @@ -2912,12 +2930,14 @@ class GenTreeOperandIterator final GenTreeUseEdgeIterator m_useEdges; - GenTreeOperandIterator(GenTree* node) : m_useEdges(node) + GenTreeOperandIterator(GenTree* node) + : m_useEdges(node) { } public: - GenTreeOperandIterator() : 
m_useEdges() + GenTreeOperandIterator() + : m_useEdges() { } @@ -2960,12 +2980,14 @@ struct GenTreeUnOp : public GenTree protected: GenTreeUnOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false)) - : GenTree(oper, type DEBUGARG(largeNode)), gtOp1(nullptr) + : GenTree(oper, type DEBUGARG(largeNode)) + , gtOp1(nullptr) { } GenTreeUnOp(genTreeOps oper, var_types type, GenTree* op1 DEBUGARG(bool largeNode = false)) - : GenTree(oper, type DEBUGARG(largeNode)), gtOp1(op1) + : GenTree(oper, type DEBUGARG(largeNode)) + , gtOp1(op1) { assert(op1 != nullptr || NullOp1Legal()); if (op1 != nullptr) @@ -2975,7 +2997,9 @@ struct GenTreeUnOp : public GenTree } #if DEBUGGABLE_GENTREE - GenTreeUnOp() : GenTree(), gtOp1(nullptr) + GenTreeUnOp() + : GenTree() + , gtOp1(nullptr) { } #endif @@ -2986,7 +3010,8 @@ struct GenTreeOp : public GenTreeUnOp GenTree* gtOp2; GenTreeOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false)) - : GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)), gtOp2(op2) + : GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)) + , gtOp2(op2) { // comparisons are always integral types assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type)); @@ -3005,7 +3030,8 @@ struct GenTreeOp : public GenTreeUnOp // A small set of types are unary operators with optional arguments. We use // this constructor to build those. 
GenTreeOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false)) - : GenTreeUnOp(oper, type DEBUGARG(largeNode)), gtOp2(nullptr) + : GenTreeUnOp(oper, type DEBUGARG(largeNode)) + , gtOp2(nullptr) { // Unary operators with optional arguments: assert(oper == GT_RETURN || oper == GT_RETFILT || OperIsBlk(oper)); @@ -3027,7 +3053,9 @@ struct GenTreeOp : public GenTreeUnOp #endif #if DEBUGGABLE_GENTREE - GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr) + GenTreeOp() + : GenTreeUnOp() + , gtOp2(nullptr) { } #endif @@ -3037,11 +3065,14 @@ struct GenTreeVal : public GenTree { size_t gtVal1; - GenTreeVal(genTreeOps oper, var_types type, ssize_t val) : GenTree(oper, type), gtVal1(val) + GenTreeVal(genTreeOps oper, var_types type, ssize_t val) + : GenTree(oper, type) + , gtVal1(val) { } #if DEBUGGABLE_GENTREE - GenTreeVal() : GenTree() + GenTreeVal() + : GenTree() { } #endif @@ -3049,12 +3080,12 @@ struct GenTreeVal : public GenTree struct GenTreeIntConCommon : public GenTree { - inline INT64 LngValue() const; - inline void SetLngValue(INT64 val); + inline INT64 LngValue() const; + inline void SetLngValue(INT64 val); inline ssize_t IconValue() const; - inline void SetIconValue(ssize_t val); - inline INT64 IntegralValue() const; - inline void SetIntegralValue(int64_t value); + inline void SetIconValue(ssize_t val); + inline INT64 IntegralValue() const; + inline void SetIntegralValue(int64_t value); template inline void SetValueTruncating(T value); @@ -3097,7 +3128,8 @@ struct GenTreeIntConCommon : public GenTree #endif #if DEBUGGABLE_GENTREE - GenTreeIntConCommon() : GenTree() + GenTreeIntConCommon() + : GenTree() { } #endif @@ -3110,11 +3142,14 @@ struct GenTreePhysReg : public GenTree // GetRegNum() indicates the destination (and can be changed) // whereas reg indicates the source regNumber gtSrcReg; - GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r) + GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) + : 
GenTree(GT_PHYSREG, type) + , gtSrcReg(r) { } #if DEBUGGABLE_GENTREE - GenTreePhysReg() : GenTree() + GenTreePhysReg() + : GenTree() { } #endif @@ -3173,7 +3208,8 @@ struct GenTreeIntCon : public GenTreeIntConCommon void FixupInitBlkValue(var_types type); #if DEBUGGABLE_GENTREE - GenTreeIntCon() : GenTreeIntConCommon() + GenTreeIntCon() + : GenTreeIntConCommon() { } #endif @@ -3194,12 +3230,14 @@ struct GenTreeLngCon : public GenTreeIntConCommon return (INT32)(gtLconVal >> 32); } - GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG) + GenTreeLngCon(INT64 val) + : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG) { SetLngValue(val); } #if DEBUGGABLE_GENTREE - GenTreeLngCon() : GenTreeIntConCommon() + GenTreeLngCon() + : GenTreeIntConCommon() { } #endif @@ -3330,13 +3368,15 @@ struct GenTreeDblCon : public GenTree return (bits == otherBits); } - GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type) + GenTreeDblCon(double val, var_types type = TYP_DOUBLE) + : GenTree(GT_CNS_DBL, type) { assert(varTypeIsFloating(type)); SetDconValue(val); } #if DEBUGGABLE_GENTREE - GenTreeDblCon() : GenTree() + GenTreeDblCon() + : GenTree() { } #endif @@ -3360,11 +3400,14 @@ struct GenTreeStrCon : public GenTree // Because this node can come from an inlined method we need to // have the scope handle, since it will become a helper call. 
GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod DEBUGARG(bool largeNode = false)) - : GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)), gtSconCPX(sconCPX), gtScpHnd(mod) + : GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)) + , gtSconCPX(sconCPX) + , gtScpHnd(mod) { } #if DEBUGGABLE_GENTREE - GenTreeStrCon() : GenTree() + GenTreeStrCon() + : GenTree() { } #endif @@ -3407,12 +3450,14 @@ class SsaNumInfo final int m_value; - SsaNumInfo(int value) : m_value(value) + SsaNumInfo(int value) + : m_value(value) { } public: - SsaNumInfo() : m_value(SsaConfig::RESERVED_SSA_NUM) + SsaNumInfo() + : m_value(SsaConfig::RESERVED_SSA_NUM) { } @@ -3545,7 +3590,8 @@ struct GenTreeLclVarCommon : public GenTreeUnOp } #if DEBUGGABLE_GENTREE - GenTreeLclVarCommon() : GenTreeUnOp() + GenTreeLclVarCommon() + : GenTreeUnOp() { } #endif @@ -3684,7 +3730,7 @@ struct GenTreeLclVar : public GenTreeLclVarCommon } unsigned int GetFieldCount(Compiler* compiler) const; - var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx); + var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx); bool IsNeverNegative(Compiler* comp) const; @@ -3723,8 +3769,8 @@ struct GenTreeLclVar : public GenTreeLclVarCommon } #endif - GenTreeLclVar(genTreeOps oper, - var_types type, + GenTreeLclVar(genTreeOps oper, + var_types type, unsigned lclNum DEBUGARG(IL_OFFSET ilOffs = BAD_IL_OFFSET) DEBUGARG(bool largeNode = false)) : GenTreeLclVarCommon(oper, type, lclNum DEBUGARG(largeNode)) DEBUGARG(gtLclILoffs(ilOffs)) { @@ -3737,7 +3783,8 @@ struct GenTreeLclVar : public GenTreeLclVarCommon } #if DEBUGGABLE_GENTREE - GenTreeLclVar() : GenTreeLclVarCommon() + GenTreeLclVar() + : GenTreeLclVarCommon() { } #endif @@ -3753,14 +3800,16 @@ struct GenTreeLclFld : public GenTreeLclVarCommon public: GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs, ClassLayout* layout = nullptr) - : GenTreeLclVarCommon(oper, type, lclNum), m_lclOffs(static_cast(lclOffs)) + : 
GenTreeLclVarCommon(oper, type, lclNum) + , m_lclOffs(static_cast(lclOffs)) { assert(lclOffs <= UINT16_MAX); SetLayout(layout); } GenTreeLclFld(var_types type, unsigned lclNum, unsigned lclOffs, GenTree* data, ClassLayout* layout) - : GenTreeLclVarCommon(GT_STORE_LCL_FLD, type, lclNum, data), m_lclOffs(static_cast(lclOffs)) + : GenTreeLclVarCommon(GT_STORE_LCL_FLD, type, lclNum, data) + , m_lclOffs(static_cast(lclOffs)) { assert(lclOffs <= UINT16_MAX); SetLayout(layout); @@ -3795,7 +3844,8 @@ struct GenTreeLclFld : public GenTreeLclVarCommon #endif // TARGET_ARM #if DEBUGGABLE_GENTREE - GenTreeLclFld() : GenTreeLclVarCommon() + GenTreeLclFld() + : GenTreeLclVarCommon() { } #endif @@ -3837,7 +3887,8 @@ struct GenTreeCast : public GenTreeOp var_types gtCastType; GenTreeCast(var_types type, GenTree* op, bool fromUnsigned, var_types castType DEBUGARG(bool largeNode = false)) - : GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)), gtCastType(castType) + : GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)) + , gtCastType(castType) { // We do not allow casts from floating point types to be treated as from // unsigned to avoid bugs related to wrong GTF_UNSIGNED in case the @@ -3847,7 +3898,8 @@ struct GenTreeCast : public GenTreeOp gtFlags |= fromUnsigned ? 
GTF_UNSIGNED : GTF_EMPTY; } #if DEBUGGABLE_GENTREE - GenTreeCast() : GenTreeOp() + GenTreeCast() + : GenTreeOp() { } #endif @@ -3896,7 +3948,8 @@ struct GenTreeBox : public GenTreeUnOp { } #if DEBUGGABLE_GENTREE - GenTreeBox() : GenTreeUnOp() + GenTreeBox() + : GenTreeUnOp() { } #endif @@ -3940,7 +3993,8 @@ struct GenTreeFieldAddr : public GenTreeUnOp } #if DEBUGGABLE_GENTREE - GenTreeFieldAddr() : GenTreeUnOp() + GenTreeFieldAddr() + : GenTreeUnOp() { } #endif @@ -4010,12 +4064,14 @@ struct GenTreeColon : public GenTreeOp } #if DEBUGGABLE_GENTREE - GenTreeColon() : GenTreeOp() + GenTreeColon() + : GenTreeOp() { } #endif - GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) : GenTreeOp(GT_COLON, typ, elseNode, thenNode) + GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) + : GenTreeOp(GT_COLON, typ, elseNode, thenNode) { } }; @@ -4028,13 +4084,15 @@ struct GenTreeConditional : public GenTreeOp GenTreeConditional( genTreeOps oper, var_types type, GenTree* cond, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false)) - : GenTreeOp(oper, type, op1, op2 DEBUGARG(largeNode)), gtCond(cond) + : GenTreeOp(oper, type, op1, op2 DEBUGARG(largeNode)) + , gtCond(cond) { assert(cond != nullptr); } #if DEBUGGABLE_GENTREE - GenTreeConditional() : GenTreeOp() + GenTreeConditional() + : GenTreeOp() { } #endif @@ -4315,7 +4373,7 @@ struct ReturnTypeDesc class TailCallSiteInfo { bool m_isCallvirt : 1; - bool m_isCalli : 1; + bool m_isCalli : 1; CORINFO_SIG_INFO m_sig; CORINFO_RESOLVED_TOKEN m_token; @@ -4501,7 +4559,7 @@ struct CallArgABIInformation bool IsHfaArg() const; bool IsHfaRegArg() const; var_types GetHfaType() const; - void SetHfaType(var_types type, unsigned hfaSlots); + void SetHfaType(var_types type, unsigned hfaSlots); regNumber GetRegNum() const { @@ -4676,7 +4734,8 @@ class CallArg public: CallArgABIInformation AbiInfo; - CallArg(const NewCallArg& arg) : CallArg() + CallArg(const NewCallArg& arg) + : CallArg() { m_earlyNode = 
arg.Node; m_wellKnownArg = arg.WellKnownArg; @@ -4684,7 +4743,7 @@ class CallArg m_signatureClsHnd = arg.SignatureClsHnd; } - CallArg(const CallArg&) = delete; + CallArg(const CallArg&) = delete; CallArg& operator=(CallArg&) = delete; // clang-format off @@ -4746,9 +4805,9 @@ class CallArgs // made for this call. unsigned m_padStkAlign; #endif - bool m_hasThisPointer : 1; - bool m_hasRetBuffer : 1; - bool m_isVarArgs : 1; + bool m_hasThisPointer : 1; + bool m_hasRetBuffer : 1; + bool m_isVarArgs : 1; bool m_abiInformationDetermined : 1; // True if we have one or more register arguments. bool m_hasRegArgs : 1; @@ -4762,15 +4821,15 @@ class CallArgs bool m_alignmentDone : 1; #endif - void AddedWellKnownArg(WellKnownArg arg); - void RemovedWellKnownArg(WellKnownArg arg); + void AddedWellKnownArg(WellKnownArg arg); + void RemovedWellKnownArg(WellKnownArg arg); regNumber GetCustomRegister(Compiler* comp, CorInfoCallConvExtension cc, WellKnownArg arg); - void SplitArg(CallArg* arg, unsigned numRegs, unsigned numSlots); - void SortArgs(Compiler* comp, GenTreeCall* call, CallArg** sortedArgs); + void SplitArg(CallArg* arg, unsigned numRegs, unsigned numSlots); + void SortArgs(Compiler* comp, GenTreeCall* call, CallArg** sortedArgs); public: CallArgs(); - CallArgs(const CallArgs&) = delete; + CallArgs(const CallArgs&) = delete; CallArgs& operator=(CallArgs&) = delete; CallArg* FindByNode(GenTree* node); @@ -4795,8 +4854,8 @@ class CallArgs CallArg* InsertAfterUnchecked(Compiler* comp, CallArg* after, const NewCallArg& arg); CallArg* InsertInstParam(Compiler* comp, GenTree* node); CallArg* InsertAfterThisOrFirst(Compiler* comp, const NewCallArg& arg); - void PushLateBack(CallArg* arg); - void Remove(CallArg* arg); + void PushLateBack(CallArg* arg); + void Remove(CallArg* arg); template void InternalCopyFrom(Compiler* comp, CallArgs* other, CopyNodeFunc copyFunc); @@ -4817,7 +4876,7 @@ class CallArgs bool IsNonStandard(Compiler* comp, GenTreeCall* call, CallArg* arg); 
GenTree* MakeTmpArgNode(Compiler* comp, CallArg* arg); - void SetTemp(CallArg* arg, unsigned tmpNum); + void SetTemp(CallArg* arg, unsigned tmpNum); // clang-format off bool HasThisPointer() const { return m_hasThisPointer; } @@ -4855,7 +4914,8 @@ class CallArgs CallArg* m_arg; public: - explicit CallArgIterator(CallArg* arg) : m_arg(arg) + explicit CallArgIterator(CallArg* arg) + : m_arg(arg) { } @@ -4899,7 +4959,8 @@ class CallArgs } public: - explicit EarlyArgIterator(CallArg* arg) : m_arg(arg) + explicit EarlyArgIterator(CallArg* arg) + : m_arg(arg) { } @@ -4955,7 +5016,8 @@ struct GenTreeCall final : public GenTree CORINFO_SIG_INFO* callSig; #endif - union { + union + { TailCallSiteInfo* tailCallInfo; // Only used for unmanaged calls, which cannot be tail-called CorInfoCallConvExtension unmgdCallConv; @@ -5216,7 +5278,7 @@ struct GenTreeCall final : public GenTree } bool HasNonStandardAddedArgs(Compiler* compiler) const; - int GetNonStandardAddedArgCount(Compiler* compiler) const; + int GetNonStandardAddedArgCount(Compiler* compiler) const; // Returns true if the ABI dictates that this call should get a ret buf // arg. 
This may be out of sync with gtArgs.HasRetBuffer during import @@ -5583,19 +5645,21 @@ struct GenTreeCall final : public GenTree } GenTreeCallFlags gtCallMoreFlags; // in addition to gtFlags - gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration + gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration var_types gtReturnType : 5; // exact return type uint8_t gtInlineInfoCount; // number of inline candidates for the given call CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available - union { + union + { void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined CORINFO_CLASS_HANDLE gtInitClsHnd; // Used by static init helpers, represents a class they init IL_OFFSET gtCastHelperILOffset; // Used by cast helpers to save corresponding IL offset }; - union { + union + { // only used for CALLI unmanaged calls (CT_INDIRECT) GenTree* gtCallCookie; @@ -5613,7 +5677,8 @@ struct GenTreeCall final : public GenTree // expression evaluated after args are placed which determines the control target GenTree* gtControlExpr; - union { + union + { CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC or CT_HELPER GenTree* gtCallAddr; // CT_INDIRECT }; @@ -5666,11 +5731,13 @@ struct GenTreeCall final : public GenTree static bool Equals(GenTreeCall* c1, GenTreeCall* c2); - GenTreeCall(var_types type) : GenTree(GT_CALL, type) + GenTreeCall(var_types type) + : GenTree(GT_CALL, type) { } #if DEBUGGABLE_GENTREE - GenTreeCall() : GenTree() + GenTreeCall() + : GenTree() { } #endif @@ -5689,7 +5756,8 @@ struct GenTreeMultiRegOp : public GenTreeOp MultiRegSpillFlags gtSpillFlags; GenTreeMultiRegOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2) - : GenTreeOp(oper, type, op1, op2), gtOtherReg(REG_NA) + : GenTreeOp(oper, type, op1, op2) + , gtOtherReg(REG_NA) { ClearOtherRegFlags(); } @@ -5772,7 +5840,8 @@ struct GenTreeMultiRegOp : public GenTreeOp } #if DEBUGGABLE_GENTREE - 
GenTreeMultiRegOp() : GenTreeOp() + GenTreeMultiRegOp() + : GenTreeOp() { } #endif @@ -5790,7 +5859,9 @@ struct GenTreeFptrVal : public GenTree #endif GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth) - : GenTree(GT_FTN_ADDR, type), gtFptrMethod(meth), gtFptrDelegateTarget(false) + : GenTree(GT_FTN_ADDR, type) + , gtFptrMethod(meth) + , gtFptrDelegateTarget(false) { #ifdef FEATURE_READYTORUN gtEntryPoint.addr = nullptr; @@ -5798,7 +5869,8 @@ struct GenTreeFptrVal : public GenTree #endif } #if DEBUGGABLE_GENTREE - GenTreeFptrVal() : GenTree() + GenTreeFptrVal() + : GenTree() { } #endif @@ -5810,7 +5882,8 @@ struct GenTreeQmark : public GenTreeOp unsigned gtThenLikelihood; GenTreeQmark(var_types type, GenTree* cond, GenTreeColon* colon, unsigned thenLikelihood = 50) - : GenTreeOp(GT_QMARK, type, cond, colon), gtThenLikelihood(thenLikelihood) + : GenTreeOp(GT_QMARK, type, cond, colon) + , gtThenLikelihood(thenLikelihood) { // These must follow a specific form. assert((cond != nullptr) && cond->TypeIs(TYP_INT)); @@ -5846,7 +5919,8 @@ struct GenTreeQmark : public GenTreeOp } #if DEBUGGABLE_GENTREE - GenTreeQmark() : GenTreeOp() + GenTreeQmark() + : GenTreeOp() { } #endif @@ -5865,20 +5939,25 @@ struct GenTreeIntrinsic : public GenTreeOp #endif GenTreeIntrinsic(var_types type, GenTree* op1, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle) - : GenTreeOp(GT_INTRINSIC, type, op1, nullptr), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle) + : GenTreeOp(GT_INTRINSIC, type, op1, nullptr) + , gtIntrinsicName(intrinsicName) + , gtMethodHandle(methodHandle) { assert(intrinsicName != NI_Illegal); } GenTreeIntrinsic( var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle) - : GenTreeOp(GT_INTRINSIC, type, op1, op2), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle) + : GenTreeOp(GT_INTRINSIC, type, op1, op2) + , gtIntrinsicName(intrinsicName) + , gtMethodHandle(methodHandle) { 
assert(intrinsicName != NI_Illegal); } #if DEBUGGABLE_GENTREE - GenTreeIntrinsic() : GenTreeOp() + GenTreeIntrinsic() + : GenTreeOp() { } #endif @@ -5898,7 +5977,8 @@ struct GenTreeMultiOp : public GenTree protected: GenTree** m_use; - Iterator(GenTree** use) : m_use(use) + Iterator(GenTree** use) + : m_use(use) { } @@ -5923,7 +6003,8 @@ struct GenTreeMultiOp : public GenTree class OperandsIterator final : public Iterator { public: - OperandsIterator(GenTree** use) : Iterator(use) + OperandsIterator(GenTree** use) + : Iterator(use) { } @@ -5936,7 +6017,8 @@ struct GenTreeMultiOp : public GenTree class UseEdgesIterator final : public Iterator { public: - UseEdgesIterator(GenTree** use) : Iterator(use) + UseEdgesIterator(GenTree** use) + : Iterator(use) { } @@ -5983,7 +6065,8 @@ struct GenTreeMultiOp : public GenTree public: #if DEBUGGABLE_GENTREE - GenTreeMultiOp() : GenTree() + GenTreeMultiOp() + : GenTree() { } #endif @@ -6056,7 +6139,8 @@ class IntrinsicNodeBuilder final GenTree* m_inlineOperands[2]; public: - IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) : m_operandCount(operandCount) + IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) + : m_operandCount(operandCount) { m_operands = (operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands : allocator.allocate(operandCount); @@ -6068,7 +6152,8 @@ class IntrinsicNodeBuilder final #endif // DEBUG } - IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) : m_operandCount(source->GetOperandCount()) + IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) + : m_operandCount(source->GetOperandCount()) { m_operands = (m_operandCount <= ArrLen(m_inlineOperands)) ? 
m_inlineOperands : allocator.allocate(m_operandCount); @@ -6280,7 +6365,8 @@ struct GenTreeJitIntrinsic : public GenTreeMultiOp } #if DEBUGGABLE_GENTREE - GenTreeJitIntrinsic() : GenTreeMultiOp() + GenTreeJitIntrinsic() + : GenTreeMultiOp() { } #endif @@ -6341,7 +6427,8 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic } #if DEBUGGABLE_GENTREE - GenTreeHWIntrinsic() : GenTreeJitIntrinsic() + GenTreeHWIntrinsic() + : GenTreeJitIntrinsic() { } #endif @@ -6383,7 +6470,7 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic bool OperRequiresGlobRefFlag() const; unsigned GetResultOpNumForRmwIntrinsic(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3); - uint8_t GetTernaryControlByte(GenTreeHWIntrinsic* second) const; + uint8_t GetTernaryControlByte(GenTreeHWIntrinsic* second) const; ClassLayout* GetLayout(Compiler* compiler) const; @@ -6486,7 +6573,8 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic // struct GenTreeVecCon : public GenTree { - union { + union + { simd8_t gtSimd8Val; simd12_t gtSimd12Val; simd16_t gtSimd16Val; @@ -6538,7 +6626,7 @@ struct GenTreeVecCon : public GenTree // These intrinsics are meant to set the same value to every element. 
if ((argCnt == 1) && HandleArgForHWIntrinsicCreate(node->Op(1), 0, simdVal, simdBaseType)) { -// CreateScalar leaves the upper bits as zero + // CreateScalar leaves the upper bits as zero #if defined(TARGET_XARCH) if ((intrinsic != NI_Vector128_CreateScalar) && (intrinsic != NI_Vector256_CreateScalar) && @@ -6858,7 +6946,8 @@ struct GenTreeVecCon : public GenTree } } - GenTreeVecCon(var_types type) : GenTree(GT_CNS_VEC, type) + GenTreeVecCon(var_types type) + : GenTree(GT_CNS_VEC, type) { assert(varTypeIsSIMD(type)); @@ -6874,7 +6963,8 @@ struct GenTreeVecCon : public GenTree } #if DEBUGGABLE_GENTREE - GenTreeVecCon() : GenTree() + GenTreeVecCon() + : GenTree() { } #endif @@ -6931,7 +7021,8 @@ struct GenTreeIndexAddr : public GenTreeOp } #if DEBUGGABLE_GENTREE - GenTreeIndexAddr() : GenTreeOp() + GenTreeIndexAddr() + : GenTreeOp() { } #endif @@ -6971,7 +7062,8 @@ struct GenTreeArrAddr : GenTreeUnOp } #if DEBUGGABLE_GENTREE - GenTreeArrAddr() : GenTreeUnOp() + GenTreeArrAddr() + : GenTreeUnOp() { } #endif @@ -7018,12 +7110,14 @@ struct GenTreeArrCommon : public GenTreeUnOp return gtOp1; } - GenTreeArrCommon(genTreeOps oper, var_types type, GenTree* arrRef) : GenTreeUnOp(oper, type, arrRef) + GenTreeArrCommon(genTreeOps oper, var_types type, GenTree* arrRef) + : GenTreeUnOp(oper, type, arrRef) { } #if DEBUGGABLE_GENTREE - GenTreeArrCommon() : GenTreeUnOp() + GenTreeArrCommon() + : GenTreeUnOp() { } #endif @@ -7048,12 +7142,14 @@ struct GenTreeArrLen : public GenTreeArrCommon } GenTreeArrLen(var_types type, GenTree* arrRef, int lenOffset) - : GenTreeArrCommon(GT_ARR_LENGTH, type, arrRef), gtArrLenOffset(lenOffset) + : GenTreeArrCommon(GT_ARR_LENGTH, type, arrRef) + , gtArrLenOffset(lenOffset) { } #if DEBUGGABLE_GENTREE - GenTreeArrLen() : GenTreeArrCommon() + GenTreeArrLen() + : GenTreeArrCommon() { } #endif @@ -7080,13 +7176,16 @@ struct GenTreeMDArr : public GenTreeArrCommon } GenTreeMDArr(genTreeOps oper, GenTree* arrRef, unsigned dim, unsigned rank) - : 
GenTreeArrCommon(oper, TYP_INT, arrRef), gtDim(dim), gtRank(rank) + : GenTreeArrCommon(oper, TYP_INT, arrRef) + , gtDim(dim) + , gtRank(rank) { assert(OperIs(GT_MDARR_LENGTH, GT_MDARR_LOWER_BOUND)); } #if DEBUGGABLE_GENTREE - GenTreeMDArr() : GenTreeArrCommon() + GenTreeMDArr() + : GenTreeArrCommon() { } #endif @@ -7118,7 +7217,8 @@ struct GenTreeBoundsChk : public GenTreeOp gtFlags |= GTF_EXCEPT; } #if DEBUGGABLE_GENTREE - GenTreeBoundsChk() : GenTreeOp() + GenTreeBoundsChk() + : GenTreeOp() { } #endif @@ -7168,7 +7268,10 @@ struct GenTreeArrElem : public GenTree // Requires that "inds" is a pointer to an array of "rank" nodes for the indices. GenTreeArrElem(var_types type, GenTree* arr, unsigned char rank, unsigned char elemSize, GenTree** inds) - : GenTree(GT_ARR_ELEM, type), gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize) + : GenTree(GT_ARR_ELEM, type) + , gtArrObj(arr) + , gtArrRank(rank) + , gtArrElemSize(elemSize) { assert(rank <= ArrLen(gtArrInds)); gtFlags |= (arr->gtFlags & GTF_ALL_EFFECT); @@ -7180,7 +7283,8 @@ struct GenTreeArrElem : public GenTree gtFlags |= GTF_EXCEPT; } #if DEBUGGABLE_GENTREE - GenTreeArrElem() : GenTree() + GenTreeArrElem() + : GenTree() { } #endif @@ -7274,7 +7378,8 @@ struct GenTreeAddrMode : public GenTreeOp protected: friend GenTree; // Used only for GenTree::GetVtableForOper() - GenTreeAddrMode() : GenTreeOp() + GenTreeAddrMode() + : GenTreeOp() { } #endif @@ -7310,7 +7415,8 @@ struct GenTreeIndir : public GenTreeOp unsigned Size() const; - GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data) + GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) + : GenTreeOp(oper, type, addr, data) { } @@ -7336,12 +7442,14 @@ struct GenTreeIndir : public GenTreeOp #if DEBUGGABLE_GENTREE // Used only for GenTree::GetVtableForOper() - GenTreeIndir() : GenTreeOp() + GenTreeIndir() + : GenTreeOp() { } #else // Used by XARCH codegen to construct temporary 
trees to pass to the emitter. - GenTreeIndir() : GenTreeOp(GT_NOP, TYP_UNDEF) + GenTreeIndir() + : GenTreeOp(GT_NOP, TYP_UNDEF) { } #endif @@ -7449,7 +7557,8 @@ struct GenTreeBlk : public GenTreeIndir #if DEBUGGABLE_GENTREE protected: friend GenTree; - GenTreeBlk() : GenTreeIndir() + GenTreeBlk() + : GenTreeIndir() { } #endif // DEBUGGABLE_GENTREE @@ -7555,7 +7664,8 @@ struct GenTreeStoreInd : public GenTreeIndir return gtOp2; } - GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) : GenTreeIndir(GT_STOREIND, type, destPtr, data) + GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) + : GenTreeIndir(GT_STOREIND, type, destPtr, data) { SetRMWStatusDefault(); } @@ -7564,7 +7674,8 @@ struct GenTreeStoreInd : public GenTreeIndir protected: friend GenTree; // Used only for GenTree::GetVtableForOper() - GenTreeStoreInd() : GenTreeIndir() + GenTreeStoreInd() + : GenTreeIndir() { SetRMWStatusDefault(); } @@ -7578,13 +7689,15 @@ struct GenTreeCmpXchg : public GenTreeIndir public: GenTreeCmpXchg(var_types type, GenTree* loc, GenTree* val, GenTree* comparand) - : GenTreeIndir(GT_CMPXCHG, type, loc, val), m_comparand(comparand) + : GenTreeIndir(GT_CMPXCHG, type, loc, val) + , m_comparand(comparand) { gtFlags |= comparand->gtFlags & GTF_ALL_EFFECT; } #if DEBUGGABLE_GENTREE - GenTreeCmpXchg() : GenTreeIndir() + GenTreeCmpXchg() + : GenTreeIndir() { } #endif @@ -7612,11 +7725,13 @@ struct GenTreeRetExpr : public GenTree // nullptr for cases where gtSubstExpr is not a tree from the inlinee. 
BasicBlock* gtSubstBB; - GenTreeRetExpr(var_types type) : GenTree(GT_RET_EXPR, type) + GenTreeRetExpr(var_types type) + : GenTree(GT_RET_EXPR, type) { } #if DEBUGGABLE_GENTREE - GenTreeRetExpr() : GenTree() + GenTreeRetExpr() + : GenTree() { } #endif @@ -7640,7 +7755,8 @@ struct GenTreeILOffset : public GenTree } #if DEBUGGABLE_GENTREE - GenTreeILOffset() : GenTree(GT_IL_OFFSET, TYP_VOID) + GenTreeILOffset() + : GenTree(GT_IL_OFFSET, TYP_VOID) { } #endif @@ -7662,7 +7778,8 @@ class GenTreeList GenTree* m_tree; public: - explicit iterator(GenTree* tree) : m_tree(tree) + explicit iterator(GenTree* tree) + : m_tree(tree) { } @@ -7683,7 +7800,8 @@ class GenTreeList } }; - explicit GenTreeList(GenTree* trees) : m_trees(trees) + explicit GenTreeList(GenTree* trees) + : m_trees(trees) { } @@ -7708,7 +7826,8 @@ class LocalsGenTreeList GenTreeLclVarCommon* m_tree; public: - explicit iterator(GenTreeLclVarCommon* tree) : m_tree(tree) + explicit iterator(GenTreeLclVarCommon* tree) + : m_tree(tree) { } @@ -7737,7 +7856,8 @@ class LocalsGenTreeList } }; - explicit LocalsGenTreeList(Statement* stmt) : m_stmt(stmt) + explicit LocalsGenTreeList(Statement* stmt) + : m_stmt(stmt) { } @@ -7937,7 +8057,8 @@ class StatementList Statement* m_stmt; public: - iterator(Statement* stmt) : m_stmt(stmt) + iterator(Statement* stmt) + : m_stmt(stmt) { } @@ -7959,7 +8080,8 @@ class StatementList }; public: - StatementList(Statement* stmts) : m_stmts(stmts) + StatementList(Statement* stmts) + : m_stmts(stmts) { } @@ -7984,13 +8106,15 @@ struct GenTreePhiArg : public GenTreeLclVarCommon BasicBlock* gtPredBB; GenTreePhiArg(var_types type, unsigned lclNum, unsigned ssaNum, BasicBlock* block) - : GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum), gtPredBB(block) + : GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum) + , gtPredBB(block) { SetSsaNum(ssaNum); } #if DEBUGGABLE_GENTREE - GenTreePhiArg() : GenTreeLclVarCommon() + GenTreePhiArg() + : GenTreeLclVarCommon() { } #endif @@ -8028,8 +8152,13 @@ struct 
GenTreePutArgStk : public GenTreeUnOp // TODO-Throughput: The following information should be obtained from the child // block node. - enum class Kind : int8_t{ - Invalid, RepInstr, PartialRepInstr, Unroll, Push, + enum class Kind : int8_t + { + Invalid, + RepInstr, + PartialRepInstr, + Unroll, + Push, }; Kind gtPutArgStkKind; @@ -8161,7 +8290,8 @@ struct GenTreePutArgStk : public GenTreeUnOp #endif // !FEATURE_PUT_STRUCT_ARG_STK #if DEBUGGABLE_GENTREE - GenTreePutArgStk() : GenTreeUnOp() + GenTreePutArgStk() + : GenTreeUnOp() { } #endif @@ -8311,7 +8441,8 @@ struct GenTreePutArgSplit : public GenTreePutArgStk } #if DEBUGGABLE_GENTREE - GenTreePutArgSplit() : GenTreePutArgStk() + GenTreePutArgSplit() + : GenTreePutArgStk() { } #endif @@ -8438,7 +8569,8 @@ struct GenTreeCopyOrReload : public GenTreeUnOp return 1; } - GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) : GenTreeUnOp(oper, type, op1) + GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) + : GenTreeUnOp(oper, type, op1) { assert(type != TYP_STRUCT || op1->IsMultiRegNode()); SetRegNum(REG_NA); @@ -8446,7 +8578,8 @@ struct GenTreeCopyOrReload : public GenTreeUnOp } #if DEBUGGABLE_GENTREE - GenTreeCopyOrReload() : GenTreeUnOp() + GenTreeCopyOrReload() + : GenTreeUnOp() { } #endif @@ -8476,7 +8609,8 @@ struct GenTreeAllocObj final : public GenTreeUnOp #endif } #if DEBUGGABLE_GENTREE - GenTreeAllocObj() : GenTreeUnOp() + GenTreeAllocObj() + : GenTreeUnOp() { } #endif @@ -8490,12 +8624,15 @@ struct GenTreeRuntimeLookup final : public GenTreeUnOp CorInfoGenericHandleType gtHndType; GenTreeRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree) - : GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)), gtHnd(hnd), gtHndType(hndTyp) + : GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)) + , gtHnd(hnd) + , gtHndType(hndTyp) { assert(hnd != nullptr); } #if DEBUGGABLE_GENTREE - 
GenTreeRuntimeLookup() : GenTreeUnOp() + GenTreeRuntimeLookup() + : GenTreeUnOp() { } #endif @@ -8659,11 +8796,13 @@ struct GenCondition return names[m_code]; } - GenCondition() : m_code() + GenCondition() + : m_code() { } - GenCondition(Code cond) : m_code(cond) + GenCondition(Code cond) + : m_code(cond) { } @@ -8787,13 +8926,15 @@ struct GenTreeCC final : public GenTree GenCondition gtCondition; GenTreeCC(genTreeOps oper, var_types type, GenCondition condition) - : GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition) + : GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)) + , gtCondition(condition) { assert(OperIs(GT_JCC, GT_SETCC)); } #if DEBUGGABLE_GENTREE - GenTreeCC() : GenTree() + GenTreeCC() + : GenTree() { } #endif // DEBUGGABLE_GENTREE @@ -8805,7 +8946,8 @@ struct GenTreeOpCC : public GenTreeOp GenCondition gtCondition; GenTreeOpCC(genTreeOps oper, var_types type, GenCondition condition, GenTree* op1 = nullptr, GenTree* op2 = nullptr) - : GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition) + : GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ FALSE)) + , gtCondition(condition) { #ifdef TARGET_ARM64 assert(OperIs(GT_SELECTCC, GT_SELECT_INCCC, GT_SELECT_INVCC, GT_SELECT_NEGCC)); @@ -8815,7 +8957,8 @@ struct GenTreeOpCC : public GenTreeOp } #if DEBUGGABLE_GENTREE - GenTreeOpCC() : GenTreeOp() + GenTreeOpCC() + : GenTreeOp() { } #endif // DEBUGGABLE_GENTREE @@ -8850,12 +8993,14 @@ struct GenTreeCCMP final : public GenTreeOpCC insCflags gtFlagsVal; GenTreeCCMP(var_types type, GenCondition condition, GenTree* op1, GenTree* op2, insCflags flagsVal) - : GenTreeOpCC(GT_CCMP, type, condition, op1, op2), gtFlagsVal(flagsVal) + : GenTreeOpCC(GT_CCMP, type, condition, op1, op2) + , gtFlagsVal(flagsVal) { } #if DEBUGGABLE_GENTREE - GenTreeCCMP() : GenTreeOpCC() + GenTreeCCMP() + : GenTreeOpCC() { } #endif // DEBUGGABLE_GENTREE diff --git a/src/coreclr/jit/gschecks.cpp b/src/coreclr/jit/gschecks.cpp index 
12c610ceaefa45..7b448b8ca3d24e 100644 --- a/src/coreclr/jit/gschecks.cpp +++ b/src/coreclr/jit/gschecks.cpp @@ -455,7 +455,8 @@ void Compiler::gsParamsToShadows() DoPostOrder = true }; - ReplaceShadowParamsVisitor(Compiler* compiler) : GenTreeVisitor(compiler) + ReplaceShadowParamsVisitor(Compiler* compiler) + : GenTreeVisitor(compiler) { } diff --git a/src/coreclr/jit/hashbv.cpp b/src/coreclr/jit/hashbv.cpp index 854215235261db..203219a7ec20ca 100644 --- a/src/coreclr/jit/hashbv.cpp +++ b/src/coreclr/jit/hashbv.cpp @@ -1948,7 +1948,7 @@ indexType hashBvIterator::nextBit() current_element++; // printf("current element is %d\n", current_element); // reached the end of this node - if (current_element == (indexType) this->currNode->numElements()) + if (current_element == (indexType)this->currNode->numElements()) { // printf("going to next node\n"); this->nextNode(); @@ -1956,7 +1956,7 @@ indexType hashBvIterator::nextBit() } else { - assert(current_element < (indexType) this->currNode->numElements()); + assert(current_element < (indexType)this->currNode->numElements()); // printf("getting more data\n"); current_data = this->currNode->elements[current_element]; current_base = this->currNode->baseIndex + current_element * BITS_PER_ELEMENT; diff --git a/src/coreclr/jit/hashbv.h b/src/coreclr/jit/hashbv.h index 7ad95998add8e4..561a1c5641e491 100644 --- a/src/coreclr/jit/hashbv.h +++ b/src/coreclr/jit/hashbv.h @@ -13,15 +13,15 @@ #include #include -//#define TESTING 1 +// #define TESTING 1 -#define LOG2_BITS_PER_ELEMENT 5 +#define LOG2_BITS_PER_ELEMENT 5 #define LOG2_ELEMENTS_PER_NODE 2 -#define LOG2_BITS_PER_NODE (LOG2_BITS_PER_ELEMENT + LOG2_ELEMENTS_PER_NODE) +#define LOG2_BITS_PER_NODE (LOG2_BITS_PER_ELEMENT + LOG2_ELEMENTS_PER_NODE) -#define BITS_PER_ELEMENT (1 << LOG2_BITS_PER_ELEMENT) +#define BITS_PER_ELEMENT (1 << LOG2_BITS_PER_ELEMENT) #define ELEMENTS_PER_NODE (1 << LOG2_ELEMENTS_PER_NODE) -#define BITS_PER_NODE (1 << LOG2_BITS_PER_NODE) +#define BITS_PER_NODE 
(1 << LOG2_BITS_PER_NODE) #ifdef TARGET_AMD64 typedef unsigned __int64 elemType; @@ -128,8 +128,8 @@ class hashBvNode { } static hashBvNode* Create(indexType base, Compiler* comp); - void Reconstruct(indexType base); - int numElements() + void Reconstruct(indexType base); + int numElements() { return ELEMENTS_PER_NODE; } @@ -172,7 +172,8 @@ class hashBv hashBvNode** nodeArr; hashBvNode* initialVector[1]; - union { + union + { Compiler* compiler; // for freelist hashBv* next; @@ -186,9 +187,9 @@ class hashBv public: hashBv(Compiler* comp); static hashBv* Create(Compiler* comp); - static void Init(Compiler* comp); + static void Init(Compiler* comp); static hashBv* CreateFrom(hashBv* other, Compiler* comp); - void hbvFree(); + void hbvFree(); #ifdef DEBUG void dump(); void dumpFancy(); @@ -201,18 +202,18 @@ class hashBv hashBvGlobalData* globalData(); static hashBvNode*& nodeFreeList(hashBvGlobalData* globalData); - static hashBv*& hbvFreeList(hashBvGlobalData* data); + static hashBv*& hbvFreeList(hashBvGlobalData* data); hashBvNode** getInsertionPointForIndex(indexType index); private: hashBvNode* getNodeForIndexHelper(indexType index, bool canAdd); - int getHashForIndex(indexType index, int table_size); - int getRehashForIndex(indexType thisIndex, int thisTableSize, int newTableSize); + int getHashForIndex(indexType index, int table_size); + int getRehashForIndex(indexType thisIndex, int thisTableSize, int newTableSize); // maintain free lists for vectors hashBvNode** getNewVector(int vectorLength); - int getNodeCount(); + int getNodeCount(); public: inline hashBvNode* getOrAddNodeForIndex(indexType index) @@ -221,7 +222,7 @@ class hashBv return temp; } hashBvNode* getNodeForIndex(indexType index); - void removeNodeAtBase(indexType index); + void removeNodeAtBase(indexType index); public: void setBit(indexType index); diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp index 3bf37b93798e93..6c8251eee257f5 100644 --- 
a/src/coreclr/jit/helperexpansion.cpp +++ b/src/coreclr/jit/helperexpansion.cpp @@ -1945,13 +1945,13 @@ static int PickCandidatesForTypeCheck(Compiler* comp, isCastClass = false; break; - // These are never expanded: - // CORINFO_HELP_ISINSTANCEOF_EXCEPTION - // CORINFO_HELP_CHKCASTCLASS_SPECIAL - // CORINFO_HELP_READYTORUN_ISINSTANCEOF, - // CORINFO_HELP_READYTORUN_CHKCAST, + // These are never expanded: + // CORINFO_HELP_ISINSTANCEOF_EXCEPTION + // CORINFO_HELP_CHKCASTCLASS_SPECIAL + // CORINFO_HELP_READYTORUN_ISINSTANCEOF, + // CORINFO_HELP_READYTORUN_CHKCAST, - // Other helper calls are not cast helpers + // Other helper calls are not cast helpers default: return 0; diff --git a/src/coreclr/jit/host.h b/src/coreclr/jit/host.h index 6667fbb3994a76..d10eb93ca9a122 100644 --- a/src/coreclr/jit/host.h +++ b/src/coreclr/jit/host.h @@ -28,10 +28,10 @@ class LogEnv }; bool vlogf(unsigned level, const char* fmt, va_list args); -int vflogf(FILE* file, const char* fmt, va_list args); +int vflogf(FILE* file, const char* fmt, va_list args); -int logf(const char* fmt, ...); -int flogf(FILE* file, const char* fmt, ...); +int logf(const char* fmt, ...); +int flogf(FILE* file, const char* fmt, ...); void gcDump_logf(const char* fmt, ...); void logf(unsigned level, const char* fmt, ...); diff --git a/src/coreclr/jit/hostallocator.h b/src/coreclr/jit/hostallocator.h index a91f7f1fb4ab9b..0e8f192063fb06 100644 --- a/src/coreclr/jit/hostallocator.h +++ b/src/coreclr/jit/hostallocator.h @@ -37,7 +37,7 @@ class HostAllocator final private: void* allocateHostMemory(size_t size); - void freeHostMemory(void* p); + void freeHostMemory(void* p); }; // Global operator new overloads that work with HostAllocator diff --git a/src/coreclr/jit/hwintrinsic.cpp b/src/coreclr/jit/hwintrinsic.cpp index e8b60b07909d95..53970ef4a7460b 100644 --- a/src/coreclr/jit/hwintrinsic.cpp +++ b/src/coreclr/jit/hwintrinsic.cpp @@ -832,7 +832,7 @@ GenTree* Compiler::addRangeCheckIfNeeded( #ifdef TARGET_XARCH && 
!HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic) && !HWIntrinsicInfo::HasFullRangeImm(intrinsic) #endif - ) + ) { assert(!immOp->IsCnsIntOrI()); assert(varTypeIsUnsigned(immOp)); diff --git a/src/coreclr/jit/hwintrinsic.h b/src/coreclr/jit/hwintrinsic.h index cac041eb83ea6d..5ca302e126f32a 100644 --- a/src/coreclr/jit/hwintrinsic.h +++ b/src/coreclr/jit/hwintrinsic.h @@ -451,13 +451,13 @@ struct TernaryLogicInfo // We have 256 entries, so we compress as much as possible // This gives us 3-bytes per entry (21-bits) - TernaryLogicOperKind oper1 : 4; + TernaryLogicOperKind oper1 : 4; TernaryLogicUseFlags oper1Use : 3; - TernaryLogicOperKind oper2 : 4; + TernaryLogicOperKind oper2 : 4; TernaryLogicUseFlags oper2Use : 3; - TernaryLogicOperKind oper3 : 4; + TernaryLogicOperKind oper3 : 4; TernaryLogicUseFlags oper3Use : 3; static const TernaryLogicInfo& lookup(uint8_t control); @@ -491,11 +491,11 @@ struct HWIntrinsicInfo static const HWIntrinsicInfo& lookup(NamedIntrinsic id); - static NamedIntrinsic lookupId(Compiler* comp, - CORINFO_SIG_INFO* sig, - const char* className, - const char* methodName, - const char* enclosingClassName); + static NamedIntrinsic lookupId(Compiler* comp, + CORINFO_SIG_INFO* sig, + const char* className, + const char* methodName, + const char* enclosingClassName); static CORINFO_InstructionSet lookupIsa(const char* className, const char* enclosingClassName); static unsigned lookupSimdSize(Compiler* comp, NamedIntrinsic id, CORINFO_SIG_INFO* sig); @@ -514,7 +514,7 @@ struct HWIntrinsicInfo static bool isScalarIsa(CORINFO_InstructionSet isa); #ifdef TARGET_XARCH - static bool isAVX2GatherIntrinsic(NamedIntrinsic id); + static bool isAVX2GatherIntrinsic(NamedIntrinsic id); static FloatComparisonMode lookupFloatComparisonModeForSwappedArgs(FloatComparisonMode comparison); #endif @@ -927,7 +927,12 @@ struct HWIntrinsicInfo struct HWIntrinsic final { HWIntrinsic(const GenTreeHWIntrinsic* node) - : op1(nullptr), op2(nullptr), op3(nullptr), 
op4(nullptr), numOperands(0), baseType(TYP_UNDEF) + : op1(nullptr) + , op2(nullptr) + , op3(nullptr) + , op4(nullptr) + , numOperands(0) + , baseType(TYP_UNDEF) { assert(node != nullptr); diff --git a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp index 6418b72a8f3075..9a3a98e087a274 100644 --- a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp +++ b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp @@ -36,7 +36,10 @@ // of a for-loop. // CodeGen::HWIntrinsicImmOpHelper::HWIntrinsicImmOpHelper(CodeGen* codeGen, GenTree* immOp, GenTreeHWIntrinsic* intrin) - : codeGen(codeGen), endLabel(nullptr), nonZeroLabel(nullptr), branchTargetReg(REG_NA) + : codeGen(codeGen) + , endLabel(nullptr) + , nonZeroLabel(nullptr) + , branchTargetReg(REG_NA) { assert(codeGen != nullptr); assert(varTypeIsIntegral(immOp)); diff --git a/src/coreclr/jit/hwintrinsiccodegenxarch.cpp b/src/coreclr/jit/hwintrinsiccodegenxarch.cpp index 5e44772e7115ac..79e6b497c368a9 100644 --- a/src/coreclr/jit/hwintrinsiccodegenxarch.cpp +++ b/src/coreclr/jit/hwintrinsiccodegenxarch.cpp @@ -317,9 +317,9 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node) { case 1: { - regNumber targetReg = node->GetRegNum(); - GenTree* rmOp = node->Op(1); - auto emitSwCase = [&](int8_t i) { + regNumber targetReg = node->GetRegNum(); + GenTree* rmOp = node->Op(1); + auto emitSwCase = [&](int8_t i) { insOpts newInstOptions = AddEmbRoundingMode(instOptions, i); genHWIntrinsic_R_RM(node, ins, simdSize, targetReg, rmOp, newInstOptions); }; @@ -559,7 +559,9 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node) if (HWIntrinsicInfo::isImmOp(intrinsicId, op3)) { - auto emitSwCase = [&](int8_t i) { genHWIntrinsic_R_R_RM_I(node, ins, simdSize, i); }; + auto emitSwCase = [&](int8_t i) { + genHWIntrinsic_R_R_RM_I(node, ins, simdSize, i); + }; if (op3->IsCnsIntOrI()) { @@ -653,7 +655,9 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node) if (HWIntrinsicInfo::isImmOp(intrinsicId, op4)) { - auto 
emitSwCase = [&](int8_t i) { genHWIntrinsic_R_R_R_RM_I(node, ins, simdSize, i); }; + auto emitSwCase = [&](int8_t i) { + genHWIntrinsic_R_R_R_RM_I(node, ins, simdSize, i); + }; if (op4->IsCnsIntOrI()) { @@ -1208,10 +1212,10 @@ void CodeGen::genHWIntrinsic_R_R_R_RM_I(GenTreeHWIntrinsic* node, instruction in if (op2->isContained()) { -// op2 is never selected by the table so -// we can contain and ignore any register -// allocated to it resulting in better -// non-RMW based codegen. + // op2 is never selected by the table so + // we can contain and ignore any register + // allocated to it resulting in better + // non-RMW based codegen. #if defined(DEBUG) NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); @@ -1364,8 +1368,8 @@ void CodeGen::genNonTableDrivenHWIntrinsicsJumpTableFallback(GenTreeHWIntrinsic* { // This intrinsic has several overloads, only the ones with floating number inputs should reach this part. assert(varTypeIsFloating(baseType)); - GenTree* rmOp = node->Op(1); - auto emitSwCase = [&](int8_t i) { + GenTree* rmOp = node->Op(1); + auto emitSwCase = [&](int8_t i) { insOpts newInstOptions = AddEmbRoundingMode(instOptions, i); genHWIntrinsic_R_RM(node, ins, attr, targetReg, rmOp, newInstOptions); }; @@ -2055,7 +2059,9 @@ void CodeGen::genSSE41Intrinsic(GenTreeHWIntrinsic* node) instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType); emitAttr attr = emitActualTypeSize(node->TypeGet()); - auto emitSwCase = [&](int8_t i) { inst_RV_TT_IV(ins, attr, targetReg, op1, i); }; + auto emitSwCase = [&](int8_t i) { + inst_RV_TT_IV(ins, attr, targetReg, op1, i); + }; if (op2->IsCnsIntOrI()) { diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp index f88cf6ec99ec3c..87332c07f0113f 100644 --- a/src/coreclr/jit/hwintrinsicxarch.cpp +++ b/src/coreclr/jit/hwintrinsicxarch.cpp @@ -371,7 +371,7 @@ FloatComparisonMode HWIntrinsicInfo::lookupFloatComparisonModeForSwappedArgs(Flo { switch (comparison) { - // These comparison 
modes are the same even if the operands are swapped + // These comparison modes are the same even if the operands are swapped case FloatComparisonMode::OrderedEqualNonSignaling: return FloatComparisonMode::OrderedEqualNonSignaling; @@ -406,7 +406,7 @@ FloatComparisonMode HWIntrinsicInfo::lookupFloatComparisonModeForSwappedArgs(Flo case FloatComparisonMode::UnorderedTrueSignaling: return FloatComparisonMode::UnorderedTrueSignaling; - // These comparison modes need a different mode if the operands are swapped + // These comparison modes need a different mode if the operands are swapped case FloatComparisonMode::OrderedLessThanSignaling: return FloatComparisonMode::OrderedGreaterThanSignaling; @@ -2498,7 +2498,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, // TODO-XARCH-CQ: We should support long/ulong multiplication break; } -// else if simdSize == 64 then above assert would check if baseline isa supported + // else if simdSize == 64 then above assert would check if baseline isa supported #if defined(TARGET_X86) // TODO-XARCH-CQ: We need to support 64-bit CreateBroadcast @@ -3274,13 +3274,13 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, int ival = HWIntrinsicInfo::lookupIval(this, intrinsic, simdBaseType); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, gtNewIconNode(ival), NI_AVX_CompareScalar, - simdBaseJitType, simdSize); + simdBaseJitType, simdSize); } else { GenTree* clonedOp1 = nullptr; op1 = impCloneExpr(op1, &clonedOp1, CHECK_SPILL_ALL, - nullptr DEBUGARG("Clone op1 for Sse.CompareScalarGreaterThan")); + nullptr DEBUGARG("Clone op1 for Sse.CompareScalarGreaterThan")); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, simdBaseJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, clonedOp1, retNode, NI_SSE_MoveScalar, simdBaseJitType, @@ -3333,13 +3333,13 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, int ival = HWIntrinsicInfo::lookupIval(this, intrinsic, 
simdBaseType); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, gtNewIconNode(ival), NI_AVX_CompareScalar, - simdBaseJitType, simdSize); + simdBaseJitType, simdSize); } else { GenTree* clonedOp1 = nullptr; op1 = impCloneExpr(op1, &clonedOp1, CHECK_SPILL_ALL, - nullptr DEBUGARG("Clone op1 for Sse2.CompareScalarGreaterThan")); + nullptr DEBUGARG("Clone op1 for Sse2.CompareScalarGreaterThan")); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, simdBaseJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, clonedOp1, retNode, NI_SSE2_MoveScalar, simdBaseJitType, diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 5ed3064fa87502..bb5231b11ce63f 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -70,16 +70,16 @@ bool Compiler::impILConsumesAddr(const BYTE* codeAddr) switch (opcode) { - // case CEE_LDFLDA: We're taking this one out as if you have a sequence - // like - // - // ldloca.0 - // ldflda whatever - // - // of a primitivelike struct, you end up after morphing with addr of a local - // that's not marked as addrtaken, which is wrong. Also ldflda is usually used - // for structs that contain other structs, which isnt a case we handle very - // well now for other reasons. + // case CEE_LDFLDA: We're taking this one out as if you have a sequence + // like + // + // ldloca.0 + // ldflda whatever + // + // of a primitivelike struct, you end up after morphing with addr of a local + // that's not marked as addrtaken, which is wrong. Also ldflda is usually used + // for structs that contain other structs, which isnt a case we handle very + // well now for other reasons. 
case CEE_LDFLD: { @@ -670,7 +670,7 @@ void Compiler::impStoreTemp(unsigned lclNum, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ - ) +) { GenTree* store = gtNewTempStore(lclNum, val, curLevel, pAfterStmt, di, block); @@ -815,7 +815,7 @@ GenTree* Compiler::impStoreStruct(GenTree* store, Statement** pAfterStmt, /* = nullptr */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = nullptr */ - ) +) { assert(varTypeIsStruct(store) && store->OperIsStore()); @@ -1718,7 +1718,7 @@ bool Compiler::impSpillStackEntry(unsigned level, bool bAssertOnRecursion, const char* reason #endif - ) +) { #ifdef DEBUG @@ -2067,9 +2067,9 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H * If the tree has side-effects, it will be spilled to a temp. */ -GenTree* Compiler::impCloneExpr(GenTree* tree, - GenTree** pClone, - unsigned curLevel, +GenTree* Compiler::impCloneExpr(GenTree* tree, + GenTree** pClone, + unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)) { if (!(tree->gtFlags & GTF_GLOB_EFFECT)) @@ -4260,12 +4260,13 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op) // In contrast, we can only use multi-reg calls directly if they have the exact same ABI. // Calling convention equality is a conservative approximation for that check. - if (op->IsCall() && (op->AsCall()->GetUnmanagedCallConv() == info.compCallConv) + if (op->IsCall() && + (op->AsCall()->GetUnmanagedCallConv() == info.compCallConv) #if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // TODO-Review: this seems unnecessary. Return ABI doesn't change under varargs. 
&& !op->AsCall()->IsVarargs() #endif // defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) - ) + ) { return op; } @@ -6172,7 +6173,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) bool ovfl, unordered, callNode; CORINFO_CLASS_HANDLE tokenType; - union { + union + { int intVal; float fltVal; __int64 lngVal; @@ -6919,7 +6921,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Create the store node and append it. ClassLayout* layout = (lclTyp == TYP_STRUCT) ? typGetObjLayout(stelemClsHnd) : nullptr; op1 = (lclTyp == TYP_STRUCT) ? gtNewStoreBlkNode(layout, op1, op2)->AsIndir() - : gtNewStoreIndNode(lclTyp, op1, op2); + : gtNewStoreIndNode(lclTyp, op1, op2); if (varTypeIsStruct(op1)) { op1 = impStoreStruct(op1, CHECK_SPILL_ALL); @@ -6977,7 +6979,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) oper = GT_MUL; goto MATH_MAYBE_CALL_OVF; - // Other binary math operations + // Other binary math operations case CEE_DIV: oper = GT_DIV; @@ -7266,7 +7268,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op1 = gtNewOperNode(oper, TYP_INT, op1, op2); } - // fall through + // fall through COND_JUMP: @@ -7595,7 +7597,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) goto SPILL_APPEND; - /************************** Casting OPCODES ***************************/ + /************************** Casting OPCODES ***************************/ case CEE_CONV_OVF_I1: lclTyp = TYP_BYTE; @@ -7737,12 +7739,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (varTypeIsFloating(lclTyp)) { - callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl + callNode = varTypeIsLong(impStackTop().val) || + uns // uint->dbl gets turned into uint->long->dbl #ifdef TARGET_64BIT - // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? - // TYP_BYREF could be used as TYP_I_IMPL which is long. 
- // TODO-CQ: remove this when we lower casts long/ulong --> float/double - // and generate SSE2 code instead of going through helper calls. + // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? + // TYP_BYREF could be used as TYP_I_IMPL which is long. + // TODO-CQ: remove this when we lower casts long/ulong --> float/double + // and generate SSE2 code instead of going through helper calls. || (impStackTop().val->TypeGet() == TYP_BYREF) #endif ; @@ -8922,7 +8925,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) #if BIGENDIAN op1 = gtNewIconNode(0, lclTyp); #else - op1 = gtNewIconNode(1, lclTyp); + op1 = gtNewIconNode(1, lclTyp); #endif goto FIELD_DONE; } @@ -8937,7 +8940,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) ClassLayout* layout; lclTyp = TypeHandleToVarType(fieldInfo.fieldType, clsHnd, &layout); op1 = (lclTyp == TYP_STRUCT) ? gtNewBlkIndir(layout, op1, indirFlags) - : gtNewIndir(lclTyp, op1, indirFlags); + : gtNewIndir(lclTyp, op1, indirFlags); if ((indirFlags & GTF_IND_INVARIANT) != 0) { // TODO-ASG: delete this zero-diff quirk. @@ -9799,10 +9802,10 @@ void Compiler::impImportBlockCode(BasicBlock* block) } } - assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. - (helper == CORINFO_HELP_UNBOX_NULLABLE && - varTypeIsStruct(op1)) // UnboxNullable helper returns a struct. - ); + assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. + (helper == CORINFO_HELP_UNBOX_NULLABLE && varTypeIsStruct(op1)) // UnboxNullable helper returns a + // struct. + ); /* ---------------------------------------------------------------------- @@ -10066,7 +10069,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Pop the exception object and create the 'throw' helper call op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, impPopStack().val); - // Fall through to clear out the eval stack. + // Fall through to clear out the eval stack. 
EVAL_APPEND: if (verCurrentState.esStackDepth > 0) @@ -10353,7 +10356,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } break; - /******************************** NYI *******************************/ + /******************************** NYI *******************************/ case 0xCC: OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n"); @@ -10506,7 +10509,7 @@ void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset) // Returns: // Tree with reference to struct local to use as call return value. -GenTree* Compiler::impStoreMultiRegValueToVar(GenTree* op, +GenTree* Compiler::impStoreMultiRegValueToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return")); @@ -11794,7 +11797,7 @@ unsigned Compiler::impGetSpillTmpBase(BasicBlock* block) // Otherwise, choose one, and propagate to all members of the spill clique. // Grab enough temps for the whole stack. - unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); + unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); SetSpillTempsBase callback(baseTmp); // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor @@ -12097,7 +12100,7 @@ void Compiler::impFixPredLists() unsigned XTnum = 0; bool added = false; - for (EHblkDsc *HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) + for (EHblkDsc* HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { if (HBtab->HasFinallyHandler()) { @@ -12246,7 +12249,7 @@ void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, I { assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining. (pInlineInfo == nullptr && !compIsForInlining()) // Calculate the static inlining hint for ngen. 
- ); + ); // If we're really inlining, we should just have one result in play. assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult)); @@ -13291,7 +13294,7 @@ GenTree* Compiler::impInlineFetchArg(InlArgInfo& argInfo, const InlLclVarInfo& l assert(!argInfo.argIsUsed); /* Reserve a temp for the expression. - * Use a large size node as we may change it later */ + * Use a large size node as we may change it later */ const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg")); diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index 53f33c45a98c76..52fdf5ab3cd476 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -2203,10 +2203,10 @@ void Compiler::impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, } else { - unsigned relOffset = 0; - auto addSegment = [=, &loweredNode, &relOffset](var_types type) { + unsigned relOffset = 0; + auto addSegment = [=, &loweredNode, &relOffset](var_types type) { GenTree* val = gtNewLclFldNode(structVal->GetLclNum(), type, - structVal->GetLclOffs() + offset + relOffset); + structVal->GetLclOffs() + offset + relOffset); if (loweredType == TYP_LONG) { @@ -2216,7 +2216,7 @@ void Compiler::impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, if (relOffset > 0) { val = gtNewOperNode(GT_LSH, genActualType(loweredType), val, - gtNewIconNode(relOffset * 8)); + gtNewIconNode(relOffset * 8)); } if (loweredNode == nullptr) @@ -3286,7 +3286,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, GenTree* op1 = impPopStack().val; GenTree* addr = gtNewIndexAddr(op1, op2, TYP_USHORT, NO_CLASS_HANDLE, OFFSETOF__CORINFO_String__chars, OFFSETOF__CORINFO_String__stringLen); - retNode = gtNewIndexIndir(addr->AsIndexAddr()); + retNode = gtNewIndexIndir(addr->AsIndexAddr()); break; } @@ -3633,8 +3633,8 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL; } 
assert(op1->AsCall()->gtArgs.CountArgs() == 1); - op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, - op1->AsCall()->gtArgs.GetArgByIndex(0)->GetEarlyNode()); + op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, + op1->AsCall()->gtArgs.GetArgByIndex(0)->GetEarlyNode()); op1->gtType = TYP_REF; retNode = op1; } @@ -6115,7 +6115,8 @@ void Compiler::impCheckForPInvokeCall( class SpillRetExprHelper { public: - SpillRetExprHelper(Compiler* comp) : comp(comp) + SpillRetExprHelper(Compiler* comp) + : comp(comp) { } @@ -6783,7 +6784,9 @@ void Compiler::considerGuardedDevirtualization(GenTreeCall* call, #ifdef DEBUG char buffer[256]; JITDUMP("%s call would invoke method %s\n", - isInterface ? "interface" : call->IsDelegateInvoke() ? "delegate" : "virtual", + isInterface ? "interface" + : call->IsDelegateInvoke() ? "delegate" + : "virtual", eeGetMethodFullName(likelyMethod, true, true, buffer, sizeof(buffer))); #endif @@ -7278,8 +7281,8 @@ bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName) #if defined(TARGET_XARCH) switch (intrinsicName) { - // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1 - // instructions to directly compute round/ceiling/floor/truncate. + // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1 + // instructions to directly compute round/ceiling/floor/truncate. case NI_System_Math_Abs: case NI_System_Math_Sqrt: @@ -7482,8 +7485,8 @@ void Compiler::impDevirtualizeCall(GenTreeCall* call, // Optionally, print info on devirtualization Compiler* const rootCompiler = impInlineRoot(); const bool doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodHnd, - rootCompiler->info.compClassHnd, - &rootCompiler->info.compMethodInfo->args); + rootCompiler->info.compClassHnd, + &rootCompiler->info.compMethodInfo->args); #endif // DEBUG // Fetch information about the virtual method we're calling. 
@@ -8432,161 +8435,160 @@ void Compiler::impCheckCanInline(GenTreeCall* call, bool success = eeRunWithErrorTrap( [](Param* pParam) { - - // Cache some frequently accessed state. - // - Compiler* const compiler = pParam->pThis; - COMP_HANDLE compCompHnd = compiler->info.compCompHnd; - CORINFO_METHOD_HANDLE ftn = pParam->fncHandle; - InlineResult* const inlineResult = pParam->result; + // Cache some frequently accessed state. + // + Compiler* const compiler = pParam->pThis; + COMP_HANDLE compCompHnd = compiler->info.compCompHnd; + CORINFO_METHOD_HANDLE ftn = pParam->fncHandle; + InlineResult* const inlineResult = pParam->result; #ifdef DEBUG - if (JitConfig.JitNoInline()) - { - inlineResult->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE); - return; - } + if (JitConfig.JitNoInline()) + { + inlineResult->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE); + return; + } #endif - JITDUMP("\nCheckCanInline: fetching method info for inline candidate %s -- context %p\n", - compiler->eeGetMethodName(ftn), compiler->dspPtr(pParam->exactContextHnd)); + JITDUMP("\nCheckCanInline: fetching method info for inline candidate %s -- context %p\n", + compiler->eeGetMethodName(ftn), compiler->dspPtr(pParam->exactContextHnd)); - if (pParam->exactContextHnd == METHOD_BEING_COMPILED_CONTEXT()) - { - JITDUMP("Current method context\n"); - } - else if ((((size_t)pParam->exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)) - { - JITDUMP("Method context: %s\n", - compiler->eeGetMethodFullName((CORINFO_METHOD_HANDLE)pParam->exactContextHnd)); - } - else - { - JITDUMP("Class context: %s\n", compiler->eeGetClassName((CORINFO_CLASS_HANDLE)( - (size_t)pParam->exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK))); - } - - // Fetch method info. This may fail, if the method doesn't have IL. 
- // - CORINFO_METHOD_INFO methInfo; - if (!compCompHnd->getMethodInfo(ftn, &methInfo, pParam->exactContextHnd)) - { - inlineResult->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO); - return; - } + if (pParam->exactContextHnd == METHOD_BEING_COMPILED_CONTEXT()) + { + JITDUMP("Current method context\n"); + } + else if ((((size_t)pParam->exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)) + { + JITDUMP("Method context: %s\n", + compiler->eeGetMethodFullName((CORINFO_METHOD_HANDLE)pParam->exactContextHnd)); + } + else + { + JITDUMP("Class context: %s\n", + compiler->eeGetClassName( + (CORINFO_CLASS_HANDLE)((size_t)pParam->exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK))); + } - // Profile data allows us to avoid early "too many IL bytes" outs. - // - inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE_WEIGHTS, - compiler->fgHaveSufficientProfileWeights()); - inlineResult->NoteBool(InlineObservation::CALLSITE_INSIDE_THROW_BLOCK, - compiler->compCurBB->KindIs(BBJ_THROW)); + // Fetch method info. This may fail, if the method doesn't have IL. + // + CORINFO_METHOD_INFO methInfo; + if (!compCompHnd->getMethodInfo(ftn, &methInfo, pParam->exactContextHnd)) + { + inlineResult->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO); + return; + } - bool const forceInline = (pParam->methAttr & CORINFO_FLG_FORCEINLINE) != 0; + // Profile data allows us to avoid early "too many IL bytes" outs. 
+ // + inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE_WEIGHTS, + compiler->fgHaveSufficientProfileWeights()); + inlineResult->NoteBool(InlineObservation::CALLSITE_INSIDE_THROW_BLOCK, compiler->compCurBB->KindIs(BBJ_THROW)); - compiler->impCanInlineIL(ftn, &methInfo, forceInline, inlineResult); + bool const forceInline = (pParam->methAttr & CORINFO_FLG_FORCEINLINE) != 0; - if (inlineResult->IsFailure()) - { - assert(inlineResult->IsNever()); - return; - } + compiler->impCanInlineIL(ftn, &methInfo, forceInline, inlineResult); - // Speculatively check if initClass() can be done. - // If it can be done, we will try to inline the method. - CorInfoInitClassResult const initClassResult = - compCompHnd->initClass(nullptr /* field */, ftn /* method */, pParam->exactContextHnd /* context */); + if (inlineResult->IsFailure()) + { + assert(inlineResult->IsNever()); + return; + } - if (initClassResult & CORINFO_INITCLASS_DONT_INLINE) - { - inlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT); - return; - } + // Speculatively check if initClass() can be done. + // If it can be done, we will try to inline the method. + CorInfoInitClassResult const initClassResult = + compCompHnd->initClass(nullptr /* field */, ftn /* method */, pParam->exactContextHnd /* context */); - // Given the VM the final say in whether to inline or not. - // This should be last since for verifiable code, this can be expensive - // - CorInfoInline const vmResult = compCompHnd->canInline(compiler->info.compMethodHnd, ftn); + if (initClassResult & CORINFO_INITCLASS_DONT_INLINE) + { + inlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT); + return; + } - if (vmResult == INLINE_FAIL) - { - inlineResult->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE); - } - else if (vmResult == INLINE_NEVER) - { - inlineResult->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE); - } + // Given the VM the final say in whether to inline or not. 
+ // This should be last since for verifiable code, this can be expensive + // + CorInfoInline const vmResult = compCompHnd->canInline(compiler->info.compMethodHnd, ftn); - if (inlineResult->IsFailure()) - { - // The VM already self-reported this failure, so mark it specially - // so the JIT doesn't also try reporting it. - // - inlineResult->SetVMFailure(); - return; - } + if (vmResult == INLINE_FAIL) + { + inlineResult->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE); + } + else if (vmResult == INLINE_NEVER) + { + inlineResult->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE); + } - // Get the method's class properties + if (inlineResult->IsFailure()) + { + // The VM already self-reported this failure, so mark it specially + // so the JIT doesn't also try reporting it. // - CORINFO_CLASS_HANDLE clsHandle = compCompHnd->getMethodClass(ftn); - unsigned const clsAttr = compCompHnd->getClassAttribs(clsHandle); + inlineResult->SetVMFailure(); + return; + } - // Return type - // - var_types const fncRetType = pParam->call->TypeGet(); + // Get the method's class properties + // + CORINFO_CLASS_HANDLE clsHandle = compCompHnd->getMethodClass(ftn); + unsigned const clsAttr = compCompHnd->getClassAttribs(clsHandle); + + // Return type + // + var_types const fncRetType = pParam->call->TypeGet(); #ifdef DEBUG - var_types fncRealRetType = JITtype2varType(methInfo.args.retType); + var_types fncRealRetType = JITtype2varType(methInfo.args.retType); - assert((genActualType(fncRealRetType) == genActualType(fncRetType)) || - // VSW 288602 - // In case of IJW, we allow to assign a native pointer to a BYREF. - (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) || - (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))); + assert((genActualType(fncRealRetType) == genActualType(fncRetType)) || + // VSW 288602 + // In case of IJW, we allow to assign a native pointer to a BYREF. 
+ (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) || + (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))); #endif - // Allocate an InlineCandidateInfo structure, - // - // Or, reuse the existing GuardedDevirtualizationCandidateInfo, - // which was pre-allocated to have extra room. - // - InlineCandidateInfo* pInfo; + // Allocate an InlineCandidateInfo structure, + // + // Or, reuse the existing GuardedDevirtualizationCandidateInfo, + // which was pre-allocated to have extra room. + // + InlineCandidateInfo* pInfo; - if (pParam->call->IsGuardedDevirtualizationCandidate()) - { - pInfo = pParam->call->GetGDVCandidateInfo(pParam->candidateIndex); - } - else - { - pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo; + if (pParam->call->IsGuardedDevirtualizationCandidate()) + { + pInfo = pParam->call->GetGDVCandidateInfo(pParam->candidateIndex); + } + else + { + pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo; - // Null out bits we don't use when we're just inlining - // - pInfo->guardedClassHandle = nullptr; - pInfo->guardedMethodHandle = nullptr; - pInfo->guardedMethodUnboxedEntryHandle = nullptr; - pInfo->likelihood = 0; - pInfo->requiresInstMethodTableArg = false; - } - - pInfo->methInfo = methInfo; - pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd; - pInfo->clsHandle = clsHandle; - pInfo->exactContextHnd = pParam->exactContextHnd; - pInfo->retExpr = nullptr; - pInfo->preexistingSpillTemp = BAD_VAR_NUM; - pInfo->clsAttr = clsAttr; - pInfo->methAttr = pParam->methAttr; - pInfo->initClassResult = initClassResult; - pInfo->fncRetType = fncRetType; - pInfo->exactContextNeedsRuntimeLookup = false; - pInfo->inlinersContext = pParam->pThis->compInlineContext; - - // Note exactContextNeedsRuntimeLookup is reset later on, - // over in impMarkInlineCandidate. 
+ // Null out bits we don't use when we're just inlining // - *(pParam->ppInlineCandidateInfo) = pInfo; - }, + pInfo->guardedClassHandle = nullptr; + pInfo->guardedMethodHandle = nullptr; + pInfo->guardedMethodUnboxedEntryHandle = nullptr; + pInfo->likelihood = 0; + pInfo->requiresInstMethodTableArg = false; + } + + pInfo->methInfo = methInfo; + pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd; + pInfo->clsHandle = clsHandle; + pInfo->exactContextHnd = pParam->exactContextHnd; + pInfo->retExpr = nullptr; + pInfo->preexistingSpillTemp = BAD_VAR_NUM; + pInfo->clsAttr = clsAttr; + pInfo->methAttr = pParam->methAttr; + pInfo->initClassResult = initClassResult; + pInfo->fncRetType = fncRetType; + pInfo->exactContextNeedsRuntimeLookup = false; + pInfo->inlinersContext = pParam->pThis->compInlineContext; + + // Note exactContextNeedsRuntimeLookup is reset later on, + // over in impMarkInlineCandidate. + // + *(pParam->ppInlineCandidateInfo) = pInfo; + }, ¶m); if (!success) @@ -9560,372 +9562,374 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) else #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) if (strcmp(namespaceName, "Collections.Generic") == 0) - { - if (strcmp(className, "Comparer`1") == 0) { - if (strcmp(methodName, "get_Default") == 0) + if (strcmp(className, "Comparer`1") == 0) + { + if (strcmp(methodName, "get_Default") == 0) + { + result = NI_System_Collections_Generic_Comparer_get_Default; + } + } + else if (strcmp(className, "EqualityComparer`1") == 0) { - result = NI_System_Collections_Generic_Comparer_get_Default; + if (strcmp(methodName, "get_Default") == 0) + { + result = NI_System_Collections_Generic_EqualityComparer_get_Default; + } } } - else if (strcmp(className, "EqualityComparer`1") == 0) + else if (strcmp(namespaceName, "Numerics") == 0) { - if (strcmp(methodName, "get_Default") == 0) + if (strcmp(className, "BitOperations") == 0) { - result = 
NI_System_Collections_Generic_EqualityComparer_get_Default; + result = lookupPrimitiveIntNamedIntrinsic(method, methodName); } - } - } - else if (strcmp(namespaceName, "Numerics") == 0) - { - if (strcmp(className, "BitOperations") == 0) - { - result = lookupPrimitiveIntNamedIntrinsic(method, methodName); - } - else - { + else + { #ifdef FEATURE_HW_INTRINSICS - CORINFO_SIG_INFO sig; - info.compCompHnd->getMethodSig(method, &sig); + CORINFO_SIG_INFO sig; + info.compCompHnd->getMethodSig(method, &sig); - result = SimdAsHWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); + result = SimdAsHWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); #endif // FEATURE_HW_INTRINSICS - if (result == NI_Illegal) - { - // This allows the relevant code paths to be dropped as dead code even - // on platforms where FEATURE_HW_INTRINSICS is not supported. - - if (strcmp(methodName, "get_IsSupported") == 0) - { - assert(strcmp(className, "Vector`1") == 0); - result = NI_IsSupported_Type; - } - else if (strcmp(methodName, "get_IsHardwareAccelerated") == 0) - { - result = NI_IsSupported_False; - } - else if (strcmp(methodName, "get_Count") == 0) - { - assert(strcmp(className, "Vector`1") == 0); - result = NI_Vector_GetCount; - } - else if (gtIsRecursiveCall(method)) + if (result == NI_Illegal) { - // For the framework itself, any recursive intrinsics will either be - // only supported on a single platform or will be guarded by a relevant - // IsSupported check so the throw PNSE will be valid or dropped. + // This allows the relevant code paths to be dropped as dead code even + // on platforms where FEATURE_HW_INTRINSICS is not supported. 
- result = NI_Throw_PlatformNotSupportedException; + if (strcmp(methodName, "get_IsSupported") == 0) + { + assert(strcmp(className, "Vector`1") == 0); + result = NI_IsSupported_Type; + } + else if (strcmp(methodName, "get_IsHardwareAccelerated") == 0) + { + result = NI_IsSupported_False; + } + else if (strcmp(methodName, "get_Count") == 0) + { + assert(strcmp(className, "Vector`1") == 0); + result = NI_Vector_GetCount; + } + else if (gtIsRecursiveCall(method)) + { + // For the framework itself, any recursive intrinsics will either be + // only supported on a single platform or will be guarded by a relevant + // IsSupported check so the throw PNSE will be valid or dropped. + + result = NI_Throw_PlatformNotSupportedException; + } } } } - } - else if (strncmp(namespaceName, "Runtime.", 8) == 0) - { - namespaceName += 8; - - if (strcmp(namespaceName, "CompilerServices") == 0) + else if (strncmp(namespaceName, "Runtime.", 8) == 0) { - if (strcmp(className, "RuntimeHelpers") == 0) + namespaceName += 8; + + if (strcmp(namespaceName, "CompilerServices") == 0) { - if (strcmp(methodName, "CreateSpan") == 0) + if (strcmp(className, "RuntimeHelpers") == 0) { - result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan; - } - else if (strcmp(methodName, "InitializeArray") == 0) - { - result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray; + if (strcmp(methodName, "CreateSpan") == 0) + { + result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan; + } + else if (strcmp(methodName, "InitializeArray") == 0) + { + result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray; + } + else if (strcmp(methodName, "IsKnownConstant") == 0) + { + result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant; + } } - else if (strcmp(methodName, "IsKnownConstant") == 0) + else if (strcmp(className, "Unsafe") == 0) { - result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant; + if (strcmp(methodName, 
"Add") == 0) + { + result = NI_SRCS_UNSAFE_Add; + } + else if (strcmp(methodName, "AddByteOffset") == 0) + { + result = NI_SRCS_UNSAFE_AddByteOffset; + } + else if (strcmp(methodName, "AreSame") == 0) + { + result = NI_SRCS_UNSAFE_AreSame; + } + else if (strcmp(methodName, "As") == 0) + { + result = NI_SRCS_UNSAFE_As; + } + else if (strcmp(methodName, "AsPointer") == 0) + { + result = NI_SRCS_UNSAFE_AsPointer; + } + else if (strcmp(methodName, "AsRef") == 0) + { + result = NI_SRCS_UNSAFE_AsRef; + } + else if (strcmp(methodName, "BitCast") == 0) + { + result = NI_SRCS_UNSAFE_BitCast; + } + else if (strcmp(methodName, "ByteOffset") == 0) + { + result = NI_SRCS_UNSAFE_ByteOffset; + } + else if (strcmp(methodName, "Copy") == 0) + { + result = NI_SRCS_UNSAFE_Copy; + } + else if (strcmp(methodName, "CopyBlock") == 0) + { + result = NI_SRCS_UNSAFE_CopyBlock; + } + else if (strcmp(methodName, "CopyBlockUnaligned") == 0) + { + result = NI_SRCS_UNSAFE_CopyBlockUnaligned; + } + else if (strcmp(methodName, "InitBlock") == 0) + { + result = NI_SRCS_UNSAFE_InitBlock; + } + else if (strcmp(methodName, "InitBlockUnaligned") == 0) + { + result = NI_SRCS_UNSAFE_InitBlockUnaligned; + } + else if (strcmp(methodName, "IsAddressGreaterThan") == 0) + { + result = NI_SRCS_UNSAFE_IsAddressGreaterThan; + } + else if (strcmp(methodName, "IsAddressLessThan") == 0) + { + result = NI_SRCS_UNSAFE_IsAddressLessThan; + } + else if (strcmp(methodName, "IsNullRef") == 0) + { + result = NI_SRCS_UNSAFE_IsNullRef; + } + else if (strcmp(methodName, "NullRef") == 0) + { + result = NI_SRCS_UNSAFE_NullRef; + } + else if (strcmp(methodName, "Read") == 0) + { + result = NI_SRCS_UNSAFE_Read; + } + else if (strcmp(methodName, "ReadUnaligned") == 0) + { + result = NI_SRCS_UNSAFE_ReadUnaligned; + } + else if (strcmp(methodName, "SizeOf") == 0) + { + result = NI_SRCS_UNSAFE_SizeOf; + } + else if (strcmp(methodName, "SkipInit") == 0) + { + result = NI_SRCS_UNSAFE_SkipInit; + } + else if (strcmp(methodName, 
"Subtract") == 0) + { + result = NI_SRCS_UNSAFE_Subtract; + } + else if (strcmp(methodName, "SubtractByteOffset") == 0) + { + result = NI_SRCS_UNSAFE_SubtractByteOffset; + } + else if (strcmp(methodName, "Unbox") == 0) + { + result = NI_SRCS_UNSAFE_Unbox; + } + else if (strcmp(methodName, "Write") == 0) + { + result = NI_SRCS_UNSAFE_Write; + } + else if (strcmp(methodName, "WriteUnaligned") == 0) + { + result = NI_SRCS_UNSAFE_WriteUnaligned; + } } } - else if (strcmp(className, "Unsafe") == 0) + else if (strcmp(namespaceName, "InteropServices") == 0) { - if (strcmp(methodName, "Add") == 0) - { - result = NI_SRCS_UNSAFE_Add; - } - else if (strcmp(methodName, "AddByteOffset") == 0) - { - result = NI_SRCS_UNSAFE_AddByteOffset; - } - else if (strcmp(methodName, "AreSame") == 0) - { - result = NI_SRCS_UNSAFE_AreSame; - } - else if (strcmp(methodName, "As") == 0) - { - result = NI_SRCS_UNSAFE_As; - } - else if (strcmp(methodName, "AsPointer") == 0) - { - result = NI_SRCS_UNSAFE_AsPointer; - } - else if (strcmp(methodName, "AsRef") == 0) - { - result = NI_SRCS_UNSAFE_AsRef; - } - else if (strcmp(methodName, "BitCast") == 0) - { - result = NI_SRCS_UNSAFE_BitCast; - } - else if (strcmp(methodName, "ByteOffset") == 0) - { - result = NI_SRCS_UNSAFE_ByteOffset; - } - else if (strcmp(methodName, "Copy") == 0) - { - result = NI_SRCS_UNSAFE_Copy; - } - else if (strcmp(methodName, "CopyBlock") == 0) - { - result = NI_SRCS_UNSAFE_CopyBlock; - } - else if (strcmp(methodName, "CopyBlockUnaligned") == 0) - { - result = NI_SRCS_UNSAFE_CopyBlockUnaligned; - } - else if (strcmp(methodName, "InitBlock") == 0) - { - result = NI_SRCS_UNSAFE_InitBlock; - } - else if (strcmp(methodName, "InitBlockUnaligned") == 0) - { - result = NI_SRCS_UNSAFE_InitBlockUnaligned; - } - else if (strcmp(methodName, "IsAddressGreaterThan") == 0) - { - result = NI_SRCS_UNSAFE_IsAddressGreaterThan; - } - else if (strcmp(methodName, "IsAddressLessThan") == 0) - { - result = NI_SRCS_UNSAFE_IsAddressLessThan; - } - 
else if (strcmp(methodName, "IsNullRef") == 0) - { - result = NI_SRCS_UNSAFE_IsNullRef; - } - else if (strcmp(methodName, "NullRef") == 0) - { - result = NI_SRCS_UNSAFE_NullRef; - } - else if (strcmp(methodName, "Read") == 0) - { - result = NI_SRCS_UNSAFE_Read; - } - else if (strcmp(methodName, "ReadUnaligned") == 0) - { - result = NI_SRCS_UNSAFE_ReadUnaligned; - } - else if (strcmp(methodName, "SizeOf") == 0) - { - result = NI_SRCS_UNSAFE_SizeOf; - } - else if (strcmp(methodName, "SkipInit") == 0) - { - result = NI_SRCS_UNSAFE_SkipInit; - } - else if (strcmp(methodName, "Subtract") == 0) - { - result = NI_SRCS_UNSAFE_Subtract; - } - else if (strcmp(methodName, "SubtractByteOffset") == 0) - { - result = NI_SRCS_UNSAFE_SubtractByteOffset; - } - else if (strcmp(methodName, "Unbox") == 0) - { - result = NI_SRCS_UNSAFE_Unbox; - } - else if (strcmp(methodName, "Write") == 0) - { - result = NI_SRCS_UNSAFE_Write; - } - else if (strcmp(methodName, "WriteUnaligned") == 0) + if (strcmp(className, "MemoryMarshal") == 0) { - result = NI_SRCS_UNSAFE_WriteUnaligned; + if (strcmp(methodName, "GetArrayDataReference") == 0) + { + result = NI_System_Runtime_InteropService_MemoryMarshal_GetArrayDataReference; + } } } - } - else if (strcmp(namespaceName, "InteropServices") == 0) - { - if (strcmp(className, "MemoryMarshal") == 0) + else if (strncmp(namespaceName, "Intrinsics", 10) == 0) { - if (strcmp(methodName, "GetArrayDataReference") == 0) - { - result = NI_System_Runtime_InteropService_MemoryMarshal_GetArrayDataReference; - } - } - } - else if (strncmp(namespaceName, "Intrinsics", 10) == 0) - { - // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled - // so we can specially handle IsSupported and recursive calls. - - // This is required to appropriately handle the intrinsics on platforms - // which don't support them. On such a platform methods like Vector64.Create - // will be seen as `Intrinsic` and `mustExpand` due to having a code path - // which is recursive. 
When such a path is hit we expect it to be handled by - // the importer and we fire an assert if it wasn't and in previous versions - // of the JIT would fail fast. This was changed to throw a PNSE instead but - // we still assert as most intrinsics should have been recognized/handled. - - // In order to avoid the assert, we specially handle the IsSupported checks - // (to better allow dead-code optimizations) and we explicitly throw a PNSE - // as we know that is the desired behavior for the HWIntrinsics when not - // supported. For cases like Vector64.Create, this is fine because it will - // be behind a relevant IsSupported check and will never be hit and the - // software fallback will be executed instead. - - CLANG_FORMAT_COMMENT_ANCHOR; + // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled + // so we can specially handle IsSupported and recursive calls. + + // This is required to appropriately handle the intrinsics on platforms + // which don't support them. On such a platform methods like Vector64.Create + // will be seen as `Intrinsic` and `mustExpand` due to having a code path + // which is recursive. When such a path is hit we expect it to be handled by + // the importer and we fire an assert if it wasn't and in previous versions + // of the JIT would fail fast. This was changed to throw a PNSE instead but + // we still assert as most intrinsics should have been recognized/handled. + + // In order to avoid the assert, we specially handle the IsSupported checks + // (to better allow dead-code optimizations) and we explicitly throw a PNSE + // as we know that is the desired behavior for the HWIntrinsics when not + // supported. For cases like Vector64.Create, this is fine because it will + // be behind a relevant IsSupported check and will never be hit and the + // software fallback will be executed instead. 
+ + CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_HW_INTRINSICS - namespaceName += 10; - const char* platformNamespaceName; + namespaceName += 10; + const char* platformNamespaceName; #if defined(TARGET_XARCH) - platformNamespaceName = ".X86"; + platformNamespaceName = ".X86"; #elif defined(TARGET_ARM64) - platformNamespaceName = ".Arm"; + platformNamespaceName = ".Arm"; #else #error Unsupported platform #endif - if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) - { - CORINFO_SIG_INFO sig; - info.compCompHnd->getMethodSig(method, &sig); + if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) + { + CORINFO_SIG_INFO sig; + info.compCompHnd->getMethodSig(method, &sig); - result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); - } + result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); + } #endif // FEATURE_HW_INTRINSICS - if (result == NI_Illegal) - { - // This allows the relevant code paths to be dropped as dead code even - // on platforms where FEATURE_HW_INTRINSICS is not supported. - - if (strcmp(methodName, "get_IsSupported") == 0) + if (result == NI_Illegal) { - if (strncmp(className, "Vector", 6) == 0) + // This allows the relevant code paths to be dropped as dead code even + // on platforms where FEATURE_HW_INTRINSICS is not supported. 
+ + if (strcmp(methodName, "get_IsSupported") == 0) { - assert( - (strcmp(className, "Vector64`1") == 0) || (strcmp(className, "Vector128`1") == 0) || - (strcmp(className, "Vector256`1") == 0) || (strcmp(className, "Vector512`1") == 0)); + if (strncmp(className, "Vector", 6) == 0) + { + assert((strcmp(className, "Vector64`1") == 0) || + (strcmp(className, "Vector128`1") == 0) || + (strcmp(className, "Vector256`1") == 0) || + (strcmp(className, "Vector512`1") == 0)); - result = NI_IsSupported_Type; + result = NI_IsSupported_Type; + } + else + { + result = NI_IsSupported_False; + } } - else + else if (strcmp(methodName, "get_IsHardwareAccelerated") == 0) { result = NI_IsSupported_False; } - } - else if (strcmp(methodName, "get_IsHardwareAccelerated") == 0) - { - result = NI_IsSupported_False; - } - else if (strcmp(methodName, "get_Count") == 0) - { - assert((strcmp(className, "Vector64`1") == 0) || (strcmp(className, "Vector128`1") == 0) || - (strcmp(className, "Vector256`1") == 0) || (strcmp(className, "Vector512`1") == 0)); + else if (strcmp(methodName, "get_Count") == 0) + { + assert( + (strcmp(className, "Vector64`1") == 0) || (strcmp(className, "Vector128`1") == 0) || + (strcmp(className, "Vector256`1") == 0) || (strcmp(className, "Vector512`1") == 0)); - result = NI_Vector_GetCount; - } - else if (gtIsRecursiveCall(method)) - { - // For the framework itself, any recursive intrinsics will either be - // only supported on a single platform or will be guarded by a relevant - // IsSupported check so the throw PNSE will be valid or dropped. + result = NI_Vector_GetCount; + } + else if (gtIsRecursiveCall(method)) + { + // For the framework itself, any recursive intrinsics will either be + // only supported on a single platform or will be guarded by a relevant + // IsSupported check so the throw PNSE will be valid or dropped. 
- result = NI_Throw_PlatformNotSupportedException; + result = NI_Throw_PlatformNotSupportedException; + } } } } - } - else if (strcmp(namespaceName, "StubHelpers") == 0) - { - if (strcmp(className, "StubHelpers") == 0) + else if (strcmp(namespaceName, "StubHelpers") == 0) { - if (strcmp(methodName, "GetStubContext") == 0) + if (strcmp(className, "StubHelpers") == 0) { - result = NI_System_StubHelpers_GetStubContext; - } - else if (strcmp(methodName, "NextCallReturnAddress") == 0) - { - result = NI_System_StubHelpers_NextCallReturnAddress; - } - } - } - else if (strcmp(namespaceName, "Text") == 0) - { - if (strcmp(className, "UTF8EncodingSealed") == 0) - { - if (strcmp(methodName, "ReadUtf8") == 0) - { - assert(strcmp(enclosingClassName, "UTF8Encoding") == 0); - result = NI_System_Text_UTF8Encoding_UTF8EncodingSealed_ReadUtf8; + if (strcmp(methodName, "GetStubContext") == 0) + { + result = NI_System_StubHelpers_GetStubContext; + } + else if (strcmp(methodName, "NextCallReturnAddress") == 0) + { + result = NI_System_StubHelpers_NextCallReturnAddress; + } } } - } - else if (strcmp(namespaceName, "Threading") == 0) - { - if (strcmp(className, "Interlocked") == 0) + else if (strcmp(namespaceName, "Text") == 0) { - if (strcmp(methodName, "And") == 0) + if (strcmp(className, "UTF8EncodingSealed") == 0) { - result = NI_System_Threading_Interlocked_And; - } - else if (strcmp(methodName, "Or") == 0) - { - result = NI_System_Threading_Interlocked_Or; - } - else if (strcmp(methodName, "CompareExchange") == 0) - { - result = NI_System_Threading_Interlocked_CompareExchange; - } - else if (strcmp(methodName, "Exchange") == 0) - { - result = NI_System_Threading_Interlocked_Exchange; - } - else if (strcmp(methodName, "ExchangeAdd") == 0) - { - result = NI_System_Threading_Interlocked_ExchangeAdd; - } - else if (strcmp(methodName, "MemoryBarrier") == 0) - { - result = NI_System_Threading_Interlocked_MemoryBarrier; - } - else if (strcmp(methodName, "ReadMemoryBarrier") == 0) - { - 
result = NI_System_Threading_Interlocked_ReadMemoryBarrier; + if (strcmp(methodName, "ReadUtf8") == 0) + { + assert(strcmp(enclosingClassName, "UTF8Encoding") == 0); + result = NI_System_Text_UTF8Encoding_UTF8EncodingSealed_ReadUtf8; + } } } - else if (strcmp(className, "Thread") == 0) + else if (strcmp(namespaceName, "Threading") == 0) { - if (strcmp(methodName, "get_CurrentThread") == 0) + if (strcmp(className, "Interlocked") == 0) { - result = NI_System_Threading_Thread_get_CurrentThread; - } - else if (strcmp(methodName, "get_ManagedThreadId") == 0) - { - result = NI_System_Threading_Thread_get_ManagedThreadId; + if (strcmp(methodName, "And") == 0) + { + result = NI_System_Threading_Interlocked_And; + } + else if (strcmp(methodName, "Or") == 0) + { + result = NI_System_Threading_Interlocked_Or; + } + else if (strcmp(methodName, "CompareExchange") == 0) + { + result = NI_System_Threading_Interlocked_CompareExchange; + } + else if (strcmp(methodName, "Exchange") == 0) + { + result = NI_System_Threading_Interlocked_Exchange; + } + else if (strcmp(methodName, "ExchangeAdd") == 0) + { + result = NI_System_Threading_Interlocked_ExchangeAdd; + } + else if (strcmp(methodName, "MemoryBarrier") == 0) + { + result = NI_System_Threading_Interlocked_MemoryBarrier; + } + else if (strcmp(methodName, "ReadMemoryBarrier") == 0) + { + result = NI_System_Threading_Interlocked_ReadMemoryBarrier; + } } - } - else if (strcmp(className, "Volatile") == 0) - { - if (strcmp(methodName, "Read") == 0) + else if (strcmp(className, "Thread") == 0) { - result = NI_System_Threading_Volatile_Read; + if (strcmp(methodName, "get_CurrentThread") == 0) + { + result = NI_System_Threading_Thread_get_CurrentThread; + } + else if (strcmp(methodName, "get_ManagedThreadId") == 0) + { + result = NI_System_Threading_Thread_get_ManagedThreadId; + } } - else if (strcmp(methodName, "Write") == 0) + else if (strcmp(className, "Volatile") == 0) { - result = NI_System_Threading_Volatile_Write; + if 
(strcmp(methodName, "Read") == 0) + { + result = NI_System_Threading_Volatile_Read; + } + else if (strcmp(methodName, "Write") == 0) + { + result = NI_System_Threading_Volatile_Write; + } } } - } } } else if (strcmp(namespaceName, "Internal.Runtime") == 0) diff --git a/src/coreclr/jit/importervectorization.cpp b/src/coreclr/jit/importervectorization.cpp index af7a2f2791d169..26ae9225cbd7ed 100644 --- a/src/coreclr/jit/importervectorization.cpp +++ b/src/coreclr/jit/importervectorization.cpp @@ -182,7 +182,7 @@ GenTree* Compiler::impExpandHalfConstEqualsSIMD( xor1 = gtNewSimdBinOpNode(GT_XOR, simdType, vec1, cnsVec1, baseType, simdSize); } -// ((v1 ^ cns1) | (v2 ^ cns2)) == zero + // ((v1 ^ cns1) | (v2 ^ cns2)) == zero #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX512F_VL)) @@ -317,7 +317,7 @@ GenTree* Compiler::impExpandHalfConstEqualsSWAR( assert(len >= 1 && len <= 8); // Compose Int32 or Int64 values from ushort components -#define MAKEINT32(c1, c2) ((UINT64)c2 << 16) | ((UINT64)c1 << 0) +#define MAKEINT32(c1, c2) ((UINT64)c2 << 16) | ((UINT64)c1 << 0) #define MAKEINT64(c1, c2, c3, c4) ((UINT64)c4 << 48) | ((UINT64)c3 << 32) | ((UINT64)c2 << 16) | ((UINT64)c1 << 0) if (len == 1) @@ -516,10 +516,10 @@ GenTree* Compiler::impExpandHalfConstEquals(GenTreeLclVarCommon* data, GenTree* castedLen = gtNewCastNode(TYP_I_IMPL, gtCloneExpr(lengthFld), false, TYP_I_IMPL); GenTree* byteLen = gtNewOperNode(GT_MUL, TYP_I_IMPL, castedLen, gtNewIconNode(2, TYP_I_IMPL)); GenTreeOp* cmpStart = gtNewOperNode(GT_ADD, TYP_BYREF, gtClone(data), - gtNewOperNode(GT_SUB, TYP_I_IMPL, byteLen, - gtNewIconNode((ssize_t)(len * 2), TYP_I_IMPL))); - GenTree* storeTmp = gtNewTempStore(dataAddr->GetLclNum(), cmpStart); - indirCmp = gtNewOperNode(GT_COMMA, indirCmp->TypeGet(), storeTmp, indirCmp); + gtNewOperNode(GT_SUB, TYP_I_IMPL, byteLen, + gtNewIconNode((ssize_t)(len * 2), TYP_I_IMPL))); + GenTree* storeTmp = gtNewTempStore(dataAddr->GetLclNum(), cmpStart); + 
indirCmp = gtNewOperNode(GT_COMMA, indirCmp->TypeGet(), storeTmp, indirCmp); } GenTreeColon* lenCheckColon = gtNewColonNode(TYP_INT, indirCmp, gtNewFalse()); diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index 0839f9fc2a045e..a85ba05596b519 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -67,7 +67,8 @@ class IndirectCallTransformer { public: - IndirectCallTransformer(Compiler* compiler) : compiler(compiler) + IndirectCallTransformer(Compiler* compiler) + : compiler(compiler) { } @@ -157,7 +158,9 @@ class IndirectCallTransformer { public: Transformer(Compiler* compiler, BasicBlock* block, Statement* stmt) - : compiler(compiler), currBlock(block), stmt(stmt) + : compiler(compiler) + , currBlock(block) + , stmt(stmt) { remainderBlock = nullptr; checkBlock = nullptr; @@ -197,7 +200,7 @@ class IndirectCallTransformer virtual const char* Name() = 0; virtual void ClearFlag() = 0; virtual GenTreeCall* GetCall(Statement* callStmt) = 0; - virtual void FixupRetExpr() = 0; + virtual void FixupRetExpr() = 0; //------------------------------------------------------------------------ // CreateRemainder: split current block at the call stmt and @@ -473,7 +476,8 @@ class IndirectCallTransformer { public: GuardedDevirtualizationTransformer(Compiler* compiler, BasicBlock* block, Statement* stmt) - : Transformer(compiler, block, stmt), returnTemp(BAD_VAR_NUM) + : Transformer(compiler, block, stmt) + , returnTemp(BAD_VAR_NUM) { } @@ -1259,7 +1263,9 @@ class IndirectCallTransformer unsigned m_nodeCount; ClonabilityVisitor(Compiler* compiler) - : GenTreeVisitor(compiler), m_unclonableNode(nullptr), m_nodeCount(0) + : GenTreeVisitor(compiler) + , m_unclonableNode(nullptr) + , m_nodeCount(0) { } diff --git a/src/coreclr/jit/inductionvariableopts.cpp b/src/coreclr/jit/inductionvariableopts.cpp index 59e5b6a0d497de..19755c312de350 100644 --- 
a/src/coreclr/jit/inductionvariableopts.cpp +++ b/src/coreclr/jit/inductionvariableopts.cpp @@ -66,7 +66,6 @@ bool Compiler::optCanSinkWidenedIV(unsigned lclNum, FlowGraphNaturalLoop* loop) LclVarDsc* dsc = lvaGetDesc(lclNum); BasicBlockVisit result = loop->VisitRegularExitBlocks([=](BasicBlock* exit) { - if (!VarSetOps::IsMember(this, exit->bbLiveIn, dsc->lvVarIndex)) { JITDUMP(" Exit " FMT_BB " does not need a sink; V%02u is not live-in\n", exit->bbNum, lclNum); @@ -94,7 +93,6 @@ bool Compiler::optCanSinkWidenedIV(unsigned lclNum, FlowGraphNaturalLoop* loop) // unprofitable. If this ever changes we need some more expansive handling // here. loop->VisitLoopBlocks([=](BasicBlock* block) { - block->VisitAllSuccs(this, [=](BasicBlock* succ) { if (!loop->ContainsBlock(succ) && bbIsHandlerBeg(succ)) { @@ -334,7 +332,10 @@ void Compiler::optReplaceWidenedIV(unsigned lclNum, unsigned ssaNum, unsigned ne }; ReplaceVisitor(Compiler* comp, unsigned lclNum, unsigned ssaNum, unsigned newLclNum) - : GenTreeVisitor(comp), m_lclNum(lclNum), m_ssaNum(ssaNum), m_newLclNum(newLclNum) + : GenTreeVisitor(comp) + , m_lclNum(lclNum) + , m_ssaNum(ssaNum) + , m_newLclNum(newLclNum) { } diff --git a/src/coreclr/jit/inline.cpp b/src/coreclr/jit/inline.cpp index 06ca71126f855c..c8831a75b39bc5 100644 --- a/src/coreclr/jit/inline.cpp +++ b/src/coreclr/jit/inline.cpp @@ -383,7 +383,7 @@ void InlineContext::Dump(bool verbose, unsigned indent) #if defined(DEBUG) calleeName = compiler->eeGetMethodFullName(m_Callee); #else - calleeName = "callee"; + calleeName = "callee"; #endif // defined(DEBUG) } diff --git a/src/coreclr/jit/inline.h b/src/coreclr/jit/inline.h index 342dc3fca5d238..a87b2de79c058e 100644 --- a/src/coreclr/jit/inline.h +++ b/src/coreclr/jit/inline.h @@ -222,9 +222,9 @@ class InlinePolicy } // Policy observations - virtual void NoteSuccess() = 0; - virtual void NoteBool(InlineObservation obs, bool value) = 0; - virtual void NoteFatal(InlineObservation obs) = 0; + virtual void 
NoteSuccess() = 0; + virtual void NoteBool(InlineObservation obs, bool value) = 0; + virtual void NoteFatal(InlineObservation obs) = 0; virtual void NoteInt(InlineObservation obs, int value) = 0; virtual void NoteDouble(InlineObservation obs, double value) = 0; @@ -321,7 +321,7 @@ class InlinePolicy private: // No copying or assignment supported - InlinePolicy(const InlinePolicy&) = delete; + InlinePolicy(const InlinePolicy&) = delete; InlinePolicy& operator=(const InlinePolicy&) = delete; protected: @@ -558,7 +558,7 @@ class InlineResult private: // No copying or assignment allowed. - InlineResult(const InlineResult&) = delete; + InlineResult(const InlineResult&) = delete; InlineResult& operator=(const InlineResult&) = delete; // Report/log/dump decision as appropriate @@ -637,16 +637,16 @@ struct InlArgInfo CallArg* arg; // the caller argument GenTree* argBashTmpNode; // tmp node created, if it may be replaced with actual arg unsigned argTmpNum; // the argument tmp number - unsigned argIsUsed : 1; // is this arg used at all? - unsigned argIsInvariant : 1; // the argument is a constant or a local variable address - unsigned argIsLclVar : 1; // the argument is a local variable - unsigned argIsThis : 1; // the argument is the 'this' pointer - unsigned argHasSideEff : 1; // the argument has side effects - unsigned argHasGlobRef : 1; // the argument has a global ref - unsigned argHasCallerLocalRef : 1; // the argument value depends on an aliased caller local - unsigned argHasTmp : 1; // the argument will be evaluated to a temp - unsigned argHasLdargaOp : 1; // Is there LDARGA(s) operation on this argument? - unsigned argHasStargOp : 1; // Is there STARG(s) operation on this argument? + unsigned argIsUsed : 1; // is this arg used at all? 
+ unsigned argIsInvariant : 1; // the argument is a constant or a local variable address + unsigned argIsLclVar : 1; // the argument is a local variable + unsigned argIsThis : 1; // the argument is the 'this' pointer + unsigned argHasSideEff : 1; // the argument has side effects + unsigned argHasGlobRef : 1; // the argument has a global ref + unsigned argHasCallerLocalRef : 1; // the argument value depends on an aliased caller local + unsigned argHasTmp : 1; // the argument will be evaluated to a temp + unsigned argHasLdargaOp : 1; // Is there LDARGA(s) operation on this argument? + unsigned argHasStargOp : 1; // Is there STARG(s) operation on this argument? unsigned argIsByRefToStructLocal : 1; // Is this arg an address of a struct local or a normed struct local or a // field in them? unsigned argIsExact : 1; // Is this arg of an exact class? @@ -658,10 +658,10 @@ struct InlLclVarInfo { CORINFO_CLASS_HANDLE lclTypeHandle; // Type handle from the signature. Available for structs and REFs. var_types lclTypeInfo; // Type from the signature. - unsigned char lclHasLdlocaOp : 1; // Is there LDLOCA(s) operation on this local? - unsigned char lclHasStlocOp : 1; // Is there a STLOC on this local? + unsigned char lclHasLdlocaOp : 1; // Is there LDLOCA(s) operation on this local? + unsigned char lclHasStlocOp : 1; // Is there a STLOC on this local? unsigned char lclHasMultipleStlocOp : 1; // Is there more than one STLOC on this local - unsigned char lclIsPinned : 1; + unsigned char lclIsPinned : 1; }; // InlineInfo provides detailed information about a particular inline candidate. 
@@ -887,8 +887,8 @@ class InlineContext InlinePolicy* m_Policy; // policy that evaluated this inline unsigned m_TreeID; // ID of the GenTreeCall in the parent bool m_Devirtualized : 1; // true if this was a devirtualized call - bool m_Guarded : 1; // true if this was a guarded call - bool m_Unboxed : 1; // true if this call now invokes the unboxed entry + bool m_Guarded : 1; // true if this was a guarded call + bool m_Unboxed : 1; // true if this call now invokes the unboxed entry #endif // defined(DEBUG) @@ -1026,7 +1026,7 @@ class InlineStrategy void DumpDataContents(FILE* file); // Dump xml-formatted description of inlines - void DumpXml(FILE* file = stderr, unsigned indent = 0); + void DumpXml(FILE* file = stderr, unsigned indent = 0); static void FinalizeXml(FILE* file = stderr); // Cache for file position of this method in the inline xml diff --git a/src/coreclr/jit/inlinepolicy.cpp b/src/coreclr/jit/inlinepolicy.cpp index d057ccd09ed0d9..3b771f291607bb 100644 --- a/src/coreclr/jit/inlinepolicy.cpp +++ b/src/coreclr/jit/inlinepolicy.cpp @@ -945,8 +945,9 @@ void DefaultPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) { // Inline appears to be unprofitable JITLOG_THIS(m_RootCompiler, - (LL_INFO100000, "Native estimate for function size exceeds threshold" - " for inlining %g > %g (multiplier = %g)\n", + (LL_INFO100000, + "Native estimate for function size exceeds threshold" + " for inlining %g > %g (multiplier = %g)\n", (double)m_CalleeNativeSizeEstimate / SIZE_SCALE, (double)threshold / SIZE_SCALE, m_Multiplier)); // Fail the inline @@ -963,8 +964,9 @@ void DefaultPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) { // Inline appears to be profitable JITLOG_THIS(m_RootCompiler, - (LL_INFO100000, "Native estimate for function size is within threshold" - " for inlining %g <= %g (multiplier = %g)\n", + (LL_INFO100000, + "Native estimate for function size is within threshold" + " for inlining %g <= %g (multiplier = %g)\n", 
(double)m_CalleeNativeSizeEstimate / SIZE_SCALE, (double)threshold / SIZE_SCALE, m_Multiplier)); // Update candidacy @@ -1072,7 +1074,8 @@ bool DefaultPolicy::PropagateNeverToRuntime() const // compiler -- compiler instance doing the inlining (root compiler) // isPrejitRoot -- true if this compiler is prejitting the root method -RandomPolicy::RandomPolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot) +RandomPolicy::RandomPolicy(Compiler* compiler, bool isPrejitRoot) + : DiscretionaryPolicy(compiler, isPrejitRoot) { m_Random = compiler->m_inlineStrategy->GetRandom(); } @@ -2768,7 +2771,8 @@ void DiscretionaryPolicy::DumpData(FILE* file) const // compiler -- compiler instance doing the inlining (root compiler) // isPrejitRoot -- true if this compiler is prejitting the root method -ModelPolicy::ModelPolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot) +ModelPolicy::ModelPolicy(Compiler* compiler, bool isPrejitRoot) + : DiscretionaryPolicy(compiler, isPrejitRoot) { // Empty } @@ -2969,7 +2973,8 @@ void ModelPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) // compiler -- compiler instance doing the inlining (root compiler) // isPrejitRoot -- true if this compiler is prejitting the root method -ProfilePolicy::ProfilePolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot) +ProfilePolicy::ProfilePolicy(Compiler* compiler, bool isPrejitRoot) + : DiscretionaryPolicy(compiler, isPrejitRoot) { // Empty } @@ -3169,7 +3174,8 @@ void ProfilePolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) // compiler -- compiler instance doing the inlining (root compiler) // isPrejitRoot -- true if this compiler is prejitting the root method -FullPolicy::FullPolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot) +FullPolicy::FullPolicy(Compiler* compiler, bool isPrejitRoot) + : DiscretionaryPolicy(compiler, 
isPrejitRoot) { // Empty } @@ -3236,7 +3242,8 @@ void FullPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) // compiler -- compiler instance doing the inlining (root compiler) // isPrejitRoot -- true if this compiler is prejitting the root method -SizePolicy::SizePolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot) +SizePolicy::SizePolicy(Compiler* compiler, bool isPrejitRoot) + : DiscretionaryPolicy(compiler, isPrejitRoot) { // Empty } diff --git a/src/coreclr/jit/inlinepolicy.h b/src/coreclr/jit/inlinepolicy.h index 52333d5aacac3c..a8d8e67f1db3cb 100644 --- a/src/coreclr/jit/inlinepolicy.h +++ b/src/coreclr/jit/inlinepolicy.h @@ -48,7 +48,8 @@ class LegalPolicy : public InlinePolicy public: // Constructor - LegalPolicy(bool isPrejitRoot) : InlinePolicy(isPrejitRoot) + LegalPolicy(bool isPrejitRoot) + : InlinePolicy(isPrejitRoot) { // empty } @@ -157,7 +158,7 @@ class DefaultPolicy : public LegalPolicy // Helper methods virtual double DetermineMultiplier(); int DetermineNativeSizeEstimate(); - int DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methodInfo); + int DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methodInfo); // Data members Compiler* m_RootCompiler; // root compiler instance @@ -174,20 +175,20 @@ class DefaultPolicy : public LegalPolicy unsigned m_ConstantArgFeedsConstantTest; int m_CalleeNativeSizeEstimate; int m_CallsiteNativeSizeEstimate; - bool m_IsForceInline : 1; - bool m_IsForceInlineKnown : 1; - bool m_IsInstanceCtor : 1; + bool m_IsForceInline : 1; + bool m_IsForceInlineKnown : 1; + bool m_IsInstanceCtor : 1; bool m_IsFromPromotableValueClass : 1; - bool m_HasSimd : 1; - bool m_LooksLikeWrapperMethod : 1; - bool m_MethodIsMostlyLoadStore : 1; - bool m_CallsiteIsInTryRegion : 1; - bool m_CallsiteIsInLoop : 1; - bool m_IsNoReturn : 1; - bool m_IsNoReturnKnown : 1; - bool m_ConstArgFeedsIsKnownConst : 1; - bool m_ArgFeedsIsKnownConst : 1; - bool m_InsideThrowBlock : 1; + 
bool m_HasSimd : 1; + bool m_LooksLikeWrapperMethod : 1; + bool m_MethodIsMostlyLoadStore : 1; + bool m_CallsiteIsInTryRegion : 1; + bool m_CallsiteIsInLoop : 1; + bool m_IsNoReturn : 1; + bool m_IsNoReturnKnown : 1; + bool m_ConstArgFeedsIsKnownConst : 1; + bool m_ArgFeedsIsKnownConst : 1; + bool m_InsideThrowBlock : 1; }; // ExtendedDefaultPolicy is a slightly more aggressive variant of @@ -271,11 +272,11 @@ class ExtendedDefaultPolicy : public DefaultPolicy unsigned m_UnrollableMemop; unsigned m_Switch; unsigned m_DivByCns; - bool m_ReturnsStructByValue : 1; - bool m_IsFromValueClass : 1; - bool m_NonGenericCallsGeneric : 1; + bool m_ReturnsStructByValue : 1; + bool m_IsFromValueClass : 1; + bool m_NonGenericCallsGeneric : 1; bool m_IsCallsiteInNoReturnRegion : 1; - bool m_HasProfileWeights : 1; + bool m_HasProfileWeights : 1; }; // DiscretionaryPolicy is a variant of the default policy. It diff --git a/src/coreclr/jit/instr.cpp b/src/coreclr/jit/instr.cpp index dd82e7c08f92ba..7866c8a5e7b0f6 100644 --- a/src/coreclr/jit/instr.cpp +++ b/src/coreclr/jit/instr.cpp @@ -876,7 +876,7 @@ CodeGen::OperandDesc CodeGen::genOperandDesc(GenTree* op) // broadcast -> LCL_VAR(TYP_(U)INT) ssize_t scalarValue = hwintrinsicChild->AsIntCon()->IconValue(); UNATIVE_OFFSET cnum = emit->emitDataConst(&scalarValue, genTypeSize(simdBaseType), - genTypeSize(simdBaseType), simdBaseType); + genTypeSize(simdBaseType), simdBaseType); return OperandDesc(compiler->eeFindJitDataOffs(cnum)); } else @@ -1124,9 +1124,9 @@ void CodeGen::inst_RV_TT(instruction ins, emitAttr size, regNumber op1Reg, GenTr } /***************************************************************************** -* -* Generate an instruction of the form "op reg1, reg2, icon". -*/ + * + * Generate an instruction of the form "op reg1, reg2, icon". 
+ */ void CodeGen::inst_RV_RV_IV(instruction ins, emitAttr size, regNumber reg1, regNumber reg2, unsigned ival) { @@ -1256,8 +1256,8 @@ void CodeGen::inst_RV_RV_TT(instruction ins, emitter* emit = GetEmitter(); noway_assert(emit->emitVerifyEncodable(ins, EA_SIZE(size), targetReg)); -// TODO-XArch-CQ: Commutative operations can have op1 be contained -// TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained + // TODO-XArch-CQ: Commutative operations can have op1 be contained + // TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained #if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS) if (CodeGenInterface::IsEmbeddedBroadcastEnabled(ins, op2)) diff --git a/src/coreclr/jit/instrsarm.h b/src/coreclr/jit/instrsarm.h index 9356150d4b2e83..3a1c871d316f6a 100644 --- a/src/coreclr/jit/instrsarm.h +++ b/src/coreclr/jit/instrsarm.h @@ -19,7 +19,7 @@ * e8 -- encoding 8 * e9 -- encoding 9 * -******************************************************************************/ + ******************************************************************************/ #if !defined(TARGET_ARM) #error Unexpected target type diff --git a/src/coreclr/jit/instrsarm64.h b/src/coreclr/jit/instrsarm64.h index c07976f1eca0b6..c6ac7404c569d0 100644 --- a/src/coreclr/jit/instrsarm64.h +++ b/src/coreclr/jit/instrsarm64.h @@ -18,7 +18,7 @@ * e8 -- encoding 8 * e9 -- encoding 9 * -******************************************************************************/ + ******************************************************************************/ #if !defined(TARGET_ARM64) #error Unexpected target type diff --git a/src/coreclr/jit/instrsloongarch64.h b/src/coreclr/jit/instrsloongarch64.h index 4f94516c5fb91d..3794d91e02e388 100644 --- a/src/coreclr/jit/instrsloongarch64.h +++ b/src/coreclr/jit/instrsloongarch64.h @@ -11,7 +11,7 @@ * mask -- instruction's mask * fmt -- disasmbly format * -******************************************************************************/ + 
******************************************************************************/ #if !defined(TARGET_LOONGARCH64) #error Unexpected target type diff --git a/src/coreclr/jit/instrsxarch.h b/src/coreclr/jit/instrsxarch.h index 17443cb9784927..440cc0033c82f9 100644 --- a/src/coreclr/jit/instrsxarch.h +++ b/src/coreclr/jit/instrsxarch.h @@ -18,7 +18,7 @@ * tt -- the tupletype for the instruction * flags -- flags, see INS_FLAGS_* enum * -******************************************************************************/ + ******************************************************************************/ // clang-format off #if !defined(TARGET_XARCH) diff --git a/src/coreclr/jit/jit.h b/src/coreclr/jit/jit.h index 1df8c034d0c1df..754e6b27f44416 100644 --- a/src/coreclr/jit/jit.h +++ b/src/coreclr/jit/jit.h @@ -26,8 +26,9 @@ #define ZERO 0 #ifdef _MSC_VER -#define CHECK_STRUCT_PADDING 0 // Set this to '1' to enable warning C4820 "'bytes' bytes padding added after - // construct 'member_name'" on interesting structs/classes +#define CHECK_STRUCT_PADDING \ + 0 // Set this to '1' to enable warning C4820 "'bytes' bytes padding added after + // construct 'member_name'" on interesting structs/classes #else #define CHECK_STRUCT_PADDING 0 // Never enable it for non-MSFT compilers #endif @@ -295,9 +296,9 @@ typedef ptrdiff_t ssize_t; #include "corjit.h" #include "jitee.h" -#define __OPERATOR_NEW_INLINE 1 // indicate that I will define these -#define __PLACEMENT_NEW_INLINE // don't bring in the global placement new, it is easy to make a mistake - // with our new(compiler*) pattern. +#define __OPERATOR_NEW_INLINE 1 // indicate that I will define these +#define __PLACEMENT_NEW_INLINE // don't bring in the global placement new, it is easy to make a mistake + // with our new(compiler*) pattern. 
#include "utilcode.h" // this defines assert as _ASSERTE #include "host.h" // this redefines assert for the JIT to use assertAbort @@ -319,7 +320,7 @@ typedef ptrdiff_t ssize_t; #endif #ifdef DEBUG -#define INDEBUG(x) x +#define INDEBUG(x) x #define DEBUGARG(x) , x #else #define INDEBUG(x) @@ -334,7 +335,7 @@ typedef ptrdiff_t ssize_t; #if defined(UNIX_AMD64_ABI) #define UNIX_AMD64_ABI_ONLY_ARG(x) , x -#define UNIX_AMD64_ABI_ONLY(x) x +#define UNIX_AMD64_ABI_ONLY(x) x #else // !defined(UNIX_AMD64_ABI) #define UNIX_AMD64_ABI_ONLY_ARG(x) #define UNIX_AMD64_ABI_ONLY(x) @@ -342,7 +343,7 @@ typedef ptrdiff_t ssize_t; #if defined(TARGET_LOONGARCH64) #define UNIX_LOONGARCH64_ONLY_ARG(x) , x -#define UNIX_LOONGARCH64_ONLY(x) x +#define UNIX_LOONGARCH64_ONLY(x) x #else // !TARGET_LOONGARCH64 #define UNIX_LOONGARCH64_ONLY_ARG(x) #define UNIX_LOONGARCH64_ONLY(x) @@ -355,16 +356,16 @@ typedef ptrdiff_t ssize_t; #if defined(UNIX_AMD64_ABI) #define UNIX_AMD64_ABI_ONLY_ARG(x) , x -#define UNIX_AMD64_ABI_ONLY(x) x +#define UNIX_AMD64_ABI_ONLY(x) x #else // !defined(UNIX_AMD64_ABI) #define UNIX_AMD64_ABI_ONLY_ARG(x) #define UNIX_AMD64_ABI_ONLY(x) #endif // defined(UNIX_AMD64_ABI) #if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) -#define MULTIREG_HAS_SECOND_GC_RET 1 +#define MULTIREG_HAS_SECOND_GC_RET 1 #define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x) , x -#define MULTIREG_HAS_SECOND_GC_RET_ONLY(x) x +#define MULTIREG_HAS_SECOND_GC_RET_ONLY(x) x #else // !defined(UNIX_AMD64_ABI) #define MULTIREG_HAS_SECOND_GC_RET 0 #define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x) @@ -385,7 +386,7 @@ typedef ptrdiff_t ssize_t; #define DUMMY_INIT(x) (x) #define REGEN_SHORTCUTS 0 -#define REGEN_CALLPAT 0 +#define REGEN_CALLPAT 0 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX @@ -473,9 +474,9 @@ class GlobalJitOptions 
/*****************************************************************************/ -#define CSE_INTO_HANDLERS 0 -#define DUMP_FLOWGRAPHS DEBUG // Support for creating Xml Flowgraph reports in *.fgx files -#define HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION 0 // if 1 we must have all handler entry points in the Hot code section +#define CSE_INTO_HANDLERS 0 +#define DUMP_FLOWGRAPHS DEBUG // Support for creating Xml Flowgraph reports in *.fgx files +#define HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION 0 // if 1 we must have all handler entry points in the Hot code section /*****************************************************************************/ @@ -483,40 +484,43 @@ class GlobalJitOptions /*****************************************************************************/ -#define DUMP_GC_TABLES DEBUG +#define DUMP_GC_TABLES DEBUG #define VERIFY_GC_TABLES 0 -#define REARRANGE_ADDS 1 +#define REARRANGE_ADDS 1 -#define FUNC_INFO_LOGGING 1 // Support dumping function info to a file. In retail, only NYIs, with no function name, - // are dumped. +#define FUNC_INFO_LOGGING \ + 1 // Support dumping function info to a file. In retail, only NYIs, with no function name, + // are dumped. /*****************************************************************************/ /*****************************************************************************/ /* Set these to 1 to collect and output various statistics about the JIT */ -#define CALL_ARG_STATS 0 // Collect stats about calls and call arguments. -#define COUNT_BASIC_BLOCKS 0 // Create a histogram of basic block sizes, and a histogram of IL sizes in the simple - // case of single block methods. -#define COUNT_LOOPS 0 // Collect stats about loops, such as the total number of natural loops, a histogram of +#define CALL_ARG_STATS 0 // Collect stats about calls and call arguments. +#define COUNT_BASIC_BLOCKS \ + 0 // Create a histogram of basic block sizes, and a histogram of IL sizes in the simple + // case of single block methods. 
+#define COUNT_LOOPS \ + 0 // Collect stats about loops, such as the total number of natural loops, a histogram of // the number of loop exits, etc. -#define DISPLAY_SIZES 0 // Display generated code, data, and GC information sizes. -#define MEASURE_BLOCK_SIZE 0 // Collect stats about basic block and FlowEdge node sizes and memory allocations. -#define MEASURE_FATAL 0 // Count the number of calls to fatal(), including NYIs and noway_asserts. -#define MEASURE_NODE_SIZE 0 // Collect stats about GenTree node allocations. +#define DISPLAY_SIZES 0 // Display generated code, data, and GC information sizes. +#define MEASURE_BLOCK_SIZE 0 // Collect stats about basic block and FlowEdge node sizes and memory allocations. +#define MEASURE_FATAL 0 // Count the number of calls to fatal(), including NYIs and noway_asserts. +#define MEASURE_NODE_SIZE 0 // Collect stats about GenTree node allocations. #define MEASURE_PTRTAB_SIZE 0 // Collect stats about GC pointer table allocations. -#define EMITTER_STATS 0 // Collect stats on the emitter. -#define NODEBASH_STATS 0 // Collect stats on changed gtOper values in GenTree's. -#define COUNT_AST_OPERS 0 // Display use counts for GenTree operators. +#define EMITTER_STATS 0 // Collect stats on the emitter. +#define NODEBASH_STATS 0 // Collect stats on changed gtOper values in GenTree's. +#define COUNT_AST_OPERS 0 // Display use counts for GenTree operators. #ifdef DEBUG #define MEASURE_MEM_ALLOC 1 // Collect memory allocation stats. -#define LOOP_HOIST_STATS 1 // Collect loop hoisting stats. -#define TRACK_LSRA_STATS 1 // Collect LSRA stats +#define LOOP_HOIST_STATS 1 // Collect loop hoisting stats. 
+#define TRACK_LSRA_STATS 1 // Collect LSRA stats #define TRACK_ENREG_STATS 1 // Collect enregistration stats #else #define MEASURE_MEM_ALLOC 0 // You can set this to 1 to get memory stats in retail, as well -#define LOOP_HOIST_STATS 0 // You can set this to 1 to get loop hoist stats in retail, as well -#define TRACK_LSRA_STATS 0 // You can set this to 1 to get LSRA stats in retail, as well +#define LOOP_HOIST_STATS 0 // You can set this to 1 to get loop hoist stats in retail, as well +#define TRACK_LSRA_STATS 0 // You can set this to 1 to get LSRA stats in retail, as well #define TRACK_ENREG_STATS 0 #endif @@ -602,7 +606,7 @@ const bool dspGCtbls = true; JitTls::GetCompiler()->fgTableDispBasicBlock(b); #define VERBOSE JitTls::GetCompiler()->verbose // Development-time only macros, simplify guards for specified IL methods one wants to debug/add log messages for -#define ISMETHOD(name) (strcmp(JitTls::GetCompiler()->impInlineRoot()->info.compMethodName, name) == 0) +#define ISMETHOD(name) (strcmp(JitTls::GetCompiler()->impInlineRoot()->info.compMethodName, name) == 0) #define ISMETHODHASH(hash) (JitTls::GetCompiler()->impInlineRoot()->info.compMethodHash() == hash) #else // !DEBUG #define JITDUMP(...) 
@@ -628,8 +632,9 @@ const bool dspGCtbls = true; */ #ifdef TARGET_X86 -#define DOUBLE_ALIGN 1 // permit the double alignment of ESP in prolog, - // and permit the double alignment of local offsets +#define DOUBLE_ALIGN \ + 1 // permit the double alignment of ESP in prolog, + // and permit the double alignment of local offsets #else #define DOUBLE_ALIGN 0 // no special handling for double alignment #endif @@ -673,7 +678,7 @@ inline bool IsUninitialized(T data); #define MISALIGNED_RD_U2(src) (*castto(src, unsigned short*)) #define MISALIGNED_WR_I2(dst, val) *castto(dst, short*) = val; -#define MISALIGNED_WR_I4(dst, val) *castto(dst, int*) = val; +#define MISALIGNED_WR_I4(dst, val) *castto(dst, int*) = val; #define MISALIGNED_WR_ST(dst, val) *castto(dst, ssize_t*) = val; @@ -740,16 +745,16 @@ inline size_t unsigned_abs(__int64 x) #define FEATURE_TAILCALL_OPT_SHARED_RETURN 0 #endif // !FEATURE_TAILCALL_OPT -#define CLFLG_CODESIZE 0x00001 -#define CLFLG_CODESPEED 0x00002 -#define CLFLG_CSE 0x00004 -#define CLFLG_REGVAR 0x00008 -#define CLFLG_RNGCHKOPT 0x00010 -#define CLFLG_DEADSTORE 0x00020 +#define CLFLG_CODESIZE 0x00001 +#define CLFLG_CODESPEED 0x00002 +#define CLFLG_CSE 0x00004 +#define CLFLG_REGVAR 0x00008 +#define CLFLG_RNGCHKOPT 0x00010 +#define CLFLG_DEADSTORE 0x00020 #define CLFLG_CODEMOTION 0x00040 -#define CLFLG_QMARK 0x00080 -#define CLFLG_TREETRANS 0x00100 -#define CLFLG_INLINING 0x00200 +#define CLFLG_QMARK 0x00080 +#define CLFLG_TREETRANS 0x00100 +#define CLFLG_INLINING 0x00200 #if FEATURE_STRUCTPROMOTE #define CLFLG_STRUCTPROMOTE 0x00400 @@ -813,7 +818,7 @@ class JitTls #endif static Compiler* GetCompiler(); - static void SetCompiler(Compiler* compiler); + static void SetCompiler(Compiler* compiler); }; #if defined(DEBUG) diff --git a/src/coreclr/jit/jitconfig.cpp b/src/coreclr/jit/jitconfig.cpp index 3c85031cee6cda..19730be75c2cdc 100644 --- a/src/coreclr/jit/jitconfig.cpp +++ b/src/coreclr/jit/jitconfig.cpp @@ -193,7 +193,7 @@ void 
JitConfigValues::initialize(ICorJitHost* host) assert(!m_isInitialized); #define CONFIG_INTEGER(name, key, defaultValue) m_##name = host->getIntConfigValue(key, defaultValue); -#define CONFIG_STRING(name, key) m_##name = host->getStringConfigValue(key); +#define CONFIG_STRING(name, key) m_##name = host->getStringConfigValue(key); #define CONFIG_METHODSET(name, key) \ const WCHAR* name##value = host->getStringConfigValue(key); \ m_##name.initialize(name##value, host); \ @@ -212,7 +212,7 @@ void JitConfigValues::destroy(ICorJitHost* host) } #define CONFIG_INTEGER(name, key, defaultValue) -#define CONFIG_STRING(name, key) host->freeStringConfigValue(m_##name); +#define CONFIG_STRING(name, key) host->freeStringConfigValue(m_##name); #define CONFIG_METHODSET(name, key) m_##name.destroy(host); #include "jitconfigvalues.h" diff --git a/src/coreclr/jit/jitconfig.h b/src/coreclr/jit/jitconfig.h index e19021cd52f22b..bd1c552f59438a 100644 --- a/src/coreclr/jit/jitconfig.h +++ b/src/coreclr/jit/jitconfig.h @@ -31,7 +31,7 @@ class JitConfigValues char* m_list; MethodName* m_names; - MethodSet(const MethodSet& other) = delete; + MethodSet(const MethodSet& other) = delete; MethodSet& operator=(const MethodSet& other) = delete; public: @@ -56,8 +56,8 @@ class JitConfigValues private: #define CONFIG_INTEGER(name, key, defaultValue) int m_##name; -#define CONFIG_STRING(name, key) const WCHAR* m_##name; -#define CONFIG_METHODSET(name, key) MethodSet m_##name; +#define CONFIG_STRING(name, key) const WCHAR* m_##name; +#define CONFIG_METHODSET(name, key) MethodSet m_##name; #include "jitconfigvalues.h" public: @@ -81,7 +81,7 @@ class JitConfigValues private: bool m_isInitialized; - JitConfigValues(const JitConfigValues& other) = delete; + JitConfigValues(const JitConfigValues& other) = delete; JitConfigValues& operator=(const JitConfigValues& other) = delete; public: diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h index e0a2d7cb16fcb8..28d75fc93c3104 
100644 --- a/src/coreclr/jit/jitconfigvalues.h +++ b/src/coreclr/jit/jitconfigvalues.h @@ -377,10 +377,10 @@ CONFIG_INTEGER(JitDisableSimdVN, W("JitDisableSimdVN"), 0) // Default 0, ValueNu // CONFIG_INTEGER(JitConstCSE, W("JitConstCSE"), 0) -#define CONST_CSE_ENABLE_ARM 0 -#define CONST_CSE_DISABLE_ALL 1 +#define CONST_CSE_ENABLE_ARM 0 +#define CONST_CSE_DISABLE_ALL 1 #define CONST_CSE_ENABLE_ARM_NO_SHARING 2 -#define CONST_CSE_ENABLE_ALL 3 +#define CONST_CSE_ENABLE_ALL 3 #define CONST_CSE_ENABLE_ALL_NO_SHARING 4 // If nonzero, use the greedy RL policy. diff --git a/src/coreclr/jit/jitee.h b/src/coreclr/jit/jitee.h index 27963ac356efb5..71f53b4e10d7d6 100644 --- a/src/coreclr/jit/jitee.h +++ b/src/coreclr/jit/jitee.h @@ -54,13 +54,15 @@ class JitFlags }; // clang-format on - JitFlags() : m_jitFlags(0) + JitFlags() + : m_jitFlags(0) { // empty } // Convenience constructor to set exactly one flags. - JitFlags(JitFlag flag) : m_jitFlags(0) + JitFlags(JitFlag flag) + : m_jitFlags(0) { Set(flag); } diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 75749e50fbd3bc..fc12d55c35a468 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -1718,7 +1718,7 @@ void Compiler::fgSortEHTable() (hndBegOff >= xtab1->ebdHndBegOffset && hndEndOff <= xtab1->ebdHndEndOffset) || (xtab1->HasFilter() && (hndBegOff >= xtab1->ebdFilterBegOffset && hndEndOff <= xtab1->ebdHndBegOffset)) // Note that end of filter is beginning of handler - ) + ) { #ifdef DEBUG if (verbose) @@ -2082,7 +2082,7 @@ bool Compiler::fgNormalizeEHCase2() if (ehOuter->ebdIsSameTry(mutualTryBeg, mutualTryLast)) { -// clang-format off + // clang-format off // Don't touch mutually-protect regions: their 'try' regions must remain identical! // We want to continue the looping outwards, in case we have something like this: // @@ -2131,7 +2131,7 @@ bool Compiler::fgNormalizeEHCase2() // // In this case, all the 'try' start at the same block! 
Note that there are two sets of mutually-protect regions, // separated by some nesting. -// clang-format on + // clang-format on #ifdef DEBUG if (verbose) @@ -2361,7 +2361,7 @@ bool Compiler::fgCreateFiltersForGenericExceptions() { GenTree* ctxTree = getRuntimeContextTree(embedInfo.lookup.lookupKind.runtimeLookupKind); runtimeLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, - TYP_I_IMPL, &embedInfo.lookup.lookupKind, ctxTree); + TYP_I_IMPL, &embedInfo.lookup.lookupKind, ctxTree); } else { @@ -3026,8 +3026,8 @@ void Compiler::fgVerifyHandlerTab() assert(blockNumMap[block->bbNum] == 0); // If this fails, we have two blocks with the same block number. blockNumMap[block->bbNum] = newBBnum++; } -// Note that there may be some blockNumMap[x] == 0, for a block number 'x' that has been deleted, if the blocks -// haven't been renumbered since the deletion. + // Note that there may be some blockNumMap[x] == 0, for a block number 'x' that has been deleted, if the blocks + // haven't been renumbered since the deletion. #if 0 // Useful for debugging, but don't want to put this in the dump all the time if (verbose) @@ -3274,9 +3274,9 @@ void Compiler::fgVerifyHandlerTab() assert(bbNumOuterHndLast != 0); assert(bbNumOuterHndBeg <= bbNumOuterHndLast); -// The outer handler must completely contain all the blocks in the EH region nested within it. However, if -// funclets have been created, it's harder to make any relationship asserts about the order of nested -// handlers, which also have been made into funclets. + // The outer handler must completely contain all the blocks in the EH region nested within it. However, if + // funclets have been created, it's harder to make any relationship asserts about the order of nested + // handlers, which also have been made into funclets. 
#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) @@ -4339,7 +4339,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) bFilterLast->bbNum, bPrev->bbNum); } #endif // DEBUG - // Change the target for bFilterLast from the old first 'block' to the new first 'bPrev' + // Change the target for bFilterLast from the old first 'block' to the new first 'bPrev' fgRedirectTargetEdge(bFilterLast, bPrev); } } diff --git a/src/coreclr/jit/jiteh.h b/src/coreclr/jit/jiteh.h index 95ae62527897ba..55b56ac9833c42 100644 --- a/src/coreclr/jit/jiteh.h +++ b/src/coreclr/jit/jiteh.h @@ -83,7 +83,8 @@ struct EHblkDsc BasicBlock* ebdTryLast; // Last block of the try BasicBlock* ebdHndBeg; // First block of the handler BasicBlock* ebdHndLast; // Last block of the handler - union { + union + { BasicBlock* ebdFilter; // First block of filter, if HasFilter() unsigned ebdTyp; // Exception type (a class token), otherwise }; @@ -165,8 +166,8 @@ struct EHblkDsc unsigned ebdGetEnclosingRegionIndex(bool* inTryRegion); static bool ebdIsSameTry(EHblkDsc* h1, EHblkDsc* h2); // Same 'try' region? Compare begin/last blocks. - bool ebdIsSameTry(Compiler* comp, unsigned t2); - bool ebdIsSameTry(BasicBlock* ebdTryBeg, BasicBlock* ebdTryLast); + bool ebdIsSameTry(Compiler* comp, unsigned t2); + bool ebdIsSameTry(BasicBlock* ebdTryBeg, BasicBlock* ebdTryLast); #ifdef DEBUG void DispEntry(unsigned num); // Display this table entry diff --git a/src/coreclr/jit/jitexpandarray.h b/src/coreclr/jit/jitexpandarray.h index 646f9e6747a3be..8eaf52705986ad 100644 --- a/src/coreclr/jit/jitexpandarray.h +++ b/src/coreclr/jit/jitexpandarray.h @@ -54,7 +54,10 @@ class JitExpandArray // of size max(`minSize`, `idx`) is allocated. 
// JitExpandArray(CompAllocator alloc, unsigned minSize = 1) - : m_alloc(alloc), m_members(nullptr), m_size(0), m_minSize(minSize) + : m_alloc(alloc) + , m_members(nullptr) + , m_size(0) + , m_minSize(minSize) { assert(minSize > 0); } @@ -219,7 +222,9 @@ class JitExpandArrayStack : public JitExpandArray // Notes: // See JitExpandArray constructor notes. // - JitExpandArrayStack(CompAllocator alloc, unsigned minSize = 1) : JitExpandArray(alloc, minSize), m_used(0) + JitExpandArrayStack(CompAllocator alloc, unsigned minSize = 1) + : JitExpandArray(alloc, minSize) + , m_used(0) { } diff --git a/src/coreclr/jit/jitgcinfo.h b/src/coreclr/jit/jitgcinfo.h index b73e8fbc68773a..288042d4c6b1e4 100644 --- a/src/coreclr/jit/jitgcinfo.h +++ b/src/coreclr/jit/jitgcinfo.h @@ -27,7 +27,9 @@ struct RegSlotIdKey { } - RegSlotIdKey(unsigned short regNum, unsigned flags) : m_regNum(regNum), m_flags((unsigned short)flags) + RegSlotIdKey(unsigned short regNum, unsigned flags) + : m_regNum(regNum) + , m_flags((unsigned short)flags) { assert(m_flags == flags); } @@ -54,7 +56,9 @@ struct StackSlotIdKey } StackSlotIdKey(int offset, bool fpRel, unsigned flags) - : m_offset(offset), m_fpRel(fpRel), m_flags((unsigned short)flags) + : m_offset(offset) + , m_fpRel(fpRel) + , m_flags((unsigned short)flags) { assert(flags == m_flags); } @@ -165,7 +169,7 @@ class GCInfo unsigned char rpdCallInstrSize; // Length of the call instruction. #endif - unsigned short rpdArg : 1; // is this an argument descriptor? + unsigned short rpdArg : 1; // is this an argument descriptor? unsigned short rpdArgType : 2; // is this an argument push,pop, or kill? rpdArgType_t rpdArgTypeGet() { @@ -179,8 +183,8 @@ class GCInfo } unsigned short rpdIsThis : 1; // is it the 'this' pointer - unsigned short rpdCall : 1; // is this a true call site? - unsigned short : 1; // Padding bit, so next two start on a byte boundary + unsigned short rpdCall : 1; // is this a true call site? 
+ unsigned short : 1; // Padding bit, so next two start on a byte boundary unsigned short rpdCallGCrefRegs : CNT_CALLEE_SAVED; // Callee-saved registers containing GC pointers. unsigned short rpdCallByrefRegs : CNT_CALLEE_SAVED; // Callee-saved registers containing byrefs. @@ -261,7 +265,8 @@ class GCInfo unsigned short cdArgCnt; - union { + union + { struct // used if cdArgCnt == 0 { unsigned cdArgMask; // ptr arg bitfield @@ -278,7 +283,7 @@ class GCInfo CallDsc* gcCallDescList; CallDsc* gcCallDescLast; -//------------------------------------------------------------------------- + //------------------------------------------------------------------------- #ifdef JIT32_GCENCODER void gcCountForHeader(UNALIGNED unsigned int* pUntrackedCount, UNALIGNED unsigned int* pVarPtrTableSize); @@ -303,7 +308,7 @@ class GCInfo #ifdef JIT32_GCENCODER size_t gcPtrTableSize(const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset); - BYTE* gcPtrTableSave(BYTE* destPtr, const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset); + BYTE* gcPtrTableSave(BYTE* destPtr, const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset); #endif void gcRegPtrSetInit(); /*****************************************************************************/ @@ -382,7 +387,7 @@ class GCInfo #ifdef JIT32_GCENCODER size_t gcInfoBlockHdrDump(const BYTE* table, - InfoHdr* header, /* OUT */ + InfoHdr* header, /* OUT */ unsigned* methodSize); /* OUT */ size_t gcDumpPtrTable(const BYTE* table, const InfoHdr& header, unsigned methodSize); diff --git a/src/coreclr/jit/jithashtable.h b/src/coreclr/jit/jithashtable.h index 9ad73dbf2f7d51..f699c3eee19d24 100644 --- a/src/coreclr/jit/jithashtable.h +++ b/src/coreclr/jit/jithashtable.h @@ -57,10 +57,16 @@ class JitHashTableBehavior class JitPrimeInfo { public: - constexpr JitPrimeInfo() : prime(0), magic(0), shift(0) + constexpr JitPrimeInfo() + : prime(0) + , magic(0) + , shift(0) { } - constexpr JitPrimeInfo(unsigned p, unsigned m, unsigned s) : 
prime(p), magic(m), shift(s) + constexpr JitPrimeInfo(unsigned p, unsigned m, unsigned s) + : prime(p) + , magic(m) + , shift(s) { } unsigned prime; @@ -130,7 +136,10 @@ class JitHashTable Value m_val; template - Node(Node* next, Key k, Args&&... args) : m_next(next), m_key(k), m_val(std::forward(args)...) + Node(Node* next, Key k, Args&&... args) + : m_next(next) + , m_key(k) + , m_val(std::forward(args)...) { } @@ -166,7 +175,12 @@ class JitHashTable // JitHashTable always starts out empty, with no allocation overhead. // Call Reallocate to prime with an initial size if desired. // - JitHashTable(Allocator alloc) : m_alloc(alloc), m_table(nullptr), m_tableSizeInfo(), m_tableCount(0), m_tableMax(0) + JitHashTable(Allocator alloc) + : m_alloc(alloc) + , m_table(nullptr) + , m_tableSizeInfo() + , m_tableCount(0) + , m_tableMax(0) { #ifndef __GNUC__ // these crash GCC static_assert_no_msg(Behavior::s_growth_factor_numerator > Behavior::s_growth_factor_denominator); @@ -492,7 +506,8 @@ class JitHashTable class KeyIterator : public NodeIterator { public: - KeyIterator(const JitHashTable* hash, bool begin) : NodeIterator(hash, begin) + KeyIterator(const JitHashTable* hash, bool begin) + : NodeIterator(hash, begin) { } @@ -506,7 +521,8 @@ class JitHashTable class ValueIterator : public NodeIterator { public: - ValueIterator(const JitHashTable* hash, bool begin) : NodeIterator(hash, begin) + ValueIterator(const JitHashTable* hash, bool begin) + : NodeIterator(hash, begin) { } @@ -521,7 +537,8 @@ class JitHashTable class KeyValueIterator : public NodeIterator { public: - KeyValueIterator(const JitHashTable* hash, bool begin) : NodeIterator(hash, begin) + KeyValueIterator(const JitHashTable* hash, bool begin) + : NodeIterator(hash, begin) { } @@ -538,7 +555,8 @@ class JitHashTable const JitHashTable* const m_hash; public: - KeyIteration(const JitHashTable* hash) : m_hash(hash) + KeyIteration(const JitHashTable* hash) + : m_hash(hash) { } @@ -559,7 +577,8 @@ class 
JitHashTable const JitHashTable* const m_hash; public: - ValueIteration(const JitHashTable* hash) : m_hash(hash) + ValueIteration(const JitHashTable* hash) + : m_hash(hash) { } @@ -580,7 +599,8 @@ class JitHashTable const JitHashTable* const m_hash; public: - KeyValueIteration(const JitHashTable* hash) : m_hash(hash) + KeyValueIteration(const JitHashTable* hash) + : m_hash(hash) { } diff --git a/src/coreclr/jit/layout.cpp b/src/coreclr/jit/layout.cpp index 918fd4ab6521d4..ad4c0077c22bd3 100644 --- a/src/coreclr/jit/layout.cpp +++ b/src/coreclr/jit/layout.cpp @@ -21,7 +21,8 @@ class ClassLayoutTable typedef JitHashTable, unsigned> BlkLayoutIndexMap; typedef JitHashTable, unsigned> ObjLayoutIndexMap; - union { + union + { // Up to 3 layouts can be stored "inline" and finding a layout by handle/size can be done using linear search. // Most methods need no more than 2 layouts. ClassLayout* m_layoutArray[3]; @@ -43,7 +44,10 @@ class ClassLayoutTable ClassLayout m_zeroSizedBlockLayout; public: - ClassLayoutTable() : m_layoutCount(0), m_layoutLargeCapacity(0), m_zeroSizedBlockLayout(0) + ClassLayoutTable() + : m_layoutCount(0) + , m_layoutLargeCapacity(0) + , m_zeroSizedBlockLayout(0) { } diff --git a/src/coreclr/jit/layout.h b/src/coreclr/jit/layout.h index 59ecaa9405485d..3c6487e516b91c 100644 --- a/src/coreclr/jit/layout.h +++ b/src/coreclr/jit/layout.h @@ -30,7 +30,8 @@ class ClassLayout // Array of CorInfoGCType (as BYTE) that describes the GC layout of the class. // For small classes the array is stored inline, avoiding an extra allocation // and the pointer size overhead. 
- union { + union + { BYTE* m_gcPtrs; BYTE m_gcPtrsArray[sizeof(BYTE*)]; }; @@ -69,7 +70,7 @@ class ClassLayout ClassLayout(CORINFO_CLASS_HANDLE classHandle, bool isValueClass, unsigned size, - var_types type DEBUGARG(const char* className) DEBUGARG(const char* shortClassName)) + var_types type DEBUGARG(const char* className) DEBUGARG(const char* shortClassName)) : m_classHandle(classHandle) , m_size(size) , m_isValueClass(isValueClass) diff --git a/src/coreclr/jit/lclmorph.cpp b/src/coreclr/jit/lclmorph.cpp index 44b0afe1caf927..6b4c6cc693f9b2 100644 --- a/src/coreclr/jit/lclmorph.cpp +++ b/src/coreclr/jit/lclmorph.cpp @@ -14,7 +14,9 @@ class LocalSequencer final : public GenTreeVisitor UseExecutionOrder = true, }; - LocalSequencer(Compiler* comp) : GenTreeVisitor(comp), m_prevNode(nullptr) + LocalSequencer(Compiler* comp) + : GenTreeVisitor(comp) + , m_prevNode(nullptr) { } @@ -918,9 +920,9 @@ class LocalAddressVisitor final : public GenTreeVisitor break; #ifdef FEATURE_HW_INTRINSICS - // We have two cases we want to handle: - // 1. Vector2/3/4 and Quaternion where we have 4x float fields - // 2. Plane where we have 1x Vector3 and 1x float field + // We have two cases we want to handle: + // 1. Vector2/3/4 and Quaternion where we have 4x float fields + // 2. 
Plane where we have 1x Vector3 and 1x float field case IndirTransform::GetElement: { @@ -934,7 +936,7 @@ class LocalAddressVisitor final : public GenTreeVisitor { GenTree* indexNode = m_compiler->gtNewIconNode(offset / genTypeSize(elementType)); hwiNode = m_compiler->gtNewSimdGetElementNode(elementType, lclNode, indexNode, - CORINFO_TYPE_FLOAT, genTypeSize(varDsc)); + CORINFO_TYPE_FLOAT, genTypeSize(varDsc)); break; } case TYP_SIMD12: diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 50997980ca7488..2753ce1978e413 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -332,9 +332,9 @@ void Compiler::lvaInitTypeRef() } if ( // If there already exist unsafe buffers, don't mark more structs as unsafe - // as that will cause them to be placed along with the real unsafe buffers, - // unnecessarily exposing them to overruns. This can affect GS tests which - // intentionally do buffer-overruns. + // as that will cause them to be placed along with the real unsafe buffers, + // unnecessarily exposing them to overruns. This can affect GS tests which + // intentionally do buffer-overruns. 
!getNeedsGSSecurityCookie() && // GS checks require the stack to be re-ordered, which can't be done with EnC !opts.compDbgEnC && compStressCompile(STRESS_UNSAFE_BUFFER_CHECKS, 25)) @@ -440,7 +440,7 @@ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo) lvaInitRetBuffArg(varDscInfo, true); } -//====================================================================== + //====================================================================== #if USER_ARGS_COME_LAST //@GENERICS: final instantiation-info argument for shared generic methods @@ -602,9 +602,9 @@ void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBuf // void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs) { -//------------------------------------------------------------------------- -// Walk the function signature for the explicit arguments -//------------------------------------------------------------------------- + //------------------------------------------------------------------------- + // Walk the function signature for the explicit arguments + //------------------------------------------------------------------------- #if defined(TARGET_X86) // Only (some of) the implicit args are enregistered for varargs @@ -1319,8 +1319,8 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un #if defined(TARGET_X86) varDsc->SetStackOffset(compArgSize); #else // !TARGET_X86 - // TODO-CQ: We shouldn't have to go as far as to declare these - // address-exposed -- DoNotEnregister should suffice. + // TODO-CQ: We shouldn't have to go as far as to declare these + // address-exposed -- DoNotEnregister should suffice. lvaSetVarAddrExposed(varDscInfo->varNum DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); #endif // !TARGET_X86 @@ -1926,7 +1926,9 @@ void Compiler::lvSetMinOptsDoNotEnreg() // Arguments: // compiler - pointer to a compiler to get access to an allocator, compHandle etc. 
// -Compiler::StructPromotionHelper::StructPromotionHelper(Compiler* compiler) : compiler(compiler), structPromotionInfo() +Compiler::StructPromotionHelper::StructPromotionHelper(Compiler* compiler) + : compiler(compiler) + , structPromotionInfo() { } @@ -2505,12 +2507,12 @@ bool Compiler::StructPromotionHelper::ShouldPromoteStructVar(unsigned lclNum) // with something else occupying the same 4-byte slot, it will // overwrite other fields. if (structPromotionInfo.fieldCnt != 1) - { - JITDUMP("Not promoting promotable struct local V%02u, because lvIsParam is true and #fields = " - "%d.\n", - lclNum, structPromotionInfo.fieldCnt); - shouldPromote = false; - } + { + JITDUMP("Not promoting promotable struct local V%02u, because lvIsParam is true and #fields = " + "%d.\n", + lclNum, structPromotionInfo.fieldCnt); + shouldPromote = false; + } } else if ((lclNum == compiler->genReturnLocal) && (structPromotionInfo.fieldCnt > 1)) { @@ -2549,8 +2551,8 @@ void Compiler::StructPromotionHelper::SortStructFields() { jitstd::sort(structPromotionInfo.fields, structPromotionInfo.fields + structPromotionInfo.fieldCnt, [](const lvaStructFieldInfo& lhs, const lvaStructFieldInfo& rhs) { - return lhs.fldOffset < rhs.fldOffset; - }); + return lhs.fldOffset < rhs.fldOffset; + }); structPromotionInfo.fieldsSorted = true; } } @@ -2605,7 +2607,7 @@ void Compiler::StructPromotionHelper::PromoteStructVar(unsigned lclNum) compiler->compFloatingPointUsed = true; } -// Now grab the temp for the field local. + // Now grab the temp for the field local. #ifdef DEBUG char fieldNameBuffer[128]; @@ -3809,8 +3811,8 @@ void Compiler::lvaSortByRefCount() if (varDsc->IsAddressExposed()) { varDsc->lvTracked = 0; - assert(varDsc->lvType != TYP_STRUCT || - varDsc->lvDoNotEnregister); // For structs, should have set this when we set m_addrExposed. + assert(varDsc->lvType != TYP_STRUCT || varDsc->lvDoNotEnregister); // For structs, should have set this when + // we set m_addrExposed. 
} if (varTypeIsStruct(varDsc)) { @@ -4042,8 +4044,8 @@ unsigned LclVarDsc::lvSize() const // Size needed for storage representation. On } /********************************************************************************** -* Get stack size of the varDsc. -*/ + * Get stack size of the varDsc. + */ size_t LclVarDsc::lvArgStackSize() const { // Make sure this will have a stack size @@ -4463,7 +4465,10 @@ void Compiler::lvaMarkLocalVars(BasicBlock* block, bool isRecompute) }; MarkLocalVarsVisitor(Compiler* compiler, BasicBlock* block, Statement* stmt, bool isRecompute) - : GenTreeVisitor(compiler), m_block(block), m_stmt(stmt), m_isRecompute(isRecompute) + : GenTreeVisitor(compiler) + , m_block(block) + , m_stmt(stmt) + , m_isRecompute(isRecompute) { } @@ -4888,11 +4893,11 @@ inline void Compiler::lvaIncrementFrameSize(unsigned size) } /**************************************************************************** -* -* Return true if absolute offsets of temps are larger than vars, or in other -* words, did we allocate temps before of after vars. The /GS buffer overrun -* checks want temps to be at low stack addresses than buffers -*/ + * + * Return true if absolute offsets of temps are larger than vars, or in other + * words, did we allocate temps before of after vars. 
The /GS buffer overrun + * checks want temps to be at low stack addresses than buffers + */ bool Compiler::lvaTempsHaveLargerOffsetThanVars() { #ifdef TARGET_ARM @@ -4911,10 +4916,10 @@ bool Compiler::lvaTempsHaveLargerOffsetThanVars() } /**************************************************************************** -* -* Return an upper bound estimate for the size of the compiler spill temps -* -*/ + * + * Return an upper bound estimate for the size of the compiler spill temps + * + */ unsigned Compiler::lvaGetMaxSpillTempSize() { unsigned result = 0; @@ -5531,7 +5536,7 @@ void Compiler::lvaFixVirtualFrameOffsets() #endif ) #endif // !defined(TARGET_AMD64) - ) + ) { doAssignStkOffs = false; // Not on frame or an incoming stack arg } @@ -5552,8 +5557,8 @@ void Compiler::lvaFixVirtualFrameOffsets() // We need to re-adjust the offsets of the parameters so they are EBP // relative rather than stack/frame pointer relative - varDsc->SetStackOffset(varDsc->GetStackOffset() + - (2 * TARGET_POINTER_SIZE)); // return address and pushed EBP + varDsc->SetStackOffset(varDsc->GetStackOffset() + (2 * TARGET_POINTER_SIZE)); // return address and + // pushed EBP noway_assert(varDsc->GetStackOffset() >= FIRST_ARG_STACK_OFFS); } @@ -5731,7 +5736,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); } #elif !defined(UNIX_AMD64_ABI) - argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); + argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); #endif // TARGET_X86 lclNum++; userArgsToSkip++; @@ -5892,8 +5897,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() // ret address slot, stack frame padding, alloca instructions, etc. // Note: This is the implementation for UNIX_AMD64 System V platforms. 
// -int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, - unsigned argSize, +int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, + unsigned argSize, int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset)) { noway_assert(lclNum < info.compArgsCount); @@ -5984,8 +5989,8 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, // The final offset is calculated in lvaFixVirtualFrameOffsets method. It accounts for FP existence, // ret address slot, stack frame padding, alloca instructions, etc. // Note: This implementation for all the platforms but UNIX_AMD64 OSs (System V 64 bit.) -int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, - unsigned argSize, +int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, + unsigned argSize, int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset)) { noway_assert(lclNum < info.compArgsCount); @@ -6213,8 +6218,8 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, (codeGen->regSet.rsMaskPreSpillAlign & genRegMask(REG_ARG_LAST)); noway_assert(cond); - noway_assert(sizeofPreSpillRegArgs <= - argOffs + TARGET_POINTER_SIZE); // at most one register of alignment + noway_assert(sizeofPreSpillRegArgs <= argOffs + TARGET_POINTER_SIZE); // at most one register of + // alignment } argOffs = sizeofPreSpillRegArgs; } @@ -6385,8 +6390,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() stkOffs -= initialStkOffs; } - if (codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() || - !isFramePointerUsed()) // Note that currently we always have a frame pointer + if (codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() || !isFramePointerUsed()) // Note that currently we always have + // a frame pointer { stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES; } @@ -7126,8 +7131,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() #endif // FEATURE_EH_FUNCLETS && defined(TARGET_AMD64) #ifdef TARGET_ARM64 - if (!codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() && - isFramePointerUsed()) 
// Note that currently we always have a frame pointer + if (!codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() && isFramePointerUsed()) // Note that currently we always have + // a frame pointer { // Create space for saving FP and LR. stkOffs -= 2 * REGSIZE_BYTES; @@ -7412,9 +7417,9 @@ void Compiler::lvaAlignFrame() } // Align the stack with STACK_ALIGN value. - int adjustFrameSize = compLclFrameSize; + int adjustFrameSize = compLclFrameSize; #if defined(UNIX_X86_ABI) - bool isEbpPushed = codeGen->isFramePointerUsed(); + bool isEbpPushed = codeGen->isFramePointerUsed(); #if DOUBLE_ALIGN isEbpPushed |= genDoubleAlign(); #endif @@ -7892,9 +7897,9 @@ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t r } /***************************************************************************** -* -* dump the lvaTable -*/ + * + * dump the lvaTable + */ void Compiler::lvaTableDump(FrameLayoutState curState) { diff --git a/src/coreclr/jit/likelyclass.cpp b/src/coreclr/jit/likelyclass.cpp index fa0839725c9fb7..e181a2e9a135ab 100644 --- a/src/coreclr/jit/likelyclass.cpp +++ b/src/coreclr/jit/likelyclass.cpp @@ -255,8 +255,8 @@ static unsigned getLikelyClassesOrMethods(LikelyClassMethodRecord* jitstd::sort(sortedEntries, sortedEntries + knownHandles, [](const LikelyClassMethodHistogramEntry& h1, const LikelyClassMethodHistogramEntry& h2) -> bool { - return h1.m_count > h2.m_count; - }); + return h1.m_count > h2.m_count; + }); const UINT32 numberOfClasses = min(knownHandles, maxLikelyClasses); @@ -410,7 +410,9 @@ extern "C" DLLEXPORT UINT32 WINAPI getLikelyValues(LikelyValueRecord* // sort by m_count (descending) jitstd::sort(sortedEntries, sortedEntries + h.countHistogramElements, [](const LikelyClassMethodHistogramEntry& h1, - const LikelyClassMethodHistogramEntry& h2) -> bool { return h1.m_count > h2.m_count; }); + const LikelyClassMethodHistogramEntry& h2) -> bool { + return h1.m_count > h2.m_count; + }); const UINT32 numberOfLikelyConst = 
min(h.countHistogramElements, maxLikelyValues); diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp index b10bd98ff6221c..d172cea22d369a 100644 --- a/src/coreclr/jit/lir.cpp +++ b/src/coreclr/jit/lir.cpp @@ -9,7 +9,10 @@ #pragma hdrstop #endif -LIR::Use::Use() : m_range(nullptr), m_edge(nullptr), m_user(nullptr) +LIR::Use::Use() + : m_range(nullptr) + , m_edge(nullptr) + , m_user(nullptr) { } @@ -30,7 +33,10 @@ LIR::Use::Use(const Use& other) // // Return Value: // -LIR::Use::Use(Range& range, GenTree** edge, GenTree* user) : m_range(&range), m_edge(edge), m_user(user) +LIR::Use::Use(Range& range, GenTree** edge, GenTree* user) + : m_range(&range) + , m_edge(edge) + , m_user(user) { AssertIsValid(); } @@ -280,11 +286,15 @@ unsigned LIR::Use::ReplaceWithLclVar(Compiler* compiler, unsigned lclNum, GenTre return lclNum; } -LIR::ReadOnlyRange::ReadOnlyRange() : m_firstNode(nullptr), m_lastNode(nullptr) +LIR::ReadOnlyRange::ReadOnlyRange() + : m_firstNode(nullptr) + , m_lastNode(nullptr) { } -LIR::ReadOnlyRange::ReadOnlyRange(ReadOnlyRange&& other) : m_firstNode(other.m_firstNode), m_lastNode(other.m_lastNode) +LIR::ReadOnlyRange::ReadOnlyRange(ReadOnlyRange&& other) + : m_firstNode(other.m_firstNode) + , m_lastNode(other.m_lastNode) { #ifdef DEBUG other.m_firstNode = nullptr; @@ -301,7 +311,9 @@ LIR::ReadOnlyRange::ReadOnlyRange(ReadOnlyRange&& other) : m_firstNode(other.m_f // firstNode - The first node in the range. // lastNode - The last node in the range. 
// -LIR::ReadOnlyRange::ReadOnlyRange(GenTree* firstNode, GenTree* lastNode) : m_firstNode(firstNode), m_lastNode(lastNode) +LIR::ReadOnlyRange::ReadOnlyRange(GenTree* firstNode, GenTree* lastNode) + : m_firstNode(firstNode) + , m_lastNode(lastNode) { assert((m_firstNode == nullptr) == (m_lastNode == nullptr)); assert((m_firstNode == m_lastNode) || (Contains(m_lastNode))); @@ -426,11 +438,13 @@ bool LIR::ReadOnlyRange::Contains(GenTree* node) const #endif -LIR::Range::Range() : ReadOnlyRange() +LIR::Range::Range() + : ReadOnlyRange() { } -LIR::Range::Range(Range&& other) : ReadOnlyRange(std::move(other)) +LIR::Range::Range(Range&& other) + : ReadOnlyRange(std::move(other)) { } @@ -442,7 +456,8 @@ LIR::Range::Range(Range&& other) : ReadOnlyRange(std::move(other)) // firstNode - The first node in the range. // lastNode - The last node in the range. // -LIR::Range::Range(GenTree* firstNode, GenTree* lastNode) : ReadOnlyRange(firstNode, lastNode) +LIR::Range::Range(GenTree* firstNode, GenTree* lastNode) + : ReadOnlyRange(firstNode, lastNode) { } @@ -1186,7 +1201,7 @@ bool LIR::Range::TryGetUse(GenTree* node, Use* use) // Returns: // The computed subrange. // -template +template LIR::ReadOnlyRange LIR::Range::GetMarkedRange(unsigned markCount, GenTree* start, bool* isClosed, @@ -1406,8 +1421,8 @@ class CheckLclVarSemanticsHelper // range - a range to do the check. // unusedDefs - map of defs that do no have users. 
// - CheckLclVarSemanticsHelper(Compiler* compiler, - const LIR::Range* range, + CheckLclVarSemanticsHelper(Compiler* compiler, + const LIR::Range* range, SmallHashTable& unusedDefs) : compiler(compiler) , range(range) @@ -1554,7 +1569,7 @@ class CheckLclVarSemanticsHelper void PopLclVarRead(const AliasSet::NodeInfo& defInfo) { SmallHashTable* reads; - const bool foundReads = unusedLclVarReads.TryGetValue(defInfo.LclNum(), &reads); + const bool foundReads = unusedLclVarReads.TryGetValue(defInfo.LclNum(), &reads); assert(foundReads); bool found = reads->TryRemove(defInfo.Node()); @@ -1569,11 +1584,11 @@ class CheckLclVarSemanticsHelper } private: - Compiler* compiler; - const LIR::Range* range; - SmallHashTable& unusedDefs; + Compiler* compiler; + const LIR::Range* range; + SmallHashTable& unusedDefs; SmallHashTable*, 16U> unusedLclVarReads; - ArrayStack*> lclVarReadsMapsCache; + ArrayStack*> lclVarReadsMapsCache; }; //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/lir.h b/src/coreclr/jit/lir.h index 9b4f940bc0ae38..8a3a9a507a38bb 100644 --- a/src/coreclr/jit/lir.h +++ b/src/coreclr/jit/lir.h @@ -73,7 +73,7 @@ class LIR final void AssertIsValid() const; bool IsDummyUse() const; - void ReplaceWith(GenTree* replacement); + void ReplaceWith(GenTree* replacement); unsigned ReplaceWithLclVar(Compiler* compiler, unsigned lclNum = BAD_VAR_NUM, GenTree** pStore = nullptr); }; @@ -113,7 +113,7 @@ class LIR final GenTree* m_firstNode; GenTree* m_lastNode; - ReadOnlyRange(const ReadOnlyRange& other) = delete; + ReadOnlyRange(const ReadOnlyRange& other) = delete; ReadOnlyRange& operator=(const ReadOnlyRange& other) = delete; public: @@ -125,12 +125,14 @@ class LIR final GenTree* m_node; - Iterator(GenTree* begin) : m_node(begin) + Iterator(GenTree* begin) + : m_node(begin) { } public: - Iterator() : m_node(nullptr) + Iterator() + : m_node(nullptr) { } @@ -167,12 +169,14 @@ class LIR final GenTree* m_node; - 
ReverseIterator(GenTree* begin) : m_node(begin) + ReverseIterator(GenTree* begin) + : m_node(begin) { } public: - ReverseIterator() : m_node(nullptr) + ReverseIterator() + : m_node(nullptr) { } @@ -245,7 +249,7 @@ class LIR final private: Range(GenTree* firstNode, GenTree* lastNode); - Range(const Range& other) = delete; + Range(const Range& other) = delete; Range& operator=(const Range& other) = delete; template @@ -280,7 +284,7 @@ class LIR final void InsertAtBeginning(Range&& range); void InsertAtEnd(Range&& range); - void Remove(GenTree* node, bool markOperandsUnused = false); + void Remove(GenTree* node, bool markOperandsUnused = false); Range Remove(GenTree* firstNode, GenTree* lastNode); Range Remove(ReadOnlyRange&& range); @@ -303,7 +307,7 @@ class LIR final }; public: - static Range& AsRange(BasicBlock* block); + static Range& AsRange(BasicBlock* block); static const Range& AsRange(const BasicBlock* block); static Range EmptyRange(); diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 78fb96fe3d77d2..7f413b75d6649e 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -811,10 +811,10 @@ void Compiler::fgExtendDbgLifetimes() fgExtendDbgScopes(); -/*------------------------------------------------------------------------- - * Partly update liveness info so that we handle any funky BBF_INTERNAL - * blocks inserted out of sequence. - */ + /*------------------------------------------------------------------------- + * Partly update liveness info so that we handle any funky BBF_INTERNAL + * blocks inserted out of sequence. 
+ */ #ifdef DEBUG if (verbose && 0) @@ -1005,7 +1005,7 @@ void Compiler::fgExtendDbgLifetimes() // So just ensure that they don't have a 0 ref cnt unsigned lclNum = 0; - for (LclVarDsc *varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) + for (LclVarDsc* varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { if (lclNum >= info.compArgsCount) { @@ -1676,10 +1676,10 @@ GenTree* Compiler::fgTryRemoveDeadStoreEarly(Statement* stmt, GenTreeLclVarCommo * or subtree of a statement moving backward from startNode to endNode */ -void Compiler::fgComputeLife(VARSET_TP& life, - GenTree* startNode, - GenTree* endNode, - VARSET_VALARG_TP volatileVars, +void Compiler::fgComputeLife(VARSET_TP& life, + GenTree* startNode, + GenTree* endNode, + VARSET_VALARG_TP volatileVars, bool* pStmtInfoDirty DEBUGARG(bool* treeModf)) { // Don't kill vars in scope @@ -2116,11 +2116,11 @@ bool Compiler::fgTryRemoveDeadStoreLIR(GenTree* store, GenTreeLclVarCommon* lclN // Return Value: // true if we should skip the rest of the statement, false if we should continue // -bool Compiler::fgRemoveDeadStore(GenTree** pTree, - LclVarDsc* varDsc, - VARSET_VALARG_TP life, - bool* doAgain, - bool* pStmtInfoDirty, +bool Compiler::fgRemoveDeadStore(GenTree** pTree, + LclVarDsc* varDsc, + VARSET_VALARG_TP life, + bool* doAgain, + bool* pStmtInfoDirty, bool* pStoreRemoved DEBUGARG(bool* treeModf)) { assert(!compRationalIRForm); @@ -2186,7 +2186,7 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, #ifdef DEBUG *treeModf = true; #endif // DEBUG - // Update ordering, costs, FP levels, etc. + // Update ordering, costs, FP levels, etc. 
gtSetStmtInfo(compCurStmt); // Re-link the nodes for this statement @@ -2278,7 +2278,7 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, printf("\n"); } #endif // DEBUG - // No side effects - Change the store to a GT_NOP node + // No side effects - Change the store to a GT_NOP node store->gtBashToNOP(); #ifdef DEBUG diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index 7283cc0d1d88c2..4c5ffe247634a4 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -1371,7 +1371,7 @@ bool Compiler::optDeriveLoopCloningConditions(FlowGraphNaturalLoop* loop, LoopCl LcMdArrayOptInfo* mdArrInfo = optInfo->AsLcMdArrayOptInfo(); LC_Array arrLen(LC_Array(LC_Array::MdArray, mdArrInfo->GetArrIndexForDim(getAllocator(CMK_LoopClone)), mdArrInfo->dim, LC_Array::None)); - LC_Ident arrLenIdent = LC_Ident::CreateArrAccess(arrLen); + LC_Ident arrLenIdent = LC_Ident::CreateArrAccess(arrLen); LC_Condition cond(opLimitCondition, LC_Expr(ident), LC_Expr(arrLenIdent)); context->EnsureConditions(loop->GetIndex())->Push(cond); @@ -1666,7 +1666,7 @@ void Compiler::optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore // performs the optimizations assuming that the path in which the candidates // were collected is the fast path in which the optimizations will be performed. 
// -void Compiler::optPerformStaticOptimizations(FlowGraphNaturalLoop* loop, +void Compiler::optPerformStaticOptimizations(FlowGraphNaturalLoop* loop, LoopCloneContext* context DEBUGARG(bool dynamicPath)) { JitExpandArrayStack* optInfos = context->GetLoopOptInfo(loop->GetIndex()); diff --git a/src/coreclr/jit/loopcloning.h b/src/coreclr/jit/loopcloning.h index 64e810be6ff424..20f041eab40a52 100644 --- a/src/coreclr/jit/loopcloning.h +++ b/src/coreclr/jit/loopcloning.h @@ -196,7 +196,12 @@ struct ArrIndex unsigned rank; // Rank of the array BasicBlock* useBlock; // Block where the [] occurs - ArrIndex(CompAllocator alloc) : arrLcl(BAD_VAR_NUM), indLcls(alloc), bndsChks(alloc), rank(0), useBlock(nullptr) + ArrIndex(CompAllocator alloc) + : arrLcl(BAD_VAR_NUM) + , indLcls(alloc) + , bndsChks(alloc) + , rank(0) + , useBlock(nullptr) { } @@ -236,7 +241,8 @@ struct LcOptInfo }; OptType optType; - LcOptInfo(OptType optType) : optType(optType) + LcOptInfo(OptType optType) + : optType(optType) { } @@ -267,7 +273,10 @@ struct LcMdArrayOptInfo : public LcOptInfo ArrIndex* index; // "index" cached computation in the form of an ArrIndex representation. LcMdArrayOptInfo(GenTreeArrElem* arrElem, unsigned dim) - : LcOptInfo(LcMdArray), arrElem(arrElem), dim(dim), index(nullptr) + : LcOptInfo(LcMdArray) + , arrElem(arrElem) + , dim(dim) + , index(nullptr) { } @@ -300,7 +309,10 @@ struct LcJaggedArrayOptInfo : public LcOptInfo Statement* stmt; // "stmt" where the optimization opportunity occurs. 
LcJaggedArrayOptInfo(ArrIndex& arrIndex, unsigned dim, Statement* stmt) - : LcOptInfo(LcJaggedArray), dim(dim), arrIndex(arrIndex), stmt(stmt) + : LcOptInfo(LcJaggedArray) + , dim(dim) + , arrIndex(arrIndex) + , stmt(stmt) { } }; @@ -319,7 +331,11 @@ struct LcTypeTestOptInfo : public LcOptInfo CORINFO_CLASS_HANDLE clsHnd; LcTypeTestOptInfo(Statement* stmt, GenTreeIndir* methodTableIndir, unsigned lclNum, CORINFO_CLASS_HANDLE clsHnd) - : LcOptInfo(LcTypeTest), stmt(stmt), methodTableIndir(methodTableIndir), lclNum(lclNum), clsHnd(clsHnd) + : LcOptInfo(LcTypeTest) + , stmt(stmt) + , methodTableIndir(methodTableIndir) + , lclNum(lclNum) + , clsHnd(clsHnd) { } }; @@ -343,7 +359,7 @@ struct LcMethodAddrTestOptInfo : public LcOptInfo GenTreeIndir* delegateAddressIndir, unsigned delegateLclNum, void* methAddr, - bool isSlot DEBUG_ARG(CORINFO_METHOD_HANDLE targetMethHnd)) + bool isSlot DEBUG_ARG(CORINFO_METHOD_HANDLE targetMethHnd)) : LcOptInfo(LcMethodAddrTest) , stmt(stmt) , delegateAddressIndir(delegateAddressIndir) @@ -393,15 +409,24 @@ struct LC_Array int dim; // "dim" = which index to invoke arrLen on, if -1 invoke on the whole array // Example 1: a[0][1][2] and dim = 2 implies a[0][1].length // Example 2: a[0][1][2] and dim = -1 implies a[0][1][2].length - LC_Array() : type(Invalid), dim(-1) + LC_Array() + : type(Invalid) + , dim(-1) { } LC_Array(ArrType type, ArrIndex* arrIndex, int dim, OperType oper) - : type(type), arrIndex(arrIndex), oper(oper), dim(dim) + : type(type) + , arrIndex(arrIndex) + , oper(oper) + , dim(dim) { } - LC_Array(ArrType type, ArrIndex* arrIndex, OperType oper) : type(type), arrIndex(arrIndex), oper(oper), dim(-1) + LC_Array(ArrType type, ArrIndex* arrIndex, OperType oper) + : type(type) + , arrIndex(arrIndex) + , oper(oper) + , dim(-1) { } @@ -464,7 +489,8 @@ struct LC_Ident }; private: - union { + union + { unsigned constant; struct { @@ -482,7 +508,8 @@ struct LC_Ident }; }; - LC_Ident(IdentType type) : type(type) + LC_Ident(IdentType 
type) + : type(type) { } @@ -490,7 +517,8 @@ struct LC_Ident // The type of this object IdentType type; - LC_Ident() : type(Invalid) + LC_Ident() + : type(Invalid) { } @@ -680,10 +708,13 @@ struct LC_Expr } #endif - LC_Expr() : type(Invalid) + LC_Expr() + : type(Invalid) { } - explicit LC_Expr(const LC_Ident& ident) : ident(ident), type(Ident) + explicit LC_Expr(const LC_Ident& ident) + : ident(ident) + , type(Ident) { } @@ -724,7 +755,10 @@ struct LC_Condition { } LC_Condition(genTreeOps oper, const LC_Expr& op1, const LC_Expr& op2, bool asUnsigned = false) - : op1(op1), op2(op2), oper(oper), compareUnsigned(asUnsigned) + : op1(op1) + , op2(op2) + , oper(oper) + , compareUnsigned(asUnsigned) { } @@ -756,7 +790,10 @@ struct LC_ArrayDeref unsigned level; - LC_ArrayDeref(const LC_Array& array, unsigned level) : array(array), children(nullptr), level(level) + LC_ArrayDeref(const LC_Array& array, unsigned level) + : array(array) + , children(nullptr) + , level(level) { } @@ -764,8 +801,8 @@ struct LC_ArrayDeref unsigned Lcl(); - bool HasChildren(); - void EnsureChildren(CompAllocator alloc); + bool HasChildren(); + void EnsureChildren(CompAllocator alloc); static LC_ArrayDeref* Find(JitExpandArrayStack* children, unsigned lcl); void DeriveLevelConditions(JitExpandArrayStack*>* len); @@ -859,7 +896,7 @@ struct LoopCloneContext } NaturalLoopIterInfo* GetLoopIterInfo(unsigned loopNum); - void SetLoopIterInfo(unsigned loopNum, NaturalLoopIterInfo* info); + void SetLoopIterInfo(unsigned loopNum, NaturalLoopIterInfo* info); // Evaluate conditions into a JTRUE stmt and put it in a new block after `insertAfter`. 
BasicBlock* CondToStmtInBlock(Compiler* comp, diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 935479beff01b3..e5e7aa9dbd301a 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -1163,8 +1163,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // |____ (ICon) (The actual case constant) GenTree* gtCaseCond = comp->gtNewOperNode(GT_EQ, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType), comp->gtNewIconNode(i, genActualType(tempLclType))); - GenTree* gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond); - LIR::Range caseRange = LIR::SeqTree(comp, gtCaseBranch); + GenTree* gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond); + LIR::Range caseRange = LIR::SeqTree(comp, gtCaseBranch); currentBBRange->InsertAtEnd(std::move(caseRange)); } } @@ -3887,7 +3887,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) #ifdef TARGET_XARCH || IsContainableMemoryOp(castOp) #endif - ); + ); if (removeCast) { @@ -4771,10 +4771,10 @@ void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore) } convertToStoreObj = false; #else // TARGET_ARM64 - // This optimization on arm64 allows more SIMD16 vars to be enregistered but it could cause - // regressions when there are many calls and before/after each one we have to store/save the upper - // half of these registers. So enable this for arm64 only when LSRA is taught not to allocate registers when - // it would have to spilled too many times. + // This optimization on arm64 allows more SIMD16 vars to be enregistered but it could cause + // regressions when there are many calls and before/after each one we have to store/save the upper + // half of these registers. So enable this for arm64 only when LSRA is taught not to allocate registers when + // it would have to spilled too many times. 
convertToStoreObj = true; #endif // TARGET_ARM64 } @@ -5091,8 +5091,8 @@ void Lowering::LowerCallStruct(GenTreeCall* call) break; } #endif // FEATURE_SIMD - // importer has a separate mechanism to retype calls to helpers, - // keep it for now. + // importer has a separate mechanism to retype calls to helpers, + // keep it for now. assert(user->TypeIs(TYP_REF) || (user->TypeIs(TYP_I_IMPL) && comp->IsTargetAbi(CORINFO_NATIVEAOT_ABI))); assert(call->IsHelperCall()); assert(returnType == user->TypeGet()); @@ -8086,7 +8086,7 @@ void Lowering::ContainCheckNode(GenTree* node) #if FEATURE_ARG_SPLIT case GT_PUTARG_SPLIT: #endif // FEATURE_ARG_SPLIT - // The regNum must have been set by the lowering of the call. + // The regNum must have been set by the lowering of the call. assert(node->GetRegNum() != REG_NA); break; #ifdef TARGET_XARCH @@ -8799,7 +8799,7 @@ void Lowering::LowerStoreIndirCommon(GenTreeStoreInd* ind) // const bool isContainable = IsInvariantInRange(ind->Addr(), ind); #else - const bool isContainable = true; + const bool isContainable = true; #endif TryCreateAddrMode(ind->Addr(), isContainable, ind); @@ -8863,7 +8863,7 @@ GenTree* Lowering::LowerIndir(GenTreeIndir* ind) // const bool isContainable = IsInvariantInRange(ind->Addr(), ind); #else - const bool isContainable = true; + const bool isContainable = true; #endif TryCreateAddrMode(ind->Addr(), isContainable, ind); @@ -9294,7 +9294,7 @@ void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, Bas #if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) bool useNullCheck = true; #elif defined(TARGET_ARM) - bool useNullCheck = false; + bool useNullCheck = false; #else // TARGET_XARCH bool useNullCheck = !ind->Addr()->isContained(); ind->ClearDontExtend(); @@ -9533,7 +9533,7 @@ void Lowering::TryRetypingFloatingPointStoreToIntegerStore(GenTree* store) #if defined(TARGET_XARCH) || defined(TARGET_ARM) bool shouldSwitchToInteger = true; #else // TARGET_ARM64 || 
TARGET_LOONGARCH64 || TARGET_RISCV64 - bool shouldSwitchToInteger = FloatingPointUtils::isPositiveZero(dblCns); + bool shouldSwitchToInteger = FloatingPointUtils::isPositiveZero(dblCns); #endif if (shouldSwitchToInteger) diff --git a/src/coreclr/jit/lower.h b/src/coreclr/jit/lower.h index 76124820944f3c..318a148ee9c1c9 100644 --- a/src/coreclr/jit/lower.h +++ b/src/coreclr/jit/lower.h @@ -88,14 +88,14 @@ class Lowering final : public Phase void ContainCheckLclHeap(GenTreeOp* node); void ContainCheckRet(GenTreeUnOp* ret); #ifdef TARGET_ARM64 - bool TryLowerAndOrToCCMP(GenTreeOp* tree, GenTree** next); + bool TryLowerAndOrToCCMP(GenTreeOp* tree, GenTree** next); insCflags TruthifyingFlags(GenCondition cond); - void ContainCheckConditionalCompare(GenTreeCCMP* ccmp); - void ContainCheckNeg(GenTreeOp* neg); - void TryLowerCnsIntCselToCinc(GenTreeOp* select, GenTree* cond); - void TryLowerCselToCSOp(GenTreeOp* select, GenTree* cond); - bool TryLowerAddSubToMulLongOp(GenTreeOp* op, GenTree** next); - bool TryLowerNegToMulLongOp(GenTreeOp* op, GenTree** next); + void ContainCheckConditionalCompare(GenTreeCCMP* ccmp); + void ContainCheckNeg(GenTreeOp* neg); + void TryLowerCnsIntCselToCinc(GenTreeOp* select, GenTree* cond); + void TryLowerCselToCSOp(GenTreeOp* select, GenTree* cond); + bool TryLowerAddSubToMulLongOp(GenTreeOp* op, GenTree** next); + bool TryLowerNegToMulLongOp(GenTreeOp* op, GenTree** next); #endif void ContainCheckSelect(GenTreeOp* select); void ContainCheckBitCast(GenTree* node); @@ -129,7 +129,7 @@ class Lowering final : public Phase static bool CheckBlock(Compiler* compiler, BasicBlock* block); #endif // DEBUG - void LowerBlock(BasicBlock* block); + void LowerBlock(BasicBlock* block); GenTree* LowerNode(GenTree* node); bool IsCFGCallArgInvariantInRange(GenTree* node, GenTree* endExclusive); @@ -138,28 +138,28 @@ class Lowering final : public Phase // Call Lowering // ------------------------------ GenTree* LowerCall(GenTree* call); - bool 
LowerCallMemmove(GenTreeCall* call, GenTree** next); - bool LowerCallMemcmp(GenTreeCall* call, GenTree** next); - bool LowerCallMemset(GenTreeCall* call, GenTree** next); - void LowerCFGCall(GenTreeCall* call); - void MoveCFGCallArgs(GenTreeCall* call); - void MoveCFGCallArg(GenTreeCall* call, GenTree* node); + bool LowerCallMemmove(GenTreeCall* call, GenTree** next); + bool LowerCallMemcmp(GenTreeCall* call, GenTree** next); + bool LowerCallMemset(GenTreeCall* call, GenTree** next); + void LowerCFGCall(GenTreeCall* call); + void MoveCFGCallArgs(GenTreeCall* call); + void MoveCFGCallArg(GenTreeCall* call, GenTree* node); #ifndef TARGET_64BIT GenTree* DecomposeLongCompare(GenTree* cmp); #endif - GenTree* OptimizeConstCompare(GenTree* cmp); - GenTree* LowerCompare(GenTree* cmp); - GenTree* LowerJTrue(GenTreeOp* jtrue); - GenTree* LowerSelect(GenTreeConditional* cond); - bool TryLowerConditionToFlagsNode(GenTree* parent, GenTree* condition, GenCondition* code); + GenTree* OptimizeConstCompare(GenTree* cmp); + GenTree* LowerCompare(GenTree* cmp); + GenTree* LowerJTrue(GenTreeOp* jtrue); + GenTree* LowerSelect(GenTreeConditional* cond); + bool TryLowerConditionToFlagsNode(GenTree* parent, GenTree* condition, GenCondition* code); GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition); - void LowerJmpMethod(GenTree* jmp); - void LowerRet(GenTreeUnOp* ret); - void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar); - void LowerRetStruct(GenTreeUnOp* ret); - void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret); - void LowerCallStruct(GenTreeCall* call); - void LowerStoreSingleRegCallStruct(GenTreeBlk* store); + void LowerJmpMethod(GenTree* jmp); + void LowerRet(GenTreeUnOp* ret); + void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar); + void LowerRetStruct(GenTreeUnOp* ret); + void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret); + void LowerCallStruct(GenTreeCall* call); + void LowerStoreSingleRegCallStruct(GenTreeBlk* store); #if !defined(WINDOWS_AMD64_ABI) 
GenTreeLclVar* SpillStructCallResult(GenTreeCall* call) const; #endif // WINDOWS_AMD64_ABI @@ -168,29 +168,29 @@ class Lowering final : public Phase GenTree* LowerDirectCall(GenTreeCall* call); GenTree* LowerNonvirtPinvokeCall(GenTreeCall* call); GenTree* LowerTailCallViaJitHelper(GenTreeCall* callNode, GenTree* callTarget); - void LowerFastTailCall(GenTreeCall* callNode); - void RehomeArgForFastTailCall(unsigned int lclNum, - GenTree* insertTempBefore, - GenTree* lookForUsesStart, - GenTreeCall* callNode); - void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint); + void LowerFastTailCall(GenTreeCall* callNode); + void RehomeArgForFastTailCall(unsigned int lclNum, + GenTree* insertTempBefore, + GenTree* lookForUsesStart, + GenTreeCall* callNode); + void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint); GenTree* FindEarliestPutArg(GenTreeCall* call); - size_t MarkPutArgNodes(GenTree* node); + size_t MarkPutArgNodes(GenTree* node); GenTree* LowerVirtualVtableCall(GenTreeCall* call); GenTree* LowerVirtualStubCall(GenTreeCall* call); - void LowerArgsForCall(GenTreeCall* call); - void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode); + void LowerArgsForCall(GenTreeCall* call); + void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode); GenTree* NewPutArg(GenTreeCall* call, GenTree* arg, CallArg* callArg, var_types type); - void LowerArg(GenTreeCall* call, CallArg* callArg, bool late); + void LowerArg(GenTreeCall* call, CallArg* callArg, bool late); #if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) GenTree* LowerFloatArg(GenTree** pArg, CallArg* callArg); GenTree* LowerFloatArgReg(GenTree* arg, regNumber regNum); #endif - void InsertPInvokeCallProlog(GenTreeCall* call); - void InsertPInvokeCallEpilog(GenTreeCall* call); - void InsertPInvokeMethodProlog(); - void InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr)); + void 
InsertPInvokeCallProlog(GenTreeCall* call); + void InsertPInvokeCallEpilog(GenTreeCall* call); + void InsertPInvokeMethodProlog(); + void InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr)); GenTree* SetGCState(int cns); GenTree* CreateReturnTrapSeq(); enum FrameLinkAction @@ -316,31 +316,31 @@ class Lowering final : public Phase #endif // defined(TARGET_XARCH) // Per tree node member functions - void LowerStoreIndirCommon(GenTreeStoreInd* ind); + void LowerStoreIndirCommon(GenTreeStoreInd* ind); GenTree* LowerIndir(GenTreeIndir* ind); - bool OptimizeForLdp(GenTreeIndir* ind); - bool TryMakeIndirsAdjacent(GenTreeIndir* prevIndir, GenTreeIndir* indir); - void MarkTree(GenTree* root); - void UnmarkTree(GenTree* root); - void LowerStoreIndir(GenTreeStoreInd* node); - void LowerStoreIndirCoalescing(GenTreeStoreInd* node); + bool OptimizeForLdp(GenTreeIndir* ind); + bool TryMakeIndirsAdjacent(GenTreeIndir* prevIndir, GenTreeIndir* indir); + void MarkTree(GenTree* root); + void UnmarkTree(GenTree* root); + void LowerStoreIndir(GenTreeStoreInd* node); + void LowerStoreIndirCoalescing(GenTreeStoreInd* node); GenTree* LowerAdd(GenTreeOp* node); GenTree* LowerMul(GenTreeOp* mul); - bool TryLowerAndNegativeOne(GenTreeOp* node, GenTree** nextNode); + bool TryLowerAndNegativeOne(GenTreeOp* node, GenTree** nextNode); GenTree* LowerBinaryArithmetic(GenTreeOp* binOp); - bool LowerUnsignedDivOrMod(GenTreeOp* divMod); - bool TryLowerConstIntDivOrMod(GenTree* node, GenTree** nextNode); + bool LowerUnsignedDivOrMod(GenTreeOp* divMod); + bool TryLowerConstIntDivOrMod(GenTree* node, GenTree** nextNode); GenTree* LowerSignedDivOrMod(GenTree* node); - void LowerBlockStore(GenTreeBlk* blkNode); - void LowerBlockStoreCommon(GenTreeBlk* blkNode); - void LowerBlockStoreAsHelperCall(GenTreeBlk* blkNode); - void LowerLclHeap(GenTree* node); - void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr, GenTree* addrParent); - void 
LowerPutArgStkOrSplit(GenTreePutArgStk* putArgNode); + void LowerBlockStore(GenTreeBlk* blkNode); + void LowerBlockStoreCommon(GenTreeBlk* blkNode); + void LowerBlockStoreAsHelperCall(GenTreeBlk* blkNode); + void LowerLclHeap(GenTree* node); + void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr, GenTree* addrParent); + void LowerPutArgStkOrSplit(GenTreePutArgStk* putArgNode); GenTree* LowerArrLength(GenTreeArrCommon* node); #ifdef TARGET_XARCH - void LowerPutArgStk(GenTreePutArgStk* putArgStk); + void LowerPutArgStk(GenTreePutArgStk* putArgStk); GenTree* TryLowerMulWithConstant(GenTreeOp* node); #endif // TARGET_XARCH @@ -351,12 +351,12 @@ class Lowering final : public Phase void TryRetypingFloatingPointStoreToIntegerStore(GenTree* store); GenTree* LowerSwitch(GenTree* node); - bool TryLowerSwitchToBitTest(FlowEdge* jumpTable[], - unsigned jumpCount, - unsigned targetCount, - BasicBlock* bbSwitch, - GenTree* switchValue, - weight_t defaultLikelihood); + bool TryLowerSwitchToBitTest(FlowEdge* jumpTable[], + unsigned jumpCount, + unsigned targetCount, + BasicBlock* bbSwitch, + GenTree* switchValue, + weight_t defaultLikelihood); void LowerCast(GenTree* node); @@ -374,12 +374,12 @@ class Lowering final : public Phase void LowerShift(GenTreeOp* shift); #ifdef FEATURE_HW_INTRINSICS GenTree* LowerHWIntrinsic(GenTreeHWIntrinsic* node); - void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition); + void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition); GenTree* LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp); GenTree* LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node); GenTree* LowerHWIntrinsicDot(GenTreeHWIntrinsic* node); #if defined(TARGET_XARCH) - void LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node); + void LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node); GenTree* LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node); GenTree* 
LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node); GenTree* LowerHWIntrinsicCndSel(GenTreeHWIntrinsic* node); @@ -389,7 +389,7 @@ class Lowering final : public Phase GenTree* TryLowerAndOpToExtractLowestSetBit(GenTreeOp* andNode); GenTree* TryLowerAndOpToAndNot(GenTreeOp* andNode); GenTree* TryLowerXorOpToGetMaskUpToLowestSetBit(GenTreeOp* xorNode); - void LowerBswapOp(GenTreeOp* node); + void LowerBswapOp(GenTreeOp* node); #elif defined(TARGET_ARM64) bool IsValidConstForMovImm(GenTreeHWIntrinsic* node); void LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node); @@ -589,7 +589,9 @@ class Lowering final : public Phase target_ssize_t Offset; SavedIndir(GenTreeIndir* indir, GenTreeLclVar* addrBase, target_ssize_t offset) - : Indir(indir), AddrBase(addrBase), Offset(offset) + : Indir(indir) + , AddrBase(addrBase) + , Offset(offset) { } }; diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp index 9d28135c92a1a0..498093ae6fc52d 100644 --- a/src/coreclr/jit/lowerarmarch.cpp +++ b/src/coreclr/jit/lowerarmarch.cpp @@ -726,7 +726,7 @@ void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenT { return; } -#else // !TARGET_ARM +#else // !TARGET_ARM if ((ClrSafeInt(offset) + ClrSafeInt(size)).IsOverflow()) { return; diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp index 5c4f05a04ad57a..999a3fc6d338ca 100644 --- a/src/coreclr/jit/lowerxarch.cpp +++ b/src/coreclr/jit/lowerxarch.cpp @@ -690,13 +690,13 @@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) else #endif // TARGET_X86 if (loadSize <= comp->getUnrollThreshold(Compiler::UnrollKind::Memcpy)) - { - putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Unroll; - } - else - { - putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::RepInstr; - } + { + putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Unroll; + } + else + { + putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::RepInstr; + } } else // There are GC pointers. 
{ @@ -767,7 +767,7 @@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) #if defined(TARGET_AMD64) && !src->IsIntegralConst(0) #endif // TARGET_AMD64 - ) + ) { MakeSrcContained(putArgStk, src); } @@ -1767,8 +1767,8 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node) // currently ANDNOT logic cannot be optimized by the ternary node. break; } - GenTree* op3 = second->AsHWIntrinsic()->Op(1) == node ? second->AsHWIntrinsic()->Op(2) - : second->AsHWIntrinsic()->Op(1); + GenTree* op3 = second->AsHWIntrinsic()->Op(1) == node ? second->AsHWIntrinsic()->Op(2) + : second->AsHWIntrinsic()->Op(1); GenTree* control = comp->gtNewIconNode(node->GetTernaryControlByte(second->AsHWIntrinsic())); CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); unsigned simdSize = node->GetSimdSize(); @@ -6650,12 +6650,12 @@ void Lowering::ContainCheckCallOperands(GenTreeCall* call) else #endif // TARGET_X86 if (ctrlExpr->isIndir()) - { - // We may have cases where we have set a register target on the ctrlExpr, but if it - // contained we must clear it. - ctrlExpr->SetRegNum(REG_NA); - MakeSrcContained(call, ctrlExpr); - } + { + // We may have cases where we have set a register target on the ctrlExpr, but if it + // contained we must clear it. 
+ ctrlExpr->SetRegNum(REG_NA); + MakeSrcContained(call, ctrlExpr); + } } } @@ -10027,8 +10027,8 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) if (op1->IsVectorZero()) { -// When op1 is zero, we can contain it and we expect that -// ival is already in the correct state to account for it + // When op1 is zero, we can contain it and we expect that + // ival is already in the correct state to account for it #if DEBUG ssize_t ival = lastOp->AsIntConCommon()->IconValue(); @@ -10048,8 +10048,8 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) } else if (op2->IsVectorZero()) { -// When op2 is zero, we can contain it and we expect that -// zmask is already in the correct state to account for it + // When op2 is zero, we can contain it and we expect that + // zmask is already in the correct state to account for it #if DEBUG ssize_t ival = lastOp->AsIntConCommon()->IconValue(); diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index b2d37b9becad9d..50652ca075254d 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -384,9 +384,9 @@ void LinearScan::updateSpillCost(regNumber reg, Interval* interval) // interval - Interval of Refposition. // assignedReg - Assigned register for this refposition. // -void LinearScan::updateRegsFreeBusyState(RefPosition& refPosition, - regMaskTP regsBusy, - regMaskTP* regsToFree, +void LinearScan::updateRegsFreeBusyState(RefPosition& refPosition, + regMaskTP regsBusy, + regMaskTP* regsToFree, regMaskTP* delayRegsToFree DEBUG_ARG(Interval* interval) DEBUG_ARG(regNumber assignedReg)) { @@ -1437,7 +1437,7 @@ PhaseStatus LinearScan::doLinearScan() #ifdef DEBUG || VERBOSE #endif - ) + ) { dumpLsraStats(jitstdout()); } @@ -1771,7 +1771,7 @@ template void LinearScan::identifyCandidates(); // TODO-Cleanup: This was cloned from Compiler::lvaSortByRefCount() in lclvars.cpp in order // to avoid perturbation, but should be merged. 
template -void LinearScan::identifyCandidates() +void LinearScan::identifyCandidates() { if (localVarsEnregistered) { @@ -2022,24 +2022,24 @@ void LinearScan::identifyCandidates() else #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (regType(type) == FloatRegisterType) - { - floatVarCount++; - weight_t refCntWtd = varDsc->lvRefCntWtd(); - if (varDsc->lvIsRegArg) - { - // Don't count the initial reference for register params. In those cases, - // using a callee-save causes an extra copy. - refCntWtd -= BB_UNITY_WEIGHT; - } - if (refCntWtd >= thresholdFPRefCntWtd) { - VarSetOps::AddElemD(compiler, fpCalleeSaveCandidateVars, varDsc->lvVarIndex); - } - else if (refCntWtd >= maybeFPRefCntWtd) - { - VarSetOps::AddElemD(compiler, fpMaybeCandidateVars, varDsc->lvVarIndex); + floatVarCount++; + weight_t refCntWtd = varDsc->lvRefCntWtd(); + if (varDsc->lvIsRegArg) + { + // Don't count the initial reference for register params. In those cases, + // using a callee-save causes an extra copy. + refCntWtd -= BB_UNITY_WEIGHT; + } + if (refCntWtd >= thresholdFPRefCntWtd) + { + VarSetOps::AddElemD(compiler, fpCalleeSaveCandidateVars, varDsc->lvVarIndex); + } + else if (refCntWtd >= maybeFPRefCntWtd) + { + VarSetOps::AddElemD(compiler, fpMaybeCandidateVars, varDsc->lvVarIndex); + } } - } JITDUMP(" "); DBEXEC(VERBOSE, newInt->dump(compiler)); } @@ -2498,7 +2498,7 @@ void LinearScan::checkLastUses(BasicBlock* block) // the register locations will be "rotated" to stress the resolution and allocation // code. 
// -BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, +BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated)) { BasicBlock* predBlock = nullptr; @@ -2695,33 +2695,33 @@ void LinearScan::setFrameType() else #endif // DOUBLE_ALIGN if (compiler->codeGen->isFramePointerRequired()) - { - frameType = FT_EBP_FRAME; - } - else - { - if (compiler->rpMustCreateEBPCalled == false) - { -#ifdef DEBUG - const char* reason; -#endif // DEBUG - compiler->rpMustCreateEBPCalled = true; - if (compiler->rpMustCreateEBPFrame(INDEBUG(&reason))) - { - JITDUMP("; Decided to create an EBP based frame for ETW stackwalking (%s)\n", reason); - compiler->codeGen->setFrameRequired(true); - } - } - - if (compiler->codeGen->isFrameRequired()) { frameType = FT_EBP_FRAME; } else { - frameType = FT_ESP_FRAME; + if (compiler->rpMustCreateEBPCalled == false) + { +#ifdef DEBUG + const char* reason; +#endif // DEBUG + compiler->rpMustCreateEBPCalled = true; + if (compiler->rpMustCreateEBPFrame(INDEBUG(&reason))) + { + JITDUMP("; Decided to create an EBP based frame for ETW stackwalking (%s)\n", reason); + compiler->codeGen->setFrameRequired(true); + } + } + + if (compiler->codeGen->isFrameRequired()) + { + frameType = FT_EBP_FRAME; + } + else + { + frameType = FT_ESP_FRAME; + } } - } switch (frameType) { @@ -2941,7 +2941,7 @@ bool LinearScan::isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPo // for enregistration. 
It simply finds the register to be assigned, if it was assigned to something // else, then will unassign it and then assign to the currentInterval // -regNumber LinearScan::allocateRegMinimal(Interval* currentInterval, +regNumber LinearScan::allocateRegMinimal(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)) { assert(!enregisterLocalVars); @@ -3004,7 +3004,7 @@ regNumber LinearScan::allocateRegMinimal(Interval* currentInterval, // no such ref position, no register will be allocated. // template -regNumber LinearScan::allocateReg(Interval* currentInterval, +regNumber LinearScan::allocateReg(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)) { regMaskTP foundRegBit = @@ -7829,7 +7829,7 @@ void LinearScan::updateMaxSpill(RefPosition* refPosition) // the tree, and performs resolution across joins and back edges. // template -void LinearScan::resolveRegisters() +void LinearScan::resolveRegisters() { // Iterate over the tree and the RefPositions in lockstep // - annotate the tree with register assignments by setting GetRegNum() or gtRegPair (for longs) @@ -8302,8 +8302,8 @@ void LinearScan::resolveRegisters() { regMaskTP initialRegMask = interval->firstRefPosition->registerAssignment; regNumber initialReg = (initialRegMask == RBM_NONE || interval->firstRefPosition->spillAfter) - ? REG_STK - : genRegNumFromMask(initialRegMask); + ? REG_STK + : genRegNumFromMask(initialRegMask); #ifdef TARGET_ARM if (varTypeIsMultiReg(varDsc)) @@ -8750,12 +8750,12 @@ regNumber LinearScan::getTempRegForResolution(BasicBlock* fromBlock, // Notes: // It inserts at least one move and updates incoming parameter 'location'. 
// -void LinearScan::addResolutionForDouble(BasicBlock* block, - GenTree* insertionPoint, - Interval** sourceIntervals, - regNumberSmall* location, - regNumber toReg, - regNumber fromReg, +void LinearScan::addResolutionForDouble(BasicBlock* block, + GenTree* insertionPoint, + Interval** sourceIntervals, + regNumberSmall* location, + regNumber toReg, + regNumber fromReg, ResolveType resolveType DEBUG_ARG(BasicBlock* fromBlock) DEBUG_ARG(BasicBlock* toBlock)) { @@ -8825,10 +8825,10 @@ void LinearScan::addResolutionForDouble(BasicBlock* block, // The next time, we want to move from the stack to the destination (toReg), // in which case fromReg will be REG_STK, and we insert at the top. // -void LinearScan::addResolution(BasicBlock* block, - GenTree* insertionPoint, - Interval* interval, - regNumber toReg, +void LinearScan::addResolution(BasicBlock* block, + GenTree* insertionPoint, + Interval* interval, + regNumber toReg, regNumber fromReg DEBUG_ARG(BasicBlock* fromBlock) DEBUG_ARG(BasicBlock* toBlock) DEBUG_ARG(const char* reason)) { @@ -9952,7 +9952,7 @@ const char* LinearScan::getStatName(unsigned stat) #define LSRA_STAT_DEF(stat, name) name, #include "lsra_stats.h" #undef LSRA_STAT_DEF -#define REG_SEL_DEF(stat, value, shortname, orderSeqId) #stat, +#define REG_SEL_DEF(stat, value, shortname, orderSeqId) #stat, #define BUSY_REG_SEL_DEF(stat, value, shortname, orderSeqId) REG_SEL_DEF(stat, value, shortname, orderSeqId) #include "lsra_score.h" }; @@ -11272,9 +11272,8 @@ void LinearScan::dumpRegRecordHeader() // l is either '*' (if a last use) or ' ' (otherwise) // d is either 'D' (if a delayed use) or ' ' (otherwise) - maxNodeLocation = (maxNodeLocation == 0) - ? 1 - : maxNodeLocation; // corner case of a method with an infinite loop without any GenTree nodes + maxNodeLocation = (maxNodeLocation == 0) ? 
1 : maxNodeLocation; // corner case of a method with an infinite loop + // without any GenTree nodes assert(maxNodeLocation >= 1); assert(refPositions.size() >= 1); int treeIdWidth = 9; /* '[XXXXX] '*/ @@ -12404,7 +12403,7 @@ LinearScan::RegisterSelection::RegisterSelection(LinearScan* linearScan) #ifdef TARGET_ARM64 && !linearScan->compiler->info.compNeedsConsecutiveRegisters #endif - ) + ) { ordering = W("MQQQQQQQQQQQQQQQQ"); } @@ -13121,7 +13120,7 @@ void LinearScan::RegisterSelection::try_PREV_REG_OPT() && !refPosition->needsConsecutive #endif - ) + ) { assert(!"Spill candidate has no assignedInterval recentRefPosition"); } @@ -13253,7 +13252,7 @@ void LinearScan::RegisterSelection::calculateCoversSets() // Register bit selected (a single register) and REG_NA if no register was selected. // template -regMaskTP LinearScan::RegisterSelection::select(Interval* currentInterval, +regMaskTP LinearScan::RegisterSelection::select(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)) { #ifdef DEBUG @@ -13718,7 +13717,7 @@ regMaskTP LinearScan::RegisterSelection::select(Interval* currentInterval, // select the REG_ORDER heuristics (if there are any free candidates) or REG_NUM (if all registers // are busy). 
// -regMaskTP LinearScan::RegisterSelection::selectMinimal(Interval* currentInterval, +regMaskTP LinearScan::RegisterSelection::selectMinimal(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)) { assert(!linearScan->enregisterLocalVars); diff --git a/src/coreclr/jit/lsra.h b/src/coreclr/jit/lsra.h index 9620abbc5a7824..e038b4e8243a57 100644 --- a/src/coreclr/jit/lsra.h +++ b/src/coreclr/jit/lsra.h @@ -30,13 +30,13 @@ const unsigned int MaxInternalRegisters = 8; const unsigned int RegisterTypeCount = 2; /***************************************************************************** -* Register types -*****************************************************************************/ + * Register types + *****************************************************************************/ typedef var_types RegisterType; -#define IntRegisterType TYP_INT +#define IntRegisterType TYP_INT #define FloatRegisterType TYP_FLOAT -#define MaskRegisterType TYP_MASK +#define MaskRegisterType TYP_MASK //------------------------------------------------------------------------ // regType: Return the RegisterType to use for a given type @@ -83,7 +83,9 @@ struct RefInfo RefPosition* ref; GenTree* treeNode; - RefInfo(RefPosition* r, GenTree* t) : ref(r), treeNode(t) + RefInfo(RefPosition* r, GenTree* t) + : ref(r) + , treeNode(t) { } @@ -107,7 +109,8 @@ class RefInfoListNode final : public RefInfo RefInfoListNode* m_next; // The next node in the list public: - RefInfoListNode(RefPosition* r, GenTree* t) : RefInfo(r, t) + RefInfoListNode(RefPosition* r, GenTree* t) + : RefInfo(r, t) { } @@ -134,11 +137,15 @@ class RefInfoList final RefInfoListNode* m_tail; // The tail of the list public: - RefInfoList() : m_head(nullptr), m_tail(nullptr) + RefInfoList() + : m_head(nullptr) + , m_tail(nullptr) { } - RefInfoList(RefInfoListNode* node) : m_head(node), m_tail(node) + RefInfoList(RefInfoListNode* node) + : m_head(node) + , m_tail(node) { assert(m_head->m_next == 
nullptr); } @@ -365,7 +372,7 @@ class RefInfoListNodePool final public: RefInfoListNodePool(Compiler* compiler, unsigned preallocate = defaultPreallocation); RefInfoListNode* GetNode(RefPosition* r, GenTree* t); - void ReturnNode(RefInfoListNode* listNode); + void ReturnNode(RefInfoListNode* listNode); }; #if TRACK_LSRA_STATS @@ -374,7 +381,7 @@ enum LsraStat #define LSRA_STAT_DEF(enum_name, enum_str) enum_name, #include "lsra_stats.h" #undef LSRA_STAT_DEF -#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) STAT_##enum_name, +#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) STAT_##enum_name, #define BUSY_REG_SEL_DEF(enum_name, value, short_str, orderSeqId) REG_SEL_DEF(enum_name, value, short_str, orderSeqId) #include "lsra_score.h" COUNT @@ -387,11 +394,11 @@ struct LsraBlockInfo // 0 for fgFirstBB. unsigned int predBBNum; weight_t weight; - bool hasCriticalInEdge : 1; + bool hasCriticalInEdge : 1; bool hasCriticalOutEdge : 1; - bool hasEHBoundaryIn : 1; - bool hasEHBoundaryOut : 1; - bool hasEHPred : 1; + bool hasEHBoundaryIn : 1; + bool hasEHBoundaryOut : 1; + bool hasEHPred : 1; #if TRACK_LSRA_STATS // Per block maintained LSRA statistics. @@ -401,7 +408,7 @@ struct LsraBlockInfo enum RegisterScore { -#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) enum_name = value, +#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) enum_name = value, #define BUSY_REG_SEL_DEF(enum_name, value, short_str, orderSeqId) REG_SEL_DEF(enum_name, value, short_str, orderSeqId) #include "lsra_score.h" NONE = 0 @@ -635,7 +642,7 @@ class LinearScan : public LinearScanInterface // This does the dataflow analysis and builds the intervals template - void buildIntervals(); + void buildIntervals(); // This is where the actual assignment is done for scenarios where // no local var enregistration is done. 
@@ -648,7 +655,7 @@ class LinearScan : public LinearScanInterface void allocateRegisters(); // This is the resolution phase, where cross-block mismatches are fixed up template - void resolveRegisters(); + void resolveRegisters(); void writeRegisters(RefPosition* currentRefPosition, GenTree* tree); @@ -658,7 +665,7 @@ class LinearScan : public LinearScanInterface void insertCopyOrReload(BasicBlock* block, GenTree* tree, unsigned multiRegIdx, RefPosition* refPosition); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE - void makeUpperVectorInterval(unsigned varIndex); + void makeUpperVectorInterval(unsigned varIndex); Interval* getUpperVectorInterval(unsigned varIndex); // Save the upper half of a vector that lives in a callee-save register at the point of a call. @@ -693,20 +700,20 @@ class LinearScan : public LinearScanInterface }; #ifdef TARGET_ARM - void addResolutionForDouble(BasicBlock* block, - GenTree* insertionPoint, - Interval** sourceIntervals, - regNumberSmall* location, - regNumber toReg, - regNumber fromReg, + void addResolutionForDouble(BasicBlock* block, + GenTree* insertionPoint, + Interval** sourceIntervals, + regNumberSmall* location, + regNumber toReg, + regNumber fromReg, ResolveType resolveType DEBUG_ARG(BasicBlock* fromBlock) DEBUG_ARG(BasicBlock* toBlock)); #endif - void addResolution(BasicBlock* block, - GenTree* insertionPoint, - Interval* interval, - regNumber outReg, + void addResolution(BasicBlock* block, + GenTree* insertionPoint, + Interval* interval, + regNumber outReg, regNumber inReg DEBUG_ARG(BasicBlock* fromBlock) DEBUG_ARG(BasicBlock* toBlock) DEBUG_ARG(const char* reason)); @@ -816,8 +823,14 @@ class LinearScan : public LinearScanInterface // This controls the heuristics used to select registers // These can be combined. 
- enum LsraSelect{LSRA_SELECT_DEFAULT = 0, LSRA_SELECT_REVERSE_HEURISTICS = 0x04, - LSRA_SELECT_REVERSE_CALLER_CALLEE = 0x08, LSRA_SELECT_NEAREST = 0x10, LSRA_SELECT_MASK = 0x1c}; + enum LsraSelect + { + LSRA_SELECT_DEFAULT = 0, + LSRA_SELECT_REVERSE_HEURISTICS = 0x04, + LSRA_SELECT_REVERSE_CALLER_CALLEE = 0x08, + LSRA_SELECT_NEAREST = 0x10, + LSRA_SELECT_MASK = 0x1c + }; LsraSelect getSelectionHeuristics() { return (LsraSelect)(lsraStressMask & LSRA_SELECT_MASK); @@ -836,9 +849,14 @@ class LinearScan : public LinearScanInterface } // This controls the order in which basic blocks are visited during allocation - enum LsraTraversalOrder{LSRA_TRAVERSE_LAYOUT = 0x20, LSRA_TRAVERSE_PRED_FIRST = 0x40, - LSRA_TRAVERSE_RANDOM = 0x60, // NYI - LSRA_TRAVERSE_DEFAULT = LSRA_TRAVERSE_PRED_FIRST, LSRA_TRAVERSE_MASK = 0x60}; + enum LsraTraversalOrder + { + LSRA_TRAVERSE_LAYOUT = 0x20, + LSRA_TRAVERSE_PRED_FIRST = 0x40, + LSRA_TRAVERSE_RANDOM = 0x60, // NYI + LSRA_TRAVERSE_DEFAULT = LSRA_TRAVERSE_PRED_FIRST, + LSRA_TRAVERSE_MASK = 0x60 + }; LsraTraversalOrder getLsraTraversalOrder() { if ((lsraStressMask & LSRA_TRAVERSE_MASK) == 0) @@ -858,7 +876,12 @@ class LinearScan : public LinearScanInterface // This controls whether lifetimes should be extended to the entire method. 
// Note that this has no effect under MinOpts - enum LsraExtendLifetimes{LSRA_DONT_EXTEND = 0, LSRA_EXTEND_LIFETIMES = 0x80, LSRA_EXTEND_LIFETIMES_MASK = 0x80}; + enum LsraExtendLifetimes + { + LSRA_DONT_EXTEND = 0, + LSRA_EXTEND_LIFETIMES = 0x80, + LSRA_EXTEND_LIFETIMES_MASK = 0x80 + }; LsraExtendLifetimes getLsraExtendLifeTimes() { return (LsraExtendLifetimes)(lsraStressMask & LSRA_EXTEND_LIFETIMES_MASK); @@ -871,8 +894,13 @@ class LinearScan : public LinearScanInterface // This controls whether variables locations should be set to the previous block in layout order // (LSRA_BLOCK_BOUNDARY_LAYOUT), or to that of the highest-weight predecessor (LSRA_BLOCK_BOUNDARY_PRED - // the default), or rotated (LSRA_BLOCK_BOUNDARY_ROTATE). - enum LsraBlockBoundaryLocations{LSRA_BLOCK_BOUNDARY_PRED = 0, LSRA_BLOCK_BOUNDARY_LAYOUT = 0x100, - LSRA_BLOCK_BOUNDARY_ROTATE = 0x200, LSRA_BLOCK_BOUNDARY_MASK = 0x300}; + enum LsraBlockBoundaryLocations + { + LSRA_BLOCK_BOUNDARY_PRED = 0, + LSRA_BLOCK_BOUNDARY_LAYOUT = 0x100, + LSRA_BLOCK_BOUNDARY_ROTATE = 0x200, + LSRA_BLOCK_BOUNDARY_MASK = 0x300 + }; LsraBlockBoundaryLocations getLsraBlockBoundaryLocations() { return (LsraBlockBoundaryLocations)(lsraStressMask & LSRA_BLOCK_BOUNDARY_MASK); @@ -881,7 +909,12 @@ class LinearScan : public LinearScanInterface // This controls whether we always insert a GT_RELOAD instruction after a spill // Note that this can be combined with LSRA_SPILL_ALWAYS (or not) - enum LsraReload{LSRA_NO_RELOAD_IF_SAME = 0, LSRA_ALWAYS_INSERT_RELOAD = 0x400, LSRA_RELOAD_MASK = 0x400}; + enum LsraReload + { + LSRA_NO_RELOAD_IF_SAME = 0, + LSRA_ALWAYS_INSERT_RELOAD = 0x400, + LSRA_RELOAD_MASK = 0x400 + }; LsraReload getLsraReload() { return (LsraReload)(lsraStressMask & LSRA_RELOAD_MASK); @@ -892,7 +925,12 @@ class LinearScan : public LinearScanInterface } // This controls whether we spill everywhere - enum LsraSpill{LSRA_DONT_SPILL_ALWAYS = 0, LSRA_SPILL_ALWAYS = 0x800, LSRA_SPILL_MASK = 0x800}; + enum LsraSpill + { 
+ LSRA_DONT_SPILL_ALWAYS = 0, + LSRA_SPILL_ALWAYS = 0x800, + LSRA_SPILL_MASK = 0x800 + }; LsraSpill getLsraSpill() { return (LsraSpill)(lsraStressMask & LSRA_SPILL_MASK); @@ -904,8 +942,12 @@ class LinearScan : public LinearScanInterface // This controls whether RefPositions that lower/codegen indicated as reg optional be // allocated a reg at all. - enum LsraRegOptionalControl{LSRA_REG_OPTIONAL_DEFAULT = 0, LSRA_REG_OPTIONAL_NO_ALLOC = 0x1000, - LSRA_REG_OPTIONAL_MASK = 0x1000}; + enum LsraRegOptionalControl + { + LSRA_REG_OPTIONAL_DEFAULT = 0, + LSRA_REG_OPTIONAL_NO_ALLOC = 0x1000, + LSRA_REG_OPTIONAL_MASK = 0x1000 + }; LsraRegOptionalControl getLsraRegOptionalControl() { @@ -988,7 +1030,7 @@ class LinearScan : public LinearScanInterface private: // Determine which locals are candidates for allocation template - void identifyCandidates(); + void identifyCandidates(); // determine which locals are used in EH constructs we don't want to deal with void identifyCandidatesExceptionDataflow(); @@ -997,8 +1039,8 @@ class LinearScan : public LinearScanInterface #ifdef DEBUG void checkLastUses(BasicBlock* block); - int ComputeOperandDstCount(GenTree* operand); - int ComputeAvailableSrcCount(GenTree* node); + int ComputeOperandDstCount(GenTree* operand); + int ComputeAvailableSrcCount(GenTree* node); #endif // DEBUG void setFrameType(); @@ -1014,20 +1056,20 @@ class LinearScan : public LinearScanInterface void resetAllRegistersState(); #ifdef TARGET_ARM - bool isSecondHalfReg(RegRecord* regRec, Interval* interval); + bool isSecondHalfReg(RegRecord* regRec, Interval* interval); RegRecord* getSecondHalfRegRec(RegRecord* regRec); RegRecord* findAnotherHalfRegRec(RegRecord* regRec); - regNumber findAnotherHalfRegNum(regNumber regNum); - bool canSpillDoubleReg(RegRecord* physRegRecord, LsraLocation refLocation); - void unassignDoublePhysReg(RegRecord* doubleRegRecord); + regNumber findAnotherHalfRegNum(regNumber regNum); + bool canSpillDoubleReg(RegRecord* physRegRecord, 
LsraLocation refLocation); + void unassignDoublePhysReg(RegRecord* doubleRegRecord); #endif - void clearAssignedInterval(RegRecord* reg ARM_ARG(RegisterType regType)); - void updateAssignedInterval(RegRecord* reg, Interval* interval ARM_ARG(RegisterType regType)); - void updatePreviousInterval(RegRecord* reg, Interval* interval ARM_ARG(RegisterType regType)); - bool canRestorePreviousInterval(RegRecord* regRec, Interval* assignedInterval); - bool isAssignedToInterval(Interval* interval, RegRecord* regRec); - bool isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation); - bool canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation); + void clearAssignedInterval(RegRecord* reg ARM_ARG(RegisterType regType)); + void updateAssignedInterval(RegRecord* reg, Interval* interval ARM_ARG(RegisterType regType)); + void updatePreviousInterval(RegRecord* reg, Interval* interval ARM_ARG(RegisterType regType)); + bool canRestorePreviousInterval(RegRecord* regRec, Interval* assignedInterval); + bool isAssignedToInterval(Interval* interval, RegRecord* regRec); + bool isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation); + bool canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation); weight_t getSpillWeight(RegRecord* physRegRecord); // insert refpositions representing prolog zero-inits which will be added later @@ -1214,13 +1256,13 @@ class LinearScan : public LinearScanInterface void spillGCRefs(RefPosition* killRefPosition); -/***************************************************************************** -* Register selection -****************************************************************************/ + /***************************************************************************** + * Register selection + ****************************************************************************/ #if defined(TARGET_ARM64) - bool canAssignNextConsecutiveRegisters(RefPosition* firstRefPosition, regNumber firstRegAssigned); - void 
assignConsecutiveRegisters(RefPosition* firstRefPosition, regNumber firstRegAssigned); + bool canAssignNextConsecutiveRegisters(RefPosition* firstRefPosition, regNumber firstRegAssigned); + void assignConsecutiveRegisters(RefPosition* firstRefPosition, regNumber firstRegAssigned); regMaskTP getConsecutiveCandidates(regMaskTP candidates, RefPosition* refPosition, regMaskTP* busyCandidates); regMaskTP filterConsecutiveCandidates(regMaskTP candidates, unsigned int registersNeeded, @@ -1258,10 +1300,10 @@ class LinearScan : public LinearScanInterface // Perform register selection and update currentInterval or refPosition template - FORCEINLINE regMaskTP select(Interval* currentInterval, + FORCEINLINE regMaskTP select(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)); - FORCEINLINE regMaskTP selectMinimal(Interval* currentInterval, + FORCEINLINE regMaskTP selectMinimal(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)); // If the register is from unassigned set such that it was not already @@ -1344,14 +1386,14 @@ class LinearScan : public LinearScanInterface return (prevRegBit & preferences) == foundRegBit; } - bool applySelection(int selectionScore, regMaskTP selectionCandidates); - bool applySingleRegSelection(int selectionScore, regMaskTP selectionCandidate); + bool applySelection(int selectionScore, regMaskTP selectionCandidates); + bool applySingleRegSelection(int selectionScore, regMaskTP selectionCandidate); FORCEINLINE void calculateCoversSets(); FORCEINLINE void calculateUnassignedSets(); FORCEINLINE void reset(Interval* interval, RefPosition* refPosition); FORCEINLINE void resetMinimal(Interval* interval, RefPosition* refPosition); -#define REG_SEL_DEF(stat, value, shortname, orderSeqId) FORCEINLINE void try_##stat(); +#define REG_SEL_DEF(stat, value, shortname, orderSeqId) FORCEINLINE void try_##stat(); #define BUSY_REG_SEL_DEF(stat, value, shortname, orderSeqId) 
REG_SEL_DEF(stat, value, shortname, orderSeqId) #include "lsra_score.h" }; @@ -1379,8 +1421,8 @@ class LinearScan : public LinearScanInterface unsigned toBBNum; }; typedef JitHashTable, SplitEdgeInfo> SplitBBNumToTargetBBNumMap; - SplitBBNumToTargetBBNumMap* splitBBNumToTargetBBNumMap; - SplitBBNumToTargetBBNumMap* getSplitBBNumToTargetBBNumMap() + SplitBBNumToTargetBBNumMap* splitBBNumToTargetBBNumMap; + SplitBBNumToTargetBBNumMap* getSplitBBNumToTargetBBNumMap() { if (splitBBNumToTargetBBNumMap == nullptr) { @@ -1391,13 +1433,13 @@ class LinearScan : public LinearScanInterface } SplitEdgeInfo getSplitEdgeInfo(unsigned int bbNum); - void initVarRegMaps(); - void setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); - void setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); + void initVarRegMaps(); + void setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); + void setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); VarToRegMap getInVarToRegMap(unsigned int bbNum); VarToRegMap getOutVarToRegMap(unsigned int bbNum); - void setVarReg(VarToRegMap map, unsigned int trackedVarIndex, regNumber reg); - regNumber getVarReg(VarToRegMap map, unsigned int trackedVarIndex); + void setVarReg(VarToRegMap map, unsigned int trackedVarIndex, regNumber reg); + regNumber getVarReg(VarToRegMap map, unsigned int trackedVarIndex); // Initialize the incoming VarToRegMap to the given map values (generally a predecessor of // the block) VarToRegMap setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap); @@ -1410,8 +1452,8 @@ class LinearScan : public LinearScanInterface #ifdef TARGET_ARM64 typedef JitHashTable, RefPosition*> NextConsecutiveRefPositionsMap; - NextConsecutiveRefPositionsMap* nextConsecutiveRefPositionMap; - NextConsecutiveRefPositionsMap* getNextConsecutiveRefPositionsMap() + NextConsecutiveRefPositionsMap* nextConsecutiveRefPositionMap; + NextConsecutiveRefPositionsMap* 
getNextConsecutiveRefPositionsMap() { if (nextConsecutiveRefPositionMap == nullptr) { @@ -1439,7 +1481,12 @@ class LinearScan : public LinearScanInterface // - In LSRA_DUMP_POST, which is after register allocation, the registers are // shown. - enum LsraTupleDumpMode{LSRA_DUMP_PRE, LSRA_DUMP_REFPOS, LSRA_DUMP_POST}; + enum LsraTupleDumpMode + { + LSRA_DUMP_PRE, + LSRA_DUMP_REFPOS, + LSRA_DUMP_POST + }; void lsraGetOperandString(GenTree* tree, LsraTupleDumpMode mode, char* operandString, unsigned operandStringLength); void lsraDispNode(GenTree* tree, LsraTupleDumpMode mode, bool hasDest); void DumpOperandDefs( @@ -1477,7 +1524,7 @@ class LinearScan : public LinearScanInterface regMaskTP lastDumpedRegisters; regMaskTP registersToDump; int lastUsedRegNumIndex; - bool shouldDumpReg(regNumber regNum) + bool shouldDumpReg(regNumber regNum) { return (registersToDump & genRegMask(regNum)) != 0; } @@ -1498,29 +1545,54 @@ class LinearScan : public LinearScanInterface void dumpIntervalName(Interval* interval); // Events during the allocation phase that cause some dump output - enum LsraDumpEvent{ + enum LsraDumpEvent + { // Conflicting def/use - LSRA_EVENT_DEFUSE_CONFLICT, LSRA_EVENT_DEFUSE_FIXED_DELAY_USE, LSRA_EVENT_DEFUSE_CASE1, LSRA_EVENT_DEFUSE_CASE2, - LSRA_EVENT_DEFUSE_CASE3, LSRA_EVENT_DEFUSE_CASE4, LSRA_EVENT_DEFUSE_CASE5, LSRA_EVENT_DEFUSE_CASE6, + LSRA_EVENT_DEFUSE_CONFLICT, + LSRA_EVENT_DEFUSE_FIXED_DELAY_USE, + LSRA_EVENT_DEFUSE_CASE1, + LSRA_EVENT_DEFUSE_CASE2, + LSRA_EVENT_DEFUSE_CASE3, + LSRA_EVENT_DEFUSE_CASE4, + LSRA_EVENT_DEFUSE_CASE5, + LSRA_EVENT_DEFUSE_CASE6, // Spilling - LSRA_EVENT_SPILL, LSRA_EVENT_SPILL_EXTENDED_LIFETIME, LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL, - LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, LSRA_EVENT_DONE_KILL_GC_REFS, LSRA_EVENT_NO_GC_KILLS, + LSRA_EVENT_SPILL, + LSRA_EVENT_SPILL_EXTENDED_LIFETIME, + LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL, + LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, + LSRA_EVENT_DONE_KILL_GC_REFS, + 
LSRA_EVENT_NO_GC_KILLS, // Block boundaries - LSRA_EVENT_START_BB, LSRA_EVENT_END_BB, + LSRA_EVENT_START_BB, + LSRA_EVENT_END_BB, // Miscellaneous - LSRA_EVENT_FREE_REGS, LSRA_EVENT_UPPER_VECTOR_SAVE, LSRA_EVENT_UPPER_VECTOR_RESTORE, + LSRA_EVENT_FREE_REGS, + LSRA_EVENT_UPPER_VECTOR_SAVE, + LSRA_EVENT_UPPER_VECTOR_RESTORE, // Characteristics of the current RefPosition LSRA_EVENT_INCREMENT_RANGE_END, // ??? - LSRA_EVENT_LAST_USE, LSRA_EVENT_LAST_USE_DELAYED, LSRA_EVENT_NEEDS_NEW_REG, + LSRA_EVENT_LAST_USE, + LSRA_EVENT_LAST_USE_DELAYED, + LSRA_EVENT_NEEDS_NEW_REG, // Allocation decisions - LSRA_EVENT_FIXED_REG, LSRA_EVENT_EXP_USE, LSRA_EVENT_ZERO_REF, LSRA_EVENT_NO_ENTRY_REG_ALLOCATED, - LSRA_EVENT_KEPT_ALLOCATION, LSRA_EVENT_COPY_REG, LSRA_EVENT_MOVE_REG, LSRA_EVENT_ALLOC_REG, - LSRA_EVENT_NO_REG_ALLOCATED, LSRA_EVENT_RELOAD, LSRA_EVENT_SPECIAL_PUTARG, LSRA_EVENT_REUSE_REG, + LSRA_EVENT_FIXED_REG, + LSRA_EVENT_EXP_USE, + LSRA_EVENT_ZERO_REF, + LSRA_EVENT_NO_ENTRY_REG_ALLOCATED, + LSRA_EVENT_KEPT_ALLOCATION, + LSRA_EVENT_COPY_REG, + LSRA_EVENT_MOVE_REG, + LSRA_EVENT_ALLOC_REG, + LSRA_EVENT_NO_REG_ALLOCATED, + LSRA_EVENT_RELOAD, + LSRA_EVENT_SPECIAL_PUTARG, + LSRA_EVENT_REUSE_REG, }; void dumpLsraAllocationEvent(LsraDumpEvent event, Interval* interval = nullptr, @@ -1533,14 +1605,14 @@ class LinearScan : public LinearScanInterface #if TRACK_LSRA_STATS unsigned regCandidateVarCount; - void updateLsraStat(LsraStat stat, unsigned currentBBNum); - void dumpLsraStats(FILE* file); + void updateLsraStat(LsraStat stat, unsigned currentBBNum); + void dumpLsraStats(FILE* file); LsraStat getLsraStatFromScore(RegisterScore registerScore); LsraStat firstRegSelStat = STAT_FREE; public: - virtual void dumpLsraStatsCsv(FILE* file); - virtual void dumpLsraStatsSummary(FILE* file); + virtual void dumpLsraStatsCsv(FILE* file); + virtual void dumpLsraStatsSummary(FILE* file); static const char* getStatName(unsigned stat); #define INTRACK_STATS(x) x @@ -1576,7 +1648,7 @@ class LinearScan 
: public LinearScanInterface // Set of blocks that have been visited. BlockSet bbVisitedSet; - void markBlockVisited(BasicBlock* block) + void markBlockVisited(BasicBlock* block) { BlockSetOps::AddElemD(compiler, bbVisitedSet, block->bbNum); } @@ -1603,17 +1675,17 @@ class LinearScan : public LinearScanInterface BasicBlock** blockSequence; // The verifiedAllBBs flag indicates whether we have verified that all BBs have been // included in the blockSeuqence above, during setBlockSequence(). - bool verifiedAllBBs; - void setBlockSequence(); - int compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights); + bool verifiedAllBBs; + void setBlockSequence(); + int compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights); BasicBlockList* blockSequenceWorkList; bool blockSequencingDone; #ifdef DEBUG // LSRA must not change number of blocks and blockEpoch that it initializes at start. unsigned blockEpoch; #endif // DEBUG - void addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet); - void removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode); + void addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet); + void removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode); BasicBlock* getNextCandidateFromWorkList(); // Indicates whether the allocation pass has been completed. @@ -1714,7 +1786,7 @@ class LinearScan : public LinearScanInterface #if defined(TARGET_AMD64) static const var_types LargeVectorSaveType = TYP_SIMD16; #elif defined(TARGET_ARM64) - static const var_types LargeVectorSaveType = TYP_DOUBLE; + static const var_types LargeVectorSaveType = TYP_DOUBLE; #endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) // Set of large vector (TYP_SIMD32 on AVX) variables. 
VARSET_TP largeVectorVars; @@ -1790,14 +1862,14 @@ class LinearScan : public LinearScanInterface void clearSpillCost(regNumber reg, var_types regType); void updateSpillCost(regNumber reg, Interval* interval); - FORCEINLINE void updateRegsFreeBusyState(RefPosition& refPosition, - regMaskTP regsBusy, - regMaskTP* regsToFree, + FORCEINLINE void updateRegsFreeBusyState(RefPosition& refPosition, + regMaskTP regsBusy, + regMaskTP* regsToFree, regMaskTP* delayRegsToFree DEBUG_ARG(Interval* interval) DEBUG_ARG(regNumber assignedReg)); regMaskTP m_RegistersWithConstants; - void clearConstantReg(regNumber reg, var_types regType) + void clearConstantReg(regNumber reg, var_types regType) { m_RegistersWithConstants &= ~getRegMask(reg, regType); } @@ -1815,7 +1887,7 @@ class LinearScan : public LinearScanInterface regMaskTP fixedRegs; LsraLocation nextFixedRef[REG_COUNT]; - void updateNextFixedRef(RegRecord* regRecord, RefPosition* nextRefPosition); + void updateNextFixedRef(RegRecord* regRecord, RefPosition* nextRefPosition); LsraLocation getNextFixedRef(regNumber regNum, var_types regType) { LsraLocation loc = nextFixedRef[regNum]; @@ -1932,11 +2004,11 @@ class LinearScan : public LinearScanInterface bool checkContainedOrCandidateLclVar(GenTreeLclVar* lclNode); RefPosition* BuildUse(GenTree* operand, regMaskTP candidates = RBM_NONE, int multiRegIdx = 0); - void setDelayFree(RefPosition* use); - int BuildBinaryUses(GenTreeOp* node, regMaskTP candidates = RBM_NONE); - int BuildCastUses(GenTreeCast* cast, regMaskTP candidates); + void setDelayFree(RefPosition* use); + int BuildBinaryUses(GenTreeOp* node, regMaskTP candidates = RBM_NONE); + int BuildCastUses(GenTreeCast* cast, regMaskTP candidates); #ifdef TARGET_XARCH - int BuildRMWUses(GenTree* node, GenTree* op1, GenTree* op2, regMaskTP candidates = RBM_NONE); + int BuildRMWUses(GenTree* node, GenTree* op1, GenTree* op2, regMaskTP candidates = RBM_NONE); inline regMaskTP BuildEvexIncompatibleMask(GenTree* tree); #endif // 
!TARGET_XARCH int BuildSelect(GenTreeOp* select); @@ -1948,19 +2020,19 @@ class LinearScan : public LinearScanInterface void getTgtPrefOperands(GenTree* tree, GenTree* op1, GenTree* op2, bool* prefOp1, bool* prefOp2); bool supportsSpecialPutArg(); - int BuildSimple(GenTree* tree); - int BuildOperandUses(GenTree* node, regMaskTP candidates = RBM_NONE); - void AddDelayFreeUses(RefPosition* refPosition, GenTree* rmwNode); - int BuildDelayFreeUses(GenTree* node, - GenTree* rmwNode = nullptr, - regMaskTP candidates = RBM_NONE, - RefPosition** useRefPosition = nullptr); - int BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates = RBM_NONE); - int BuildAddrUses(GenTree* addr, regMaskTP candidates = RBM_NONE); - void HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs); + int BuildSimple(GenTree* tree); + int BuildOperandUses(GenTree* node, regMaskTP candidates = RBM_NONE); + void AddDelayFreeUses(RefPosition* refPosition, GenTree* rmwNode); + int BuildDelayFreeUses(GenTree* node, + GenTree* rmwNode = nullptr, + regMaskTP candidates = RBM_NONE, + RefPosition** useRefPosition = nullptr); + int BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates = RBM_NONE); + int BuildAddrUses(GenTree* addr, regMaskTP candidates = RBM_NONE); + void HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs); RefPosition* BuildDef(GenTree* tree, regMaskTP dstCandidates = RBM_NONE, int multiRegIdx = 0); - void BuildDefs(GenTree* tree, int dstCount, regMaskTP dstCandidates = RBM_NONE); - void BuildDefsWithKills(GenTree* tree, int dstCount, regMaskTP dstCandidates, regMaskTP killMask); + void BuildDefs(GenTree* tree, int dstCount, regMaskTP dstCandidates = RBM_NONE); + void BuildDefsWithKills(GenTree* tree, int dstCount, regMaskTP dstCandidates, regMaskTP killMask); int BuildReturn(GenTree* tree); #ifdef TARGET_XARCH @@ -1971,24 +2043,24 @@ class LinearScan : public LinearScanInterface #ifdef TARGET_ARM int 
BuildShiftLongCarry(GenTree* tree); #endif - int BuildPutArgReg(GenTreeUnOp* node); - int BuildCall(GenTreeCall* call); - int BuildCmp(GenTree* tree); - int BuildCmpOperands(GenTree* tree); - int BuildBlockStore(GenTreeBlk* blkNode); - int BuildModDiv(GenTree* tree); - int BuildIntrinsic(GenTree* tree); + int BuildPutArgReg(GenTreeUnOp* node); + int BuildCall(GenTreeCall* call); + int BuildCmp(GenTree* tree); + int BuildCmpOperands(GenTree* tree); + int BuildBlockStore(GenTreeBlk* blkNode); + int BuildModDiv(GenTree* tree); + int BuildIntrinsic(GenTree* tree); void BuildStoreLocDef(GenTreeLclVarCommon* storeLoc, LclVarDsc* varDsc, RefPosition* singleUseRef, int index); - int BuildMultiRegStoreLoc(GenTreeLclVar* storeLoc); - int BuildStoreLoc(GenTreeLclVarCommon* tree); - int BuildIndir(GenTreeIndir* indirTree); - int BuildGCWriteBarrier(GenTree* tree); - int BuildCast(GenTreeCast* cast); + int BuildMultiRegStoreLoc(GenTreeLclVar* storeLoc); + int BuildStoreLoc(GenTreeLclVarCommon* tree); + int BuildIndir(GenTreeIndir* indirTree); + int BuildGCWriteBarrier(GenTree* tree); + int BuildCast(GenTreeCast* cast); #if defined(TARGET_XARCH) // returns true if the tree can use the read-modify-write memory instruction form bool isRMWRegOper(GenTree* tree); - int BuildMul(GenTree* tree); + int BuildMul(GenTree* tree); void SetContainsAVXFlags(unsigned sizeOfSIMDVector = 0); #endif // defined(TARGET_XARCH) @@ -2017,7 +2089,7 @@ class LinearScan : public LinearScanInterface #ifdef FEATURE_HW_INTRINSICS int BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree, int* pDstCount); #ifdef TARGET_ARM64 - int BuildConsecutiveRegistersForUse(GenTree* treeNode, GenTree* rmwNode = nullptr); + int BuildConsecutiveRegistersForUse(GenTree* treeNode, GenTree* rmwNode = nullptr); void BuildConsecutiveRegistersForDef(GenTree* treeNode, int fieldCount); #endif // TARGET_ARM64 #endif // FEATURE_HW_INTRINSICS @@ -2487,8 +2559,8 @@ class RefPosition // we need an explicit move. 
// - copyReg and moveReg must not exist with each other. - unsigned char reload : 1; - unsigned char spillAfter : 1; + unsigned char reload : 1; + unsigned char spillAfter : 1; unsigned char singleDefSpill : 1; unsigned char writeThru : 1; // true if this var is defined in a register and also spilled. spillAfter must NOT be // set. @@ -2496,7 +2568,7 @@ class RefPosition unsigned char copyReg : 1; unsigned char moveReg : 1; // true if this var is moved to a new register - unsigned char isPhysRegRef : 1; // true if 'referent' points of a RegRecord, false if it points to an Interval + unsigned char isPhysRegRef : 1; // true if 'referent' points of a RegRecord, false if it points to an Interval unsigned char isFixedRegRef : 1; unsigned char isLocalDefUse : 1; @@ -2538,9 +2610,9 @@ class RefPosition GenTree* buildNode; #endif // DEBUG - RefPosition(unsigned int bbNum, - LsraLocation nodeLocation, - GenTree* treeNode, + RefPosition(unsigned int bbNum, + LsraLocation nodeLocation, + GenTree* treeNode, RefType refType DEBUG_ARG(GenTree* buildNode)) : referent(nullptr) , nextRefPosition(nullptr) diff --git a/src/coreclr/jit/lsraarmarch.cpp b/src/coreclr/jit/lsraarmarch.cpp index 4738fcf33725e6..c2b8b74406584e 100644 --- a/src/coreclr/jit/lsraarmarch.cpp +++ b/src/coreclr/jit/lsraarmarch.cpp @@ -212,7 +212,7 @@ int LinearScan::BuildCall(GenTreeCall* call) RegisterType registerType = call->TypeGet(); -// Set destination candidates for return value of the call. + // Set destination candidates for return value of the call. 
#ifdef TARGET_ARM if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME)) @@ -224,22 +224,22 @@ int LinearScan::BuildCall(GenTreeCall* call) else #endif // TARGET_ARM if (hasMultiRegRetVal) - { - assert(retTypeDesc != nullptr); - dstCandidates = retTypeDesc->GetABIReturnRegs(call->GetUnmanagedCallConv()); - } - else if (varTypeUsesFloatArgReg(registerType)) - { - dstCandidates = RBM_FLOATRET; - } - else if (registerType == TYP_LONG) - { - dstCandidates = RBM_LNGRET; - } - else - { - dstCandidates = RBM_INTRET; - } + { + assert(retTypeDesc != nullptr); + dstCandidates = retTypeDesc->GetABIReturnRegs(call->GetUnmanagedCallConv()); + } + else if (varTypeUsesFloatArgReg(registerType)) + { + dstCandidates = RBM_FLOATRET; + } + else if (registerType == TYP_LONG) + { + dstCandidates = RBM_LNGRET; + } + else + { + dstCandidates = RBM_INTRET; + } // First, count reg args // Each register argument corresponds to one source. diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp index 0eba0d6b19bdc2..4f3d39c76d3ad1 100644 --- a/src/coreclr/jit/lsrabuild.cpp +++ b/src/coreclr/jit/lsrabuild.cpp @@ -78,7 +78,8 @@ RefInfoListNode* RefInfoList::removeListNode(GenTree* node, unsigned multiRegIdx // compiler - The compiler context. // preallocate - The number of nodes to preallocate. 
// -RefInfoListNodePool::RefInfoListNodePool(Compiler* compiler, unsigned preallocate) : m_compiler(compiler) +RefInfoListNodePool::RefInfoListNodePool(Compiler* compiler, unsigned preallocate) + : m_compiler(compiler) { if (preallocate > 0) { @@ -1155,9 +1156,9 @@ bool LinearScan::buildKillPositionsForNode(GenTree* tree, LsraLocation currentLo #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (varTypeIsFloating(varDsc) && !VarSetOps::IsMember(compiler, fpCalleeSaveCandidateVars, varIndex)) - { - continue; - } + { + continue; + } Interval* interval = getIntervalForLocalVar(varIndex); const bool isCallKill = ((killMask == RBM_INT_CALLEE_TRASH) || (killMask == RBM_CALLEE_TRASH)); @@ -2217,7 +2218,7 @@ template void LinearScan::buildIntervals(); // which we will do register allocation. // template -void LinearScan::buildIntervals() +void LinearScan::buildIntervals() { BasicBlock* block; @@ -2473,7 +2474,7 @@ void LinearScan::buildIntervals() assert(isCandidateVar(varDsc)); Interval* interval = getIntervalForLocalVar(varIndex); RefPosition* pos = newRefPosition(interval, currentLoc, RefTypeDummyDef, nullptr, - allRegs(interval->registerType)); + allRegs(interval->registerType)); pos->setRegOptional(true); } JITDUMP("Finished creating dummy definitions\n\n"); @@ -3691,7 +3692,7 @@ void LinearScan::BuildStoreLocDef(GenTreeLclVarCommon* storeLoc, defCandidates = allRegs(type); } #else - defCandidates = allRegs(type); + defCandidates = allRegs(type); #endif // TARGET_X86 RefPosition* def = newRefPosition(varDefInterval, currentLoc + 1, RefTypeDef, storeLoc, defCandidates, index); @@ -3965,114 +3966,114 @@ int LinearScan::BuildReturn(GenTree* tree) else #endif // !defined(TARGET_64BIT) if ((tree->TypeGet() != TYP_VOID) && !op1->isContained()) - { - regMaskTP useCandidates = RBM_NONE; + { + regMaskTP useCandidates = RBM_NONE; #if FEATURE_MULTIREG_RET #ifdef TARGET_ARM64 - if (varTypeIsSIMD(tree) && !op1->IsMultiRegLclVar()) - { - BuildUse(op1, RBM_DOUBLERET); - return 1; - } 
-#endif // TARGET_ARM64 - - if (varTypeIsStruct(tree)) - { - // op1 has to be either a lclvar or a multi-reg returning call - if ((op1->OperGet() == GT_LCL_VAR) && !op1->IsMultiRegLclVar()) + if (varTypeIsSIMD(tree) && !op1->IsMultiRegLclVar()) { - BuildUse(op1, useCandidates); + BuildUse(op1, RBM_DOUBLERET); + return 1; } - else +#endif // TARGET_ARM64 + + if (varTypeIsStruct(tree)) { - noway_assert(op1->IsMultiRegCall() || (op1->IsMultiRegLclVar() && compiler->lvaEnregMultiRegVars)); + // op1 has to be either a lclvar or a multi-reg returning call + if ((op1->OperGet() == GT_LCL_VAR) && !op1->IsMultiRegLclVar()) + { + BuildUse(op1, useCandidates); + } + else + { + noway_assert(op1->IsMultiRegCall() || (op1->IsMultiRegLclVar() && compiler->lvaEnregMultiRegVars)); - ReturnTypeDesc retTypeDesc = compiler->compRetTypeDesc; - const int srcCount = retTypeDesc.GetReturnRegCount(); - assert(op1->GetMultiRegCount(compiler) == static_cast(srcCount)); + ReturnTypeDesc retTypeDesc = compiler->compRetTypeDesc; + const int srcCount = retTypeDesc.GetReturnRegCount(); + assert(op1->GetMultiRegCount(compiler) == static_cast(srcCount)); - // For any source that's coming from a different register file, we need to ensure that - // we reserve the specific ABI register we need. - bool hasMismatchedRegTypes = false; - if (op1->IsMultiRegLclVar()) - { - for (int i = 0; i < srcCount; i++) + // For any source that's coming from a different register file, we need to ensure that + // we reserve the specific ABI register we need. 
+ bool hasMismatchedRegTypes = false; + if (op1->IsMultiRegLclVar()) { - RegisterType srcType = regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)); - RegisterType dstType = regType(retTypeDesc.GetReturnRegType(i)); - if (srcType != dstType) + for (int i = 0; i < srcCount; i++) { - hasMismatchedRegTypes = true; - regMaskTP dstRegMask = - genRegMask(retTypeDesc.GetABIReturnReg(i, compiler->info.compCallConv)); - - if (varTypeUsesIntReg(dstType)) + RegisterType srcType = regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)); + RegisterType dstType = regType(retTypeDesc.GetReturnRegType(i)); + if (srcType != dstType) { - buildInternalIntRegisterDefForNode(tree, dstRegMask); - } + hasMismatchedRegTypes = true; + regMaskTP dstRegMask = + genRegMask(retTypeDesc.GetABIReturnReg(i, compiler->info.compCallConv)); + + if (varTypeUsesIntReg(dstType)) + { + buildInternalIntRegisterDefForNode(tree, dstRegMask); + } #if defined(TARGET_XARCH) && defined(FEATURE_SIMD) - else if (varTypeUsesMaskReg(dstType)) - { - buildInternalMaskRegisterDefForNode(tree, dstRegMask); - } + else if (varTypeUsesMaskReg(dstType)) + { + buildInternalMaskRegisterDefForNode(tree, dstRegMask); + } #endif // TARGET_XARCH && FEATURE_SIMD - else - { - assert(varTypeUsesFloatReg(dstType)); - buildInternalFloatRegisterDefForNode(tree, dstRegMask); + else + { + assert(varTypeUsesFloatReg(dstType)); + buildInternalFloatRegisterDefForNode(tree, dstRegMask); + } } } } - } - for (int i = 0; i < srcCount; i++) - { - // We will build uses of the type of the operand registers/fields, and the codegen - // for return will move as needed. 
- if (!hasMismatchedRegTypes || (regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)) == - regType(retTypeDesc.GetReturnRegType(i)))) + for (int i = 0; i < srcCount; i++) { - BuildUse(op1, genRegMask(retTypeDesc.GetABIReturnReg(i, compiler->info.compCallConv)), i); + // We will build uses of the type of the operand registers/fields, and the codegen + // for return will move as needed. + if (!hasMismatchedRegTypes || (regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)) == + regType(retTypeDesc.GetReturnRegType(i)))) + { + BuildUse(op1, genRegMask(retTypeDesc.GetABIReturnReg(i, compiler->info.compCallConv)), i); + } + else + { + BuildUse(op1, RBM_NONE, i); + } } - else + if (hasMismatchedRegTypes) { - BuildUse(op1, RBM_NONE, i); + buildInternalRegisterUses(); } + return srcCount; } - if (hasMismatchedRegTypes) - { - buildInternalRegisterUses(); - } - return srcCount; } - } - else + else #endif // FEATURE_MULTIREG_RET - { - // Non-struct type return - determine useCandidates - switch (tree->TypeGet()) { - case TYP_VOID: - useCandidates = RBM_NONE; - break; - case TYP_FLOAT: - useCandidates = RBM_FLOATRET; - break; - case TYP_DOUBLE: - // We ONLY want the valid double register in the RBM_DOUBLERET mask. - useCandidates = (RBM_DOUBLERET & RBM_ALLDOUBLE); - break; - case TYP_LONG: - useCandidates = RBM_LNGRET; - break; - default: - useCandidates = RBM_INTRET; - break; + // Non-struct type return - determine useCandidates + switch (tree->TypeGet()) + { + case TYP_VOID: + useCandidates = RBM_NONE; + break; + case TYP_FLOAT: + useCandidates = RBM_FLOATRET; + break; + case TYP_DOUBLE: + // We ONLY want the valid double register in the RBM_DOUBLERET mask. + useCandidates = (RBM_DOUBLERET & RBM_ALLDOUBLE); + break; + case TYP_LONG: + useCandidates = RBM_LNGRET; + break; + default: + useCandidates = RBM_INTRET; + break; + } + BuildUse(op1, useCandidates); + return 1; } - BuildUse(op1, useCandidates); - return 1; } - } // No kills or defs. 
return 0; diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp index 1e7935ee5a2153..ad7d25709ee303 100644 --- a/src/coreclr/jit/lsraxarch.cpp +++ b/src/coreclr/jit/lsraxarch.cpp @@ -1182,33 +1182,33 @@ int LinearScan::BuildCall(GenTreeCall* call) else #endif // TARGET_X86 if (hasMultiRegRetVal) - { - assert(retTypeDesc != nullptr); - dstCandidates = retTypeDesc->GetABIReturnRegs(call->GetUnmanagedCallConv()); - assert((int)genCountBits(dstCandidates) == dstCount); - } - else if (varTypeUsesFloatReg(registerType)) - { + { + assert(retTypeDesc != nullptr); + dstCandidates = retTypeDesc->GetABIReturnRegs(call->GetUnmanagedCallConv()); + assert((int)genCountBits(dstCandidates) == dstCount); + } + else if (varTypeUsesFloatReg(registerType)) + { #ifdef TARGET_X86 - // The return value will be on the X87 stack, and we will need to move it. - dstCandidates = allRegs(registerType); + // The return value will be on the X87 stack, and we will need to move it. + dstCandidates = allRegs(registerType); #else // !TARGET_X86 dstCandidates = RBM_FLOATRET; #endif // !TARGET_X86 - } - else - { - assert(varTypeUsesIntReg(registerType)); - - if (registerType == TYP_LONG) - { - dstCandidates = RBM_LNGRET; } else { - dstCandidates = RBM_INTRET; + assert(varTypeUsesIntReg(registerType)); + + if (registerType == TYP_LONG) + { + dstCandidates = RBM_LNGRET; + } + else + { + dstCandidates = RBM_INTRET; + } } - } // number of args to a call = // callRegArgs + (callargs - placeholders, setup, etc) diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index d342a85a324486..6bd3ede0cc4951 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -344,7 +344,7 @@ GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) // x86: src = float, dst = uint32/int64/uint64 or overflow conversion. 
&& (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT)) #endif - ) + ) { oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE); } @@ -436,7 +436,7 @@ GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) #ifdef TARGET_ARM && !varTypeIsLong(oper->AsCast()->CastOp()) #endif - ) + ) { oper->gtType = TYP_FLOAT; oper->CastToType() = TYP_FLOAT; @@ -2109,8 +2109,8 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call unsigned numArgs = CountArgs(); #ifdef TARGET_X86 -// Compute the maximum number of arguments that can be passed in registers. -// For X86 we handle the varargs and unmanaged calling conventions + // Compute the maximum number of arguments that can be passed in registers. + // For X86 we handle the varargs and unmanaged calling conventions #ifndef UNIX_X86_ABI if (call->gtFlags & GTF_CALL_POP_ARGS) @@ -2513,7 +2513,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call #elif defined(TARGET_X86) || (isStructArg && comp->isTrivialPointerSizedStruct(argSigClass)) #endif - ) + ) { #ifdef TARGET_ARM if (passUsingFloatRegs) @@ -2936,7 +2936,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call // we skip the corresponding floating point register argument intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG); #endif // WINDOWS_AMD64_ABI - // No supported architecture supports partial structs using float registers. + // No supported architecture supports partial structs using float registers. assert(fltArgRegNum <= MAX_FLOAT_REG_ARG); } else @@ -3242,12 +3242,12 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) assert(arg.AbiInfo.GetStackSlotsNumber() == 1); makeOutArgCopy = true; #else // UNIX_AMD64_ABI - // On Unix, structs are always passed by value. - // We only need a copy if we have one of the following: - // - The sizes don't match for a non-lclVar argument. - // - We have a known struct type (e.g. SIMD) that requires multiple registers. 
- // TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not - // actually passed in registers. + // On Unix, structs are always passed by value. + // We only need a copy if we have one of the following: + // - The sizes don't match for a non-lclVar argument. + // - We have a known struct type (e.g. SIMD) that requires multiple registers. + // TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not + // actually passed in registers. if (arg.AbiInfo.IsPassedInRegisters()) { if (argObj->OperIs(GT_BLK)) @@ -3332,9 +3332,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) } #ifdef TARGET_AMD64 else if (!argObj->OperIs(GT_LCL_VAR) || !argObj->TypeIs(TYP_SIMD8)) // Handled by lowering. -#else // !TARGET_ARM64 +#else // !TARGET_ARM64 else -#endif // !TARGET_ARM64 +#endif // !TARGET_ARM64 { // TODO-CQ: perform this transformation in lowering instead of here and // avoid marking enregisterable structs DNER. @@ -3965,18 +3965,18 @@ void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, CallArg* arg) if (!opts.MinOpts()) { found = ForEachHbvBitSet(*fgAvailableOutgoingArgTemps, [&](indexType lclNum) { - LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum); - ClassLayout* layout = varDsc->GetLayout(); - if (!layout->IsBlockLayout() && (layout->GetClassHandle() == copyBlkClass)) - { - tmp = (unsigned)lclNum; - JITDUMP("reusing outgoing struct arg V%02u\n", tmp); - fgAvailableOutgoingArgTemps->clearBit(lclNum); - return HbvWalk::Abort; - } + LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum); + ClassLayout* layout = varDsc->GetLayout(); + if (!layout->IsBlockLayout() && (layout->GetClassHandle() == copyBlkClass)) + { + tmp = (unsigned)lclNum; + JITDUMP("reusing outgoing struct arg V%02u\n", tmp); + fgAvailableOutgoingArgTemps->clearBit(lclNum); + return HbvWalk::Abort; + } - return HbvWalk::Continue; - }) == HbvWalk::Abort; + return HbvWalk::Continue; + }) == HbvWalk::Abort; } // Create the CopyBlk 
tree and insert it. @@ -4019,7 +4019,7 @@ void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, CallArg* arg) GenTree* argNode = call->gtArgs.MakeTmpArgNode(this, arg); // Change the expression to "(tmp=val),tmp" - argNode = gtNewOperNode(GT_COMMA, argNode->TypeGet(), copyBlk, argNode); + argNode = gtNewOperNode(GT_COMMA, argNode->TypeGet(), copyBlk, argNode); #endif // !FEATURE_FIXED_OUT_ARGS @@ -4520,7 +4520,7 @@ GenTree* Compiler::fgMorphLeafLocal(GenTreeLclVarCommon* lclNode) #if FEATURE_IMPLICIT_BYREFS || varDsc->lvIsLastUseCopyOmissionCandidate #endif - ) + ) { lclNode->gtFlags |= GTF_GLOB_REF; } @@ -4594,7 +4594,7 @@ GenTree* Compiler::fgMorphExpandStackArgForVarArgs(GenTreeLclVarCommon* lclNode) { GenTree* data = lclNode->Data(); argNode = lclNode->TypeIs(TYP_STRUCT) ? gtNewStoreBlkNode(lclNode->GetLayout(this), argAddr, data) - : gtNewStoreIndNode(lclNode->TypeGet(), argAddr, data)->AsIndir(); + : gtNewStoreIndNode(lclNode->TypeGet(), argAddr, data)->AsIndir(); } else if (lclNode->OperIsLocalRead()) { @@ -6317,7 +6317,10 @@ void Compiler::fgValidateIRForTailCall(GenTreeCall* call) }; TailCallIRValidatorVisitor(Compiler* comp, GenTreeCall* tailcall) - : GenTreeVisitor(comp), m_tailcall(tailcall), m_lclNum(BAD_VAR_NUM), m_active(false) + : GenTreeVisitor(comp) + , m_tailcall(tailcall) + , m_lclNum(BAD_VAR_NUM) + , m_active(false) { } @@ -7903,7 +7906,7 @@ GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call) // [tmp + vtabOffsOfIndirection] GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL)); - tmpTree1 = gtNewIndir(TYP_I_IMPL, tmpTree1, GTF_IND_NONFAULTING | GTF_IND_INVARIANT); + tmpTree1 = gtNewIndir(TYP_I_IMPL, tmpTree1, GTF_IND_NONFAULTING | GTF_IND_INVARIANT); // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection GenTree* tmpTree2 = @@ -8321,7 +8324,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA #if 
FEATURE_IMPLICIT_BYREFS || lclDsc->lvIsLastUseCopyOmissionCandidate #endif - ) + ) { tree->AddAllEffectsFlags(GTF_GLOB_REF); } @@ -8566,8 +8569,8 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA // Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization // #else // TARGET_XARCH - // If this is an unsigned long mod with a constant divisor, - // then don't morph to a helper call - it can be done faster inline using idiv. + // If this is an unsigned long mod with a constant divisor, + // then don't morph to a helper call - it can be done faster inline using idiv. noway_assert(op2); if ((typ == TYP_LONG) && opts.OptimizationEnabled()) @@ -11545,8 +11548,8 @@ GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret) #if defined(TARGET_64BIT) bool canFold = (indSize == lclVarSize); #else // !TARGET_64BIT - // TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST - // long<->double` there. + // TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST + // long<->double` there. bool canFold = (indSize == lclVarSize) && (lclVarSize <= REGSIZE_BYTES); #endif @@ -12571,12 +12574,12 @@ GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac) bool optAssertionPropDone = false; -/*------------------------------------------------------------------------- - * fgMorphTree() can potentially replace a tree with another, and the - * caller has to store the return value correctly. - * Turn this on to always make copy of "tree" here to shake out - * hidden/unupdated references. - */ + /*------------------------------------------------------------------------- + * fgMorphTree() can potentially replace a tree with another, and the + * caller has to store the return value correctly. + * Turn this on to always make copy of "tree" here to shake out + * hidden/unupdated references. 
+ */ #ifdef DEBUG @@ -13499,7 +13502,8 @@ void Compiler::fgMorphStmtBlockOps(BasicBlock* block, Statement* stmt) DoPostOrder = true, }; - Visitor(Compiler* comp) : GenTreeVisitor(comp) + Visitor(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -14975,7 +14979,8 @@ PhaseStatus Compiler::fgMarkImplicitByRefCopyOmissionCandidates() UseExecutionOrder = true, }; - Visitor(Compiler* comp) : GenTreeVisitor(comp) + Visitor(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -15187,9 +15192,9 @@ PhaseStatus Compiler::fgRetypeImplicitByRefArgs() { // Insert IR that initializes the temp from the parameter. fgEnsureFirstBBisScratch(); - GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF); - GenTree* data = (varDsc->TypeGet() == TYP_STRUCT) ? gtNewBlkIndir(varDsc->GetLayout(), addr) - : gtNewIndir(varDsc->TypeGet(), addr); + GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF); + GenTree* data = (varDsc->TypeGet() == TYP_STRUCT) ? gtNewBlkIndir(varDsc->GetLayout(), addr) + : gtNewIndir(varDsc->TypeGet(), addr); GenTree* store = gtNewStoreLclVarNode(newLclNum, data); fgNewStmtAtBeg(fgFirstBB, store); } @@ -15560,7 +15565,10 @@ bool Compiler::fgMorphArrayOpsStmt(MorphMDArrayTempCache* pTempCache, BasicBlock }; MorphMDArrayVisitor(Compiler* compiler, BasicBlock* block, MorphMDArrayTempCache* pTempCache) - : GenTreeVisitor(compiler), m_changed(false), m_block(block), m_pTempCache(pTempCache) + : GenTreeVisitor(compiler) + , m_changed(false) + , m_block(block) + , m_pTempCache(pTempCache) { } diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp index d7fa5821eb9dbc..84c30e64621a2f 100644 --- a/src/coreclr/jit/morphblock.cpp +++ b/src/coreclr/jit/morphblock.cpp @@ -92,7 +92,8 @@ GenTree* MorphInitBlockHelper::MorphInitBlock(Compiler* comp, GenTree* tree) // Most class members are initialized via in-class member initializers. 
// MorphInitBlockHelper::MorphInitBlockHelper(Compiler* comp, GenTree* store, bool initBlock = true) - : m_comp(comp), m_initBlock(initBlock) + : m_comp(comp) + , m_initBlock(initBlock) { assert(store->OperIsStore()); assert((m_initBlock == store->OperIsInitBlkOp()) && (!m_initBlock == store->OperIsCopyBlkOp())); @@ -530,8 +531,8 @@ GenTree* MorphInitBlockHelper::EliminateCommas(GenTree** commaPool) { *commaPool = nullptr; - GenTree* sideEffects = nullptr; - auto addSideEffect = [&sideEffects](GenTree* sideEff) { + GenTree* sideEffects = nullptr; + auto addSideEffect = [&sideEffects](GenTree* sideEff) { sideEff->gtNext = sideEffects; sideEffects = sideEff; }; @@ -645,7 +646,8 @@ GenTree* MorphCopyBlockHelper::MorphCopyBlock(Compiler* comp, GenTree* tree) // Notes: // Most class members are initialized via in-class member initializers. // -MorphCopyBlockHelper::MorphCopyBlockHelper(Compiler* comp, GenTree* store) : MorphInitBlockHelper(comp, store, false) +MorphCopyBlockHelper::MorphCopyBlockHelper(Compiler* comp, GenTree* store) + : MorphInitBlockHelper(comp, store, false) { } diff --git a/src/coreclr/jit/objectalloc.cpp b/src/coreclr/jit/objectalloc.cpp index a86039dc333840..0af5f4ba7a9929 100644 --- a/src/coreclr/jit/objectalloc.cpp +++ b/src/coreclr/jit/objectalloc.cpp @@ -163,7 +163,8 @@ void ObjectAllocator::MarkEscapingVarsAndBuildConnGraph() }; BuildConnGraphVisitor(ObjectAllocator* allocator) - : GenTreeVisitor(allocator->comp), m_allocator(allocator) + : GenTreeVisitor(allocator->comp) + , m_allocator(allocator) { } @@ -504,8 +505,8 @@ unsigned int ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* a assert(m_AnalysisDone); const bool shortLifetime = false; - const unsigned int lclNum = comp->lvaGrabTemp(shortLifetime DEBUGARG("MorphAllocObjNodeIntoStackAlloc temp")); - const int unsafeValueClsCheck = true; + const unsigned int lclNum = comp->lvaGrabTemp(shortLifetime DEBUGARG("MorphAllocObjNodeIntoStackAlloc temp")); + const int 
unsafeValueClsCheck = true; comp->lvaSetStruct(lclNum, allocObj->gtAllocObjClsHnd, unsafeValueClsCheck); // Initialize the object memory if necessary. @@ -766,7 +767,8 @@ void ObjectAllocator::RewriteUses() }; RewriteUsesVisitor(ObjectAllocator* allocator) - : GenTreeVisitor(allocator->comp), m_allocator(allocator) + : GenTreeVisitor(allocator->comp) + , m_allocator(allocator) { } diff --git a/src/coreclr/jit/objectalloc.h b/src/coreclr/jit/objectalloc.h index f4a56cb4ca39d9..07307161da002b 100644 --- a/src/coreclr/jit/objectalloc.h +++ b/src/coreclr/jit/objectalloc.h @@ -47,21 +47,21 @@ class ObjectAllocator final : public Phase virtual PhaseStatus DoPhase() override; private: - bool CanAllocateLclVarOnStack(unsigned int lclNum, CORINFO_CLASS_HANDLE clsHnd); - bool CanLclVarEscape(unsigned int lclNum); - void MarkLclVarAsPossiblyStackPointing(unsigned int lclNum); - void MarkLclVarAsDefinitelyStackPointing(unsigned int lclNum); - bool MayLclVarPointToStack(unsigned int lclNum); - bool DoesLclVarPointToStack(unsigned int lclNum); - void DoAnalysis(); - void MarkLclVarAsEscaping(unsigned int lclNum); - void MarkEscapingVarsAndBuildConnGraph(); - void AddConnGraphEdge(unsigned int sourceLclNum, unsigned int targetLclNum); - void ComputeEscapingNodes(BitVecTraits* bitVecTraits, BitVec& escapingNodes); - void ComputeStackObjectPointers(BitVecTraits* bitVecTraits); - bool MorphAllocObjNodes(); - void RewriteUses(); - GenTree* MorphAllocObjNodeIntoHelperCall(GenTreeAllocObj* allocObj); + bool CanAllocateLclVarOnStack(unsigned int lclNum, CORINFO_CLASS_HANDLE clsHnd); + bool CanLclVarEscape(unsigned int lclNum); + void MarkLclVarAsPossiblyStackPointing(unsigned int lclNum); + void MarkLclVarAsDefinitelyStackPointing(unsigned int lclNum); + bool MayLclVarPointToStack(unsigned int lclNum); + bool DoesLclVarPointToStack(unsigned int lclNum); + void DoAnalysis(); + void MarkLclVarAsEscaping(unsigned int lclNum); + void MarkEscapingVarsAndBuildConnGraph(); + void 
AddConnGraphEdge(unsigned int sourceLclNum, unsigned int targetLclNum); + void ComputeEscapingNodes(BitVecTraits* bitVecTraits, BitVec& escapingNodes); + void ComputeStackObjectPointers(BitVecTraits* bitVecTraits); + bool MorphAllocObjNodes(); + void RewriteUses(); + GenTree* MorphAllocObjNodeIntoHelperCall(GenTreeAllocObj* allocObj); unsigned int MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* allocObj, BasicBlock* block, Statement* stmt); struct BuildConnGraphVisitorCallbackData; bool CanLclVarEscapeViaParentStack(ArrayStack* parentStack, unsigned int lclNum); diff --git a/src/coreclr/jit/optcse.cpp b/src/coreclr/jit/optcse.cpp index cb17b65035cd5f..acaed299aad42a 100644 --- a/src/coreclr/jit/optcse.cpp +++ b/src/coreclr/jit/optcse.cpp @@ -204,7 +204,9 @@ void Compiler::optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData) DoPreOrder = true, }; - MaskDataWalker(Compiler* comp, optCSE_MaskData* maskData) : GenTreeVisitor(comp), m_maskData(maskData) + MaskDataWalker(Compiler* comp, optCSE_MaskData* maskData) + : GenTreeVisitor(comp) + , m_maskData(maskData) { } @@ -396,7 +398,9 @@ void CSEdsc::ComputeNumLocals(Compiler* compiler) }; LocalCountingVisitor(Compiler* compiler) - : GenTreeVisitor(compiler), m_count(0), m_occurrences(0) + : GenTreeVisitor(compiler) + , m_count(0) + , m_occurrences(0) { } @@ -1186,7 +1190,9 @@ class CSE_DataFlow EXPSET_TP m_preMergeOut; public: - CSE_DataFlow(Compiler* pCompiler) : m_comp(pCompiler), m_preMergeOut(BitVecOps::UninitVal()) + CSE_DataFlow(Compiler* pCompiler) + : m_comp(pCompiler) + , m_preMergeOut(BitVecOps::UninitVal()) { } @@ -1742,7 +1748,8 @@ void Compiler::optValnumCSE_Availability() // Notes: // This creates the basic CSE heuristic. It never does any CSEs. 
// -CSE_HeuristicCommon::CSE_HeuristicCommon(Compiler* pCompiler) : m_pCompiler(pCompiler) +CSE_HeuristicCommon::CSE_HeuristicCommon(Compiler* pCompiler) + : m_pCompiler(pCompiler) { m_addCSEcount = 0; /* Count of the number of LclVars for CSEs that we added */ sortTab = nullptr; @@ -2074,7 +2081,8 @@ void CSE_HeuristicCommon::DumpMetrics() // This creates the random CSE heuristic. It does CSEs randomly, with some // predetermined likelihood (set by config or by stress). // -CSE_HeuristicRandom::CSE_HeuristicRandom(Compiler* pCompiler) : CSE_HeuristicCommon(pCompiler) +CSE_HeuristicRandom::CSE_HeuristicRandom(Compiler* pCompiler) + : CSE_HeuristicCommon(pCompiler) { m_cseRNG.Init(m_pCompiler->info.compMethodHash() ^ JitConfig.JitRandomCSE()); } @@ -2200,7 +2208,8 @@ void CSE_HeuristicRandom::ConsiderCandidates() // This creates the replay CSE heuristic. It does CSEs specifed by // the ArrayConfig parsing of JitReplayCSE. // -CSE_HeuristicReplay::CSE_HeuristicReplay(Compiler* pCompiler) : CSE_HeuristicCommon(pCompiler) +CSE_HeuristicReplay::CSE_HeuristicReplay(Compiler* pCompiler) + : CSE_HeuristicCommon(pCompiler) { } @@ -2292,7 +2301,8 @@ double CSE_HeuristicParameterized::s_defaultParameters[CSE_HeuristicParameterize // Arguments; // pCompiler - compiler instance // -CSE_HeuristicParameterized::CSE_HeuristicParameterized(Compiler* pCompiler) : CSE_HeuristicCommon(pCompiler) +CSE_HeuristicParameterized::CSE_HeuristicParameterized(Compiler* pCompiler) + : CSE_HeuristicCommon(pCompiler) { // Default parameter values... 
// @@ -2605,7 +2615,7 @@ void CSE_HeuristicParameterized::GetFeatures(CSEdsc* cse, double* features) if (!isLiveAcrossCallLSRA) { unsigned count = 0; - for (BasicBlock *block = minPostorderBlock; + for (BasicBlock* block = minPostorderBlock; block != nullptr && block != maxPostorderBlock && count < blockSpread; block = block->Next(), count++) { if (block->HasFlag(BBF_HAS_CALL)) @@ -2986,7 +2996,10 @@ void CSE_HeuristicParameterized::DumpChoices(ArrayStack& choices, CSEdsc // Uses parameters from JitRLCSE to drive a deterministic greedy policy // CSE_HeuristicRL::CSE_HeuristicRL(Compiler* pCompiler) - : CSE_HeuristicParameterized(pCompiler), m_alpha(0.0), m_updateParameters(false), m_greedy(false) + : CSE_HeuristicParameterized(pCompiler) + , m_alpha(0.0) + , m_updateParameters(false) + , m_greedy(false) { // Set up the random state // @@ -3656,7 +3669,8 @@ CSE_HeuristicRL::Choice* CSE_HeuristicRL::FindChoice(CSEdsc* dsc, ArrayStack& choices); + void BuildChoices(ArrayStack& choices); Choice& ChooseGreedy(ArrayStack& choices, bool recompute); @@ -227,12 +231,12 @@ class CSE_HeuristicRL : public CSE_HeuristicParameterized bool m_updateParameters; bool m_greedy; - Choice& ChooseSoftmax(ArrayStack& choices); - void Softmax(ArrayStack& choices); - void SoftmaxPolicy(); - void UpdateParametersStep(CSEdsc* dsc, ArrayStack& choices, double reward, double* delta); - void UpdateParameters(); - Choice* FindChoice(CSEdsc* dsc, ArrayStack& choices); + Choice& ChooseSoftmax(ArrayStack& choices); + void Softmax(ArrayStack& choices); + void SoftmaxPolicy(); + void UpdateParametersStep(CSEdsc* dsc, ArrayStack& choices, double reward, double* delta); + void UpdateParameters(); + Choice* FindChoice(CSEdsc* dsc, ArrayStack& choices); const char* Name() const; public: diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index d456cb3793f5ef..1e5d5a00b107c0 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -74,10 
+74,10 @@ class OptBoolsDsc private: Statement* optOptimizeBoolsChkBlkCond(); - GenTree* optIsBoolComp(OptTestInfo* pOptTest); - bool optOptimizeBoolsChkTypeCostCond(); - void optOptimizeBoolsUpdateTrees(); - bool FindCompareChain(GenTree* condition, bool* isTestCondition); + GenTree* optIsBoolComp(OptTestInfo* pOptTest); + bool optOptimizeBoolsChkTypeCostCond(); + void optOptimizeBoolsUpdateTrees(); + bool FindCompareChain(GenTree* condition, bool* isTestCondition); }; //----------------------------------------------------------------------------- diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index c7760c4241c361..289e37b16fc4e8 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -37,7 +37,8 @@ void Compiler::optInit() optCSEunmarks = 0; } -DataFlow::DataFlow(Compiler* pCompiler) : m_pCompiler(pCompiler) +DataFlow::DataFlow(Compiler* pCompiler) + : m_pCompiler(pCompiler) { } @@ -889,7 +890,7 @@ bool Compiler::optComputeLoopRep(int constInit, switch (iterOperType) { -// For small types, the iteration operator will narrow these values if big + // For small types, the iteration operator will narrow these values if big #define INIT_ITER_BY_TYPE(type) \ constInitX = (type)constInit; \ @@ -908,7 +909,7 @@ bool Compiler::optComputeLoopRep(int constInit, INIT_ITER_BY_TYPE(unsigned short); break; - // For the big types, 32 bit arithmetic is performed + // For the big types, 32 bit arithmetic is performed case TYP_INT: if (unsTest) @@ -1795,7 +1796,9 @@ void Compiler::optReplaceScalarUsesWithConst(BasicBlock* block, unsigned lclNum, bool MadeChanges = false; ReplaceVisitor(Compiler* comp, unsigned lclNum, ssize_t cnsVal) - : GenTreeVisitor(comp), m_lclNum(lclNum), m_cnsVal(cnsVal) + : GenTreeVisitor(comp) + , m_lclNum(lclNum) + , m_cnsVal(cnsVal) { } @@ -1841,7 +1844,8 @@ Compiler::OptInvertCountTreeInfoType Compiler::optInvertCountTreeInfo(GenTree* t Compiler::OptInvertCountTreeInfoType Result = {}; - 
CountTreeInfoVisitor(Compiler* comp) : GenTreeVisitor(comp) + CountTreeInfoVisitor(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -3516,8 +3520,8 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu return true; - /* Operands that are in memory can usually be narrowed - simply by changing their gtType */ + /* Operands that are in memory can usually be narrowed + simply by changing their gtType */ case GT_LCL_VAR: /* We only allow narrowing long -> int for a GT_LCL_VAR */ @@ -3775,7 +3779,8 @@ void Compiler::optRecordSsaUses(GenTree* tree, BasicBlock* block) }; SsaRecordingVisitor(Compiler* compiler, BasicBlock* block) - : GenTreeVisitor(compiler), m_block(block) + : GenTreeVisitor(compiler) + , m_block(block) { } @@ -4612,7 +4617,11 @@ void Compiler::optHoistLoopBlocks(FlowGraphNaturalLoop* loop, const char* m_failReason; #endif - Value(GenTree* node) : m_node(node), m_hoistable(false), m_cctorDependent(false), m_invariant(false) + Value(GenTree* node) + : m_node(node) + , m_hoistable(false) + , m_cctorDependent(false) + , m_invariant(false) { #ifdef DEBUG m_failReason = "unset"; @@ -4812,9 +4821,9 @@ void Compiler::optHoistLoopBlocks(FlowGraphNaturalLoop* loop, // To be invariant the variable must be in SSA ... bool isInvariant = lclVar->HasSsaName(); // and the SSA definition must be outside the loop we're hoisting from ... - isInvariant = isInvariant && - !m_loop->ContainsBlock( - m_compiler->lvaGetDesc(lclNum)->GetPerSsaData(lclVar->GetSsaNum())->GetBlock()); + isInvariant = + isInvariant && !m_loop->ContainsBlock( + m_compiler->lvaGetDesc(lclNum)->GetPerSsaData(lclVar->GetSsaNum())->GetBlock()); // and the VN of the tree is considered invariant as well. 
// @@ -5467,7 +5476,9 @@ PhaseStatus Compiler::fgCanonicalizeFirstBB() return PhaseStatus::MODIFIED_EVERYTHING; } -LoopSideEffects::LoopSideEffects() : VarInOut(VarSetOps::UninitVal()), VarUseDef(VarSetOps::UninitVal()) +LoopSideEffects::LoopSideEffects() + : VarInOut(VarSetOps::UninitVal()) + , VarUseDef(VarSetOps::UninitVal()) { for (MemoryKind mk : allMemoryKinds()) { diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index 5a273679067748..71622ecfc3d759 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -34,7 +34,9 @@ class PatchpointTransformer Compiler* compiler; public: - PatchpointTransformer(Compiler* compiler) : ppCounterLclNum(BAD_VAR_NUM), compiler(compiler) + PatchpointTransformer(Compiler* compiler) + : ppCounterLclNum(BAD_VAR_NUM) + , compiler(compiler) { } diff --git a/src/coreclr/jit/phase.h b/src/coreclr/jit/phase.h index 6288d596729daf..0f3d461c2b13f1 100644 --- a/src/coreclr/jit/phase.h +++ b/src/coreclr/jit/phase.h @@ -34,14 +34,17 @@ class Phase virtual void Run(); protected: - Phase(Compiler* _compiler, Phases _phase) : comp(_compiler), m_name(nullptr), m_phase(_phase) + Phase(Compiler* _compiler, Phases _phase) + : comp(_compiler) + , m_name(nullptr) + , m_phase(_phase) { m_name = PhaseNames[_phase]; } virtual void PrePhase(); virtual PhaseStatus DoPhase() = 0; - virtual void PostPhase(PhaseStatus status); + virtual void PostPhase(PhaseStatus status); Compiler* comp; const char* m_name; @@ -54,7 +57,9 @@ template class ActionPhase final : public Phase { public: - ActionPhase(Compiler* _compiler, Phases _phase, A _action) : Phase(_compiler, _phase), action(_action) + ActionPhase(Compiler* _compiler, Phases _phase, A _action) + : Phase(_compiler, _phase) + , action(_action) { } @@ -84,7 +89,8 @@ class CompilerPhase final : public Phase { public: CompilerPhase(Compiler* _compiler, Phases _phase, void (Compiler::*_action)()) - : Phase(_compiler, _phase), action(_action) + : 
Phase(_compiler, _phase) + , action(_action) { } @@ -114,7 +120,8 @@ class CompilerPhaseWithStatus final : public Phase { public: CompilerPhaseWithStatus(Compiler* _compiler, Phases _phase, PhaseStatus (Compiler::*_action)()) - : Phase(_compiler, _phase), action(_action) + : Phase(_compiler, _phase) + , action(_action) { } diff --git a/src/coreclr/jit/promotion.cpp b/src/coreclr/jit/promotion.cpp index 2f7b1e0b31372c..e02a5f0e06bab3 100644 --- a/src/coreclr/jit/promotion.cpp +++ b/src/coreclr/jit/promotion.cpp @@ -80,7 +80,9 @@ struct Access #endif Access(unsigned offset, var_types accessType, ClassLayout* layout) - : Layout(layout), Offset(offset), AccessType(accessType) + : Layout(layout) + , Offset(offset) + , AccessType(accessType) { } @@ -220,7 +222,8 @@ bool AggregateInfo::OverlappingReplacements(unsigned offset, // numLocals - Number of locals to support in the map // AggregateInfoMap::AggregateInfoMap(CompAllocator allocator, unsigned numLocals) - : m_aggregates(allocator), m_numLocals(numLocals) + : m_aggregates(allocator) + , m_numLocals(numLocals) { m_lclNumToAggregateIndex = new (allocator) unsigned[numLocals]; for (unsigned i = 0; i < numLocals; i++) @@ -277,7 +280,9 @@ struct PrimitiveAccess unsigned Offset; var_types AccessType; - PrimitiveAccess(unsigned offset, var_types accessType) : Offset(offset), AccessType(accessType) + PrimitiveAccess(unsigned offset, var_types accessType) + : Offset(offset) + , AccessType(accessType) { } }; @@ -290,7 +295,8 @@ class LocalUses public: LocalUses(Compiler* comp) - : m_accesses(comp->getAllocator(CMK_Promotion)), m_inducedAccesses(comp->getAllocator(CMK_Promotion)) + : m_accesses(comp->getAllocator(CMK_Promotion)) + , m_inducedAccesses(comp->getAllocator(CMK_Promotion)) { } @@ -973,7 +979,7 @@ class LocalsUseVisitor : public GenTreeVisitor , m_prom(prom) , m_candidateStores(prom->m_compiler->getAllocator(CMK_Promotion)) { - m_uses = new (prom->m_compiler, CMK_Promotion) LocalUses*[prom->m_compiler->lvaCount]{}; 
+ m_uses = new (prom->m_compiler, CMK_Promotion) LocalUses* [prom->m_compiler->lvaCount] {}; } //------------------------------------------------------------------------ @@ -2269,7 +2275,9 @@ void ReplaceVisitor::InsertPreStatementWriteBacks() DoPreOrder = true, }; - Visitor(Compiler* comp, ReplaceVisitor* replacer) : GenTreeVisitor(comp), m_replacer(replacer) + Visitor(Compiler* comp, ReplaceVisitor* replacer) + : GenTreeVisitor(comp) + , m_replacer(replacer) { } @@ -2716,8 +2724,8 @@ void ReplaceVisitor::WriteBackBeforeUse(GenTree** use, unsigned lcl, unsigned of GenTreeOp* comma = m_compiler->gtNewOperNode(GT_COMMA, (*use)->TypeGet(), Promotion::CreateWriteBack(m_compiler, lcl, rep), *use); - *use = comma; - use = &comma->gtOp2; + *use = comma; + use = &comma->gtOp2; ClearNeedsWriteBack(rep); m_madeChanges = true; diff --git a/src/coreclr/jit/promotion.h b/src/coreclr/jit/promotion.h index c421b019bc8f99..89097d78cd1061 100644 --- a/src/coreclr/jit/promotion.h +++ b/src/coreclr/jit/promotion.h @@ -31,7 +31,9 @@ struct Replacement const char* Description = ""; #endif - Replacement(unsigned offset, var_types accessType) : Offset(offset), AccessType(accessType) + Replacement(unsigned offset, var_types accessType) + : Offset(offset) + , AccessType(accessType) { } @@ -55,7 +57,9 @@ class StructSegments { } - Segment(unsigned start, unsigned end) : Start(start), End(end) + Segment(unsigned start, unsigned end) + : Start(start) + , End(end) { } @@ -69,7 +73,8 @@ class StructSegments jitstd::vector m_segments; public: - explicit StructSegments(CompAllocator allocator) : m_segments(allocator) + explicit StructSegments(CompAllocator allocator) + : m_segments(allocator) { } @@ -96,7 +101,10 @@ struct AggregateInfo // Max offset in the struct local of the unpromoted part. 
unsigned UnpromotedMax = 0; - AggregateInfo(CompAllocator alloc, unsigned lclNum) : Replacements(alloc), LclNum(lclNum), Unpromoted(alloc) + AggregateInfo(CompAllocator alloc, unsigned lclNum) + : Replacements(alloc) + , LclNum(lclNum) + , Unpromoted(alloc) { } @@ -115,7 +123,7 @@ class AggregateInfoMap public: AggregateInfoMap(CompAllocator allocator, unsigned numLocals); - void Add(AggregateInfo* agg); + void Add(AggregateInfo* agg); AggregateInfo* Lookup(unsigned lclNum); jitstd::vector::iterator begin() @@ -146,10 +154,10 @@ class Promotion StructSegments SignificantSegments(ClassLayout* layout); - void ExplicitlyZeroInitReplacementLocals(unsigned lclNum, - const jitstd::vector& replacements, - Statement** prevStmt); - void InsertInitStatement(Statement** prevStmt, GenTree* tree); + void ExplicitlyZeroInitReplacementLocals(unsigned lclNum, + const jitstd::vector& replacements, + Statement** prevStmt); + void InsertInitStatement(Statement** prevStmt, GenTree* tree); static GenTree* CreateWriteBack(Compiler* compiler, unsigned structLclNum, const Replacement& replacement); static GenTree* CreateReadBack(Compiler* compiler, unsigned structLclNum, const Replacement& replacement); @@ -198,11 +206,12 @@ class Promotion bool HaveCandidateLocals(); - static bool IsCandidateForPhysicalPromotion(LclVarDsc* dsc); + static bool IsCandidateForPhysicalPromotion(LclVarDsc* dsc); static GenTree* EffectiveUser(Compiler::GenTreeStack& ancestors); public: - explicit Promotion(Compiler* compiler) : m_compiler(compiler) + explicit Promotion(Compiler* compiler) + : m_compiler(compiler) { } @@ -218,12 +227,15 @@ class StructDeaths friend class PromotionLiveness; private: - StructDeaths(BitVec deaths, AggregateInfo* agg) : m_deaths(deaths), m_aggregate(agg) + StructDeaths(BitVec deaths, AggregateInfo* agg) + : m_deaths(deaths) + , m_aggregate(agg) { } public: - StructDeaths() : m_deaths(BitVecOps::UninitVal()) + StructDeaths() + : m_deaths(BitVecOps::UninitVal()) { } @@ -236,26 
+248,28 @@ struct BasicBlockLiveness; // Class to compute and track liveness information pertaining promoted structs. class PromotionLiveness { - Compiler* m_compiler; - AggregateInfoMap& m_aggregates; - BitVecTraits* m_bvTraits = nullptr; - unsigned* m_structLclToTrackedIndex = nullptr; - unsigned m_numVars = 0; - BasicBlockLiveness* m_bbInfo = nullptr; - bool m_hasPossibleBackEdge = false; - BitVec m_liveIn; - BitVec m_ehLiveVars; + Compiler* m_compiler; + AggregateInfoMap& m_aggregates; + BitVecTraits* m_bvTraits = nullptr; + unsigned* m_structLclToTrackedIndex = nullptr; + unsigned m_numVars = 0; + BasicBlockLiveness* m_bbInfo = nullptr; + bool m_hasPossibleBackEdge = false; + BitVec m_liveIn; + BitVec m_ehLiveVars; JitHashTable, BitVec> m_aggDeaths; public: PromotionLiveness(Compiler* compiler, AggregateInfoMap& aggregates) - : m_compiler(compiler), m_aggregates(aggregates), m_aggDeaths(compiler->getAllocator(CMK_Promotion)) + : m_compiler(compiler) + , m_aggregates(aggregates) + , m_aggDeaths(compiler->getAllocator(CMK_Promotion)) { } - void Run(); - bool IsReplacementLiveIn(BasicBlock* bb, unsigned structLcl, unsigned replacement); - bool IsReplacementLiveOut(BasicBlock* bb, unsigned structLcl, unsigned replacement); + void Run(); + bool IsReplacementLiveIn(BasicBlock* bb, unsigned structLcl, unsigned replacement); + bool IsReplacementLiveOut(BasicBlock* bb, unsigned structLcl, unsigned replacement); StructDeaths GetDeathsForStructLocal(GenTreeLclVarCommon* use); private: @@ -297,7 +311,10 @@ class ReplaceVisitor : public GenTreeVisitor }; ReplaceVisitor(Promotion* prom, AggregateInfoMap& aggregates, PromotionLiveness* liveness) - : GenTreeVisitor(prom->m_compiler), m_promotion(prom), m_aggregates(aggregates), m_liveness(liveness) + : GenTreeVisitor(prom->m_compiler) + , m_promotion(prom) + , m_aggregates(aggregates) + , m_liveness(liveness) { } diff --git a/src/coreclr/jit/promotiondecomposition.cpp b/src/coreclr/jit/promotiondecomposition.cpp index 
18ac84c58e4f2b..d4f71b99835208 100644 --- a/src/coreclr/jit/promotiondecomposition.cpp +++ b/src/coreclr/jit/promotiondecomposition.cpp @@ -275,7 +275,9 @@ class DecompositionPlan var_types PrimitiveType; RemainderStrategy(int type, unsigned primitiveOffset = 0, var_types primitiveType = TYP_UNDEF) - : Type(type), PrimitiveOffset(primitiveOffset), PrimitiveType(primitiveType) + : Type(type) + , PrimitiveOffset(primitiveOffset) + , PrimitiveType(primitiveType) { } }; @@ -727,8 +729,8 @@ class DecompositionPlan // remainderStrategy - The strategy we are using for the remainder // dump - Whether to JITDUMP decisions made // - bool CanSkipEntry(const Entry& entry, - const StructDeaths& deaths, + bool CanSkipEntry(const Entry& entry, + const StructDeaths& deaths, const RemainderStrategy& remainderStrategy DEBUGARG(bool dump = false)) { if (entry.ToReplacement != nullptr) diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp index 475df2d659cabc..eae6d627935837 100644 --- a/src/coreclr/jit/rangecheck.cpp +++ b/src/coreclr/jit/rangecheck.cpp @@ -461,7 +461,9 @@ bool RangeCheck::IsMonotonicallyIncreasing(GenTree* expr, bool rejectNegativeCon } // Remove hashtable entry for expr when we exit the present scope. - auto code = [this, expr] { m_pSearchPath->Remove(expr); }; + auto code = [this, expr] { + m_pSearchPath->Remove(expr); + }; jitstd::utility::scoped_code finally(code); if (m_pSearchPath->GetCount() > MAX_SEARCH_DEPTH) @@ -1123,7 +1125,7 @@ Range RangeCheck::GetRangeFromType(var_types type) // Compute the range for a local var definition. 
Range RangeCheck::ComputeRangeForLocalDef(BasicBlock* block, GenTreeLclVarCommon* lcl, - bool monIncreasing DEBUGARG(int indent)) + bool monIncreasing DEBUGARG(int indent)) { LclSsaVarDsc* ssaDef = GetSsaDefStore(lcl); if (ssaDef == nullptr) @@ -1566,7 +1568,10 @@ struct MapMethodDefsData BasicBlock* block; Statement* stmt; - MapMethodDefsData(RangeCheck* rc, BasicBlock* block, Statement* stmt) : rc(rc), block(block), stmt(stmt) + MapMethodDefsData(RangeCheck* rc, BasicBlock* block, Statement* stmt) + : rc(rc) + , block(block) + , stmt(stmt) { } }; diff --git a/src/coreclr/jit/rangecheck.h b/src/coreclr/jit/rangecheck.h index 098e1cc62b0d7a..cd4193f1e2fb73 100644 --- a/src/coreclr/jit/rangecheck.h +++ b/src/coreclr/jit/rangecheck.h @@ -83,20 +83,28 @@ struct Limit keUnknown, // The limit could not be determined. }; - Limit() : type(keUndef) + Limit() + : type(keUndef) { } - Limit(LimitType type) : type(type) + Limit(LimitType type) + : type(type) { } - Limit(LimitType type, int cns) : cns(cns), vn(ValueNumStore::NoVN), type(type) + Limit(LimitType type, int cns) + : cns(cns) + , vn(ValueNumStore::NoVN) + , type(type) { assert(type == keConstant); } - Limit(LimitType type, ValueNum vn, int cns) : cns(cns), vn(vn), type(type) + Limit(LimitType type, ValueNum vn, int cns) + : cns(cns) + , vn(vn) + , type(type) { assert(type == keBinOpArray); } @@ -242,11 +250,15 @@ struct Range Limit uLimit; Limit lLimit; - Range(const Limit& limit) : uLimit(limit), lLimit(limit) + Range(const Limit& limit) + : uLimit(limit) + , lLimit(limit) { } - Range(const Limit& lLimit, const Limit& uLimit) : uLimit(uLimit), lLimit(lLimit) + Range(const Limit& lLimit, const Limit& uLimit) + : uLimit(uLimit) + , lLimit(lLimit) { } @@ -586,7 +598,10 @@ class RangeCheck BasicBlock* block; Statement* stmt; GenTreeLclVarCommon* tree; - Location(BasicBlock* block, Statement* stmt, GenTreeLclVarCommon* tree) : block(block), stmt(stmt), tree(tree) + Location(BasicBlock* block, Statement* stmt, 
GenTreeLclVarCommon* tree) + : block(block) + , stmt(stmt) + , tree(tree) { } diff --git a/src/coreclr/jit/rationalize.cpp b/src/coreclr/jit/rationalize.cpp index cb54b617a6a9c8..d9b69b8df5aa27 100644 --- a/src/coreclr/jit/rationalize.cpp +++ b/src/coreclr/jit/rationalize.cpp @@ -383,7 +383,8 @@ PhaseStatus Rationalizer::DoPhase() }; RationalizeVisitor(Rationalizer& rationalizer) - : GenTreeVisitor(rationalizer.comp), m_rationalizer(rationalizer) + : GenTreeVisitor(rationalizer.comp) + , m_rationalizer(rationalizer) { } diff --git a/src/coreclr/jit/rationalize.h b/src/coreclr/jit/rationalize.h index 65264f8294582c..a8651b2e5b8c79 100644 --- a/src/coreclr/jit/rationalize.h +++ b/src/coreclr/jit/rationalize.h @@ -55,7 +55,8 @@ class Rationalizer final : public Phase Compiler::fgWalkResult RewriteNode(GenTree** useEdge, Compiler::GenTreeStack& parents); }; -inline Rationalizer::Rationalizer(Compiler* _comp) : Phase(_comp, PHASE_RATIONALIZE) +inline Rationalizer::Rationalizer(Compiler* _comp) + : Phase(_comp, PHASE_RATIONALIZE) { } diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index e8b346faccc376..e7569e86c2ed33 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -24,7 +24,9 @@ PhaseStatus Compiler::optRedundantBranches() public: bool madeChanges; - OptRedundantBranchesDomTreeVisitor(Compiler* compiler) : DomTreeVisitor(compiler), madeChanges(false) + OptRedundantBranchesDomTreeVisitor(Compiler* compiler) + : DomTreeVisitor(compiler) + , madeChanges(false) { } diff --git a/src/coreclr/jit/regset.cpp b/src/coreclr/jit/regset.cpp index efec31a78f5bd5..5f5c80a4a19d6c 100644 --- a/src/coreclr/jit/regset.cpp +++ b/src/coreclr/jit/regset.cpp @@ -233,7 +233,9 @@ void RegSet::SetMaskVars(regMaskTP newMaskVars) /*****************************************************************************/ -RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo) : m_rsCompiler(compiler), 
m_rsGCInfo(gcInfo) +RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo) + : m_rsCompiler(compiler) + , m_rsGCInfo(gcInfo) { /* Initialize the spill logic */ @@ -425,9 +427,9 @@ void RegSet::rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx /* =0 */) #if defined(TARGET_X86) /***************************************************************************** -* -* Spill the top of the FP x87 stack. -*/ + * + * Spill the top of the FP x87 stack. + */ void RegSet::rsSpillFPStack(GenTreeCall* call) { SpillDsc* spill; diff --git a/src/coreclr/jit/regset.h b/src/coreclr/jit/regset.h index 73eb08aa943eb9..0924c410e3b85b 100644 --- a/src/coreclr/jit/regset.h +++ b/src/coreclr/jit/regset.h @@ -58,7 +58,7 @@ class RegSet TempDsc* spillTemp; // the temp holding the spilled value static SpillDsc* alloc(Compiler* pComp, RegSet* regSet, var_types type); - static void freeDsc(RegSet* regSet, SpillDsc* spillDsc); + static void freeDsc(RegSet* regSet, SpillDsc* spillDsc); }; //------------------------------------------------------------------------- @@ -179,14 +179,14 @@ class RegSet }; static var_types tmpNormalizeType(var_types type); - TempDsc* tmpGetTemp(var_types type); // get temp for the given type - void tmpRlsTemp(TempDsc* temp); - TempDsc* tmpFindNum(int temp, TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const; + TempDsc* tmpGetTemp(var_types type); // get temp for the given type + void tmpRlsTemp(TempDsc* temp); + TempDsc* tmpFindNum(int temp, TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const; void tmpEnd(); TempDsc* tmpListBeg(TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const; TempDsc* tmpListNxt(TempDsc* curTemp, TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const; - void tmpDone(); + void tmpDone(); #ifdef DEBUG bool tmpAllFree() const; diff --git a/src/coreclr/jit/scev.cpp b/src/coreclr/jit/scev.cpp index 5819b56bdfd3a1..491ee4ab06f049 100644 --- a/src/coreclr/jit/scev.cpp +++ b/src/coreclr/jit/scev.cpp @@ -206,7 +206,9 @@ void Scev::Dump(Compiler* comp) // 
ResetForLoop. // ScalarEvolutionContext::ScalarEvolutionContext(Compiler* comp) - : m_comp(comp), m_cache(comp->getAllocator(CMK_LoopIVOpts)), m_ephemeralCache(comp->getAllocator(CMK_LoopIVOpts)) + : m_comp(comp) + , m_cache(comp->getAllocator(CMK_LoopIVOpts)) + , m_ephemeralCache(comp->getAllocator(CMK_LoopIVOpts)) { } @@ -967,8 +969,8 @@ Scev* ScalarEvolutionContext::Simplify(Scev* scev) ScevAddRec* addRec = (ScevAddRec*)op1; Scev* newStart = Simplify(NewBinop(binop->Oper, addRec->Start, op2)); Scev* newStep = scev->OperIs(ScevOper::Mul, ScevOper::Lsh) - ? Simplify(NewBinop(binop->Oper, addRec->Step, op2)) - : addRec->Step; + ? Simplify(NewBinop(binop->Oper, addRec->Step, op2)) + : addRec->Step; return NewAddRec(newStart, newStep); } diff --git a/src/coreclr/jit/scev.h b/src/coreclr/jit/scev.h index 0800be905503a9..1aab39e3d3a5de 100644 --- a/src/coreclr/jit/scev.h +++ b/src/coreclr/jit/scev.h @@ -48,7 +48,9 @@ struct Scev const ScevOper Oper; const var_types Type; - Scev(ScevOper oper, var_types type) : Oper(oper), Type(type) + Scev(ScevOper oper, var_types type) + : Oper(oper) + , Type(type) { } @@ -74,7 +76,9 @@ struct Scev struct ScevConstant : Scev { - ScevConstant(var_types type, int64_t value) : Scev(ScevOper::Constant, type), Value(value) + ScevConstant(var_types type, int64_t value) + : Scev(ScevOper::Constant, type) + , Value(value) { } @@ -84,7 +88,9 @@ struct ScevConstant : Scev struct ScevLocal : Scev { ScevLocal(var_types type, unsigned lclNum, unsigned ssaNum) - : Scev(ScevOper::Local, type), LclNum(lclNum), SsaNum(ssaNum) + : Scev(ScevOper::Local, type) + , LclNum(lclNum) + , SsaNum(ssaNum) { } @@ -96,7 +102,9 @@ struct ScevLocal : Scev struct ScevUnop : Scev { - ScevUnop(ScevOper oper, var_types type, Scev* op1) : Scev(oper, type), Op1(op1) + ScevUnop(ScevOper oper, var_types type, Scev* op1) + : Scev(oper, type) + , Op1(op1) { } @@ -105,7 +113,9 @@ struct ScevUnop : Scev struct ScevBinop : ScevUnop { - ScevBinop(ScevOper oper, var_types type, 
Scev* op1, Scev* op2) : ScevUnop(oper, type, op1), Op2(op2) + ScevBinop(ScevOper oper, var_types type, Scev* op1, Scev* op2) + : ScevUnop(oper, type, op1) + , Op2(op2) { } @@ -118,7 +128,9 @@ struct ScevBinop : ScevUnop struct ScevAddRec : Scev { ScevAddRec(var_types type, Scev* start, Scev* step DEBUGARG(FlowGraphNaturalLoop* loop)) - : Scev(ScevOper::AddRec, type), Start(start), Step(step) DEBUGARG(Loop(loop)) + : Scev(ScevOper::AddRec, type) + , Start(start) + , Step(step) DEBUGARG(Loop(loop)) { } @@ -204,7 +216,7 @@ class ScalarEvolutionContext Scev* MakeAddRecFromRecursiveScev(Scev* start, Scev* scev, Scev* recursiveScev); Scev* CreateSimpleInvariantScev(GenTree* tree); Scev* CreateScevForConstant(GenTreeIntConCommon* tree); - void ExtractAddOperands(ScevBinop* add, ArrayStack& operands); + void ExtractAddOperands(ScevBinop* add, ArrayStack& operands); public: ScalarEvolutionContext(Compiler* comp); @@ -212,10 +224,10 @@ class ScalarEvolutionContext void ResetForLoop(FlowGraphNaturalLoop* loop); ScevConstant* NewConstant(var_types type, int64_t value); - ScevLocal* NewLocal(unsigned lclNum, unsigned ssaNum); - ScevUnop* NewExtension(ScevOper oper, var_types targetType, Scev* op); - ScevBinop* NewBinop(ScevOper oper, Scev* op1, Scev* op2); - ScevAddRec* NewAddRec(Scev* start, Scev* step); + ScevLocal* NewLocal(unsigned lclNum, unsigned ssaNum); + ScevUnop* NewExtension(ScevOper oper, var_types targetType, Scev* op); + ScevBinop* NewBinop(ScevOper oper, Scev* op1, Scev* op2); + ScevAddRec* NewAddRec(Scev* start, Scev* step); Scev* Analyze(BasicBlock* block, GenTree* tree); Scev* Simplify(Scev* scev); diff --git a/src/coreclr/jit/scopeinfo.cpp b/src/coreclr/jit/scopeinfo.cpp index 7a1290f9ac7858..ddb766e94a0de9 100644 --- a/src/coreclr/jit/scopeinfo.cpp +++ b/src/coreclr/jit/scopeinfo.cpp @@ -790,11 +790,9 @@ void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::startLiveRang else { JITDUMP("Debug: New V%02u debug range: %s\n", m_varNum, - 
m_VariableLiveRanges->empty() - ? "first" - : siVarLoc::Equals(&varLocation, &(m_VariableLiveRanges->back().m_VarLocation)) - ? "new var or location" - : "not adjacent"); + m_VariableLiveRanges->empty() ? "first" + : siVarLoc::Equals(&varLocation, &(m_VariableLiveRanges->back().m_VarLocation)) ? "new var or location" + : "not adjacent"); // Creates new live range with invalid end m_VariableLiveRanges->emplace_back(varLocation, emitLocation(), emitLocation()); m_VariableLiveRanges->back().m_StartEmitLocation.CaptureLocation(emit); @@ -1685,9 +1683,9 @@ NATIVE_OFFSET CodeGen::psiGetVarStackOffset(const LclVarDsc* lclVarDsc) const } /*============================================================================ -* INTERFACE (public) Functions for PrologScopeInfo -*============================================================================ -*/ + * INTERFACE (public) Functions for PrologScopeInfo + *============================================================================ + */ //------------------------------------------------------------------------ // psiBegProlog: Initializes the PrologScopeInfo creating open psiScopes or diff --git a/src/coreclr/jit/sideeffects.cpp b/src/coreclr/jit/sideeffects.cpp index e39bf596c4770b..4a9b1899b24b84 100644 --- a/src/coreclr/jit/sideeffects.cpp +++ b/src/coreclr/jit/sideeffects.cpp @@ -8,7 +8,10 @@ #include "sideeffects.h" -LclVarSet::LclVarSet() : m_bitVector(nullptr), m_hasAnyLcl(false), m_hasBitVector(false) +LclVarSet::LclVarSet() + : m_bitVector(nullptr) + , m_hasAnyLcl(false) + , m_hasBitVector(false) { } @@ -121,7 +124,10 @@ void LclVarSet::Clear() } AliasSet::AliasSet() - : m_lclVarReads(), m_lclVarWrites(), m_readsAddressableLocation(false), m_writesAddressableLocation(false) + : m_lclVarReads() + , m_lclVarWrites() + , m_readsAddressableLocation(false) + , m_writesAddressableLocation(false) { } @@ -136,7 +142,11 @@ AliasSet::AliasSet() // node - The node in question. 
// AliasSet::NodeInfo::NodeInfo(Compiler* compiler, GenTree* node) - : m_compiler(compiler), m_node(node), m_flags(0), m_lclNum(0), m_lclOffs(0) + : m_compiler(compiler) + , m_node(node) + , m_flags(0) + , m_lclNum(0) + , m_lclOffs(0) { if (node->IsCall()) { @@ -444,7 +454,9 @@ void AliasSet::Clear() m_lclVarWrites.Clear(); } -SideEffectSet::SideEffectSet() : m_sideEffectFlags(0), m_aliasSet() +SideEffectSet::SideEffectSet() + : m_sideEffectFlags(0) + , m_aliasSet() { } @@ -460,7 +472,9 @@ SideEffectSet::SideEffectSet() : m_sideEffectFlags(0), m_aliasSet() // compiler - The compiler context. // node - The node to use for initialization. // -SideEffectSet::SideEffectSet(Compiler* compiler, GenTree* node) : m_sideEffectFlags(0), m_aliasSet() +SideEffectSet::SideEffectSet(Compiler* compiler, GenTree* node) + : m_sideEffectFlags(0) + , m_aliasSet() { AddNode(compiler, node); } diff --git a/src/coreclr/jit/sideeffects.h b/src/coreclr/jit/sideeffects.h index d94622d9f0ca8b..0fef277532cf1a 100644 --- a/src/coreclr/jit/sideeffects.h +++ b/src/coreclr/jit/sideeffects.h @@ -13,7 +13,8 @@ // class LclVarSet final { - union { + union + { hashBv* m_bitVector; unsigned m_lclNum; }; diff --git a/src/coreclr/jit/simd.h b/src/coreclr/jit/simd.h index aec72eaab542e8..3a5311aaaa79df 100644 --- a/src/coreclr/jit/simd.h +++ b/src/coreclr/jit/simd.h @@ -6,7 +6,8 @@ struct simd8_t { - union { + union + { float f32[2]; double f64[1]; int8_t i8[8]; @@ -58,7 +59,8 @@ static_assert_no_msg(sizeof(simd8_t) == 8); #include struct simd12_t { - union { + union + { float f32[3]; int8_t i8[12]; int16_t i16[6]; @@ -116,7 +118,8 @@ static_assert_no_msg(sizeof(simd12_t) == 12); struct simd16_t { - union { + union + { float f32[4]; double f64[2]; int8_t i8[16]; @@ -170,7 +173,8 @@ static_assert_no_msg(sizeof(simd16_t) == 16); #if defined(TARGET_XARCH) struct simd32_t { - union { + union + { float f32[8]; double f64[4]; int8_t i8[32]; @@ -224,7 +228,8 @@ static_assert_no_msg(sizeof(simd32_t) == 32); 
struct simd64_t { - union { + union + { float f32[16]; double f64[8]; int8_t i8[64]; @@ -279,7 +284,8 @@ static_assert_no_msg(sizeof(simd64_t) == 64); struct simdmask_t { - union { + union + { int8_t i8[8]; int16_t i16[4]; int32_t i32[2]; diff --git a/src/coreclr/jit/simdashwintrinsic.cpp b/src/coreclr/jit/simdashwintrinsic.cpp index f06b38736ddadb..c22ebc7b635440 100644 --- a/src/coreclr/jit/simdashwintrinsic.cpp +++ b/src/coreclr/jit/simdashwintrinsic.cpp @@ -399,7 +399,7 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, { argType = isInstanceMethod ? simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); - op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); + op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); return gtNewSimdAsHWIntrinsicNode(retType, op1, hwIntrinsic, simdBaseJitType, simdSize); } @@ -421,7 +421,7 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, argType = isInstanceMethod ? simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); - op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); + op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize); } @@ -954,7 +954,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, argType = isInstanceMethod ? 
simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); - op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); + op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); switch (intrinsic) { diff --git a/src/coreclr/jit/sm.cpp b/src/coreclr/jit/sm.cpp index 5cd6e9879c78da..5e9b97699b841f 100644 --- a/src/coreclr/jit/sm.cpp +++ b/src/coreclr/jit/sm.cpp @@ -130,8 +130,8 @@ SM_STATE_ID CodeSeqSM::GetDestState(SM_STATE_ID srcState, SM_OPCODE opcode) if (cell->srcState != srcState) { - assert(cell->srcState == 0 || - cell->srcState != srcState); // Either way means there is not outgoing edge from srcState. + assert(cell->srcState == 0 || cell->srcState != srcState); // Either way means there is not outgoing edge from + // srcState. return 0; } else diff --git a/src/coreclr/jit/smallhash.h b/src/coreclr/jit/smallhash.h index f16905c995fbd4..5bbf58e99a4bd9 100644 --- a/src/coreclr/jit/smallhash.h +++ b/src/coreclr/jit/smallhash.h @@ -338,7 +338,10 @@ class HashTableBase protected: HashTableBase(TAllocator alloc, Bucket* buckets, unsigned numBuckets) - : m_alloc(alloc), m_buckets(buckets), m_numBuckets(numBuckets), m_numFullBuckets(0) + : m_alloc(alloc) + , m_buckets(buckets) + , m_numBuckets(numBuckets) + , m_numFullBuckets(0) { if (numBuckets > 0) { @@ -359,13 +362,15 @@ class HashTableBase Bucket* m_bucket; - KeyValuePair(Bucket* bucket) : m_bucket(bucket) + KeyValuePair(Bucket* bucket) + : m_bucket(bucket) { assert(m_bucket != nullptr); } public: - KeyValuePair() : m_bucket(nullptr) + KeyValuePair() + : m_bucket(nullptr) { } @@ -392,7 +397,9 @@ class HashTableBase unsigned m_index; Iterator(Bucket* buckets, unsigned numBuckets, unsigned index) - : m_buckets(buckets), m_numBuckets(numBuckets), m_index(index) + : m_buckets(buckets) + , m_numBuckets(numBuckets) + , m_index(index) { assert((buckets != nullptr) || (numBuckets == 0)); assert(index <= numBuckets); @@ -405,7 +412,10 @@ class HashTableBase } public: - 
Iterator() : m_buckets(nullptr), m_numBuckets(0), m_index(0) + Iterator() + : m_buckets(nullptr) + , m_numBuckets(0) + , m_index(0) { } @@ -636,7 +646,8 @@ class HashTable final : public HashTableBase } public: - HashTable(TAllocator alloc) : TBase(alloc, nullptr, 0) + HashTable(TAllocator alloc) + : TBase(alloc, nullptr, 0) { } @@ -670,7 +681,8 @@ class SmallHashTable final : public HashTableBase> 1)); // Parameters: // info - Info about the method being classified. // -Arm32Classifier::Arm32Classifier(const ClassifierInfo& info) : m_info(info) +Arm32Classifier::Arm32Classifier(const ClassifierInfo& info) + : m_info(info) { } diff --git a/src/coreclr/jit/targetarm64.cpp b/src/coreclr/jit/targetarm64.cpp index f48cfae542cd34..cef1e95780695b 100644 --- a/src/coreclr/jit/targetarm64.cpp +++ b/src/coreclr/jit/targetarm64.cpp @@ -32,7 +32,9 @@ const regMaskTP fltArgMasks[] = {RBM_V0, RBM_V1, RBM_V2, RBM_V3, RBM_V4, RBM_V5, // info - Info about the method being classified. // Arm64Classifier::Arm64Classifier(const ClassifierInfo& info) - : m_info(info), m_intRegs(intArgRegs, ArrLen(intArgRegs)), m_floatRegs(fltArgRegs, ArrLen(fltArgRegs)) + : m_info(info) + , m_intRegs(intArgRegs, ArrLen(intArgRegs)) + , m_floatRegs(fltArgRegs, ArrLen(fltArgRegs)) { } diff --git a/src/coreclr/jit/targetx86.cpp b/src/coreclr/jit/targetx86.cpp index 1c3e91be1bd2ff..5c2702d4728893 100644 --- a/src/coreclr/jit/targetx86.cpp +++ b/src/coreclr/jit/targetx86.cpp @@ -28,7 +28,8 @@ const regMaskTP intArgMasks[] = {RBM_ECX, RBM_EDX}; // Parameters: // info - Info about the method being classified. 
// -X86Classifier::X86Classifier(const ClassifierInfo& info) : m_regs(nullptr, 0) +X86Classifier::X86Classifier(const ClassifierInfo& info) + : m_regs(nullptr, 0) { switch (info.CallConv) { diff --git a/src/coreclr/jit/treelifeupdater.cpp b/src/coreclr/jit/treelifeupdater.cpp index 9ae6d3cd02f74d..31563b4d501cc8 100644 --- a/src/coreclr/jit/treelifeupdater.cpp +++ b/src/coreclr/jit/treelifeupdater.cpp @@ -339,7 +339,7 @@ void TreeLifeUpdater::UpdateLifeBit(VARSET_TP& set, LclVarDsc* dsc, // can be dumped after potential updates. // template -void TreeLifeUpdater::StoreCurrentLifeForDump() +void TreeLifeUpdater::StoreCurrentLifeForDump() { #ifdef DEBUG if (compiler->verbose) diff --git a/src/coreclr/jit/typelist.h b/src/coreclr/jit/typelist.h index 1a9a8c4072f6bf..bf5acb5ee014a5 100644 --- a/src/coreclr/jit/typelist.h +++ b/src/coreclr/jit/typelist.h @@ -4,7 +4,7 @@ #define GCS EA_GCREF #define BRS EA_BYREF #define EPS EA_PTRSIZE -#define PS TARGET_POINTER_SIZE +#define PS TARGET_POINTER_SIZE #define PST (TARGET_POINTER_SIZE / sizeof(int)) #ifdef TARGET_64BIT diff --git a/src/coreclr/jit/unwind.cpp b/src/coreclr/jit/unwind.cpp index a927e73c02b9f1..e1ff9bc464a163 100644 --- a/src/coreclr/jit/unwind.cpp +++ b/src/coreclr/jit/unwind.cpp @@ -128,7 +128,7 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, assert(func->funKind == FUNC_HANDLER); *ppStartLoc = new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndBeg)); *ppEndLoc = HBtab->ebdHndLast->IsLast() ? 
nullptr - : new (this, CMK_UnwindInfo) + : new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndLast->Next())); } } diff --git a/src/coreclr/jit/unwind.h b/src/coreclr/jit/unwind.h index 4d1b540f060624..8b7fcaa5a103dc 100644 --- a/src/coreclr/jit/unwind.h +++ b/src/coreclr/jit/unwind.h @@ -21,46 +21,51 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #if defined(TARGET_ARM) const unsigned MAX_PROLOG_SIZE_BYTES = 44; const unsigned MAX_EPILOG_SIZE_BYTES = 44; -#define UWC_END 0xFF // "end" unwind code +#define UWC_END 0xFF // "end" unwind code #define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 19) -#define UW_MAX_CODE_WORDS_COUNT 15 // Max number that can be encoded in the "Code Words" field of the .pdata record -#define UW_MAX_EPILOG_START_INDEX 0xFFU // Max number that can be encoded in the "Epilog Start Index" field - // of the .pdata record +#define UW_MAX_CODE_WORDS_COUNT 15 // Max number that can be encoded in the "Code Words" field of the .pdata record +#define UW_MAX_EPILOG_START_INDEX \ + 0xFFU // Max number that can be encoded in the "Epilog Start Index" field + // of the .pdata record #elif defined(TARGET_ARM64) const unsigned MAX_PROLOG_SIZE_BYTES = 100; const unsigned MAX_EPILOG_SIZE_BYTES = 100; -#define UWC_END 0xE4 // "end" unwind code -#define UWC_END_C 0xE5 // "end_c" unwind code +#define UWC_END 0xE4 // "end" unwind code +#define UWC_END_C 0xE5 // "end_c" unwind code #define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 20) -#define UW_MAX_CODE_WORDS_COUNT 31 -#define UW_MAX_EPILOG_START_INDEX 0x3FFU +#define UW_MAX_CODE_WORDS_COUNT 31 +#define UW_MAX_EPILOG_START_INDEX 0x3FFU #elif defined(TARGET_LOONGARCH64) const unsigned MAX_PROLOG_SIZE_BYTES = 200; const unsigned MAX_EPILOG_SIZE_BYTES = 200; -#define UWC_END 0xE4 // "end" unwind code -#define UWC_END_C 0xE5 // "end_c" unwind code +#define UWC_END 0xE4 // "end" unwind code +#define UWC_END_C 0xE5 // "end_c" unwind code #define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 20) 
-#define UW_MAX_CODE_WORDS_COUNT 31 -#define UW_MAX_EPILOG_START_INDEX 0x3FFU +#define UW_MAX_CODE_WORDS_COUNT 31 +#define UW_MAX_EPILOG_START_INDEX 0x3FFU #elif defined(TARGET_RISCV64) const unsigned MAX_PROLOG_SIZE_BYTES = 200; const unsigned MAX_EPILOG_SIZE_BYTES = 200; -#define UWC_END 0xE4 // "end" unwind code -#define UWC_END_C 0xE5 // "end_c" unwind code +#define UWC_END 0xE4 // "end" unwind code +#define UWC_END_C 0xE5 // "end_c" unwind code #define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 20) -#define UW_MAX_CODE_WORDS_COUNT 31 -#define UW_MAX_EPILOG_START_INDEX 0x3FFU +#define UW_MAX_CODE_WORDS_COUNT 31 +#define UW_MAX_EPILOG_START_INDEX 0x3FFU #endif // TARGET_RISCV64 -#define UW_MAX_EPILOG_COUNT 31 // Max number that can be encoded in the "Epilog count" field - // of the .pdata record -#define UW_MAX_EXTENDED_CODE_WORDS_COUNT 0xFFU // Max number that can be encoded in the "Extended Code Words" - // field of the .pdata record -#define UW_MAX_EXTENDED_EPILOG_COUNT 0xFFFFU // Max number that can be encoded in the "Extended Epilog Count" - // field of the .pdata record -#define UW_MAX_EPILOG_START_OFFSET 0x3FFFFU // Max number that can be encoded in the "Epilog Start Offset" - // field of the .pdata record +#define UW_MAX_EPILOG_COUNT \ + 31 // Max number that can be encoded in the "Epilog count" field + // of the .pdata record +#define UW_MAX_EXTENDED_CODE_WORDS_COUNT \ + 0xFFU // Max number that can be encoded in the "Extended Code Words" + // field of the .pdata record +#define UW_MAX_EXTENDED_EPILOG_COUNT \ + 0xFFFFU // Max number that can be encoded in the "Extended Epilog Count" + // field of the .pdata record +#define UW_MAX_EPILOG_START_OFFSET \ + 0x3FFFFU // Max number that can be encoded in the "Epilog Start Offset" + // field of the .pdata record // // Forward declaration of class defined in emit.h @@ -85,7 +90,8 @@ class UnwindInfo; class UnwindBase { protected: - UnwindBase(Compiler* comp) : uwiComp(comp) + UnwindBase(Compiler* comp) + : uwiComp(comp) 
{ } @@ -107,9 +113,9 @@ class UnwindCodesBase public: // Add a single unwind code. - virtual void AddCode(BYTE b1) = 0; - virtual void AddCode(BYTE b1, BYTE b2) = 0; - virtual void AddCode(BYTE b1, BYTE b2, BYTE b3) = 0; + virtual void AddCode(BYTE b1) = 0; + virtual void AddCode(BYTE b1, BYTE b2) = 0; + virtual void AddCode(BYTE b1, BYTE b2, BYTE b3) = 0; virtual void AddCode(BYTE b1, BYTE b2, BYTE b3, BYTE b4) = 0; // Get access to the unwind codes @@ -139,7 +145,9 @@ class UnwindCodesBase // information for a function, including unwind info header, the prolog codes, // and any epilog codes. -class UnwindPrologCodes : public UnwindBase, public UnwindCodesBase +class UnwindPrologCodes + : public UnwindBase + , public UnwindCodesBase { // UPC_LOCAL_COUNT is the amount of memory local to this class. For ARM CoreLib, the maximum size is 34. // Here is a histogram of other interesting sizes: @@ -303,7 +311,9 @@ class UnwindPrologCodes : public UnwindBase, public UnwindCodesBase // Epilog unwind codes arrive in the order they will be emitted. Store them as an array, // adding new ones to the end of the array. -class UnwindEpilogCodes : public UnwindBase, public UnwindCodesBase +class UnwindEpilogCodes + : public UnwindBase + , public UnwindCodesBase { // UEC_LOCAL_COUNT is the amount of memory local to this class. For ARM CoreLib, the maximum size is 6, // while 89% of epilogs fit in 4. So, set it to 4 to maintain array alignment and hit most cases. diff --git a/src/coreclr/jit/unwindamd64.cpp b/src/coreclr/jit/unwindamd64.cpp index 549c4e9910567c..e42a4368581fb7 100644 --- a/src/coreclr/jit/unwindamd64.cpp +++ b/src/coreclr/jit/unwindamd64.cpp @@ -199,7 +199,7 @@ void Compiler::unwindPushWindows(regNumber reg) // since it is pushed as a frame register. 
|| (reg == REG_FPBASE) #endif // ETW_EBP_FRAMED - ) + ) { code->UnwindOp = UWOP_PUSH_NONVOL; code->OpInfo = (BYTE)reg; diff --git a/src/coreclr/jit/unwindarm64.cpp b/src/coreclr/jit/unwindarm64.cpp index 0725eb41dfdba5..f842737171c0b4 100644 --- a/src/coreclr/jit/unwindarm64.cpp +++ b/src/coreclr/jit/unwindarm64.cpp @@ -461,8 +461,8 @@ void Compiler::unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int o pu->AddCode(0x80 | (BYTE)z); } - else if ((reg1 == REG_R19) && - (-256 <= offset)) // If the offset is between -512 and -256, we use the save_regp_x unwind code. + else if ((reg1 == REG_R19) && (-256 <= offset)) // If the offset is between -512 and -256, we use the save_regp_x + // unwind code. { // save_r19r20_x: 001zzzzz: save pair at [sp-#Z*8]!, pre-indexed offset >= -248 // NOTE: I'm not sure why we allow Z==0 here; seems useless, and the calculation of offset is different from the @@ -758,7 +758,7 @@ void DumpUnwindInfo(Compiler* comp, // pHeader is not guaranteed to be aligned. We put four 0xFF end codes at the end // to provide padding, and round down to get a multiple of 4 bytes in size. 
DWORD UNALIGNED* pdw = (DWORD UNALIGNED*)pHeader; - DWORD dw; + DWORD dw; dw = *pdw++; diff --git a/src/coreclr/jit/unwindarmarch.cpp b/src/coreclr/jit/unwindarmarch.cpp index 445b2581ca0abb..bdc7663bde7ed1 100644 --- a/src/coreclr/jit/unwindarmarch.cpp +++ b/src/coreclr/jit/unwindarmarch.cpp @@ -243,9 +243,8 @@ void Compiler::unwindPushPopMaskInt(regMaskTP maskInt, bool useOpsize16) } else { - assert((maskInt & - ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | RBM_R10 | - RBM_R11 | RBM_R12 | RBM_LR)) == 0); + assert((maskInt & ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | + RBM_R10 | RBM_R11 | RBM_R12 | RBM_LR)) == 0); bool shortFormat = false; BYTE val = 0; @@ -321,9 +320,8 @@ void Compiler::unwindPushPopMaskFloat(regMaskTP maskFloat) void Compiler::unwindPushMaskInt(regMaskTP maskInt) { // Only r0-r12 and lr are supported - assert((maskInt & - ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | RBM_R10 | - RBM_R11 | RBM_R12 | RBM_LR)) == 0); + assert((maskInt & ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | + RBM_R10 | RBM_R11 | RBM_R12 | RBM_LR)) == 0); #if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) @@ -364,9 +362,8 @@ void Compiler::unwindPopMaskInt(regMaskTP maskInt) #endif // FEATURE_CFI_SUPPORT // Only r0-r12 and lr and pc are supported (pc is mapped to lr when encoding) - assert((maskInt & - ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | RBM_R10 | - RBM_R11 | RBM_R12 | RBM_LR | RBM_PC)) == 0); + assert((maskInt & ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | + RBM_R10 | RBM_R11 | RBM_R12 | RBM_LR | RBM_PC)) == 0); bool useOpsize16 = ((maskInt & (RBM_LOW_REGS | RBM_PC)) == maskInt); // Can POP use the 16-bit encoding? 
@@ -721,8 +718,8 @@ unsigned GetOpcodeSizeFromUnwindHeader(BYTE b1) }; BYTE opsize = s_UnwindOpsize[b1]; - assert(opsize == 2 || - opsize == 4); // We shouldn't get a code with no opsize (the 0xFF end code is handled specially) + assert(opsize == 2 || opsize == 4); // We shouldn't get a code with no opsize (the 0xFF end code is handled + // specially) return opsize; } @@ -887,9 +884,9 @@ void UnwindPrologCodes::AppendEpilog(UnwindEpilogInfo* pEpi) int epiSize = pEpi->Size(); memcpy_s(&upcMem[upcEpilogSlot], upcMemSize - upcEpilogSlot - 3, pEpi->GetCodes(), - epiSize); // -3 to avoid writing to the alignment padding - assert(pEpi->GetStartIndex() == - upcEpilogSlot - upcCodeSlot); // Make sure we copied it where we expected to copy it. + epiSize); // -3 to avoid writing to the alignment padding + assert(pEpi->GetStartIndex() == upcEpilogSlot - upcCodeSlot); // Make sure we copied it where we expected to copy + // it. upcEpilogSlot += epiSize; assert(upcEpilogSlot <= upcMemSize - 3); @@ -1455,7 +1452,7 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) } #endif -// Compute the header + // Compute the header #if defined(TARGET_ARM) noway_assert((functionLength & 1) == 0); @@ -1504,8 +1501,8 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) // Start writing the header - noway_assert(headerFunctionLength <= - 0x3FFFFU); // We create fragments to prevent this from firing, so if it hits, we have an internal error + noway_assert(headerFunctionLength <= 0x3FFFFU); // We create fragments to prevent this from firing, so if it hits, + // we have an internal error if ((headerEpilogCount > UW_MAX_EPILOG_COUNT) || (headerCodeWords > UW_MAX_CODE_WORDS_COUNT)) { @@ -1516,7 +1513,7 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) DWORD header = headerFunctionLength | (headerVers << 18) | (headerXBit << 20) | (headerEBit << 21) | (headerFBit << 22) | (headerEpilogCount << 23) | (headerCodeWords << 28); #elif defined(TARGET_ARM64) 
- DWORD header = headerFunctionLength | (headerVers << 18) | (headerXBit << 20) | (headerEBit << 21) | + DWORD header = headerFunctionLength | (headerVers << 18) | (headerXBit << 20) | (headerEBit << 21) | (headerEpilogCount << 22) | (headerCodeWords << 27); #endif // defined(TARGET_ARM64) @@ -2203,7 +2200,7 @@ DWORD DumpRegSetRange(const char* const rtype, DWORD start, DWORD end, DWORD lr) DWORD DumpOpsize(DWORD padding, DWORD opsize) { if (padding > 100) // underflow? - padding = 4; + padding = 4; DWORD printed = padding; for (; padding > 0; padding--) printf(" "); @@ -2231,7 +2228,7 @@ void DumpUnwindInfo(Compiler* comp, // pHeader is not guaranteed to be aligned. We put four 0xFF end codes at the end // to provide padding, and round down to get a multiple of 4 bytes in size. DWORD UNALIGNED* pdw = (DWORD UNALIGNED*)pHeader; - DWORD dw; + DWORD dw; dw = *pdw++; diff --git a/src/coreclr/jit/unwindloongarch64.cpp b/src/coreclr/jit/unwindloongarch64.cpp index 3aa5fd668d40c6..1b561eaaaae669 100644 --- a/src/coreclr/jit/unwindloongarch64.cpp +++ b/src/coreclr/jit/unwindloongarch64.cpp @@ -516,7 +516,7 @@ void DumpUnwindInfo(Compiler* comp, // pHeader is not guaranteed to be aligned. We put four 0xFF end codes at the end // to provide padding, and round down to get a multiple of 4 bytes in size. DWORD UNALIGNED* pdw = (DWORD UNALIGNED*)pHeader; - DWORD dw; + DWORD dw; dw = *pdw++; @@ -1149,9 +1149,9 @@ void UnwindPrologCodes::AppendEpilog(UnwindEpilogInfo* pEpi) int epiSize = pEpi->Size(); memcpy_s(&upcMem[upcEpilogSlot], upcMemSize - upcEpilogSlot - 3, pEpi->GetCodes(), - epiSize); // -3 to avoid writing to the alignment padding - assert(pEpi->GetStartIndex() == - upcEpilogSlot - upcCodeSlot); // Make sure we copied it where we expected to copy it. + epiSize); // -3 to avoid writing to the alignment padding + assert(pEpi->GetStartIndex() == upcEpilogSlot - upcCodeSlot); // Make sure we copied it where we expected to copy + // it. 
upcEpilogSlot += epiSize; assert(upcEpilogSlot <= upcMemSize - 3); @@ -1772,8 +1772,8 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) // Start writing the header - noway_assert(headerFunctionLength <= - 0x3FFFFU); // We create fragments to prevent this from firing, so if it hits, we have an internal error + noway_assert(headerFunctionLength <= 0x3FFFFU); // We create fragments to prevent this from firing, so if it hits, + // we have an internal error if ((headerEpilogCount > UW_MAX_EPILOG_COUNT) || (headerCodeWords > UW_MAX_CODE_WORDS_COUNT)) { diff --git a/src/coreclr/jit/unwindriscv64.cpp b/src/coreclr/jit/unwindriscv64.cpp index b78eb04c228e93..f9db0d433c6f13 100644 --- a/src/coreclr/jit/unwindriscv64.cpp +++ b/src/coreclr/jit/unwindriscv64.cpp @@ -327,7 +327,7 @@ void DumpUnwindInfo(Compiler* comp, // pHeader is not guaranteed to be aligned. We put four 0xFF end codes at the end // to provide padding, and round down to get a multiple of 4 bytes in size. DWORD UNALIGNED* pdw = (DWORD UNALIGNED*)pHeader; - DWORD dw; + DWORD dw; dw = *pdw++; diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp index 9e706fd884e56b..23b8d0000b800d 100644 --- a/src/coreclr/jit/utils.cpp +++ b/src/coreclr/jit/utils.cpp @@ -1107,7 +1107,8 @@ void Counter::dump(FILE* output) * Histogram class. */ -Histogram::Histogram(const unsigned* const sizeTable) : m_sizeTable(sizeTable) +Histogram::Histogram(const unsigned* const sizeTable) + : m_sizeTable(sizeTable) { unsigned sizeCount = 0; do @@ -1839,7 +1840,8 @@ void HelperCallProperties::init() // // You must use ';' as a separator; whitespace no longer works -AssemblyNamesList2::AssemblyNamesList2(const WCHAR* list, HostAllocator alloc) : m_alloc(alloc) +AssemblyNamesList2::AssemblyNamesList2(const WCHAR* list, HostAllocator alloc) + : m_alloc(alloc) { WCHAR prevChar = '?'; // dummy LPWSTR nameStart = nullptr; // start of the name currently being processed. 
nullptr if no current name @@ -1926,7 +1928,9 @@ bool AssemblyNamesList2::IsInList(const char* assemblyName) // MethodSet //============================================================================= -MethodSet::MethodSet(const WCHAR* filename, HostAllocator alloc) : m_pInfos(nullptr), m_alloc(alloc) +MethodSet::MethodSet(const WCHAR* filename, HostAllocator alloc) + : m_pInfos(nullptr) + , m_alloc(alloc) { FILE* methodSetFile = _wfopen(filename, W("r")); if (methodSetFile == nullptr) @@ -2155,7 +2159,8 @@ double CachedCyclesPerSecond() } #ifdef FEATURE_JIT_METHOD_PERF -CycleCount::CycleCount() : cps(CachedCyclesPerSecond()) +CycleCount::CycleCount() + : cps(CachedCyclesPerSecond()) { } @@ -2299,7 +2304,7 @@ unsigned __int64 FloatingPointUtils::convertDoubleToUInt64(double d) u64 = UINT64(INT64(d)); #else - u64 = UINT64(d); + u64 = UINT64(d); #endif // TARGET_XARCH return u64; @@ -4099,7 +4104,7 @@ int64_t GetSigned64Magic(int64_t d, int* shift /*out*/) return GetSignedMagic(d, shift); } #endif -} +} // namespace MagicDivide namespace CheckedOps { @@ -4293,4 +4298,4 @@ bool CastFromDoubleOverflows(double fromValue, var_types toType) unreached(); } } -} +} // namespace CheckedOps diff --git a/src/coreclr/jit/utils.h b/src/coreclr/jit/utils.h index 0b1b6840be6ec3..39001f32215ef5 100644 --- a/src/coreclr/jit/utils.h +++ b/src/coreclr/jit/utils.h @@ -88,7 +88,9 @@ class IteratorPair TIterator m_end; public: - IteratorPair(TIterator begin, TIterator end) : m_begin(begin), m_end(end) + IteratorPair(TIterator begin, TIterator end) + : m_begin(begin) + , m_end(end) { } @@ -116,7 +118,8 @@ struct ConstLog2 { enum { - value = ConstLog2::value + value = ConstLog2 < val / 2, + acc + 1 > ::value }; }; @@ -246,7 +249,9 @@ class ConfigMethodRange class ConfigIntArray { public: - ConfigIntArray() : m_values(nullptr), m_length(0) + ConfigIntArray() + : m_values(nullptr) + , m_length(0) { } @@ -270,7 +275,7 @@ class ConfigIntArray } private: - void Init(const WCHAR* str); + void 
Init(const WCHAR* str); int* m_values; unsigned m_length; }; @@ -280,7 +285,9 @@ class ConfigIntArray class ConfigDoubleArray { public: - ConfigDoubleArray() : m_values(nullptr), m_length(0) + ConfigDoubleArray() + : m_values(nullptr) + , m_length(0) { } @@ -304,7 +311,7 @@ class ConfigDoubleArray } private: - void Init(const WCHAR* str); + void Init(const WCHAR* str); double* m_values; unsigned m_length; }; @@ -404,7 +411,8 @@ template class ScopedSetVariable { public: - ScopedSetVariable(T* pVariable, T value) : m_pVariable(pVariable) + ScopedSetVariable(T* pVariable, T value) + : m_pVariable(pVariable) { m_oldValue = *m_pVariable; *m_pVariable = value; @@ -442,7 +450,8 @@ class PhasedVar public: PhasedVar() #ifdef DEBUG - : m_initialized(false), m_writePhase(true) + : m_initialized(false) + , m_writePhase(true) #endif // DEBUG { } @@ -704,7 +713,9 @@ class MethodSet MethodInfo* m_next; MethodInfo(char* methodName, int methodHash) - : m_MethodName(methodName), m_MethodHash(methodHash), m_next(nullptr) + : m_MethodName(methodName) + , m_MethodHash(methodHash) + , m_next(nullptr) { } }; @@ -786,8 +797,8 @@ unsigned CountDigits(double num, unsigned base = 10); #endif // DEBUG /***************************************************************************** -* Floating point utility class -*/ + * Floating point utility class + */ class FloatingPointUtils { public: @@ -1019,7 +1030,7 @@ class CritSecObject CRITSEC_COOKIE m_pCs; // No copying or assignment allowed. - CritSecObject(const CritSecObject&) = delete; + CritSecObject(const CritSecObject&) = delete; CritSecObject& operator=(const CritSecObject&) = delete; }; @@ -1029,7 +1040,8 @@ class CritSecObject class CritSecHolder { public: - CritSecHolder(CritSecObject& critSec) : m_CritSec(critSec) + CritSecHolder(CritSecObject& critSec) + : m_CritSec(critSec) { ClrEnterCriticalSection(m_CritSec.Val()); } @@ -1043,7 +1055,7 @@ class CritSecHolder CritSecObject& m_CritSec; // No copying or assignment allowed. 
- CritSecHolder(const CritSecHolder&) = delete; + CritSecHolder(const CritSecHolder&) = delete; CritSecHolder& operator=(const CritSecHolder&) = delete; }; @@ -1059,7 +1071,7 @@ int32_t GetSigned32Magic(int32_t d, int* shift /*out*/); #ifdef TARGET_64BIT int64_t GetSigned64Magic(int64_t d, int* shift /*out*/); #endif -} +} // namespace MagicDivide // // Profiling helpers @@ -1160,6 +1172,6 @@ bool CastFromIntOverflows(int32_t fromValue, var_types toType, bool fromUnsigned bool CastFromLongOverflows(int64_t fromValue, var_types toType, bool fromUnsigned); bool CastFromFloatOverflows(float fromValue, var_types toType); bool CastFromDoubleOverflows(double fromValue, var_types toType); -} +} // namespace CheckedOps #endif // _UTILS_H_ diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 7ebde5995cf732..889e0227e992d9 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -45,7 +45,7 @@ struct FloatTraits #if defined(TARGET_XARCH) unsigned bits = 0xFFC00000u; #elif defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) - unsigned bits = 0x7FC00000u; + unsigned bits = 0x7FC00000u; #else #error Unsupported or unset target architecture #endif @@ -1647,7 +1647,11 @@ bool ValueNumStore::IsSharedStatic(ValueNum vn) } ValueNumStore::Chunk::Chunk(CompAllocator alloc, ValueNum* pNextBaseVN, var_types typ, ChunkExtraAttribs attribs) - : m_defs(nullptr), m_numUsed(0), m_baseVN(*pNextBaseVN), m_typ(typ), m_attribs(attribs) + : m_defs(nullptr) + , m_numUsed(0) + , m_baseVN(*pNextBaseVN) + , m_typ(typ) + , m_attribs(attribs) { // Allocate "m_defs" here, according to the typ/attribs pair. 
switch (attribs) @@ -2971,7 +2975,8 @@ typedef JitHashTable, bool> ValueN class SmallValueNumSet { - union { + union + { ValueNum m_inlineElements[4]; ValueNumSet* m_set; }; @@ -3416,7 +3421,7 @@ ValueNum ValueNumStore::VNForMapSelectWork(ValueNumKind vnk, { bool usedRecursiveVN = false; ValueNum curResult = VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget, - &usedRecursiveVN, recMemoryDependencies); + &usedRecursiveVN, recMemoryDependencies); *pUsedRecursiveVN |= usedRecursiveVN; if (sameSelResult == ValueNumStore::RecursiveVN) @@ -3449,8 +3454,9 @@ ValueNum ValueNumStore::VNForMapSelectWork(ValueNumKind vnk, GetMapSelectWorkCache()->Set(fstruct, entry); } - recMemoryDependencies.ForEach( - [this, &memoryDependencies](ValueNum vn) { memoryDependencies.Add(m_pComp, vn); }); + recMemoryDependencies.ForEach([this, &memoryDependencies](ValueNum vn) { + memoryDependencies.Add(m_pComp, vn); + }); return sameSelResult; } @@ -3485,7 +3491,9 @@ ValueNum ValueNumStore::VNForMapSelectWork(ValueNumKind vnk, GetMapSelectWorkCache()->Set(fstruct, entry); } - recMemoryDependencies.ForEach([this, &memoryDependencies](ValueNum vn) { memoryDependencies.Add(m_pComp, vn); }); + recMemoryDependencies.ForEach([this, &memoryDependencies](ValueNum vn) { + memoryDependencies.Add(m_pComp, vn); + }); return entry.Result; } @@ -5610,7 +5618,7 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeq* fldSeq, ssize_t offs { fldSeq = m_pComp->GetFieldSeqStore()->Append(FieldSeqVNToFieldSeq(funcApp.m_args[1]), fldSeq); res = VNForFunc(TYP_BYREF, VNF_PtrToStatic, funcApp.m_args[0], VNForFieldSeq(fldSeq), - VNForIntPtrCon(ConstantValue(funcApp.m_args[2]) + offset)); + VNForIntPtrCon(ConstantValue(funcApp.m_args[2]) + offset)); } else if (funcApp.m_func == VNF_PtrToArrElem) { @@ -5653,7 +5661,6 @@ void Compiler::fgValueNumberLocalStore(GenTree* storeNode, auto processDef = [=](unsigned defLclNum, unsigned defSsaNum, ssize_t defOffset, unsigned defSize, ValueNumPair defValue) { - 
LclVarDsc* defVarDsc = lvaGetDesc(defLclNum); if (defSsaNum != SsaConfig::RESERVED_SSA_NUM) @@ -12189,8 +12196,8 @@ void Compiler::fgValueNumberCastTree(GenTree* tree) ValueNum ValueNumStore::VNForCast(ValueNum srcVN, var_types castToType, var_types castFromType, - bool srcIsUnsigned, /* = false */ - bool hasOverflowCheck) /* = false */ + bool srcIsUnsigned, /* = false */ + bool hasOverflowCheck) /* = false */ { if ((castFromType == TYP_I_IMPL) && (castToType == TYP_BYREF) && IsVNHandle(srcVN)) @@ -12235,8 +12242,8 @@ ValueNum ValueNumStore::VNForCast(ValueNum srcVN, ValueNumPair ValueNumStore::VNPairForCast(ValueNumPair srcVNPair, var_types castToType, var_types castFromType, - bool srcIsUnsigned, /* = false */ - bool hasOverflowCheck) /* = false */ + bool srcIsUnsigned, /* = false */ + bool hasOverflowCheck) /* = false */ { ValueNum srcLibVN = srcVNPair.GetLiberal(); ValueNum srcConVN = srcVNPair.GetConservative(); @@ -13742,7 +13749,6 @@ void Compiler::fgDebugCheckExceptionSets() ValueNumPair operandsExcSet = vnStore->VNPForEmptyExcSet(); tree->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult { - CheckTree(operand, vnStore); ValueNumPair operandVNP = operand->gtVNPair.BothDefined() ? operand->gtVNPair : vnStore->VNPForVoid(); @@ -13796,7 +13802,7 @@ void Compiler::JitTestCheckVN() // First we have to know which nodes in the tree are reachable. typedef JitHashTable, int> NodeToIntMap; - NodeToIntMap* reachable = FindReachableNodesInNodeTestData(); + NodeToIntMap* reachable = FindReachableNodesInNodeTestData(); LabelToVNMap* labelToVN = new (getAllocatorDebugOnly()) LabelToVNMap(getAllocatorDebugOnly()); VNToLabelMap* vnToLabel = new (getAllocatorDebugOnly()) VNToLabelMap(getAllocatorDebugOnly()); @@ -13931,7 +13937,9 @@ void Compiler::vnPrint(ValueNum vn, unsigned level) #endif // DEBUG // Methods of ValueNumPair. 
-ValueNumPair::ValueNumPair() : m_liberal(ValueNumStore::NoVN), m_conservative(ValueNumStore::NoVN) +ValueNumPair::ValueNumPair() + : m_liberal(ValueNumStore::NoVN) + , m_conservative(ValueNumStore::NoVN) { } diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index 7cd6c27aec206c..1f9171e13cef21 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -238,7 +238,8 @@ class ValueNumStore class VNMap : public JitHashTable { public: - VNMap(CompAllocator alloc) : JitHashTable(alloc) + VNMap(CompAllocator alloc) + : JitHashTable(alloc) { } @@ -306,7 +307,7 @@ class ValueNumStore bool illegalAsVNFunc, GenTreeOperKind kind); static constexpr uint8_t GetOpAttribsForFunc(int arity, bool commute, bool knownNonNull, bool sharedStatic); - static const uint8_t s_vnfOpAttribs[]; + static const uint8_t s_vnfOpAttribs[]; // Returns "true" iff gtOper is a legal value number function. // (Requires InitValueNumStoreStatics to have been run.) @@ -355,18 +356,18 @@ class ValueNumStore public: // Given an constant value number return its value. 
- int GetConstantInt32(ValueNum argVN); - INT64 GetConstantInt64(ValueNum argVN); + int GetConstantInt32(ValueNum argVN); + INT64 GetConstantInt64(ValueNum argVN); double GetConstantDouble(ValueNum argVN); - float GetConstantSingle(ValueNum argVN); + float GetConstantSingle(ValueNum argVN); #if defined(FEATURE_SIMD) - simd8_t GetConstantSimd8(ValueNum argVN); + simd8_t GetConstantSimd8(ValueNum argVN); simd12_t GetConstantSimd12(ValueNum argVN); simd16_t GetConstantSimd16(ValueNum argVN); #if defined(TARGET_XARCH) - simd32_t GetConstantSimd32(ValueNum argVN); - simd64_t GetConstantSimd64(ValueNum argVN); + simd32_t GetConstantSimd32(ValueNum argVN); + simd64_t GetConstantSimd64(ValueNum argVN); simdmask_t GetConstantSimdMask(ValueNum argVN); #endif // TARGET_XARCH #endif // FEATURE_SIMD @@ -560,7 +561,7 @@ class ValueNumStore // Create or return the existimg value number representing a singleton exception set // for the exception value "x". - ValueNum VNExcSetSingleton(ValueNum x); + ValueNum VNExcSetSingleton(ValueNum x); ValueNumPair VNPExcSetSingleton(ValueNumPair x); // Returns true if the current pair of items are in ascending order and they are not duplicates. @@ -814,7 +815,7 @@ class ValueNumStore return ValueNumPair(liberalFuncVN, conservativeFuncVN); } - ValueNum VNForExpr(BasicBlock* block, var_types type = TYP_UNKNOWN); + ValueNum VNForExpr(BasicBlock* block, var_types type = TYP_UNKNOWN); ValueNumPair VNPairForExpr(BasicBlock* block, var_types type); // This controls extra tracing of the "evaluation" of "VNF_MapSelect" functions. 
@@ -916,7 +917,10 @@ class ValueNumStore ValueNum vnIdx; ValueNum vnBound; - UnsignedCompareCheckedBoundInfo() : cmpOper(GT_NONE), vnIdx(NoVN), vnBound(NoVN) + UnsignedCompareCheckedBoundInfo() + : cmpOper(GT_NONE) + , vnIdx(NoVN) + , vnBound(NoVN) { } }; @@ -930,7 +934,12 @@ class ValueNumStore ValueNum arrOp; unsigned cmpOper; ValueNum cmpOp; - CompareCheckedBoundArithInfo() : vnBound(NoVN), arrOper(GT_NONE), arrOp(NoVN), cmpOper(GT_NONE), cmpOp(NoVN) + CompareCheckedBoundArithInfo() + : vnBound(NoVN) + , arrOper(GT_NONE) + , arrOp(NoVN) + , cmpOper(GT_NONE) + , cmpOp(NoVN) { } #ifdef DEBUG @@ -958,7 +967,11 @@ class ValueNumStore ValueNum cmpOpVN; bool isUnsigned; - ConstantBoundInfo() : constVal(0), cmpOper(GT_NONE), cmpOpVN(NoVN), isUnsigned(false) + ConstantBoundInfo() + : constVal(0) + , cmpOper(GT_NONE) + , cmpOpVN(NoVN) + , isUnsigned(false) { } @@ -1307,7 +1320,8 @@ class ValueNumStore VNFunc m_func; ValueNum m_args[NumArgs]; - VNDefFuncApp() : m_func(VNF_COUNT) + VNDefFuncApp() + : m_func(VNF_COUNT) { for (size_t i = 0; i < NumArgs; i++) { @@ -1316,7 +1330,9 @@ class ValueNumStore } template - VNDefFuncApp(VNFunc func, VNs... vns) : m_func(func), m_args{vns...} + VNDefFuncApp(VNFunc func, VNs... 
vns) + : m_func(func) + , m_args{vns...} { static_assert_no_msg(NumArgs == sizeof...(VNs)); } @@ -1477,7 +1493,7 @@ class ValueNumStore static const int SmallIntConstMin = -1; static const int SmallIntConstMax = 10; static const unsigned SmallIntConstNum = SmallIntConstMax - SmallIntConstMin + 1; - static bool IsSmallIntConst(int i) + static bool IsSmallIntConst(int i) { return SmallIntConstMin <= i && i <= SmallIntConstMax; } @@ -1487,7 +1503,9 @@ class ValueNumStore { ValueNum vn; ValueNumList* next; - ValueNumList(const ValueNum& v, ValueNumList* n = nullptr) : vn(v), next(n) + ValueNumList(const ValueNum& v, ValueNumList* n = nullptr) + : vn(v) + , next(n) { } }; @@ -1518,8 +1536,8 @@ class ValueNumStore } typedef VNMap HandleToValueNumMap; - HandleToValueNumMap* m_handleMap; - HandleToValueNumMap* GetHandleMap() + HandleToValueNumMap* m_handleMap; + HandleToValueNumMap* GetHandleMap() { if (m_handleMap == nullptr) { @@ -1529,10 +1547,10 @@ class ValueNumStore } typedef SmallHashTable EmbeddedToCompileTimeHandleMap; - EmbeddedToCompileTimeHandleMap m_embeddedToCompileTimeHandleMap; + EmbeddedToCompileTimeHandleMap m_embeddedToCompileTimeHandleMap; typedef SmallHashTable FieldAddressToFieldSeqMap; - FieldAddressToFieldSeqMap m_fieldAddressToFieldSeqMap; + FieldAddressToFieldSeqMap m_fieldAddressToFieldSeqMap; struct LargePrimitiveKeyFuncsFloat : public JitLargePrimitiveKeyFuncs { @@ -1543,8 +1561,8 @@ class ValueNumStore }; typedef VNMap FloatToValueNumMap; - FloatToValueNumMap* m_floatCnsMap; - FloatToValueNumMap* GetFloatCnsMap() + FloatToValueNumMap* m_floatCnsMap; + FloatToValueNumMap* GetFloatCnsMap() { if (m_floatCnsMap == nullptr) { @@ -1563,8 +1581,8 @@ class ValueNumStore }; typedef VNMap DoubleToValueNumMap; - DoubleToValueNumMap* m_doubleCnsMap; - DoubleToValueNumMap* GetDoubleCnsMap() + DoubleToValueNumMap* m_doubleCnsMap; + DoubleToValueNumMap* GetDoubleCnsMap() { if (m_doubleCnsMap == nullptr) { @@ -1604,8 +1622,8 @@ class ValueNumStore }; typedef 
VNMap Simd8ToValueNumMap; - Simd8ToValueNumMap* m_simd8CnsMap; - Simd8ToValueNumMap* GetSimd8CnsMap() + Simd8ToValueNumMap* m_simd8CnsMap; + Simd8ToValueNumMap* GetSimd8CnsMap() { if (m_simd8CnsMap == nullptr) { @@ -1634,8 +1652,8 @@ class ValueNumStore }; typedef VNMap Simd12ToValueNumMap; - Simd12ToValueNumMap* m_simd12CnsMap; - Simd12ToValueNumMap* GetSimd12CnsMap() + Simd12ToValueNumMap* m_simd12CnsMap; + Simd12ToValueNumMap* GetSimd12CnsMap() { if (m_simd12CnsMap == nullptr) { @@ -1665,8 +1683,8 @@ class ValueNumStore }; typedef VNMap Simd16ToValueNumMap; - Simd16ToValueNumMap* m_simd16CnsMap; - Simd16ToValueNumMap* GetSimd16CnsMap() + Simd16ToValueNumMap* m_simd16CnsMap; + Simd16ToValueNumMap* GetSimd16CnsMap() { if (m_simd16CnsMap == nullptr) { @@ -1701,8 +1719,8 @@ class ValueNumStore }; typedef VNMap Simd32ToValueNumMap; - Simd32ToValueNumMap* m_simd32CnsMap; - Simd32ToValueNumMap* GetSimd32CnsMap() + Simd32ToValueNumMap* m_simd32CnsMap; + Simd32ToValueNumMap* GetSimd32CnsMap() { if (m_simd32CnsMap == nullptr) { @@ -1744,8 +1762,8 @@ class ValueNumStore }; typedef VNMap Simd64ToValueNumMap; - Simd64ToValueNumMap* m_simd64CnsMap; - Simd64ToValueNumMap* GetSimd64CnsMap() + Simd64ToValueNumMap* m_simd64CnsMap; + Simd64ToValueNumMap* GetSimd64CnsMap() { if (m_simd64CnsMap == nullptr) { @@ -1773,8 +1791,8 @@ class ValueNumStore }; typedef VNMap SimdMaskToValueNumMap; - SimdMaskToValueNumMap* m_simdMaskCnsMap; - SimdMaskToValueNumMap* GetSimdMaskCnsMap() + SimdMaskToValueNumMap* m_simdMaskCnsMap; + SimdMaskToValueNumMap* GetSimdMaskCnsMap() { if (m_simdMaskCnsMap == nullptr) { @@ -1813,8 +1831,8 @@ class ValueNumStore } typedef VNMap, VNDefFuncAppKeyFuncs<1>> VNFunc1ToValueNumMap; - VNFunc1ToValueNumMap* m_VNFunc1Map; - VNFunc1ToValueNumMap* GetVNFunc1Map() + VNFunc1ToValueNumMap* m_VNFunc1Map; + VNFunc1ToValueNumMap* GetVNFunc1Map() { if (m_VNFunc1Map == nullptr) { @@ -1824,8 +1842,8 @@ class ValueNumStore } typedef VNMap, VNDefFuncAppKeyFuncs<2>> 
VNFunc2ToValueNumMap; - VNFunc2ToValueNumMap* m_VNFunc2Map; - VNFunc2ToValueNumMap* GetVNFunc2Map() + VNFunc2ToValueNumMap* m_VNFunc2Map; + VNFunc2ToValueNumMap* GetVNFunc2Map() { if (m_VNFunc2Map == nullptr) { @@ -1835,8 +1853,8 @@ class ValueNumStore } typedef VNMap, VNDefFuncAppKeyFuncs<3>> VNFunc3ToValueNumMap; - VNFunc3ToValueNumMap* m_VNFunc3Map; - VNFunc3ToValueNumMap* GetVNFunc3Map() + VNFunc3ToValueNumMap* m_VNFunc3Map; + VNFunc3ToValueNumMap* GetVNFunc3Map() { if (m_VNFunc3Map == nullptr) { @@ -1846,8 +1864,8 @@ class ValueNumStore } typedef VNMap, VNDefFuncAppKeyFuncs<4>> VNFunc4ToValueNumMap; - VNFunc4ToValueNumMap* m_VNFunc4Map; - VNFunc4ToValueNumMap* GetVNFunc4Map() + VNFunc4ToValueNumMap* m_VNFunc4Map; + VNFunc4ToValueNumMap* GetVNFunc4Map() { if (m_VNFunc4Map == nullptr) { @@ -1858,7 +1876,8 @@ class ValueNumStore class MapSelectWorkCacheEntry { - union { + union + { ValueNum* m_memoryDependencies; ValueNum m_inlineMemoryDependencies[sizeof(ValueNum*) / sizeof(ValueNum)]; }; diff --git a/src/coreclr/jit/valuenumtype.h b/src/coreclr/jit/valuenumtype.h index 2eb3254e3e18b5..e41db972675434 100644 --- a/src/coreclr/jit/valuenumtype.h +++ b/src/coreclr/jit/valuenumtype.h @@ -115,7 +115,9 @@ struct ValueNumPair // Initializes both elements to "NoVN". Defined in ValueNum.cpp. 
ValueNumPair(); - ValueNumPair(ValueNum lib, ValueNum cons) : m_liberal(lib), m_conservative(cons) + ValueNumPair(ValueNum lib, ValueNum cons) + : m_liberal(lib) + , m_conservative(cons) { } diff --git a/src/coreclr/jit/varset.h b/src/coreclr/jit/varset.h index 465ab146cbaca3..b9e4cab1a0c458 100644 --- a/src/coreclr/jit/varset.h +++ b/src/coreclr/jit/varset.h @@ -108,7 +108,7 @@ typedef BitSetOpsWithCounter VarSetOps; #else -typedef VarSetOpsRaw VarSetOps; +typedef VarSetOpsRaw VarSetOps; #endif #define ALLVARSET_REP BSShortLong diff --git a/src/coreclr/jit/vartype.h b/src/coreclr/jit/vartype.h index 1623addb69b079..642ab159360350 100644 --- a/src/coreclr/jit/vartype.h +++ b/src/coreclr/jit/vartype.h @@ -225,7 +225,7 @@ inline bool varTypeIsIntOrI(T vt) #ifdef TARGET_64BIT || (TypeGet(vt) == TYP_I_IMPL) #endif // TARGET_64BIT - ); + ); } template @@ -321,11 +321,11 @@ inline bool varTypeUsesFloatReg(T vt) template inline bool varTypeUsesMaskReg(T vt) { -// The technically correct check is: -// return varTypeRegister[TypeGet(vt)] == VTR_MASK; -// -// However, we only have one type that uses VTR_MASK today -// and so its quite a bit cheaper to just check that directly + // The technically correct check is: + // return varTypeRegister[TypeGet(vt)] == VTR_MASK; + // + // However, we only have one type that uses VTR_MASK today + // and so its quite a bit cheaper to just check that directly #if defined(FEATURE_SIMD) && (defined(TARGET_XARCH) || defined(TARGET_ARM64)) assert((TypeGet(vt) == TYP_MASK) || (varTypeRegister[TypeGet(vt)] != VTR_MASK)); From 1c1903363e64915f1f044c5d200158a1f15e1281 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Strehovsk=C3=BD?= Date: Thu, 4 Apr 2024 07:09:32 +0900 Subject: [PATCH 075/132] Fix building x86 Windows native AOT runtime packs (#100512) Looks like we had a whole extra job definition just to avoid building the native AOT parts in the official build. 
--- eng/pipelines/runtime-official.yml | 36 +----------------------------- 1 file changed, 1 insertion(+), 35 deletions(-) diff --git a/eng/pipelines/runtime-official.yml b/eng/pipelines/runtime-official.yml index feb6b016d46c77..b8e74630b856ec 100644 --- a/eng/pipelines/runtime-official.yml +++ b/eng/pipelines/runtime-official.yml @@ -65,6 +65,7 @@ extends: buildConfig: release platforms: - windows_x64 + - windows_x86 - windows_arm64 jobParameters: templatePath: 'templates-official' @@ -89,41 +90,6 @@ extends: parameters: name: $(osGroup)$(osSubgroup)_$(archType) - - # - # Build CoreCLR runtime packs - # Windows x86 - # No NativeAOT as NativeAOT is not supported on x86 - # Sign diagnostic files after native build - # - - template: /eng/pipelines/common/platform-matrix.yml - parameters: - jobTemplate: /eng/pipelines/common/global-build-job.yml - buildConfig: release - platforms: - - windows_x86 - jobParameters: - templatePath: 'templates-official' - buildArgs: -s clr.runtime+clr.alljits -c $(_BuildConfig) /bl:$(Build.SourcesDirectory)/artifacts/logs/$(_BuildConfig)/CoreClrNativeBuild.binlog - nameSuffix: CoreCLR - isOfficialBuild: ${{ variables.isOfficialBuild }} - timeoutInMinutes: 120 - postBuildSteps: - - template: /eng/pipelines/coreclr/templates/sign-diagnostic-files.yml - parameters: - basePath: $(Build.SourcesDirectory)/artifacts/bin/coreclr - isOfficialBuild: ${{ variables.isOfficialBuild }} - timeoutInMinutes: 30 - # Now that we've signed the diagnostic files, do the rest of the build. - - template: /eng/pipelines/common/templates/global-build-step.yml - parameters: - buildArgs: -s clr.corelib+clr.nativecorelib+clr.tools+clr.packages+libs+host+packs -c $(_BuildConfig) - displayName: Build managed CoreCLR components, all libraries, hosts, and packs - - # Upload the results. 
- - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - parameters: - name: $(osGroup)$(osSubgroup)_$(archType) # # Build CoreCLR runtime packs # Mac x64/arm64 From ac7f0253bb3daee50b8f2579524f59d32dc42669 Mon Sep 17 00:00:00 2001 From: Manish Godse <61718172+mangod9@users.noreply.github.com> Date: Wed, 3 Apr 2024 16:02:56 -0700 Subject: [PATCH 076/132] Update CI builds to not use VS Previews (#100598) * Update CI to use Release VS2022. --- eng/pipelines/common/xplat-setup.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/eng/pipelines/common/xplat-setup.yml b/eng/pipelines/common/xplat-setup.yml index 068a506262c005..743f6a42531bcc 100644 --- a/eng/pipelines/common/xplat-setup.yml +++ b/eng/pipelines/common/xplat-setup.yml @@ -181,12 +181,12 @@ jobs: # Official Build Windows Pool ${{ if and(or(eq(parameters.osGroup, 'windows'), eq(parameters.jobParameters.hostedOs, 'windows')), ne(variables['System.TeamProject'], 'public')) }}: name: $(DncEngInternalBuildPool) - demands: ImageOverride -equals windows.vs2022preview.amd64 + demands: ImageOverride -equals windows.vs2022.amd64 # Public Windows Build Pool ${{ if and(or(eq(parameters.osGroup, 'windows'), eq(parameters.jobParameters.hostedOs, 'windows')), eq(variables['System.TeamProject'], 'public')) }}: name: $(DncEngPublicBuildPool) - demands: ImageOverride -equals windows.vs2022preview.amd64.open + demands: ImageOverride -equals windows.vs2022.amd64.open ${{ if eq(parameters.helixQueuesTemplate, '') }}: From 95b67ed92ee5b785f6dd92efad2404583e05a8e7 Mon Sep 17 00:00:00 2001 From: Jan Vorlicek Date: Thu, 4 Apr 2024 01:48:33 +0200 Subject: [PATCH 077/132] Remove HOST_CALLS contract and related stuff (#100604) * Remove HOST_CALLS contract and related stuff The HOST_CALLS contract is obsolete remainder from the .NET framework era, this change removes all of the use of that contract and the related code. * Remove ShouldAvoidHost calls et al too. 
--- src/coreclr/inc/contract.h | 91 +--------------- src/coreclr/inc/staticcontract.h | 9 -- .../Runtime/windows/CoffNativeCodeManager.cpp | 1 - src/coreclr/utilcode/check.cpp | 1 - src/coreclr/utilcode/debug.cpp | 6 -- src/coreclr/vm/amd64/cgenamd64.cpp | 1 - src/coreclr/vm/amd64/gmsamd64.cpp | 18 +--- src/coreclr/vm/amd64/gmscpu.h | 3 +- src/coreclr/vm/arm/gmscpu.h | 3 +- src/coreclr/vm/arm/stubs.cpp | 19 +--- src/coreclr/vm/arm64/gmscpu.h | 3 +- src/coreclr/vm/arm64/stubs.cpp | 19 +--- src/coreclr/vm/codeman.cpp | 59 +---------- src/coreclr/vm/codeman.h | 7 +- src/coreclr/vm/ecall.cpp | 1 - src/coreclr/vm/eecontract.cpp | 39 ------- src/coreclr/vm/eecontract.h | 5 - src/coreclr/vm/eetwain.cpp | 1 - .../vm/eventing/eventpipe/ep-rt-coreclr.cpp | 4 - src/coreclr/vm/eventtrace.cpp | 5 - src/coreclr/vm/frames.cpp | 15 +-- src/coreclr/vm/frames.h | 2 +- src/coreclr/vm/gc_unwind_x86.inl | 2 - src/coreclr/vm/i386/cgenx86.cpp | 11 -- src/coreclr/vm/i386/gmscpu.h | 3 +- src/coreclr/vm/i386/gmsx86.cpp | 36 +------ src/coreclr/vm/jitinterface.cpp | 2 - src/coreclr/vm/loongarch64/gmscpu.h | 3 +- src/coreclr/vm/loongarch64/stubs.cpp | 19 +--- src/coreclr/vm/proftoeeinterfaceimpl.cpp | 100 +++--------------- src/coreclr/vm/proftoeeinterfaceimpl.inl | 37 ------- src/coreclr/vm/riscv64/gmscpu.h | 3 +- src/coreclr/vm/riscv64/stubs.cpp | 19 +--- src/coreclr/vm/vars.hpp | 9 -- 34 files changed, 43 insertions(+), 513 deletions(-) diff --git a/src/coreclr/inc/contract.h b/src/coreclr/inc/contract.h index d4376d61da85df..a3017ab9cb1b07 100644 --- a/src/coreclr/inc/contract.h +++ b/src/coreclr/inc/contract.h @@ -140,7 +140,6 @@ // ModeViolation // FaultViolation // FaultNotFatal -// HostViolation // LoadsTypeViolation // TakesLockViolation // @@ -378,7 +377,7 @@ struct DbgStateLockState #define CONTRACT_BITMASK_OK_TO_THROW 0x1 << 0 #define CONTRACT_BITMASK_FAULT_FORBID 0x1 << 1 -#define CONTRACT_BITMASK_HOSTCALLS 0x1 << 2 +// Unused 0x1 << 2 #define CONTRACT_BITMASK_SOTOLERANT 0x1 << 3 
#define CONTRACT_BITMASK_DEBUGONLY 0x1 << 4 #define CONTRACT_BITMASK_SONOTMAINLINE 0x1 << 5 @@ -422,7 +421,6 @@ struct ClrDebugState // By default, GetThread() is perfectly fine to call // By default, it's ok to take a lock (or call someone who does) m_flags = CONTRACT_BITMASK_OK_TO_THROW| - CONTRACT_BITMASK_HOSTCALLS| CONTRACT_BITMASK_SOTOLERANT| CONTRACT_BITMASK_OK_TO_LOCK| CONTRACT_BITMASK_OK_TO_RETAKE_LOCK; @@ -512,30 +510,6 @@ struct ClrDebugState CONTRACT_BITMASK_RESET(CONTRACT_BITMASK_FAULT_FORBID); } - //--// - BOOL IsHostCaller() - { - return CONTRACT_BITMASK_IS_SET(CONTRACT_BITMASK_HOSTCALLS); - } - - void SetHostCaller() - { - CONTRACT_BITMASK_SET(CONTRACT_BITMASK_HOSTCALLS); - } - - - BOOL SetHostCaller(BOOL value) - { - BOOL prevState = CONTRACT_BITMASK_IS_SET(CONTRACT_BITMASK_HOSTCALLS); - CONTRACT_BITMASK_UPDATE(CONTRACT_BITMASK_HOSTCALLS,value); - return prevState; - } - - void ResetHostCaller() - { - CONTRACT_BITMASK_RESET(CONTRACT_BITMASK_HOSTCALLS); - } - //--// BOOL IsDebugOnly() { @@ -896,11 +870,8 @@ class BaseContract SO_MAINLINE_No = 0x00000800, // code is not part of our mainline SO scenario - // Any place where we can't safely call into the host should have a HOST_NoCalls contract - HOST_Mask = 0x00003000, - HOST_Calls = 0x00002000, - HOST_NoCalls = 0x00001000, - HOST_Disabled = 0x00000000, // the default + // Unused = 0x00002000, + // Unused = 0x00001000, // These enforce the CAN_TAKE_LOCK / CANNOT_TAKE_LOCK contracts CAN_TAKE_LOCK_Mask = 0x00060000, @@ -920,7 +891,7 @@ class BaseContract LOADS_TYPE_Disabled = 0x00000000, // the default ALL_Disabled = THROWS_Disabled|GC_Disabled|FAULT_Disabled|MODE_Disabled|LOADS_TYPE_Disabled| - HOST_Disabled|CAN_TAKE_LOCK_Disabled|CAN_RETAKE_LOCK_No_Disabled + CAN_TAKE_LOCK_Disabled|CAN_RETAKE_LOCK_No_Disabled }; @@ -1124,7 +1095,6 @@ enum ContractViolationBits FaultNotFatal = 0x00000010, // suppress INJECT_FAULT but not fault injection by harness LoadsTypeViolation = 0x00000040, // suppress LOADS_TYPE 
tags in this scope TakesLockViolation = 0x00000080, // suppress CAN_TAKE_LOCK tags in this scope - HostViolation = 0x00000100, // suppress HOST_CALLS tags in this scope //These are not violation bits. We steal some bits out of the violation mask to serve as // general flag bits. @@ -1667,7 +1637,7 @@ class ContractViolationHolder FORCEINLINE void EnterInternal(UINT_PTR violationMask) { _ASSERTE(0 == (violationMask & ~(ThrowsViolation | GCViolation | ModeViolation | FaultViolation | - FaultNotFatal | HostViolation | + FaultNotFatal | TakesLockViolation | LoadsTypeViolation)) || violationMask == AllViolation); @@ -1738,9 +1708,6 @@ enum PermanentContractViolationReason ReasonIBC, // Code runs in IBC scenarios only and the violation is safe. ReasonNGEN, // Code runs in NGEN scenarios only and the violation is safe. ReasonProfilerCallout, // Profiler implementers are guaranteed not to throw. - ReasonUnsupportedForSQLF1Profiling, // This code path violates HOST_NOCALLS, but that's ok b/c SQL will never - // invoke it, and thus SQL/F1 profiling (the primary reason to enforce - // HOST_NOCALLS) is not in danger. ReasonRuntimeReentrancy, // e.g. SafeQueryInterface ReasonShutdownOnly, // Code path only runs as part of Shutdown and the violation is safe. 
ReasonSOTolerance, // We would like to redesign SO contracts anyways @@ -2006,54 +1973,6 @@ inline ClrDebugState *GetClrDebugState(BOOL fAlloc) return NULL; } -#endif // ENABLE_CONTRACTS_IMPL - -#ifdef ENABLE_CONTRACTS_IMPL - -class HostNoCallHolder -{ - public: - DEBUG_NOINLINE HostNoCallHolder() - { - SCAN_SCOPE_BEGIN; - STATIC_CONTRACT_HOST_NOCALLS; - - m_clrDebugState = GetClrDebugState(); - m_previousState = m_clrDebugState->SetHostCaller(FALSE); - } - - DEBUG_NOINLINE ~HostNoCallHolder() - { - SCAN_SCOPE_END; - - m_clrDebugState->SetHostCaller(m_previousState); - } - - private: - BOOL m_previousState; - ClrDebugState* m_clrDebugState; - -}; - -#define BEGIN_HOST_NOCALL_CODE \ - { \ - HostNoCallHolder __hostNoCallHolder; \ - CantAllocHolder __cantAlloc; - -#define END_HOST_NOCALL_CODE \ - } - -#else // ENABLE_CONTRACTS_IMPL -#define BEGIN_HOST_NOCALL_CODE \ - { \ - CantAllocHolder __cantAlloc; \ - -#define END_HOST_NOCALL_CODE \ - } -#endif - - -#if defined(ENABLE_CONTRACTS_IMPL) // Macros to indicate we're taking or releasing locks diff --git a/src/coreclr/inc/staticcontract.h b/src/coreclr/inc/staticcontract.h index ecb528829b7025..df26383593e7e4 100644 --- a/src/coreclr/inc/staticcontract.h +++ b/src/coreclr/inc/staticcontract.h @@ -93,8 +93,6 @@ #define ANNOTATION_FN_MODE_COOPERATIVE __annotation(W("MODE_COOPERATIVE ") SCAN_WIDEN(__FUNCTION__)) #define ANNOTATION_FN_MODE_PREEMPTIVE __annotation(W("MODE_PREEMPTIVE ") SCAN_WIDEN(__FUNCTION__)) #define ANNOTATION_FN_MODE_ANY __annotation(W("MODE_ANY ") SCAN_WIDEN(__FUNCTION__)) -#define ANNOTATION_FN_HOST_NOCALLS __annotation(W("HOST_NOCALLS ") SCAN_WIDEN(__FUNCTION__)) -#define ANNOTATION_FN_HOST_CALLS __annotation(W("HOST_CALLS ") SCAN_WIDEN(__FUNCTION__)) #define ANNOTATION_ENTRY_POINT __annotation(W("SO_EP ") SCAN_WIDEN(__FUNCTION__)) @@ -135,9 +133,6 @@ #define ANNOTATION_TRY_MARKER { } #define ANNOTATION_CATCH_MARKER { } -#define ANNOTATION_FN_HOST_NOCALLS { } -#define ANNOTATION_FN_HOST_CALLS { } - 
#define ANNOTATION_FN_SPECIAL_HOLDER_BEGIN { } #define ANNOTATION_SPECIAL_HOLDER_END { } #define ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT { } @@ -155,8 +150,6 @@ #define ANNOTATION_FN_MODE_COOPERATIVE { } #define ANNOTATION_FN_MODE_PREEMPTIVE { } #define ANNOTATION_FN_MODE_ANY { } -#define ANNOTATION_FN_HOST_NOCALLS { } -#define ANNOTATION_FN_HOST_CALLS { } #define ANNOTATION_SUPPORTS_DAC { } #define ANNOTATION_SUPPORTS_DAC_HOST_ONLY { } @@ -179,8 +172,6 @@ #define STATIC_CONTRACT_FORBID_FAULT ANNOTATION_FN_FORBID_FAULT #define STATIC_CONTRACT_GC_TRIGGERS ANNOTATION_FN_GC_TRIGGERS #define STATIC_CONTRACT_GC_NOTRIGGER ANNOTATION_FN_GC_NOTRIGGER -#define STATIC_CONTRACT_HOST_NOCALLS ANNOTATION_FN_HOST_NOCALLS -#define STATIC_CONTRACT_HOST_CALLS ANNOTATION_FN_HOST_CALLS #define STATIC_CONTRACT_SUPPORTS_DAC ANNOTATION_SUPPORTS_DAC #define STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY ANNOTATION_SUPPORTS_DAC_HOST_ONLY diff --git a/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp b/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp index 497a09ce815ff4..0f2aa4f73669c2 100644 --- a/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp +++ b/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp @@ -29,7 +29,6 @@ #define CONTRACTL_END #define NOTHROW #define GC_NOTRIGGER -#define HOST_NOCALLS #include "../../inc/gcdecoder.cpp" #include "../../inc/gc_unwind_x86.h" diff --git a/src/coreclr/utilcode/check.cpp b/src/coreclr/utilcode/check.cpp index 30296b0ebc0641..617f0c3dd9836a 100644 --- a/src/coreclr/utilcode/check.cpp +++ b/src/coreclr/utilcode/check.cpp @@ -63,7 +63,6 @@ SPECIALIZED_VIOLATION(GCViolation); SPECIALIZED_VIOLATION(ModeViolation); SPECIALIZED_VIOLATION(FaultViolation); SPECIALIZED_VIOLATION(FaultNotFatal); -SPECIALIZED_VIOLATION(HostViolation); SPECIALIZED_VIOLATION(TakesLockViolation); SPECIALIZED_VIOLATION(LoadsTypeViolation); diff --git a/src/coreclr/utilcode/debug.cpp b/src/coreclr/utilcode/debug.cpp 
index 9784bc71dc98ef..cc49e9bcfedc1e 100644 --- a/src/coreclr/utilcode/debug.cpp +++ b/src/coreclr/utilcode/debug.cpp @@ -411,12 +411,6 @@ VOID DbgAssertDialog(const char *szFile, int iLine, const char *szExpr) SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; - // Raising the assert dialog can cause us to re-enter the host when allocating - // memory for the string. Since this is debug-only code, we can safely skip - // violation asserts here, particularly since they can also cause infinite - // recursion. - PERMANENT_CONTRACT_VIOLATION(HostViolation, ReasonDebugOnly); - dbgForceToMemory = &szFile; //make certain these args are available in the debugger dbgForceToMemory = &iLine; dbgForceToMemory = &szExpr; diff --git a/src/coreclr/vm/amd64/cgenamd64.cpp b/src/coreclr/vm/amd64/cgenamd64.cpp index f774d71a3b427f..8e136612c8c80d 100644 --- a/src/coreclr/vm/amd64/cgenamd64.cpp +++ b/src/coreclr/vm/amd64/cgenamd64.cpp @@ -93,7 +93,6 @@ void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloats #ifdef PROFILING_SUPPORTED PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this)); #endif - HOST_NOCALLS; MODE_ANY; SUPPORTS_DAC; } diff --git a/src/coreclr/vm/amd64/gmsamd64.cpp b/src/coreclr/vm/amd64/gmsamd64.cpp index 41c7b0c9afa211..8af5247d07c334 100644 --- a/src/coreclr/vm/amd64/gmsamd64.cpp +++ b/src/coreclr/vm/amd64/gmsamd64.cpp @@ -11,8 +11,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, MachState* unwoundState, DWORD threadId, - int funCallDepth /* = 1 */, - HostCallPreference hostCallPreference /* = (HostCallPreference)(-1) */) + int funCallDepth /* = 1 */) { CONTRACTL { @@ -83,20 +82,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, { // Determine whether given IP resides in JITted code. (It returns nonzero in that case.) // Use it now to see if we've unwound to managed code yet. 
- BOOL fFailedReaderLock = FALSE; - BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock); - if (fFailedReaderLock) - { - // We don't know if we would have been able to find a JIT - // manager, because we couldn't enter the reader lock without - // yielding (and our caller doesn't want us to yield). So abort - // now. - - // Invalidate the lazyState we're returning, so the caller knows - // we aborted before we could fully unwind - unwoundState->_pRetAddr = NULL; - return; - } + BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc); if (fIsManagedCode) break; diff --git a/src/coreclr/vm/amd64/gmscpu.h b/src/coreclr/vm/amd64/gmscpu.h index 9836f03253cf4a..4154fd995d1c2c 100644 --- a/src/coreclr/vm/amd64/gmscpu.h +++ b/src/coreclr/vm/amd64/gmscpu.h @@ -101,8 +101,7 @@ struct LazyMachState : public MachState static void unwindLazyState(LazyMachState* baseState, MachState* lazyState, DWORD threadId, - int funCallDepth = 1, - HostCallPreference hostCallPreference = AllowHostCalls); + int funCallDepth = 1); friend class HelperMethodFrame; friend class CheckAsmOffsets; diff --git a/src/coreclr/vm/arm/gmscpu.h b/src/coreclr/vm/arm/gmscpu.h index faa93a2279b3c3..42641889ad9e37 100644 --- a/src/coreclr/vm/arm/gmscpu.h +++ b/src/coreclr/vm/arm/gmscpu.h @@ -81,8 +81,7 @@ struct LazyMachState : public MachState { static void unwindLazyState(LazyMachState* baseState, MachState* lazyState, DWORD threadId, - int funCallDepth = 1, - HostCallPreference hostCallPreference = AllowHostCalls); + int funCallDepth = 1); friend class HelperMethodFrame; friend class CheckAsmOffsets; diff --git a/src/coreclr/vm/arm/stubs.cpp b/src/coreclr/vm/arm/stubs.cpp index 1424dcecbd918d..5f8e3bf445e78b 100644 --- a/src/coreclr/vm/arm/stubs.cpp +++ b/src/coreclr/vm/arm/stubs.cpp @@ -509,8 +509,7 @@ void FlushWriteBarrierInstructionCache() void LazyMachState::unwindLazyState(LazyMachState* baseState, MachState* unwoundstate, DWORD 
threadId, - int funCallDepth, - HostCallPreference hostCallPreference) + int funCallDepth) { T_CONTEXT ctx; T_KNONVOLATILE_CONTEXT_POINTERS nonVolRegPtrs; @@ -575,20 +574,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, { // Determine whether given IP resides in JITted code. (It returns nonzero in that case.) // Use it now to see if we've unwound to managed code yet. - BOOL fFailedReaderLock = FALSE; - BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock); - if (fFailedReaderLock) - { - // We don't know if we would have been able to find a JIT - // manager, because we couldn't enter the reader lock without - // yielding (and our caller doesn't want us to yield). So abort - // now. - - // Invalidate the lazyState we're returning, so the caller knows - // we aborted before we could fully unwind - unwoundstate->_isValid = false; - return; - } + BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc); if (fIsManagedCode) break; @@ -1583,7 +1569,6 @@ void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloats #ifdef PROFILING_SUPPORTED PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this)); #endif - HOST_NOCALLS; MODE_ANY; SUPPORTS_DAC; } diff --git a/src/coreclr/vm/arm64/gmscpu.h b/src/coreclr/vm/arm64/gmscpu.h index 000eed14b4743b..f33230702afc5f 100644 --- a/src/coreclr/vm/arm64/gmscpu.h +++ b/src/coreclr/vm/arm64/gmscpu.h @@ -44,8 +44,7 @@ struct LazyMachState : public MachState{ static void unwindLazyState(LazyMachState* baseState, MachState* lazyState, DWORD threadId, - int funCallDepth = 1, - HostCallPreference hostCallPreference = AllowHostCalls); + int funCallDepth = 1); }; inline void LazyMachState::setLazyStateFromUnwind(MachState* copy) diff --git a/src/coreclr/vm/arm64/stubs.cpp b/src/coreclr/vm/arm64/stubs.cpp index 03783f016a52d3..623938dfba61cb 100644 --- a/src/coreclr/vm/arm64/stubs.cpp +++ 
b/src/coreclr/vm/arm64/stubs.cpp @@ -277,8 +277,7 @@ void ClearRegDisplayArgumentAndScratchRegisters(REGDISPLAY * pRD) void LazyMachState::unwindLazyState(LazyMachState* baseState, MachState* unwoundstate, DWORD threadId, - int funCallDepth, - HostCallPreference hostCallPreference) + int funCallDepth) { T_CONTEXT context; T_KNONVOLATILE_CONTEXT_POINTERS nonVolContextPtrs; @@ -357,20 +356,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, { // Determine whether given IP resides in JITted code. (It returns nonzero in that case.) // Use it now to see if we've unwound to managed code yet. - BOOL fFailedReaderLock = FALSE; - BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock); - if (fFailedReaderLock) - { - // We don't know if we would have been able to find a JIT - // manager, because we couldn't enter the reader lock without - // yielding (and our caller doesn't want us to yield). So abort - // now. - - // Invalidate the lazyState we're returning, so the caller knows - // we aborted before we could fully unwind - unwoundstate->_isValid = false; - return; - } + BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc); if (fIsManagedCode) break; @@ -684,7 +670,6 @@ void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloats #ifdef PROFILING_SUPPORTED PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this)); #endif - HOST_NOCALLS; MODE_ANY; SUPPORTS_DAC; } diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index 0fdbf29418c48c..c6bac693c679a6 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -645,11 +645,10 @@ BOOL EEJitManager::CodeHeapIterator::Next() // writer lock and check for any readers. If there are any, the WriterLockHolder functions // release the writer and yield to wait for the readers to be done. 
-ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCallPreference /*=AllowHostCalls*/) +ExecutionManager::ReaderLockHolder::ReaderLockHolder() { CONTRACTL { NOTHROW; - if (hostCallPreference == AllowHostCalls) { HOST_CALLS; } else { HOST_NOCALLS; } GC_NOTRIGGER; CAN_TAKE_LOCK; } CONTRACTL_END; @@ -662,15 +661,6 @@ ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCall if (VolatileLoad(&m_dwWriterLock) != 0) { - if (hostCallPreference != AllowHostCalls) - { - // Rats, writer lock is held. Gotta bail. Since the reader count was already - // incremented, we're technically still blocking writers at the moment. But - // the holder who called us is about to call DecrementReader in its - // destructor and unblock writers. - return; - } - YIELD_WHILE ((VolatileLoad(&m_dwWriterLock) != 0)); } } @@ -765,7 +755,7 @@ ExecutionManager::WriterLockHolder::~WriterLockHolder() // If it is, we will assume the locked data is in an inconsistent // state and throw. We never actually take the lock. 
// Note: Throws -ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCallPreference /*=AllowHostCalls*/) +ExecutionManager::ReaderLockHolder::ReaderLockHolder() { SUPPORTS_DAC; @@ -947,7 +937,6 @@ ExecutionManager::ScanFlag ExecutionManager::GetScanFlags() CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; @@ -3297,7 +3286,6 @@ GCInfoToken EEJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken) CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; @@ -4556,35 +4544,6 @@ BOOL ExecutionManager::IsManagedCodeWithLock(PCODE currentPC) return result; } -//************************************************************************** -BOOL ExecutionManager::IsManagedCode(PCODE currentPC, HostCallPreference hostCallPreference /*=AllowHostCalls*/, BOOL *pfFailedReaderLock /*=NULL*/) -{ - CONTRACTL { - NOTHROW; - GC_NOTRIGGER; - } CONTRACTL_END; - -#ifdef DACCESS_COMPILE - return IsManagedCode(currentPC); -#else - if (hostCallPreference == AllowHostCalls) - { - return IsManagedCode(currentPC); - } - - ReaderLockHolder rlh(hostCallPreference); - if (!rlh.Acquired()) - { - _ASSERTE(pfFailedReaderLock != NULL); - *pfFailedReaderLock = TRUE; - return FALSE; - } - - RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; - return IsManagedCodeWorker(currentPC, &lockState); -#endif -} - //************************************************************************** // Assumes that the ExecutionManager reader/writer lock is taken or that // it is safe not to take it. 
@@ -4697,7 +4656,6 @@ RangeSection* ExecutionManager::GetRangeSection(TADDR addr, RangeSectionLockStat { CONTRACTL { NOTHROW; - HOST_NOCALLS; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; @@ -4713,7 +4671,6 @@ PTR_Module ExecutionManager::FindReadyToRunModule(TADDR currentData) NOTHROW; GC_NOTRIGGER; MODE_ANY; - STATIC_CONTRACT_HOST_CALLS; SUPPORTS_DAC; } CONTRACTL_END; @@ -4787,7 +4744,6 @@ void ExecutionManager::AddCodeRange(TADDR pStartRange, CONTRACTL { THROWS; GC_NOTRIGGER; - HOST_CALLS; PRECONDITION(pStartRange < pEndRange); PRECONDITION(CheckPointer(pJit)); PRECONDITION(CheckPointer(pModule)); @@ -4811,7 +4767,6 @@ void ExecutionManager::AddCodeRange(TADDR pStartRange, CONTRACTL { THROWS; GC_NOTRIGGER; - HOST_CALLS; PRECONDITION(pStartRange < pEndRange); PRECONDITION(CheckPointer(pJit)); PRECONDITION(CheckPointer(pHp)); @@ -4836,7 +4791,6 @@ void ExecutionManager::AddCodeRange(TADDR pStartRange, CONTRACTL { THROWS; GC_NOTRIGGER; - HOST_CALLS; PRECONDITION(pStartRange < pEndRange); PRECONDITION(CheckPointer(pJit)); PRECONDITION(CheckPointer(pRangeList)); @@ -4919,8 +4873,6 @@ void RangeSection::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) void ExecutionManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { - STATIC_CONTRACT_HOST_CALLS; - ReaderLockHolder rlh; // @@ -5587,7 +5539,6 @@ ReadyToRunInfo * ReadyToRunJitManager::JitTokenToReadyToRunInfo(const METHODTOKE CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; @@ -5599,7 +5550,6 @@ UINT32 ReadyToRunJitManager::JitTokenToGCInfoVersion(const METHODTOKEN& MethodTo CONTRACTL{ NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; @@ -5613,7 +5563,6 @@ PTR_RUNTIME_FUNCTION ReadyToRunJitManager::JitTokenToRuntimeFunction(const METHO CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; @@ -5625,7 +5574,6 @@ TADDR ReadyToRunJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToke CONTRACTL { NOTHROW; GC_NOTRIGGER; - 
HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; @@ -5638,7 +5586,6 @@ GCInfoToken ReadyToRunJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken) CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; @@ -6034,7 +5981,6 @@ BOOL ReadyToRunJitManager::IsFunclet(EECodeInfo* pCodeInfo) CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; @@ -6118,7 +6064,6 @@ void ReadyToRunJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodT CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; PRECONDITION(methodRegionInfo != NULL); } CONTRACTL_END; diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index 7051b4af8195fb..038ce3fe875ec4 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -2000,9 +2000,6 @@ class ExecutionManager // Returns whether currentPC is in managed code. Returns false for jump stubs on WIN64. static BOOL IsManagedCode(PCODE currentPC); - // Special version with profiler hook - static BOOL IsManagedCode(PCODE currentPC, HostCallPreference hostCallPreference, BOOL *pfFailedReaderLock); - // Returns true if currentPC is ready to run codegen static BOOL IsReadyToRunCode(PCODE currentPC); @@ -2033,7 +2030,7 @@ class ExecutionManager class ReaderLockHolder { public: - ReaderLockHolder(HostCallPreference hostCallPreference = AllowHostCalls); + ReaderLockHolder(); ~ReaderLockHolder(); BOOL Acquired(); @@ -2266,7 +2263,6 @@ inline TADDR EEJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToken CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; @@ -2280,7 +2276,6 @@ inline void EEJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodTo CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; PRECONDITION(methodRegionInfo != NULL); } CONTRACTL_END; diff --git a/src/coreclr/vm/ecall.cpp b/src/coreclr/vm/ecall.cpp index 74cab901277c71..35a5d36eae4d76 100644 --- a/src/coreclr/vm/ecall.cpp +++ 
b/src/coreclr/vm/ecall.cpp @@ -616,7 +616,6 @@ MethodDesc* ECall::MapTargetBackToMethod(PCODE pTarg, PCODE * ppAdjustedEntryPoi NOTHROW; GC_NOTRIGGER; MODE_ANY; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; diff --git a/src/coreclr/vm/eecontract.cpp b/src/coreclr/vm/eecontract.cpp index 6a8ca22eba4835..87d30daf7c460b 100644 --- a/src/coreclr/vm/eecontract.cpp +++ b/src/coreclr/vm/eecontract.cpp @@ -159,44 +159,5 @@ void EEContract::DoChecks(UINT testmask, _In_z_ const char *szFunction, _In_z_ c default: UNREACHABLE(); } - - // Host Triggers check - switch (testmask & HOST_Mask) - { - case HOST_Calls: - { - if (!m_pClrDebugState->IsHostCaller()) - { - if (!( (HostViolation|BadDebugState) & m_pClrDebugState->ViolationMask())) - { - // Avoid infinite recursion by temporarily allowing HOST_CALLS - // violations so that we don't get contract asserts in anything - // called downstream of CONTRACT_ASSERT. If we unwind out of - // here, our dtor will reset our state to what it was on entry. - CONTRACT_VIOLATION(HostViolation); - CONTRACT_ASSERT("HOST_CALLS encountered in a HOST_NOCALLS scope", - Contract::HOST_NoCalls, - Contract::HOST_Mask, - m_contractStackRecord.m_szFunction, - m_contractStackRecord.m_szFile, - m_contractStackRecord.m_lineNum - ); - } - } - } - break; - - case HOST_NoCalls: - // m_pClrDebugState->ViolationMaskReset( HostViolation ); - m_pClrDebugState->ResetHostCaller(); - break; - - case HOST_Disabled: - // Nothing - break; - - default: - UNREACHABLE(); - } } #endif // ENABLE_CONTRACTS diff --git a/src/coreclr/vm/eecontract.h b/src/coreclr/vm/eecontract.h index 4df0c6f8fcaf5b..290100775f7a8f 100644 --- a/src/coreclr/vm/eecontract.h +++ b/src/coreclr/vm/eecontract.h @@ -52,9 +52,6 @@ class EEContract : public BaseContract #define GC_TRIGGERS do { STATIC_CONTRACT_GC_TRIGGERS; REQUEST_TEST(Contract::GC_Triggers, Contract::GC_Disabled); } while(0) #define GC_NOTRIGGER do { STATIC_CONTRACT_GC_NOTRIGGER; REQUEST_TEST(Contract::GC_NoTrigger, 
Contract::GC_Disabled); } while(0) -#define HOST_NOCALLS do { STATIC_CONTRACT_HOST_NOCALLS; REQUEST_TEST(Contract::HOST_NoCalls, Contract::HOST_Disabled); } while(0) -#define HOST_CALLS do { STATIC_CONTRACT_HOST_CALLS; REQUEST_TEST(Contract::HOST_Calls, Contract::HOST_Disabled); } while(0) - #else // ENABLE_CONTRACTS_IMPL #define MODE_COOPERATIVE @@ -62,8 +59,6 @@ class EEContract : public BaseContract #define MODE_ANY #define GC_TRIGGERS #define GC_NOTRIGGER -#define HOST_NOCALLS -#define HOST_CALLS #endif // ENABLE_CONTRACTS_IMPL diff --git a/src/coreclr/vm/eetwain.cpp b/src/coreclr/vm/eetwain.cpp index 1665e1c86cf6f0..b12aac718c84bd 100644 --- a/src/coreclr/vm/eetwain.cpp +++ b/src/coreclr/vm/eetwain.cpp @@ -1180,7 +1180,6 @@ bool EECodeManager::UnwindStackFrame(PREGDISPLAY pContext, CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; diff --git a/src/coreclr/vm/eventing/eventpipe/ep-rt-coreclr.cpp b/src/coreclr/vm/eventing/eventpipe/ep-rt-coreclr.cpp index 913ef57fce501d..8542b2d0a904cd 100644 --- a/src/coreclr/vm/eventing/eventpipe/ep-rt-coreclr.cpp +++ b/src/coreclr/vm/eventing/eventpipe/ep-rt-coreclr.cpp @@ -74,10 +74,6 @@ ep_rt_coreclr_walk_managed_stack_for_thread ( EP_ASSERT (thread != NULL); EP_ASSERT (stack_contents != NULL); - // Calling into StackWalkFrames in preemptive mode violates the host contract, - // but this contract is not used on CoreCLR. - CONTRACT_VIOLATION (HostViolation); - // Before we call into StackWalkFrames we need to mark GC_ON_TRANSITIONS as FALSE // because under GCStress runs (GCStress=0x3), a GC will be triggered for every transition, // which will cause the GC to try to walk the stack while we are in the middle of walking the stack. 
diff --git a/src/coreclr/vm/eventtrace.cpp b/src/coreclr/vm/eventtrace.cpp index 89939e3d667994..1bac59f998fdca 100644 --- a/src/coreclr/vm/eventtrace.cpp +++ b/src/coreclr/vm/eventtrace.cpp @@ -264,11 +264,6 @@ ETW::SamplingLog::EtwStackWalkStatus ETW::SamplingLog::GetCurrentThreadsCallStac } CONTRACTL_END; - // The stack walk performed below can cause allocations (thus entering the host). But - // this is acceptable, since we're not supporting the use of SQL/F1 profiling and - // full-blown ETW CLR stacks (which would be redundant). - PERMANENT_CONTRACT_VIOLATION(HostViolation, ReasonUnsupportedForSQLF1Profiling); - m_FrameCount = 0; ETW::SamplingLog::EtwStackWalkStatus stackwalkStatus = SaveCurrentStack(); diff --git a/src/coreclr/vm/frames.cpp b/src/coreclr/vm/frames.cpp index c78b4ea6c1fa76..cfd8eb11a7a9cc 100644 --- a/src/coreclr/vm/frames.cpp +++ b/src/coreclr/vm/frames.cpp @@ -1811,27 +1811,18 @@ MethodDesc* HelperMethodFrame::GetFunction() // This is used when the HelperMethodFrame is first created. // * false: complete any initialization that was left to do, if any. // * unwindState - [out] DAC builds use this to return the unwound machine state. -// * hostCallPreference - (See code:HelperMethodFrame::HostCallPreference.) // // Return Value: // Normally, the function always returns TRUE meaning the initialization succeeded. // -// However, if hostCallPreference is NoHostCalls, AND if a callee (like -// LazyMachState::unwindLazyState) needed to acquire a JIT reader lock and was unable -// to do so (lest it re-enter the host), then InsureInit will abort and return FALSE. -// So any callers that specify hostCallPreference = NoHostCalls (which is not the -// default), should check for FALSE return, and refuse to use the HMF in that case. -// Currently only asynchronous calls made by profilers use that code path. 
// BOOL HelperMethodFrame::InsureInit(bool initialInit, - MachState * unwindState, - HostCallPreference hostCallPreference /* = AllowHostCalls */) + MachState * unwindState) { CONTRACTL { NOTHROW; GC_NOTRIGGER; - if ((hostCallPreference == AllowHostCalls) && !m_MachState.isValid()) { HOST_CALLS; } else { HOST_NOCALLS; } SUPPORTS_DAC; } CONTRACTL_END; @@ -1872,8 +1863,7 @@ BOOL HelperMethodFrame::InsureInit(bool initialInit, lazy, &unwound, threadId, - 0, - hostCallPreference); + 0); #if !defined(DACCESS_COMPILE) if (!unwound.isValid()) @@ -1890,7 +1880,6 @@ BOOL HelperMethodFrame::InsureInit(bool initialInit, // will commonly return an unwound state with _pRetAddr==NULL (which counts // as an "invalid" MachState). So have DAC builds deliberately fall through // rather than aborting when unwound is invalid. - _ASSERTE(hostCallPreference == NoHostCalls); return FALSE; } #endif // !defined(DACCESS_COMPILE) diff --git a/src/coreclr/vm/frames.h b/src/coreclr/vm/frames.h index 780e0e87242315..ea7eb1e1941360 100644 --- a/src/coreclr/vm/frames.h +++ b/src/coreclr/vm/frames.h @@ -1337,7 +1337,7 @@ class HelperMethodFrame : public Frame } #endif // DACCESS_COMPILE - BOOL InsureInit(bool initialInit, struct MachState* unwindState, HostCallPreference hostCallPreference = AllowHostCalls); + BOOL InsureInit(bool initialInit, struct MachState* unwindState); LazyMachState * MachineState() { LIMITED_METHOD_CONTRACT; diff --git a/src/coreclr/vm/gc_unwind_x86.inl b/src/coreclr/vm/gc_unwind_x86.inl index 28c7253f382676..5b308911bc0b16 100644 --- a/src/coreclr/vm/gc_unwind_x86.inl +++ b/src/coreclr/vm/gc_unwind_x86.inl @@ -142,7 +142,6 @@ size_t DecodeGCHdrInfo(GCInfoToken gcInfoToken, CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; @@ -494,7 +493,6 @@ FrameType GetHandlerFrameInfo(hdrInfo * info, CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; diff --git a/src/coreclr/vm/i386/cgenx86.cpp 
b/src/coreclr/vm/i386/cgenx86.cpp index b50ff163b2a24b..108bc66a99b153 100644 --- a/src/coreclr/vm/i386/cgenx86.cpp +++ b/src/coreclr/vm/i386/cgenx86.cpp @@ -146,7 +146,6 @@ void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloats) NOTHROW; GC_NOTRIGGER; MODE_ANY; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; @@ -170,7 +169,6 @@ void TransitionFrame::UpdateRegDisplayHelper(const PREGDISPLAY pRD, UINT cbStack NOTHROW; GC_NOTRIGGER; MODE_ANY; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; @@ -218,7 +216,6 @@ void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloat NOTHROW; GC_NOTRIGGER; MODE_ANY; - HOST_NOCALLS; PRECONDITION(m_MachState.isValid()); // InsureInit has been called SUPPORTS_DAC; } @@ -398,7 +395,6 @@ void ExternalMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFlo NOTHROW; GC_NOTRIGGER; MODE_ANY; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; @@ -418,7 +414,6 @@ void StubDispatchFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloat NOTHROW; GC_NOTRIGGER; MODE_ANY; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; @@ -475,7 +470,6 @@ void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool update NOTHROW; GC_NOTRIGGER; MODE_ANY; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; @@ -533,7 +527,6 @@ void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloats #ifdef PROFILING_SUPPORTED PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this)); #endif - HOST_NOCALLS; MODE_ANY; SUPPORTS_DAC; } @@ -622,7 +615,6 @@ void ResumableFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloats) NOTHROW; GC_NOTRIGGER; MODE_ANY; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; @@ -701,7 +693,6 @@ void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloats) CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; @@ -760,7 +751,6 @@ void PInvokeCalliFrame::UpdateRegDisplay(const PREGDISPLAY 
pRD, bool updateFloat NOTHROW; GC_NOTRIGGER; MODE_ANY; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; @@ -781,7 +771,6 @@ void TailCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloats) NOTHROW; GC_NOTRIGGER; MODE_ANY; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; diff --git a/src/coreclr/vm/i386/gmscpu.h b/src/coreclr/vm/i386/gmscpu.h index 5b3fa0ba881a8f..9dd91f42caf1c6 100644 --- a/src/coreclr/vm/i386/gmscpu.h +++ b/src/coreclr/vm/i386/gmscpu.h @@ -98,8 +98,7 @@ struct LazyMachState : public MachState { static void unwindLazyState(LazyMachState* baseState, MachState* lazyState, DWORD threadId, - int funCallDepth = 1, - HostCallPreference hostCallPreference = AllowHostCalls); + int funCallDepth = 1); friend class HelperMethodFrame; friend class CheckAsmOffsets; diff --git a/src/coreclr/vm/i386/gmsx86.cpp b/src/coreclr/vm/i386/gmsx86.cpp index 7fd4d28e430512..ca3b45b5b4ea4f 100644 --- a/src/coreclr/vm/i386/gmsx86.cpp +++ b/src/coreclr/vm/i386/gmsx86.cpp @@ -362,8 +362,7 @@ static bool shouldEnterCall(PTR_BYTE ip) { void LazyMachState::unwindLazyState(LazyMachState* baseState, MachState* lazyState, DWORD threadId, - int funCallDepth /* = 1 */, - HostCallPreference hostCallPreference /* = (HostCallPreference)(-1) */) + int funCallDepth /* = 1 */) { CONTRACTL { NOTHROW; @@ -1099,20 +1098,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, { // Determine whether given IP resides in JITted code. (It returns nonzero in that case.) // Use it now to see if we've unwound to managed code yet. - BOOL fFailedReaderLock = FALSE; - BOOL fIsManagedCode = ExecutionManager::IsManagedCode(*lazyState->pRetAddr(), hostCallPreference, &fFailedReaderLock); - if (fFailedReaderLock) - { - // We don't know if we would have been able to find a JIT - // manager, because we couldn't enter the reader lock without - // yielding (and our caller doesn't want us to yield). So abort - // now. 
- - // Invalidate the lazyState we're returning, so the caller knows - // we aborted before we could fully unwind - lazyState->_pRetAddr = NULL; - return; - } + BOOL fIsManagedCode = ExecutionManager::IsManagedCode(*lazyState->pRetAddr()); if (fIsManagedCode) goto done; @@ -1285,8 +1271,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, void LazyMachState::unwindLazyState(LazyMachState* baseState, MachState* lazyState, DWORD threadId, - int funCallDepth /* = 1 */, - HostCallPreference hostCallPreference /* = (HostCallPreference)(-1) */) + int funCallDepth /* = 1 */) { CONTRACTL { NOTHROW; @@ -1347,20 +1332,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, { // Determine whether given IP resides in JITted code. (It returns nonzero in that case.) // Use it now to see if we've unwound to managed code yet. - BOOL fFailedReaderLock = FALSE; - BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock); - if (fFailedReaderLock) - { - // We don't know if we would have been able to find a JIT - // manager, because we couldn't enter the reader lock without - // yielding (and our caller doesn't want us to yield). So abort - // now. - - // Invalidate the lazyState we're returning, so the caller knows - // we aborted before we could fully unwind - lazyState->_pRetAddr = NULL; - return; - } + BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc); if (fIsManagedCode) break; diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index 6bde54108a0426..bcaf0351e52b3c 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -14568,7 +14568,6 @@ TADDR EECodeInfo::GetSavedMethodCode() // be used during GC. 
NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; #ifndef HOST_64BIT @@ -14596,7 +14595,6 @@ TADDR EECodeInfo::GetStartAddress() CONTRACTL { NOTHROW; GC_NOTRIGGER; - HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; diff --git a/src/coreclr/vm/loongarch64/gmscpu.h b/src/coreclr/vm/loongarch64/gmscpu.h index 01420a8157162a..3576c3301a8bce 100644 --- a/src/coreclr/vm/loongarch64/gmscpu.h +++ b/src/coreclr/vm/loongarch64/gmscpu.h @@ -39,8 +39,7 @@ struct LazyMachState : public MachState{ static void unwindLazyState(LazyMachState* baseState, MachState* lazyState, DWORD threadId, - int funCallDepth = 1, - HostCallPreference hostCallPreference = AllowHostCalls); + int funCallDepth = 1); }; inline void LazyMachState::setLazyStateFromUnwind(MachState* copy) diff --git a/src/coreclr/vm/loongarch64/stubs.cpp b/src/coreclr/vm/loongarch64/stubs.cpp index 052d71ebc1e44e..56581498f003df 100644 --- a/src/coreclr/vm/loongarch64/stubs.cpp +++ b/src/coreclr/vm/loongarch64/stubs.cpp @@ -301,8 +301,7 @@ void ClearRegDisplayArgumentAndScratchRegisters(REGDISPLAY * pRD) void LazyMachState::unwindLazyState(LazyMachState* baseState, MachState* unwoundstate, DWORD threadId, - int funCallDepth, - HostCallPreference hostCallPreference) + int funCallDepth) { T_CONTEXT context; T_KNONVOLATILE_CONTEXT_POINTERS nonVolContextPtrs; @@ -381,20 +380,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, { // Determine whether given IP resides in JITted code. (It returns nonzero in that case.) // Use it now to see if we've unwound to managed code yet. - BOOL fFailedReaderLock = FALSE; - BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock); - if (fFailedReaderLock) - { - // We don't know if we would have been able to find a JIT - // manager, because we couldn't enter the reader lock without - // yielding (and our caller doesn't want us to yield). So abort - // now. 
- - // Invalidate the lazyState we're returning, so the caller knows - // we aborted before we could fully unwind - unwoundstate->_isValid = false; - return; - } + BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc); if (fIsManagedCode) break; @@ -701,7 +687,6 @@ void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloats #ifdef PROFILING_SUPPORTED PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this)); #endif - HOST_NOCALLS; MODE_ANY; SUPPORTS_DAC; } diff --git a/src/coreclr/vm/proftoeeinterfaceimpl.cpp b/src/coreclr/vm/proftoeeinterfaceimpl.cpp index 675d96b4859dcb..8c94aca1c40898 100644 --- a/src/coreclr/vm/proftoeeinterfaceimpl.cpp +++ b/src/coreclr/vm/proftoeeinterfaceimpl.cpp @@ -1921,11 +1921,6 @@ HRESULT GetFunctionInfoInternal(LPCBYTE ip, EECodeInfo * pCodeInfo) EE_THREAD_NOT_REQUIRED; CAN_TAKE_LOCK; CANNOT_RETAKE_LOCK; - - - // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the - // host (SQL). Corners will be cut to ensure this is the case - if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; } } CONTRACTL_END; @@ -1936,21 +1931,7 @@ HRESULT GetFunctionInfoInternal(LPCBYTE ip, EECodeInfo * pCodeInfo) return CORPROF_E_NOT_YET_AVAILABLE; } - if (ShouldAvoidHostCalls()) - { - ExecutionManager::ReaderLockHolder rlh(NoHostCalls); - if (!rlh.Acquired()) - { - // Couldn't get the info. Try again later - return CORPROF_E_ASYNCHRONOUS_UNSAFE; - } - - pCodeInfo->Init((PCODE)ip, ExecutionManager::ScanNoReaderLock); - } - else - { - pCodeInfo->Init((PCODE)ip); - } + pCodeInfo->Init((PCODE)ip); if (!pCodeInfo->IsValid()) { @@ -2019,11 +2000,6 @@ HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP(LPCBYTE ip, FunctionID * pFunct // This contract detects any attempts to reenter locks held at the time // this function was called. 
CANNOT_RETAKE_LOCK; - - - // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the - // host (SQL). Corners will be cut to ensure this is the case - if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; } } CONTRACTL_END; @@ -2237,11 +2213,6 @@ HRESULT GetCodeInfoFromCodeStart( // We need to take the ExecutionManager reader lock to find the // appropriate jit manager. CAN_TAKE_LOCK; - - - // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the - // host (SQL). Corners will be cut to ensure this is the case - if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; } } CONTRACTL_END; @@ -2299,7 +2270,6 @@ HRESULT GetCodeInfoFromCodeStart( &codeInfo); if (hr == CORPROF_E_ASYNCHRONOUS_UNSAFE) { - _ASSERTE(ShouldAvoidHostCalls()); return hr; } if (FAILED(hr)) @@ -2395,11 +2365,6 @@ HRESULT ProfToEEInterfaceImpl::GetCodeInfo(FunctionID functionId, LPCBYTE * pSta // (See locking contract comment in GetCodeInfoHelper.) CANNOT_RETAKE_LOCK; - - - // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the - // host (SQL). Corners will be cut to ensure this is the case - if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; } } CONTRACTL_END; @@ -2482,11 +2447,6 @@ HRESULT ProfToEEInterfaceImpl::GetCodeInfo2(FunctionID functionId, // (See locking contract comment in GetCodeInfoHelper.) CANNOT_RETAKE_LOCK; - - // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the - // host (SQL). 
Corners will be cut to ensure this is the case - if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; } - PRECONDITION(CheckPointer(pcCodeInfos, NULL_OK)); PRECONDITION(CheckPointer(codeInfos, NULL_OK)); } @@ -8139,11 +8099,6 @@ static BOOL EnsureFrameInitialized(Frame * pFrame) { NOTHROW; GC_NOTRIGGER; - - // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the - // host (SQL). Corners will be cut to ensure this is the case - if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; } - SUPPORTS_DAC; } CONTRACTL_END; @@ -8159,19 +8114,14 @@ static BOOL EnsureFrameInitialized(Frame * pFrame) if (pHMF->InsureInit( false, // initialInit - NULL, // unwindState - (ShouldAvoidHostCalls() ? - NoHostCalls : - AllowHostCalls) + NULL // unwindState ) != NULL) { // InsureInit() succeeded and found the return address return TRUE; } - // No return address was found. It must be because we asked InsureInit() to bail if - // it would have entered the host - _ASSERTE(ShouldAvoidHostCalls()); + // No return address was found return FALSE; } @@ -8202,10 +8152,6 @@ HRESULT ProfToEEInterfaceImpl::ProfilerEbpWalker( NOTHROW; MODE_ANY; EE_THREAD_NOT_REQUIRED; - - // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the - // host (SQL). Corners will be cut to ensure this is the case - if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; } } CONTRACTL_END; @@ -8256,7 +8202,6 @@ HRESULT ProfToEEInterfaceImpl::ProfilerEbpWalker( &codeInfo); if (hr == CORPROF_E_ASYNCHRONOUS_UNSAFE) { - _ASSERTE(ShouldAvoidHostCalls()); return hr; } if (SUCCEEDED(hr)) @@ -8485,27 +8430,18 @@ HRESULT ProfToEEInterfaceImpl::ProfilerStackWalkFramesWrapper(Thread * pThreadTo // // Arguments: // pCtx - Context to look at -// hostCallPreference - Describes how to acquire the reader lock--either AllowHostCalls -// or NoHostCalls (see code:HostCallPreference). 
// // Return Value: // S_OK: The context is in managed code // S_FALSE: The context is not in managed code. -// Error: Unable to determine (typically because hostCallPreference was NoHostCalls -// and the reader lock was unattainable without yielding) // -HRESULT IsContextInManagedCode(const CONTEXT * pCtx, HostCallPreference hostCallPreference) +HRESULT IsContextInManagedCode(const CONTEXT * pCtx) { WRAPPER_NO_CONTRACT; - BOOL fFailedReaderLock = FALSE; // if there's no Jit Manager for the IP, it's not managed code. - BOOL fIsManagedCode = ExecutionManager::IsManagedCode(GetIP(pCtx), hostCallPreference, &fFailedReaderLock); - if (fFailedReaderLock) - { - return CORPROF_E_ASYNCHRONOUS_UNSAFE; - } + BOOL fIsManagedCode = ExecutionManager::IsManagedCode(GetIP(pCtx)); return fIsManagedCode ? S_OK : S_FALSE; } @@ -8680,8 +8616,6 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread, goto Cleanup; } - HostCallPreference hostCallPreference; - // First, check "1) Target thread to walk == current thread OR Target thread is suspended" if (pThreadToSnapshot != pCurrentThread && !g_profControlBlock.fProfilerRequestedRuntimeSuspend) { @@ -8727,11 +8661,6 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread, #endif // !PLATFORM_SUPPORTS_SAFE_THREADSUSPEND } - hostCallPreference = - ShouldAvoidHostCalls() ? - NoHostCalls : // Async call: Ensure this thread won't yield & re-enter host - AllowHostCalls; // Synchronous calls may re-enter host just fine - // If target thread is in pre-emptive mode, the profiler's seed context is unnecessary // because our frame chain is good enough: it will give us at least as accurate a // starting point as the profiler could. 
Also, since profiler contexts cannot be @@ -8768,11 +8697,10 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread, goto Cleanup; } - hrCurrentContextIsManaged = IsContextInManagedCode(&ctxCurrent, hostCallPreference); + hrCurrentContextIsManaged = IsContextInManagedCode(&ctxCurrent); if (FAILED(hrCurrentContextIsManaged)) { // Couldn't get the info. Try again later - _ASSERTE(ShouldAvoidHostCalls()); hr = CORPROF_E_ASYNCHRONOUS_UNSAFE; goto Cleanup; } @@ -8840,7 +8768,7 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread, } else { - hr = IsContextInManagedCode(pctxSeed, hostCallPreference); + hr = IsContextInManagedCode(pctxSeed); if (FAILED(hr)) { hr = CORPROF_E_ASYNCHRONOUS_UNSAFE; @@ -8876,16 +8804,12 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread, { if (pThreadToSnapshot->GetSafelyRedirectableThreadContext(Thread::kDefaultChecks, &ctxCurrent, &rd)) { - BOOL fFailedReaderLock = FALSE; - BOOL fIsManagedCode = ExecutionManager::IsManagedCode(GetIP(&ctxCurrent), hostCallPreference, &fFailedReaderLock); + BOOL fIsManagedCode = ExecutionManager::IsManagedCode(GetIP(&ctxCurrent)); - if (!fFailedReaderLock) - { - // not in jitted or ngend code or inside an inlined P/Invoke (the leaf-most EE Frame is - // an InlinedCallFrame with an active call) - _ASSERTE(!fIsManagedCode || - (InlinedCallFrame::FrameHasActiveCall(pThreadToSnapshot->GetFrame()))); - } + // not in jitted or ngend code or inside an inlined P/Invoke (the leaf-most EE Frame is + // an InlinedCallFrame with an active call) + _ASSERTE(!fIsManagedCode || + (InlinedCallFrame::FrameHasActiveCall(pThreadToSnapshot->GetFrame()))); } } #endif // !PLATFORM_SUPPORTS_SAFE_THREADSUSPEND diff --git a/src/coreclr/vm/proftoeeinterfaceimpl.inl b/src/coreclr/vm/proftoeeinterfaceimpl.inl index 524900c7182e00..afbf50aa067cbe 100644 --- a/src/coreclr/vm/proftoeeinterfaceimpl.inl +++ b/src/coreclr/vm/proftoeeinterfaceimpl.inl @@ -87,43 +87,6 @@ inline BOOL IsCalledAsynchronously() } 
-//--------------------------------------------------------------------------------------- -// -// Simple helper that decides whether we should avoid calling into the host. Generally, -// host calls should be avoided if the current Info method was called asynchronously -// (i.e., from an F1-style hijack), for fear of re-entering the host (mainly SQL). -// -// Server GC threads are native (non-EE) threads, which therefore do not track enough -// state for us to determine if a call is made asynhronously on those threads. So we -// pessimistically assume that the current call on a server GC thread is from a hijack -// for the purposes of determining whether we may enter the host. Reasoning for this: -// * SQL enables server-mode GC -// * server GC threads are responsible for performing runtime suspension, and thus -// call Thread::SuspendThread() which yields/sleeps and thus enters the host. So -// server GC threads are examples of non-EE Threads that actually do spend time -// in the host (this otherwise almost never happens for other non-EE threads). -// * In spite of this pessimism, the effect on the profiler should be minimal. The -// host calls we're avoiding are from the code manager's lock, which: -// * a) Is only used when doing stack walks or translating IPs to functions -// * b) Is only affected if it tries to yield/sleep when the code manager -// writer lock is taken, and that happens for incredibly tiny windows of -// time. 
-// - -inline BOOL ShouldAvoidHostCalls() -{ - LIMITED_METHOD_CONTRACT; - - return - ( - IsCalledAsynchronously() || - ( - (GetThreadNULLOk() == NULL) && IsGCSpecialThread() - ) - ); -} - - //--------------------------------------------------------------------------------------- // // Simple helper that returns nonzero iff the current thread is a non-EE thread in the diff --git a/src/coreclr/vm/riscv64/gmscpu.h b/src/coreclr/vm/riscv64/gmscpu.h index 6506b10b8f751c..9330e81e773c53 100644 --- a/src/coreclr/vm/riscv64/gmscpu.h +++ b/src/coreclr/vm/riscv64/gmscpu.h @@ -39,8 +39,7 @@ struct LazyMachState : public MachState{ static void unwindLazyState(LazyMachState* baseState, MachState* lazyState, DWORD threadId, - int funCallDepth = 1, - HostCallPreference hostCallPreference = AllowHostCalls); + int funCallDepth = 1); }; inline void LazyMachState::setLazyStateFromUnwind(MachState* copy) diff --git a/src/coreclr/vm/riscv64/stubs.cpp b/src/coreclr/vm/riscv64/stubs.cpp index 4ce55a3849e12b..ebc0d0495c2a08 100644 --- a/src/coreclr/vm/riscv64/stubs.cpp +++ b/src/coreclr/vm/riscv64/stubs.cpp @@ -180,8 +180,7 @@ void ClearRegDisplayArgumentAndScratchRegisters(REGDISPLAY * pRD) void LazyMachState::unwindLazyState(LazyMachState* baseState, MachState* unwoundstate, DWORD threadId, - int funCallDepth, - HostCallPreference hostCallPreference) + int funCallDepth) { T_CONTEXT context; T_KNONVOLATILE_CONTEXT_POINTERS nonVolContextPtrs; @@ -266,20 +265,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, { // Determine whether given IP resides in JITted code. (It returns nonzero in that case.) // Use it now to see if we've unwound to managed code yet. 
- BOOL fFailedReaderLock = FALSE; - BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock); - if (fFailedReaderLock) - { - // We don't know if we would have been able to find a JIT - // manager, because we couldn't enter the reader lock without - // yielding (and our caller doesn't want us to yield). So abort - // now. - - // Invalidate the lazyState we're returning, so the caller knows - // we aborted before we could fully unwind - unwoundstate->_isValid = false; - return; - } + BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc); if (fIsManagedCode) break; @@ -618,7 +604,6 @@ void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloats #ifdef PROFILING_SUPPORTED PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this)); #endif - HOST_NOCALLS; MODE_ANY; SUPPORTS_DAC; } diff --git a/src/coreclr/vm/vars.hpp b/src/coreclr/vm/vars.hpp index 468a3bdb049c41..63a27a8701e542 100644 --- a/src/coreclr/vm/vars.hpp +++ b/src/coreclr/vm/vars.hpp @@ -686,15 +686,6 @@ PTR_GSCookie GetProcessGSCookiePtr() { return PTR_GSCookie(&s_gsCookie); } inline GSCookie GetProcessGSCookie() { return *(RAW_KEYWORD(volatile) GSCookie *)(&s_gsCookie); } -// Passed to JitManager APIs to determine whether to avoid calling into the host. -// The profiling API stackwalking uses this to ensure to avoid re-entering the host -// (particularly SQL) from a hijacked thread. 
-enum HostCallPreference -{ - AllowHostCalls, - NoHostCalls, -}; - #ifdef TARGET_WINDOWS typedef BOOL(WINAPI* PINITIALIZECONTEXT2)(PVOID Buffer, DWORD ContextFlags, PCONTEXT* Context, PDWORD ContextLength, ULONG64 XStateCompactionMask); extern PINITIALIZECONTEXT2 g_pfnInitializeContext2; From 01f039ce1daf29ea3ac0b484c6752b3143f3b697 Mon Sep 17 00:00:00 2001 From: Vladimir Sadov Date: Wed, 3 Apr 2024 18:53:44 -0700 Subject: [PATCH 078/132] RequiresProcessIsolation in Runtime_100437 (#100605) --- .../Regression/JitBlue/Runtime_100437/Runtime_100437.csproj | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/tests/JIT/Regression/JitBlue/Runtime_100437/Runtime_100437.csproj b/src/tests/JIT/Regression/JitBlue/Runtime_100437/Runtime_100437.csproj index 197767e2c4e249..0f460bbb16f72f 100644 --- a/src/tests/JIT/Regression/JitBlue/Runtime_100437/Runtime_100437.csproj +++ b/src/tests/JIT/Regression/JitBlue/Runtime_100437/Runtime_100437.csproj @@ -1,4 +1,8 @@ + + + true + From 5c4e2a301ec1dd8ef75a3ba98e5be1ff2cea0130 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Wed, 3 Apr 2024 20:31:21 -0700 Subject: [PATCH 079/132] Remove remaining CRT PAL wrappers and enable including standard headers in the CoreCLR build (#98336) - Remove malloc-family PAL and update callsites that might get passed 0 to bump up to 1. - Move `getenv` usage to use the `PAL` function directly when on non-Windows (to preserve the existing behavior). 
- Remove other remaining CRT PAL shims - Remove header shims and enable building with the CRT and STL headers across the product, with various build fixes required (mostly around using the standard min/max definitions) --- eng/native/configurecompiler.cmake | 1 - eng/native/functions.cmake | 6 + src/coreclr/CMakeLists.txt | 13 +- src/coreclr/binder/assemblyname.cpp | 2 +- src/coreclr/clrdefinitions.cmake | 1 + src/coreclr/debug/createdump/CMakeLists.txt | 2 - src/coreclr/debug/daccess/daccess.cpp | 2 +- src/coreclr/debug/dbgutil/CMakeLists.txt | 2 - src/coreclr/debug/debug-pal/CMakeLists.txt | 2 - src/coreclr/debug/di/rspriv.h | 4 +- src/coreclr/debug/di/rsthread.cpp | 10 +- src/coreclr/debug/di/rstype.cpp | 2 +- src/coreclr/debug/di/shimcallback.cpp | 2 +- src/coreclr/debug/di/stdafx.h | 3 + src/coreclr/debug/ee/debugger.cpp | 2 +- src/coreclr/debug/ee/funceval.cpp | 2 +- src/coreclr/debug/ee/stdafx.h | 2 + src/coreclr/debug/inc/dbgipcevents.h | 2 +- .../debug/shared/dbgtransportsession.cpp | 2 +- .../dlls/mscordac/mscordac_unixexports.src | 8 - src/coreclr/dlls/mscorpe/stdafx.h | 4 + src/coreclr/gc/env/common.h | 3 + src/coreclr/gc/env/gcenv.base.h | 9 - src/coreclr/gc/gc.cpp | 112 ++-- src/coreclr/gc/gcpriv.h | 9 +- src/coreclr/gc/sample/CMakeLists.txt | 1 + src/coreclr/gc/sample/GCSample.cpp | 2 +- src/coreclr/gc/sample/GCSample.vcxproj | 6 +- src/coreclr/gc/vxsort/defs.h | 14 +- src/coreclr/gc/windows/gcenv.windows.cpp | 10 +- src/coreclr/hosts/coreshim/CoreShim.h | 1 - src/coreclr/ildasm/dasm.cpp | 8 +- src/coreclr/ildasm/dis.cpp | 4 +- src/coreclr/ildasm/ildasmpch.h | 4 + src/coreclr/inc/allocacheck.h | 11 +- src/coreclr/inc/check.h | 2 +- src/coreclr/inc/clr_std/algorithm | 118 ---- src/coreclr/inc/clr_std/string | 425 ------------ src/coreclr/inc/clr_std/type_traits | 627 ------------------ src/coreclr/inc/clr_std/utility | 253 ------- src/coreclr/inc/clr_std/vector | 462 ------------- src/coreclr/inc/contract.h | 1 - src/coreclr/inc/contract.inl | 6 +- 
src/coreclr/inc/crtwrap.h | 2 +- src/coreclr/inc/daccess.h | 8 +- src/coreclr/inc/dacprivate.h | 4 +- src/coreclr/inc/holder.h | 5 - src/coreclr/inc/loaderheap.h | 2 +- src/coreclr/inc/random.h | 4 +- src/coreclr/inc/safemath.h | 8 +- src/coreclr/inc/utilcode.h | 81 +-- src/coreclr/jit/alloc.cpp | 6 +- src/coreclr/jit/assertionprop.cpp | 2 +- src/coreclr/jit/codegenarm64.cpp | 2 +- src/coreclr/jit/codegencommon.cpp | 8 +- src/coreclr/jit/compiler.cpp | 6 +- src/coreclr/jit/compiler.h | 2 +- src/coreclr/jit/compiler.hpp | 3 +- src/coreclr/jit/emit.cpp | 4 +- src/coreclr/jit/emitarm.cpp | 2 +- src/coreclr/jit/emitarm64.cpp | 2 +- src/coreclr/jit/fgopt.cpp | 4 +- src/coreclr/jit/fgprofilesynthesis.cpp | 2 +- src/coreclr/jit/gentree.cpp | 4 +- src/coreclr/jit/hashbv.cpp | 2 +- src/coreclr/jit/inline.h | 2 +- src/coreclr/jit/jit.h | 6 +- src/coreclr/jit/jiteh.cpp | 2 +- src/coreclr/jit/jitpch.h | 8 + src/coreclr/jit/jitstd/list.h | 2 +- src/coreclr/jit/jitstd/utility.h | 2 +- src/coreclr/jit/lclvars.cpp | 4 +- src/coreclr/jit/loopcloning.cpp | 2 +- src/coreclr/jit/lower.cpp | 4 +- src/coreclr/jit/morph.cpp | 6 +- src/coreclr/jit/targetarm64.cpp | 9 +- src/coreclr/jit/utils.cpp | 2 +- src/coreclr/jit/utils.h | 2 +- src/coreclr/md/ceefilegen/blobfetcher.cpp | 2 +- src/coreclr/md/ceefilegen/stdafx.h | 4 + src/coreclr/md/compiler/import.cpp | 2 +- src/coreclr/md/compiler/stdafx.h | 4 + src/coreclr/md/enc/rwutil.cpp | 4 +- src/coreclr/md/enc/stdafx.h | 4 + src/coreclr/md/runtime/stdafx.h | 1 + src/coreclr/nativeaot/CMakeLists.txt | 1 + src/coreclr/nativeaot/Runtime/CMakeLists.txt | 1 - src/coreclr/nativeaot/Runtime/CommonMacros.h | 10 - src/coreclr/nativeaot/Runtime/threadstore.cpp | 2 +- src/coreclr/pal/CMakeLists.txt | 1 - src/coreclr/pal/inc/pal.h | 430 +----------- src/coreclr/pal/inc/pal_mstypes.h | 73 +- src/coreclr/pal/inc/rt/cpp/assert.h | 12 - src/coreclr/pal/inc/rt/cpp/cstdlib | 13 - src/coreclr/pal/inc/rt/cpp/ctype.h | 12 - src/coreclr/pal/inc/rt/cpp/emmintrin.h | 
128 ---- src/coreclr/pal/inc/rt/cpp/fcntl.h | 12 - src/coreclr/pal/inc/rt/cpp/float.h | 12 - src/coreclr/pal/inc/rt/cpp/limits.h | 12 - src/coreclr/pal/inc/rt/cpp/malloc.h | 12 - src/coreclr/pal/inc/rt/cpp/math.h | 12 - src/coreclr/pal/inc/rt/cpp/memory.h | 12 - src/coreclr/pal/inc/rt/cpp/stdarg.h | 12 - src/coreclr/pal/inc/rt/cpp/stdbool.h | 4 - src/coreclr/pal/inc/rt/cpp/stddef.h | 12 - src/coreclr/pal/inc/rt/cpp/stdint.h | 4 - src/coreclr/pal/inc/rt/cpp/stdio.h | 12 - src/coreclr/pal/inc/rt/cpp/stdlib.h | 12 - src/coreclr/pal/inc/rt/cpp/string.h | 12 - src/coreclr/pal/inc/rt/cpp/time.h | 12 - src/coreclr/pal/inc/rt/cpp/wchar.h | 12 - src/coreclr/pal/inc/rt/cpp/xmmintrin.h | 117 ---- src/coreclr/pal/inc/rt/palrt.h | 14 - src/coreclr/pal/inc/rt/safecrt.h | 24 +- src/coreclr/pal/inc/rt/sal.h | 10 +- src/coreclr/pal/inc/rt/specstrings.h | 2 - src/coreclr/pal/inc/rt/specstrings_strict.h | 1 - src/coreclr/pal/inc/rt/specstrings_undef.h | 3 - src/coreclr/pal/inc/strsafe.h | 9 - src/coreclr/pal/src/CMakeLists.txt | 3 - src/coreclr/pal/src/cruntime/malloc.cpp | 106 --- src/coreclr/pal/src/cruntime/misc.cpp | 264 -------- src/coreclr/pal/src/cruntime/thread.cpp | 38 -- src/coreclr/pal/src/cruntime/wchar.cpp | 24 +- src/coreclr/pal/src/debug/debug.cpp | 3 +- .../dummyprovider/CMakeLists.txt | 1 - .../lttngprovider/CMakeLists.txt | 1 - src/coreclr/pal/src/file/directory.cpp | 10 +- src/coreclr/pal/src/file/file.cpp | 2 +- src/coreclr/pal/src/file/find.cpp | 2 +- src/coreclr/pal/src/handlemgr/handlemgr.cpp | 4 +- src/coreclr/pal/src/include/pal/file.h | 1 + src/coreclr/pal/src/include/pal/malloc.hpp | 95 +-- src/coreclr/pal/src/include/pal/misc.h | 17 - src/coreclr/pal/src/include/pal/palinternal.h | 430 ------------ .../pal/src/include/pal/stackstring.hpp | 4 +- src/coreclr/pal/src/include/pal/utils.h | 2 +- src/coreclr/pal/src/init/pal.cpp | 6 +- src/coreclr/pal/src/loader/module.cpp | 6 +- src/coreclr/pal/src/map/map.cpp | 9 +- src/coreclr/pal/src/map/virtual.cpp | 2 +- 
src/coreclr/pal/src/misc/cgroup.cpp | 42 +- src/coreclr/pal/src/misc/environ.cpp | 24 +- src/coreclr/pal/src/misc/fmtmessage.cpp | 6 +- src/coreclr/pal/src/misc/miscpalapi.cpp | 1 + src/coreclr/pal/src/misc/perftrace.cpp | 44 +- src/coreclr/pal/src/misc/strutil.cpp | 2 +- src/coreclr/pal/src/misc/utils.cpp | 8 +- src/coreclr/pal/src/objmgr/palobjbase.cpp | 4 +- src/coreclr/pal/src/objmgr/shmobject.cpp | 2 +- src/coreclr/pal/src/safecrt/input.inl | 6 +- .../pal/src/sharedmemory/sharedmemory.cpp | 2 +- src/coreclr/pal/src/synchmgr/synchmanager.cpp | 1 + src/coreclr/pal/src/synchmgr/synchmanager.hpp | 2 +- src/coreclr/pal/src/thread/process.cpp | 16 +- src/coreclr/pal/src/thread/thread.cpp | 4 +- src/coreclr/pal/src/thread/threadsusp.cpp | 1 + src/coreclr/pal/tests/palsuite/CMakeLists.txt | 11 - .../c_runtime/__iscsym/test1/__iscsym.cpp | 92 --- .../c_runtime/_putenv/test1/test1.cpp | 4 +- .../c_runtime/_putenv/test2/test2.cpp | 4 +- .../c_runtime/_putenv/test3/test3.cpp | 4 +- .../c_runtime/bsearch/test1/test1.cpp | 47 -- .../c_runtime/bsearch/test2/test2.cpp | 56 -- .../palsuite/c_runtime/exit/test1/test1.cpp | 36 - .../palsuite/c_runtime/exit/test2/test2.cpp | 37 -- .../palsuite/c_runtime/free/test1/test1.cpp | 61 -- .../palsuite/c_runtime/malloc/test1/test1.cpp | 51 -- .../palsuite/c_runtime/malloc/test2/test2.cpp | 40 -- .../palsuite/c_runtime/qsort/test1/test1.cpp | 47 -- .../palsuite/c_runtime/qsort/test2/test2.cpp | 48 -- .../c_runtime/rand_srand/test1/test1.cpp | 99 --- .../c_runtime/realloc/test1/test1.cpp | 65 -- .../palsuite/c_runtime/time/test1/test1.cpp | 50 -- .../c_runtime/wcstoul/test5/test5.cpp | 8 +- .../pal/tests/palsuite/compilableTests.txt | 13 - .../OutputDebugStringA/test1/test1.cpp | 3 - .../tests/palsuite/manual-unautomatable.dat | 3 - .../pal/tests/palsuite/paltestlist.txt | 12 - .../palsuite/paltestlist_to_be_reviewed.txt | 1 - src/coreclr/pal/tests/palsuite/runpaltests.sh | 2 +- .../pal/tests/palsuite/tests-manual.dat | 1 - 
.../CriticalSectionFunctions/test8/test8.cpp | 1 + .../WaitForMultipleObjectsEx/test6/test6.cpp | 1 + src/coreclr/palrt/memorystream.cpp | 6 +- src/coreclr/scripts/genDummyProvider.py | 2 - src/coreclr/scripts/genLttngProvider.py | 2 - .../tools/StressLogAnalyzer/StressLogDump.cpp | 4 + .../StressLogAnalyzer/StressLogPlugin.cpp | 10 +- src/coreclr/tools/StressLogAnalyzer/util.h | 1 - src/coreclr/tools/metainfo/mdinfo.cpp | 3 +- .../superpmi-shared/methodcontext.cpp | 2 +- .../superpmi-shared/spmidumphelper.cpp | 2 +- .../superpmi/superpmi-shared/spmidumphelper.h | 4 +- .../superpmi/superpmi-shared/standardpch.h | 16 +- src/coreclr/utilcode/clrconfig.cpp | 4 + .../utilcode/clrhost_nodependencies.cpp | 5 + src/coreclr/utilcode/loaderheap.cpp | 2 +- src/coreclr/utilcode/md5.cpp | 2 +- src/coreclr/utilcode/stdafx.h | 3 + src/coreclr/utilcode/stgpool.cpp | 6 +- src/coreclr/utilcode/stresslog.cpp | 4 +- src/coreclr/utilcode/util.cpp | 2 +- src/coreclr/utilcode/utsem.cpp | 2 +- src/coreclr/vm/.vscode/c_cpp_properties.json | 1 + src/coreclr/vm/appdomain.cpp | 4 +- src/coreclr/vm/callcounting.cpp | 2 +- src/coreclr/vm/castcache.cpp | 1 + src/coreclr/vm/ceeload.cpp | 12 +- src/coreclr/vm/ceemain.cpp | 2 +- src/coreclr/vm/cgensys.h | 2 - src/coreclr/vm/classhash.cpp | 4 +- src/coreclr/vm/classlayoutinfo.cpp | 4 +- src/coreclr/vm/codeman.cpp | 34 +- src/coreclr/vm/codeman.h | 5 + src/coreclr/vm/common.h | 7 +- src/coreclr/vm/dacenumerablehash.inl | 2 +- src/coreclr/vm/dllimportcallback.h | 4 +- src/coreclr/vm/dynamicmethod.cpp | 2 +- src/coreclr/vm/eetwain.cpp | 4 +- .../vm/eventing/eventpipe/ds-rt-coreclr.h | 6 +- src/coreclr/vm/interpreter.h | 2 +- src/coreclr/vm/jithelpers.cpp | 7 +- src/coreclr/vm/jitinterface.cpp | 6 +- src/coreclr/vm/methodtable.cpp | 2 +- src/coreclr/vm/methodtablebuilder.cpp | 4 +- src/coreclr/vm/object.inl | 2 +- src/coreclr/vm/perfmap.cpp | 4 + src/coreclr/vm/profdetach.cpp | 4 +- src/coreclr/vm/proftoeeinterfaceimpl.cpp | 2 +- src/coreclr/vm/qcall.h 
| 2 +- src/coreclr/vm/stackingallocator.cpp | 2 +- src/coreclr/vm/stringliteralmap.cpp | 2 +- src/coreclr/vm/syncblk.cpp | 2 +- src/coreclr/vm/threadstatics.cpp | 4 +- src/coreclr/vm/util.hpp | 2 +- src/coreclr/vm/vars.hpp | 40 -- src/coreclr/vm/virtualcallstub.cpp | 2 + src/coreclr/vm/virtualcallstub.h | 2 +- src/mono/dlls/mscordbi/CMakeLists.txt | 1 - 240 files changed, 559 insertions(+), 5127 deletions(-) delete mode 100644 src/coreclr/inc/clr_std/algorithm delete mode 100644 src/coreclr/inc/clr_std/string delete mode 100644 src/coreclr/inc/clr_std/type_traits delete mode 100644 src/coreclr/inc/clr_std/utility delete mode 100644 src/coreclr/inc/clr_std/vector delete mode 100644 src/coreclr/pal/inc/rt/cpp/assert.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/cstdlib delete mode 100644 src/coreclr/pal/inc/rt/cpp/ctype.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/emmintrin.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/fcntl.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/float.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/limits.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/malloc.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/math.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/memory.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/stdarg.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/stdbool.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/stddef.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/stdint.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/stdio.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/stdlib.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/string.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/time.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/wchar.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/xmmintrin.h delete mode 100644 src/coreclr/pal/src/cruntime/malloc.cpp delete mode 100644 src/coreclr/pal/src/cruntime/misc.cpp delete mode 100644 src/coreclr/pal/src/cruntime/thread.cpp delete mode 100644 
src/coreclr/pal/tests/palsuite/c_runtime/__iscsym/test1/__iscsym.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test1/test1.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test2/test2.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/exit/test1/test1.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/exit/test2/test2.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/free/test1/test1.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/malloc/test1/test1.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/malloc/test2/test2.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/qsort/test1/test1.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/qsort/test2/test2.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/rand_srand/test1/test1.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/realloc/test1/test1.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/time/test1/test1.cpp diff --git a/eng/native/configurecompiler.cmake b/eng/native/configurecompiler.cmake index f7144a028a80d2..bdf370fdf966b6 100644 --- a/eng/native/configurecompiler.cmake +++ b/eng/native/configurecompiler.cmake @@ -28,7 +28,6 @@ if (CLR_CMAKE_HOST_UNIX) add_compile_options(-Wno-null-conversion) add_compile_options(-glldb) else() - add_compile_options($<$:-Werror=conversion-null>) add_compile_options(-g) endif() endif() diff --git a/eng/native/functions.cmake b/eng/native/functions.cmake index e10e008d775e44..d4a7e1bee92edb 100644 --- a/eng/native/functions.cmake +++ b/eng/native/functions.cmake @@ -220,6 +220,12 @@ endfunction(convert_to_absolute_path) function(preprocess_file inputFilename outputFilename) get_compile_definitions(PREPROCESS_DEFINITIONS) get_include_directories(PREPROCESS_INCLUDE_DIRECTORIES) + get_source_file_property(SOURCE_FILE_DEFINITIONS ${inputFilename} COMPILE_DEFINITIONS) + + foreach(DEFINITION IN 
LISTS SOURCE_FILE_DEFINITIONS) + list(APPEND PREPROCESS_DEFINITIONS -D${DEFINITION}) + endforeach() + if (MSVC) add_custom_command( OUTPUT ${outputFilename} diff --git a/src/coreclr/CMakeLists.txt b/src/coreclr/CMakeLists.txt index 4aa45914ff54b0..aaf4005aa7394b 100644 --- a/src/coreclr/CMakeLists.txt +++ b/src/coreclr/CMakeLists.txt @@ -202,11 +202,12 @@ if(CLR_CMAKE_HOST_UNIX) add_subdirectory(debug/createdump) endif(CLR_CMAKE_HOST_OSX OR (CLR_CMAKE_HOST_LINUX AND NOT CLR_CMAKE_HOST_UNIX_X86 AND NOT CLR_CMAKE_HOST_ANDROID)) - # Include the dummy c++ include files - include_directories("pal/inc/rt/cpp") - - # This prevents inclusion of standard C compiler headers - add_compile_options(-nostdinc) + # The CoreCLR PAL used to redefine NULL, which caused a number of null conversion and arithmetic + # warnings and errors to be suppressed. + # Suppress these warnings here to avoid breaking the build. + add_compile_options($<$:-Wno-null-arithmetic>) + add_compile_options($<$:-Wno-conversion-null>) + add_compile_options($<$:-Wno-pointer-arith>) set (NATIVE_RESOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/nativeresources) include_directories(${NATIVE_RESOURCE_DIR}) @@ -218,7 +219,7 @@ if(CLR_CMAKE_HOST_UNIX) # given Windows .rc file. The target C++ file path is returned in the # variable specified by the TARGET_FILE parameter. 
function(build_resources SOURCE TARGET_NAME TARGET_FILE) - + set_property(SOURCE ${SOURCE} APPEND PROPERTY COMPILE_DEFINITIONS "RC_INVOKED") set(PREPROCESSED_SOURCE ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}.rc.i) preprocess_file(${SOURCE} ${PREPROCESSED_SOURCE}) diff --git a/src/coreclr/binder/assemblyname.cpp b/src/coreclr/binder/assemblyname.cpp index 9eea2ee8ba736f..0c96f6be47ec29 100644 --- a/src/coreclr/binder/assemblyname.cpp +++ b/src/coreclr/binder/assemblyname.cpp @@ -11,10 +11,10 @@ // // ============================================================ +#include "common.h" #include "assemblyname.hpp" #include "assemblybindercommon.hpp" -#include "common.h" #include "utils.hpp" #include "textualidentityparser.hpp" diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake index e69da7ed4ac412..0040f9575de27f 100644 --- a/src/coreclr/clrdefinitions.cmake +++ b/src/coreclr/clrdefinitions.cmake @@ -53,6 +53,7 @@ if(CLR_CMAKE_HOST_WIN32) add_definitions(-D_WIN32_WINNT=0x0602) add_definitions(-DWIN32_LEAN_AND_MEAN) add_definitions(-D_CRT_SECURE_NO_WARNINGS) + add_compile_definitions(NOMINMAX) endif(CLR_CMAKE_HOST_WIN32) if (NOT (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX)) diff --git a/src/coreclr/debug/createdump/CMakeLists.txt b/src/coreclr/debug/createdump/CMakeLists.txt index 71e5b78b08e525..3c72b8a0fa42b4 100644 --- a/src/coreclr/debug/createdump/CMakeLists.txt +++ b/src/coreclr/debug/createdump/CMakeLists.txt @@ -56,8 +56,6 @@ else(CLR_CMAKE_HOST_WIN32) endif(CLR_CMAKE_HOST_OSX) endif (CORECLR_SET_RPATH) - add_definitions(-DPAL_STDCPP_COMPAT) - # This is so we can include "version.c" include_directories(${CMAKE_BINARY_DIR}) diff --git a/src/coreclr/debug/daccess/daccess.cpp b/src/coreclr/debug/daccess/daccess.cpp index eb5127cdfa4f3c..d6b8d99d7c37e9 100644 --- a/src/coreclr/debug/daccess/daccess.cpp +++ b/src/coreclr/debug/daccess/daccess.cpp @@ -5793,7 +5793,7 @@ ClrDataAccess::RawGetMethodName( SIZE_T maxPrecodeSize = 
sizeof(StubPrecode); #ifdef HAS_THISPTR_RETBUF_PRECODE - maxPrecodeSize = max(maxPrecodeSize, sizeof(ThisPtrRetBufPrecode)); + maxPrecodeSize = max((size_t)maxPrecodeSize, sizeof(ThisPtrRetBufPrecode)); #endif for (SIZE_T i = 0; i < maxPrecodeSize / PRECODE_ALIGNMENT; i++) diff --git a/src/coreclr/debug/dbgutil/CMakeLists.txt b/src/coreclr/debug/dbgutil/CMakeLists.txt index 2d8e02b07fc7f1..0ad223630a5839 100644 --- a/src/coreclr/debug/dbgutil/CMakeLists.txt +++ b/src/coreclr/debug/dbgutil/CMakeLists.txt @@ -9,8 +9,6 @@ if(CLR_CMAKE_HOST_WIN32 OR CLR_CMAKE_HOST_OSX) include_directories(${CLR_DIR}/inc/llvm) endif(CLR_CMAKE_HOST_WIN32 OR CLR_CMAKE_HOST_OSX) -add_definitions(-DPAL_STDCPP_COMPAT) - if(CLR_CMAKE_TARGET_LINUX_MUSL) add_definitions(-DTARGET_LINUX_MUSL) endif(CLR_CMAKE_TARGET_LINUX_MUSL) diff --git a/src/coreclr/debug/debug-pal/CMakeLists.txt b/src/coreclr/debug/debug-pal/CMakeLists.txt index baa11c163dffe3..adc8efacab4ae9 100644 --- a/src/coreclr/debug/debug-pal/CMakeLists.txt +++ b/src/coreclr/debug/debug-pal/CMakeLists.txt @@ -2,8 +2,6 @@ include_directories(../inc) include_directories(../../pal/inc) include_directories(${EP_GENERATED_HEADER_PATH}) -add_definitions(-DPAL_STDCPP_COMPAT) - set(SHARED_EVENTPIPE_SOURCE_PATH ${CLR_SRC_NATIVE_DIR}/eventpipe) add_definitions(-DFEATURE_CORECLR) add_definitions(-DFEATURE_PERFTRACING) diff --git a/src/coreclr/debug/di/rspriv.h b/src/coreclr/debug/di/rspriv.h index 68080a65cb8a6f..63886b56bfa5a4 100644 --- a/src/coreclr/debug/di/rspriv.h +++ b/src/coreclr/debug/di/rspriv.h @@ -3975,9 +3975,9 @@ class CordbProcess : // CORDB_ADDRESS's are UINT_PTR's (64 bit under HOST_64BIT, 32 bit otherwise) #if defined(TARGET_64BIT) -#define MAX_ADDRESS (_UI64_MAX) +#define MAX_ADDRESS (UINT64_MAX) #else -#define MAX_ADDRESS (_UI32_MAX) +#define MAX_ADDRESS (UINT32_MAX) #endif #define MIN_ADDRESS (0x0) CORDB_ADDRESS m_minPatchAddr; //smallest patch in table diff --git a/src/coreclr/debug/di/rsthread.cpp 
b/src/coreclr/debug/di/rsthread.cpp index 3c5024fc80fab4..1f455dad376d14 100644 --- a/src/coreclr/debug/di/rsthread.cpp +++ b/src/coreclr/debug/di/rsthread.cpp @@ -5122,7 +5122,7 @@ HRESULT CordbValueEnum::Next(ULONG celt, ICorDebugValue *values[], ULONG *pceltF HRESULT hr = S_OK; - int iMax = min( m_iMax, m_iCurrent+celt); + int iMax = (int)min( (ULONG)m_iMax, m_iCurrent+celt); int i; for (i = m_iCurrent; i< iMax;i++) { @@ -8186,7 +8186,7 @@ HRESULT CordbJITILFrame::FabricateNativeInfo(DWORD dwIndex, // first argument, but thereafter we have to decrement it // before getting the variable's location from it. So increment // it here to be consistent later. - rpCur += max(cbType, cbArchitectureMin); + rpCur += max((ULONG)cbType, cbArchitectureMin); #endif // Grab the IL code's function's method signature so we can see if it's static. @@ -8219,7 +8219,7 @@ HRESULT CordbJITILFrame::FabricateNativeInfo(DWORD dwIndex, IfFailThrow(pArgType->GetUnboxedObjectSize(&cbType)); #if defined(TARGET_X86) // STACK_GROWS_DOWN_ON_ARGS_WALK - rpCur -= max(cbType, cbArchitectureMin); + rpCur -= max((ULONG)cbType, cbArchitectureMin); m_rgNVI[i].loc.vlFixedVarArg.vlfvOffset = (unsigned)(m_FirstArgAddr - rpCur); @@ -8229,7 +8229,7 @@ HRESULT CordbJITILFrame::FabricateNativeInfo(DWORD dwIndex, #else // STACK_GROWS_UP_ON_ARGS_WALK m_rgNVI[i].loc.vlFixedVarArg.vlfvOffset = (unsigned)(rpCur - m_FirstArgAddr); - rpCur += max(cbType, cbArchitectureMin); + rpCur += max((ULONG)cbType, cbArchitectureMin); AlignAddressForType(pArgType, rpCur); #endif @@ -10877,7 +10877,7 @@ HRESULT CordbCodeEnum::Next(ULONG celt, ICorDebugCode *values[], ULONG *pceltFet HRESULT hr = S_OK; - int iMax = min( m_iMax, m_iCurrent+celt); + int iMax = (int)min( (ULONG)m_iMax, m_iCurrent+celt); int i; for (i = m_iCurrent; i < iMax; i++) diff --git a/src/coreclr/debug/di/rstype.cpp b/src/coreclr/debug/di/rstype.cpp index 45ccd44be6565a..ae686064e96cb1 100644 --- a/src/coreclr/debug/di/rstype.cpp +++ 
b/src/coreclr/debug/di/rstype.cpp @@ -2898,7 +2898,7 @@ HRESULT CordbTypeEnum::Next(ULONG celt, ICorDebugType *values[], ULONG *pceltFet HRESULT hr = S_OK; - int iMax = min( m_iMax, m_iCurrent+celt); + int iMax = (int)min( (ULONG)m_iMax, m_iCurrent+celt); int i; for (i = m_iCurrent; i < iMax; i++) diff --git a/src/coreclr/debug/di/shimcallback.cpp b/src/coreclr/debug/di/shimcallback.cpp index 4e8f029209def7..bf6c817fc880d5 100644 --- a/src/coreclr/debug/di/shimcallback.cpp +++ b/src/coreclr/debug/di/shimcallback.cpp @@ -1408,7 +1408,7 @@ HRESULT ShimProxyCallback::DataBreakpoint(ICorDebugProcess* pProcess, ICorDebugT this->m_pThread.Assign(pThread); _ASSERTE(contextSize == sizeof(CONTEXT)); - this->m_contextSize = min(contextSize, sizeof(CONTEXT)); + this->m_contextSize = min(contextSize, (ULONG32)sizeof(CONTEXT)); memcpy(&(this->m_context), pContext, this->m_contextSize); } diff --git a/src/coreclr/debug/di/stdafx.h b/src/coreclr/debug/di/stdafx.h index 061c576c4725b7..8ee806f88f2718 100644 --- a/src/coreclr/debug/di/stdafx.h +++ b/src/coreclr/debug/di/stdafx.h @@ -10,6 +10,9 @@ #include #include #include +#include +using std::min; +using std::max; #include diff --git a/src/coreclr/debug/ee/debugger.cpp b/src/coreclr/debug/ee/debugger.cpp index 62b9f3c99c9c29..79aa2d5f13fc48 100644 --- a/src/coreclr/debug/ee/debugger.cpp +++ b/src/coreclr/debug/ee/debugger.cpp @@ -3029,7 +3029,7 @@ HRESULT Debugger::GetILToNativeMappingIntoArrays( if (pDJI == NULL) return E_FAIL; - ULONG32 cMap = min(cMapMax, pDJI->GetSequenceMapCount()); + ULONG32 cMap = min((ULONG32)cMapMax, pDJI->GetSequenceMapCount()); DebuggerILToNativeMap * rgMapInt = pDJI->GetSequenceMap(); NewArrayHolder rguiILOffsetTemp = new (nothrow) UINT[cMap]; diff --git a/src/coreclr/debug/ee/funceval.cpp b/src/coreclr/debug/ee/funceval.cpp index 7844edbe8b306d..a7e888452c7812 100644 --- a/src/coreclr/debug/ee/funceval.cpp +++ b/src/coreclr/debug/ee/funceval.cpp @@ -2806,7 +2806,7 @@ void 
PackArgumentArray(DebuggerEval *pDE, #ifdef FEATURE_HFA // The buffer for HFAs has to be always ENREGISTERED_RETURNTYPE_MAXSIZE - size = max(size, ENREGISTERED_RETURNTYPE_MAXSIZE); + size = max(size, (unsigned)ENREGISTERED_RETURNTYPE_MAXSIZE); #endif BYTE * pTemp = new (interopsafe) BYTE[ALIGN_UP(sizeof(ValueClassInfo), 8) + size]; diff --git a/src/coreclr/debug/ee/stdafx.h b/src/coreclr/debug/ee/stdafx.h index f21a670e210bf8..21ef5f0efa329c 100644 --- a/src/coreclr/debug/ee/stdafx.h +++ b/src/coreclr/debug/ee/stdafx.h @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include diff --git a/src/coreclr/debug/inc/dbgipcevents.h b/src/coreclr/debug/inc/dbgipcevents.h index 0eb393c37fce94..1545aa28083704 100644 --- a/src/coreclr/debug/inc/dbgipcevents.h +++ b/src/coreclr/debug/inc/dbgipcevents.h @@ -768,7 +768,7 @@ class MSLAYOUT VMPTR_Base // // Operators to emulate Pointer semantics. // - bool IsNull() { SUPPORTS_DAC; return m_addr == NULL; } + bool IsNull() { SUPPORTS_DAC; return m_addr == (TADDR)0; } static VMPTR_This NullPtr() { diff --git a/src/coreclr/debug/shared/dbgtransportsession.cpp b/src/coreclr/debug/shared/dbgtransportsession.cpp index 8b8ca6203c9571..3bebb8282aed7d 100644 --- a/src/coreclr/debug/shared/dbgtransportsession.cpp +++ b/src/coreclr/debug/shared/dbgtransportsession.cpp @@ -1949,7 +1949,7 @@ void DbgTransportSession::TransportWorker() DWORD cbBytesToRead = sReceiveHeader.TypeSpecificData.MemoryAccess.m_cbLeftSideBuffer; while (cbBytesToRead) { - DWORD cbTransfer = min(cbBytesToRead, sizeof(rgDummy)); + DWORD cbTransfer = min(cbBytesToRead, (DWORD)sizeof(rgDummy)); if (!ReceiveBlock(rgDummy, cbTransfer)) HANDLE_TRANSIENT_ERROR(); cbBytesToRead -= cbTransfer; diff --git a/src/coreclr/dlls/mscordac/mscordac_unixexports.src b/src/coreclr/dlls/mscordac/mscordac_unixexports.src index 8d94292d5c572c..ad056eb1104e3b 100644 --- a/src/coreclr/dlls/mscordac/mscordac_unixexports.src +++ b/src/coreclr/dlls/mscordac/mscordac_unixexports.src 
@@ -22,15 +22,10 @@ nativeStringResourceTable_mscorrc ; All the # exports are prefixed with DAC_ #PAL_CatchHardwareExceptionHolderEnter #PAL_CatchHardwareExceptionHolderExit -#PAL_bsearch #PAL_CopyModuleData -#PAL_errno -#PAL_free #PAL_GetLogicalCpuCountFromOS #PAL_GetTotalCpuCount #PAL_GetUnwindInfoSize -#PAL_stdout -#PAL_stderr #PAL_GetApplicationGroupId #PAL_GetTransportName #PAL_GetCurrentThread @@ -47,9 +42,6 @@ nativeStringResourceTable_mscorrc #PAL_ReadProcessMemory #PAL_ProbeMemory #PAL_Random -#PAL_malloc -#PAL_realloc -#PAL_qsort #PAL__wcstoui64 #PAL_wcstoul #PAL_wcstod diff --git a/src/coreclr/dlls/mscorpe/stdafx.h b/src/coreclr/dlls/mscorpe/stdafx.h index 996113b500154c..bd78a49013c94a 100644 --- a/src/coreclr/dlls/mscorpe/stdafx.h +++ b/src/coreclr/dlls/mscorpe/stdafx.h @@ -11,6 +11,7 @@ #include #include #include +#include #define FEATURE_NO_HOST // Do not use host interface #include @@ -21,3 +22,6 @@ #include "ceegen.h" #include "ceefilegenwriter.h" #include "ceesectionstring.h" + +using std::min; +using std::max; diff --git a/src/coreclr/gc/env/common.h b/src/coreclr/gc/env/common.h index a3f6539aa3a491..5d8cff7f779041 100644 --- a/src/coreclr/gc/env/common.h +++ b/src/coreclr/gc/env/common.h @@ -25,6 +25,9 @@ #include #include +#include +#include +#include #ifdef TARGET_UNIX #include diff --git a/src/coreclr/gc/env/gcenv.base.h b/src/coreclr/gc/env/gcenv.base.h index 623cee04134508..1603448ae2a4f8 100644 --- a/src/coreclr/gc/env/gcenv.base.h +++ b/src/coreclr/gc/env/gcenv.base.h @@ -100,14 +100,6 @@ inline HRESULT HRESULT_FROM_WIN32(unsigned long x) #define ZeroMemory(Destination,Length) memset((Destination),0,(Length)) -#ifndef min -#define min(a,b) (((a) < (b)) ? (a) : (b)) -#endif - -#ifndef max -#define max(a,b) (((a) > (b)) ? 
(a) : (b)) -#endif - #define C_ASSERT(cond) static_assert( cond, #cond ) #define UNREFERENCED_PARAMETER(P) (void)(P) @@ -393,7 +385,6 @@ typedef struct _PROCESSOR_NUMBER { uint8_t Number; uint8_t Reserved; } PROCESSOR_NUMBER, *PPROCESSOR_NUMBER; - #endif // _INC_WINDOWS // ----------------------------------------------------------------------------------------------------------- diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 67d6fa75e051f5..e43047cf6e113a 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -3113,7 +3113,7 @@ void gc_history_global::print() uint32_t limit_time_to_uint32 (uint64_t time) { - time = min (time, UINT32_MAX); + time = min (time, (uint64_t)UINT32_MAX); return (uint32_t)time; } @@ -6972,7 +6972,7 @@ void gc_heap::gc_thread_function () dynamic_heap_count_data_t::sample& sample = dynamic_heap_count_data.samples[dynamic_heap_count_data.sample_index]; wait_time = min (wait_time, (uint32_t)(sample.elapsed_between_gcs / 1000 / 3)); - wait_time = max (wait_time, 1); + wait_time = max (wait_time, 1u); dprintf (6666, ("gc#0 thread waiting for %d ms (betwen GCs %I64d)", wait_time, sample.elapsed_between_gcs)); } @@ -7022,7 +7022,7 @@ void gc_heap::gc_thread_function () } // wait till the threads that should have gone idle at least reached the place where they are about to wait on the idle event. - if ((gc_heap::dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) && + if ((gc_heap::dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) && (n_heaps != dynamic_heap_count_data.last_n_heaps)) { int spin_count = 1024; @@ -12141,7 +12141,7 @@ void gc_heap::clear_region_demoted (heap_segment* region) int gc_heap::get_plan_gen_num (int gen_number) { - return ((settings.promotion) ? min ((gen_number + 1), max_generation) : gen_number); + return ((settings.promotion) ? 
min ((gen_number + 1), (int)max_generation) : gen_number); } uint8_t* gc_heap::get_uoh_start_object (heap_segment* region, generation* gen) @@ -12280,7 +12280,7 @@ void gc_heap::init_heap_segment (heap_segment* seg, gc_heap* hp #endif //MULTIPLE_HEAPS #ifdef USE_REGIONS - int gen_num_for_region = min (gen_num, max_generation); + int gen_num_for_region = min (gen_num, (int)max_generation); set_region_gen_num (seg, gen_num_for_region); heap_segment_plan_gen_num (seg) = gen_num_for_region; heap_segment_swept_in_plan (seg) = false; @@ -13300,7 +13300,7 @@ void gc_heap::distribute_free_regions() const int i = 0; const int n_heaps = 1; #endif //MULTIPLE_HEAPS - ptrdiff_t budget_gen = max (hp->estimate_gen_growth (gen), 0); + ptrdiff_t budget_gen = max (hp->estimate_gen_growth (gen), (ptrdiff_t)0); int kind = gen >= loh_generation; size_t budget_gen_in_region_units = (budget_gen + (region_size[kind] - 1)) / region_size[kind]; dprintf (REGIONS_LOG, ("h%2d gen %d has an estimated growth of %zd bytes (%zd regions)", i, gen, budget_gen, budget_gen_in_region_units)); @@ -13520,7 +13520,7 @@ void gc_heap::distribute_free_regions() if (ephemeral_elapsed >= DECOMMIT_TIME_STEP_MILLISECONDS) { gc_last_ephemeral_decommit_time = dd_time_clock (dd0); - size_t decommit_step_milliseconds = min (ephemeral_elapsed, (10*1000)); + size_t decommit_step_milliseconds = min (ephemeral_elapsed, (size_t)(10*1000)); decommit_step (decommit_step_milliseconds); } @@ -13896,7 +13896,7 @@ uint32_t adjust_heaps_hard_limit_worker (uint32_t nhp, size_t limit) size_t aligned_limit = align_on_segment_hard_limit (limit); uint32_t nhp_oh = (uint32_t)(aligned_limit / min_segment_size_hard_limit); nhp = min (nhp_oh, nhp); - return (max (nhp, 1)); + return (max (nhp, 1u)); } uint32_t gc_heap::adjust_heaps_hard_limit (uint32_t nhp) @@ -14300,7 +14300,7 @@ gc_heap::init_semi_shared() #endif //!USE_REGIONS #ifdef MULTIPLE_HEAPS - mark_list_size = min (100*1024, max (8192, soh_segment_size/(2*10*32))); + 
mark_list_size = min ((size_t)100*1024, max ((size_t)8192, soh_segment_size/(2*10*32))); #ifdef DYNAMIC_HEAP_COUNT if (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) { @@ -14322,7 +14322,7 @@ gc_heap::init_semi_shared() } #else //MULTIPLE_HEAPS - mark_list_size = min(100*1024, max (8192, soh_segment_size/(64*32))); + mark_list_size = min((size_t)100*1024, max ((size_t)8192, soh_segment_size/(64*32))); g_mark_list_total_size = mark_list_size; g_mark_list = make_mark_list (mark_list_size); @@ -14444,7 +14444,7 @@ gc_heap::init_semi_shared() if (bgc_tuning::enable_fl_tuning && (current_memory_load < bgc_tuning::memory_load_goal)) { uint32_t distance_to_goal = bgc_tuning::memory_load_goal - current_memory_load; - bgc_tuning::stepping_interval = max (distance_to_goal / 10, 1); + bgc_tuning::stepping_interval = max (distance_to_goal / 10, 1u); bgc_tuning::last_stepping_mem_load = current_memory_load; bgc_tuning::last_stepping_bgc_count = 0; dprintf (BGC_TUNING_LOG, ("current ml: %d, %d to goal, interval: %d", @@ -21815,13 +21815,13 @@ size_t gc_heap::min_reclaim_fragmentation_threshold (uint32_t num_heaps) dprintf (GTC_LOG, ("min av: %zd, 10%% gen2: %zd, 3%% mem: %zd", min_mem_based_on_available, ten_percent_size, three_percent_mem)); #endif //SIMPLE_DPRINTF - return (size_t)(min (min_mem_based_on_available, min (ten_percent_size, three_percent_mem))); + return (size_t)(min ((uint64_t)min_mem_based_on_available, min ((uint64_t)ten_percent_size, three_percent_mem))); } inline uint64_t gc_heap::min_high_fragmentation_threshold(uint64_t available_mem, uint32_t num_heaps) { - return min (available_mem, (256*1024*1024)) / num_heaps; + return min (available_mem, (uint64_t)(256*1024*1024)) / num_heaps; } enum { @@ -22082,7 +22082,7 @@ size_t gc_heap::exponential_smoothing (int gen, size_t collection_count, size_t { // to avoid spikes in mem usage due to short terms fluctuations in survivorship, // apply some smoothing. 
- size_t smoothing = min(3, collection_count); + size_t smoothing = min((size_t)3, collection_count); size_t desired_total = desired_per_heap * n_heaps; size_t new_smoothed_desired_total = desired_total / smoothing + ((smoothed_desired_total[gen] / smoothing) * (smoothing - 1)); @@ -22191,7 +22191,7 @@ void gc_heap::gc1() } //adjust the allocation size from the pinned quantities. - for (int gen_number = 0; gen_number <= min (max_generation,n+1); gen_number++) + for (int gen_number = 0; gen_number <= min ((int)max_generation,n+1); gen_number++) { generation* gn = generation_of (gen_number); if (settings.compaction) @@ -22371,7 +22371,7 @@ void gc_heap::gc1() if (alloc_contexts_used >= 1) { allocation_quantum = Align (min ((size_t)CLR_SIZE, - (size_t)max (1024, get_new_allocation (0) / (2 * alloc_contexts_used))), + (size_t)max ((size_t)1024, get_new_allocation (0) / (2 * alloc_contexts_used))), get_alignment_constant(FALSE)); dprintf (3, ("New allocation quantum: %zd(0x%zx)", allocation_quantum, allocation_quantum)); } @@ -28594,7 +28594,7 @@ BOOL gc_heap::background_process_mark_overflow (BOOL concurrent_p) if (grow_mark_array_p) { // Try to grow the array. - size_t new_size = max (MARK_STACK_INITIAL_LENGTH, 2*background_mark_stack_array_length); + size_t new_size = max ((size_t)MARK_STACK_INITIAL_LENGTH, 2*background_mark_stack_array_length); if ((new_size * sizeof(mark)) > 100*1024) { @@ -28934,7 +28934,7 @@ BOOL gc_heap::process_mark_overflow(int condemned_gen_number) overflow_p = TRUE; // Try to grow the array. 
size_t new_size = - max (MARK_STACK_INITIAL_LENGTH, 2*mark_stack_array_length); + max ((size_t)MARK_STACK_INITIAL_LENGTH, 2*mark_stack_array_length); if ((new_size * sizeof(mark)) > 100*1024) { @@ -29237,7 +29237,7 @@ BOOL gc_heap::decide_on_promotion_surv (size_t threshold) { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS - dynamic_data* dd = hp->dynamic_data_of (min ((settings.condemned_generation + 1), max_generation)); + dynamic_data* dd = hp->dynamic_data_of (min ((int)(settings.condemned_generation + 1), (int)max_generation)); size_t older_gen_size = dd_current_size (dd) + (dd_desired_allocation (dd) - dd_new_allocation (dd)); size_t promoted = hp->total_promoted_bytes; @@ -29313,7 +29313,7 @@ void gc_heap::verify_region_to_generation_map() } size_t region_index_start = get_basic_region_index_for_address (get_region_start (region)); size_t region_index_end = get_basic_region_index_for_address (heap_segment_reserved (region)); - int gen_num = min (gen_number, soh_gen2); + int gen_num = min (gen_number, (int)soh_gen2); assert (gen_num == heap_segment_gen_num (region)); int plan_gen_num = heap_segment_plan_gen_num (region); bool is_demoted = (region->flags & heap_segment_flags_demoted) != 0; @@ -32546,7 +32546,7 @@ void gc_heap::plan_phase (int condemned_gen_number) if ((condemned_gen_number < max_generation)) { - older_gen = generation_of (min (max_generation, 1 + condemned_gen_number)); + older_gen = generation_of (min ((int)max_generation, 1 + condemned_gen_number)); generation_allocator (older_gen)->copy_to_alloc_list (r_free_list); r_free_list_space = generation_free_list_space (older_gen); @@ -34117,7 +34117,7 @@ void gc_heap::plan_phase (int condemned_gen_number) { reset_pinned_queue_bos(); #ifndef USE_REGIONS - unsigned int gen_number = min (max_generation, 1 + condemned_gen_number); + unsigned int gen_number = (unsigned int)min ((int)max_generation, 1 + condemned_gen_number); generation* gen = generation_of (gen_number); uint8_t* low = 
generation_allocation_start (generation_of (gen_number-1)); uint8_t* high = heap_segment_allocated (ephemeral_heap_segment); @@ -42454,8 +42454,8 @@ BOOL gc_heap::best_fit (size_t free_space, #endif // SEG_REUSE_STATS if (free_space_items) { - max_free_space_items = min (MAX_NUM_FREE_SPACES, free_space_items * 2); - max_free_space_items = max (max_free_space_items, MIN_NUM_FREE_SPACES); + max_free_space_items = min ((size_t)MAX_NUM_FREE_SPACES, free_space_items * 2); + max_free_space_items = max (max_free_space_items, (size_t)MIN_NUM_FREE_SPACES); } else { @@ -42686,8 +42686,8 @@ BOOL gc_heap::can_expand_into_p (heap_segment* seg, size_t min_free_size, size_t memcpy (ordered_free_space_indices, saved_ordered_free_space_indices, sizeof(ordered_free_space_indices)); - max_free_space_items = max (MIN_NUM_FREE_SPACES, free_space_items * 3 / 2); - max_free_space_items = min (MAX_NUM_FREE_SPACES, max_free_space_items); + max_free_space_items = max ((size_t)MIN_NUM_FREE_SPACES, free_space_items * 3 / 2); + max_free_space_items = min ((size_t)MAX_NUM_FREE_SPACES, max_free_space_items); dprintf (SEG_REUSE_LOG_0, ("could fit! %zd free spaces, %zd max", free_space_items, max_free_space_items)); } @@ -43371,14 +43371,14 @@ void gc_heap::init_static_data() size_t gen0_max_size = #ifdef MULTIPLE_HEAPS - max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024)); + max ((size_t)6*1024*1024, min ( Align(soh_segment_size/2), (size_t)200*1024*1024)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC gc_can_use_concurrent ? 6*1024*1024 : #endif //BACKGROUND_GC - max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024)) + max ((size_t)6*1024*1024, min ( Align(soh_segment_size/2), (size_t)200*1024*1024)) ); #endif //MULTIPLE_HEAPS @@ -43408,14 +43408,14 @@ void gc_heap::init_static_data() // TODO: gen0_max_size has a 200mb cap; gen1_max_size should also have a cap. 
size_t gen1_max_size = (size_t) #ifdef MULTIPLE_HEAPS - max (6*1024*1024, Align(soh_segment_size/2)); + max ((size_t)6*1024*1024, Align(soh_segment_size/2)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC gc_can_use_concurrent ? 6*1024*1024 : #endif //BACKGROUND_GC - max (6*1024*1024, Align(soh_segment_size/2)) + max ((size_t)6*1024*1024, Align(soh_segment_size/2)) ); #endif //MULTIPLE_HEAPS @@ -43562,7 +43562,7 @@ size_t gc_heap::desired_new_allocation (dynamic_data* dd, } else { - new_size = (size_t) min (max ( (f * current_size), min_gc_size), max_size); + new_size = (size_t) min (max ( (size_t)(f * current_size), min_gc_size), max_size); } assert ((new_size >= current_size) || (new_size == max_size)); @@ -43634,7 +43634,7 @@ size_t gc_heap::desired_new_allocation (dynamic_data* dd, size_t survivors = out; cst = float (survivors) / float (dd_begin_data_size (dd)); f = surv_to_growth (cst, limit, max_limit); - new_allocation = (size_t) min (max ((f * (survivors)), min_gc_size), max_size); + new_allocation = (size_t) min (max ((size_t)(f * (survivors)), min_gc_size), max_size); new_allocation = linear_allocation_model (allocation_fraction, new_allocation, dd_desired_allocation (dd), time_since_previous_collection_secs); @@ -43700,9 +43700,9 @@ size_t gc_heap::generation_plan_size (int gen_number) return result; #else //USE_REGIONS if (0 == gen_number) - return max((heap_segment_plan_allocated (ephemeral_heap_segment) - + return (size_t)max((heap_segment_plan_allocated (ephemeral_heap_segment) - generation_plan_allocation_start (generation_of (gen_number))), - (int)Align (min_obj_size)); + (ptrdiff_t)Align (min_obj_size)); else { generation* gen = generation_of (gen_number); @@ -43751,9 +43751,9 @@ size_t gc_heap::generation_size (int gen_number) return result; #else //USE_REGIONS if (0 == gen_number) - return max((heap_segment_allocated (ephemeral_heap_segment) - + return (size_t)max((heap_segment_allocated (ephemeral_heap_segment) - generation_allocation_start 
(generation_of (gen_number))), - (int)Align (min_obj_size)); + (ptrdiff_t)Align (min_obj_size)); else { generation* gen = generation_of (gen_number); @@ -43835,7 +43835,7 @@ size_t gc_heap::trim_youngest_desired (uint32_t memory_load, } else { - size_t total_max_allocation = max (mem_one_percent, total_min_allocation); + size_t total_max_allocation = max ((size_t)mem_one_percent, total_min_allocation); return min (total_new_allocation, total_max_allocation); } } @@ -44170,7 +44170,7 @@ void gc_heap::decommit_ephemeral_segment_pages() dynamic_data* dd0 = dynamic_data_of (0); ptrdiff_t desired_allocation = dd_new_allocation (dd0) + - max (estimate_gen_growth (soh_gen1), 0) + + max (estimate_gen_growth (soh_gen1), (ptrdiff_t)0) + loh_size_threshold; size_t slack_space = @@ -44219,7 +44219,7 @@ void gc_heap::decommit_ephemeral_segment_pages() // we do a max of DECOMMIT_SIZE_PER_MILLISECOND per millisecond of elapsed time since the last GC // we limit the elapsed time to 10 seconds to avoid spending too much time decommitting - ptrdiff_t max_decommit_size = min (ephemeral_elapsed, (10*1000)) * DECOMMIT_SIZE_PER_MILLISECOND; + ptrdiff_t max_decommit_size = min (ephemeral_elapsed, (size_t)(10*1000)) * DECOMMIT_SIZE_PER_MILLISECOND; decommit_size = min (decommit_size, max_decommit_size); slack_space = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment) - decommit_size; @@ -47199,7 +47199,7 @@ enable_no_gc_region_callback_status gc_heap::enable_no_gc_callback(NoGCRegionCal soh_withheld_budget = soh_withheld_budget / gc_heap::n_heaps; loh_withheld_budget = loh_withheld_budget / gc_heap::n_heaps; #endif - soh_withheld_budget = max(soh_withheld_budget, 1); + soh_withheld_budget = max(soh_withheld_budget, (size_t)1); soh_withheld_budget = Align(soh_withheld_budget, get_alignment_constant (TRUE)); loh_withheld_budget = Align(loh_withheld_budget, get_alignment_constant (FALSE)); #ifdef MULTIPLE_HEAPS @@ -47614,7 +47614,7 @@ void 
gc_heap::verify_regions (int gen_number, bool can_verify_gen_num, bool can_ } if (can_verify_gen_num) { - if (heap_segment_gen_num (seg_in_gen) != min (gen_number, max_generation)) + if (heap_segment_gen_num (seg_in_gen) != min (gen_number, (int)max_generation)) { dprintf (REGIONS_LOG, ("h%d gen%d region %p(%p) gen is %d!", heap_number, gen_number, seg_in_gen, heap_segment_mem (seg_in_gen), @@ -48464,7 +48464,7 @@ HRESULT GCHeap::Initialize() nhp = ((nhp_from_config == 0) ? g_num_active_processors : nhp_from_config); - nhp = min (nhp, MAX_SUPPORTED_CPUS); + nhp = min (nhp, (uint32_t)MAX_SUPPORTED_CPUS); gc_heap::gc_thread_no_affinitize_p = (gc_heap::heap_hard_limit ? !affinity_config_specified_p : (GCConfig::GetNoAffinitize() != 0)); @@ -48613,7 +48613,7 @@ HRESULT GCHeap::Initialize() /* * Allocation requests less than loh_size_threshold will be allocated on the small object heap. * - * An object cannot span more than one region and regions in small object heap are of the same size - gc_region_size. + * An object cannot span more than one region and regions in small object heap are of the same size - gc_region_size. * However, the space available for actual allocations is reduced by the following implementation details - * * 1.) heap_segment_mem is set to the new pages + sizeof(aligned_plug_and_gap) in make_heap_segment. @@ -48629,7 +48629,7 @@ HRESULT GCHeap::Initialize() #ifdef FEATURE_STRUCTALIGN /* * The above assumed FEATURE_STRUCTALIGN is not turned on for platforms where USE_REGIONS is supported, otherwise it is possible - * that the allocation size is inflated by ComputeMaxStructAlignPad in GCHeap::Alloc and we have to compute an upper bound of that + * that the allocation size is inflated by ComputeMaxStructAlignPad in GCHeap::Alloc and we have to compute an upper bound of that * function. * * Note that ComputeMaxStructAlignPad is defined to be 0 if FEATURE_STRUCTALIGN is turned off. 
@@ -48838,7 +48838,7 @@ HRESULT GCHeap::Initialize() gc_heap::dynamic_heap_count_data.inc_recheck_threshold = 5; gc_heap::dynamic_heap_count_data.dec_failure_recheck_threshold = 5; // This should really be set as part of computing static data and should take conserve_mem_setting into consideration. - gc_heap::dynamic_heap_count_data.max_gen0_new_allocation = min (dd_max_size (gc_heap::g_heaps[0]->dynamic_data_of (0)), (64 * 1024 * 1024)); + gc_heap::dynamic_heap_count_data.max_gen0_new_allocation = min (dd_max_size (gc_heap::g_heaps[0]->dynamic_data_of (0)), (size_t)(64 * 1024 * 1024)); gc_heap::dynamic_heap_count_data.min_gen0_new_allocation = dd_min_size (gc_heap::g_heaps[0]->dynamic_data_of (0)); dprintf (6666, ("datas max gen0 budget %Id, min %Id", @@ -49809,7 +49809,7 @@ GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode) gc_heap* hpt = 0; #endif //MULTIPLE_HEAPS - generation = (generation < 0) ? max_generation : min (generation, max_generation); + generation = (generation < 0) ? max_generation : min (generation, (int)max_generation); dynamic_data* dd = hpt->dynamic_data_of (generation); #ifdef BACKGROUND_GC @@ -49907,7 +49907,7 @@ size_t GCHeap::GarbageCollectTry (int generation, BOOL low_memory_p, int mode) { int gen = (generation < 0) ? - max_generation : min (generation, max_generation); + max_generation : min (generation, (int)max_generation); gc_reason reason = reason_empty; @@ -51262,11 +51262,11 @@ size_t gc_heap::get_gen0_min_size() #ifdef SERVER_GC // performance data seems to indicate halving the size results // in optimal perf. Ask for adjusted gen0 size. - gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),(256*1024)); + gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE), (size_t)(256*1024)); // if gen0 size is too large given the available memory, reduce it. // Get true cache size, as we don't want to reduce below this. 
- size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE),(256*1024)); + size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE), (size_t)(256*1024)); dprintf (1, ("cache: %zd-%zd", GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE), GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE))); @@ -51274,8 +51274,8 @@ size_t gc_heap::get_gen0_min_size() int n_heaps = gc_heap::n_heaps; #else //SERVER_GC size_t trueSize = GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE); - gen0size = max((4*trueSize/5),(256*1024)); - trueSize = max(trueSize, (256*1024)); + gen0size = max((4*trueSize/5),(size_t)(256*1024)); + trueSize = max(trueSize, (size_t)(256*1024)); int n_heaps = 1; #endif //SERVER_GC @@ -51283,7 +51283,7 @@ size_t gc_heap::get_gen0_min_size() if (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) { // if we are asked to be stingy with memory, limit gen 0 size - gen0size = min (gen0size, (4*1024*1024)); + gen0size = min (gen0size, (size_t)(4*1024*1024)); } #endif //DYNAMIC_HEAP_COUNT @@ -51904,7 +51904,7 @@ CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p) // it was promoted or not if (gen_0_empty_p) { - for (int i = min (gen+1, max_generation); i > 0; i--) + for (int i = min (gen+1, (int)max_generation); i > 0; i--) { m_FillPointers [gen_segment(i)] = m_FillPointers [gen_segment(i-1)]; } @@ -52807,7 +52807,7 @@ bool gc_heap::compute_memory_settings(bool is_initialization, uint32_t& nhp, uin if (is_initialization) #endif //USE_REGIONS { - heap_hard_limit = (size_t)max ((20 * 1024 * 1024), physical_mem_for_gc); + heap_hard_limit = (size_t)max ((uint64_t)(20 * 1024 * 1024), physical_mem_for_gc); } } } @@ -52855,8 +52855,8 @@ bool gc_heap::compute_memory_settings(bool is_initialization, uint32_t& nhp, uin uint32_t highmem_th_from_config = (uint32_t)GCConfig::GetGCHighMemPercent(); if (highmem_th_from_config) { - high_memory_load_th = min (99, highmem_th_from_config); - v_high_memory_load_th = min (99, 
(highmem_th_from_config + 7)); + high_memory_load_th = min (99u, highmem_th_from_config); + v_high_memory_load_th = min (99u, (highmem_th_from_config + 7)); #ifdef FEATURE_EVENT_TRACE high_mem_percent_from_config = highmem_th_from_config; #endif //FEATURE_EVENT_TRACE diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h index 6a3b600f8633f0..6db2e06d04c004 100644 --- a/src/coreclr/gc/gcpriv.h +++ b/src/coreclr/gc/gcpriv.h @@ -3345,8 +3345,8 @@ class gc_heap size_t new_current_total_committed); #ifdef USE_REGIONS - PER_HEAP_ISOLATED_METHOD void compute_committed_bytes(size_t& total_committed, size_t& committed_decommit, size_t& committed_free, - size_t& committed_bookkeeping, size_t& new_current_total_committed, size_t& new_current_total_committed_bookkeeping, + PER_HEAP_ISOLATED_METHOD void compute_committed_bytes(size_t& total_committed, size_t& committed_decommit, size_t& committed_free, + size_t& committed_bookkeeping, size_t& new_current_total_committed, size_t& new_current_total_committed_bookkeeping, size_t* new_committed_by_oh); #endif @@ -4226,7 +4226,7 @@ class gc_heap #ifdef DYNAMIC_HEAP_COUNT // Sample collection - - // + // // For every GC, we collect the msl wait time + GC pause duration info and use both to calculate the // throughput cost percentage. We will also be using the wait time and the GC pause duration separately // for other purposes in the future. 
@@ -6019,3 +6019,6 @@ class card_marking_enumerator #else #define THIS_ARG #endif // FEATURE_CARD_MARKING_STEALING + +using std::min; +using std::max; diff --git a/src/coreclr/gc/sample/CMakeLists.txt b/src/coreclr/gc/sample/CMakeLists.txt index 94a736e8c8126d..1f297fd2313329 100644 --- a/src/coreclr/gc/sample/CMakeLists.txt +++ b/src/coreclr/gc/sample/CMakeLists.txt @@ -53,6 +53,7 @@ if(CLR_CMAKE_TARGET_WIN32) list(APPEND SOURCES ../windows/gcenv.windows.cpp) add_definitions(-DUNICODE) + add_compile_definitions(NOMINMAX) else() list(APPEND SOURCES ../gcenv.unix.cpp) diff --git a/src/coreclr/gc/sample/GCSample.cpp b/src/coreclr/gc/sample/GCSample.cpp index 41e275035b9142..0f2afc7c20a717 100644 --- a/src/coreclr/gc/sample/GCSample.cpp +++ b/src/coreclr/gc/sample/GCSample.cpp @@ -176,7 +176,7 @@ int __cdecl main(int argc, char* argv[]) // GC expects the size of ObjHeader (extra void*) to be included in the size. baseSize = baseSize + sizeof(ObjHeader); // Add padding as necessary. GC requires the object size to be at least MIN_OBJECT_SIZE. 
- My_MethodTable.m_MT.m_baseSize = max(baseSize, MIN_OBJECT_SIZE); + My_MethodTable.m_MT.m_baseSize = max(baseSize, (uint32_t)MIN_OBJECT_SIZE); My_MethodTable.m_MT.m_componentSize = 0; // Array component size My_MethodTable.m_MT.m_flags = MTFlag_ContainsPointers; diff --git a/src/coreclr/gc/sample/GCSample.vcxproj b/src/coreclr/gc/sample/GCSample.vcxproj index 6e33738d18d0dd..0b7e657b35f807 100644 --- a/src/coreclr/gc/sample/GCSample.vcxproj +++ b/src/coreclr/gc/sample/GCSample.vcxproj @@ -51,7 +51,7 @@ Use Level3 Disabled - WIN32;HOST_X86;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + WIN32;HOST_X86;NOMINMAX;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true common.h .;..;..\env @@ -68,7 +68,7 @@ MaxSpeed true true - WIN32;HOST_X86;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + WIN32;HOST_X86;NOMINMAX;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true .;..;..\env @@ -109,4 +109,4 @@ - \ No newline at end of file + diff --git a/src/coreclr/gc/vxsort/defs.h b/src/coreclr/gc/vxsort/defs.h index d048185884770e..d6373a21ad2699 100644 --- a/src/coreclr/gc/vxsort/defs.h +++ b/src/coreclr/gc/vxsort/defs.h @@ -45,16 +45,6 @@ #define NOINLINE __attribute__((noinline)) #endif -#include - -#ifndef max -template -T max(T a, T b) { - if (a > b) - return a; - else - return b; -} -#endif - +using std::max; +using std::min; #endif // VXSORT_DEFS_H diff --git a/src/coreclr/gc/windows/gcenv.windows.cpp b/src/coreclr/gc/windows/gcenv.windows.cpp index 0aae8e035bbb4e..608751dd169aff 100644 --- a/src/coreclr/gc/windows/gcenv.windows.cpp +++ b/src/coreclr/gc/windows/gcenv.windows.cpp @@ -290,8 +290,8 @@ static size_t GetRestrictedPhysicalMemoryLimit() (job_process_memory_limit != (size_t)UINTPTR_MAX) || (job_workingset_limit != (size_t)UINTPTR_MAX)) { - job_physical_memory_limit = min (job_memory_limit, job_process_memory_limit); - job_physical_memory_limit = min (job_physical_memory_limit, job_workingset_limit); + job_physical_memory_limit = std::min (job_memory_limit, 
job_process_memory_limit); + job_physical_memory_limit = std::min (job_physical_memory_limit, job_workingset_limit); MEMORYSTATUSEX ms; ::GetProcessMemoryLoad(&ms); @@ -299,7 +299,7 @@ static size_t GetRestrictedPhysicalMemoryLimit() total_physical = ms.ullAvailPhys; // A sanity check in case someone set a larger limit than there is actual physical memory. - job_physical_memory_limit = (size_t) min (job_physical_memory_limit, ms.ullTotalPhys); + job_physical_memory_limit = (size_t) std::min (job_physical_memory_limit, (size_t)ms.ullTotalPhys); } } } @@ -1139,7 +1139,7 @@ bool GCToOSInterface::GetNumaInfo(uint16_t* total_nodes, uint32_t* max_procs_per mask &= mask - 1; } - currentProcsOnNode = max(currentProcsOnNode, procsOnNode); + currentProcsOnNode = std::max(currentProcsOnNode, procsOnNode); } *max_procs_per_node = currentProcsOnNode; *total_nodes = (uint16_t)g_nNodes; @@ -1163,7 +1163,7 @@ bool GCToOSInterface::GetCPUGroupInfo(uint16_t* total_groups, uint32_t* max_proc DWORD currentProcsInGroup = 0; for (WORD i = 0; i < g_nGroups; i++) { - currentProcsInGroup = max(currentProcsInGroup, g_CPUGroupInfoArray[i].nr_active); + currentProcsInGroup = std::max(currentProcsInGroup, (DWORD)g_CPUGroupInfoArray[i].nr_active); } *max_procs_per_group = currentProcsInGroup; return true; diff --git a/src/coreclr/hosts/coreshim/CoreShim.h b/src/coreclr/hosts/coreshim/CoreShim.h index 97b630bdb9e193..9be052926ec57c 100644 --- a/src/coreclr/hosts/coreshim/CoreShim.h +++ b/src/coreclr/hosts/coreshim/CoreShim.h @@ -5,7 +5,6 @@ #define _CORESHIM_H_ // Platform -#define NOMINMAX #include #include diff --git a/src/coreclr/ildasm/dasm.cpp b/src/coreclr/ildasm/dasm.cpp index 21dff99a381233..da3aa514c0dca2 100644 --- a/src/coreclr/ildasm/dasm.cpp +++ b/src/coreclr/ildasm/dasm.cpp @@ -1914,7 +1914,7 @@ BYTE* PrettyPrintCABlobValue(PCCOR_SIGNATURE &typePtr, for(n=0; n < numElements; n++) { if(n) appendStr(out," "); - _gcvt_s(str,64,*((float*)dataPtr), 8); + sprintf_s(str, 64, "%.*g", 8, 
(double)(*((float*)dataPtr))); float df = (float)atof(str); // Must compare as underlying bytes, not floating point otherwise optimizer will // try to enregister and compare 80-bit precision number with 32-bit precision number!!!! @@ -1933,7 +1933,7 @@ BYTE* PrettyPrintCABlobValue(PCCOR_SIGNATURE &typePtr, { if(n) appendStr(out," "); char *pch; - _gcvt_s(str,64,*((double*)dataPtr), 17); + sprintf_s(str, 64, "%.*g", 17, *((double*)dataPtr)); double df = strtod(str, &pch); // Must compare as underlying bytes, not floating point otherwise optimizer will // try to enregister and compare 80-bit precision number with 64-bit precision number!!!! @@ -2605,7 +2605,7 @@ void DumpDefaultValue(mdToken tok, __inout __nullterminated char* szString, void case ELEMENT_TYPE_R4: { char szf[32]; - _gcvt_s(szf,32,MDDV.m_fltValue, 8); + sprintf_s(szf, 32, "%.*g", 8, (double)MDDV.m_fltValue); float df = (float)atof(szf); // Must compare as underlying bytes, not floating point otherwise optimizer will // try to enregister and compare 80-bit precision number with 32-bit precision number!!!! @@ -2619,7 +2619,7 @@ void DumpDefaultValue(mdToken tok, __inout __nullterminated char* szString, void case ELEMENT_TYPE_R8: { char szf[32], *pch; - _gcvt_s(szf,32,MDDV.m_dblValue, 17); + sprintf_s(szf, 32, "%.*g", 17, MDDV.m_dblValue); double df = strtod(szf, &pch); //atof(szf); szf[31]=0; // Must compare as underlying bytes, not floating point otherwise optimizer will diff --git a/src/coreclr/ildasm/dis.cpp b/src/coreclr/ildasm/dis.cpp index 21fc8c86790285..2ad1ecd2d200a1 100644 --- a/src/coreclr/ildasm/dis.cpp +++ b/src/coreclr/ildasm/dis.cpp @@ -1573,7 +1573,7 @@ BOOL Disassemble(IMDInternalImport *pImport, BYTE *ILHeader, void *GUICookie, md if(f==0.0) strcpy_s(szf,32,((v>>24)==0)? 
"0.0" : "-0.0"); else - _gcvt_s(szf,32,(double)f, 8); + sprintf_s(szf, 32, "%.*g", 8, (double)f); float fd = (float)atof(szf); // Must compare as underlying bytes, not floating point otherwise optimizer will // try to enregister and compare 80-bit precision number with 32-bit precision number!!!! @@ -1612,7 +1612,7 @@ BOOL Disassemble(IMDInternalImport *pImport, BYTE *ILHeader, void *GUICookie, md if(d==0.0) strcpy_s(szf,32,((v>>56)==0)? "0.0" : "-0.0"); else - _gcvt_s(szf,32,d, 17); + sprintf_s(szf, 32, "%.*g", 17, d); double df = strtod(szf, &pch); //atof(szf); // Must compare as underlying bytes, not floating point otherwise optimizer will // try to enregister and compare 80-bit precision number with 64-bit precision number!!!! diff --git a/src/coreclr/ildasm/ildasmpch.h b/src/coreclr/ildasm/ildasmpch.h index 9d89ba46db52c2..5bb192dd14e10e 100644 --- a/src/coreclr/ildasm/ildasmpch.h +++ b/src/coreclr/ildasm/ildasmpch.h @@ -12,6 +12,10 @@ #include #include #include +#include + +using std::min; +using std::max; #ifndef Debug_ReportError #define Debug_ReportError(strMessage) diff --git a/src/coreclr/inc/allocacheck.h b/src/coreclr/inc/allocacheck.h index ea7e6df316f01d..1c4f0a58497133 100644 --- a/src/coreclr/inc/allocacheck.h +++ b/src/coreclr/inc/allocacheck.h @@ -23,7 +23,16 @@ #ifndef AllocaCheck_h #define AllocaCheck_h -#include // for alloca itself + +#if defined(HOST_WINDOWS) +#include // for alloca itself +#else +#if defined(__has_include) +#if __has_include() +#include +#endif // __has_include(alloca.h) +#endif // defined(__has_include) +#endif // defined(HOST_WINDOWS) #if defined(assert) && !defined(_ASSERTE) #define _ASSERTE assert diff --git a/src/coreclr/inc/check.h b/src/coreclr/inc/check.h index 6951e2a41837b6..30ea0fdaf4d815 100644 --- a/src/coreclr/inc/check.h +++ b/src/coreclr/inc/check.h @@ -111,7 +111,7 @@ class CHECK #ifdef _DEBUG , m_condition (NULL) , m_file(NULL) - , m_line(NULL) + , m_line(0) , m_pCount(NULL) #endif {} diff --git 
a/src/coreclr/inc/clr_std/algorithm b/src/coreclr/inc/clr_std/algorithm deleted file mode 100644 index ebd21b09c5e587..00000000000000 --- a/src/coreclr/inc/clr_std/algorithm +++ /dev/null @@ -1,118 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// -// clr_std/algorithm -// -// Copy of some key Standard Template Library functionality - -#ifdef _MSC_VER -#pragma once -#endif - -#ifdef USE_STL -#include -#else -#ifndef __clr_std_algorithm_h__ -#define __clr_std_algorithm_h__ - -namespace std -{ - template - iter find_if ( iter first, iter last, CompareFunc comp ) - { - for ( ; first!=last ; first++ ) - if ( comp(*first) ) - break; - return first; - } - - template - iter find(iter first, iter last, const T& val) - { - for (;first != last; first++) - { - if (*first == val) - break; - } - return first; - } - - template - iter qsort_partition( iter first, iter last, iter pivot, comp compare ) - { - iter lastMinusOne = last - 1; - swap(pivot, lastMinusOne); - - // Pivot is at end - pivot = last - 1; - - iter partitionLoc = first; - - for (iter partitionWalk = first; partitionWalk != pivot; ++partitionWalk) - { - if (compare(*partitionWalk, *pivot)) - { - swap(*partitionWalk, *partitionLoc); - partitionLoc++; - } - } - swap(*pivot, *partitionLoc); - - return partitionLoc; - } - - template - void sort_worker ( iter first, iter last, comp compare ) - { - typename iter::difference_type RangeSize = last - first; - - // When down to a list of size 1, be done - if (RangeSize < 2) - return; - - // Pick pivot - - // Use simple pick middle algorithm - iter pivotLoc = first + (RangeSize / 2); - - // Partition - pivotLoc = qsort_partition(first, last, pivotLoc, compare); - - // Sort first array - sort_worker(first, pivotLoc, compare); - - // Sort second array - sort_worker(pivotLoc + 1, last, compare); - } - - template - void sort ( iter first, iter last, comp compare ) - { - 
sort_worker(first, last, compare); - if (first != last) - { - for (iter i = first; i < (last - 1); i++) - { - // Assert that the sort function works. - assert(!compare(*(i+1), *i)); - } - } - } - - template - OutIter transform( InIter first, InIter last, OutIter dest, Fn1 func ) - { - for ( ; first!=last ; ++first, ++dest ) - *dest = func(*first); - return dest; - } - -} // namespace std - -#endif /* __clr_std_algorithm_h__ */ - -#endif // !USE_STL - -// Help the VIM editor figure out what kind of file this no-extension file is. -// vim: filetype=cpp diff --git a/src/coreclr/inc/clr_std/string b/src/coreclr/inc/clr_std/string deleted file mode 100644 index 59ac67b98653cd..00000000000000 --- a/src/coreclr/inc/clr_std/string +++ /dev/null @@ -1,425 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// -// clr_std/string -// -// Copy of some key Standard Template Library functionality -// -// This was created for use with SuperPMI. It has the minimal functionality needed by SuperPMI. It hasn't -// been tested elsewhere. - -#ifdef _MSC_VER -#pragma once -#endif - -#ifdef USE_STL -#include -#else -#ifndef __clr_std_string_h__ -#define __clr_std_string_h__ - -#include "clr_std/vector" - -namespace std -{ - -template -class basic_string -{ -public: - typedef T value_type; - typedef size_t size_type; - typedef typename vector::iterator iterator; - typedef typename vector::const_iterator const_iterator; - - basic_string() - : m_string(1) // start with a string of length 1 for null terminator - { - m_string[0] = T(); - } - - basic_string(const basic_string& _Right) - { - assign(_Right); - } - - // Initialize a string with _Count characters from the string pointed at by _Ptr. - // If you want to include the trailing null character, _Count needs to include that. 
- basic_string(const value_type* _Ptr, size_type _Count) - : m_string(_Count + 1) // add 1 for a null terminator - { - copy(_Ptr, _Count); - } - - basic_string(const value_type* _Ptr) : basic_string(_Ptr, c_len(_Ptr)) - { - } - - void reserve(size_t newcapacity) - { - m_string.reserve(newcapacity + 1); // add 1 for the null terminator - } - - // - // Assignment - // - - basic_string& operator=(const basic_string& _Right) - { - if (this != &_Right) - { - assign(_Right); - } - return (*this); - } - - basic_string& assign(const basic_string& _Right) - { - m_string.resize(_Right.size() + 1); // +1 for null terminator - copy(_Right); - return (*this); - } - - // - // Basic data copying - // - - void copy(const basic_string& _Right) - { - assert(size() >= _Right.size()); - size_type i; - for (i = 0; i < _Right.size(); i++) - { - m_string[i] = _Right.m_string[i]; - } - m_string[i] = T(); - } - - void copy(const value_type* _Ptr, size_type _Count) - { - assert(size() >= _Count); - size_type i; - for (i = 0; i < _Count; i++) - { - m_string[i] = _Ptr[i]; - } - m_string[i] = T(); - } - - // - // Appending - // - - // Append a C-style string to the string. 
- basic_string& operator+=(const value_type* _Ptr) - { - size_type oldsize = size(); // doesn't include null terminator - size_type addsize = c_len(_Ptr); // doesn't include null terminator - size_type newsize = oldsize + addsize + 1; - m_string.resize(newsize); - size_type i; - for (i = oldsize; i < newsize - 1; i++) - { - m_string[i] = *_Ptr++; - } - m_string[i] = T(); - return (*this); - } - - basic_string& operator+=(const basic_string& _Right) - { - size_type oldsize = size(); // doesn't include null terminator - size_type addsize = _Right.size(); // doesn't include null terminator - size_type newsize = oldsize + addsize + 1; - m_string.resize(newsize); - size_type new_index = oldsize, right_index = 0; - while (right_index < addsize) - { - m_string[new_index] = _Right.m_string[right_index]; - ++new_index; - ++right_index; - } - m_string[new_index] = T(); - return (*this); - } - - basic_string& operator+=(value_type _Ch) - { - size_type oldsize = size(); // doesn't include null terminator - m_string[oldsize] = _Ch; // Replace the null terminator with the new symbol. - m_string.push_back(T()); // Return the replaced terminator again. - return (*this); - } - - ~basic_string() - { - // vector destructor does all the work - } - - size_t size() const - { - assert(m_string.size() > 0); - return m_string.size() - 1; // Don't report the null terminator. 
- } - - size_t length() const - { - return size(); - } - - T& operator[](size_t iIndex) - { - assert(iIndex < size() + 1); // allow looking at the null terminator - return m_string[iIndex]; - } - - const T* c_str() const - { - return m_string.data(); - } - - iterator begin() - { - return m_string.begin(); - } - - iterator end() - { - return m_string.end(); - } - - const_iterator cbegin() const - { - return m_string.cbegin(); - } - - const_iterator cend() const - { - return m_string.cend(); - } - - basic_string substr(size_type _Off = 0, size_type _Count = npos) const - { - size_type cursize = size(); - if (_Off >= cursize) - { - // result will be empty - return basic_string(); - } - else - { - if ((_Count == npos) || // No count specified; take the whole string suffix - (_Off + _Count > cursize)) // Count specified is too many characters; just take the whole suffix - { - _Count = cursize - _Off; - } - return basic_string(m_string.data() + _Off, _Count); - } - } - - size_type find_last_of(value_type _Ch) const - { - for (size_type _Off = size(); _Off != 0; _Off--) - { - if (m_string[_Off - 1] == _Ch) - { - return _Off - 1; - } - } - return npos; - } - - bool empty() const - { - return size() == 0; - } - - int compare(const basic_string& _Str) const - { - size_type i; - size_type compareSize = size(); - if (_Str.size() < compareSize) - { - // This string is longer; compare character-by-character only as many characters as we have. - compareSize = _Str.size(); - } - for (i = 0; i < compareSize; i++) - { - if (m_string[i] != _Str.m_string[i]) - { - if (m_string[i] < _Str.m_string[i]) - { - return -1; - } - else - { - return 1; - } - } - } - - // All the characters we compared were identical, but one string might be longer than the other. - if (size() == _Str.size()) - { - // We compared everything. - return 0; - } - else if (size() < _Str.size()) - { - // _Str has more characters than this. 
- return -1; - } - else - { - // this has more characters than _Str - return 1; - } - } - - static const size_type npos = size_type(-1); - -private: - - // Compute the length in characters of a null-terminated C-style string, not including the trailing null character. - // _Ptr must not be nullptr. - size_type c_len(const value_type* _Ptr) - { - size_type count; - for (count = 0; *_Ptr != T(); _Ptr++) - { - count++; - } - return count; - } - - vector m_string; // use a vector<> to represent the string, to avoid reimplementing similar functionality - -}; // class basic_string - -// -// String class instantiations -// - -typedef basic_string string; - -// -// Numeric conversions -// - -// convert integer T to string -template inline -string _IntToString(const char *_Fmt, T _Val) -{ - const size_t MaxIntBufSize = 21; /* can hold -2^63 and 2^64 - 1, plus NUL */ - char buf[MaxIntBufSize]; - int len = sprintf_s(buf, MaxIntBufSize, _Fmt, _Val); - return (string(buf, len)); -} - -inline string to_string(int _Val) -{ - return (_IntToString("%d", _Val)); -} - -inline string to_string(unsigned int _Val) -{ - return (_IntToString("%u", _Val)); -} - -inline string to_string(long _Val) -{ - return (_IntToString("%ld", _Val)); -} - -inline string to_string(unsigned long _Val) -{ - return (_IntToString("%lu", _Val)); -} - -inline string to_string(long long _Val) -{ - return (_IntToString("%lld", _Val)); -} - -inline string to_string(unsigned long long _Val) -{ - return (_IntToString("%llu", _Val)); -} - -// -// Comparisons -// - -template inline -bool operator==( - const basic_string& _Left, - const basic_string& _Right) -{ - return (_Left.compare(_Right) == 0); -} - -template inline -bool operator!=( - const basic_string& _Left, - const basic_string& _Right) -{ - return (!(_Left == _Right)); -} - -template inline -bool operator<( - const basic_string& _Left, - const basic_string& _Right) -{ - return (_Left.compare(_Right) < 0); -} - -template inline -bool operator>( - const 
basic_string& _Left, - const basic_string& _Right) -{ - return (_Right < _Left); -} - -template inline -bool operator<=( - const basic_string& _Left, - const basic_string& _Right) -{ - return (!(_Right < _Left)); -} - -template inline -bool operator>=( - const basic_string& _Left, - const basic_string& _Right) -{ - return (!(_Left < _Right)); -} - -// -// String concatenation and other string operations -// - -template inline -basic_string operator+( - const basic_string& _Left, - const basic_string& _Right) -{ - basic_string ret; - ret.reserve(_Left.size() + _Right.size()); - ret += _Left; - ret += _Right; - return ret; -} - -}; // namespace std - -#endif /* __clr_std_string_h__ */ - -#endif // !USE_STL - -// Help the VIM editor figure out what kind of file this no-extension file is. -// vim: filetype=cpp diff --git a/src/coreclr/inc/clr_std/type_traits b/src/coreclr/inc/clr_std/type_traits deleted file mode 100644 index ba007c32d9fef2..00000000000000 --- a/src/coreclr/inc/clr_std/type_traits +++ /dev/null @@ -1,627 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// -// clr_std/utility -// -// Copy of some key Standard Template Library functionality. -// See http://msdn.microsoft.com/en-us/library/bb982077.aspx for documentation. 
-// - -#ifdef _MSC_VER -#pragma once -#endif - -#ifndef __clr_std_type_traits_h__ -#define __clr_std_type_traits_h__ - -#ifdef USE_STL - -#include - -#else - -namespace std -{ - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS remove_const - template - struct remove_const - { // remove top level const qualifier - typedef _Ty type; - }; - - template - struct remove_const - { // remove top level const qualifier - typedef _Ty type; - }; - - template - struct remove_const - { // remove top level const qualifier - typedef _Ty type[]; - }; - - template - struct remove_const - { // remove top level const qualifier - typedef _Ty type[_Nx]; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS remove_volatile - template - struct remove_volatile - { // remove top level volatile qualifier - typedef _Ty type; - }; - - template - struct remove_volatile - { // remove top level volatile qualifier - typedef _Ty type; - }; - - template - struct remove_volatile - { // remove top level volatile qualifier - typedef _Ty type[]; - }; - - template - struct remove_volatile - { // remove top level volatile qualifier - typedef _Ty type[_Nx]; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS remove_cv - template - struct remove_cv - { // remove top level const and volatile qualifiers - typedef typename remove_const::type>::type type; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE remove_reference - template - struct remove_reference - { // remove reference - typedef T type; - }; - - template - struct remove_reference - { // remove reference - typedef T type; - }; - - template - struct remove_reference - { // remove rvalue reference - typedef T type; - }; - - 
//----------------------------------------------------------------------------------------- - // TEMPLATE remove_pointer - template - struct remove_pointer - { // remove pointer - typedef T type; - }; - - template - struct remove_pointer - { // remove pointer - typedef T type; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE FUNCTION identity - template - struct identity - { // map T to type unchanged - typedef T type; - - inline - const T& operator()(const T& left) const - { // apply identity operator to operand - return (left); - } - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS integral_constant - template - struct integral_constant - { // convenient template for integral constant types - static const _Ty value = _Val; - - typedef _Ty value_type; - typedef integral_constant<_Ty, _Val> type; - }; - - typedef integral_constant true_type; - typedef integral_constant false_type; - - // TEMPLATE CLASS _Cat_base - template - struct _Cat_base - : false_type - { // base class for type predicates - }; - - template<> - struct _Cat_base - : true_type - { // base class for type predicates - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS enable_if - template - struct enable_if - { // type is undefined for assumed !_Test - }; - - template - struct enable_if - { // type is _Type for _Test - typedef _Type type; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS conditional - template - struct conditional - { // type is _Ty2 for assumed !_Test - typedef _Ty2 type; - }; - - template - struct conditional - { // type is _Ty1 for _Test - typedef _Ty1 type; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS make_unsigned - template - 
struct make_unsigned - { - }; - - template<> - struct make_unsigned - { - typedef unsigned int type; - }; - -#ifndef HOST_UNIX - - template<> - struct make_unsigned - { - typedef unsigned long type; - }; - -#endif // !HOST_UNIX - - template<> - struct make_unsigned<__int64> - { - typedef unsigned __int64 type; - }; - - template<> - struct make_unsigned - { - typedef size_t type; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS make_signed - template - struct make_signed - { - }; - - template<> - struct make_signed - { - typedef signed int type; - }; - -#ifndef HOST_UNIX - - template<> - struct make_signed - { - typedef signed long type; - }; - -#endif // !HOST_UNIX - - template<> - struct make_signed - { - typedef signed __int64 type; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS is_lvalue_reference - template - struct is_lvalue_reference - : false_type - { // determine whether _Ty is an lvalue reference - }; - - template - struct is_lvalue_reference<_Ty&> - : true_type - { // determine whether _Ty is an lvalue reference - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS is_rvalue_reference - template - struct is_rvalue_reference - : false_type - { // determine whether _Ty is an rvalue reference - }; - - template - struct is_rvalue_reference<_Ty&&> - : true_type - { // determine whether _Ty is an rvalue reference - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS is_reference - template - struct is_reference - : conditional< - is_lvalue_reference<_Ty>::value || is_rvalue_reference<_Ty>::value, - true_type, - false_type>::type - { // determine whether _Ty is a reference - }; - - // TEMPLATE CLASS is_pointer - template - struct is_pointer - : false_type - { // determine whether _Ty is 
a pointer - }; - - template - struct is_pointer<_Ty *> - : true_type - { // determine whether _Ty is a pointer - }; - - // TEMPLATE CLASS _Is_integral - template - struct _Is_integral - : false_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - -// On Unix 'long' is a 64-bit type (same as __int64) and the following two definitions -// conflict with _Is_integral and _Is_integral. 
-#if !defined(HOST_UNIX) || defined(__APPLE__) - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; -#endif /* !HOST_UNIX || __APPLE__ */ - - #if _HAS_CHAR16_T_LANGUAGE_SUPPORT - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - #endif /* _HAS_CHAR16_T_LANGUAGE_SUPPORT */ - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - // TEMPLATE CLASS is_integral - template - struct is_integral - : _Is_integral::type> - { // determine whether _Ty is integral - }; - - // TEMPLATE CLASS _Is_floating_point - template - struct _Is_floating_point - : false_type - { // determine whether _Ty is floating point - }; - - template<> - struct _Is_floating_point - : true_type - { // determine whether _Ty is floating point - }; - - template<> - struct _Is_floating_point - : true_type - { // determine whether _Ty is floating point - }; - -// In PAL, we define long as int and so this becomes int double, -// which is a nonsense -#ifndef HOST_UNIX - template<> - struct _Is_floating_point - : true_type - { // determine whether _Ty is floating point - }; -#endif - - // TEMPLATE CLASS is_floating_point - template - struct is_floating_point - : _Is_floating_point::type> - { // determine whether _Ty is floating point - }; - - // TEMPLATE CLASS is_arithmetic - template - struct is_arithmetic - : _Cat_base::value - || is_floating_point<_Ty>::value> - { // determine whether _Ty is an arithmetic type - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS is_signed - template - struct is_signed : conditional< - 
static_cast::type>(-1) < 0, true_type, false_type>::type {}; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS is_same - template - struct is_same : false_type { }; - - //----------------------------------------------------------------------------------------- - template - struct is_same : true_type { }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS is_base_of -#ifdef _MSC_VER - - template - struct is_base_of : - conditional<__is_base_of( TBase, TDerived), true_type, false_type>::type {}; - -#else - namespace detail - { - //------------------------------------------------------------------------------------- - // Helper types Small and Big - guarantee that sizeof(Small) < sizeof(Big) - // - - template - struct conversion_helper - { - typedef char Small; - struct Big { char dummy[2]; }; - static Big Test(...); - static Small Test(U); - static T MakeT(); - }; - - //------------------------------------------------------------------------------------- - // class template conversion - // Figures out the conversion relationships between two types - // Invocations (T and U are types): - // a) conversion::exists - // returns (at compile time) true if there is an implicit conversion from T - // to U (example: Derived to Base) - // b) conversion::exists2Way - // returns (at compile time) true if there are both conversions from T - // to U and from U to T (example: int to char and back) - // c) conversion::sameType - // returns (at compile time) true if T and U represent the same type - // - // NOTE: might not work if T and U are in a private inheritance hierarchy. 
- // - - template - struct conversion - { - typedef detail::conversion_helper H; - static const bool exists = sizeof(typename H::Small) == sizeof((H::Test(H::MakeT()))); - static const bool exists2Way = exists && conversion::exists; - static const bool sameType = false; - }; - - template - struct conversion - { - static const bool exists = true; - static const bool exists2Way = true; - static const bool sameType = true; - }; - - template - struct conversion - { - static const bool exists = false; - static const bool exists2Way = false; - static const bool sameType = false; - }; - - template - struct conversion - { - static const bool exists = false; - static const bool exists2Way = false; - static const bool sameType = false; - }; - - template <> - struct conversion - { - static const bool exists = true; - static const bool exists2Way = true; - static const bool sameType = true; - }; - } // detail - - // Note that we need to compare pointer types here, since conversion of types by-value - // just tells us whether or not an implicit conversion constructor exists. We handle - // type parameters that are already pointers specially; see below. - template - struct is_base_of : - conditional::exists, true_type, false_type>::type {}; - - // Specialization to handle type parameters that are already pointers. - template - struct is_base_of : - conditional::exists, true_type, false_type>::type {}; - - // Specialization to handle invalid mixing of pointer types. - template - struct is_base_of : - false_type {}; - - // Specialization to handle invalid mixing of pointer types. - template - struct is_base_of : - false_type {}; - -#endif - - namespace detail - { - template - using void_t = void; - } - // Always false dependent-value for static_asserts. 
- template - struct _Always_false - { - const bool value = false; - }; - - template - struct _Add_reference { // add reference (non-referenceable type) - using _Lvalue = _Ty; - using _Rvalue = _Ty; - }; - - template - struct _Add_reference<_Ty, detail::void_t<_Ty&>> { // (referenceable type) - using _Lvalue = _Ty&; - using _Rvalue = _Ty&&; - }; - - template - struct add_lvalue_reference { - using type = typename _Add_reference<_Ty>::_Lvalue; - }; - - template - struct add_rvalue_reference { - using type = typename _Add_reference<_Ty>::_Rvalue; - }; - - template - typename add_rvalue_reference<_Ty>::type declval() noexcept - { - static_assert(_Always_false<_Ty>::value, "Calling declval is ill-formed, see N4892 [declval]/2."); - } -} // namespace std - -#endif // !USE_STL - -#define REM_CONST(T) typename std::remove_const< T >::type -#define REM_CV(T) typename std::remove_cv< T >::type -#define REM_REF(T) typename std::remove_reference< T >::type - -#define REF_T(T) REM_REF(T) & -#define REF_CT(T) REM_REF(REM_CONST(T)) const & - -#endif // __clr_std_type_traits_h__ - -// Help the VIM editor figure out what kind of file this no-extension file is. -// vim: filetype=cpp diff --git a/src/coreclr/inc/clr_std/utility b/src/coreclr/inc/clr_std/utility deleted file mode 100644 index 1b6b5a7b72c1e1..00000000000000 --- a/src/coreclr/inc/clr_std/utility +++ /dev/null @@ -1,253 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// -// clr_std/utility -// -// Copy of some key Standard Template Library functionality -// See http://msdn.microsoft.com/en-us/library/bb982077.aspx for documentation. 
-// - -#ifdef _MSC_VER -#pragma once -#endif - -#ifdef USE_STL -#include -#else -#ifndef __clr_std_utility_h__ -#define __clr_std_utility_h__ - -#include "clr_std/type_traits" - -namespace std -{ - //----------------------------------------------------------------------------------------- - // TEMPLATE FUNCTION move - template inline - typename remove_reference::type&& - move(T&& arg) - { // forward _Arg as movable - return ((typename remove_reference::type&&)arg); - } - - //----------------------------------------------------------------------------------------- - // TEMPLATE FUNCTION swap (from ) - template inline - void swap(T& left, T& right) - { // exchange values stored at left and right - T tmp = std::move(left); - left = std::move(right); - right = std::move(tmp); - } - - //----------------------------------------------------------------------------------------- - // TEMPLATE FUNCTION forward - template inline - T&& - forward(typename identity::type& _Arg) - { // forward _Arg, given explicitly specified type parameter - return ((T&&)_Arg); - } -} - -namespace std -{ - //----------------------------------------------------------------------------------------- - // TEMPLATE STRUCT pair - template - struct pair - { // store a pair of values - typedef pair<_Ty1, _Ty2> _Myt; - typedef _Ty1 first_type; - typedef _Ty2 second_type; - - pair() - : first(_Ty1()), second(_Ty2()) - { // construct from defaults - } - - pair(const _Ty1& _Val1, const _Ty2& _Val2) - : first(_Val1.first), second(_Val2.second) - { // construct from specified values - } - - template - pair(pair<_Other1, _Other2>& _Right) - : first(_Right.first), second(_Right.second) - { // construct from compatible pair - } - - template - pair(const pair<_Other1, _Other2>& _Right) - : first(_Right.first), second(_Right.second) - { // construct from compatible pair - } - - void swap(_Myt& _Right) - { // exchange contents with _Right - if (this != &_Right) - { // different, worth swapping - swap(this->first, 
_Right.first); - swap(this->second, _Right.second); - } - } - - _Myt& operator=(const _Myt& _Right) - { // assign from copied pair - this->first = _Right.first; - this->second = _Right.second; - return (*this); - } - - typedef typename remove_reference<_Ty1>::type _Ty1x; - typedef typename remove_reference<_Ty2>::type _Ty2x; - - pair(_Ty1x&& _Val1, _Ty2x&& _Val2) - : first(std::move(_Val1)), - second(std::move(_Val2)) - { // construct from specified values - } - - pair(const _Ty1x& _Val1, _Ty2x&& _Val2) - : first(_Val1), - second(std::move(_Val2)) - { // construct from specified values - } - - pair(_Ty1x&& _Val1, const _Ty2x& _Val2) - : first(std::move(_Val1)), - second(_Val2) - { // construct from specified values - } - - template - pair(_Other1&& _Val1, _Other2&& _Val2) - : first(std::move(_Val1)), - second(std::move(_Val2)) - { // construct from moved values - } - - template - pair(pair<_Other1, _Other2>&& _Right) - : first(std::move(_Right.first)), - second(std::move(_Right.second)) - { // construct from moved compatible pair - } - - pair& operator=(pair<_Ty1, _Ty2>&& _Right) - { // assign from moved pair - this->first = std::move(_Right.first); - this->second = std::move(_Right.second); - return (*this); - } - - void swap(_Myt&& _Right) - { // exchange contents with _Right - if (this != &_Right) - { // different, worth swapping - this->first = std::move(_Right.first); - this->second = std::move(_Right.second); - } - } - - _Ty1 first; // the first stored value - _Ty2 second; // the second stored value - }; // struct pair - - //----------------------------------------------------------------------------------------- - // pair TEMPLATE FUNCTIONS - - template inline - void swap(pair<_Ty1, _Ty2>& _Left, pair<_Ty1, _Ty2>& _Right) - { // swap _Left and _Right pairs - _Left.swap(_Right); - } - - template inline - void swap(pair<_Ty1, _Ty2>& _Left, pair<_Ty1, _Ty2>&& _Right) - { // swap _Left and _Right pairs - typedef pair<_Ty1, _Ty2> _Myt; - 
_Left.swap(std::forward<_Myt>(_Right)); - } - - template inline - void swap( - pair<_Ty1, _Ty2>&& _Left, - pair<_Ty1, _Ty2>& _Right) - { // swap _Left and _Right pairs - typedef pair<_Ty1, _Ty2> _Myt; - _Right.swap(std::forward<_Myt>(_Left)); - } - - template inline - bool operator==( - const pair<_Ty1, _Ty2>& _Left, - const pair<_Ty1, _Ty2>& _Right) - { // test for pair equality - return (_Left.first == _Right.first && _Left.second == _Right.second); - } - - template inline - bool operator!=( - const pair<_Ty1, _Ty2>& _Left, - const pair<_Ty1, _Ty2>& _Right) - { // test for pair inequality - return (!(_Left == _Right)); - } - - template inline - bool operator<( - const pair<_Ty1, _Ty2>& _Left, - const pair<_Ty1, _Ty2>& _Right) - { // test if _Left < _Right for pairs - return (_Left.first < _Right.first || - (!(_Right.first < _Left.first) && _Left.second < _Right.second)); - } - - template inline - bool operator>( - const pair<_Ty1, _Ty2>& _Left, - const pair<_Ty1, _Ty2>& _Right) - { // test if _Left > _Right for pairs - return (_Right < _Left); - } - - template inline - bool operator<=( - const pair<_Ty1, _Ty2>& _Left, - const pair<_Ty1, _Ty2>& _Right) - { // test if _Left <= _Right for pairs - return (!(_Right < _Left)); - } - - template inline - bool operator>=( - const pair<_Ty1, _Ty2>& _Left, - const pair<_Ty1, _Ty2>& _Right) - { // test if _Left >= _Right for pairs - return (!(_Left < _Right)); - } - - template inline - _InIt begin( - const pair<_InIt, _InIt>& _Pair) - { // return first element of pair - return (_Pair.first); - } - - template inline - _InIt end( - const pair<_InIt, _InIt>& _Pair) - { // return second element of pair - return (_Pair.second); - } - -} // namespace std - -#endif /* __clr_std_utility_h__ */ - -#endif // !USE_STL - -// Help the VIM editor figure out what kind of file this no-extension file is. 
-// vim: filetype=cpp diff --git a/src/coreclr/inc/clr_std/vector b/src/coreclr/inc/clr_std/vector deleted file mode 100644 index c2d1caba890aaf..00000000000000 --- a/src/coreclr/inc/clr_std/vector +++ /dev/null @@ -1,462 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// -// clr_std/vector -// -// Copy of some key Standard Template Library functionality -// - -#ifdef _MSC_VER -#pragma once -#endif - -#ifdef USE_STL -#include -#else -#ifndef __clr_std_vector_h__ -#define __clr_std_vector_h__ - -// This is defined in the debugmacrosext.h header, but don't take a dependency on that. -#ifndef INDEBUG -#ifdef _DEBUG -#define INDEBUG(x) x -#else -#define INDEBUG(x) -#endif -#endif // !def INDEBUG - -namespace std -{ - template - class vector - { - public: - class const_iterator; - - class iterator - { - friend class std::vector::const_iterator; - public: - typedef T value_type; - typedef ptrdiff_t difference_type; - typedef T* pointer; - typedef T& reference; - - typedef class vector::iterator _MyIter; - - _MyIter &operator++() - { - m_ptr++; - return *this; - } - - _MyIter operator++(int) - { - // post-increment ++ - _MyIter myiter(m_ptr); - m_ptr++; - return myiter; - } - - _MyIter &operator--() - { - m_ptr--; - return *this; - } - - _MyIter operator--(int) - { - // post-decrement -- - _MyIter myiter(m_ptr); - m_ptr--; - return myiter; - } - - _MyIter operator- (ptrdiff_t n) - { - _MyIter myiter(m_ptr); - myiter.m_ptr -= n; - return myiter; - } - - ptrdiff_t operator- (_MyIter right) - { - _MyIter myiter(m_ptr); - return myiter.m_ptr - right.m_ptr; - } - - _MyIter operator+ (ptrdiff_t n) - { - _MyIter myiter(m_ptr); - myiter.m_ptr += n; - return myiter; - } - - T* operator->() const - { - return m_ptr; - } - - T & operator*() const - { - return *m_ptr; - } - - bool operator==(const _MyIter& _Right) const - { - bool equals = this->m_ptr == _Right.m_ptr; - return 
equals; - } - - bool operator!=(const _MyIter& _Right) const - { - bool equals = this->m_ptr == _Right.m_ptr; - return !equals; - } - - bool operator<(const _MyIter& _Right) const - { - return this->m_ptr < _Right.m_ptr; - } - - bool operator>(const _MyIter& _Right) const - { - return this->m_ptr > _Right.m_ptr; - } - public: - explicit iterator(T* ptr) - { - m_ptr = ptr; - } - - private: - T* m_ptr; - }; // class iterator - - class const_iterator - { - public: - typedef class vector::const_iterator _MyIter; - typedef class vector::iterator _MyNonConstIter; - - _MyIter &operator++() - { - m_ptr++; - return *this; - } - - _MyIter operator++(int) - { - // post-increment ++ - _MyIter myiter(m_ptr); - m_ptr++; - return myiter; - } - - const T* operator->() const - { - return m_ptr; - } - - const T & operator*() const - { - return *m_ptr; - } - - bool operator==(const _MyIter& _Right) const - { - bool equals = this->m_ptr == _Right.m_ptr; - return equals; - } - - bool operator!=(const _MyIter& _Right) const - { - bool equals = this->m_ptr == _Right.m_ptr; - return !equals; - } - - public: - explicit const_iterator(T* ptr) - { - m_ptr = ptr; - } - const_iterator(const _MyNonConstIter &nonConstIterator) - { - m_ptr = nonConstIterator.m_ptr; - } - - private: - T* m_ptr; - }; // class const iterator - - - public: - explicit vector(size_t n = 0) - { - m_size = 0; - m_capacity = 0; - m_pelements = NULL; - m_isBufferOwner = true; - resize(n); - } - - ~vector() - { - if (m_isBufferOwner) - { - erase(m_pelements, 0, m_size); - delete [] (BYTE*)m_pelements; // cast to BYTE* as we don't want this delete to invoke T's dtor - } - else - { - m_size = 0; - m_capacity = 0; - } - } - - vector(const vector&) = delete; - vector& operator=(const vector&) = delete; - - vector(vector&& v) noexcept - : m_size(v.m_size) - , m_capacity(v.m_capacity) - , m_pelements(v.m_pelements) - , m_isBufferOwner(v.m_isBufferOwner) - { - v.m_isBufferOwner = false; - } - - vector& operator=(vector&& v) 
noexcept - { - if (m_isBufferOwner) - { - erase(m_pelements, 0, m_size); - delete [] (BYTE*)m_pelements; - } - - m_size = v.m_size; - m_capacity = v.m_capacity; - m_pelements = v.m_pelements; - m_isBufferOwner = v.m_isBufferOwner; - v.m_isBufferOwner = false; - return *this; - } - - size_t size() const - { - return m_size; - } - - T & operator[](size_t iIndex) - { - assert(iIndex < m_size); - return m_pelements[iIndex]; - } - - T & operator[](size_t iIndex) const - { - assert(iIndex < m_size); - return m_pelements[iIndex]; - } - - void resize(size_t newsize) - { - assert(m_isBufferOwner); - size_t oldsize = this->size(); - resize_noinit(newsize); - if (newsize > oldsize) - { - fill_uninitialized_with_default_value(m_pelements, oldsize, newsize); - } - } - - void clear() - { - assert(m_isBufferOwner); - resize(0); - } - - void resize(size_t newsize, T c) - { - assert(m_isBufferOwner); - size_t oldsize = this->size(); - resize_noinit(newsize); - if (newsize > oldsize) - { - for (size_t i = oldsize; i < newsize; i++) - { - m_pelements[i] = c; - } - } - } - - void wrap(size_t numElements, T* pElements) - { - m_size = numElements; - m_pelements = pElements; - m_isBufferOwner = false; - } - - void resize_noinit(size_t newsize) - { - assert(m_isBufferOwner); - size_t oldsize = this->size(); - if (newsize < oldsize) - { - // Shrink - erase(m_pelements, newsize, oldsize); - } - else if (newsize > oldsize) - { - // Grow - reserve(newsize); - } - m_size = newsize; - } - - void push_back(const T & val) - { - assert(m_isBufferOwner); - if (m_size + 1 < m_size) - { - assert("push_back: overflow"); - // @todo: how to throw. - } - resize(m_size + 1, val); - } - - void reserve(size_t newcapacity) - { - assert(m_isBufferOwner); - if (newcapacity > m_capacity) - { - // To avoid resizing for every element that gets added to a vector, we - // allocate at least twice the old capacity, or 16 elements, whichever is greater. 
- newcapacity = max(newcapacity, max(m_capacity * 2, 16)); - - size_t bytesNeeded = newcapacity * sizeof(T); - if (bytesNeeded / sizeof(T) != newcapacity) - { - assert("resize: overflow"); - // @todo: how to throw something here? - } - - - T *pelements = (T*)(new BYTE[bytesNeeded]); // Allocate as BYTE array to avoid automatic construction - INDEBUG(memset(pelements, 0xcc, bytesNeeded)); - for (size_t i = 0; i < m_size; i++) - { - pelements[i] = m_pelements[i]; - } - - erase(m_pelements, 0, m_size); - delete [] (BYTE*)m_pelements; // cast to BYTE* as we don't want this delete to invoke T's dtor - - m_pelements = pelements; - m_capacity = newcapacity; - } - } - - iterator begin() - { - return iterator(m_pelements); - } - - iterator end() - { - return iterator(m_pelements + m_size); - } - - const_iterator cbegin() const - { - return const_iterator(m_pelements); - } - - const_iterator cend() const - { - return const_iterator(m_pelements + m_size); - } - - iterator erase(iterator position) - { - assert(m_isBufferOwner); - assert((position > begin() || position == begin()) && position < end()); - ptrdiff_t index = position - begin(); - erase(m_pelements, index, index + 1); - memcpy(&m_pelements[index], &m_pelements[index + 1], sizeof(T) * (m_size - index - 1)); - --m_size; - return iterator(m_pelements + (position - begin())); - } - - iterator erase(iterator position, iterator positionEnd) - { - assert(m_isBufferOwner); - assert((position > begin() || position == begin()) && position < end()); - ptrdiff_t index = position - begin(); - ptrdiff_t elements = positionEnd - position; - erase(m_pelements, index, index + elements); - memcpy(&m_pelements[index], &m_pelements[index + elements], sizeof(T) * (m_size - index - elements)); - m_size -= elements; - return iterator(m_pelements + (position - begin())); - } - - T* data() - { - return m_pelements; - } - - const T* data() const - { - return m_pelements; - } - - private: - // Transition a subset of the array from 
uninitialized to initialized with default value for T. - static void fill_uninitialized_with_default_value(T* pelements, size_t startIdx, size_t endIdx) - { - assert(startIdx <= endIdx); - assert(pelements != NULL || startIdx == endIdx); - for (size_t i = startIdx; i < endIdx; i++) - { - INDEBUG(assert(0xcc == *((BYTE*)&pelements[i]))); - pelements[i] = T(); - } - } - - // Transition a subset of the array from a valid value of T to uninitialized. - static void erase(T* pelements, size_t startIdx, size_t endIdx) - { - assert(startIdx <= endIdx); - assert(pelements != NULL || startIdx == endIdx); - for (size_t i = startIdx; i < endIdx; i++) - { - pelements[i].~T(); - } - - INDEBUG(memset(&pelements[startIdx], 0xcc, (endIdx - startIdx) * sizeof(T))); - } - - private: - size_t m_size; //# of elements - size_t m_capacity; //# of elements allocated - T *m_pelements; //actual array - // invariants: - // dimensions == m_capacity - // elements 0 thru m_size-1 always contain constructed T values. - // elements from m_size thru m_capacity - 1 contain memory garbage (0xcc in DEBUG). - bool m_isBufferOwner; // indicate if this vector creates its own buffer, or wraps an existing buffer. - - - - - }; // class vector - -}; // namespace std - -#endif /* __clr_std_vector_h__ */ - -#endif // !USE_STL - -// Help the VIM editor figure out what kind of file this no-extension file is. 
-// vim: filetype=cpp diff --git a/src/coreclr/inc/contract.h b/src/coreclr/inc/contract.h index a3017ab9cb1b07..6658d4a999cda3 100644 --- a/src/coreclr/inc/contract.h +++ b/src/coreclr/inc/contract.h @@ -232,7 +232,6 @@ #include "specstrings.h" #include "clrtypes.h" -#include "malloc.h" #include "check.h" #include "debugreturn.h" #include "staticcontract.h" diff --git a/src/coreclr/inc/contract.inl b/src/coreclr/inc/contract.inl index d614f84e74f2a9..211b6b5a1d7012 100644 --- a/src/coreclr/inc/contract.inl +++ b/src/coreclr/inc/contract.inl @@ -352,7 +352,7 @@ inline void DbgStateLockData::LockTaken(DbgStateLockType dbgStateLockType, // Remember as many of these new entrances in m_rgTakenLockInfos as we can for (UINT i = cCombinedLocks; - i < min (ARRAY_SIZE(m_rgTakenLockInfos), cCombinedLocks + cTakes); + i < std::min (ARRAY_SIZE(m_rgTakenLockInfos), (size_t)(cCombinedLocks + cTakes)); i++) { m_rgTakenLockInfos[i].m_pvLock = pvLock; @@ -377,7 +377,7 @@ inline void DbgStateLockData::LockReleased(DbgStateLockType dbgStateLockType, UI // If lock count is within range of our m_rgTakenLockInfos buffer size, then // make sure we're releasing locks in reverse order of how we took them for (UINT i = cCombinedLocks - cReleases; - i < min (ARRAY_SIZE(m_rgTakenLockInfos), cCombinedLocks); + i < std::min (ARRAY_SIZE(m_rgTakenLockInfos), (size_t)cCombinedLocks); i++) { if (m_rgTakenLockInfos[i].m_pvLock != pvLock) @@ -443,7 +443,7 @@ inline BOOL DbgStateLockState::IsLockRetaken(void * pvLock) // m_cLocksEnteringCannotRetakeLock records the number of locks that were taken // when CANNOT_RETAKE_LOCK contract was constructed. 
for (UINT i = 0; - i < min(ARRAY_SIZE(m_pLockData->m_rgTakenLockInfos), m_cLocksEnteringCannotRetakeLock); + i < std::min(ARRAY_SIZE(m_pLockData->m_rgTakenLockInfos), (size_t)m_cLocksEnteringCannotRetakeLock); ++i) { if (m_pLockData->m_rgTakenLockInfos[i].m_pvLock == pvLock) diff --git a/src/coreclr/inc/crtwrap.h b/src/coreclr/inc/crtwrap.h index d3ab3a28be7c6b..59b68d7d466941 100644 --- a/src/coreclr/inc/crtwrap.h +++ b/src/coreclr/inc/crtwrap.h @@ -11,11 +11,11 @@ #define __CrtWrap_h__ #include +#include #include #include #include "debugmacros.h" #include -#include #include #include diff --git a/src/coreclr/inc/daccess.h b/src/coreclr/inc/daccess.h index fcd5f5bbf1ff18..ef6af6f2be0342 100644 --- a/src/coreclr/inc/daccess.h +++ b/src/coreclr/inc/daccess.h @@ -561,6 +561,10 @@ #ifndef NATIVEAOT #include +#if !defined(HOST_WINDOWS) +#include +#endif + #include "switches.h" #include "safemath.h" #include "corerror.h" @@ -568,12 +572,8 @@ // Keep in sync with the definitions in dbgutil.cpp and createdump.h #define DACCESS_TABLE_SYMBOL "g_dacTable" -#ifdef PAL_STDCPP_COMPAT #include -#else -#include "clr_std/type_traits" #include "crosscomp.h" -#endif #include diff --git a/src/coreclr/inc/dacprivate.h b/src/coreclr/inc/dacprivate.h index e8d0be5aba07e8..ae91e940ce22fd 100644 --- a/src/coreclr/inc/dacprivate.h +++ b/src/coreclr/inc/dacprivate.h @@ -467,7 +467,7 @@ struct MSLAYOUT DacpAssemblyData HRESULT Request(ISOSDacInterface *sos, CLRDATA_ADDRESS addr) { - return Request(sos, addr, NULL); + return Request(sos, addr, 0); } }; @@ -577,7 +577,7 @@ struct MSLAYOUT DacpMethodDescData { return sos->GetMethodDescData( addr, - NULL, // IP address + 0, // IP address this, 0, // cRejitData NULL, // rejitData[] diff --git a/src/coreclr/inc/holder.h b/src/coreclr/inc/holder.h index 16551b141ca1a8..984260308d04a2 100644 --- a/src/coreclr/inc/holder.h +++ b/src/coreclr/inc/holder.h @@ -11,13 +11,8 @@ #include "volatile.h" #include "palclr.h" -#ifdef PAL_STDCPP_COMPAT #include 
#include -#else -#include "clr_std/utility" -#include "clr_std/type_traits" -#endif #if defined(FEATURE_COMINTEROP) && !defined(STRIKE) #include diff --git a/src/coreclr/inc/loaderheap.h b/src/coreclr/inc/loaderheap.h index 216668315cbffe..b155d0188b84ea 100644 --- a/src/coreclr/inc/loaderheap.h +++ b/src/coreclr/inc/loaderheap.h @@ -158,7 +158,7 @@ struct LoaderHeapEvent; inline UINT32 GetStubCodePageSize() { #if defined(TARGET_ARM64) && defined(TARGET_UNIX) - return max(16*1024, GetOsPageSize()); + return max(16*1024u, GetOsPageSize()); #elif defined(TARGET_ARM) return 4096; // ARM is special as the 32bit instruction set does not easily permit a 16KB offset #else diff --git a/src/coreclr/inc/random.h b/src/coreclr/inc/random.h index 99936c6177b0a3..6a8d7001b20430 100644 --- a/src/coreclr/inc/random.h +++ b/src/coreclr/inc/random.h @@ -24,7 +24,7 @@ // Forbid the use of srand()/rand(), as these are globally shared facilities and our use of them would // interfere with native user code in the same process. This override is not compatible with stl headers. 
// -#if !defined(DO_NOT_DISABLE_RAND) && !defined(USE_STL) +#if !defined(DO_NOT_DISABLE_RAND) #ifdef srand #undef srand @@ -36,7 +36,7 @@ #endif #define rand Do_not_use_rand -#endif //!DO_NOT_DISABLE_RAND && !USE_STL +#endif //!DO_NOT_DISABLE_RAND class CLRRandom diff --git a/src/coreclr/inc/safemath.h b/src/coreclr/inc/safemath.h index fcd51af3de8cb0..ff1fcbee78115a 100644 --- a/src/coreclr/inc/safemath.h +++ b/src/coreclr/inc/safemath.h @@ -31,11 +31,11 @@ #include "static_assert.h" -#ifdef PAL_STDCPP_COMPAT #include -#else -#include "clr_std/type_traits" -#endif + +#ifdef FEATURE_PAL +#include "pal_mstypes.h" +#endif // FEATURE_PAL //================================================================== // Semantics: if val can be represented as the exact same value diff --git a/src/coreclr/inc/utilcode.h b/src/coreclr/inc/utilcode.h index fe5db13f6b9719..55713550aedc4b 100644 --- a/src/coreclr/inc/utilcode.h +++ b/src/coreclr/inc/utilcode.h @@ -10,14 +10,16 @@ #ifndef __UtilCode_h__ #define __UtilCode_h__ +#include +#include +#include +#include + #include "crtwrap.h" #include "winwrap.h" #include -#include -#include #include #include -#include #include "clrtypes.h" #include "safewrap.h" #include "volatile.h" @@ -29,12 +31,6 @@ #include "safemath.h" #include "new.hpp" -#ifdef PAL_STDCPP_COMPAT -#include -#else -#include "clr_std/type_traits" -#endif - #include "contract.h" #include @@ -224,7 +220,7 @@ typedef LPSTR LPUTF8; #define MAKE_UTF8PTR_FROMWIDE_NOTHROW(ptrname, widestr) \ CQuickBytes __qb##ptrname; \ int __l##ptrname = (int)u16_strlen(widestr); \ - LPUTF8 ptrname = 0; \ + LPUTF8 ptrname = NULL; \ if (__l##ptrname <= MAKE_MAX_LENGTH) { \ __l##ptrname = (int)((__l##ptrname + 1) * 2 * sizeof(char)); \ ptrname = (LPUTF8) __qb##ptrname.AllocNoThrow(__l##ptrname); \ @@ -240,12 +236,12 @@ typedef LPSTR LPUTF8; if (WszWideCharToMultiByte(CP_UTF8, 0, widestr, -1, ptrname, __lsize##ptrname, NULL, NULL) != 0) { \ ptrname[__l##ptrname] = 0; \ } else { \ - ptrname = 0; \ 
+ ptrname = NULL; \ } \ } \ } \ else { \ - ptrname = 0; \ + ptrname = NULL; \ } \ } \ } \ @@ -255,7 +251,7 @@ typedef LPSTR LPUTF8; #define MAKE_WIDEPTR_FROMUTF8N_NOTHROW(ptrname, utf8str, n8chrs) \ CQuickBytes __qb##ptrname; \ int __l##ptrname; \ - LPWSTR ptrname = 0; \ + LPWSTR ptrname = NULL; \ __l##ptrname = WszMultiByteToWideChar(CP_UTF8, 0, utf8str, n8chrs, 0, 0); \ if (__l##ptrname <= MAKE_MAX_LENGTH) { \ ptrname = (LPWSTR) __qb##ptrname.AllocNoThrow((__l##ptrname+1)*sizeof(WCHAR)); \ @@ -263,7 +259,7 @@ typedef LPSTR LPUTF8; if (WszMultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, utf8str, n8chrs, ptrname, __l##ptrname) != 0) { \ ptrname[__l##ptrname] = 0; \ } else { \ - ptrname = 0; \ + ptrname = NULL; \ } \ } \ } @@ -302,28 +298,6 @@ inline WCHAR* FormatInteger(WCHAR* str, size_t strCount, const char* fmt, I v) return str; } -//***************************************************************************** -// Placement new is used to new and object at an exact location. The pointer -// is simply returned to the caller without actually using the heap. The -// advantage here is that you cause the ctor() code for the object to be run. -// This is ideal for heaps of C++ objects that need to get init'd multiple times. 
-// Example: -// void *pMem = GetMemFromSomePlace(); -// Foo *p = new (pMem) Foo; -// DoSomething(p); -// p->~Foo(); -//***************************************************************************** -#ifndef __PLACEMENT_NEW_INLINE -#define __PLACEMENT_NEW_INLINE -inline void *__cdecl operator new(size_t, void *_P) -{ - LIMITED_METHOD_DAC_CONTRACT; - - return (_P); -} -#endif // __PLACEMENT_NEW_INLINE - - /********************************************************************************/ /* portability helpers */ @@ -1920,7 +1894,7 @@ class CHashTableAndData : public CHashTable ~CHashTableAndData() { WRAPPER_NO_CONTRACT; - if (m_pcEntries != NULL) + if (m_pcEntries != (TADDR)NULL) MemMgr::Free((BYTE*)m_pcEntries, MemMgr::RoundSize(m_iEntries * m_iEntrySize)); } @@ -2100,7 +2074,7 @@ int CHashTableAndData::Grow() // 1 if successful, 0 if not. int iCurSize; // Current size in bytes. int iEntries; // New # of entries. - _ASSERTE(m_pcEntries != NULL); + _ASSERTE(m_pcEntries != (TADDR)NULL); _ASSERTE(m_iFree == UINT32_MAX); // Compute the current size and new # of entries. @@ -3934,37 +3908,6 @@ inline T* InterlockedCompareExchangeT( return InterlockedCompareExchangeT(destination, exchange, static_cast(comparand)); } -// NULL pointer variants of the above to avoid having to cast NULL -// to the appropriate pointer type. -template -inline T* InterlockedExchangeT( - T* volatile * target, - int value) // When NULL is provided as argument. -{ - //STATIC_ASSERT(value == 0); - return InterlockedExchangeT(target, nullptr); -} - -template -inline T* InterlockedCompareExchangeT( - T* volatile * destination, - int exchange, // When NULL is provided as argument. - T* comparand) -{ - //STATIC_ASSERT(exchange == 0); - return InterlockedCompareExchangeT(destination, nullptr, comparand); -} - -template -inline T* InterlockedCompareExchangeT( - T* volatile * destination, - T* exchange, - int comparand) // When NULL is provided as argument. 
-{ - //STATIC_ASSERT(comparand == 0); - return InterlockedCompareExchangeT(destination, exchange, nullptr); -} - #undef InterlockedExchangePointer #define InterlockedExchangePointer Use_InterlockedExchangeT #undef InterlockedCompareExchangePointer diff --git a/src/coreclr/jit/alloc.cpp b/src/coreclr/jit/alloc.cpp index 7178066ab584c5..2fcb3f877418b9 100644 --- a/src/coreclr/jit/alloc.cpp +++ b/src/coreclr/jit/alloc.cpp @@ -156,7 +156,11 @@ void* ArenaAllocator::allocateHostMemory(size_t size, size_t* pActualSize) if (bypassHostAllocator()) { *pActualSize = size; - void* p = malloc(size); + if (size == 0) + { + size = 1; + } + void* p = malloc(size); if (p == nullptr) { NOMEM(); diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index e4e73adc58c6c4..ebcb101663a308 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -673,7 +673,7 @@ void Compiler::optAssertionInit(bool isLocalProp) // Local assertion prop keeps mappings from each local var to the assertions about that var. // optAssertionDep = - new (this, CMK_AssertionProp) JitExpandArray(getAllocator(CMK_AssertionProp), max(1, lvaCount)); + new (this, CMK_AssertionProp) JitExpandArray(getAllocator(CMK_AssertionProp), max(1u, lvaCount)); if (optCrossBlockLocalAssertionProp) { diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 5447fc34724c21..ca9ab73224d7a8 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -432,7 +432,7 @@ void CodeGen::genStackPointerAdjustment(ssize_t spDelta, regNumber tmpReg, bool* { // spDelta is negative in the prolog, positive in the epilog, but we always tell the unwind codes the positive // value. 
- ssize_t spDeltaAbs = abs(spDelta); + ssize_t spDeltaAbs = std::abs(spDelta); unsigned unwindSpDelta = (unsigned)spDeltaAbs; assert((ssize_t)unwindSpDelta == spDeltaAbs); // make sure that it fits in a unsigned diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 62fe40ed3c5876..5c733c5457d946 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -2815,6 +2815,12 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX * assigned location, in the function prolog. */ +// std::max isn't constexpr until C++14 and we're still on C++11 +constexpr size_t const_max(size_t a, size_t b) +{ + return a > b ? a : b; +} + #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function @@ -2908,7 +2914,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere bool circular; // true if this register participates in a circular dependency loop. bool hfaConflict; // arg is part of an HFA that will end up in the same register // but in a different slot (eg arg in s3 = v3.s[0], needs to end up in v3.s[3]) - } regArgTab[max(MAX_REG_ARG + 1, MAX_FLOAT_REG_ARG)] = {}; + } regArgTab[const_max(MAX_REG_ARG + 1, MAX_FLOAT_REG_ARG)] = {}; unsigned varNum; LclVarDsc* varDsc; diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index ef45fa5ca81ab8..10dc0ef5f07e5d 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5659,7 +5659,7 @@ void Compiler::SplitTreesRandomly() rng.Init(info.compMethodHash() ^ 0x077cc4d4); // Splitting creates a lot of new locals. Set a limit on how many we end up creating here. 
- unsigned maxLvaCount = max(lvaCount * 2, 50000); + unsigned maxLvaCount = max(lvaCount * 2, 50000u); for (BasicBlock* block : Blocks()) { @@ -5721,7 +5721,7 @@ void Compiler::SplitTreesRandomly() void Compiler::SplitTreesRemoveCommas() { // Splitting creates a lot of new locals. Set a limit on how many we end up creating here. - unsigned maxLvaCount = max(lvaCount * 2, 50000); + unsigned maxLvaCount = max(lvaCount * 2, 50000u); for (BasicBlock* block : Blocks()) { @@ -7532,7 +7532,7 @@ void Compiler::compInitVarScopeMap() compVarScopeMap = new (getAllocator()) VarNumToScopeDscMap(getAllocator()); // 599 prime to limit huge allocations; for ex: duplicated scopes on single var. - compVarScopeMap->Reallocate(min(info.compVarScopesCount, 599)); + compVarScopeMap->Reallocate(min(info.compVarScopesCount, 599u)); for (unsigned i = 0; i < info.compVarScopesCount; ++i) { diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 539629086f747a..30fedcd9cd56cc 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -350,7 +350,7 @@ class SsaDefArray void GrowArray(CompAllocator alloc) { unsigned oldSize = m_arraySize; - unsigned newSize = max(2, oldSize * 2); + unsigned newSize = max(2u, oldSize * 2); T* newArray = alloc.allocate(newSize); diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 336c95f7527616..b1329e88b0436b 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -2600,7 +2600,8 @@ inline #else int outGoingArgSpaceSize = 0; #endif - varOffset = outGoingArgSpaceSize + max(-varNum * TARGET_POINTER_SIZE, (int)lvaGetMaxSpillTempSize()); + varOffset = + outGoingArgSpaceSize + max(-varNum * (int)TARGET_POINTER_SIZE, (int)lvaGetMaxSpillTempSize()); } else { diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp index 6b6b96b0265460..cabad877f83835 100644 --- a/src/coreclr/jit/emit.cpp +++ b/src/coreclr/jit/emit.cpp @@ -6243,10 +6243,10 @@ void 
emitter::emitLoopAlignAdjustments() { #if defined(TARGET_XARCH) - unsigned newPadding = min(paddingToAdj, MAX_ENCODED_SIZE); + unsigned newPadding = min(paddingToAdj, (unsigned)MAX_ENCODED_SIZE); alignInstrToAdj->idCodeSize(newPadding); #elif defined(TARGET_ARM64) - unsigned newPadding = min(paddingToAdj, INSTR_ENCODED_SIZE); + unsigned newPadding = min(paddingToAdj, (unsigned)INSTR_ENCODED_SIZE); if (newPadding == 0) { alignInstrToAdj->idInsOpt(INS_OPTS_NONE); diff --git a/src/coreclr/jit/emitarm.cpp b/src/coreclr/jit/emitarm.cpp index 4dd1d470887a20..5a20f8a1f940ad 100644 --- a/src/coreclr/jit/emitarm.cpp +++ b/src/coreclr/jit/emitarm.cpp @@ -6504,7 +6504,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) code |= (immHi << 16); code |= immLo; - disp = abs(disp); + disp = std::abs(disp); assert((disp & 0x00fffffe) == disp); callInstrSize = SafeCvtAssert(emitOutput_Thumb2Instr(dst, code)); diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp index 54e2fcb503589b..181b9706e41611 100644 --- a/src/coreclr/jit/emitarm64.cpp +++ b/src/coreclr/jit/emitarm64.cpp @@ -8964,7 +8964,7 @@ void emitter::emitIns_Call(EmitCallType callType, // Our stack level should be always greater than the bytes of arguments we push. Just // a sanity test. - assert((unsigned)abs(argSize) <= codeGen->genStackLevel); + assert((unsigned)std::abs(argSize) <= codeGen->genStackLevel); // Trim out any callee-trashed registers from the live set. 
regMaskTP savedSet = emitGetGCRegsSavedOrModified(methHnd); diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 433a512469816a..9a98ea7f619cd2 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -2467,7 +2467,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // weight_t targetWeight = target->bbWeight; weight_t blockWeight = block->bbWeight; - target->setBBProfileWeight(max(0, targetWeight - blockWeight)); + target->setBBProfileWeight(max(0.0, targetWeight - blockWeight)); JITDUMP("Decreased " FMT_BB " profile weight from " FMT_WT " to " FMT_WT "\n", target->bbNum, targetWeight, target->bbWeight); } @@ -3047,7 +3047,7 @@ bool Compiler::fgOptimizeSwitchJumps() blockToTargetEdge->setEdgeWeights(blockToTargetWeight, blockToTargetWeight, dominantTarget); blockToTargetEdge->setLikelihood(fraction); blockToNewBlockEdge->setEdgeWeights(blockToNewBlockWeight, blockToNewBlockWeight, block); - blockToNewBlockEdge->setLikelihood(max(0, 1.0 - fraction)); + blockToNewBlockEdge->setLikelihood(max(0.0, 1.0 - fraction)); // There may be other switch cases that lead to this same block, but there's just // one edge in the flowgraph. 
So we need to subtract off the profile data that now flows diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index 77803454f0cfde..70b7ab6448b30f 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -1353,7 +1353,7 @@ void ProfileSynthesis::GaussSeidelSolver() for (unsigned j = m_dfsTree->GetPostOrderCount(); j != 0; j--) { BasicBlock* const block = dfs->GetPostOrder(j - 1); - block->setBBProfileWeight(max(0, countVector[block->bbNum])); + block->setBBProfileWeight(max(0.0, countVector[block->bbNum])); } m_approximate = !converged || (m_cappedCyclicProbabilities > 0); diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index ee41985df53bc8..5125c3ad42fcfe 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -5745,7 +5745,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) { // Store to an enregistered local. costEx = op1->GetCostEx(); - costSz = max(3, op1->GetCostSz()); // 3 is an estimate for a reg-reg move. + costSz = max(3, (int)op1->GetCostSz()); // 3 is an estimate for a reg-reg move. 
goto DONE; } @@ -26906,7 +26906,7 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, assert(varTypeIsValidHfaType(hfaType)); // Note that the retail build issues a warning about a potential divsion by zero without this "max", - unsigned elemSize = max(1, genTypeSize(hfaType)); + unsigned elemSize = max(1u, genTypeSize(hfaType)); // The size of this struct should be evenly divisible by elemSize assert((structSize % elemSize) == 0); diff --git a/src/coreclr/jit/hashbv.cpp b/src/coreclr/jit/hashbv.cpp index 203219a7ec20ca..87acddf099bcb9 100644 --- a/src/coreclr/jit/hashbv.cpp +++ b/src/coreclr/jit/hashbv.cpp @@ -824,7 +824,7 @@ void hashBv::setAll(indexType numToSet) for (unsigned int i = 0; i < numToSet; i += BITS_PER_NODE) { hashBvNode* node = getOrAddNodeForIndex(i); - indexType bits_to_set = min(BITS_PER_NODE, numToSet - i); + indexType bits_to_set = min((indexType)BITS_PER_NODE, numToSet - i); node->setLowest(bits_to_set); } } diff --git a/src/coreclr/jit/inline.h b/src/coreclr/jit/inline.h index a87b2de79c058e..8c1cb56124ad2e 100644 --- a/src/coreclr/jit/inline.h +++ b/src/coreclr/jit/inline.h @@ -1049,7 +1049,7 @@ class InlineStrategy enum { ALWAYS_INLINE_SIZE = 16, - IMPLEMENTATION_MAX_INLINE_SIZE = _UI16_MAX, + IMPLEMENTATION_MAX_INLINE_SIZE = UINT16_MAX, IMPLEMENTATION_MAX_INLINE_DEPTH = 1000 }; diff --git a/src/coreclr/jit/jit.h b/src/coreclr/jit/jit.h index 754e6b27f44416..1094740a8e25d0 100644 --- a/src/coreclr/jit/jit.h +++ b/src/coreclr/jit/jit.h @@ -702,19 +702,19 @@ inline unsigned int roundUp(unsigned size, unsigned mult) inline unsigned int unsigned_abs(int x) { - return ((unsigned int)abs(x)); + return ((unsigned int)std::abs(x)); } #ifdef TARGET_64BIT inline size_t unsigned_abs(ssize_t x) { - return ((size_t)abs((__int64)x)); + return ((size_t)std::abs((__int64)x)); } #ifdef __APPLE__ inline size_t unsigned_abs(__int64 x) { - return ((size_t)abs(x)); + return ((size_t)std::abs(x)); } #endif // __APPLE__ #endif // TARGET_64BIT diff 
--git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index fc12d55c35a468..573191fecb38c3 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -1554,7 +1554,7 @@ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum) // Double the table size. For stress, we could use +1. Note that if the table isn't allocated // yet, such as when we add an EH region for synchronized methods that don't already have one, // we start at zero, so we need to make sure the new table has at least one entry. - unsigned newHndBBtabAllocCount = max(1, compHndBBtabAllocCount * 2); + unsigned newHndBBtabAllocCount = max(1u, compHndBBtabAllocCount * 2); noway_assert(compHndBBtabAllocCount < newHndBBtabAllocCount); // check for overflow if (newHndBBtabAllocCount > MAX_XCPTN_INDEX) diff --git a/src/coreclr/jit/jitpch.h b/src/coreclr/jit/jitpch.h index 63f12133f61bff..6e9a0a6f800230 100644 --- a/src/coreclr/jit/jitpch.h +++ b/src/coreclr/jit/jitpch.h @@ -11,7 +11,15 @@ #include #include #include +#include #include +#ifdef HOST_WINDOWS +#include +#endif +#include + +using std::max; +using std::min; // Don't allow using the windows.h #defines for the BitScan* APIs. 
Using the #defines means our // `BitOperations::BitScan*` functions have their name mapped, which is confusing and messes up diff --git a/src/coreclr/jit/jitstd/list.h b/src/coreclr/jit/jitstd/list.h index f00c1596452556..77b5f893bea101 100644 --- a/src/coreclr/jit/jitstd/list.h +++ b/src/coreclr/jit/jitstd/list.h @@ -14,7 +14,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #include "iterator.h" #include "functional.h" -#include "clr_std/utility" +#include namespace jitstd { diff --git a/src/coreclr/jit/jitstd/utility.h b/src/coreclr/jit/jitstd/utility.h index 624bb7bc7c39a3..0df302a9352a58 100644 --- a/src/coreclr/jit/jitstd/utility.h +++ b/src/coreclr/jit/jitstd/utility.h @@ -5,7 +5,7 @@ #pragma once -#include "clr_std/type_traits" +#include namespace jitstd { diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 2753ce1978e413..0de4f52eee7560 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -622,7 +622,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un const unsigned argSigLen = info.compMethodInfo->args.numArgs; // We will process at most takeArgs arguments from the signature after skipping skipArgs arguments - const int64_t numUserArgs = min(takeArgs, (argSigLen - (int64_t)skipArgs)); + const int64_t numUserArgs = min((int64_t)takeArgs, (argSigLen - (int64_t)skipArgs)); // If there are no user args or less than skipArgs args, return here since there's no work to do. if (numUserArgs <= 0) @@ -2000,7 +2000,7 @@ bool Compiler::StructPromotionHelper::CanPromoteStructType(CORINFO_CLASS_HANDLE #if defined(FEATURE_SIMD) // getMaxVectorByteLength() represents the size of the largest primitive type that we can struct promote. 
const unsigned maxSize = - MAX_NumOfFieldsInPromotableStruct * max(compiler->getMaxVectorByteLength(), sizeof(double)); + MAX_NumOfFieldsInPromotableStruct * max(compiler->getMaxVectorByteLength(), (uint32_t)sizeof(double)); #else // !FEATURE_SIMD // sizeof(double) represents the size of the largest primitive type that we can struct promote. const unsigned maxSize = MAX_NumOfFieldsInPromotableStruct * sizeof(double); diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index 4c5ffe247634a4..2001f396c6f225 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -2073,7 +2073,7 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex FlowEdge* const falseEdge = fgAddRefPred(fastPreheader, condLast); condLast->SetFalseEdge(falseEdge); FlowEdge* const trueEdge = condLast->GetTrueEdge(); - falseEdge->setLikelihood(max(0, 1.0 - trueEdge->getLikelihood())); + falseEdge->setLikelihood(max(0.0, 1.0 - trueEdge->getLikelihood())); } //------------------------------------------------------------------------- diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index e5e7aa9dbd301a..5d3a504175ee9e 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -7153,7 +7153,7 @@ bool Lowering::TryLowerConstIntDivOrMod(GenTree* node, GenTree** nextNode) } size_t absDivisorValue = - (divisorValue == SSIZE_T_MIN) ? static_cast(divisorValue) : static_cast(abs(divisorValue)); + (divisorValue == SSIZE_T_MIN) ? 
static_cast(divisorValue) : static_cast(std::abs(divisorValue)); if (!isPow2(absDivisorValue)) { @@ -8950,7 +8950,7 @@ bool Lowering::OptimizeForLdp(GenTreeIndir* ind) JITDUMP("[%06u] and [%06u] are indirs off the same base with offsets +%03u and +%03u\n", Compiler::dspTreeID(ind), Compiler::dspTreeID(prevIndir), (unsigned)offs, (unsigned)prev.Offset); - if (abs(offs - prev.Offset) == genTypeSize(ind)) + if (std::abs(offs - prev.Offset) == genTypeSize(ind)) { JITDUMP(" ..and they are amenable to ldp optimization\n"); if (TryMakeIndirsAdjacent(prevIndir, ind)) diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 6bd3ede0cc4951..2144869b1ce40e 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -2934,7 +2934,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument - intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG); + intArgRegNum = min(intArgRegNum + size, (unsigned)MAX_REG_ARG); #endif // WINDOWS_AMD64_ABI // No supported architecture supports partial structs using float registers. 
assert(fltArgRegNum <= MAX_FLOAT_REG_ARG); @@ -2945,7 +2945,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call intArgRegNum += size; #ifdef WINDOWS_AMD64_ABI - fltArgRegNum = min(fltArgRegNum + size, MAX_FLOAT_REG_ARG); + fltArgRegNum = min(fltArgRegNum + size, (unsigned)MAX_FLOAT_REG_ARG); #endif // WINDOWS_AMD64_ABI } } @@ -3019,7 +3019,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call unsigned CallArgs::OutgoingArgsStackSize() const { unsigned aligned = Compiler::GetOutgoingArgByteSize(m_nextStackByteOffset); - return max(aligned, MIN_ARG_AREA_FOR_CALL); + return max(aligned, (unsigned)MIN_ARG_AREA_FOR_CALL); } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/targetarm64.cpp b/src/coreclr/jit/targetarm64.cpp index cef1e95780695b..a0e4dfb5c3cf43 100644 --- a/src/coreclr/jit/targetarm64.cpp +++ b/src/coreclr/jit/targetarm64.cpp @@ -86,10 +86,11 @@ ABIPassingInformation Arm64Classifier::Classify(Compiler* comp, } else { - unsigned alignment = compAppleArm64Abi() ? min(elemSize, TARGET_POINTER_SIZE) : TARGET_POINTER_SIZE; - m_stackArgSize = roundUp(m_stackArgSize, alignment); - info = ABIPassingInformation::FromSegment(comp, ABIPassingSegment::OnStack(m_stackArgSize, 0, - structLayout->GetSize())); + unsigned alignment = + compAppleArm64Abi() ? min(elemSize, (unsigned)TARGET_POINTER_SIZE) : TARGET_POINTER_SIZE; + m_stackArgSize = roundUp(m_stackArgSize, alignment); + info = ABIPassingInformation::FromSegment(comp, ABIPassingSegment::OnStack(m_stackArgSize, 0, + structLayout->GetSize())); m_stackArgSize += roundUp(structLayout->GetSize(), alignment); // After passing any float value on the stack, we should not enregister more float values. 
m_floatRegs.Clear(); diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp index 23b8d0000b800d..c3234e5524deaa 100644 --- a/src/coreclr/jit/utils.cpp +++ b/src/coreclr/jit/utils.cpp @@ -4050,7 +4050,7 @@ T GetSignedMagic(T denom, int* shift /*out*/) UT t; T result_magic; - absDenom = abs(denom); + absDenom = std::abs(denom); t = two_nminus1 + (UT(denom) >> bits_minus_1); absNc = t - 1 - (t % absDenom); // absolute value of nc p = bits_minus_1; // initialize p diff --git a/src/coreclr/jit/utils.h b/src/coreclr/jit/utils.h index 39001f32215ef5..6a0362bbbf0678 100644 --- a/src/coreclr/jit/utils.h +++ b/src/coreclr/jit/utils.h @@ -16,7 +16,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #define _UTILS_H_ #include "safemath.h" -#include "clr_std/type_traits" +#include #include "iallocator.h" #include "hostallocator.h" #include "cycletimer.h" diff --git a/src/coreclr/md/ceefilegen/blobfetcher.cpp b/src/coreclr/md/ceefilegen/blobfetcher.cpp index 7a110eeeeaf594..f08908147de756 100644 --- a/src/coreclr/md/ceefilegen/blobfetcher.cpp +++ b/src/coreclr/md/ceefilegen/blobfetcher.cpp @@ -211,7 +211,7 @@ char* CBlobFetcher::MakeNewBlock(unsigned len, unsigned align) { pChRet = m_pIndex[m_nIndexUsed].MakeNewBlock(len + pad, 0); // Did we run out of memory? 
- if (pChRet == NULL && m_pIndex[m_nIndexUsed].GetDataLen() == NULL) + if (pChRet == NULL && m_pIndex[m_nIndexUsed].GetDataLen() == 0) return NULL; if (pChRet == NULL) { diff --git a/src/coreclr/md/ceefilegen/stdafx.h b/src/coreclr/md/ceefilegen/stdafx.h index 36f42f95aa529f..4026a47f14107d 100644 --- a/src/coreclr/md/ceefilegen/stdafx.h +++ b/src/coreclr/md/ceefilegen/stdafx.h @@ -17,6 +17,7 @@ #include // for qsort #include #include +#include #include #include @@ -27,3 +28,6 @@ #include "ceegen.h" #include "ceesectionstring.h" + +using std::min; +using std::max; diff --git a/src/coreclr/md/compiler/import.cpp b/src/coreclr/md/compiler/import.cpp index 9c7d4c5a01b8c2..060d3261af6ab0 100644 --- a/src/coreclr/md/compiler/import.cpp +++ b/src/coreclr/md/compiler/import.cpp @@ -2172,7 +2172,7 @@ STDMETHODIMP RegMeta::GetUserString( // S_OK or error. memcpy( wszString, userString.GetDataPointer(), - min(userString.GetSize(), cbStringSize)); + min((ULONG)userString.GetSize(), cbStringSize)); if (cbStringSize < userString.GetSize()) { if ((wszString != NULL) && (cchStringSize > 0)) diff --git a/src/coreclr/md/compiler/stdafx.h b/src/coreclr/md/compiler/stdafx.h index 56e29559cafe00..b8ae250e008cda 100644 --- a/src/coreclr/md/compiler/stdafx.h +++ b/src/coreclr/md/compiler/stdafx.h @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -25,4 +26,7 @@ #include "utsem.h" +using std::min; +using std::max; + #endif // __STDAFX_H_ diff --git a/src/coreclr/md/enc/rwutil.cpp b/src/coreclr/md/enc/rwutil.cpp index 69ad55f571c3e2..a828249fea6b26 100644 --- a/src/coreclr/md/enc/rwutil.cpp +++ b/src/coreclr/md/enc/rwutil.cpp @@ -230,7 +230,7 @@ HRESULT HENUMInternal::EnumWithCount( } // we can only fill the minimum of what caller asked for or what we have left - cTokens = min ( (pEnum->u.m_ulEnd - pEnum->u.m_ulCur), cMax); + cTokens = min ( (ULONG)(pEnum->u.m_ulEnd - pEnum->u.m_ulCur), cMax); if (pEnum->m_EnumType == MDSimpleEnum) { @@ -296,7 +296,7 @@ HRESULT 
HENUMInternal::EnumWithCount( _ASSERTE(! ((pEnum->u.m_ulEnd - pEnum->u.m_ulCur) % 2) ); // we can only fill the minimum of what caller asked for or what we have left - cTokens = min ( (pEnum->u.m_ulEnd - pEnum->u.m_ulCur), cMax * 2); + cTokens = min ( (ULONG)(pEnum->u.m_ulEnd - pEnum->u.m_ulCur), cMax * 2); // get the embedded dynamic array TOKENLIST *pdalist = (TOKENLIST *)&(pEnum->m_cursor); diff --git a/src/coreclr/md/enc/stdafx.h b/src/coreclr/md/enc/stdafx.h index e1b3962a14e602..10d1cf0f32d69a 100644 --- a/src/coreclr/md/enc/stdafx.h +++ b/src/coreclr/md/enc/stdafx.h @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -26,4 +27,7 @@ #include "utsem.h" +using std::min; +using std::max; + #endif // __STDAFX_H__ diff --git a/src/coreclr/md/runtime/stdafx.h b/src/coreclr/md/runtime/stdafx.h index aca84b431773e2..957cbd7e006d0a 100644 --- a/src/coreclr/md/runtime/stdafx.h +++ b/src/coreclr/md/runtime/stdafx.h @@ -13,6 +13,7 @@ #include #include +#include #include #include diff --git a/src/coreclr/nativeaot/CMakeLists.txt b/src/coreclr/nativeaot/CMakeLists.txt index 4b10cefe57387a..74ee982ab98058 100644 --- a/src/coreclr/nativeaot/CMakeLists.txt +++ b/src/coreclr/nativeaot/CMakeLists.txt @@ -1,5 +1,6 @@ if(WIN32) add_definitions(-DUNICODE=1) + add_compile_definitions(NOMINMAX) endif (WIN32) if(MSVC) diff --git a/src/coreclr/nativeaot/Runtime/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/CMakeLists.txt index c1bb58caa30b9a..f070214054f7f9 100644 --- a/src/coreclr/nativeaot/Runtime/CMakeLists.txt +++ b/src/coreclr/nativeaot/Runtime/CMakeLists.txt @@ -131,7 +131,6 @@ else() include_directories(unix) # sal.h, pshpack/poppack.h - add_definitions(-DPAL_STDCPP_COMPAT) include_directories(../../pal/inc/rt) include(CheckIncludeFiles) diff --git a/src/coreclr/nativeaot/Runtime/CommonMacros.h b/src/coreclr/nativeaot/Runtime/CommonMacros.h index 27f14f09eae4ff..a2b4183fb1ecd5 100644 --- a/src/coreclr/nativeaot/Runtime/CommonMacros.h +++ 
b/src/coreclr/nativeaot/Runtime/CommonMacros.h @@ -89,16 +89,6 @@ inline bool IS_ALIGNED(T* val, uintptr_t alignment); #define ZeroMemory(_dst, _size) memset((_dst), 0, (_size)) #endif -//------------------------------------------------------------------------------------------------- -// min/max - -#ifndef min -#define min(_a, _b) ((_a) < (_b) ? (_a) : (_b)) -#endif -#ifndef max -#define max(_a, _b) ((_a) < (_b) ? (_b) : (_a)) -#endif - #endif // !DACCESS_COMPILE //------------------------------------------------------------------------------------------------- diff --git a/src/coreclr/nativeaot/Runtime/threadstore.cpp b/src/coreclr/nativeaot/Runtime/threadstore.cpp index 259d07e7ab0bb7..d94ef850a1c9bb 100644 --- a/src/coreclr/nativeaot/Runtime/threadstore.cpp +++ b/src/coreclr/nativeaot/Runtime/threadstore.cpp @@ -230,7 +230,7 @@ void SpinWait(int iteration, int usecLimit) int64_t ticksPerSecond = PalQueryPerformanceFrequency(); int64_t endTicks = startTicks + (usecLimit * ticksPerSecond) / 1000000; - int l = min((unsigned)iteration, 30); + int l = iteration >= 0 ? 
min(iteration, 30): 30; for (int i = 0; i < l; i++) { for (int j = 0; j < (1 << i); j++) diff --git a/src/coreclr/pal/CMakeLists.txt b/src/coreclr/pal/CMakeLists.txt index 4509e9fc0f8b5b..9213941ba6da01 100644 --- a/src/coreclr/pal/CMakeLists.txt +++ b/src/coreclr/pal/CMakeLists.txt @@ -7,7 +7,6 @@ include_directories(${COREPAL_SOURCE_DIR}/src) include_directories(${COREPAL_SOURCE_DIR}/../inc) add_compile_options(-fexceptions) -add_definitions(-DUSE_STL) add_subdirectory(src) add_subdirectory(tests) diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 7d27a1109b4507..9a1d25515bb32e 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -33,7 +33,6 @@ Module Name: #ifndef __PAL_H__ #define __PAL_H__ -#ifdef PAL_STDCPP_COMPAT #include #include #include @@ -42,12 +41,22 @@ Module Name: #include #include #include +#include #include #include #include #include #include #include +#include + +#ifdef __cplusplus +extern "C++" +{ + +#include + +} #endif #ifdef __cplusplus @@ -175,77 +184,13 @@ extern bool g_arm64_atomics_present; #define __has_cpp_attribute(x) (0) #endif -#ifndef PAL_STDCPP_COMPAT - -#if __GNUC__ - -typedef __builtin_va_list va_list; - -/* We should consider if the va_arg definition here is actually necessary. - Could we use the standard va_arg definition? 
*/ - -#define va_start __builtin_va_start -#define va_arg __builtin_va_arg - -#define va_copy __builtin_va_copy -#define va_end __builtin_va_end - -#define VOID void - -#else // __GNUC__ - -typedef char * va_list; - -#define _INTSIZEOF(n) ( (sizeof(n) + sizeof(int) - 1) & ~(sizeof(int) - 1) ) - -#if _MSC_VER >= 1400 - -#ifdef __cplusplus -#define _ADDRESSOF(v) ( &reinterpret_cast(v) ) -#else -#define _ADDRESSOF(v) ( &(v) ) -#endif - -#define _crt_va_start(ap,v) ( ap = (va_list)_ADDRESSOF(v) + _INTSIZEOF(v) ) -#define _crt_va_arg(ap,t) ( *(t *)((ap += _INTSIZEOF(t)) - _INTSIZEOF(t)) ) -#define _crt_va_end(ap) ( ap = (va_list)0 ) - -#define va_start _crt_va_start -#define va_arg _crt_va_arg -#define va_end _crt_va_end - -#else // _MSC_VER - -#define va_start(ap,v) (ap = (va_list) (&(v)) + _INTSIZEOF(v)) -#define va_arg(ap,t) ( *(t *)((ap += _INTSIZEOF(t)) - _INTSIZEOF(t)) ) -#define va_end(ap) - -#endif // _MSC_VER - -#define va_copy(dest,src) (dest = src) - -#endif // __GNUC__ - -#define CHAR_BIT 8 - -#define SCHAR_MIN (-128) -#define SCHAR_MAX 127 -#define UCHAR_MAX 0xff - -#define SHRT_MIN (-32768) -#define SHRT_MAX 32767 -#define USHRT_MAX 0xffff - -#define INT_MIN (-2147483647 - 1) -#define INT_MAX 2147483647 -#define UINT_MAX 0xffffffff - -// LONG_MIN, LONG_MAX, ULONG_MAX -- use INT32_MIN etc. instead. 
- -#define FLT_MAX 3.402823466e+38F -#define DBL_MAX 1.7976931348623157e+308 - -#endif // !PAL_STDCPP_COMPAT +#ifndef FALLTHROUGH +#if __has_cpp_attribute(fallthrough) +#define FALLTHROUGH [[fallthrough]] +#else // __has_cpp_attribute(fallthrough) +#define FALLTHROUGH +#endif // __has_cpp_attribute(fallthrough) +#endif // FALLTHROUGH /******************* PAL-Specific Entrypoints *****************************/ @@ -256,44 +201,6 @@ BOOL PALAPI PAL_IsDebuggerPresent(); -/* minimum signed 64 bit value */ -#define _I64_MIN (I64(-9223372036854775807) - 1) -/* maximum signed 64 bit value */ -#define _I64_MAX I64(9223372036854775807) -/* maximum unsigned 64 bit value */ -#define _UI64_MAX UI64(0xffffffffffffffff) - -#define _I8_MAX SCHAR_MAX -#define _I8_MIN SCHAR_MIN -#define _I16_MAX SHRT_MAX -#define _I16_MIN SHRT_MIN -#define _I32_MAX INT_MAX -#define _I32_MIN INT_MIN -#define _UI8_MAX UCHAR_MAX -#define _UI8_MIN UCHAR_MIN -#define _UI16_MAX USHRT_MAX -#define _UI16_MIN USHRT_MIN -#define _UI32_MAX UINT_MAX -#define _UI32_MIN UINT_MIN - -#undef NULL - -#if defined(__cplusplus) -#define NULL 0 -#else -#define NULL ((PVOID)0) -#endif - -#if defined(PAL_STDCPP_COMPAT) && !defined(__cplusplus) -#define nullptr NULL -#endif // defined(PAL_STDCPP_COMPAT) && !defined(__cplusplus) - -#ifndef PAL_STDCPP_COMPAT - -typedef __int64 time_t; -#define _TIME_T_DEFINED -#endif // !PAL_STDCPP_COMPAT - #define DLL_PROCESS_ATTACH 1 #define DLL_THREAD_ATTACH 2 #define DLL_THREAD_DETACH 3 @@ -3934,31 +3841,6 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); #endif //FEATURE_PAL_ANSI /******************* C Runtime Entrypoints *******************************/ -/* Some C runtime functions needs to be reimplemented by the PAL. 
- To avoid name collisions, those functions have been renamed using - defines */ -#ifndef PAL_STDCPP_COMPAT -#define exit PAL_exit -#define realloc PAL_realloc -#define rand PAL_rand -#define time PAL_time -#define getenv PAL_getenv -#define qsort PAL_qsort -#define bsearch PAL_bsearch -#define malloc PAL_malloc -#define free PAL_free - -#ifdef HOST_AMD64 -#define _mm_getcsr PAL__mm_getcsr -#define _mm_setcsr PAL__mm_setcsr -#endif // HOST_AMD64 - -// Forward declare functions that are in header files we can't include yet -int printf(const char *, ...); -int vprintf(const char *, va_list); - -#endif // !PAL_STDCPP_COMPAT - #ifndef _CONST_RETURN #ifdef __cplusplus #define _CONST_RETURN const @@ -3971,134 +3853,16 @@ int vprintf(const char *, va_list); /* For backwards compatibility */ #define _WConst_return _CONST_RETURN -#define EOF (-1) - -typedef int errno_t; - -#if defined(__WINT_TYPE__) -typedef __WINT_TYPE__ wint_t; -#else -typedef unsigned int wint_t; -#endif - -#ifndef PAL_STDCPP_COMPAT -PALIMPORT void * __cdecl memcpy(void *, const void *, size_t); -PALIMPORT int __cdecl memcmp(const void *, const void *, size_t); -PALIMPORT void * __cdecl memset(void *, int, size_t); -PALIMPORT void * __cdecl memmove(void *, const void *, size_t); -PALIMPORT void * __cdecl memchr(const void *, int, size_t); -PALIMPORT long long int __cdecl atoll(const char *) MATH_THROW_DECL; -PALIMPORT size_t __cdecl strlen(const char *); -PALIMPORT int __cdecl strcmp(const char*, const char *); -PALIMPORT int __cdecl strncmp(const char*, const char *, size_t); -PALIMPORT int __cdecl strncasecmp(const char *, const char *, size_t); -PALIMPORT char * __cdecl strcat(char *, const char *); -PALIMPORT char * __cdecl strncat(char *, const char *, size_t); -PALIMPORT char * __cdecl strcpy(char *, const char *); -PALIMPORT char * __cdecl strncpy(char *, const char *, size_t); -PALIMPORT char * __cdecl strchr(const char *, int); -PALIMPORT char * __cdecl strrchr(const char *, int); -PALIMPORT 
char * __cdecl strpbrk(const char *, const char *); -PALIMPORT char * __cdecl strstr(const char *, const char *); -PALIMPORT char * __cdecl strtok_r(char *, const char *, char **); -PALIMPORT char * __cdecl strdup(const char*); -PALIMPORT int __cdecl atoi(const char *); -PALIMPORT unsigned long __cdecl strtoul(const char *, char **, int); -PALIMPORT ULONGLONG __cdecl strtoull(const char *, char **, int); -PALIMPORT double __cdecl atof(const char *); -PALIMPORT double __cdecl strtod(const char *, char **); -PALIMPORT size_t strnlen(const char *, size_t); -PALIMPORT int __cdecl isprint(int); -PALIMPORT int __cdecl isspace(int); -PALIMPORT int __cdecl isalpha(int); -PALIMPORT int __cdecl isalnum(int); -PALIMPORT int __cdecl isdigit(int); -PALIMPORT int __cdecl isxdigit(int); -PALIMPORT int __cdecl tolower(int); -PALIMPORT int __cdecl toupper(int); -PALIMPORT int __cdecl iswalpha(wint_t); -PALIMPORT int __cdecl iswdigit(wint_t); -PALIMPORT int __cdecl iswupper(wint_t); -PALIMPORT int __cdecl iswprint(wint_t); -PALIMPORT int __cdecl iswspace(wint_t); -PALIMPORT int __cdecl iswxdigit(wint_t); -PALIMPORT wint_t __cdecl towupper(wint_t); -PALIMPORT wint_t __cdecl towlower(wint_t); -PALIMPORT int remove(const char*); - -#define SEEK_SET 0 -#define SEEK_CUR 1 -#define SEEK_END 2 - -/* Locale categories */ -#define LC_ALL 0 -#define LC_COLLATE 1 -#define LC_CTYPE 2 -#define LC_MONETARY 3 -#define LC_NUMERIC 4 -#define LC_TIME 5 - -#define _IOFBF 0 /* setvbuf should set fully buffered */ -#define _IOLBF 1 /* setvbuf should set line buffered */ -#define _IONBF 2 /* setvbuf should set unbuffered */ - -struct _FILE; - -#ifdef DEFINE_DUMMY_FILE_TYPE -#define FILE _PAL_FILE -struct _PAL_FILE; -#else -typedef _FILE FILE; -#endif // DEFINE_DUMMY_FILE_TYPE - -PALIMPORT int __cdecl fclose(FILE *); -PALIMPORT int __cdecl fflush(FILE *); -PALIMPORT size_t __cdecl fwrite(const void *, size_t, size_t, FILE *); -PALIMPORT size_t __cdecl fread(void *, size_t, size_t, FILE *); -PALIMPORT char 
* __cdecl fgets(char *, int, FILE *); -PALIMPORT int __cdecl fputs(const char *, FILE *); -PALIMPORT int __cdecl fprintf(FILE *, const char *, ...); -PALIMPORT int __cdecl vfprintf(FILE *, const char *, va_list); -PALIMPORT int __cdecl fseek(FILE *, LONG, int); -PALIMPORT LONG __cdecl ftell(FILE *); -PALIMPORT int __cdecl ferror(FILE *); -PALIMPORT FILE * __cdecl fopen(const char *, const char *); -PALIMPORT int __cdecl setvbuf(FILE *stream, char *, int, size_t); - -// We need a PAL shim for errno and the standard streams as it's not possible to replicate these definition from the standard library -// in all cases. Instead, we shim it and implement the PAL function where we can include the standard headers. -// When we allow people to include the standard headers, then we can remove this. - -PALIMPORT DLLEXPORT int * __cdecl PAL_errno(); -#define errno (*PAL_errno()) - -// Only provide a prototype for the PAL forwarders for the standard streams if we are not including the standard headers. 
-#ifndef DEFINE_DUMMY_FILE_TYPE - -extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stdout(); -extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stdin(); -extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stderr(); -#define stdout PAL_stdout() -#define stdin PAL_stdin() -#define stderr PAL_stderr() - -#endif - -#ifdef DEFINE_DUMMY_FILE_TYPE -#undef FILE -#endif -#endif // PAL_STDCPP_COMPAT - /* _TRUNCATE */ #if !defined(_TRUNCATE) #define _TRUNCATE ((size_t)-1) #endif +// errno_t is only defined when the Secure CRT Extensions library is available (which no standard library that we build with implements anyway) +typedef int errno_t; + PALIMPORT DLLEXPORT errno_t __cdecl memcpy_s(void *, size_t, const void *, size_t) THROW_DECL; PALIMPORT errno_t __cdecl memmove_s(void *, size_t, const void *, size_t); -PALIMPORT DLLEXPORT int __cdecl strcasecmp(const char *, const char *); -PALIMPORT char * __cdecl _gcvt_s(char *, int, double, int); -PALIMPORT int __cdecl __iscsym(int); PALIMPORT DLLEXPORT int __cdecl _wcsicmp(const WCHAR *, const WCHAR*); PALIMPORT int __cdecl _wcsnicmp(const WCHAR *, const WCHAR *, size_t); PALIMPORT DLLEXPORT int __cdecl _vsnprintf_s(char *, size_t, size_t, const char *, va_list); @@ -4125,10 +3889,7 @@ PALIMPORT DLLEXPORT double __cdecl PAL_wcstod(const WCHAR *, WCHAR **); PALIMPORT errno_t __cdecl _wcslwr_s(WCHAR *, size_t sz); PALIMPORT DLLEXPORT errno_t __cdecl _i64tow_s(long long, WCHAR *, size_t, int); PALIMPORT int __cdecl _wtoi(const WCHAR *); - -#ifndef DEFINE_DUMMY_FILE_TYPE PALIMPORT FILE * __cdecl _wfopen(const WCHAR *, const WCHAR *); -#endif inline int _stricmp(const char* a, const char* b) { @@ -4145,6 +3906,10 @@ inline char* _strdup(const char* a) return strdup(a); } +// Define the MSVC implementation of the alloca concept. +// As this allocates on the current stack frame, use a macro instead of an inline function. 
+#define _alloca(x) alloca(x) + #ifdef __cplusplus extern "C++" { inline WCHAR *PAL_wcschr(WCHAR* S, WCHAR C) @@ -4178,11 +3943,6 @@ unsigned int __cdecl _rotl(unsigned int value, int shift) } #endif // !__has_builtin(_rotl) -// On 64 bit unix, make the long an int. -#ifdef HOST_64BIT -#define _lrotl _rotl -#endif - #if !__has_builtin(_rotr) /*++ @@ -4205,137 +3965,7 @@ unsigned int __cdecl _rotr(unsigned int value, int shift) #endif // !__has_builtin(_rotr) -PALIMPORT int __cdecl abs(int); -// clang complains if this is declared with __int64 -PALIMPORT long long __cdecl llabs(long long); -#ifndef PAL_STDCPP_COMPAT - -PALIMPORT double __cdecl copysign(double, double); -PALIMPORT double __cdecl acos(double); -PALIMPORT double __cdecl acosh(double) MATH_THROW_DECL; -PALIMPORT double __cdecl asin(double); -PALIMPORT double __cdecl asinh(double) MATH_THROW_DECL; -PALIMPORT double __cdecl atan(double) MATH_THROW_DECL; -PALIMPORT double __cdecl atanh(double) MATH_THROW_DECL; -PALIMPORT double __cdecl atan2(double, double); -PALIMPORT double __cdecl cbrt(double) MATH_THROW_DECL; -PALIMPORT double __cdecl ceil(double); -PALIMPORT double __cdecl cos(double); -PALIMPORT double __cdecl cosh(double); -PALIMPORT double __cdecl exp(double); -PALIMPORT double __cdecl fabs(double); -PALIMPORT double __cdecl floor(double); -PALIMPORT double __cdecl fmod(double, double); -PALIMPORT double __cdecl fma(double, double, double) MATH_THROW_DECL; -PALIMPORT int __cdecl ilogb(double); -PALIMPORT double __cdecl log(double); -PALIMPORT double __cdecl log2(double) MATH_THROW_DECL; -PALIMPORT double __cdecl log10(double); -PALIMPORT double __cdecl modf(double, double*); -PALIMPORT double __cdecl pow(double, double); -PALIMPORT double __cdecl sin(double); -PALIMPORT void __cdecl sincos(double, double*, double*); -#ifdef __APPLE__ -PALIMPORT void __cdecl __sincos(double, double*, double*); -#endif -PALIMPORT double __cdecl sinh(double); -PALIMPORT double __cdecl sqrt(double); -PALIMPORT double 
__cdecl tan(double); -PALIMPORT double __cdecl tanh(double); -PALIMPORT double __cdecl trunc(double); - -PALIMPORT float __cdecl copysignf(float, float); -PALIMPORT float __cdecl acosf(float); -PALIMPORT float __cdecl acoshf(float) MATH_THROW_DECL; -PALIMPORT float __cdecl asinf(float); -PALIMPORT float __cdecl asinhf(float) MATH_THROW_DECL; -PALIMPORT float __cdecl atanf(float) MATH_THROW_DECL; -PALIMPORT float __cdecl atanhf(float) MATH_THROW_DECL; -PALIMPORT float __cdecl atan2f(float, float); -PALIMPORT float __cdecl cbrtf(float) MATH_THROW_DECL; -PALIMPORT float __cdecl ceilf(float); -PALIMPORT float __cdecl cosf(float); -PALIMPORT float __cdecl coshf(float); -PALIMPORT float __cdecl expf(float); -PALIMPORT float __cdecl fabsf(float); -PALIMPORT float __cdecl floorf(float); -PALIMPORT float __cdecl fmodf(float, float); -PALIMPORT float __cdecl fmaf(float, float, float) MATH_THROW_DECL; -PALIMPORT int __cdecl ilogbf(float); -PALIMPORT float __cdecl logf(float); -PALIMPORT float __cdecl log2f(float) MATH_THROW_DECL; -PALIMPORT float __cdecl log10f(float); -PALIMPORT float __cdecl modff(float, float*); -PALIMPORT float __cdecl powf(float, float); -PALIMPORT float __cdecl sinf(float); -PALIMPORT void __cdecl sincosf(float, float*, float*); -#ifdef __APPLE__ -PALIMPORT void __cdecl __sincosf(float, float*, float*); -#endif -PALIMPORT float __cdecl sinhf(float); -PALIMPORT float __cdecl sqrtf(float); -PALIMPORT float __cdecl tanf(float); -PALIMPORT float __cdecl tanhf(float); -PALIMPORT float __cdecl truncf(float); -#endif // !PAL_STDCPP_COMPAT - -#ifndef PAL_STDCPP_COMPAT - -#ifdef __cplusplus -extern "C++" { - -inline __int64 abs(__int64 _X) { - return llabs(_X); -} - -#ifdef __APPLE__ -inline __int64 abs(SSIZE_T _X) { - return llabs((__int64)_X); -} -#endif - -} -#endif - -PALIMPORT DLLEXPORT void * __cdecl malloc(size_t); -PALIMPORT DLLEXPORT void __cdecl free(void *); -PALIMPORT DLLEXPORT void * __cdecl realloc(void *, size_t); - -#if defined(_MSC_VER) -#define 
alloca _alloca -#else -#define _alloca alloca -#endif //_MSC_VER - -#define alloca __builtin_alloca - -#define max(a, b) (((a) > (b)) ? (a) : (b)) -#define min(a, b) (((a) < (b)) ? (a) : (b)) - -#endif // !PAL_STDCPP_COMPAT - -PALIMPORT PAL_NORETURN void __cdecl exit(int); - -#ifndef PAL_STDCPP_COMPAT - -PALIMPORT DLLEXPORT void __cdecl qsort(void *, size_t, size_t, int(__cdecl *)(const void *, const void *)); -PALIMPORT DLLEXPORT void * __cdecl bsearch(const void *, const void *, size_t, size_t, - int(__cdecl *)(const void *, const void *)); - -PALIMPORT time_t __cdecl time(time_t *); - -#endif // !PAL_STDCPP_COMPAT - -/* Maximum value that can be returned by the rand function. */ - -#ifndef PAL_STDCPP_COMPAT -#define RAND_MAX 0x7fff -#endif // !PAL_STDCPP_COMPAT - -PALIMPORT int __cdecl rand(void); -PALIMPORT void __cdecl srand(unsigned int); - -PALIMPORT DLLEXPORT char * __cdecl getenv(const char *); +PALIMPORT DLLEXPORT char * __cdecl PAL_getenv(const char *); PALIMPORT DLLEXPORT int __cdecl _putenv(const char *); #define ERANGE 34 @@ -4366,15 +3996,7 @@ PALAPI PAL_GetCpuTickCount(); #endif // PAL_PERF -/******************* PAL functions for SIMD extensions *****************/ - -PALIMPORT -unsigned int _mm_getcsr(void); - -PALIMPORT -void _mm_setcsr(unsigned int i); - -/******************* PAL functions for CPU capability detection *******/ +/******************* PAL functions for exceptions *******/ #ifdef __cplusplus diff --git a/src/coreclr/pal/inc/pal_mstypes.h b/src/coreclr/pal/inc/pal_mstypes.h index 1eee6b2bbbd245..d59103002d18a4 100644 --- a/src/coreclr/pal/inc/pal_mstypes.h +++ b/src/coreclr/pal/inc/pal_mstypes.h @@ -64,13 +64,15 @@ extern "C" { #define _cdecl #define CDECL -// On ARM __fastcall is ignored and causes a compile error -#if !defined(PAL_STDCPP_COMPAT) || defined(__arm__) -# undef __fastcall -# undef _fastcall -# define __fastcall -# define _fastcall -#endif // !defined(PAL_STDCPP_COMPAT) || defined(__arm__) +// Some platforms (such as 
FreeBSD) define the __fastcall macro +// on all targets, even when using it will fail. +// Undefine it here so we can use it on all platforms without error. +#ifdef __fastcall +#undef __fastcall +#endif + +#define __fastcall +#define _fastcall #endif // !defined(__i386__) @@ -101,7 +103,9 @@ extern "C" { #else #define PALIMPORT +#ifndef DLLEXPORT #define DLLEXPORT __attribute__((visibility("default"))) +#endif #define PAL_NORETURN __attribute__((noreturn)) #endif @@ -206,21 +210,6 @@ extern "C" { #endif // _MSC_VER -#ifndef PAL_STDCPP_COMPAT -// Defined in gnu's types.h. For non PAL_IMPLEMENTATION system -// includes are not included, so we need to define them. -#ifndef PAL_IMPLEMENTATION - -typedef __int64 int64_t; -typedef unsigned __int64 uint64_t; -typedef __int32 int32_t; -typedef unsigned __int32 uint32_t; -typedef __int16 int16_t; -typedef unsigned __int16 uint16_t; -typedef __int8 int8_t; -typedef unsigned __int8 uint8_t; - -#endif // PAL_IMPLEMENTATION #ifndef _MSC_VER @@ -229,7 +218,6 @@ typedef long double LONG_DOUBLE; #endif #endif // _MSC_VER -#endif // !PAL_STDCPP_COMPAT typedef void VOID; @@ -565,49 +553,10 @@ static_assert(sizeof(SSIZE_T) == sizeof(void*), "SSIZE_T should be pointer sized #define SSIZE_T_MIN (ssize_t)I64(0x8000000000000000) #endif -#ifndef PAL_STDCPP_COMPAT -#ifdef HOST_64BIT -typedef unsigned long size_t; -typedef long ssize_t; -typedef long ptrdiff_t; -#else // !HOST_64BIT -typedef unsigned int size_t; -typedef int ptrdiff_t; -#endif // !HOST_64BIT -#endif // !PAL_STDCPP_COMPAT -#define _SIZE_T_DEFINED - typedef LONG_PTR LPARAM; -#define _PTRDIFF_T_DEFINED -#ifdef _MINGW_ -// We need to define _PTRDIFF_T to make sure ptrdiff_t doesn't get defined -// again by system headers - but only for MinGW. 
-#define _PTRDIFF_T -#endif - typedef char16_t WCHAR; -#ifndef PAL_STDCPP_COMPAT - -#if defined(__linux__) -#ifdef HOST_64BIT -typedef long int intptr_t; -typedef unsigned long int uintptr_t; -#else // !HOST_64BIT -typedef int intptr_t; -typedef unsigned int uintptr_t; -#endif // !HOST_64BIT -#else -typedef long int intptr_t; -typedef unsigned long int uintptr_t; -#endif - -#endif // PAL_STDCPP_COMPAT - -#define _INTPTR_T_DEFINED -#define _UINTPTR_T_DEFINED - typedef DWORD LCID; typedef PDWORD PLCID; typedef WORD LANGID; diff --git a/src/coreclr/pal/inc/rt/cpp/assert.h b/src/coreclr/pal/inc/rt/cpp/assert.h deleted file mode 100644 index 7493b151d6a0f1..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/assert.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: assert.h -// -// =========================================================================== -// dummy assert.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/cstdlib b/src/coreclr/pal/inc/rt/cpp/cstdlib deleted file mode 100644 index 1cfd40828a47c1..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/cstdlib +++ /dev/null @@ -1,13 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
- -// -// clrosdev -// -// =========================================================================== -// File: cstdlib -// -// =========================================================================== -// dummy cstdlib for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/ctype.h b/src/coreclr/pal/inc/rt/cpp/ctype.h deleted file mode 100644 index cb41fcd88e6e08..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/ctype.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: ctype.h -// -// =========================================================================== -// dummy ctype.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/emmintrin.h b/src/coreclr/pal/inc/rt/cpp/emmintrin.h deleted file mode 100644 index f2e8e0c1fd6627..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/emmintrin.h +++ /dev/null @@ -1,128 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// From llvm-3.9/clang-3.9.1 emmintrin.h: - -/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------=== - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - * - *===-----------------------------------------------------------------------=== - */ - -#include "palrt.h" -#ifdef __GNUC__ -#ifndef __EMMINTRIN_H -#define __IMMINTRIN_H - -typedef long long __m128i __attribute__((__vector_size__(16))); - -typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16))); -typedef short __v8hi __attribute__((__vector_size__(16))); -typedef char __v16qi __attribute__((__vector_size__(16))); - - -/* Define the default attribute for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, NODEBUG_ATTRIBUTE)) - -/// \brief Performs a bitwise OR of two 128-bit integer vectors. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the \c VPOR / POR instruction. -/// -/// \param __a -/// A 128-bit integer vector containing one of the source operands. -/// \param __b -/// A 128-bit integer vector containing one of the source operands. -/// \returns A 128-bit integer vector containing the bitwise OR of the values -/// in both operands. -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_or_si128(__m128i __a, __m128i __b) -{ - return (__m128i)((__v2du)__a | (__v2du)__b); -} - -/// \brief Compares each of the corresponding 16-bit values of the 128-bit -/// integer vectors for equality. Each comparison yields 0h for false, FFFFh -/// for true. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the \c VPCMPEQW / PCMPEQW instruction. -/// -/// \param __a -/// A 128-bit integer vector. 
-/// \param __b -/// A 128-bit integer vector. -/// \returns A 128-bit integer vector containing the comparison results. -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_cmpeq_epi16(__m128i __a, __m128i __b) -{ - return (__m128i)((__v8hi)__a == (__v8hi)__b); -} - -/// \brief Moves packed integer values from an unaligned 128-bit memory location -/// to elements in a 128-bit integer vector. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the \c VMOVDQU / MOVDQU instruction. -/// -/// \param __p -/// A pointer to a memory location containing integer values. -/// \returns A 128-bit integer vector containing the moved values. -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_loadu_si128(__m128i const *__p) -{ - struct __loadu_si128 { - __m128i __v; - } __attribute__((__packed__, __may_alias__)); - return ((struct __loadu_si128*)__p)->__v; -} - -/// \brief Initializes all values in a 128-bit vector of [8 x i16] with the -/// specified 16-bit value. -/// -/// \headerfile -/// -/// This intrinsic is a utility function and does not correspond to a specific -/// instruction. -/// -/// \param __w -/// A 16-bit value used to initialize the elements of the destination integer -/// vector. -/// \returns An initialized 128-bit vector of [8 x i16] with all elements -/// containing the value provided in the operand. -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_set1_epi16(short __w) -{ - return (__m128i)(__v8hi){ __w, __w, __w, __w, __w, __w, __w, __w }; -} - -static __inline__ int __DEFAULT_FN_ATTRS -_mm_movemask_epi8(__m128i __a) -{ - return __builtin_ia32_pmovmskb128((__v16qi)__a); -} - -#undef __DEFAULT_FN_ATTRS - -#endif /* __EMMINTRIN_H */ -#endif // __GNUC__ diff --git a/src/coreclr/pal/inc/rt/cpp/fcntl.h b/src/coreclr/pal/inc/rt/cpp/fcntl.h deleted file mode 100644 index 556145a9f0847a..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/fcntl.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. 
-// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: fcntl.h -// -// =========================================================================== -// dummy fcntl.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/float.h b/src/coreclr/pal/inc/rt/cpp/float.h deleted file mode 100644 index a1dc803380e44b..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/float.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: float.h -// -// =========================================================================== -// dummy float.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/limits.h b/src/coreclr/pal/inc/rt/cpp/limits.h deleted file mode 100644 index bd667f14eaf99c..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/limits.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: limits.h -// -// =========================================================================== -// dummy limits.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/malloc.h b/src/coreclr/pal/inc/rt/cpp/malloc.h deleted file mode 100644 index 255a2c7f2fa226..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/malloc.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
-// - -// -// =========================================================================== -// File: malloc.h -// -// =========================================================================== -// dummy malloc.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/math.h b/src/coreclr/pal/inc/rt/cpp/math.h deleted file mode 100644 index e42c1852c13992..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/math.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: math.h -// -// =========================================================================== -// dummy math.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/memory.h b/src/coreclr/pal/inc/rt/cpp/memory.h deleted file mode 100644 index bcc0d7d9c5d5b5..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/memory.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: memory.h -// -// =========================================================================== -// dummy memory.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/stdarg.h b/src/coreclr/pal/inc/rt/cpp/stdarg.h deleted file mode 100644 index 59d0d046d5f91d..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/stdarg.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
-// - -// -// =========================================================================== -// File: stdarg.h -// -// =========================================================================== -// dummy stdarg.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/stdbool.h b/src/coreclr/pal/inc/rt/cpp/stdbool.h deleted file mode 100644 index b23533a2940dd7..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/stdbool.h +++ /dev/null @@ -1,4 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/stddef.h b/src/coreclr/pal/inc/rt/cpp/stddef.h deleted file mode 100644 index b347dbf4149702..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/stddef.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: stddef.h -// -// =========================================================================== -// dummy stddef.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/stdint.h b/src/coreclr/pal/inc/rt/cpp/stdint.h deleted file mode 100644 index b23533a2940dd7..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/stdint.h +++ /dev/null @@ -1,4 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/stdio.h b/src/coreclr/pal/inc/rt/cpp/stdio.h deleted file mode 100644 index 33c1912bb2b72a..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/stdio.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
-// - -// -// =========================================================================== -// File: stdio.h -// -// =========================================================================== -// dummy stdio.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/stdlib.h b/src/coreclr/pal/inc/rt/cpp/stdlib.h deleted file mode 100644 index d2d49357b88e03..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/stdlib.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: stdlib.h -// -// =========================================================================== -// dummy stdlib.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/string.h b/src/coreclr/pal/inc/rt/cpp/string.h deleted file mode 100644 index b66d883338e104..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/string.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: string.h -// -// =========================================================================== -// dummy string.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/time.h b/src/coreclr/pal/inc/rt/cpp/time.h deleted file mode 100644 index 00c83f99d34389..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/time.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
-// - -// -// =========================================================================== -// File: time.h -// -// =========================================================================== -// dummy time.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/wchar.h b/src/coreclr/pal/inc/rt/cpp/wchar.h deleted file mode 100644 index 5497d729e43b8d..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/wchar.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: wchar.h -// -// =========================================================================== -// dummy wchar.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/xmmintrin.h b/src/coreclr/pal/inc/rt/cpp/xmmintrin.h deleted file mode 100644 index 826d2d788676fb..00000000000000 --- a/src/coreclr/pal/inc/rt/cpp/xmmintrin.h +++ /dev/null @@ -1,117 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// From llvm-3.9/clang-3.9.1 xmmintrin.h: - -/*===---- xmmintrin.h - SSE intrinsics -------------------------------------=== -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in -* all copies or substantial portions of the Software. 
-* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -* THE SOFTWARE. -* -*===-----------------------------------------------------------------------=== -*/ - -#ifdef __GNUC__ - -typedef float __m128 __attribute__((__vector_size__(16))); - -/* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, NODEBUG_ATTRIBUTE)) - -/// \brief Loads a 128-bit floating-point vector of [4 x float] from an aligned -/// memory location. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS instruction. -/// -/// \param __p -/// A pointer to a 128-bit memory location. The address of the memory -/// location has to be 128-bit aligned. -/// \returns A 128-bit vector of [4 x float] containing the loaded valus. -static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_load_ps(const float *__p) -{ - return *(__m128*)__p; -} - -/// \brief Loads a 128-bit floating-point vector of [4 x float] from an -/// unaligned memory location. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the \c VMOVUPS / MOVUPS instruction. -/// -/// \param __p -/// A pointer to a 128-bit memory location. The address of the memory -/// location does not have to be aligned. -/// \returns A 128-bit vector of [4 x float] containing the loaded values. 
-static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_loadu_ps(const float *__p) -{ - struct __loadu_ps - { - __m128 __v; - } __attribute__((__packed__, __may_alias__)); - return ((struct __loadu_ps*)__p)->__v; -} - -/// \brief Stores float values from a 128-bit vector of [4 x float] to an -/// unaligned memory location. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the \c VMOVUPS / MOVUPS instruction. -/// -/// \param __p -/// A pointer to a 128-bit memory location. The address of the memory -/// location does not have to be aligned. -/// \param __a -/// A 128-bit vector of [4 x float] containing the values to be stored. -static __inline__ void __DEFAULT_FN_ATTRS -_mm_storeu_ps(float *__p, __m128 __a) -{ - struct __storeu_ps - { - __m128 __v; - } __attribute__((__packed__, __may_alias__)); - ((struct __storeu_ps*)__p)->__v = __a; -} - -/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] into -/// four contiguous elements in an aligned memory location. -/// -/// \headerfile -/// -/// This intrinsic corresponds to \c VMOVAPS / MOVAPS + \c shuffling -/// instruction. -/// -/// \param __p -/// A pointer to a 128-bit memory location. -/// \param __a -/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each -/// of the four contiguous elements pointed by __p. 
-static __inline__ void __DEFAULT_FN_ATTRS -_mm_store_ps(float *__p, __m128 __a) -{ - *(__m128*)__p = __a; -} - -#undef __DEFAULT_FN_ATTRS - -#endif // __GNUC__ diff --git a/src/coreclr/pal/inc/rt/palrt.h b/src/coreclr/pal/inc/rt/palrt.h index 1f7f413456965c..18e25222c5db8e 100644 --- a/src/coreclr/pal/inc/rt/palrt.h +++ b/src/coreclr/pal/inc/rt/palrt.h @@ -135,18 +135,6 @@ typedef enum tagEFaultRepRetVal #include "pal.h" -#ifndef PAL_STDCPP_COMPAT -#ifdef __cplusplus -#ifndef __PLACEMENT_NEW_INLINE -#define __PLACEMENT_NEW_INLINE -inline void *__cdecl operator new(size_t, void *_P) -{ - return (_P); -} -#endif // __PLACEMENT_NEW_INLINE -#endif // __cplusplus -#endif // !PAL_STDCPP_COMPAT - #include #define NTAPI __cdecl @@ -280,9 +268,7 @@ typedef union _ULARGE_INTEGER { DWORD HighPart; #endif } -#ifndef PAL_STDCPP_COMPAT u -#endif // PAL_STDCPP_COMPAT ; ULONGLONG QuadPart; } ULARGE_INTEGER, *PULARGE_INTEGER; diff --git a/src/coreclr/pal/inc/rt/safecrt.h b/src/coreclr/pal/inc/rt/safecrt.h index 12b5eceaad5896..df31623d903c8c 100644 --- a/src/coreclr/pal/inc/rt/safecrt.h +++ b/src/coreclr/pal/inc/rt/safecrt.h @@ -86,15 +86,6 @@ #endif #endif -/* NULL */ -#if !defined(NULL) -#if !defined(__cplusplus) -#define NULL 0 -#else -#define NULL ((void *)0) -#endif -#endif - /* _W64 */ #if !defined(_W64) #if !defined(__midl) && (defined(HOST_X86) || defined(_M_IX86)) && _MSC_VER >= 1300 @@ -104,16 +95,6 @@ #endif #endif -/* uintptr_t */ -#if !defined(_UINTPTR_T_DEFINED) -#if defined(HOST_64BIT) -typedef unsigned __int64 uintptr_t; -#else -typedef _W64 unsigned int uintptr_t; -#endif -#define _UINTPTR_T_DEFINED -#endif - #ifdef __GNUC__ #define SAFECRT_DEPRECATED __attribute__((deprecated)) #else @@ -1116,10 +1097,8 @@ errno_t __cdecl _wcsnset_s(WCHAR *_Dst, size_t _SizeInWords, WCHAR _Value, size_ #endif -#ifndef PAL_STDCPP_COMPAT - /* wcsnlen */ -_SAFECRT__EXTERN_C +extern size_t __cdecl wcsnlen(const WCHAR *inString, size_t inMaxSize); #if _SAFECRT_USE_INLINES || 
_SAFECRT_IMPL @@ -1140,7 +1119,6 @@ size_t __cdecl wcsnlen(const WCHAR *inString, size_t inMaxSize) } #endif -#endif // PAL_STDCPP_COMPAT /* _wmakepath_s */ _SAFECRT__EXTERN_C diff --git a/src/coreclr/pal/inc/rt/sal.h b/src/coreclr/pal/inc/rt/sal.h index bec3352aa3f16d..9d461e8050f57e 100644 --- a/src/coreclr/pal/inc/rt/sal.h +++ b/src/coreclr/pal/inc/rt/sal.h @@ -2399,19 +2399,14 @@ extern "C" { #define _SA_SPECSTRIZE( x ) #x /* - __null p __notnull p __maybenull p - Annotates a pointer p. States that pointer p is null. Commonly used - in the negated form __notnull or the possibly null form __maybenull. + Annotates a pointer p. States that pointer p is never null or maybe null. */ -#ifndef PAL_STDCPP_COMPAT - #define __null _Null_impl_ #define __notnull _Notnull_impl_ #define __maybenull _Maybenull_impl_ -#endif // !PAL_STDCPP_COMPAT /* __readonly l @@ -2598,11 +2593,8 @@ extern "C" { #else // ][ -#ifndef PAL_STDCPP_COMPAT - #define __null #define __notnull #define __deref -#endif // !PAL_STDCPP_COMPAT #define __maybenull #define __readonly #define __notreadonly diff --git a/src/coreclr/pal/inc/rt/specstrings.h b/src/coreclr/pal/inc/rt/specstrings.h index 21a40d91a0dd58..1cccb42e1554df 100644 --- a/src/coreclr/pal/inc/rt/specstrings.h +++ b/src/coreclr/pal/inc/rt/specstrings.h @@ -309,11 +309,9 @@ __ANNOTATION(SAL_failureDefault(enum __SAL_failureKind)); __byte_readableTo((expr) ? 
(size) : (size) * 2) #define __post_invalid _Post_ __notvalid /* integer related macros */ -#ifndef PAL_STDCPP_COMPAT #define __allocator __inner_allocator #define __deallocate(kind) _Pre_ __notnull __post_invalid #define __deallocate_opt(kind) _Pre_ __maybenull __post_invalid -#endif #define __bound __inner_bound #define __range(lb,ub) __inner_range(lb,ub) #define __in_bound _Pre_ __inner_bound diff --git a/src/coreclr/pal/inc/rt/specstrings_strict.h b/src/coreclr/pal/inc/rt/specstrings_strict.h index 52ade79cde13c0..d066f76b3c43b0 100644 --- a/src/coreclr/pal/inc/rt/specstrings_strict.h +++ b/src/coreclr/pal/inc/rt/specstrings_strict.h @@ -655,7 +655,6 @@ /*************************************************************************** * Expert Macros ***************************************************************************/ -#define __null __allowed(on_typedecl) #define __notnull __allowed(on_typedecl) #define __maybenull __allowed(on_typedecl) #define __exceptthat __allowed(on_typedecl) diff --git a/src/coreclr/pal/inc/rt/specstrings_undef.h b/src/coreclr/pal/inc/rt/specstrings_undef.h index 374b10069c1bf8..884ad919bc7b80 100644 --- a/src/coreclr/pal/inc/rt/specstrings_undef.h +++ b/src/coreclr/pal/inc/rt/specstrings_undef.h @@ -5,10 +5,8 @@ */ -#ifndef PAL_STDCPP_COMPAT #undef __in #undef __out -#endif // !PAL_STDCPP_COMPAT #undef _At_ #undef _Deref_out_ @@ -387,7 +385,6 @@ #undef __notnull #undef __notreadonly #undef __notvalid -#undef __null #undef __nullnullterminated #undef __nullterminated #undef __out_awcount diff --git a/src/coreclr/pal/inc/strsafe.h b/src/coreclr/pal/inc/strsafe.h index b69feb73c25129..b833526e61777a 100644 --- a/src/coreclr/pal/inc/strsafe.h +++ b/src/coreclr/pal/inc/strsafe.h @@ -27,15 +27,6 @@ #include // for memset #include // for va_start, etc. 
-#ifndef _SIZE_T_DEFINED -#ifdef HOST_64BIT -typedef unsigned __int64 size_t; -#else -typedef __w64 unsigned int size_t; -#endif // !HOST_64BIT -#define _SIZE_T_DEFINED -#endif // !_SIZE_T_DEFINED - #ifndef SUCCEEDED #define SUCCEEDED(hr) ((HRESULT)(hr) >= 0) #endif diff --git a/src/coreclr/pal/src/CMakeLists.txt b/src/coreclr/pal/src/CMakeLists.txt index 745162987a42e4..cb3693655dcad9 100644 --- a/src/coreclr/pal/src/CMakeLists.txt +++ b/src/coreclr/pal/src/CMakeLists.txt @@ -129,9 +129,6 @@ if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND (CLR_CMAKE_HOST_ARCH_AMD64 OR CLR_CM endif() set(SOURCES - cruntime/malloc.cpp - cruntime/misc.cpp - cruntime/thread.cpp cruntime/wchar.cpp debug/debug.cpp exception/seh.cpp diff --git a/src/coreclr/pal/src/cruntime/malloc.cpp b/src/coreclr/pal/src/cruntime/malloc.cpp deleted file mode 100644 index c4b3797e0b30a6..00000000000000 --- a/src/coreclr/pal/src/cruntime/malloc.cpp +++ /dev/null @@ -1,106 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*++ - - - -Module Name: - - malloc.cpp - -Abstract: - - Implementation of suspension safe memory allocation functions. - -Revision History: - - - ---*/ - -#include "pal/corunix.hpp" -#include "pal/thread.hpp" -#include "pal/malloc.hpp" -#include "pal/dbgmsg.h" - -#include - -SET_DEFAULT_DEBUG_CHANNEL(CRT); - -using namespace CorUnix; - -void * -__cdecl -PAL_realloc( - void* pvMemblock, - size_t szSize - ) -{ - return InternalRealloc(pvMemblock, szSize); -} - -void * -CorUnix::InternalRealloc( - void* pvMemblock, - size_t szSize - ) -{ - void *pvMem; - - PERF_ENTRY(InternalRealloc); - ENTRY("realloc (memblock:%p size=%d)\n", pvMemblock, szSize); - - if (szSize == 0) - { - // If pvMemblock is NULL, there's no reason to call free. 
- if (pvMemblock != NULL) - { - free(pvMemblock); - } - pvMem = NULL; - } - else - { - pvMem = realloc(pvMemblock, szSize); - } - - LOGEXIT("realloc returns void * %p\n", pvMem); - PERF_EXIT(InternalRealloc); - return pvMem; -} - -void -__cdecl -PAL_free( - void *pvMem - ) -{ - free(pvMem); -} - -void * -__cdecl -PAL_malloc( - size_t szSize - ) -{ - return InternalMalloc(szSize); -} - -void * -CorUnix::InternalMalloc( - size_t szSize - ) -{ - void *pvMem; - - if (szSize == 0) - { - // malloc may return null for a requested size of zero bytes. Force a nonzero size to get a valid pointer. - szSize = 1; - } - - pvMem = (void*)malloc(szSize); - return pvMem; -} diff --git a/src/coreclr/pal/src/cruntime/misc.cpp b/src/coreclr/pal/src/cruntime/misc.cpp deleted file mode 100644 index d079cd0abc4d6d..00000000000000 --- a/src/coreclr/pal/src/cruntime/misc.cpp +++ /dev/null @@ -1,264 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*++ - - - -Module Name: - - cruntime/misc.cpp - -Abstract: - - Implementation of C runtime functions that don't fit anywhere else. - - - ---*/ - -#include "pal/thread.hpp" -#include "pal/threadsusp.hpp" -#include "pal/palinternal.h" -#include "pal/dbgmsg.h" -#include "pal/misc.h" - -#include -/* needs to be included after "palinternal.h" to avoid name - collision for va_start and va_end */ -#include -#include -#include - -#if defined(HOST_AMD64) || defined(_x86_) -#include -#endif // defined(HOST_AMD64) || defined(_x86_) -#if defined(_DEBUG) -#include -#endif //defined(_DEBUG) - -SET_DEFAULT_DEBUG_CHANNEL(CRT); - -using namespace CorUnix; - -/*++ -Function: - _gcvt_s - -See MSDN doc. 
---*/ -char * -__cdecl -_gcvt_s( char * buffer, int iSize, double value, int digits ) -{ - PERF_ENTRY(_gcvt); - ENTRY( "_gcvt( value:%f digits=%d, buffer=%p )\n", value, digits, buffer ); - - if ( !buffer ) - { - ERROR( "buffer was an invalid pointer.\n" ); - } - - switch ( digits ) - { - case 7 : - /* Fall through */ - case 8 : - /* Fall through */ - case 15 : - /* Fall through */ - case 17 : - - sprintf_s( buffer, iSize, "%.*g", digits, value ); - break; - - default : - ASSERT( "Only the digits 7, 8, 15, and 17 are valid.\n" ); - *buffer = '\0'; - } - - LOGEXIT( "_gcvt returns %p (%s)\n", buffer , buffer ); - PERF_EXIT(_gcvt); - return buffer; -} - - -/*++ -Function : - - __iscsym - -See MSDN for more details. ---*/ -int -__cdecl -__iscsym( int c ) -{ - PERF_ENTRY(__iscsym); - ENTRY( "__iscsym( c=%d )\n", c ); - - if ( isalnum( c ) || c == '_' ) - { - LOGEXIT( "__iscsym returning 1\n" ); - PERF_EXIT(__iscsym); - return 1; - } - - LOGEXIT( "__iscsym returning 0\n" ); - PERF_EXIT(__iscsym); - return 0; -} -/*++ - -PAL forwarders for standard macro headers. - ---*/ -PALIMPORT DLLEXPORT int * __cdecl PAL_errno() -{ - return &errno; -} - -extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stdout() -{ - return stdout; -} - -extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stdin() -{ - return stdin; -} - -extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stderr() -{ - return stderr; -} - -/*++ -Function: - - rand - - The RAND_MAX value can vary by platform. - -See MSDN for more details. ---*/ -int -__cdecl -PAL_rand(void) -{ - int ret; - PERF_ENTRY(rand); - ENTRY("rand(void)\n"); - - ret = (rand() % (PAL_RAND_MAX + 1)); - - LOGEXIT("rand() returning %d\n", ret); - PERF_EXIT(rand); - return ret; -} - - -/*++ -Function: - - time - -See MSDN for more details. 
---*/ -PAL_time_t -__cdecl -PAL_time(PAL_time_t *tloc) -{ - time_t result; - - PERF_ENTRY(time); - ENTRY( "time( tloc=%p )\n",tloc ); - - time_t t; - result = time(&t); - if (tloc != NULL) - { - *tloc = t; - } - - LOGEXIT( "time returning %#lx\n",result ); - PERF_EXIT(time); - return result; -} - -PALIMPORT -void __cdecl -PAL_qsort(void *base, size_t nmemb, size_t size, - int (__cdecl *compar )(const void *, const void *)) -{ - PERF_ENTRY(qsort); - ENTRY("qsort(base=%p, nmemb=%lu, size=%lu, compar=%p\n", - base,(unsigned long) nmemb,(unsigned long) size, compar); - -/* reset ENTRY nesting level back to zero, qsort will invoke app-defined - callbacks and we want their entry traces... */ -#if _ENABLE_DEBUG_MESSAGES_ -{ - int old_level; - old_level = DBG_change_entrylevel(0); -#endif /* _ENABLE_DEBUG_MESSAGES_ */ - - qsort(base,nmemb,size,compar); - -/* ...and set nesting level back to what it was */ -#if _ENABLE_DEBUG_MESSAGES_ - DBG_change_entrylevel(old_level); -} -#endif /* _ENABLE_DEBUG_MESSAGES_ */ - - LOGEXIT("qsort returns\n"); - PERF_EXIT(qsort); -} - -PALIMPORT -void * __cdecl -PAL_bsearch(const void *key, const void *base, size_t nmemb, size_t size, - int (__cdecl *compar)(const void *, const void *)) -{ - void *retval; - - PERF_ENTRY(bsearch); - ENTRY("bsearch(key=%p, base=%p, nmemb=%lu, size=%lu, compar=%p\n", - key, base, (unsigned long) nmemb, (unsigned long) size, compar); - -/* reset ENTRY nesting level back to zero, bsearch will invoke app-defined - callbacks and we want their entry traces... 
*/ -#if _ENABLE_DEBUG_MESSAGES_ -{ - int old_level; - old_level = DBG_change_entrylevel(0); -#endif /* _ENABLE_DEBUG_MESSAGES_ */ - - retval = bsearch(key,base,nmemb,size,compar); - -/* ...and set nesting level back to what it was */ -#if _ENABLE_DEBUG_MESSAGES_ - DBG_change_entrylevel(old_level); -} -#endif /* _ENABLE_DEBUG_MESSAGES_ */ - - LOGEXIT("bsearch returns %p\n",retval); - PERF_EXIT(bsearch); - return retval; -} - -#ifdef HOST_AMD64 - -PALIMPORT -unsigned int PAL__mm_getcsr(void) -{ - return _mm_getcsr(); -} - -PALIMPORT -void PAL__mm_setcsr(unsigned int i) -{ - _mm_setcsr(i); -} - -#endif // HOST_AMD64 diff --git a/src/coreclr/pal/src/cruntime/thread.cpp b/src/coreclr/pal/src/cruntime/thread.cpp deleted file mode 100644 index 883c5d1b00190c..00000000000000 --- a/src/coreclr/pal/src/cruntime/thread.cpp +++ /dev/null @@ -1,38 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*++ - - - -Module Name: - - thread.c - -Abstract: - - Implementation of the threads/process functions in the C runtime library - that are Windows specific. 
- - - ---*/ - -#include "pal/palinternal.h" -#include "pal/dbgmsg.h" -#include "pal/init.h" - -SET_DEFAULT_DEBUG_CHANNEL(CRT); - -void -PAL_exit(int status) -{ - PERF_ENTRY(exit); - ENTRY ("exit(status=%d)\n", status); - - /* should also clean up any resources allocated by pal/cruntime, if any */ - ExitProcess(status); - - LOGEXIT ("exit returns void"); - PERF_EXIT(exit); -} diff --git a/src/coreclr/pal/src/cruntime/wchar.cpp b/src/coreclr/pal/src/cruntime/wchar.cpp index d5704ef0ca9d82..88340538ebca2c 100644 --- a/src/coreclr/pal/src/cruntime/wchar.cpp +++ b/src/coreclr/pal/src/cruntime/wchar.cpp @@ -55,10 +55,10 @@ _wtoi( GetLastError()); return -1; } - tempStr = (char *) PAL_malloc(len); + tempStr = (char *) malloc(len); if (!tempStr) { - ERROR("PAL_malloc failed\n"); + ERROR("malloc failed\n"); SetLastError(ERROR_NOT_ENOUGH_MEMORY); return -1; } @@ -67,12 +67,12 @@ _wtoi( { ASSERT("WideCharToMultiByte failed. Error is %d\n", GetLastError()); - PAL_free(tempStr); + free(tempStr); return -1; } ret = atoi(tempStr); - PAL_free(tempStr); + free(tempStr); LOGEXIT("_wtoi returns int %d\n", ret); PERF_EXIT(_wtoi); return ret; @@ -261,10 +261,10 @@ PAL_wcstoul( res = 0; goto PAL_wcstoulExit; } - s_nptr = (char *)PAL_malloc(size); + s_nptr = (char *)malloc(size); if (!s_nptr) { - ERROR("PAL_malloc failed\n"); + ERROR("malloc failed\n"); SetLastError(ERROR_NOT_ENOUGH_MEMORY); res = 0; goto PAL_wcstoulExit; @@ -310,7 +310,7 @@ PAL_wcstoul( } PAL_wcstoulExit: - PAL_free(s_nptr); + free(s_nptr); LOGEXIT("wcstoul returning unsigned long %lu\n", res); PERF_EXIT(wcstoul); @@ -351,10 +351,10 @@ PAL__wcstoui64( res = 0; goto PAL__wcstoui64Exit; } - s_nptr = (char *)PAL_malloc(size); + s_nptr = (char *)malloc(size); if (!s_nptr) { - ERROR("PAL_malloc failed\n"); + ERROR("malloc failed\n"); SetLastError(ERROR_NOT_ENOUGH_MEMORY); res = 0; goto PAL__wcstoui64Exit; @@ -381,7 +381,7 @@ PAL__wcstoui64( } PAL__wcstoui64Exit: - PAL_free(s_nptr); + free(s_nptr); LOGEXIT("_wcstoui64 
returning unsigned long long %llu\n", res); PERF_EXIT(_wcstoui64); @@ -896,7 +896,7 @@ PAL_wcstod( const wchar_16 * nptr, wchar_16 **endptr ) if ( lpEndOfExpression != lpStartOfExpression ) { Length = lpEndOfExpression - lpStartOfExpression; - lpStringRep = (LPSTR)PAL_malloc( Length + 1); + lpStringRep = (LPSTR)malloc( Length + 1); if ( lpStringRep ) { @@ -939,7 +939,7 @@ PAL_wcstod( const wchar_16 * nptr, wchar_16 **endptr ) *endptr = lpEndOfExpression; } - PAL_free( lpStringRep ); + free( lpStringRep ); LOGEXIT( "wcstod returning %f.\n", RetVal ); PERF_EXIT(wcstod); return RetVal; diff --git a/src/coreclr/pal/src/debug/debug.cpp b/src/coreclr/pal/src/debug/debug.cpp index f0a504452c59bb..b38810864a587f 100644 --- a/src/coreclr/pal/src/debug/debug.cpp +++ b/src/coreclr/pal/src/debug/debug.cpp @@ -40,6 +40,7 @@ SET_DEFAULT_DEBUG_CHANNEL(DEBUG); // some headers have code with asserts, so do #include #include +#include #if HAVE_PROCFS_CTL #include #elif defined(HAVE_TTRACE) // HAVE_PROCFS_CTL @@ -203,7 +204,7 @@ OutputDebugStringW( } /* strLen includes the null terminator */ - if ((lpOutputStringA = (LPSTR) InternalMalloc((strLen * sizeof(CHAR)))) == NULL) + if ((lpOutputStringA = (LPSTR) malloc((strLen * sizeof(CHAR)))) == NULL) { ERROR("Insufficient memory available !\n"); SetLastError(ERROR_NOT_ENOUGH_MEMORY); diff --git a/src/coreclr/pal/src/eventprovider/dummyprovider/CMakeLists.txt b/src/coreclr/pal/src/eventprovider/dummyprovider/CMakeLists.txt index e0105865f9aec9..09986597b7c1b3 100644 --- a/src/coreclr/pal/src/eventprovider/dummyprovider/CMakeLists.txt +++ b/src/coreclr/pal/src/eventprovider/dummyprovider/CMakeLists.txt @@ -24,7 +24,6 @@ foreach(DUMMY_PROVIDER_FILE ${DUMMY_PROVIDER_OUTPUT}) list(APPEND DUMMY_PROVIDER_SOURCES ${DUMMY_PROVIDER_FILE}) endforeach() -add_definitions(-DPAL_STDCPP_COMPAT=1) include_directories(${COREPAL_SOURCE_DIR}/inc/rt) include_directories(${CMAKE_CURRENT_BINARY_DIR}/dummy) diff --git 
a/src/coreclr/pal/src/eventprovider/lttngprovider/CMakeLists.txt b/src/coreclr/pal/src/eventprovider/lttngprovider/CMakeLists.txt index d116c0095ea55f..40f65bf171142e 100644 --- a/src/coreclr/pal/src/eventprovider/lttngprovider/CMakeLists.txt +++ b/src/coreclr/pal/src/eventprovider/lttngprovider/CMakeLists.txt @@ -30,7 +30,6 @@ foreach(LTTNG_PROVIDER_FILE ${LTTNG_PROVIDER_OUTPUT}) endif() endforeach() -add_definitions(-DPAL_STDCPP_COMPAT=1) include_directories(${COREPAL_SOURCE_DIR}/inc/rt) include_directories(${CMAKE_CURRENT_BINARY_DIR}/lttng) diff --git a/src/coreclr/pal/src/file/directory.cpp b/src/coreclr/pal/src/file/directory.cpp index 4cd0600e5cf603..e06afd0b19e1c2 100644 --- a/src/coreclr/pal/src/file/directory.cpp +++ b/src/coreclr/pal/src/file/directory.cpp @@ -75,11 +75,11 @@ CreateDirectoryW( goto done; } - if (((mb_dir = (char *)PAL_malloc(mb_size)) == NULL) || + if (((mb_dir = (char *)malloc(mb_size)) == NULL) || (WideCharToMultiByte( CP_ACP, 0, lpPathName, -1, mb_dir, mb_size, NULL, NULL) != mb_size)) { - ASSERT("WideCharToMultiByte or PAL_malloc failure! LastError:%d errno:%d\n", + ASSERT("WideCharToMultiByte or malloc failure! 
LastError:%d errno:%d\n", GetLastError(), errno); dwLastError = ERROR_INTERNAL_ERROR; goto done; @@ -93,7 +93,7 @@ CreateDirectoryW( } if (mb_dir != NULL) { - PAL_free(mb_dir); + free(mb_dir); } LOGEXIT("CreateDirectoryW returns BOOL %d\n", bRet); PERF_EXIT(CreateDirectoryW); @@ -280,7 +280,7 @@ GetCurrentDirectoryA(PathCharString& lpBuffer) dwDirLen = strlen( current_dir ); lpBuffer.Set(current_dir, dwDirLen); - PAL_free(current_dir); + free(current_dir); done: if ( dwLastError ) @@ -486,7 +486,7 @@ CreateDirectoryA( { SetLastError( dwLastError ); } - PAL_free( unixPathName ); + free( unixPathName ); LOGEXIT("CreateDirectoryA returns BOOL %d\n", bRet); PERF_EXIT(CreateDirectoryA); return bRet; diff --git a/src/coreclr/pal/src/file/file.cpp b/src/coreclr/pal/src/file/file.cpp index 8139f87c5d8613..1712be93f698f4 100644 --- a/src/coreclr/pal/src/file/file.cpp +++ b/src/coreclr/pal/src/file/file.cpp @@ -2881,7 +2881,7 @@ GetTempFileNameW( prefix_stringPS.CloseBuffer(prefix_size - 1); } - tempfile_name = (char*)InternalMalloc(MAX_LONGPATH); + tempfile_name = (char*)malloc(MAX_LONGPATH); if (tempfile_name == NULL) { pThread->SetLastError(ERROR_NOT_ENOUGH_MEMORY); diff --git a/src/coreclr/pal/src/file/find.cpp b/src/coreclr/pal/src/file/find.cpp index b874885992f8c4..ead5c4335e76e7 100644 --- a/src/coreclr/pal/src/file/find.cpp +++ b/src/coreclr/pal/src/file/find.cpp @@ -138,7 +138,7 @@ FindFirstFileA( goto done; } - find_data = (find_obj *)InternalMalloc(sizeof(find_obj)); + find_data = (find_obj *)malloc(sizeof(find_obj)); if ( find_data == NULL ) { ERROR("Unable to allocate memory for find_data\n"); diff --git a/src/coreclr/pal/src/handlemgr/handlemgr.cpp b/src/coreclr/pal/src/handlemgr/handlemgr.cpp index 5dc198c7f5a3a1..09405f1ec514b7 100644 --- a/src/coreclr/pal/src/handlemgr/handlemgr.cpp +++ b/src/coreclr/pal/src/handlemgr/handlemgr.cpp @@ -51,7 +51,7 @@ CSimpleHandleManager::Initialize( field, with the head in the global 'm_hiFreeListStart'. 
*/ m_dwTableSize = m_dwTableGrowthRate; - m_rghteHandleTable = reinterpret_cast(InternalMalloc((m_dwTableSize * sizeof(HANDLE_TABLE_ENTRY)))); + m_rghteHandleTable = reinterpret_cast(malloc((m_dwTableSize * sizeof(HANDLE_TABLE_ENTRY)))); if(NULL == m_rghteHandleTable) { ERROR("Unable to create initial handle table array"); @@ -108,7 +108,7 @@ CSimpleHandleManager::AllocateHandle( } /* grow handle table */ - rghteTempTable = reinterpret_cast(InternalRealloc( + rghteTempTable = reinterpret_cast(realloc( m_rghteHandleTable, (m_dwTableSize + m_dwTableGrowthRate) * sizeof(HANDLE_TABLE_ENTRY))); diff --git a/src/coreclr/pal/src/include/pal/file.h b/src/coreclr/pal/src/include/pal/file.h index 9b6e3195364043..0ec765317d48ff 100644 --- a/src/coreclr/pal/src/include/pal/file.h +++ b/src/coreclr/pal/src/include/pal/file.h @@ -25,6 +25,7 @@ Revision History: #include "pal/stackstring.hpp" #include #include +#include #ifdef __cplusplus extern "C" diff --git a/src/coreclr/pal/src/include/pal/malloc.hpp b/src/coreclr/pal/src/include/pal/malloc.hpp index 4e7b96da0e2283..65715fa9387aec 100644 --- a/src/coreclr/pal/src/include/pal/malloc.hpp +++ b/src/coreclr/pal/src/include/pal/malloc.hpp @@ -26,96 +26,17 @@ Module Name: #include #include -extern "C" -{ - void * - __cdecl - PAL_realloc( - void* pvMemblock, - size_t szSize - ); - - void * - __cdecl - PAL_malloc( - size_t szSize - ); - - void - __cdecl - PAL_free( - void *pvMem - ); -} - namespace CorUnix{ - - void * - InternalRealloc( - void *pvMemblock, - size_t szSize - ); - - void * - InternalMalloc( - size_t szSize - ); - - // Define common code for "new" style allocators below. -#define INTERNAL_NEW_COMMON() \ - T *pMem = (T*)InternalMalloc(sizeof(T)); \ - if (pMem == NULL) \ - return NULL; - - // Define "new" style allocators (which allocate then call a constructor) for different numbers of - // constructor arguments. Added based on usage. - - // Default constructor (0 args) case. 
- template - T* InternalNew() + // Define "new" style allocators (which allocate then call a constructor). + template + T* InternalNew(Ts... args) { - INTERNAL_NEW_COMMON(); - return new (pMem) T(); - } + T* pMem = (T*)malloc(sizeof(T)); - // 1 arg case. - template - T* InternalNew(A1 arg1) - { - INTERNAL_NEW_COMMON(); - return new (pMem) T(arg1); - } - - // 2 args case. - template - T* InternalNew(A1 arg1, A2 arg2) - { - INTERNAL_NEW_COMMON(); - return new (pMem) T(arg1, arg2); - } - - // 3 args case. - template - T* InternalNew(A1 arg1, A2 arg2, A3 arg3) - { - INTERNAL_NEW_COMMON(); - return new (pMem) T(arg1, arg2, arg3); - } - - // 4 args case. - template - T* InternalNew(A1 arg1, A2 arg2, A3 arg3, A4 arg4) - { - INTERNAL_NEW_COMMON(); - return new (pMem) T(arg1, arg2, arg3, arg4); - } + if (pMem == NULL) + return NULL; - // 5 args case. - template - T* InternalNew(A1 arg1, A2 arg2, A3 arg3, A4 arg4, A5 arg5) - { - INTERNAL_NEW_COMMON(); - return new (pMem) T(arg1, arg2, arg3, arg4, arg5); + return new (pMem) T(args...); } template T* InternalNewArray(size_t cElements) @@ -123,7 +44,7 @@ namespace CorUnix{ size_t cbSize = (cElements * sizeof(T)) + sizeof(size_t); T *pMem; - pMem = (T*)InternalMalloc(cbSize); + pMem = (T*)malloc(cbSize); if (pMem == NULL) return NULL; diff --git a/src/coreclr/pal/src/include/pal/misc.h b/src/coreclr/pal/src/include/pal/misc.h index aa5b2b4852b6ea..ffa6448ed7d308 100644 --- a/src/coreclr/pal/src/include/pal/misc.h +++ b/src/coreclr/pal/src/include/pal/misc.h @@ -25,23 +25,6 @@ extern "C" { #endif // __cplusplus -/*++ -Function : - - PAL_rand - - Calls rand and mitigates the difference between RAND_MAX - on Windows and FreeBSD. 
---*/ -int __cdecl PAL_rand(void); - -/*++ -Function : - - PAL_time ---*/ -PAL_time_t __cdecl PAL_time(PAL_time_t*); - /*++ Function : MsgBoxInitialize diff --git a/src/coreclr/pal/src/include/pal/palinternal.h b/src/coreclr/pal/src/include/pal/palinternal.h index 15887d03773822..3fa16f38cfbe77 100644 --- a/src/coreclr/pal/src/include/pal/palinternal.h +++ b/src/coreclr/pal/src/include/pal/palinternal.h @@ -161,190 +161,6 @@ function_name() to call the system's implementation #include "pal_perf.h" #endif -/* C runtime functions needed to be renamed to avoid duplicate definition - of those functions when including standard C header files */ -#define memcpy DUMMY_memcpy -#define memcmp DUMMY_memcmp -#define memset DUMMY_memset -#define memmove DUMMY_memmove -#define memchr DUMMY_memchr -#define atoll DUMMY_atoll -#define strlen DUMMY_strlen -#define stricmp DUMMY_stricmp -#define strstr DUMMY_strstr -#define strcmp DUMMY_strcmp -#define strcat DUMMY_strcat -#define strncat DUMMY_strncat -#define strcpy DUMMY_strcpy -#define strncmp DUMMY_strncmp -#define strncpy DUMMY_strncpy -#define strchr DUMMY_strchr -#define strrchr DUMMY_strrchr -#define strpbrk DUMMY_strpbrk -#define strtod DUMMY_strtod -#define strtoul DUMMY_strtoul -#define strtoull DUMMY_strtoull -#define strnlen DUMMY_strnlen -#define strcasecmp DUMMY_strcasecmp -#define strncasecmp DUMMY_strncasecmp -#define strdup DUMMY_strdup -#define strtok_r DUMMY_strtok_r -#define tolower DUMMY_tolower -#define toupper DUMMY_toupper -#define isprint DUMMY_isprint -#define isdigit DUMMY_isdigit -#define iswalpha DUMMY_iswalpha -#define iswdigit DUMMY_iswdigit -#define iswupper DUMMY_iswupper -#define towupper DUMMY_towupper -#define towlower DUMMY_towlower -#define iswprint DUMMY_iswprint -#define iswspace DUMMY_iswspace -#define iswxdigit DUMMY_iswxdigit -#define wint_t DUMMY_wint_t -#define srand DUMMY_srand -#define atoi DUMMY_atoi -#define atof DUMMY_atof -#define size_t DUMMY_size_t -#define time_t PAL_time_t 
-#define va_list DUMMY_va_list -#define abs DUMMY_abs -#define llabs DUMMY_llabs -#define ceil DUMMY_ceil -#define cos DUMMY_cos -#define cosh DUMMY_cosh -#define fabs DUMMY_fabs -#define floor DUMMY_floor -#define fmod DUMMY_fmod -#define modf DUMMY_modf -#define sin DUMMY_sin -#define sinh DUMMY_sinh -#define sqrt DUMMY_sqrt -#define tan DUMMY_tan -#define tanh DUMMY_tanh -#define trunc DUMMY_trunc -#define ceilf DUMMY_ceilf -#define cosf DUMMY_cosf -#define coshf DUMMY_coshf -#define fabsf DUMMY_fabsf -#define floorf DUMMY_floorf -#define fmodf DUMMY_fmodf -#define modff DUMMY_modff -#define sinf DUMMY_sinf -#define sinhf DUMMY_sinhf -#define sqrtf DUMMY_sqrtf -#define tanf DUMMY_tanf -#define tanhf DUMMY_tanhf -#define truncf DUMMY_truncf -#define acos DUMMMY_acos -#define asin DUMMMY_asin -#define atan2 DUMMMY_atan2 -#define exp DUMMMY_exp -#define ilogb DUMMMY_ilogb -#define log DUMMMY_log -#define log10 DUMMMY_log10 -#define pow DUMMMY_pow -#define sincos DUMMMY_sincos -#define acosf DUMMMY_acosf -#define asinf DUMMMY_asinf -#define atan2f DUMMMY_atan2f -#define expf DUMMMY_expf -#define ilogbf DUMMMY_ilogbf -#define logf DUMMMY_logf -#define log10f DUMMMY_log10f -#define powf DUMMMY_powf -#define sincosf DUMMMY_sincosf -#define copysign DUMMY_copysign -#define copysignf DUMMY_copysignf -#define remove DUMMY_remove -#define printf DUMMY_printf -#define vprintf DUMMY_vprintf -#define fopen DUMMY_fopen -#define setvbuf DUMMY_setvbuf -#define fprintf DUMMY_fprintf -#define vfprintf DUMMY_vfprintf -#define fgets DUMMY_fgets -#define ferror DUMMY_ferror -#define fread DUMMY_fread -#define fwrite DUMMY_fwrite -#define ftell DUMMY_ftell -#define fclose DUMMY_fclose -#define fflush DUMMY_fflush -#define fputs DUMMY_fputs -#define fseek DUMMY_fseek -#define fgetpos DUMMY_fgetpos -#define fsetpos DUMMY_fsetpos - -/* RAND_MAX needed to be renamed to avoid duplicate definition when including - stdlib.h header files. 
PAL_RAND_MAX should have the same value as RAND_MAX - defined in pal.h */ -#define PAL_RAND_MAX 0x7fff - -/* The standard headers define isspace and isxdigit as macros and functions, - To avoid redefinition problems, undefine those macros. */ -#ifdef isspace -#undef isspace -#endif -#ifdef isxdigit -#undef isxdigit -#endif -#ifdef isalpha -#undef isalpha -#endif -#ifdef isalnum -#undef isalnum -#endif -#define isspace DUMMY_isspace -#define isxdigit DUMMY_isxdigit -#define isalpha DUMMY_isalpha -#define isalnum DUMMY_isalnum - -#ifdef stdin -#undef stdin -#endif -#ifdef stdout -#undef stdout -#endif -#ifdef stderr -#undef stderr -#endif - -#ifdef SCHAR_MIN -#undef SCHAR_MIN -#endif -#ifdef SCHAR_MAX -#undef SCHAR_MAX -#endif -#ifdef SHRT_MIN -#undef SHRT_MIN -#endif -#ifdef SHRT_MAX -#undef SHRT_MAX -#endif -#ifdef UCHAR_MAX -#undef UCHAR_MAX -#endif -#ifdef USHRT_MAX -#undef USHRT_MAX -#endif -#ifdef ULONG_MAX -#undef ULONG_MAX -#endif -#ifdef LONG_MIN -#undef LONG_MIN -#endif -#ifdef LONG_MAX -#undef LONG_MAX -#endif -#ifdef RAND_MAX -#undef RAND_MAX -#endif -#ifdef DBL_MAX -#undef DBL_MAX -#endif -#ifdef FLT_MAX -#undef FLT_MAX -#endif #ifdef __record_type_class #undef __record_type_class #endif @@ -352,24 +168,6 @@ function_name() to call the system's implementation #undef __real_type_class #endif -// The standard headers define va_start and va_end as macros, -// To avoid redefinition problems, undefine those macros. -#ifdef va_start -#undef va_start -#endif -#ifdef va_end -#undef va_end -#endif -#ifdef va_copy -#undef va_copy -#endif - -#define ptrdiff_t PAL_ptrdiff_t -#define intptr_t PAL_intptr_t -#define uintptr_t PAL_uintptr_t -#define timeval PAL_timeval - -#define DEFINE_DUMMY_FILE_TYPE #include "pal.h" #include "palprivate.h" @@ -386,222 +184,6 @@ function_name() to call the system's implementation #undef _BitScanReverse64 #endif -/* pal.h defines alloca(3) as a compiler builtin. 
- Redefining it to native libc will result in undefined breakage because - a compiler is allowed to make assumptions about the stack and frame - pointers. */ - -/* Undef all functions and types previously defined so those functions and - types could be mapped to the C runtime and socket implementation of the - native OS */ -#undef exit -#undef memcpy -#undef memcmp -#undef memset -#undef memmove -#undef memchr -#undef atoll -#undef strlen -#undef strnlen -#undef wcsnlen -#undef stricmp -#undef strstr -#undef strcmp -#undef strcat -#undef strncat -#undef strcpy -#undef strncmp -#undef strncpy -#undef strchr -#undef strrchr -#undef strpbrk -#undef strtoul -#undef strtoull -#undef strcasecmp -#undef strncasecmp -#undef strdup -#undef strtod -#undef strtok_r -#undef strdup -#undef tolower -#undef toupper -#undef isprint -#undef isdigit -#undef isspace -#undef iswdigit -#undef iswxdigit -#undef iswalpha -#undef iswprint -#undef isxdigit -#undef isalpha -#undef isalnum -#undef iswalpha -#undef iswdigit -#undef iswupper -#undef towupper -#undef towlower -#undef wint_t -#undef atoi -#undef atof -#undef malloc -#undef realloc -#undef free -#undef qsort -#undef bsearch -#undef time -#undef fclose -#undef fopen -#undef fread -#undef ferror -#undef ftell -#undef fflush -#undef fwrite -#undef fgets -#undef fputs -#undef fseek -#undef fgetpos -#undef fsetpos -#undef getcwd -#undef setvbuf -#undef unlink -#undef size_t -#undef time_t -#undef va_list -#undef va_start -#undef va_end -#undef va_copy -#undef va_arg -#undef stdin -#undef stdout -#undef stderr -#undef abs -#undef llabs -#undef acos -#undef acosh -#undef asin -#undef asinh -#undef atan -#undef atanh -#undef atan2 -#undef cbrt -#undef ceil -#undef cos -#undef cosh -#undef exp -#undef fabs -#undef floor -#undef fmod -#undef fma -#undef ilogb -#undef log -#undef log2 -#undef log10 -#undef modf -#undef pow -#undef sin -#undef sincos -#undef copysign -#undef sinh -#undef sqrt -#undef tan -#undef tanh -#undef trunc -#undef 
acosf -#undef acoshf -#undef asinf -#undef asinhf -#undef atanf -#undef atanhf -#undef atan2f -#undef cbrtf -#undef ceilf -#undef cosf -#undef coshf -#undef expf -#undef fabsf -#undef floorf -#undef fmodf -#undef fmaf -#undef ilogbf -#undef logf -#undef log2f -#undef log10f -#undef modff -#undef powf -#undef sinf -#undef sincosf -#undef copysignf -#undef sinhf -#undef sqrtf -#undef tanf -#undef tanhf -#undef truncf -#undef acos -#undef asin -#undef atan2 -#undef exp -#undef ilogb -#undef log -#undef log10 -#undef pow -#undef sincos -#undef acosf -#undef asinf -#undef atan2f -#undef expf -#undef ilogbf -#undef logf -#undef log10f -#undef powf -#undef sincosf -#undef rand -#undef srand -#undef errno -#undef getenv -#undef open -#undef glob -#undef remove -#undef printf -#undef vprintf -#undef ptrdiff_t -#undef intptr_t -#undef uintptr_t -#undef timeval - -#undef fprintf -#undef vfprintf -#undef iswupper -#undef iswspace -#undef towlower -#undef towupper - -#ifdef HOST_AMD64 -#undef _mm_getcsr -#undef _mm_setcsr -#endif // HOST_AMD64 - -#undef min -#undef max - -#undef SCHAR_MIN -#undef SCHAR_MAX -#undef UCHAR_MAX -#undef SHRT_MIN -#undef SHRT_MAX -#undef USHRT_MAX -#undef LONG_MIN -#undef LONG_MAX -#undef ULONG_MAX -#undef RAND_MAX -#undef DBL_MAX -#undef FLT_MAX -#undef __record_type_class -#undef __real_type_class - -#if HAVE_CHAR_BIT -#undef CHAR_BIT -#endif - // We need a sigsetjmp prototype in pal.h for the SEH macros, but we // can't use the "real" prototype (because we don't want to define sigjmp_buf). // So we must rename the "real" sigsetjmp to avoid redefinition errors. 
@@ -623,18 +205,6 @@ function_name() to call the system's implementation // https://gcc.gnu.org/ml/libstdc++/2016-01/msg00025.html #define _GLIBCXX_INCLUDE_NEXT_C_HEADERS 1 -#define _WITH_GETLINE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - #ifdef __APPLE__ #undef GetCurrentThread diff --git a/src/coreclr/pal/src/include/pal/stackstring.hpp b/src/coreclr/pal/src/include/pal/stackstring.hpp index 4a27a15579c7de..22e79a571502aa 100644 --- a/src/coreclr/pal/src/include/pal/stackstring.hpp +++ b/src/coreclr/pal/src/include/pal/stackstring.hpp @@ -21,7 +21,7 @@ class StackString void DeleteBuffer() { if (m_innerBuffer != m_buffer) - PAL_free(m_buffer); + free(m_buffer); m_buffer = NULL; return; @@ -44,7 +44,7 @@ class StackString m_buffer = NULL; } - T * newBuffer = (T *)PAL_realloc(m_buffer, (count_allocated + 1) * sizeof(T)); + T * newBuffer = (T *)realloc(m_buffer, (count_allocated + 1) * sizeof(T)); if (NULL == newBuffer) { SetLastError(ERROR_NOT_ENOUGH_MEMORY); diff --git a/src/coreclr/pal/src/include/pal/utils.h b/src/coreclr/pal/src/include/pal/utils.h index fdd5b3b965a167..980cdf56ab6c60 100644 --- a/src/coreclr/pal/src/include/pal/utils.h +++ b/src/coreclr/pal/src/include/pal/utils.h @@ -194,7 +194,7 @@ class StringHolder StringHolder() : data(NULL) { } ~StringHolder() { - PAL_free( data); + free( data); } operator LPSTR () { return data;} diff --git a/src/coreclr/pal/src/init/pal.cpp b/src/coreclr/pal/src/init/pal.cpp index 9d0c82ac4ae53f..67fcbb92bd2512 100644 --- a/src/coreclr/pal/src/init/pal.cpp +++ b/src/coreclr/pal/src/init/pal.cpp @@ -1170,7 +1170,7 @@ static LPWSTR INIT_FormatCommandLine (int argc, const char * const *argv) length+=3; length+=strlen(argv[i])*2; } - command_line = reinterpret_cast(InternalMalloc(length)); + command_line = reinterpret_cast(malloc(length != 0 ? 
length : 1)); if(!command_line) { @@ -1222,7 +1222,7 @@ static LPWSTR INIT_FormatCommandLine (int argc, const char * const *argv) return nullptr; } - retval = reinterpret_cast(InternalMalloc((sizeof(WCHAR)*i))); + retval = reinterpret_cast(malloc((sizeof(WCHAR)*i))); if(retval == nullptr) { ERROR("can't allocate memory for Unicode command line!\n"); @@ -1278,7 +1278,7 @@ static LPWSTR INIT_GetCurrentEXEPath() return nullptr; } - return_value = reinterpret_cast(InternalMalloc((return_size*sizeof(WCHAR)))); + return_value = reinterpret_cast(malloc((return_size*sizeof(WCHAR)))); if (nullptr == return_value) { ERROR("Not enough memory to create full path\n"); diff --git a/src/coreclr/pal/src/loader/module.cpp b/src/coreclr/pal/src/loader/module.cpp index f0651d3bad5861..0cda5045e01ef1 100644 --- a/src/coreclr/pal/src/loader/module.cpp +++ b/src/coreclr/pal/src/loader/module.cpp @@ -1548,7 +1548,7 @@ static MODSTRUCT *LOADAllocModule(NATIVE_LIBRARY_HANDLE dl_handle, LPCSTR name) LPWSTR wide_name; /* no match found : try to create a new module structure */ - module = (MODSTRUCT *)InternalMalloc(sizeof(MODSTRUCT)); + module = (MODSTRUCT *)malloc(sizeof(MODSTRUCT)); if (nullptr == module) { ERROR("malloc() failed! 
errno is %d (%s)\n", errno, strerror(errno)); @@ -1805,11 +1805,11 @@ MODSTRUCT *LOADGetPalLibrary() if (g_szCoreCLRPath == nullptr) { size_t cbszCoreCLRPath = strlen(info.dli_fname) + 1; - g_szCoreCLRPath = (char*) InternalMalloc(cbszCoreCLRPath); + g_szCoreCLRPath = (char*) malloc(cbszCoreCLRPath); if (g_szCoreCLRPath == nullptr) { - ERROR("LOADGetPalLibrary: InternalMalloc failed!"); + ERROR("LOADGetPalLibrary: malloc failed!"); goto exit; } diff --git a/src/coreclr/pal/src/map/map.cpp b/src/coreclr/pal/src/map/map.cpp index 707284b58fad95..4f8cb6190c6d99 100644 --- a/src/coreclr/pal/src/map/map.cpp +++ b/src/coreclr/pal/src/map/map.cpp @@ -35,6 +35,7 @@ Module Name: #include #include #include +#include #include #include "rt/ntimage.h" @@ -1128,7 +1129,7 @@ CorUnix::InternalMapViewOfFile( // the global list. // - PMAPPED_VIEW_LIST pNewView = (PMAPPED_VIEW_LIST)InternalMalloc(sizeof(*pNewView)); + PMAPPED_VIEW_LIST pNewView = (PMAPPED_VIEW_LIST)malloc(sizeof(*pNewView)); if (NULL != pNewView) { pNewView->lpAddress = pvBaseAddress; @@ -1832,7 +1833,7 @@ static PMAPPED_VIEW_LIST FindSharedMappingReplacement( /* The new desired mapping is fully contained in the one just found: we can reuse this one */ - pNewView = (PMAPPED_VIEW_LIST)InternalMalloc(sizeof(MAPPED_VIEW_LIST)); + pNewView = (PMAPPED_VIEW_LIST)malloc(sizeof(MAPPED_VIEW_LIST)); if (pNewView) { memcpy(pNewView, pView, sizeof(*pNewView)); @@ -1867,7 +1868,7 @@ static NativeMapHolder * NewNativeMapHolder(CPalThread *pThread, LPVOID address, } pThisMapHolder = - (NativeMapHolder *)InternalMalloc(sizeof(NativeMapHolder)); + (NativeMapHolder *)malloc(sizeof(NativeMapHolder)); if (pThisMapHolder) { @@ -1933,7 +1934,7 @@ MAPRecordMapping( PAL_ERROR palError = NO_ERROR; PMAPPED_VIEW_LIST pNewView; - pNewView = (PMAPPED_VIEW_LIST)InternalMalloc(sizeof(*pNewView)); + pNewView = (PMAPPED_VIEW_LIST)malloc(sizeof(*pNewView)); if (NULL != pNewView) { pNewView->lpAddress = addr; diff --git 
a/src/coreclr/pal/src/map/virtual.cpp b/src/coreclr/pal/src/map/virtual.cpp index 364f3bba1f0255..3145faac5f5056 100644 --- a/src/coreclr/pal/src/map/virtual.cpp +++ b/src/coreclr/pal/src/map/virtual.cpp @@ -401,7 +401,7 @@ static BOOL VIRTUALStoreAllocationInfo( return FALSE; } - if (!(pNewEntry = (PCMI)InternalMalloc(sizeof(*pNewEntry)))) + if (!(pNewEntry = (PCMI)malloc(sizeof(*pNewEntry)))) { ERROR( "Unable to allocate memory for the structure.\n"); return FALSE; diff --git a/src/coreclr/pal/src/misc/cgroup.cpp b/src/coreclr/pal/src/misc/cgroup.cpp index f23ff4c970fe9b..ecdbccf2ee669f 100644 --- a/src/coreclr/pal/src/misc/cgroup.cpp +++ b/src/coreclr/pal/src/misc/cgroup.cpp @@ -54,7 +54,7 @@ class CGroup static void Cleanup() { - PAL_free(s_cpu_cgroup_path); + free(s_cpu_cgroup_path); } static bool GetCpuLimit(UINT *val) @@ -129,7 +129,7 @@ class CGroup len = strlen(hierarchy_mount); len += strlen(cgroup_path_relative_to_mount); - cgroup_path = (char*)PAL_malloc(len+1); + cgroup_path = (char*)malloc(len+1); if (cgroup_path == nullptr) goto done; @@ -160,8 +160,8 @@ class CGroup strcat_s(cgroup_path, len+1, cgroup_path_relative_to_mount + common_path_prefix_len); done: - PAL_free(hierarchy_root); - PAL_free(cgroup_path_relative_to_mount); + free(hierarchy_root); + free(cgroup_path_relative_to_mount); *pcgroup_path = cgroup_path; if (pcgroup_hierarchy_mount != nullptr) { @@ -169,7 +169,7 @@ class CGroup } else { - PAL_free(hierarchy_mount); + free(hierarchy_mount); } } @@ -190,14 +190,14 @@ class CGroup { if (filesystemType == nullptr || lineLen > maxLineLen) { - PAL_free(filesystemType); + free(filesystemType); filesystemType = nullptr; - PAL_free(options); + free(options); options = nullptr; - filesystemType = (char*)PAL_malloc(lineLen+1); + filesystemType = (char*)malloc(lineLen+1); if (filesystemType == nullptr) goto done; - options = (char*)PAL_malloc(lineLen+1); + options = (char*)malloc(lineLen+1); if (options == nullptr) goto done; maxLineLen = lineLen; 
@@ -230,10 +230,10 @@ class CGroup } if (isSubsystemMatch) { - mountpath = (char*)PAL_malloc(lineLen+1); + mountpath = (char*)malloc(lineLen+1); if (mountpath == nullptr) goto done; - mountroot = (char*)PAL_malloc(lineLen+1); + mountroot = (char*)malloc(lineLen+1); if (mountroot == nullptr) goto done; @@ -252,10 +252,10 @@ class CGroup } } done: - PAL_free(mountpath); - PAL_free(mountroot); - PAL_free(filesystemType); - PAL_free(options); + free(mountpath); + free(mountroot); + free(filesystemType); + free(options); free(line); if (mountinfofile) fclose(mountinfofile); @@ -278,14 +278,14 @@ class CGroup { if (subsystem_list == nullptr || lineLen > maxLineLen) { - PAL_free(subsystem_list); + free(subsystem_list); subsystem_list = nullptr; - PAL_free(cgroup_path); + free(cgroup_path); cgroup_path = nullptr; - subsystem_list = (char*)PAL_malloc(lineLen+1); + subsystem_list = (char*)malloc(lineLen+1); if (subsystem_list == nullptr) goto done; - cgroup_path = (char*)PAL_malloc(lineLen+1); + cgroup_path = (char*)malloc(lineLen+1); if (cgroup_path == nullptr) goto done; maxLineLen = lineLen; @@ -335,10 +335,10 @@ class CGroup } } done: - PAL_free(subsystem_list); + free(subsystem_list); if (!result) { - PAL_free(cgroup_path); + free(cgroup_path); cgroup_path = nullptr; } free(line); diff --git a/src/coreclr/pal/src/misc/environ.cpp b/src/coreclr/pal/src/misc/environ.cpp index a31d6b177760b9..4980d213fa3bc5 100644 --- a/src/coreclr/pal/src/misc/environ.cpp +++ b/src/coreclr/pal/src/misc/environ.cpp @@ -183,7 +183,7 @@ GetEnvironmentVariableW( goto done; } - inBuff = (CHAR *)PAL_malloc(inBuffSize); + inBuff = (CHAR *)malloc(inBuffSize); if (inBuff == nullptr) { ERROR("malloc failed\n"); @@ -193,7 +193,7 @@ GetEnvironmentVariableW( if (nSize) { - outBuff = (CHAR *)PAL_malloc(nSize*2); + outBuff = (CHAR *)malloc(nSize*2); if (outBuff == nullptr) { ERROR("malloc failed\n"); @@ -243,8 +243,8 @@ GetEnvironmentVariableW( } done: - PAL_free(outBuff); - PAL_free(inBuff); + 
free(outBuff); + free(inBuff); LOGEXIT("GetEnvironmentVariableW returns DWORD 0x%x\n", size); PERF_EXIT(GetEnvironmentVariableW); @@ -310,7 +310,7 @@ SetEnvironmentVariableW( goto done; } - name = (PCHAR)PAL_malloc(sizeof(CHAR)* nameSize); + name = (PCHAR)malloc(sizeof(CHAR)* nameSize); if (name == nullptr) { ERROR("malloc failed\n"); @@ -336,7 +336,7 @@ SetEnvironmentVariableW( goto done; } - value = (PCHAR)PAL_malloc(sizeof(CHAR)*valueSize); + value = (PCHAR)malloc(sizeof(CHAR)*valueSize); if (value == nullptr) { @@ -356,8 +356,8 @@ SetEnvironmentVariableW( bRet = SetEnvironmentVariableA(name, value); done: - PAL_free(value); - PAL_free(name); + free(value); + free(name); LOGEXIT("SetEnvironmentVariableW returning BOOL %d\n", bRet); PERF_EXIT(SetEnvironmentVariableW); @@ -414,7 +414,7 @@ GetEnvironmentStringsW( envNum += len; } - wenviron = (WCHAR *)PAL_malloc(sizeof(WCHAR)* (envNum + 1)); + wenviron = (WCHAR *)malloc(sizeof(WCHAR)* (envNum + 1)); if (wenviron == nullptr) { ERROR("malloc failed\n"); @@ -476,7 +476,7 @@ FreeEnvironmentStringsW( if (lpValue != nullptr) { - PAL_free(lpValue); + free(lpValue); } LOGEXIT("FreeEnvironmentStringW returning BOOL TRUE\n"); @@ -559,7 +559,7 @@ SetEnvironmentVariableA( { // All the conditions are met. Set the variable. int iLen = strlen(lpName) + strlen(lpValue) + 2; - LPSTR string = (LPSTR) PAL_malloc(iLen); + LPSTR string = (LPSTR) malloc(iLen); if (string == nullptr) { bRet = FALSE; @@ -571,7 +571,7 @@ SetEnvironmentVariableA( sprintf_s(string, iLen, "%s=%s", lpName, lpValue); nResult = EnvironPutenv(string, FALSE) ? 0 : -1; - PAL_free(string); + free(string); string = nullptr; // If EnvironPutenv returns FALSE, it almost certainly failed to allocate memory. 
diff --git a/src/coreclr/pal/src/misc/fmtmessage.cpp b/src/coreclr/pal/src/misc/fmtmessage.cpp index c7de98718c1d35..0598914b06cb51 100644 --- a/src/coreclr/pal/src/misc/fmtmessage.cpp +++ b/src/coreclr/pal/src/misc/fmtmessage.cpp @@ -61,7 +61,7 @@ static LPWSTR FMTMSG_GetMessageString( DWORD dwErrCode ) allocChars = MAX_ERROR_STRING_LENGTH + 1; } - LPWSTR lpRetVal = (LPWSTR)PAL_malloc(allocChars * sizeof(WCHAR)); + LPWSTR lpRetVal = (LPWSTR)malloc(allocChars * sizeof(WCHAR)); if (lpRetVal) { @@ -140,7 +140,7 @@ static INT FMTMSG__watoi( LPWSTR str ) UINT NumOfBytes = 0; \ nSize *= 2; \ NumOfBytes = nSize * sizeof( WCHAR ); \ - lpTemp = static_cast( PAL_malloc( NumOfBytes ) ); \ + lpTemp = static_cast( malloc( NumOfBytes ) ); \ TRACE( "Growing the buffer.\n" );\ \ if ( !lpTemp ) \ @@ -327,7 +327,7 @@ FormatMessageW( } lpWorkingString = static_cast( - PAL_malloc( nSize * sizeof( WCHAR ) ) ); + malloc( nSize * sizeof( WCHAR ) ) ); if ( !lpWorkingString ) { ERROR( "Unable to allocate memory for the working string.\n" ); diff --git a/src/coreclr/pal/src/misc/miscpalapi.cpp b/src/coreclr/pal/src/misc/miscpalapi.cpp index 06129210ef9c9f..f0d32f0388e86f 100644 --- a/src/coreclr/pal/src/misc/miscpalapi.cpp +++ b/src/coreclr/pal/src/misc/miscpalapi.cpp @@ -29,6 +29,7 @@ Revision History: #include #include +#include #include #include #include diff --git a/src/coreclr/pal/src/misc/perftrace.cpp b/src/coreclr/pal/src/misc/perftrace.cpp index a0d52a415017bf..9419005099f415 100644 --- a/src/coreclr/pal/src/misc/perftrace.cpp +++ b/src/coreclr/pal/src/misc/perftrace.cpp @@ -321,7 +321,7 @@ PERFInitialize(LPWSTR command_line, LPWSTR exe_path) if( ret == TRUE ) { - pal_function_map = (char*)PAL_malloc(PAL_API_NUMBER); + pal_function_map = (char*)malloc(PAL_API_NUMBER); if(pal_function_map != NULL) { bRead = PERFReadSetting( ); // we don't quit even we failed to read the file. 
@@ -355,7 +355,7 @@ void PERFTerminate( ) PERFlushAllLogs(); pthread_key_delete(PERF_tlsTableKey ); - PAL_free(pal_function_map); + free(pal_function_map); } @@ -376,21 +376,21 @@ BOOL PERFAllocThreadInfo( ) memory resources could be exhausted. If this ever becomes a problem, the memory allocated per thread should be freed when a thread exits. */ - node = ( pal_thread_list_node * )PAL_malloc(sizeof(pal_thread_list_node)); + node = ( pal_thread_list_node * )malloc(sizeof(pal_thread_list_node)); if(node == NULL) { ret = FALSE; goto PERFAllocThreadInfoExit; } - local_info = (pal_perf_thread_info *)PAL_malloc(sizeof(pal_perf_thread_info)); + local_info = (pal_perf_thread_info *)malloc(sizeof(pal_perf_thread_info)); if (local_info == NULL) { ret = FALSE; goto PERFAllocThreadInfoExit; } - apiTable = (pal_perf_api_info *)PAL_malloc( PAL_API_NUMBER * sizeof(pal_perf_api_info)); + apiTable = (pal_perf_api_info *)malloc( PAL_API_NUMBER * sizeof(pal_perf_api_info)); if (apiTable == NULL) { ret = FALSE; @@ -411,7 +411,7 @@ BOOL PERFAllocThreadInfo( ) apiTable[i].sum_of_square_duration = 0.0; if (pal_perf_histogram_size > 0) { - apiTable[i].histograms = (DWORD *)PAL_malloc(pal_perf_histogram_size*sizeof(DWORD)); + apiTable[i].histograms = (DWORD *)malloc(pal_perf_histogram_size*sizeof(DWORD)); if (apiTable[i].histograms == NULL) { ret = FALSE; @@ -425,7 +425,7 @@ BOOL PERFAllocThreadInfo( ) } } - log_buf = (char * )PAL_malloc( PAL_PERF_PROFILE_BUFFER_SIZE ); + log_buf = (char * )malloc( PAL_PERF_PROFILE_BUFFER_SIZE ); if(log_buf == NULL) { @@ -454,11 +454,11 @@ BOOL PERFAllocThreadInfo( ) { if (node != NULL) { - PAL_free(node); + free(node); } if (local_info != NULL) { - PAL_free(local_info); + free(local_info); } if (apiTable != NULL) { @@ -466,14 +466,14 @@ BOOL PERFAllocThreadInfo( ) { if (apiTable[i].histograms != NULL) { - PAL_free(apiTable[i].histograms); + free(apiTable[i].histograms); } } - PAL_free(apiTable); + free(apiTable); } if (log_buf != NULL) { - 
PAL_free(log_buf); + free(log_buf); } } return ret; @@ -554,26 +554,26 @@ PERFlushAllLogs( ) PERFUpdateProgramInfo(current->thread_info); if (table1->histograms != NULL) { - PAL_free(table1->histograms); + free(table1->histograms); } - PAL_free(table1); + free(table1); } PERFFlushLog(current->thread_info, FALSE); - PAL_free(current->thread_info->pal_write_buf); - PAL_free(current->thread_info); + free(current->thread_info->pal_write_buf); + free(current->thread_info); } - PAL_free(current); + free(current); } PERFWriteCounters(table0); if (table0->histograms != NULL) { - PAL_free(table0->histograms); + free(table0->histograms); } - PAL_free(table0); + free(table0); PERFFlushLog(node->thread_info, FALSE); - PAL_free(node->thread_info->pal_write_buf); - PAL_free(node->thread_info); - PAL_free(node); + free(node->thread_info->pal_write_buf); + free(node->thread_info); + free(node); } static diff --git a/src/coreclr/pal/src/misc/strutil.cpp b/src/coreclr/pal/src/misc/strutil.cpp index ed29831232cab6..e665e22b652914 100644 --- a/src/coreclr/pal/src/misc/strutil.cpp +++ b/src/coreclr/pal/src/misc/strutil.cpp @@ -53,7 +53,7 @@ CPalString::CopyString( _ASSERTE(psSource->GetMaxLength() > psSource->GetStringLength()); WCHAR *pwsz = reinterpret_cast( - InternalMalloc(psSource->GetMaxLength() * sizeof(WCHAR)) + malloc(psSource->GetMaxLength() * sizeof(WCHAR)) ); if (NULL != pwsz) diff --git a/src/coreclr/pal/src/misc/utils.cpp b/src/coreclr/pal/src/misc/utils.cpp index 0d96cc991305ae..261be25bcabaa4 100644 --- a/src/coreclr/pal/src/misc/utils.cpp +++ b/src/coreclr/pal/src/misc/utils.cpp @@ -190,7 +190,7 @@ LPSTR UTIL_WCToMB_Alloc(LPCWSTR lpWideCharStr, int cchWideChar) } /* allocate required buffer */ - lpMultiByteStr = (LPSTR)PAL_malloc(length); + lpMultiByteStr = (LPSTR)malloc(length); if(NULL == lpMultiByteStr) { ERROR("malloc() failed! 
errno is %d (%s)\n", errno,strerror(errno)); @@ -204,7 +204,7 @@ LPSTR UTIL_WCToMB_Alloc(LPCWSTR lpWideCharStr, int cchWideChar) if(0 == length) { ASSERT("WCToMB error; GetLastError returns %#x\n", GetLastError()); - PAL_free(lpMultiByteStr); + free(lpMultiByteStr); return NULL; } return lpMultiByteStr; @@ -250,7 +250,7 @@ LPWSTR UTIL_MBToWC_Alloc(LPCSTR lpMultiByteStr, int cbMultiByte) return NULL; } - lpWideCharStr = (LPWSTR)PAL_malloc(fullsize); + lpWideCharStr = (LPWSTR)malloc(fullsize); if(NULL == lpWideCharStr) { ERROR("malloc() failed! errno is %d (%s)\n", errno,strerror(errno)); @@ -264,7 +264,7 @@ LPWSTR UTIL_MBToWC_Alloc(LPCSTR lpMultiByteStr, int cbMultiByte) if(0 >= length) { ASSERT("MCToMB error; GetLastError returns %#x\n", GetLastError()); - PAL_free(lpWideCharStr); + free(lpWideCharStr); return NULL; } return lpWideCharStr; diff --git a/src/coreclr/pal/src/objmgr/palobjbase.cpp b/src/coreclr/pal/src/objmgr/palobjbase.cpp index dbfdf3b0c71565..c39b5df7e268f3 100644 --- a/src/coreclr/pal/src/objmgr/palobjbase.cpp +++ b/src/coreclr/pal/src/objmgr/palobjbase.cpp @@ -58,7 +58,7 @@ CPalObjectBase::Initialize( if (0 != m_pot->GetImmutableDataSize()) { - m_pvImmutableData = InternalMalloc(m_pot->GetImmutableDataSize()); + m_pvImmutableData = malloc(m_pot->GetImmutableDataSize()); if (NULL != m_pvImmutableData) { ZeroMemory(m_pvImmutableData, m_pot->GetImmutableDataSize()); @@ -80,7 +80,7 @@ CPalObjectBase::Initialize( goto InitializeExit; } - m_pvLocalData = InternalMalloc(m_pot->GetProcessLocalDataSize()); + m_pvLocalData = malloc(m_pot->GetProcessLocalDataSize()); if (NULL != m_pvLocalData) { ZeroMemory(m_pvLocalData, m_pot->GetProcessLocalDataSize()); diff --git a/src/coreclr/pal/src/objmgr/shmobject.cpp b/src/coreclr/pal/src/objmgr/shmobject.cpp index 55b0e87c088a18..282dd113da2e21 100644 --- a/src/coreclr/pal/src/objmgr/shmobject.cpp +++ b/src/coreclr/pal/src/objmgr/shmobject.cpp @@ -119,7 +119,7 @@ CSharedMemoryObject::Initialize( // Allocate local 
memory to hold the shared data // - m_pvSharedData = InternalMalloc(m_pot->GetSharedDataSize()); + m_pvSharedData = malloc(m_pot->GetSharedDataSize()); if (NULL == m_pvSharedData) { ERROR("Failure allocating m_pvSharedData (local copy)\n"); diff --git a/src/coreclr/pal/src/safecrt/input.inl b/src/coreclr/pal/src/safecrt/input.inl index 9934eeb33f54f3..556fafa6f6b4fa 100644 --- a/src/coreclr/pal/src/safecrt/input.inl +++ b/src/coreclr/pal/src/safecrt/input.inl @@ -46,9 +46,9 @@ #define _istspace(x) isspace((unsigned char)x) -#define _malloc_crt PAL_malloc -#define _realloc_crt PAL_realloc -#define _free_crt PAL_free +#define _malloc_crt malloc +#define _realloc_crt realloc +#define _free_crt free #define _FASSIGN(flag, argument, number, dec_point, locale) _safecrt_fassign((flag), (argument), (number)) #define _WFASSIGN(flag, argument, number, dec_point, locale) _safecrt_wfassign((flag), (argument), (number)) diff --git a/src/coreclr/pal/src/sharedmemory/sharedmemory.cpp b/src/coreclr/pal/src/sharedmemory/sharedmemory.cpp index ea5aae444dad00..ba9447b889c39c 100644 --- a/src/coreclr/pal/src/sharedmemory/sharedmemory.cpp +++ b/src/coreclr/pal/src/sharedmemory/sharedmemory.cpp @@ -139,7 +139,7 @@ const UINT64 SharedMemoryHelpers::InvalidSharedThreadId = static_cast(-1 void *SharedMemoryHelpers::Alloc(SIZE_T byteCount) { - void *buffer = InternalMalloc(byteCount); + void *buffer = malloc(byteCount != 0 ? 
byteCount : 1); if (buffer == nullptr) { throw SharedMemoryException(static_cast(SharedMemoryError::OutOfMemory)); diff --git a/src/coreclr/pal/src/synchmgr/synchmanager.cpp b/src/coreclr/pal/src/synchmgr/synchmanager.cpp index c34aa4e27fc8c4..c6b0b3db1cfd67 100644 --- a/src/coreclr/pal/src/synchmgr/synchmanager.cpp +++ b/src/coreclr/pal/src/synchmgr/synchmanager.cpp @@ -28,6 +28,7 @@ SET_DEFAULT_DEBUG_CHANNEL(SYNC); // some headers have code with asserts, so do t #include #include #include +#include #include #include #include diff --git a/src/coreclr/pal/src/synchmgr/synchmanager.hpp b/src/coreclr/pal/src/synchmgr/synchmanager.hpp index 925b896e7e5728..ce325f75ecc1e6 100644 --- a/src/coreclr/pal/src/synchmgr/synchmanager.hpp +++ b/src/coreclr/pal/src/synchmgr/synchmanager.hpp @@ -496,7 +496,7 @@ namespace CorUnix class CPalSynchronizationManager : public IPalSynchronizationManager { friend class CPalSynchMgrController; - template friend T *CorUnix::InternalNew(); + template friend T *CorUnix::InternalNew(Ts... 
args); public: // types diff --git a/src/coreclr/pal/src/thread/process.cpp b/src/coreclr/pal/src/thread/process.cpp index 757ed25ade1d6f..033996645cb5ec 100644 --- a/src/coreclr/pal/src/thread/process.cpp +++ b/src/coreclr/pal/src/thread/process.cpp @@ -44,6 +44,7 @@ SET_DEFAULT_DEBUG_CHANNEL(PROCESS); // some headers have code with asserts, so d #endif // HAVE_POLL #include +#include #include #include #include @@ -84,6 +85,7 @@ SET_DEFAULT_DEBUG_CHANNEL(PROCESS); // some headers have code with asserts, so d #ifdef __APPLE__ #include +#include #include #include #include @@ -732,7 +734,7 @@ CorUnix::InternalCreateProcess( } } EnvironmentEntries++; - EnvironmentArray = (char **)InternalMalloc(EnvironmentEntries * sizeof(char *)); + EnvironmentArray = (char **)malloc(EnvironmentEntries * sizeof(char *)); EnvironmentEntries = 0; // Convert the environment block to array of strings @@ -2026,7 +2028,7 @@ PROCNotifyProcessShutdownDestructor() char* PROCFormatInt(ULONG32 value) { - char* buffer = (char*)InternalMalloc(128); + char* buffer = (char*)malloc(128); if (buffer != nullptr) { if (sprintf_s(buffer, 128, "%d", value) == -1) @@ -2048,7 +2050,7 @@ PROCFormatInt(ULONG32 value) char* PROCFormatInt64(ULONG64 value) { - char* buffer = (char*)InternalMalloc(128); + char* buffer = (char*)malloc(128); if (buffer != nullptr) { if (sprintf_s(buffer, 128, "%lld", value) == -1) @@ -2087,7 +2089,7 @@ PROCBuildCreateDumpCommandLine( } const char* DumpGeneratorName = "createdump"; int programLen = strlen(g_szCoreCLRPath) + strlen(DumpGeneratorName) + 1; - char* program = *pprogram = (char*)InternalMalloc(programLen); + char* program = *pprogram = (char*)malloc(programLen); if (program == nullptr) { return FALSE; @@ -2833,7 +2835,7 @@ CorUnix::InitializeProcessCommandLine( size_t n = PAL_wcslen(lpwstrFullPath) + 1; size_t iLen = n; - initial_dir = reinterpret_cast(InternalMalloc(iLen*sizeof(WCHAR))); + initial_dir = reinterpret_cast(malloc(iLen*sizeof(WCHAR))); if (NULL == 
initial_dir) { ERROR("malloc() failed! (initial_dir) \n"); @@ -3760,7 +3762,7 @@ buildArgv( pThread = InternalGetCurrentThread(); /* make sure to allocate enough space, up for the worst case scenario */ int iLength = (iWlen + lpAppPath.GetCount() + 2); - lpAsciiCmdLine = (char *) InternalMalloc(iLength); + lpAsciiCmdLine = (char *) malloc(iLength); if (lpAsciiCmdLine == NULL) { @@ -3940,7 +3942,7 @@ buildArgv( /* allocate lppargv according to the number of arguments in the command line */ - lppArgv = (char **) InternalMalloc((((*pnArg)+1) * sizeof(char *))); + lppArgv = (char **) malloc((((*pnArg)+1) * sizeof(char *))); if (lppArgv == NULL) { diff --git a/src/coreclr/pal/src/thread/thread.cpp b/src/coreclr/pal/src/thread/thread.cpp index 9420a442c1f6ae..d388521da16055 100644 --- a/src/coreclr/pal/src/thread/thread.cpp +++ b/src/coreclr/pal/src/thread/thread.cpp @@ -1564,7 +1564,7 @@ CorUnix::InternalSetThreadDescription( goto InternalSetThreadDescriptionExit; } - nameBuf = (char *)PAL_malloc(nameSize); + nameBuf = (char *)malloc(nameSize); if (nameBuf == NULL) { palError = ERROR_OUTOFMEMORY; @@ -1616,7 +1616,7 @@ CorUnix::InternalSetThreadDescription( } if (NULL != nameBuf) { - PAL_free(nameBuf); + free(nameBuf); } #endif //defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) diff --git a/src/coreclr/pal/src/thread/threadsusp.cpp b/src/coreclr/pal/src/thread/threadsusp.cpp index d2fae05c4256f6..867f46b3fa38f2 100644 --- a/src/coreclr/pal/src/thread/threadsusp.cpp +++ b/src/coreclr/pal/src/thread/threadsusp.cpp @@ -29,6 +29,7 @@ Revision History: #include #include +#include #include #include #include diff --git a/src/coreclr/pal/tests/palsuite/CMakeLists.txt b/src/coreclr/pal/tests/palsuite/CMakeLists.txt index 4b9960f910c5c1..3d5dc9a749089d 100644 --- a/src/coreclr/pal/tests/palsuite/CMakeLists.txt +++ b/src/coreclr/pal/tests/palsuite/CMakeLists.txt @@ -65,9 +65,6 @@ add_executable_clr(paltests #composite/wfmo/mutex.cpp c_runtime/atof/test1/test1.cpp 
c_runtime/atoi/test1/test1.cpp - c_runtime/bsearch/test1/test1.cpp - c_runtime/bsearch/test2/test2.cpp - c_runtime/free/test1/test1.cpp c_runtime/isalnum/test1/test1.cpp c_runtime/isalpha/test1/test1.cpp c_runtime/isdigit/test1/test1.cpp @@ -80,16 +77,10 @@ add_executable_clr(paltests c_runtime/iswupper/test1/test1.cpp c_runtime/isxdigit/test1/test1.cpp c_runtime/llabs/test1/test1.cpp - c_runtime/malloc/test1/test1.cpp - c_runtime/malloc/test2/test2.cpp c_runtime/memchr/test1/test1.cpp c_runtime/memcmp/test1/test1.cpp c_runtime/memmove/test1/test1.cpp c_runtime/memset/test1/test1.cpp - c_runtime/qsort/test1/test1.cpp - c_runtime/qsort/test2/test2.cpp - c_runtime/rand_srand/test1/test1.cpp - c_runtime/realloc/test1/test1.cpp c_runtime/sscanf_s/test1/test1.cpp c_runtime/sscanf_s/test10/test10.cpp c_runtime/sscanf_s/test11/test11.cpp @@ -118,7 +109,6 @@ add_executable_clr(paltests c_runtime/strpbrk/test1/test1.cpp c_runtime/strrchr/test1/test1.cpp c_runtime/strstr/test1/test1.cpp - c_runtime/time/test1/test1.cpp c_runtime/tolower/test1/test1.cpp c_runtime/toupper/test1/test1.cpp c_runtime/towlower/test1/test1.cpp @@ -178,7 +168,6 @@ add_executable_clr(paltests c_runtime/_wfopen/test6/test6.cpp c_runtime/_wfopen/test7/test7.cpp c_runtime/_wtoi/test1/test1.cpp - c_runtime/__iscsym/test1/__iscsym.cpp #debug_api/DebugBreak/test1/test1.cpp debug_api/OutputDebugStringA/test1/helper.cpp debug_api/OutputDebugStringA/test1/test1.cpp diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/__iscsym/test1/__iscsym.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/__iscsym/test1/__iscsym.cpp deleted file mode 100644 index 9244c5f0a32e2d..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/__iscsym/test1/__iscsym.cpp +++ /dev/null @@ -1,92 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
- -/*============================================================= -** -** Source: __iscsym.c -** -** Purpose: Positive test the __iscsym API. -** Call __iscsym to letter, digit and underscore -** -** -**============================================================*/ -#include - -PALTEST(c_runtime___iscsym_test1_paltest_iscsym_test1, "c_runtime/__iscsym/test1/paltest_iscsym_test1") -{ - int err; - int index; - char non_letter_set[]= - {'~','`','!','@','#','$','%','^','&','*','(',')',')', - '-','+','=','|','\\',';',':','"','\'','<','>', - ',','.','?','/','\0'}; - char errBuffer[200]; - - /*Initialize the PAL environment*/ - err = PAL_Initialize(argc, argv); - if(0 != err) - { - return FAIL; - } - - /*To check if the parameter passed in is a character*/ - for(index = 'a'; index <= 'z'; index++) - { - err = __iscsym(index); - if(0 == err) - { - Fail("\n__iscsym failed to recognize a " - "lower-case letter:%c!\n", index); - } - } - - /*To check if the parameter passed in is a character*/ - for(index = 'A'; index <= 'Z'; index++) - { - err = __iscsym(index); - if(0 == err) - { - Fail("\n__iscsym failed to recognize an " - "upper-case letter: %c!\n", index); - } - } - - /*To check if the parameter passed in is a digit*/ - for(index = '0'; index <= '9'; index++) - { - err = __iscsym(index); - if(0 == err) - { - Fail("\n__iscsym failed to recognize a digit %c!\n", - index); - } - } - - /*To check if the parameter passed in is a underscore*/ - err = __iscsym('_'); - if(0 == err) - { - Fail("\n__iscsym failed to recognize an underscore!\n"); - } - - memset(errBuffer, 0, 200); - - for(index = 0; non_letter_set[index]; index++) - { - err = __iscsym(non_letter_set[index]); - if(0 != err) - { - strncat(errBuffer, &non_letter_set[index], 1); - strcat(errBuffer, ", "); - } - } - - if(strlen(errBuffer) > 0) - { - Fail("\n__iscsym failed to identify the characters '%s' " - "as not letters, digits " - "or underscores\n", errBuffer); - } - PAL_Terminate(); - return PASS; -} diff --git 
a/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test1/test1.cpp index a7ebbe4fa6189d..6b9b6d94ee5302 100644 --- a/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test1/test1.cpp +++ b/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test1/test1.cpp @@ -56,7 +56,7 @@ PALTEST(c_runtime__putenv_test1_paltest_putenv_test1, "c_runtime/_putenv/test1/p */ if (TestCases[i].bValidString == TRUE) { - variableValue = getenv(TestCases[i].varName); + variableValue = PAL_getenv(TestCases[i].varName); if (variableValue == NULL) { @@ -81,7 +81,7 @@ PALTEST(c_runtime__putenv_test1_paltest_putenv_test1, "c_runtime/_putenv/test1/p * Check to see that putenv fails for malformed _putenvString values */ { - variableValue = getenv(TestCases[i].varName); + variableValue = PAL_getenv(TestCases[i].varName); if (variableValue != NULL) { diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test2/test2.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test2/test2.cpp index ee84e375c2e2ec..ef118e513260b5 100644 --- a/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test2/test2.cpp +++ b/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test2/test2.cpp @@ -35,7 +35,7 @@ PALTEST(c_runtime__putenv_test2_paltest_putenv_test2, "c_runtime/_putenv/test2/p "_putenv(%s)\n", _putenvString0); } - variableValue = getenv(variable); + variableValue = PAL_getenv(variable); if (variableValue == NULL) { @@ -60,7 +60,7 @@ PALTEST(c_runtime__putenv_test2_paltest_putenv_test2, "c_runtime/_putenv/test2/p "_putenv(%s)\n", _putenvString1); } - variableValue = getenv(variable); + variableValue = PAL_getenv(variable); if (variableValue != NULL) { diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test3/test3.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test3/test3.cpp index ab1397193ce3e1..07380e1a514523 100644 --- a/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test3/test3.cpp +++ 
b/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test3/test3.cpp @@ -50,7 +50,7 @@ PALTEST(c_runtime__putenv_test3_paltest_putenv_test3, "c_runtime/_putenv/test3/p differing only by case, returns it's own value. */ - result = getenv(FirstVarName); + result = PAL_getenv(FirstVarName); if(result == NULL) { Fail("ERROR: The result of getenv on a valid Environment Variable " @@ -77,7 +77,7 @@ PALTEST(c_runtime__putenv_test3_paltest_putenv_test3, "c_runtime/_putenv/test3/p /* Verify that the environment variables */ - result = getenv(FirstVarName); + result = PAL_getenv(FirstVarName); if(result == NULL) { Fail("ERROR: The result of getenv on a valid Environment Variable " diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test1/test1.cpp deleted file mode 100644 index eacb660dee096c..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test1/test1.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Calls bsearch to find a character in a sorted buffer, and -** verifies that the correct position is returned. 
-** -** -**==========================================================================*/ - -#include - -int __cdecl charcmp_bsearch_test1(const void *pa, const void *pb) -{ - return memcmp(pa, pb, 1); -} - -PALTEST(c_runtime_bsearch_test1_paltest_bsearch_test1, "c_runtime/bsearch/test1/paltest_bsearch_test1") -{ - - const char array[] = "abcdefghij"; - char * found=NULL; - - /* - * Initialize the PAL and return FAIL if this fails - */ - if (0 != (PAL_Initialize(argc, argv))) - { - return FAIL; - } - - found = (char *)bsearch(&"d", array, sizeof(array) - 1, (sizeof(char)) - , charcmp_bsearch_test1); - if (found != array + 3) - { - Fail ("bsearch was unable to find a specified character in a " - "sorted list.\n"); - } - PAL_Terminate(); - return PASS; -} - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test2/test2.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test2/test2.cpp deleted file mode 100644 index a916e61362ee4d..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test2/test2.cpp +++ /dev/null @@ -1,56 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Calls bsearch to find a character in a sorted buffer, -** that does not exist. 
-** -** -**==========================================================================*/ - -#include - -int __cdecl charcmp_bsearch_test2(const void *pa, const void *pb) -{ - return *(const char *)pa - *(const char *)pb; -} - -PALTEST(c_runtime_bsearch_test2_paltest_bsearch_test2, "c_runtime/bsearch/test2/paltest_bsearch_test2") -{ - - const char array[] = "abcefghij"; - const char missing[] = "0dz"; - char * found=NULL; - const char * candidate = missing; - - /* - * Initialize the PAL and return FAIL if this fails - */ - if (0 != (PAL_Initialize(argc, argv))) - { - return FAIL; - } - - while (*candidate) { - found = (char *)bsearch(candidate, array, sizeof(array) - 1, - (sizeof(char)), charcmp_bsearch_test2); - if (found != NULL) - { - Fail ("ERROR: bsearch was able to find a specified character '%c' " - "in a sorted list '%s' as '%c' " - "even though the character is not in the list.\n", - *candidate, array, *found); - } - - candidate++; - } - - PAL_Terminate(); - return PASS; -} - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/exit/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/exit/test1/test1.cpp deleted file mode 100644 index 2bb42e3563c428..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/exit/test1/test1.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Calls exit, and verifies that it actually stops program execution. 
-** -** -**==========================================================================*/ - -#include - -PALTEST(c_runtime_exit_test1_paltest_exit_test1, "c_runtime/exit/test1/paltest_exit_test1") -{ - /* - * Initialize the PAL and return FAIL if this fails - */ - if (0 != (PAL_Initialize(argc, argv))) - { - return FAIL; - } - - /*should return 0*/ - exit(0); - - Fail ("Exit didn't actually stop execution.\n"); - - return FAIL; -} - - - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/exit/test2/test2.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/exit/test2/test2.cpp deleted file mode 100644 index 6125b3c38899d8..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/exit/test2/test2.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test2.c -** -** Purpose: Calls exit on fail, and verifies that it actually -** stops program execution and return 1. - -** -**==========================================================================*/ - -#include - -PALTEST(c_runtime_exit_test2_paltest_exit_test2, "c_runtime/exit/test2/paltest_exit_test2") -{ - /* - * Initialize the PAL and return FAIL if this fails - */ - if (0 != (PAL_Initialize(argc, argv))) - { - return FAIL; - } - - /*should return 1*/ - exit(1); - -} - - - - - - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/free/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/free/test1/test1.cpp deleted file mode 100644 index dc8d13158862ea..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/free/test1/test1.cpp +++ /dev/null @@ -1,61 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
- -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Repeatedly allocates and frees a chunk of memory, to verify -** that free is really returning memory to the heap -** -** -**==========================================================================*/ - -#include - -PALTEST(c_runtime_free_test1_paltest_free_test1, "c_runtime/free/test1/paltest_free_test1") -{ - - char *testA; - - long i; - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - - /* check that free really returns memory to the heap. */ - for(i=1; i<1000000; i++) - { - testA = (char *)malloc(1000*sizeof(char)); - if (testA==NULL) - { - Fail("Either free is failing to return memory to the heap, or" - " the system is running out of memory for some other " - "reason.\n"); - } - free(testA); - } - - free(NULL); /*should do nothing*/ - PAL_Terminate(); - return PASS; -} - - - - - - - - - - - - - - - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/malloc/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/malloc/test1/test1.cpp deleted file mode 100644 index 067791fe866dfd..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/malloc/test1/test1.cpp +++ /dev/null @@ -1,51 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
- -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Test that malloc returns usable memory -** -** -**==========================================================================*/ - -#include - - -PALTEST(c_runtime_malloc_test1_paltest_malloc_test1, "c_runtime/malloc/test1/paltest_malloc_test1") -{ - - char *testA; - int i; - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - /* check that malloc really gives us addressable memory */ - testA = (char *)malloc(20 * sizeof(char)); - if (testA == NULL) - { - Fail("Call to malloc failed.\n"); - } - for (i = 0; i < 20; i++) - { - testA[i] = 'a'; - } - for (i = 0; i < 20; i++) - { - if (testA[i] != 'a') - { - Fail("The memory doesn't seem to be properly allocated.\n"); - } - } - free(testA); - - PAL_Terminate(); - - return PASS; -} - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/malloc/test2/test2.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/malloc/test2/test2.cpp deleted file mode 100644 index 9f94f1050d6ac2..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/malloc/test2/test2.cpp +++ /dev/null @@ -1,40 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
- -/*============================================================================ -** -** Source: test2.c -** -** Purpose: Test that malloc(0) returns non-zero value -** -**==========================================================================*/ - -#include - - -PALTEST(c_runtime_malloc_test2_paltest_malloc_test2, "c_runtime/malloc/test2/paltest_malloc_test2") -{ - - char *testA; - - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - /* check that malloc(0) returns non-zero value */ - testA = (char *)malloc(0); - if (testA == NULL) - { - Fail("Call to malloc(0) failed.\n"); - } - - free(testA); - - PAL_Terminate(); - - return PASS; -} - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/qsort/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/qsort/test1/test1.cpp deleted file mode 100644 index 57b288a809ea5f..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/qsort/test1/test1.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Calls qsort to sort a buffer, and verifies that it has done -** the job correctly. 
-** -** -**==========================================================================*/ - -#include - -int __cdecl charcmp_qsort_test1(const void *pa, const void *pb) -{ - return memcmp(pa, pb, 1); -} - -PALTEST(c_runtime_qsort_test1_paltest_qsort_test1, "c_runtime/qsort/test1/paltest_qsort_test1") -{ - char before[] = "cgaiehdbjf"; - const char after[] = "abcdefghij"; - - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - - qsort(before, sizeof(before) - 1, sizeof(char), charcmp_qsort_test1); - - if (memcmp(before, after, sizeof(before)) != 0) - { - Fail("qsort did not correctly sort an array of characters.\n"); - } - - PAL_Terminate(); - return PASS; - -} - - - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/qsort/test2/test2.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/qsort/test2/test2.cpp deleted file mode 100644 index 20d76c5677e3d3..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/qsort/test2/test2.cpp +++ /dev/null @@ -1,48 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test2.c -** -** Purpose: Calls qsort to sort a buffer, and verifies that it has done -** the job correctly. 
-** -** -**==========================================================================*/ - -#include - -int __cdecl twocharcmp_qsort_test2(const void *pa, const void *pb) -{ - return memcmp(pa, pb, 2); -} - -PALTEST(c_runtime_qsort_test2_paltest_qsort_test2, "c_runtime/qsort/test2/paltest_qsort_test2") -{ - char before[] = "ccggaaiieehhddbbjjff"; - const char after[] = "aabbccddeeffgghhiijj"; - - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - - qsort(before, (sizeof(before) - 1) / 2, 2 * sizeof(char), twocharcmp_qsort_test2); - - if (memcmp(before, after, sizeof(before)) != 0) - { - Fail("qsort did not correctly sort an array of 2-character " - "buffers.\n"); - } - - PAL_Terminate(); - return PASS; - -} - - - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/rand_srand/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/rand_srand/test1/test1.cpp deleted file mode 100644 index cd752c39f5396e..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/rand_srand/test1/test1.cpp +++ /dev/null @@ -1,99 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================= -** -** Source: test1.c -** -** Purpose: Test to ensure that srand provide random -** number to rand. Also make sure that rand result from a -** srand with seed 1 and no call to srand are the same. 
-** -** Dependencies: PAL_Initialize -** PAL_Terminate -** Fail -** srand() -** - -** -**===========================================================================*/ - -#include - - -PALTEST(c_runtime_rand_srand_test1_paltest_rand_srand_test1, "c_runtime/rand_srand/test1/paltest_rand_srand_test1") -{ - int RandNumber[10]; - int TempRandNumber; - int i; - int SRAND_SEED; - int SRAND_REINIT = 1; - - /* - * Initialize the PAL and return FAILURE if this fails - */ - - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - SRAND_SEED = time(NULL); - - /* does not initialize srand and call rand. */ - for (i=0; i<10; i++) - { - /* keep the value in an array */ - RandNumber[i]=rand(); - if (RandNumber[i] < 0 || RandNumber[i] > RAND_MAX) - { - Fail("1) ERROR: random generated an invalid value: %d", RandNumber[i]); - } - } - - - /* initialize random generator */ - srand(SRAND_SEED); - - - /* choose 10 numbers with a different seed. - the numbers should be different than - those the previously generated one */ - for(i = 0; i < 10; i++) - { - TempRandNumber=rand(); - if (TempRandNumber < 0 || TempRandNumber > RAND_MAX) - { - Fail("2) ERROR: random generated an invalid value: %d", TempRandNumber); - } - } - - - - /* renitialize the srand with 1 */ - srand(SRAND_REINIT); - - - - /* choose 10 numbers with seed 1, - the number should be the same as those we kept in the array. 
*/ - for( i = 0; i < 10;i++ ) - { - /* pick the random number*/ - TempRandNumber=rand(); - /* test if it is the same number generated in the first sequences*/ - if(RandNumber[i]!=TempRandNumber) - { - Fail ("ERROR: rand should return the same value when srand " - "is initialized with 1 or not initialized at all"); - } - if (TempRandNumber < 0 || TempRandNumber > RAND_MAX) - { - Fail("3) ERROR: random generated an invalid value: %d", TempRandNumber); - } - } - - - PAL_Terminate(); - return PASS; -} diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/realloc/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/realloc/test1/test1.cpp deleted file mode 100644 index edd075da23f70e..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/realloc/test1/test1.cpp +++ /dev/null @@ -1,65 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Uses realloc to allocate and realloate memory, checking -** that memory contents are copied when the memory is reallocated. 
-** -** -**==========================================================================*/ - -#include - -PALTEST(c_runtime_realloc_test1_paltest_realloc_test1, "c_runtime/realloc/test1/paltest_realloc_test1") -{ - char *testA; - const int len1 = 10; - const char str1[] = "aaaaaaaaaa"; - - const int len2 = 20; - const char str2[] = "bbbbbbbbbbbbbbbbbbbb"; - - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - /* this should work like malloc */ - testA = (char *)realloc(NULL, len1*sizeof(char)); - memcpy(testA, str1, len1); - if (testA == NULL) - { - Fail("We ran out of memory (unlikely), or realloc is broken.\n"); - } - - if (memcmp(testA, str1, len1) != 0) - { - Fail("realloc doesn't properly allocate new memory.\n"); - } - - testA = (char *)realloc(testA, len2*sizeof(char)); - if (memcmp(testA, str1, len1) != 0) - { - Fail("realloc doesn't move the contents of the original memory " - "block to the newly allocated block.\n"); - } - - memcpy(testA, str2, len2); - if (memcmp(testA, str2, len2) != 0) - { - Fail("Couldn't write to memory allocated by realloc.\n"); - } - - /* free the buffer */ - testA = (char*)realloc(testA, 0); - if (testA != NULL) - { - Fail("Realloc didn't return NULL when called with a length " - "of zero.\n"); - } - PAL_Terminate(); - return PASS; -} diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/time/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/time/test1/test1.cpp deleted file mode 100644 index 72d905be0a51e0..00000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/time/test1/test1.cpp +++ /dev/null @@ -1,50 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Calls the time function and verifies that the time returned -** is at least a positive value. 
-** -** -**==========================================================================*/ - -#include - -PALTEST(c_runtime_time_test1_paltest_time_test1, "c_runtime/time/test1/paltest_time_test1") -{ - time_t t = 0; - - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - - time(&t); - /*I was going to test that the time returned didn't exceed some - reasonable value, but decided not to, for fear of creating my own - little Y2K-style disaster.*/ - - if (t <= 0) - { - Fail("time() function doesn't return a time.\n"); - } - t = 0; - t = time(NULL); - if (t <= 0) - { - Fail("time() function doesn't return a time.\n"); - } - PAL_Terminate(); - return PASS; -} - - - - - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/wcstoul/test5/test5.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/wcstoul/test5/test5.cpp index 2ffab4b9de051e..428a5f24caa66a 100644 --- a/src/coreclr/pal/tests/palsuite/c_runtime/wcstoul/test5/test5.cpp +++ b/src/coreclr/pal/tests/palsuite/c_runtime/wcstoul/test5/test5.cpp @@ -32,9 +32,9 @@ PALTEST(c_runtime_wcstoul_test5_paltest_wcstoul_test5, "c_runtime/wcstoul/test5/ errno = 0; l = wcstoul(overstr, &end, 10); - if (l != _UI32_MAX) + if (l != UINT32_MAX) { - Fail("ERROR: Expected wcstoul to return %u, got %u\n", _UI32_MAX, l); + Fail("ERROR: Expected wcstoul to return %u, got %u\n", UINT32_MAX, l); } if (end != overstr + 10) { @@ -49,9 +49,9 @@ PALTEST(c_runtime_wcstoul_test5_paltest_wcstoul_test5, "c_runtime/wcstoul/test5/ errno = 0; l = wcstoul(understr, &end, 10); - if (l != _UI32_MAX) + if (l != UINT32_MAX) { - Fail("ERROR: Expected wcstoul to return %u, got %u\n", _UI32_MAX, l); + Fail("ERROR: Expected wcstoul to return %u, got %u\n", UINT32_MAX, l); } if (end != understr + 2) { diff --git a/src/coreclr/pal/tests/palsuite/compilableTests.txt b/src/coreclr/pal/tests/palsuite/compilableTests.txt index 9a26d821065301..4d865fc63417ef 100644 --- a/src/coreclr/pal/tests/palsuite/compilableTests.txt +++ 
b/src/coreclr/pal/tests/palsuite/compilableTests.txt @@ -1,7 +1,5 @@ c_runtime/atof/test1/paltest_atof_test1 c_runtime/atoi/test1/paltest_atoi_test1 -c_runtime/bsearch/test1/paltest_bsearch_test1 -c_runtime/bsearch/test2/paltest_bsearch_test2 c_runtime/cbrt/test1/paltest_cbrt_test1 c_runtime/cbrtf/test1/paltest_cbrtf_test1 c_runtime/ceil/test1/paltest_ceil_test1 @@ -12,9 +10,6 @@ c_runtime/cosh/test1/paltest_cosh_test1 c_runtime/coshf/test1/paltest_coshf_test1 c_runtime/errno/test1/paltest_errno_test1 c_runtime/errno/test2/paltest_errno_test2 -c_runtime/exit/test1/paltest_exit_test1 -c_runtime/exit/test2/paltest_exit_test2 -c_runtime/free/test1/paltest_free_test1 c_runtime/isalnum/test1/paltest_isalnum_test1 c_runtime/isalpha/test1/paltest_isalpha_test1 c_runtime/isdigit/test1/paltest_isdigit_test1 @@ -26,16 +21,10 @@ c_runtime/iswspace/test1/paltest_iswspace_test1 c_runtime/iswupper/test1/paltest_iswupper_test1 c_runtime/isxdigit/test1/paltest_isxdigit_test1 c_runtime/llabs/test1/paltest_llabs_test1 -c_runtime/malloc/test1/paltest_malloc_test1 -c_runtime/malloc/test2/paltest_malloc_test2 c_runtime/memchr/test1/paltest_memchr_test1 c_runtime/memcmp/test1/paltest_memcmp_test1 c_runtime/memmove/test1/paltest_memmove_test1 c_runtime/memset/test1/paltest_memset_test1 -c_runtime/qsort/test1/paltest_qsort_test1 -c_runtime/qsort/test2/paltest_qsort_test2 -c_runtime/rand_srand/test1/paltest_rand_srand_test1 -c_runtime/realloc/test1/paltest_realloc_test1 c_runtime/sscanf_s/test1/paltest_sscanf_test1 c_runtime/sscanf_s/test10/paltest_sscanf_test10 c_runtime/sscanf_s/test11/paltest_sscanf_test11 @@ -64,7 +53,6 @@ c_runtime/strncpy/test1/paltest_strncpy_test1 c_runtime/strpbrk/test1/paltest_strpbrk_test1 c_runtime/strrchr/test1/paltest_strrchr_test1 c_runtime/strstr/test1/paltest_strstr_test1 -c_runtime/time/test1/paltest_time_test1 c_runtime/tolower/test1/paltest_tolower_test1 c_runtime/toupper/test1/paltest_toupper_test1 c_runtime/towlower/test1/paltest_towlower_test1 @@ 
-122,7 +110,6 @@ c_runtime/_wfopen/test5/paltest_wfopen_test5 c_runtime/_wfopen/test6/paltest_wfopen_test6 c_runtime/_wfopen/test7/paltest_wfopen_test7 c_runtime/_wtoi/test1/paltest_wtoi_test1 -c_runtime/__iscsym/test1/paltest_iscsym_test1 debug_api/OutputDebugStringA/test1/paltest_outputdebugstringa_test1 debug_api/OutputDebugStringW/test1/paltest_outputdebugstringw_test1 exception_handling/RaiseException/test1/paltest_raiseexception_test1 diff --git a/src/coreclr/pal/tests/palsuite/debug_api/OutputDebugStringA/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/debug_api/OutputDebugStringA/test1/test1.cpp index 98f0a1b95b0a36..a8f55d7f9c04c6 100644 --- a/src/coreclr/pal/tests/palsuite/debug_api/OutputDebugStringA/test1/test1.cpp +++ b/src/coreclr/pal/tests/palsuite/debug_api/OutputDebugStringA/test1/test1.cpp @@ -44,13 +44,10 @@ PALTEST(debug_api_OutputDebugStringA_test1_paltest_outputdebugstringa_test1, "de FALSE, 0, NULL, NULL, &si, &pi)) { DWORD dwError = GetLastError(); - free(name); Fail("ERROR: CreateProcess failed to load executable 'helper'. " "GetLastError() returned %d.\n", dwError); } - free(name); - /* This is the main loop. It exits when the process which is being debugged is finished executing. */ diff --git a/src/coreclr/pal/tests/palsuite/manual-unautomatable.dat b/src/coreclr/pal/tests/palsuite/manual-unautomatable.dat index c7a2a3913e7b29..78e1831fbc6a42 100644 --- a/src/coreclr/pal/tests/palsuite/manual-unautomatable.dat +++ b/src/coreclr/pal/tests/palsuite/manual-unautomatable.dat @@ -1,9 +1,6 @@ # Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the MIT license. -#This test is negative and will exit with exit(1). -#Therefore, the harness would record it as a failure -c_runtime/exit/test2,1 # A successful DebugBreak test run dumps core or throws up an ASSERT # dialog box (or...) 
and returns an exit code != 0 debug_api/debugbreak/test1,1 diff --git a/src/coreclr/pal/tests/palsuite/paltestlist.txt b/src/coreclr/pal/tests/palsuite/paltestlist.txt index 0a0bd955f3adb6..e141789e71f066 100644 --- a/src/coreclr/pal/tests/palsuite/paltestlist.txt +++ b/src/coreclr/pal/tests/palsuite/paltestlist.txt @@ -1,8 +1,5 @@ c_runtime/atof/test1/paltest_atof_test1 c_runtime/atoi/test1/paltest_atoi_test1 -c_runtime/bsearch/test1/paltest_bsearch_test1 -c_runtime/bsearch/test2/paltest_bsearch_test2 -c_runtime/free/test1/paltest_free_test1 c_runtime/isalnum/test1/paltest_isalnum_test1 c_runtime/isalpha/test1/paltest_isalpha_test1 c_runtime/isdigit/test1/paltest_isdigit_test1 @@ -13,17 +10,10 @@ c_runtime/iswdigit/test1/paltest_iswdigit_test1 c_runtime/iswspace/test1/paltest_iswspace_test1 c_runtime/iswupper/test1/paltest_iswupper_test1 c_runtime/isxdigit/test1/paltest_isxdigit_test1 -c_runtime/llabs/test1/paltest_llabs_test1 -c_runtime/malloc/test1/paltest_malloc_test1 -c_runtime/malloc/test2/paltest_malloc_test2 c_runtime/memchr/test1/paltest_memchr_test1 c_runtime/memcmp/test1/paltest_memcmp_test1 c_runtime/memmove/test1/paltest_memmove_test1 c_runtime/memset/test1/paltest_memset_test1 -c_runtime/qsort/test1/paltest_qsort_test1 -c_runtime/qsort/test2/paltest_qsort_test2 -c_runtime/rand_srand/test1/paltest_rand_srand_test1 -c_runtime/realloc/test1/paltest_realloc_test1 c_runtime/sscanf_s/test1/paltest_sscanf_test1 c_runtime/sscanf_s/test10/paltest_sscanf_test10 c_runtime/sscanf_s/test11/paltest_sscanf_test11 @@ -52,7 +42,6 @@ c_runtime/strncpy/test1/paltest_strncpy_test1 c_runtime/strpbrk/test1/paltest_strpbrk_test1 c_runtime/strrchr/test1/paltest_strrchr_test1 c_runtime/strstr/test1/paltest_strstr_test1 -c_runtime/time/test1/paltest_time_test1 c_runtime/tolower/test1/paltest_tolower_test1 c_runtime/toupper/test1/paltest_toupper_test1 c_runtime/towlower/test1/paltest_towlower_test1 @@ -110,7 +99,6 @@ c_runtime/_wfopen/test5/paltest_wfopen_test5 
c_runtime/_wfopen/test6/paltest_wfopen_test6 c_runtime/_wfopen/test7/paltest_wfopen_test7 c_runtime/_wtoi/test1/paltest_wtoi_test1 -c_runtime/__iscsym/test1/paltest_iscsym_test1 debug_api/OutputDebugStringW/test1/paltest_outputdebugstringw_test1 exception_handling/RaiseException/test1/paltest_raiseexception_test1 exception_handling/RaiseException/test2/paltest_raiseexception_test2 diff --git a/src/coreclr/pal/tests/palsuite/paltestlist_to_be_reviewed.txt b/src/coreclr/pal/tests/palsuite/paltestlist_to_be_reviewed.txt index 2dbbd64e9f2b15..bc5589f4323c11 100644 --- a/src/coreclr/pal/tests/palsuite/paltestlist_to_be_reviewed.txt +++ b/src/coreclr/pal/tests/palsuite/paltestlist_to_be_reviewed.txt @@ -1,7 +1,6 @@ This is a list of failing PAL tests that need to be reviewed because. They should either be fixed or deleted if they are no longer applicable. -c_runtime/exit/test2/paltest_exit_test2 c_runtime/ferror/test1/paltest_ferror_test1 c_runtime/ferror/test2/paltest_ferror_test2 c_runtime/fputs/test2/paltest_fputs_test2 diff --git a/src/coreclr/pal/tests/palsuite/runpaltests.sh b/src/coreclr/pal/tests/palsuite/runpaltests.sh index c10930e2acc391..39c492212bc884 100755 --- a/src/coreclr/pal/tests/palsuite/runpaltests.sh +++ b/src/coreclr/pal/tests/palsuite/runpaltests.sh @@ -9,7 +9,7 @@ then echo "runpaltests.sh [] []" echo echo "For example:" - echo "runpaltests.sh /projectk/build/debug" + echo "runpaltests.sh artifacts/bin/coreclr/linux.x64.Debug/paltests/" echo exit 1 fi diff --git a/src/coreclr/pal/tests/palsuite/tests-manual.dat b/src/coreclr/pal/tests/palsuite/tests-manual.dat index b87a39486af1bb..3f32f49df043cf 100644 --- a/src/coreclr/pal/tests/palsuite/tests-manual.dat +++ b/src/coreclr/pal/tests/palsuite/tests-manual.dat @@ -1,7 +1,6 @@ # Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the MIT license. 
-c_runtime/exit/test2,1 pal_specific/pal_get_stderr/test1,1 pal_specific/pal_get_stdin/test1,1 pal_specific/pal_get_stdout/test1,1 diff --git a/src/coreclr/pal/tests/palsuite/threading/CriticalSectionFunctions/test8/test8.cpp b/src/coreclr/pal/tests/palsuite/threading/CriticalSectionFunctions/test8/test8.cpp index 24f22afa456ce0..8081b69109a9a8 100644 --- a/src/coreclr/pal/tests/palsuite/threading/CriticalSectionFunctions/test8/test8.cpp +++ b/src/coreclr/pal/tests/palsuite/threading/CriticalSectionFunctions/test8/test8.cpp @@ -12,6 +12,7 @@ ** **===================================================================*/ #include +#include #define MAX_THREAD_COUNT 128 #define DEFAULT_THREAD_COUNT 10 diff --git a/src/coreclr/pal/tests/palsuite/threading/WaitForMultipleObjectsEx/test6/test6.cpp b/src/coreclr/pal/tests/palsuite/threading/WaitForMultipleObjectsEx/test6/test6.cpp index 80ecbaa2016c03..6ac838bd24b046 100644 --- a/src/coreclr/pal/tests/palsuite/threading/WaitForMultipleObjectsEx/test6/test6.cpp +++ b/src/coreclr/pal/tests/palsuite/threading/WaitForMultipleObjectsEx/test6/test6.cpp @@ -12,6 +12,7 @@ **=========================================================*/ #include +#include #define MAX_COUNT 10000 #define MAX_THREADS 256 diff --git a/src/coreclr/palrt/memorystream.cpp b/src/coreclr/palrt/memorystream.cpp index 0ed06547f3bfa5..91a5ca8b2d3181 100644 --- a/src/coreclr/palrt/memorystream.cpp +++ b/src/coreclr/palrt/memorystream.cpp @@ -23,6 +23,10 @@ Revision History: #include "common.h" #include "objidl.h" +#include + +using std::min; +using std::max; class MemoryStream : public IStream { @@ -44,7 +48,7 @@ class MemoryStream : public IStream n = min(2 * n, n + n / 4 + 0x100000); // don't allocate tiny chunks - n = max(n, 0x100); + n = max(n, (ULONG)0x100); // compare with the hard limit nNewData = max(n, nNewData); diff --git a/src/coreclr/scripts/genDummyProvider.py b/src/coreclr/scripts/genDummyProvider.py index ccf421421ca0b4..90ec297b0bf08a 100644 --- 
a/src/coreclr/scripts/genDummyProvider.py +++ b/src/coreclr/scripts/genDummyProvider.py @@ -124,8 +124,6 @@ def generateDummyFiles(etwmanifest, out_dirname, runtimeFlavor, extern, dryRun): #include "pal_mstypes.h" #include "pal_error.h" #include "pal.h" -#define PAL_free free -#define PAL_realloc realloc #include "pal/stackstring.hpp" #endif diff --git a/src/coreclr/scripts/genLttngProvider.py b/src/coreclr/scripts/genLttngProvider.py index d75a222a27b8a3..fc93f240315c3a 100644 --- a/src/coreclr/scripts/genLttngProvider.py +++ b/src/coreclr/scripts/genLttngProvider.py @@ -580,8 +580,6 @@ def generateLttngFiles(etwmanifest, eventprovider_directory, runtimeFlavor, dryR #include "pal_mstypes.h" #include "pal_error.h" #include "pal.h" -#define PAL_free free -#define PAL_realloc realloc #include "pal/stackstring.hpp" """) lttngimpl_file.write("#include \"" + lttngevntheadershortname + "\"\n\n") diff --git a/src/coreclr/tools/StressLogAnalyzer/StressLogDump.cpp b/src/coreclr/tools/StressLogAnalyzer/StressLogDump.cpp index 49bf662c21dde5..eadc27e2557c2d 100644 --- a/src/coreclr/tools/StressLogAnalyzer/StressLogDump.cpp +++ b/src/coreclr/tools/StressLogAnalyzer/StressLogDump.cpp @@ -24,6 +24,10 @@ class MapViewHolder #include "../../../inc/stresslog.h" #include "StressMsgReader.h" +#ifdef HOST_WINDOWS +#include +#endif + void GcHistClear(); void GcHistAddLog(LPCSTR msg, StressMsgReader stressMsg); diff --git a/src/coreclr/tools/StressLogAnalyzer/StressLogPlugin.cpp b/src/coreclr/tools/StressLogAnalyzer/StressLogPlugin.cpp index dc4a63c0e8db19..20d0f1b6b22918 100644 --- a/src/coreclr/tools/StressLogAnalyzer/StressLogPlugin.cpp +++ b/src/coreclr/tools/StressLogAnalyzer/StressLogPlugin.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #ifndef INFINITY #define INFINITY 1e300 // Practically good enough - not sure why we miss this in our Linux build. 
@@ -42,6 +43,9 @@ bool IsInCantAllocStressLogRegion() #include "../../../inc/stresslog.h" #include "StressMsgReader.h" +using std::min; +using std::max; + size_t StressLog::writing_base_address; size_t StressLog::reading_base_address; @@ -1323,7 +1327,7 @@ int ProcessStressLog(void* baseAddress, int argc, char* argv[]) double latestTime = FindLatestTime(hdr); if (s_timeFilterStart < 0) { - s_timeFilterStart = max(latestTime + s_timeFilterStart, 0); + s_timeFilterStart = max(latestTime + s_timeFilterStart, 0.0); s_timeFilterEnd = latestTime; } for (ThreadStressLog* tsl = StressLog::TranslateMemoryMappedPointer(hdr->logs.t); tsl != nullptr; tsl = StressLog::TranslateMemoryMappedPointer(tsl->next)) @@ -1346,7 +1350,7 @@ int ProcessStressLog(void* baseAddress, int argc, char* argv[]) SYSTEM_INFO systemInfo; GetSystemInfo(&systemInfo); - DWORD threadCount = min(systemInfo.dwNumberOfProcessors, MAXIMUM_WAIT_OBJECTS); + DWORD threadCount = min(systemInfo.dwNumberOfProcessors, (DWORD)MAXIMUM_WAIT_OBJECTS); HANDLE threadHandle[64]; for (DWORD i = 0; i < threadCount; i++) { @@ -1361,7 +1365,7 @@ int ProcessStressLog(void* baseAddress, int argc, char* argv[]) // the interlocked increment may have increased s_msgCount beyond MAX_MESSAGE_COUNT - // make sure we don't go beyond the end of the buffer - s_msgCount = min(s_msgCount, MAX_MESSAGE_COUNT); + s_msgCount = min((LONG64)s_msgCount, MAX_MESSAGE_COUNT); if (s_gcFilterStart != 0) { diff --git a/src/coreclr/tools/StressLogAnalyzer/util.h b/src/coreclr/tools/StressLogAnalyzer/util.h index 6999676c2e7ccd..752509277a7701 100644 --- a/src/coreclr/tools/StressLogAnalyzer/util.h +++ b/src/coreclr/tools/StressLogAnalyzer/util.h @@ -12,7 +12,6 @@ typedef void* CRITSEC_COOKIE; #define STRESS_LOG_ANALYZER -#include #include "staticcontract.h" // This macro is used to standardize the wide character string literals between UNIX and Windows. 
diff --git a/src/coreclr/tools/metainfo/mdinfo.cpp b/src/coreclr/tools/metainfo/mdinfo.cpp index 579a5362f96d52..84d7f8d50f265b 100644 --- a/src/coreclr/tools/metainfo/mdinfo.cpp +++ b/src/coreclr/tools/metainfo/mdinfo.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -3772,7 +3773,7 @@ int MDInfo::DumpHex( ++nLines; // Calculate spacing. - nPrint = min(cbData, nLine); + nPrint = std::min(cbData, nLine); nSpace = nLine - nPrint; // dump in hex. diff --git a/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.cpp b/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.cpp index 6f7952477d0645..7b0ddc8ded5ae2 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.cpp +++ b/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.cpp @@ -6632,7 +6632,7 @@ size_t MethodContext::repPrint( size_t bytesWritten = 0; if ((buffer != nullptr) && (bufferSize > 0)) { - bytesWritten = min(bufferSize - 1, res.stringBufferSize); + bytesWritten = min(bufferSize - 1, (size_t)res.stringBufferSize); if (bytesWritten > 0) { // The "full buffer" check above ensures this given that diff --git a/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.cpp b/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.cpp index 725f52cbcc74ca..4a2d55c1dbb234 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.cpp +++ b/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.cpp @@ -93,7 +93,7 @@ void SpmiDumpHelper::FormatHandleArray(char*& pbuf, int& sizeOfBuffer, const Den sizeOfBuffer -= cch; const unsigned int maxHandleArrayDisplayElems = 5; // Don't display more than this. 
- const unsigned int handleArrayDisplayElems = min(maxHandleArrayDisplayElems, count); + const unsigned int handleArrayDisplayElems = min(maxHandleArrayDisplayElems, (unsigned int)count); bool first = true; for (DWORD i = startIndex; i < startIndex + handleArrayDisplayElems; i++) diff --git a/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.h b/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.h index 4dc1f28991a75f..b989fb50d1c468 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.h @@ -110,8 +110,8 @@ inline std::string SpmiDumpHelper::DumpPSig( pbuf += cch; sizeOfBuffer -= cch; - const unsigned int maxSigDisplayBytes = 25; // Don't display more than this. - const unsigned int sigDisplayBytes = min(maxSigDisplayBytes, cbSig); + const size_t maxSigDisplayBytes = 25; // Don't display more than this. + const size_t sigDisplayBytes = min(maxSigDisplayBytes, (size_t)cbSig); // TODO: display character representation of the types? diff --git a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h index 8c511b45e91b9b..9b926556fdbb67 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h @@ -50,7 +50,6 @@ #include #include #include -#include #include #include #include @@ -59,21 +58,11 @@ #include #include -// Getting STL to work with PAL is difficult, so reimplement STL functionality to not require it. 
-#ifdef TARGET_UNIX -#include "clr_std/utility" -#include "clr_std/string" -#include "clr_std/algorithm" -#include "clr_std/vector" -#else // !TARGET_UNIX -#ifndef USE_STL -#define USE_STL -#endif // USE_STL #include #include #include #include -#endif // !TARGET_UNIX + #ifdef USE_MSVCDIS #define DISLIB @@ -128,6 +117,9 @@ static inline void __debugbreak() } #endif +using std::min; +using std::max; + #include #endif // STANDARDPCH_H diff --git a/src/coreclr/utilcode/clrconfig.cpp b/src/coreclr/utilcode/clrconfig.cpp index 8ea705a917e830..b531018eb08ae1 100644 --- a/src/coreclr/utilcode/clrconfig.cpp +++ b/src/coreclr/utilcode/clrconfig.cpp @@ -201,7 +201,11 @@ namespace // Validate the cache and no-cache logic result in the same answer SString nameToConvert(name); +#ifdef HOST_WINDOWS CLRConfigNoCache nonCache = CLRConfigNoCache::Get(nameToConvert.GetUTF8(), noPrefix); +#else + CLRConfigNoCache nonCache = CLRConfigNoCache::Get(nameToConvert.GetUTF8(), noPrefix, &PAL_getenv); +#endif LPCSTR valueNoCache = nonCache.AsString(); _ASSERTE(SString::_stricmp(valueNoCache, temp.GetUTF8()) == 0); diff --git a/src/coreclr/utilcode/clrhost_nodependencies.cpp b/src/coreclr/utilcode/clrhost_nodependencies.cpp index b385474b6dc0c3..7aceae763c43b8 100644 --- a/src/coreclr/utilcode/clrhost_nodependencies.cpp +++ b/src/coreclr/utilcode/clrhost_nodependencies.cpp @@ -246,6 +246,11 @@ FORCEINLINE void* ClrMalloc(size_t size) p = HeapAlloc(hHeap, 0, size); #else + if (size == 0) + { + // Allocate at least one byte. 
+ size = 1; + } p = malloc(size); #endif diff --git a/src/coreclr/utilcode/loaderheap.cpp b/src/coreclr/utilcode/loaderheap.cpp index 72d0d1a6f6f7fd..985df665be6f4f 100644 --- a/src/coreclr/utilcode/loaderheap.cpp +++ b/src/coreclr/utilcode/loaderheap.cpp @@ -1154,7 +1154,7 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit) } // Figure out how much to reserve - dwSizeToReserve = max(dwSizeToCommit, m_dwReserveBlockSize); + dwSizeToReserve = max(dwSizeToCommit, m_dwReserveBlockSize); // Round to VIRTUAL_ALLOC_RESERVE_GRANULARITY dwSizeToReserve = ALIGN_UP(dwSizeToReserve, VIRTUAL_ALLOC_RESERVE_GRANULARITY); diff --git a/src/coreclr/utilcode/md5.cpp b/src/coreclr/utilcode/md5.cpp index 7297114f21febd..cc86a48bedc2c4 100644 --- a/src/coreclr/utilcode/md5.cpp +++ b/src/coreclr/utilcode/md5.cpp @@ -141,7 +141,7 @@ void MD5::GetHashValue(MD5HASHDATA* phash) // // but our compiler has an intrinsic! - #if (defined(HOST_X86) || defined(HOST_ARM)) && defined(TARGET_UNIX) + #if (defined(HOST_X86) || defined(HOST_ARM) || !defined(__clang__)) && defined(TARGET_UNIX) #define ROL(x, n) (((x) << (n)) | ((x) >> (32-(n)))) #define ROTATE_LEFT(x,n) (x) = ROL(x,n) #else diff --git a/src/coreclr/utilcode/stdafx.h b/src/coreclr/utilcode/stdafx.h index 18b820306f7757..78e98405758193 100644 --- a/src/coreclr/utilcode/stdafx.h +++ b/src/coreclr/utilcode/stdafx.h @@ -12,6 +12,9 @@ #include #include #include +#include +using std::min; +using std::max; #define IN_WINFIX_CPP diff --git a/src/coreclr/utilcode/stgpool.cpp b/src/coreclr/utilcode/stgpool.cpp index f04f6e9e7b3bf4..e7aebc55d6ca9e 100644 --- a/src/coreclr/utilcode/stgpool.cpp +++ b/src/coreclr/utilcode/stgpool.cpp @@ -1938,7 +1938,7 @@ CInMemoryStream::CopyTo( _ASSERTE(cb.QuadPart <= UINT32_MAX); ULONG cbTotal = min(static_cast(cb.QuadPart), m_cbSize - m_cbCurrent); - ULONG cbRead=min(1024, cbTotal); + ULONG cbRead=min((ULONG)1024, cbTotal); CQuickBytes rBuf; void *pBuf = rBuf.AllocNoThrow(cbRead); if (pBuf == 0) 
@@ -2061,7 +2061,7 @@ CGrowableStream::CGrowableStream(float multiplicativeGrowthRate, DWORD additiveG m_multiplicativeGrowthRate = min(max(1.0F, multiplicativeGrowthRate), 2.0F); _ASSERTE(additiveGrowthRate >= 1); - m_additiveGrowthRate = max(1, additiveGrowthRate); + m_additiveGrowthRate = max((DWORD)1, additiveGrowthRate); } // CGrowableStream::CGrowableStream #ifndef DACCESS_COMPILE @@ -2115,7 +2115,7 @@ HRESULT CGrowableStream::EnsureCapacity(DWORD newLogicalSize) multSize = (DWORD)multSizeF; } - DWORD newBufferSize = max(max(newLogicalSize, multSize), addSize.Value()); + DWORD newBufferSize = max(max(newLogicalSize, multSize), (DWORD)addSize.Value()); char *tmp = new (nothrow) char[newBufferSize]; if(tmp == NULL) diff --git a/src/coreclr/utilcode/stresslog.cpp b/src/coreclr/utilcode/stresslog.cpp index 90ad5900473ed7..37abeb2cb92f4e 100644 --- a/src/coreclr/utilcode/stresslog.cpp +++ b/src/coreclr/utilcode/stresslog.cpp @@ -227,7 +227,7 @@ void StressLog::Initialize(unsigned facilities, unsigned level, unsigned maxByte // in this case, interpret the number as GB maxBytesPerThread *= (1024 * 1024 * 1024); } - theLog.MaxSizePerThread = (unsigned)min(maxBytesPerThread,0xffffffff); + theLog.MaxSizePerThread = (unsigned)min(maxBytesPerThread,(size_t)0xffffffff); size_t maxBytesTotal = maxBytesTotalArg; if (maxBytesTotal < STRESSLOG_CHUNK_SIZE * 256) @@ -235,7 +235,7 @@ void StressLog::Initialize(unsigned facilities, unsigned level, unsigned maxByte // in this case, interpret the number as GB maxBytesTotal *= (1024 * 1024 * 1024); } - theLog.MaxSizeTotal = (unsigned)min(maxBytesTotal, 0xffffffff); + theLog.MaxSizeTotal = (unsigned)min(maxBytesTotal, (size_t)0xffffffff); theLog.totalChunk = 0; theLog.facilitiesToLog = facilities | LF_ALWAYS; theLog.levelToLog = level; diff --git a/src/coreclr/utilcode/util.cpp b/src/coreclr/utilcode/util.cpp index 13668d244135e5..1819f38e0a4381 100644 --- a/src/coreclr/utilcode/util.cpp +++ b/src/coreclr/utilcode/util.cpp @@ -816,7 
+816,7 @@ DWORD LCM(DWORD u, DWORD v) DWORD currentProcsInGroup = 0; for (WORD i = 0; i < m_nGroups; i++) { - currentProcsInGroup = max(currentProcsInGroup, m_CPUGroupInfoArray[i].nr_active); + currentProcsInGroup = max(currentProcsInGroup, (DWORD)m_CPUGroupInfoArray[i].nr_active); } *max_procs_per_group = currentProcsInGroup; return true; diff --git a/src/coreclr/utilcode/utsem.cpp b/src/coreclr/utilcode/utsem.cpp index 94c1636dbe6ea8..e8e786cc3af911 100644 --- a/src/coreclr/utilcode/utsem.cpp +++ b/src/coreclr/utilcode/utsem.cpp @@ -84,7 +84,7 @@ SpinConstants g_SpinConstants = { inline void InitializeSpinConstants_NoHost() { - g_SpinConstants.dwMaximumDuration = max(2, g_SystemInfo.dwNumberOfProcessors) * 20000; + g_SpinConstants.dwMaximumDuration = max((DWORD)2, g_SystemInfo.dwNumberOfProcessors) * 20000; } #else //!SELF_NO_HOST diff --git a/src/coreclr/vm/.vscode/c_cpp_properties.json b/src/coreclr/vm/.vscode/c_cpp_properties.json index d8abc20bf0bcbf..4192e236e35486 100644 --- a/src/coreclr/vm/.vscode/c_cpp_properties.json +++ b/src/coreclr/vm/.vscode/c_cpp_properties.json @@ -31,6 +31,7 @@ "_UNICODE", "_WIN32", "_WIN32_WINNT=0x0602", + "NOMINMAX", "HOST_64BIT", "AMD64", "HOST_64BIT=1", diff --git a/src/coreclr/vm/appdomain.cpp b/src/coreclr/vm/appdomain.cpp index dd8a98eb4583d6..07f24361dcccae 100644 --- a/src/coreclr/vm/appdomain.cpp +++ b/src/coreclr/vm/appdomain.cpp @@ -313,7 +313,7 @@ OBJECTREF* PinnedHeapHandleTable::AllocateHandles(DWORD nRequested) // Retrieve the remaining number of handles in the bucket. DWORD numRemainingHandlesInBucket = (m_pHead != NULL) ? 
m_pHead->GetNumRemainingHandles() : 0; PTRARRAYREF pinnedHandleArrayObj = NULL; - DWORD nextBucketSize = min(m_NextBucketSize * 2, MAX_BUCKETSIZE); + DWORD nextBucketSize = min(m_NextBucketSize * 2, MAX_BUCKETSIZE); // create a new block if this request doesn't fit in the current block if (nRequested > numRemainingHandlesInBucket) @@ -4177,7 +4177,7 @@ void DomainLocalModule::EnsureDynamicClassIndex(DWORD dwID) return; } - SIZE_T aDynamicEntries = max(16, oldDynamicEntries); + SIZE_T aDynamicEntries = max(16, oldDynamicEntries); while (aDynamicEntries <= dwID) { aDynamicEntries *= 2; diff --git a/src/coreclr/vm/callcounting.cpp b/src/coreclr/vm/callcounting.cpp index c464949f7aeee0..a6577fd42de34d 100644 --- a/src/coreclr/vm/callcounting.cpp +++ b/src/coreclr/vm/callcounting.cpp @@ -664,7 +664,7 @@ bool CallCountingManager::SetCodeEntryPoint( // direct calls in codegen and they need to be promoted earlier than their callers. if (methodDesc->GetMethodTable() == g_pCastHelpers) { - callCountThreshold = max(1, (CallCount)(callCountThreshold / 2)); + callCountThreshold = max(1, (CallCount)(callCountThreshold / 2)); } NewHolder callCountingInfoHolder = new CallCountingInfo(activeCodeVersion, callCountThreshold); diff --git a/src/coreclr/vm/castcache.cpp b/src/coreclr/vm/castcache.cpp index 1e59f7862d72f4..27105f3d25efaa 100644 --- a/src/coreclr/vm/castcache.cpp +++ b/src/coreclr/vm/castcache.cpp @@ -12,6 +12,7 @@ BASEARRAYREF* CastCache::s_pTableRef = NULL; OBJECTHANDLE CastCache::s_sentinelTable = NULL; DWORD CastCache::s_lastFlushSize = INITIAL_CACHE_SIZE; +const DWORD CastCache::INITIAL_CACHE_SIZE; BASEARRAYREF CastCache::CreateCastCache(DWORD size) { diff --git a/src/coreclr/vm/ceeload.cpp b/src/coreclr/vm/ceeload.cpp index 4b5fa46e483b19..0696635e90273a 100644 --- a/src/coreclr/vm/ceeload.cpp +++ b/src/coreclr/vm/ceeload.cpp @@ -946,26 +946,26 @@ void Module::BuildStaticsOffsets(AllocMemTracker *pamTracker) case ELEMENT_TYPE_I2: case ELEMENT_TYPE_U2: case 
ELEMENT_TYPE_CHAR: - dwAlignment[kk] = max(2, dwAlignment[kk]); + dwAlignment[kk] = max(2, dwAlignment[kk]); dwClassNonGCBytes[kk] += 2; break; case ELEMENT_TYPE_I4: case ELEMENT_TYPE_U4: case ELEMENT_TYPE_R4: - dwAlignment[kk] = max(4, dwAlignment[kk]); + dwAlignment[kk] = max(4, dwAlignment[kk]); dwClassNonGCBytes[kk] += 4; break; case ELEMENT_TYPE_FNPTR: case ELEMENT_TYPE_PTR: case ELEMENT_TYPE_I: case ELEMENT_TYPE_U: - dwAlignment[kk] = max((1 << LOG2_PTRSIZE), dwAlignment[kk]); + dwAlignment[kk] = max((1 << LOG2_PTRSIZE), dwAlignment[kk]); dwClassNonGCBytes[kk] += (1 << LOG2_PTRSIZE); break; case ELEMENT_TYPE_I8: case ELEMENT_TYPE_U8: case ELEMENT_TYPE_R8: - dwAlignment[kk] = max(8, dwAlignment[kk]); + dwAlignment[kk] = max(8, dwAlignment[kk]); dwClassNonGCBytes[kk] += 8; break; case ELEMENT_TYPE_VAR: @@ -989,7 +989,7 @@ void Module::BuildStaticsOffsets(AllocMemTracker *pamTracker) { // We'll have to be pessimistic here dwClassNonGCBytes[kk] += MAX_PRIMITIVE_FIELD_SIZE; - dwAlignment[kk] = max(MAX_PRIMITIVE_FIELD_SIZE, dwAlignment[kk]); + dwAlignment[kk] = max(MAX_PRIMITIVE_FIELD_SIZE, dwAlignment[kk]); dwClassGCHandles[kk] += 1; break; @@ -1532,7 +1532,7 @@ DWORD Module::AllocateDynamicEntry(MethodTable *pMT) if (newId >= m_maxDynamicEntries) { - SIZE_T maxDynamicEntries = max(16, m_maxDynamicEntries); + SIZE_T maxDynamicEntries = max(16, m_maxDynamicEntries); while (maxDynamicEntries <= newId) { maxDynamicEntries *= 2; diff --git a/src/coreclr/vm/ceemain.cpp b/src/coreclr/vm/ceemain.cpp index a9bb4e3bbbe866..1c29942942c050 100644 --- a/src/coreclr/vm/ceemain.cpp +++ b/src/coreclr/vm/ceemain.cpp @@ -933,7 +933,7 @@ void EEStartupHelper() // retrieve configured max size for the mini-metadata buffer (defaults to 64KB) g_MiniMetaDataBuffMaxSize = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MiniMdBufferCapacity); // align up to GetOsPageSize(), with a maximum of 1 MB - g_MiniMetaDataBuffMaxSize = (DWORD) min(ALIGN_UP(g_MiniMetaDataBuffMaxSize, GetOsPageSize()), 
1024 * 1024); + g_MiniMetaDataBuffMaxSize = (DWORD) min(ALIGN_UP(g_MiniMetaDataBuffMaxSize, GetOsPageSize()), (DWORD)(1024 * 1024)); // allocate the buffer. this is never touched while the process is running, so it doesn't // contribute to the process' working set. it is needed only as a "shadow" for a mini-metadata // buffer that will be set up and reported / updated in the Watson process (the diff --git a/src/coreclr/vm/cgensys.h b/src/coreclr/vm/cgensys.h index 1396d7558c29fe..a3accc91a99736 100644 --- a/src/coreclr/vm/cgensys.h +++ b/src/coreclr/vm/cgensys.h @@ -84,8 +84,6 @@ BOOL GetAnyThunkTarget (T_CONTEXT *pctx, TADDR *pTarget, TADDR *pTargetMethodDes #endif // DACCESS_COMPILE - - // // ResetProcessorStateHolder saves/restores processor state around calls to // CoreLib during exception handling. diff --git a/src/coreclr/vm/classhash.cpp b/src/coreclr/vm/classhash.cpp index 5d2be11c9b3280..1a2af02a057417 100644 --- a/src/coreclr/vm/classhash.cpp +++ b/src/coreclr/vm/classhash.cpp @@ -234,7 +234,7 @@ VOID EEClassHashTable::ConstructKeyFromData(PTR_EEClassHashEntry pEntry, // IN #endif // If IsCaseInsensitiveTable() is true for the hash table, strings passed to the ConstructKeyCallback instance - // will be dynamically allocated. This is to prevent wasting bytes in the Loader Heap. Thusly, it is important + // will be dynamically allocated. This is to prevent wasting bytes in the Loader Heap. Thusly, it is important // to note that in this case, the lifetime of Key is bounded by the lifetime of the single call to UseKeys, and // will be freed when that function returns. @@ -452,7 +452,7 @@ EEClassHashTable *EEClassHashTable::MakeCaseInsensitiveTable(Module *pModule, Al // Allocate the table and verify that we actually got one. 
EEClassHashTable * pCaseInsTable = EEClassHashTable::Create(pModule, - max(BaseGetElementCount() / 2, 11), + max(BaseGetElementCount() / 2, (DWORD)11), this, pamTracker); diff --git a/src/coreclr/vm/classlayoutinfo.cpp b/src/coreclr/vm/classlayoutinfo.cpp index 468dc63d59de36..8336f890660329 100644 --- a/src/coreclr/vm/classlayoutinfo.cpp +++ b/src/coreclr/vm/classlayoutinfo.cpp @@ -136,7 +136,7 @@ namespace ) { UINT32 cbCurOffset = parentSize; - BYTE LargestAlignmentRequirement = max(1, min(packingSize, parentAlignmentRequirement)); + BYTE LargestAlignmentRequirement = max(1, min(packingSize, parentAlignmentRequirement)); // Start with the size inherited from the parent (if any). uint32_t calcTotalSize = parentSize; @@ -198,7 +198,7 @@ namespace COMPlusThrowOM(); // size must be large enough to accommodate layout. If not, we use the layout size instead. - calcTotalSize = max(classSize, calcTotalSize); + calcTotalSize = max((uint32_t)classSize, calcTotalSize); } else { diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index c6bac693c679a6..228c65f0041299 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -2302,7 +2302,7 @@ VOID EEJitManager::EnsureJumpStubReserve(BYTE * pImageBase, SIZE_T imageSize, SI int allocMode = 0; // Try to reserve at least 16MB at a time - SIZE_T allocChunk = max(ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY), 16*1024*1024); + SIZE_T allocChunk = max(ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY), 16*1024*1024); while (reserveSize > 0) { @@ -2820,11 +2820,11 @@ void EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, size_t reserveFo if ((flag & CORJIT_ALLOCMEM_FLG_32BYTE_ALIGN) != 0) { - alignment = max(alignment, 32); + alignment = max(alignment, 32u); } else if ((flag & CORJIT_ALLOCMEM_FLG_16BYTE_ALIGN) != 0) { - alignment = max(alignment, 16); + alignment = max(alignment, 16u); } #if defined(TARGET_X86) @@ -2832,7 +2832,7 @@ void EEJitManager::allocCode(MethodDesc* 
pMD, size_t blockSize, size_t reserveFo // the JIT can in turn 8-byte align the loop entry headers. else if ((g_pConfig->GenOptimizeType() != OPT_SIZE)) { - alignment = max(alignment, 8); + alignment = max(alignment, 8u); } #endif @@ -3206,7 +3206,7 @@ JumpStubBlockHeader * EEJitManager::allocJumpStubBlock(MethodDesc* pMD, DWORD n CrstHolder ch(&m_CodeHeapCritSec); mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), blockSize, CODE_SIZE_ALIGN, &pCodeHeap); - if (mem == NULL) + if (mem == (TADDR)0) { _ASSERTE(!throwOnOutOfMemoryWithinRange); RETURN(NULL); @@ -3755,7 +3755,7 @@ static CodeHeader * GetCodeHeaderFromDebugInfoRequest(const DebugInfoRequest & r } CONTRACTL_END; TADDR address = (TADDR) request.GetStartAddress(); - _ASSERTE(address != NULL); + _ASSERTE(address != (TADDR)0); CodeHeader * pHeader = dac_cast(address & ~3) - 1; _ASSERTE(pHeader != NULL); @@ -3925,7 +3925,7 @@ BOOL EEJitManager::JitCodeToMethodInfo( return FALSE; TADDR start = dac_cast(pRangeSection->_pjit)->FindMethodCode(pRangeSection, currentPC); - if (start == NULL) + if (start == (TADDR)0) return FALSE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); @@ -3970,7 +3970,7 @@ StubCodeBlockKind EEJitManager::GetStubCodeBlockKind(RangeSection * pRangeSectio } TADDR start = dac_cast(pRangeSection->_pjit)->FindMethodCode(pRangeSection, currentPC); - if (start == NULL) + if (start == (TADDR)0) return STUB_CODE_BLOCK_NOCODE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); return pCHdr->IsStubCodeBlock() ? 
pCHdr->GetStubCodeBlockKind() : STUB_CODE_BLOCK_MANAGED; @@ -4425,7 +4425,7 @@ ExecutionManager::FindCodeRange(PCODE currentPC, ScanFlag scanFlag) SUPPORTS_DAC; } CONTRACTL_END; - if (currentPC == NULL) + if (currentPC == (PCODE)NULL) return NULL; if (scanFlag == ScanReaderLock) @@ -4463,7 +4463,7 @@ ExecutionManager::FindCodeRangeWithLock(PCODE currentPC) PCODE ExecutionManager::GetCodeStartAddress(PCODE currentPC) { WRAPPER_NO_CONTRACT; - _ASSERTE(currentPC != NULL); + _ASSERTE(currentPC != (PCODE)NULL); EECodeInfo codeInfo(currentPC); if (!codeInfo.IsValid()) @@ -4511,7 +4511,7 @@ BOOL ExecutionManager::IsManagedCode(PCODE currentPC) GC_NOTRIGGER; } CONTRACTL_END; - if (currentPC == NULL) + if (currentPC == (PCODE)NULL) return FALSE; if (GetScanFlags() == ScanReaderLock) @@ -4568,7 +4568,7 @@ BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC, RangeSectionLockStat // but on we could also be in a stub, so we check for that // as well and we don't consider stub to be real managed code. TADDR start = dac_cast(pRS->_pjit)->FindMethodCode(pRS, currentPC); - if (start == NULL) + if (start == (TADDR)0) return FALSE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); if (!pCHdr->IsStubCodeBlock()) @@ -4968,7 +4968,7 @@ PCODE ExecutionManager::jumpStub(MethodDesc* pMD, PCODE target, POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange); } CONTRACT_END; - PCODE jumpStub = NULL; + PCODE jumpStub = (PCODE)NULL; if (pLoaderAllocator == NULL) { @@ -5018,7 +5018,7 @@ PCODE ExecutionManager::jumpStub(MethodDesc* pMD, PCODE target, { jumpStub = i->m_jumpStub; - _ASSERTE(jumpStub != NULL); + _ASSERTE(jumpStub != (PCODE)NULL); // Is the matching entry with the requested range? 
if (((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr)) @@ -5030,10 +5030,10 @@ PCODE ExecutionManager::jumpStub(MethodDesc* pMD, PCODE target, // If we get here we need to create a new jump stub // add or change the jump stub table to point at the new one jumpStub = getNextJumpStub(pMD, target, loAddr, hiAddr, pLoaderAllocator, throwOnOutOfMemoryWithinRange); // this statement can throw - if (jumpStub == NULL) + if (jumpStub == (PCODE)NULL) { _ASSERTE(!throwOnOutOfMemoryWithinRange); - RETURN(NULL); + RETURN((PCODE)NULL); } _ASSERTE(((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr)); @@ -5133,7 +5133,7 @@ PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target, if (curBlock == NULL) { _ASSERTE(!throwOnOutOfMemoryWithinRange); - RETURN(NULL); + RETURN((PCODE)NULL); } curBlockWriterHolder.AssignExecutableWriterHolder(curBlock, sizeof(JumpStubBlockHeader) + ((size_t) (curBlock->m_used + 1) * BACK_TO_BACK_JUMP_ALLOCATE_SIZE)); diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index 038ce3fe875ec4..3319d3e0c30033 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -1358,7 +1358,12 @@ class RangeSectionMap // This level is completely empty. Free it, and then null out the pointer to it. pointerToLevelData->Uninstall(); +#if defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wfree-nonheap-object" // The compiler can't tell that this pointer always comes from a malloc call. 
free((void*)rawData); +#pragma GCC diagnostic pop +#endif } } diff --git a/src/coreclr/vm/common.h b/src/coreclr/vm/common.h index f0edc0f15cd007..8b8ff9e842b3ae 100644 --- a/src/coreclr/vm/common.h +++ b/src/coreclr/vm/common.h @@ -58,7 +58,7 @@ #include #include #include - +#include #include #include @@ -66,13 +66,16 @@ #include #include #include -#include +#include #include #include #include #include +using std::max; +using std::min; + #ifdef _MSC_VER //non inline intrinsics are faster #pragma function(memcpy,memcmp,strcmp,strcpy,strlen,strcat) diff --git a/src/coreclr/vm/dacenumerablehash.inl b/src/coreclr/vm/dacenumerablehash.inl index a6083e26fda4ea..93d63116e0f42a 100644 --- a/src/coreclr/vm/dacenumerablehash.inl +++ b/src/coreclr/vm/dacenumerablehash.inl @@ -7,7 +7,7 @@ // See DacEnumerableHash.h for a more detailed description. // -#include "clr_std/type_traits" +#include // Our implementation embeds entry data supplied by the hash sub-class into a larger entry structure // containing DacEnumerableHash metadata. We often end up returning pointers to the inner entry to sub-class code and diff --git a/src/coreclr/vm/dllimportcallback.h b/src/coreclr/vm/dllimportcallback.h index fb2214a8c18d5a..ac2f2e93cdfd01 100644 --- a/src/coreclr/vm/dllimportcallback.h +++ b/src/coreclr/vm/dllimportcallback.h @@ -185,7 +185,7 @@ class UMEntryThunk uMThunkMarshInfoWriterHolder.GetRW()->RunTimeInit(); // Ensure that we have either the managed target or the delegate. 
- if (m_pObjectHandle == NULL && m_pManagedTarget == NULL) + if (m_pObjectHandle == NULL && m_pManagedTarget == (TADDR)0) m_pManagedTarget = m_pMD->GetMultiCallableAddrOfCode(); m_code.Encode(&pUMEntryThunkRX->m_code, (BYTE*)m_pUMThunkMarshInfo->GetExecStubEntryPoint(), pUMEntryThunkRX); @@ -223,7 +223,7 @@ class UMEntryThunk } else { - if (m_pManagedTarget != NULL) + if (m_pManagedTarget != (TADDR)0) { RETURN m_pManagedTarget; } diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp index 065d80d57fcc14..beeb0cd64b2a86 100644 --- a/src/coreclr/vm/dynamicmethod.cpp +++ b/src/coreclr/vm/dynamicmethod.cpp @@ -515,7 +515,7 @@ HostCodeHeap::TrackAllocation* HostCodeHeap::AllocFromFreeList(size_t header, si // The space left is not big enough for a new block, let's just // update the TrackAllocation record for the current block - if (pCurrent->size - realSize < max(HOST_CODEHEAP_SIZE_ALIGN, sizeof(TrackAllocation))) + if (pCurrent->size - realSize < max(HOST_CODEHEAP_SIZE_ALIGN, sizeof(TrackAllocation))) { LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Item removed %p, size 0x%X\n", this, pCurrent, pCurrent->size)); // remove current diff --git a/src/coreclr/vm/eetwain.cpp b/src/coreclr/vm/eetwain.cpp index b12aac718c84bd..4cdfc0852a6a3e 100644 --- a/src/coreclr/vm/eetwain.cpp +++ b/src/coreclr/vm/eetwain.cpp @@ -430,7 +430,7 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, { // This is an explicit (not special) var, so add its varNumber + 1 to our // max count ("+1" because varNumber is zero-based). - oldNumVars = max(oldNumVars, unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1); + oldNumVars = max(oldNumVars, (unsigned)(unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1)); } } @@ -484,7 +484,7 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, { // This is an explicit (not special) var, so add its varNumber + 1 to our // max count ("+1" because varNumber is zero-based). 
- newNumVars = max(newNumVars, unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1); + newNumVars = max(newNumVars, (unsigned)(unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1)); } } diff --git a/src/coreclr/vm/eventing/eventpipe/ds-rt-coreclr.h b/src/coreclr/vm/eventing/eventpipe/ds-rt-coreclr.h index e6036353d8b9f7..fb6c0c3feeda09 100644 --- a/src/coreclr/vm/eventing/eventpipe/ds-rt-coreclr.h +++ b/src/coreclr/vm/eventing/eventpipe/ds-rt-coreclr.h @@ -391,7 +391,11 @@ ds_rt_server_log_pause_message (void) STATIC_CONTRACT_NOTHROW; const char diagPortsName[] = "DiagnosticPorts"; - CLRConfigNoCache diagPorts = CLRConfigNoCache::Get(diagPortsName); +#ifdef HOST_WINDOWS + CLRConfigNoCache diagPorts = CLRConfigNoCache::Get(diagPortsName); +#else + CLRConfigNoCache diagPorts = CLRConfigNoCache::Get(diagPortsName, /* noPrefix */ false, &PAL_getenv); +#endif LPCSTR ports = nullptr; if (diagPorts.IsSet()) { diff --git a/src/coreclr/vm/interpreter.h b/src/coreclr/vm/interpreter.h index 86a0a36efe4525..7f7eed175dea0a 100644 --- a/src/coreclr/vm/interpreter.h +++ b/src/coreclr/vm/interpreter.h @@ -13,7 +13,7 @@ #include "crst.h" #include "callhelpers.h" #include "codeversion.h" -#include "clr_std/type_traits" +#include typedef SSIZE_T NativeInt; typedef SIZE_T NativeUInt; diff --git a/src/coreclr/vm/jithelpers.cpp b/src/coreclr/vm/jithelpers.cpp index 9f2ed013aa1db0..18629a0da24141 100644 --- a/src/coreclr/vm/jithelpers.cpp +++ b/src/coreclr/vm/jithelpers.cpp @@ -58,6 +58,9 @@ #include "exinfo.h" +using std::isfinite; +using std::isnan; + //======================================================================== // // This file contains implementation of all JIT helpers. 
The helpers are @@ -264,7 +267,7 @@ HCIMPL2(INT32, JIT_Div, INT32 dividend, INT32 divisor) } else if (divisor == -1) { - if (dividend == _I32_MIN) + if (dividend == INT32_MIN) { ehKind = kOverflowException; goto ThrowExcep; @@ -296,7 +299,7 @@ HCIMPL2(INT32, JIT_Mod, INT32 dividend, INT32 divisor) } else if (divisor == -1) { - if (dividend == _I32_MIN) + if (dividend == INT32_MIN) { ehKind = kOverflowException; goto ThrowExcep; diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index bcaf0351e52b3c..0889e131f037cb 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -700,7 +700,7 @@ size_t CEEInfo::printObjectDescription ( const UTF8* utf8data = stackStr.GetUTF8(); if (bufferSize > 0) { - bytesWritten = min(bufferSize - 1, stackStr.GetCount()); + bytesWritten = min(bufferSize - 1, stackStr.GetCount()); memcpy((BYTE*)buffer, (BYTE*)utf8data, bytesWritten); // Always null-terminate @@ -11452,7 +11452,7 @@ void CEEJitInfo::recordRelocation(void * location, // Keep track of conservative estimate of how much memory may be needed by jump stubs. We will use it to reserve extra memory // on retry to increase chances that the retry succeeds. - m_reserveForJumpStubs = max(0x400, m_reserveForJumpStubs + 0x10); + m_reserveForJumpStubs = max((size_t)0x400, m_reserveForJumpStubs + 0x10); } } @@ -11511,7 +11511,7 @@ void CEEJitInfo::recordRelocation(void * location, // Keep track of conservative estimate of how much memory may be needed by jump stubs. We will use it to reserve extra memory // on retry to increase chances that the retry succeeds. 
- m_reserveForJumpStubs = max(0x400, m_reserveForJumpStubs + 2*BACK_TO_BACK_JUMP_ALLOCATE_SIZE); + m_reserveForJumpStubs = max((size_t)0x400, m_reserveForJumpStubs + 2*BACK_TO_BACK_JUMP_ALLOCATE_SIZE); if (jumpStubAddr == 0) { diff --git a/src/coreclr/vm/methodtable.cpp b/src/coreclr/vm/methodtable.cpp index a59440400b6341..67903433833b51 100644 --- a/src/coreclr/vm/methodtable.cpp +++ b/src/coreclr/vm/methodtable.cpp @@ -9586,7 +9586,7 @@ int MethodTable::GetFieldAlignmentRequirement() { return GetClass()->GetOverriddenFieldAlignmentRequirement(); } - return min(GetNumInstanceFieldBytes(), TARGET_POINTER_SIZE); + return min((int)GetNumInstanceFieldBytes(), TARGET_POINTER_SIZE); } UINT32 MethodTable::GetNativeSize() diff --git a/src/coreclr/vm/methodtablebuilder.cpp b/src/coreclr/vm/methodtablebuilder.cpp index 5a9670b19f2da7..69d2a105ecd8d5 100644 --- a/src/coreclr/vm/methodtablebuilder.cpp +++ b/src/coreclr/vm/methodtablebuilder.cpp @@ -8419,7 +8419,7 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach else #endif // FEATURE_64BIT_ALIGNMENT if (dwNumInstanceFieldBytes > TARGET_POINTER_SIZE) { - minAlign = containsGCPointers ? TARGET_POINTER_SIZE : (unsigned)largestAlignmentRequirement; + minAlign = (unsigned)(containsGCPointers ? 
TARGET_POINTER_SIZE : largestAlignmentRequirement); } else { minAlign = 1; @@ -8427,7 +8427,7 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach minAlign *= 2; } - if (minAlign != min(dwNumInstanceFieldBytes, TARGET_POINTER_SIZE)) + if (minAlign != min(dwNumInstanceFieldBytes, (DWORD)TARGET_POINTER_SIZE)) { EnsureOptionalFieldsAreAllocated(GetHalfBakedClass(), m_pAllocMemTracker, GetLoaderAllocator()->GetLowFrequencyHeap()); GetHalfBakedClass()->GetOptionalFields()->m_requiredFieldAlignment = (BYTE)minAlign; diff --git a/src/coreclr/vm/object.inl b/src/coreclr/vm/object.inl index 7f58c122097beb..491aab1d4c873f 100644 --- a/src/coreclr/vm/object.inl +++ b/src/coreclr/vm/object.inl @@ -91,7 +91,7 @@ inline void Object::EnumMemoryRegions(void) // Unfortunately, DacEnumMemoryRegion takes only ULONG32 as size argument while (size > 0) { // Use 0x10000000 instead of MAX_ULONG32 so that the chunks stays aligned - SIZE_T chunk = min(size, 0x10000000); + SIZE_T chunk = min(size, (SIZE_T)0x10000000); // If for any reason we can't enumerate the memory, stop. This would generally mean // that we have target corruption, or that the target is executing, etc. 
if (!DacEnumMemoryRegion(ptr, chunk)) diff --git a/src/coreclr/vm/perfmap.cpp b/src/coreclr/vm/perfmap.cpp index 4ede16efca0766..d032dc6031dcee 100644 --- a/src/coreclr/vm/perfmap.cpp +++ b/src/coreclr/vm/perfmap.cpp @@ -46,7 +46,11 @@ void PerfMap::Initialize() const char * PerfMap::InternalConstructPath() { +#ifdef HOST_WINDOWS CLRConfigNoCache value = CLRConfigNoCache::Get("PerfMapJitDumpPath"); +#else + CLRConfigNoCache value = CLRConfigNoCache::Get("PerfMapJitDumpPath", /* noPrefix */ false, &PAL_getenv); +#endif if (value.IsSet()) { return value.AsString(); diff --git a/src/coreclr/vm/profdetach.cpp b/src/coreclr/vm/profdetach.cpp index 09f11458c38871..7bfcba8ed2cc6b 100644 --- a/src/coreclr/vm/profdetach.cpp +++ b/src/coreclr/vm/profdetach.cpp @@ -446,8 +446,8 @@ void ProfilingAPIDetach::SleepWhileProfilerEvacuates(ProfilerDetachInfo *pDetach } // ...but keep it in bounds! - ui64SleepMilliseconds = min( - max(ui64SleepMilliseconds, s_dwMinSleepMs), + ui64SleepMilliseconds = min( + max(ui64SleepMilliseconds, s_dwMinSleepMs), s_dwMaxSleepMs); // At this point it's safe to cast ui64SleepMilliseconds down to a DWORD since we diff --git a/src/coreclr/vm/proftoeeinterfaceimpl.cpp b/src/coreclr/vm/proftoeeinterfaceimpl.cpp index 8c94aca1c40898..e57cba3c597d7c 100644 --- a/src/coreclr/vm/proftoeeinterfaceimpl.cpp +++ b/src/coreclr/vm/proftoeeinterfaceimpl.cpp @@ -5605,7 +5605,7 @@ HRESULT ProfToEEInterfaceImpl::GetAssemblyInfo(AssemblyID assemblyId, if ((NULL != szName) && (cchName > 0)) { - wcsncpy_s(szName, cchName, name.GetUnicode(), min(nameLength, cchName - 1)); + wcsncpy_s(szName, cchName, name.GetUnicode(), min((size_t)nameLength, (size_t)(cchName - 1))); } if (NULL != pcchName) diff --git a/src/coreclr/vm/qcall.h b/src/coreclr/vm/qcall.h index d5f355ad9662b9..e3154c7b1334c5 100644 --- a/src/coreclr/vm/qcall.h +++ b/src/coreclr/vm/qcall.h @@ -7,7 +7,7 @@ #ifndef __QCall_h__ #define __QCall_h__ -#include "clr_std/type_traits" +#include // // QCALLS diff --git 
a/src/coreclr/vm/stackingallocator.cpp b/src/coreclr/vm/stackingallocator.cpp index cee2b40049100a..7db829eb1b441a 100644 --- a/src/coreclr/vm/stackingallocator.cpp +++ b/src/coreclr/vm/stackingallocator.cpp @@ -188,7 +188,7 @@ bool StackingAllocator::AllocNewBlockForBytes(unsigned n) // request is larger than MaxBlockSize then allocate exactly that // amount. unsigned lower = MinBlockSize; - size_t allocSize = sizeof(StackBlock) + max(n, min(max(n * 4, lower), MaxBlockSize)); + size_t allocSize = sizeof(StackBlock) + max(n, min(max(n * 4, lower), (unsigned)MaxBlockSize)); // Allocate the block. // @todo: Is it worth implementing a non-thread safe standard heap for diff --git a/src/coreclr/vm/stringliteralmap.cpp b/src/coreclr/vm/stringliteralmap.cpp index 29d457207bf8d6..55d2267f02c8c6 100644 --- a/src/coreclr/vm/stringliteralmap.cpp +++ b/src/coreclr/vm/stringliteralmap.cpp @@ -442,7 +442,7 @@ static void LogStringLiteral(_In_z_ const char* action, EEStringData *pStringDat STATIC_CONTRACT_FORBID_FAULT; ULONG length = pStringData->GetCharCount(); - length = min(length, 128); + length = min(length, (ULONG)128); WCHAR *szString = (WCHAR *)_alloca((length + 1) * sizeof(WCHAR)); memcpyNoGCRefs((void*)szString, (void*)pStringData->GetStringBuffer(), length * sizeof(WCHAR)); szString[length] = '\0'; diff --git a/src/coreclr/vm/syncblk.cpp b/src/coreclr/vm/syncblk.cpp index 2cc7de6bd2b09e..eb0b29fafdd026 100644 --- a/src/coreclr/vm/syncblk.cpp +++ b/src/coreclr/vm/syncblk.cpp @@ -2681,7 +2681,7 @@ BOOL AwareLock::EnterEpilogHelper(Thread* pCurThread, INT32 timeOut) { duration = end - start; } - duration = min(duration, (DWORD)timeOut); + duration = min(duration, (ULONGLONG)timeOut); timeOut -= (INT32)duration; } } diff --git a/src/coreclr/vm/threadstatics.cpp b/src/coreclr/vm/threadstatics.cpp index 94088ba3999474..6a8a43a0821efb 100644 --- a/src/coreclr/vm/threadstatics.cpp +++ b/src/coreclr/vm/threadstatics.cpp @@ -125,7 +125,7 @@ void 
ThreadLocalBlock::EnsureModuleIndex(ModuleIndex index) return; } - SIZE_T aModuleIndices = max(16, m_TLMTableSize); + SIZE_T aModuleIndices = max((SIZE_T)16, m_TLMTableSize); while (aModuleIndices <= index.m_dwIndex) { aModuleIndices *= 2; @@ -411,7 +411,7 @@ void ThreadLocalModule::EnsureDynamicClassIndex(DWORD dwID) return; } - SIZE_T aDynamicEntries = max(16, m_aDynamicEntries); + SIZE_T aDynamicEntries = max((SIZE_T)16, m_aDynamicEntries); while (aDynamicEntries <= dwID) { aDynamicEntries *= 2; diff --git a/src/coreclr/vm/util.hpp b/src/coreclr/vm/util.hpp index e7b311d8724d4e..ef05074b186a41 100644 --- a/src/coreclr/vm/util.hpp +++ b/src/coreclr/vm/util.hpp @@ -16,7 +16,7 @@ #include "clrdata.h" #include "xclrdata.h" #include "posterror.h" -#include "clr_std/type_traits" +#include // Hot cache lines need to be aligned to cache line size to improve performance #if defined(TARGET_ARM64) diff --git a/src/coreclr/vm/vars.hpp b/src/coreclr/vm/vars.hpp index 63a27a8701e542..51533187c8b656 100644 --- a/src/coreclr/vm/vars.hpp +++ b/src/coreclr/vm/vars.hpp @@ -16,46 +16,6 @@ typedef DPTR(SLOT) PTR_SLOT; typedef LPVOID DictionaryEntry; -/* Define the implementation dependent size types */ - -#ifndef _INTPTR_T_DEFINED -#ifdef HOST_64BIT -typedef __int64 intptr_t; -#else -typedef int intptr_t; -#endif -#define _INTPTR_T_DEFINED -#endif - -#ifndef _UINTPTR_T_DEFINED -#ifdef HOST_64BIT -typedef unsigned __int64 uintptr_t; -#else -typedef unsigned int uintptr_t; -#endif -#define _UINTPTR_T_DEFINED -#endif - -#ifndef _PTRDIFF_T_DEFINED -#ifdef HOST_64BIT -typedef __int64 ptrdiff_t; -#else -typedef int ptrdiff_t; -#endif -#define _PTRDIFF_T_DEFINED -#endif - - -#ifndef _SIZE_T_DEFINED -#ifdef HOST_64BIT -typedef unsigned __int64 size_t; -#else -typedef unsigned int size_t; -#endif -#define _SIZE_T_DEFINED -#endif - - #include "util.hpp" #include #include diff --git a/src/coreclr/vm/virtualcallstub.cpp b/src/coreclr/vm/virtualcallstub.cpp index e82f8b84a580b0..a87eac54f607cd 
100644 --- a/src/coreclr/vm/virtualcallstub.cpp +++ b/src/coreclr/vm/virtualcallstub.cpp @@ -823,6 +823,8 @@ void VirtualCallStubManager::ReclaimAll() g_reclaim_counter++; } +const UINT32 VirtualCallStubManager::counter_block::MAX_COUNTER_ENTRIES; + /* reclaim/rearrange any structures that can only be done during a gc sync point i.e. need to be serialized and non-concurrant. */ void VirtualCallStubManager::Reclaim() diff --git a/src/coreclr/vm/virtualcallstub.h b/src/coreclr/vm/virtualcallstub.h index e6d89dcf503819..156353b6c1862b 100644 --- a/src/coreclr/vm/virtualcallstub.h +++ b/src/coreclr/vm/virtualcallstub.h @@ -282,7 +282,7 @@ class VirtualCallStubManager : public StubManager m_counters(NULL), m_cur_counter_block(NULL), m_cur_counter_block_for_reclaim(NULL), - m_cur_counter_block_for_reclaim_index(NULL), + m_cur_counter_block_for_reclaim_index(0), m_pNext(NULL) { LIMITED_METHOD_CONTRACT; diff --git a/src/mono/dlls/mscordbi/CMakeLists.txt b/src/mono/dlls/mscordbi/CMakeLists.txt index 52b4e62ad34120..e39aeac5ac6d25 100644 --- a/src/mono/dlls/mscordbi/CMakeLists.txt +++ b/src/mono/dlls/mscordbi/CMakeLists.txt @@ -110,7 +110,6 @@ if (CLR_CMAKE_HOST_UNIX) add_subdirectory(${CLR_DIR}/pal pal) include_directories(${CLR_DIR}/pal/inc/rt/cpp) - add_compile_options(-nostdinc) endif (CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_HOST_UNIX) From 470df6172572054bdcdd4f798f9a34abd282dd49 Mon Sep 17 00:00:00 2001 From: Nikola Milosavljevic Date: Wed, 3 Apr 2024 23:33:32 -0700 Subject: [PATCH 080/132] Pass SourceBuiltSymbolsDir property to inner build (#100612) --- eng/DotNetBuild.props | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/eng/DotNetBuild.props b/eng/DotNetBuild.props index 56609e6046afa0..06ea2ee0466575 100644 --- a/eng/DotNetBuild.props +++ b/eng/DotNetBuild.props @@ -75,11 +75,12 @@ $(InnerBuildArgs) /p:PortableBuild=$(PortableBuild) $(InnerBuildArgs) /p:RestoreConfigFile=$(RestoreConfigFile) - + $(InnerBuildArgs) 
/p:SourceBuiltAssetsDir=$(SourceBuiltAssetsDir) $(InnerBuildArgs) /p:SourceBuiltShippingPackagesDir=$(SourceBuiltShippingPackagesDir) $(InnerBuildArgs) /p:SourceBuiltNonShippingPackagesDir=$(SourceBuiltNonShippingPackagesDir) $(InnerBuildArgs) /p:SourceBuiltAssetManifestsDir=$(SourceBuiltAssetManifestsDir) + $(InnerBuildArgs) /p:SourceBuiltSymbolsDir=$(SourceBuiltSymbolsDir) From a72ef3e2ee8cfafabe1305f7da04cd2c0fd47cac Mon Sep 17 00:00:00 2001 From: Radek Zikmund <32671551+rzikm@users.noreply.github.com> Date: Thu, 4 Apr 2024 09:51:35 +0200 Subject: [PATCH 081/132] Perform manual intermediate certificate lookup when creating MsQuicConfiguration (#100584) * Perform manual intermediate certificate lookup when creating MsQuicConfiguration * Delete src/libraries/System.Net.Security/tests/StressTests/SslStress/log * Move code after cache check --- .../src/System/Net/Quic/Internal/MsQuicConfiguration.cs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicConfiguration.cs b/src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicConfiguration.cs index e99f1a68ae9ec5..d45be601ae8673 100644 --- a/src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicConfiguration.cs +++ b/src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicConfiguration.cs @@ -199,6 +199,15 @@ private static MsQuicConfigurationSafeHandle Create(QuicConnectionOptions option private static unsafe MsQuicConfigurationSafeHandle CreateInternal(QUIC_SETTINGS settings, QUIC_CREDENTIAL_FLAGS flags, X509Certificate? certificate, ReadOnlyCollection? intermediates, List alpnProtocols, QUIC_ALLOWED_CIPHER_SUITE_FLAGS allowedCipherSuites) { + if (!MsQuicApi.UsesSChannelBackend && certificate is X509Certificate2 cert && intermediates is null) + { + // MsQuic will not lookup intermediates in local CA store if not explicitly provided, + // so we build the cert context to get on feature parity with SslStream. 
Note that this code + // path runs after the MsQuicConfigurationCache check. + SslStreamCertificateContext context = SslStreamCertificateContext.Create(cert, additionalCertificates: null, offline: true, trust: null); + intermediates = context.IntermediateCertificates; + } + QUIC_HANDLE* handle; using MsQuicBuffers msquicBuffers = new MsQuicBuffers(); From 5087f12090d24cb4130859289045958becf0d3af Mon Sep 17 00:00:00 2001 From: Jan Vorlicek Date: Thu, 4 Apr 2024 10:48:28 +0200 Subject: [PATCH 082/132] Add extra logging to the unhandled exception test (#100620) The test has failed with timeout couple of times in the CI, this change adds logging print after the target process completes. --- .../baseservices/exceptions/unhandled/unhandledTester.cs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/tests/baseservices/exceptions/unhandled/unhandledTester.cs b/src/tests/baseservices/exceptions/unhandled/unhandledTester.cs index 2c30f593dfd90f..cca4e573c0a1de 100644 --- a/src/tests/baseservices/exceptions/unhandled/unhandledTester.cs +++ b/src/tests/baseservices/exceptions/unhandled/unhandledTester.cs @@ -37,6 +37,7 @@ static void RunExternalProcess(string unhandledType, string assembly) testProcess.Start(); testProcess.BeginErrorReadLine(); testProcess.WaitForExit(); + Console.WriteLine($"Test process {assembly} with argument {unhandledType} exited"); testProcess.CancelErrorRead(); int expectedExitCode; @@ -111,6 +112,8 @@ static void RunExternalProcess(string unhandledType, string assembly) throw new Exception("Missing exception source frame"); } } + + Console.WriteLine("Test process exited with expected error code and produced expected output"); } [Fact] @@ -118,7 +121,7 @@ public static void TestEntryPoint() { RunExternalProcess("main", "unhandled.dll"); RunExternalProcess("foreign", "unhandled.dll"); - File.Delete(Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), "dependencytodelete.dll")); + 
File.Delete(Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), "dependencytodelete.dll")); RunExternalProcess("missingdependency", "unhandledmissingdependency.dll"); } } From e4d9e41dab1b42d7128ca020763f2a5779747a75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Strehovsk=C3=BD?= Date: Thu, 4 Apr 2024 17:57:33 +0900 Subject: [PATCH 083/132] Fix official build break (#100624) #100512 caused an official build break by exposing a latent issue in how we build the managed artifacts. Official builds split managed and native builds for some reason. This is throwing off the heuristic that decides whether to use native AOT's corelib in build. We were always using the native AOT's corelib in x64 and arm64 legs because of the thrown off heuristic. This apparently didn't cause an immediate problem there. It causes a problem on x86 because crossgen2 is a trimmed+r2r+singlefile executable on x86 (and nowhere else) and ILLink execution doesn't like the layouts with `UseNativeAotCoreLib`. Tweak the heuristic to not use native AOT corelib if we're building JIT corelib. --- eng/Subsets.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eng/Subsets.props b/eng/Subsets.props index cbe9cfc7d73c99..63aca146463a5e 100644 --- a/eng/Subsets.props +++ b/eng/Subsets.props @@ -124,7 +124,7 @@ true - true + true From b4765bd242a49cf932ae96b2bced86830de97b55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Strehovsk=C3=BD?= Date: Thu, 4 Apr 2024 18:07:23 +0900 Subject: [PATCH 084/132] Mark managed/Compilation test as NativeAotIncompatible (#100625) The issues.targets exclusion doesn't seem to kick in with the merged wrapper, but whatever: this test compiles into a 100 MB executable because it includes a fully rooted Roslyn, and doesn't work anyway. Stop building it. 
--- src/tests/issues.targets | 3 --- src/tests/managed/Compilation/Compilation.csproj | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tests/issues.targets b/src/tests/issues.targets index 8515eecf4d8e93..886d6dd9f743e8 100644 --- a/src/tests/issues.targets +++ b/src/tests/issues.targets @@ -1084,9 +1084,6 @@ https://github.com/dotnet/runtimelab/issues/155: Reflection.Emit - - expects to see System.Private.CoreLib in CORE_ROOT - diff --git a/src/tests/managed/Compilation/Compilation.csproj b/src/tests/managed/Compilation/Compilation.csproj index ecd42497008239..acf9a54b686803 100644 --- a/src/tests/managed/Compilation/Compilation.csproj +++ b/src/tests/managed/Compilation/Compilation.csproj @@ -4,6 +4,9 @@ true true + + + true From 223e9e71954433544106d5c90e06b5c8113e51d9 Mon Sep 17 00:00:00 2001 From: rzsc <160726116+rzsc@users.noreply.github.com> Date: Thu, 4 Apr 2024 11:49:02 +0200 Subject: [PATCH 085/132] Enable tail call generation for test Runtime_87393 (#100593) To pass the test needs to be built with --tailcalls compiler flag enabled. 
--- .../JIT/Regression/JitBlue/Runtime_87393/Runtime_87393.fsproj | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tests/JIT/Regression/JitBlue/Runtime_87393/Runtime_87393.fsproj b/src/tests/JIT/Regression/JitBlue/Runtime_87393/Runtime_87393.fsproj index 5da8e9a9edeb2a..01cde35672f6a2 100644 --- a/src/tests/JIT/Regression/JitBlue/Runtime_87393/Runtime_87393.fsproj +++ b/src/tests/JIT/Regression/JitBlue/Runtime_87393/Runtime_87393.fsproj @@ -7,6 +7,7 @@ True $(NetCoreAppToolCurrent) True + --tailcalls+ From d96f2247db0e24b4c5397c5c6670e60d3bd78412 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Fi=C5=A1era?= Date: Thu, 4 Apr 2024 13:36:18 +0200 Subject: [PATCH 086/132] [browser] Run tests in parallel (#98492) Co-authored-by: Larry Ewing Co-authored-by: Pavel Savara --- eng/testing/tests.browser.targets | 2 ++ .../tests/WasmTestRunner/WasmTestRunner.cs | 20 ++++++++++++++----- .../System.Net.WebSockets.Client.Tests.csproj | 1 + ...me.InteropServices.JavaScript.Tests.csproj | 1 + .../System.Buffers.Tests.csproj | 3 +++ .../System.Text.Json.Tests.csproj | 1 + 6 files changed, 23 insertions(+), 5 deletions(-) diff --git a/eng/testing/tests.browser.targets b/eng/testing/tests.browser.targets index bce044984e9379..982b8589e76c14 100644 --- a/eng/testing/tests.browser.targets +++ b/eng/testing/tests.browser.targets @@ -94,6 +94,7 @@ $(WasmXHarnessMonoArgs) --setenv=XHARNESS_LOG_TEST_START=true $(WasmXHarnessMonoArgs) --setenv=IsBrowserThreadingSupported=true + 8 @@ -112,6 +113,7 @@ <_XHarnessArgs Condition="'$(WasmXHarnessArgsCli)' != ''" >$(_XHarnessArgs) $(WasmXHarnessArgsCli) <_AppArgs Condition="'$(WasmEnableThreads)' == 'true'">$(_AppArgs) -threads + <_AppArgs Condition="'$(WasmXHarnessMaxParallelThreads)' != ''">$(_AppArgs) -parallelThreads $(WasmXHarnessMaxParallelThreads) $HARNESS_RUNNER $(_XHarnessArgs) %24XHARNESS_ARGS %24WasmXHarnessArgs -- $(WasmXHarnessMonoArgs) %24WasmXHarnessMonoArgs $(_AppArgs) %24WasmTestAppArgs %HARNESS_RUNNER% $(_XHarnessArgs) 
%XHARNESS_ARGS% %WasmXHarnessArgs% -- $(WasmXHarnessMonoArgs) %WasmXHarnessMonoArgs% $(_AppArgs) %WasmTestAppArgs% diff --git a/src/libraries/Common/tests/WasmTestRunner/WasmTestRunner.cs b/src/libraries/Common/tests/WasmTestRunner/WasmTestRunner.cs index 2eb1c1e440c33b..9935182c69d038 100644 --- a/src/libraries/Common/tests/WasmTestRunner/WasmTestRunner.cs +++ b/src/libraries/Common/tests/WasmTestRunner/WasmTestRunner.cs @@ -9,8 +9,8 @@ public class WasmTestRunner : WasmApplicationEntryPoint { - // TODO: Set max threads for run in parallel - // protected override int? MaxParallelThreads => RunInParallel ? 8 : base.MaxParallelThreads; + protected int MaxParallelThreadsFromArg { get; set; } + protected override int? MaxParallelThreads => RunInParallel ? MaxParallelThreadsFromArg : base.MaxParallelThreads; public static async Task Main(string[] args) { @@ -65,9 +65,11 @@ public static async Task Main(string[] args) break; case "-threads": runner.IsThreadless = false; - // TODO: Enable run in parallel - // runner.RunInParallel = true; - // Console.WriteLine($"Running in parallel with {runner.MaxParallelThreads} threads."); + break; + case "-parallelThreads": + runner.MaxParallelThreadsFromArg = Math.Max(1, int.Parse(args[i + 1])); + runner.RunInParallel = runner.MaxParallelThreadsFromArg > 1; + i++; break; case "-verbosity": runner.MinimumLogLevel = Enum.Parse(args[i + 1]); @@ -105,4 +107,12 @@ public static async Task Main(string[] args) return res; } + + public override Task RunAsync() + { + if (RunInParallel) + Console.WriteLine($"Running in parallel with {MaxParallelThreads} threads."); + + return base.RunAsync(); + } } diff --git a/src/libraries/System.Net.WebSockets.Client/tests/System.Net.WebSockets.Client.Tests.csproj b/src/libraries/System.Net.WebSockets.Client/tests/System.Net.WebSockets.Client.Tests.csproj index 0292fd4c043532..2c7762cc6bc861 100644 --- a/src/libraries/System.Net.WebSockets.Client/tests/System.Net.WebSockets.Client.Tests.csproj +++ 
b/src/libraries/System.Net.WebSockets.Client/tests/System.Net.WebSockets.Client.Tests.csproj @@ -17,6 +17,7 @@ 01:15:00 + 1 diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System.Runtime.InteropServices.JavaScript.Tests.csproj b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System.Runtime.InteropServices.JavaScript.Tests.csproj index e21c5d8c5e16dc..135b1b78297ab8 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System.Runtime.InteropServices.JavaScript.Tests.csproj +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System.Runtime.InteropServices.JavaScript.Tests.csproj @@ -14,6 +14,7 @@ true true + 1 diff --git a/src/libraries/System.Runtime/tests/System.Buffers.Tests/System.Buffers.Tests.csproj b/src/libraries/System.Runtime/tests/System.Buffers.Tests/System.Buffers.Tests.csproj index 10b79281bf52b6..66ae6833a50eb5 100644 --- a/src/libraries/System.Runtime/tests/System.Buffers.Tests/System.Buffers.Tests.csproj +++ b/src/libraries/System.Runtime/tests/System.Buffers.Tests/System.Buffers.Tests.csproj @@ -5,6 +5,9 @@ true $(NetCoreAppCurrent) + + 1 + diff --git a/src/libraries/System.Text.Json/tests/System.Text.Json.Tests/System.Text.Json.Tests.csproj b/src/libraries/System.Text.Json/tests/System.Text.Json.Tests/System.Text.Json.Tests.csproj index ddb87ab85f5745..61bea23cac8200 100644 --- a/src/libraries/System.Text.Json/tests/System.Text.Json.Tests/System.Text.Json.Tests.csproj +++ b/src/libraries/System.Text.Json/tests/System.Text.Json.Tests/System.Text.Json.Tests.csproj @@ -21,6 +21,7 @@ true 01:15:00 + 1 From ffb257818db89672d84d538964cec1b54fb2a094 Mon Sep 17 00:00:00 2001 From: Pavel Savara Date: Thu, 4 Apr 2024 13:41:18 +0200 Subject: [PATCH 087/132] [browser][MT] 
Handling blocking wait (#99833) --- .../ref/System.Private.CoreLib.ExtraApis.cs | 2 + .../ref/System.Private.CoreLib.ExtraApis.txt | 1 + .../System/Threading/LowLevelLifoSemaphore.cs | 4 + .../System/Threading/ManualResetEventSlim.cs | 4 + .../src/System/Threading/Thread.cs | 22 ++++- .../src/System/Threading/WaitHandle.cs | 4 + .../JavaScript/Interop/JavaScriptExports.cs | 71 ++++++++++------ .../JavaScript/JSFunctionBinding.cs | 26 +----- .../JavaScript/JSHostImplementation.Types.cs | 36 +-------- .../JavaScript/JSProxyContext.cs | 6 +- .../JavaScript/JSSynchronizationContext.cs | 13 ++- .../JavaScript/WebWorkerTest.Http.cs | 2 +- .../JavaScript/WebWorkerTest.cs | 53 +++++++----- .../JavaScript/WebWorkerTestBase.cs | 81 ++++++++++++++----- .../JavaScript/WebWorkerTestHelper.cs | 2 +- .../CompatibilitySuppressions.Threading.xml | 4 + src/mono/browser/runtime/corebindings.c | 2 + src/mono/browser/runtime/exports-binding.ts | 4 +- src/mono/browser/runtime/interp-pgo.ts | 2 - src/mono/browser/runtime/loader/config.ts | 35 +------- src/mono/browser/runtime/managed-exports.ts | 51 ++++++++---- src/mono/browser/runtime/multi-threading.md | 52 ------------ src/mono/browser/runtime/pthreads/index.ts | 8 ++ src/mono/browser/runtime/startup.ts | 18 ++--- src/mono/browser/runtime/types/internal.ts | 63 +++++++-------- src/mono/browser/test-main.js | 3 +- .../sample/wasm/browser-threads/Program.cs | 7 +- src/mono/sample/wasm/browser-threads/main.js | 3 + 28 files changed, 298 insertions(+), 281 deletions(-) delete mode 100644 src/mono/browser/runtime/multi-threading.md diff --git a/src/libraries/System.Private.CoreLib/ref/System.Private.CoreLib.ExtraApis.cs b/src/libraries/System.Private.CoreLib/ref/System.Private.CoreLib.ExtraApis.cs index b16548c7b4c374..45a5b9f0372696 100644 --- a/src/libraries/System.Private.CoreLib/ref/System.Private.CoreLib.ExtraApis.cs +++ b/src/libraries/System.Private.CoreLib/ref/System.Private.CoreLib.ExtraApis.cs @@ -45,6 +45,8 @@ public partial 
class Thread { [ThreadStatic] public static bool ThrowOnBlockingWaitOnJSInteropThread; + [ThreadStatic] + public static bool WarnOnBlockingWaitOnJSInteropThread; public static void AssureBlockingPossible() { throw null; } public static void ForceBlockingWait(Action action, object? state) { throw null; } diff --git a/src/libraries/System.Private.CoreLib/ref/System.Private.CoreLib.ExtraApis.txt b/src/libraries/System.Private.CoreLib/ref/System.Private.CoreLib.ExtraApis.txt index 3b80cb0de6753b..2a6434973ff1e3 100644 --- a/src/libraries/System.Private.CoreLib/ref/System.Private.CoreLib.ExtraApis.txt +++ b/src/libraries/System.Private.CoreLib/ref/System.Private.CoreLib.ExtraApis.txt @@ -7,4 +7,5 @@ T:System.Diagnostics.DebugProvider M:System.Diagnostics.Debug.SetProvider(System.Diagnostics.DebugProvider) M:System.Threading.Thread.AssureBlockingPossible F:System.Threading.Thread.ThrowOnBlockingWaitOnJSInteropThread +F:System.Threading.Thread.WarnOnBlockingWaitOnJSInteropThread F:System.Threading.Thread.ForceBlockingWait diff --git a/src/libraries/System.Private.CoreLib/src/System/Threading/LowLevelLifoSemaphore.cs b/src/libraries/System.Private.CoreLib/src/System/Threading/LowLevelLifoSemaphore.cs index 7f7bddf24737b3..39233c87c15c96 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Threading/LowLevelLifoSemaphore.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Threading/LowLevelLifoSemaphore.cs @@ -41,6 +41,10 @@ public bool Wait(int timeoutMs, bool spinWait) { Debug.Assert(timeoutMs >= -1); +#if FEATURE_WASM_MANAGED_THREADS + Thread.AssureBlockingPossible(); +#endif + int spinCount = spinWait ? 
_spinCount : 0; // Try to acquire the semaphore or diff --git a/src/libraries/System.Private.CoreLib/src/System/Threading/ManualResetEventSlim.cs b/src/libraries/System.Private.CoreLib/src/System/Threading/ManualResetEventSlim.cs index a385543f9174ab..516fb42bf0a52e 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Threading/ManualResetEventSlim.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Threading/ManualResetEventSlim.cs @@ -485,6 +485,10 @@ public bool Wait(int millisecondsTimeout, CancellationToken cancellationToken) ArgumentOutOfRangeException.ThrowIfLessThan(millisecondsTimeout, -1); +#if FEATURE_WASM_MANAGED_THREADS + Thread.AssureBlockingPossible(); +#endif + if (!IsSet) { if (millisecondsTimeout == 0) diff --git a/src/libraries/System.Private.CoreLib/src/System/Threading/Thread.cs b/src/libraries/System.Private.CoreLib/src/System/Threading/Thread.cs index 9d3fd7a0466d72..3ef77076a01988 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Threading/Thread.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Threading/Thread.cs @@ -729,26 +729,46 @@ public static int GetCurrentProcessorId() [ThreadStatic] public static bool ThrowOnBlockingWaitOnJSInteropThread; - public static void AssureBlockingPossible() + [ThreadStatic] + public static bool WarnOnBlockingWaitOnJSInteropThread; + +#pragma warning disable CS3001 + [MethodImplAttribute(MethodImplOptions.InternalCall)] + private static extern unsafe void WarnAboutBlockingWait(char* stack, int length); + + public static unsafe void AssureBlockingPossible() { if (ThrowOnBlockingWaitOnJSInteropThread) { throw new PlatformNotSupportedException(SR.WasmThreads_BlockingWaitNotSupportedOnJSInterop); } + else if (WarnOnBlockingWaitOnJSInteropThread) + { + var st = $"Blocking the thread with JS interop is dangerous and could lead to deadlock. 
ManagedThreadId: {Environment.CurrentManagedThreadId}\n{Environment.StackTrace}"; + fixed (char* stack = st) + { + WarnAboutBlockingWait(stack, st.Length); + } + } } +#pragma warning restore CS3001 + public static void ForceBlockingWait(Action action, object? state = null) { var flag = ThrowOnBlockingWaitOnJSInteropThread; + var wflag = WarnOnBlockingWaitOnJSInteropThread; try { ThrowOnBlockingWaitOnJSInteropThread = false; + WarnOnBlockingWaitOnJSInteropThread = false; action(state); } finally { ThrowOnBlockingWaitOnJSInteropThread = flag; + WarnOnBlockingWaitOnJSInteropThread = wflag; } } #endif diff --git a/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.cs b/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.cs index 21920bc39b754f..d215a82cd3234f 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.cs @@ -117,6 +117,10 @@ internal bool WaitOneNoCheck( SafeWaitHandle? 
waitHandle = _waitHandle; ObjectDisposedException.ThrowIf(waitHandle is null, this); +#if FEATURE_WASM_MANAGED_THREADS + Thread.AssureBlockingPossible(); +#endif + bool success = false; try { diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Interop/JavaScriptExports.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Interop/JavaScriptExports.cs index 6faf786f3bd539..c7bb4a81d3bd56 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Interop/JavaScriptExports.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/Interop/JavaScriptExports.cs @@ -123,18 +123,25 @@ public static void CallDelegate(JSMarshalerArgument* arguments_buffer) // arg_2 set by JS caller when there are arguments // arg_3 set by JS caller when there are arguments // arg_4 set by JS caller when there are arguments +#if !FEATURE_WASM_MANAGED_THREADS try { -#if FEATURE_WASM_MANAGED_THREADS - // when we arrive here, we are on the thread which owns the proxies - // if we need to dispatch the call to another thread in the future - // we may need to consider how to solve blocking of the synchronous call - // see also https://github.com/dotnet/runtime/issues/76958#issuecomment-1921418290 - arg_exc.AssertCurrentThreadContext(); +#else + // when we arrive here, we are on the thread which owns the proxies + var ctx = arg_exc.AssertCurrentThreadContext(); - if (JSProxyContext.ThreadBlockingMode == JSHostImplementation.JSThreadBlockingMode.AllowBlockingWaitInAsyncCode) + try + { + if (ctx.IsMainThread) { - Thread.ThrowOnBlockingWaitOnJSInteropThread = true; + if (JSProxyContext.ThreadBlockingMode == JSHostImplementation.JSThreadBlockingMode.ThrowWhenBlockingWait) + { + Thread.ThrowOnBlockingWaitOnJSInteropThread = true; + } + else if (JSProxyContext.ThreadBlockingMode == 
JSHostImplementation.JSThreadBlockingMode.WarnWhenBlockingWait) + { + Thread.WarnOnBlockingWaitOnJSInteropThread = true; + } } #endif @@ -156,9 +163,16 @@ public static void CallDelegate(JSMarshalerArgument* arguments_buffer) #if FEATURE_WASM_MANAGED_THREADS finally { - if (JSProxyContext.ThreadBlockingMode == JSHostImplementation.JSThreadBlockingMode.AllowBlockingWaitInAsyncCode) + if (ctx.IsMainThread) { - Thread.ThrowOnBlockingWaitOnJSInteropThread = false; + if (JSProxyContext.ThreadBlockingMode == JSHostImplementation.JSThreadBlockingMode.ThrowWhenBlockingWait) + { + Thread.ThrowOnBlockingWaitOnJSInteropThread = false; + } + else if (JSProxyContext.ThreadBlockingMode == JSHostImplementation.JSThreadBlockingMode.WarnWhenBlockingWait) + { + Thread.WarnOnBlockingWaitOnJSInteropThread = false; + } } } #endif @@ -189,12 +203,9 @@ public static void CompleteTask(JSMarshalerArgument* arguments_buffer) } } - if (holder.CallbackReady != null) - { -#pragma warning disable CA1416 // Validate platform compatibility - Thread.ForceBlockingWait(static (callbackReady) => ((ManualResetEventSlim)callbackReady!).Wait(), holder.CallbackReady); -#pragma warning restore CA1416 // Validate platform compatibility - } + // this is always running on I/O thread, so it will not throw PNSE + // it's also OK to block here, because we know we will only block shortly, as this is just race with the other thread. 
+ holder.CallbackReady?.Wait(); lock (ctx) { @@ -247,21 +258,17 @@ public static void GetManagedStackTrace(JSMarshalerArgument* arguments_buffer) // this is here temporarily, until JSWebWorker becomes public API [DynamicDependency(DynamicallyAccessedMemberTypes.NonPublicMethods, "System.Runtime.InteropServices.JavaScript.JSWebWorker", "System.Runtime.InteropServices.JavaScript")] - // the marshaled signature is: GCHandle InstallMainSynchronizationContext(nint jsNativeTID, JSThreadBlockingMode jsThreadBlockingMode, JSThreadInteropMode jsThreadInteropMode, MainThreadingMode mainThreadingMode) + // the marshaled signature is: GCHandle InstallMainSynchronizationContext(nint jsNativeTID, JSThreadBlockingMode jsThreadBlockingMode) public static void InstallMainSynchronizationContext(JSMarshalerArgument* arguments_buffer) { ref JSMarshalerArgument arg_exc = ref arguments_buffer[0]; // initialized by caller in alloc_stack_frame() ref JSMarshalerArgument arg_res = ref arguments_buffer[1];// initialized and set by caller ref JSMarshalerArgument arg_1 = ref arguments_buffer[2];// initialized and set by caller ref JSMarshalerArgument arg_2 = ref arguments_buffer[3];// initialized and set by caller - ref JSMarshalerArgument arg_3 = ref arguments_buffer[4];// initialized and set by caller - ref JSMarshalerArgument arg_4 = ref arguments_buffer[5];// initialized and set by caller try { JSProxyContext.ThreadBlockingMode = (JSHostImplementation.JSThreadBlockingMode)arg_2.slot.Int32Value; - JSProxyContext.ThreadInteropMode = (JSHostImplementation.JSThreadInteropMode)arg_3.slot.Int32Value; - JSProxyContext.MainThreadingMode = (JSHostImplementation.MainThreadingMode)arg_4.slot.Int32Value; var jsSynchronizationContext = JSSynchronizationContext.InstallWebWorkerInterop(true, CancellationToken.None); jsSynchronizationContext.ProxyContext.JSNativeTID = arg_1.slot.IntPtrValue; arg_res.slot.GCHandle = jsSynchronizationContext.ProxyContext.ContextHandle; @@ -283,9 +290,16 @@ public static 
void BeforeSyncJSExport(JSMarshalerArgument* arguments_buffer) { var ctx = arg_exc.AssertCurrentThreadContext(); ctx.IsPendingSynchronousCall = true; - if (JSProxyContext.ThreadBlockingMode == JSHostImplementation.JSThreadBlockingMode.AllowBlockingWaitInAsyncCode) + if (ctx.IsMainThread) { - Thread.ThrowOnBlockingWaitOnJSInteropThread = true; + if (JSProxyContext.ThreadBlockingMode == JSHostImplementation.JSThreadBlockingMode.ThrowWhenBlockingWait) + { + Thread.ThrowOnBlockingWaitOnJSInteropThread = true; + } + else if (JSProxyContext.ThreadBlockingMode == JSHostImplementation.JSThreadBlockingMode.WarnWhenBlockingWait) + { + Thread.WarnOnBlockingWaitOnJSInteropThread = true; + } } } catch (Exception ex) @@ -305,9 +319,16 @@ public static void AfterSyncJSExport(JSMarshalerArgument* arguments_buffer) { var ctx = arg_exc.AssertCurrentThreadContext(); ctx.IsPendingSynchronousCall = false; - if (JSProxyContext.ThreadBlockingMode == JSHostImplementation.JSThreadBlockingMode.AllowBlockingWaitInAsyncCode) + if (ctx.IsMainThread) { - Thread.ThrowOnBlockingWaitOnJSInteropThread = false; + if (JSProxyContext.ThreadBlockingMode == JSHostImplementation.JSThreadBlockingMode.ThrowWhenBlockingWait) + { + Thread.ThrowOnBlockingWaitOnJSInteropThread = false; + } + else if (JSProxyContext.ThreadBlockingMode == JSHostImplementation.JSThreadBlockingMode.WarnWhenBlockingWait) + { + Thread.WarnOnBlockingWaitOnJSInteropThread = false; + } } } catch (Exception ex) diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSFunctionBinding.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSFunctionBinding.cs index 61ea2a85467966..f2d908d947074f 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSFunctionBinding.cs +++ 
b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSFunctionBinding.cs @@ -230,11 +230,7 @@ internal static unsafe void InvokeJSFunction(JSObject jsFunction, Span arguments) { #if FEATURE_WASM_MANAGED_THREADS - if (JSProxyContext.ThreadInteropMode == JSHostImplementation.JSThreadInteropMode.NoSyncJSInterop) - { - throw new PlatformNotSupportedException("Cannot call synchronous JS functions."); - } - else if (jsFunction.ProxyContext.IsPendingSynchronousCall) + if (jsFunction.ProxyContext.IsPendingSynchronousCall && jsFunction.ProxyContext.IsMainThread) { throw new PlatformNotSupportedException("Cannot call synchronous JS function from inside a synchronous call to a C# method."); } @@ -260,11 +256,7 @@ internal static unsafe void InvokeJSFunctionCurrent(JSObject jsFunction, Span arguments) { #if FEATURE_WASM_MANAGED_THREADS - if (JSProxyContext.ThreadInteropMode == JSHostImplementation.JSThreadInteropMode.NoSyncJSInterop) - { - throw new PlatformNotSupportedException("Cannot call synchronous JS functions."); - } - else if (jsFunction.ProxyContext.IsPendingSynchronousCall) + if (jsFunction.ProxyContext.IsPendingSynchronousCall && jsFunction.ProxyContext.IsMainThread) { throw new PlatformNotSupportedException("Cannot call synchronous JS function from inside a synchronous call to a C# method."); } @@ -274,10 +266,8 @@ internal static unsafe void DispatchJSFunctionSync(JSObject jsFunction, Span(async () => { CancellationTokenSource cts = new CancellationTokenSource(); var promise = response.Content.ReadAsStringAsync(cts.Token); - Console.WriteLine("HttpClient_CancelInDifferentThread: ManagedThreadId: " + Environment.CurrentManagedThreadId + " NativeThreadId: " + WebWorkerTestHelper.NativeThreadId); + WebWorkerTestHelper.Log("HttpClient_CancelInDifferentThread: ManagedThreadId: " + Environment.CurrentManagedThreadId + " NativeThreadId: " + WebWorkerTestHelper.NativeThreadId); cts.Cancel(); var res = await promise; 
throw new Exception("This should be unreachable: " + res); diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/WebWorkerTest.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/WebWorkerTest.cs index c67bac997294b9..0a2ae44142dc35 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/WebWorkerTest.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/WebWorkerTest.cs @@ -11,15 +11,9 @@ namespace System.Runtime.InteropServices.JavaScript.Tests // TODO test: // JSExport 2x - // JSExport async - // lock - // thread allocation, many threads // ProxyContext flow, child thread, child task // use JSObject after JSWebWorker finished, especially HTTP - // WS on JSWebWorker - // HTTP continue on TP // event pipe - // FS // JS setTimeout till after JSWebWorker close // synchronous .Wait for JS setTimeout on the same thread -> deadlock problem **7)** @@ -159,7 +153,7 @@ public async Task JSSynchronizationContext_Send_Post_Items_Cancellation() } catch (Exception ex) { - Console.WriteLine("Unexpected exception " + ex); + WebWorkerTestHelper.Log("Unexpected exception " + ex); postReady.SetException(ex); return Task.FromException(ex); } @@ -344,7 +338,7 @@ public async Task ManagedConsole(Executor executor) using var cts = CreateTestCaseTimeoutSource(); await executor.Execute(() => { - Console.WriteLine("C# Hello from ManagedThreadId: " + Environment.CurrentManagedThreadId); + WebWorkerTestHelper.Log("C# Hello from ManagedThreadId: " + Environment.CurrentManagedThreadId); Console.Clear(); return Task.CompletedTask; }, cts.Token); @@ 
-392,7 +386,7 @@ public async Task ThreadingTimer(Executor executor) await executor.Execute(async () => { TaskCompletionSource tcs = new TaskCompletionSource(); - Console.WriteLine("ThreadingTimer: Start Time: " + DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss.fff") + " ManagedThreadId: " + Environment.CurrentManagedThreadId + " NativeThreadId: " + WebWorkerTestHelper.NativeThreadId); + WebWorkerTestHelper.Log("ThreadingTimer: Start Time: " + DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss.fff") + " ManagedThreadId: " + Environment.CurrentManagedThreadId + " NativeThreadId: " + WebWorkerTestHelper.NativeThreadId); using var timer = new Timer(_ => { @@ -405,7 +399,7 @@ await executor.Execute(async () => await tcs.Task; }, cts.Token); - Console.WriteLine("ThreadingTimer: End Time: " + DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss.fff") + " ManagedThreadId: " + Environment.CurrentManagedThreadId + " NativeThreadId: " + WebWorkerTestHelper.NativeThreadId); + WebWorkerTestHelper.Log("ThreadingTimer: End Time: " + DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss.fff") + " ManagedThreadId: " + Environment.CurrentManagedThreadId + " NativeThreadId: " + WebWorkerTestHelper.NativeThreadId); Assert.True(hit); } @@ -496,7 +490,7 @@ await executor.Execute(async () => } [Theory, MemberData(nameof(GetTargetThreadsAndBlockingCalls))] - public async Task WaitDoesNotAssertInAsyncCode(Executor executor, NamedCall method) + public async Task WaitInAsyncAssertsOnlyOnJSWebWorker(Executor executor, NamedCall method) { using var cts = CreateTestCaseTimeoutSource(); await executor.Execute(async () => @@ -513,7 +507,15 @@ await executor.Execute(async () => exception = ex; } - Assert.Null(exception); + if (method.IsBlocking && executor.Type == ExecutorType.JSWebWorker) + { + Assert.NotNull(exception); + Assert.IsType(exception); + } + else + { + Assert.Null(exception); + } }, cts.Token); } @@ -527,7 +529,8 @@ await executor.Execute(async () => Exception? 
exception = null; // the callback will hit Main or JSWebWorker, not the original executor thread - await WebWorkerTestHelper.CallMeBackSync(() => { + await WebWorkerTestHelper.CallMeBackSync(() => + { // when we are inside of synchronous callback, all blocking .Wait is forbidden try { @@ -539,9 +542,15 @@ await WebWorkerTestHelper.CallMeBackSync(() => { } }); - Console.WriteLine("WaitAssertsOnJSInteropThreads: ExecuterType: " + executor.Type + " ManagedThreadId: " + Environment.CurrentManagedThreadId + " NativeThreadId: " + WebWorkerTestHelper.NativeThreadId); - Assert.NotNull(exception); - Assert.IsType(exception); + if (method.IsBlocking) + { + Assert.NotNull(exception); + Assert.IsType(exception); + } + else + { + Assert.Null(exception); + } }, cts.Token); } @@ -558,9 +567,15 @@ await executor.Execute(async () => // the callback will hit Main or JSWebWorker, not the original executor thread await WebWorkerTestHelper.CallExportBackSync(nameof(WebWorkerTestHelper.CallCurrentCallback)); - Console.WriteLine("WaitAssertsOnJSInteropThreads: ExecuterType: " + executor.Type + " ManagedThreadId: " + Environment.CurrentManagedThreadId + " NativeThreadId: " + WebWorkerTestHelper.NativeThreadId); - Assert.NotNull(WebWorkerTestHelper.LastException); - Assert.IsType(WebWorkerTestHelper.LastException); + if (method.IsBlocking) + { + Assert.NotNull(WebWorkerTestHelper.LastException); + Assert.IsType(WebWorkerTestHelper.LastException); + } + else + { + Assert.Null(WebWorkerTestHelper.LastException); + } }, cts.Token); } diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/WebWorkerTestBase.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/WebWorkerTestBase.cs index 1df4c61e6bcbc6..77aef0857a8d2e 100644 --- 
a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/WebWorkerTestBase.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/WebWorkerTestBase.cs @@ -1,6 +1,7 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +using System.IO; using System.Threading.Tasks; using System.Threading; using Xunit; @@ -35,7 +36,7 @@ protected CancellationTokenSource CreateTestCaseTimeoutSource([CallerMemberName] cts.Token.Register(() => { var end = DateTime.Now; - Console.WriteLine($"Unexpected test case {memberName} timeout after {end - start} ManagedThreadId:{Environment.CurrentManagedThreadId}"); + WebWorkerTestHelper.Log($"Unexpected test case {memberName} timeout after {end - start} ManagedThreadId:{Environment.CurrentManagedThreadId}"); }); return cts; } @@ -90,7 +91,7 @@ async Task ActionsInDifferentThreads1() } catch (Exception ex) { - Console.WriteLine("ActionsInDifferentThreads1 failed\n" + ex); + WebWorkerTestHelper.Log("ActionsInDifferentThreads1 failed\n" + ex); job1ReadyTCS.SetResult(default); e1Failed = true; throw; @@ -137,36 +138,74 @@ async Task ActionsInDifferentThreads2() } if (!e1Done || !e2Done) { - Console.WriteLine("ActionsInDifferentThreads canceling because of unexpected fail: \n" + ex); + WebWorkerTestHelper.Log("ActionsInDifferentThreads canceling because of unexpected fail: \n" + ex); cts.Cancel(); } else { - Console.WriteLine("ActionsInDifferentThreads failed with: \n" + ex); + WebWorkerTestHelper.Log("ActionsInDifferentThreads failed with: \n" + ex); } throw; } } + static void LocalCtsIgnoringCall(Action action) + { + var cts = new CancellationTokenSource(8); + try + { + action(cts.Token); + } + catch (OperationCanceledException exception) + { + if 
(exception.CancellationToken != cts.Token) + { + throw; + } + /* ignore the local one */ + } + } + public static IEnumerable BlockingCalls = new List { - new NamedCall { Name = "Task.Wait", Call = delegate (CancellationToken ct) { Task.Delay(10, ct).Wait(ct); }}, - new NamedCall { Name = "Task.WaitAll", Call = delegate (CancellationToken ct) { Task.WaitAll(Task.Delay(10, ct)); }}, - new NamedCall { Name = "Task.WaitAny", Call = delegate (CancellationToken ct) { Task.WaitAny(Task.Delay(10, ct)); }}, - new NamedCall { Name = "ManualResetEventSlim.Wait", Call = delegate (CancellationToken ct) { - using var mr = new ManualResetEventSlim(false); - using var cts = new CancellationTokenSource(8); - try { - mr.Wait(cts.Token); - } catch (OperationCanceledException) { /* ignore */ } - }}, - new NamedCall { Name = "SemaphoreSlim.Wait", Call = delegate (CancellationToken ct) { - using var sem = new SemaphoreSlim(2); - var cts = new CancellationTokenSource(8); - try { - sem.Wait(cts.Token); - } catch (OperationCanceledException) { /* ignore */ } - }}, + // things that should NOT throw PNSE + new NamedCall { IsBlocking = false, Name = "Console.WriteLine", Call = delegate (CancellationToken ct) { Console.WriteLine("Blocking"); }}, + new NamedCall { IsBlocking = false, Name = "Directory.GetCurrentDirectory", Call = delegate (CancellationToken ct) { Directory.GetCurrentDirectory(); }}, + new NamedCall { IsBlocking = false, Name = "CancellationTokenSource.ctor", Call = delegate (CancellationToken ct) { + using var cts = new CancellationTokenSource(8); + }}, + new NamedCall { IsBlocking = false, Name = "Task.Delay", Call = delegate (CancellationToken ct) { + Task.Delay(30, ct); + }}, + new NamedCall { IsBlocking = false, Name = "new Timer", Call = delegate (CancellationToken ct) { + new Timer((_) => { }, null, 1, -1); + }}, + + // things which should throw PNSE on sync JSExport and JSWebWorker + new NamedCall { IsBlocking = true, Name = "Task.Wait", Call = delegate 
(CancellationToken ct) { Task.Delay(30, ct).Wait(ct); }}, + new NamedCall { IsBlocking = true, Name = "Task.WaitAll", Call = delegate (CancellationToken ct) { Task.WaitAll(Task.Delay(30, ct)); }}, + new NamedCall { IsBlocking = true, Name = "Task.WaitAny", Call = delegate (CancellationToken ct) { Task.WaitAny(Task.Delay(30, ct)); }}, + new NamedCall { IsBlocking = true, Name = "ManualResetEventSlim.Wait", Call = delegate (CancellationToken ct) { + using var mr = new ManualResetEventSlim(false); + LocalCtsIgnoringCall(mr.Wait); + }}, + new NamedCall { IsBlocking = true, Name = "SemaphoreSlim.Wait", Call = delegate (CancellationToken ct) { + using var sem = new SemaphoreSlim(2); + LocalCtsIgnoringCall(sem.Wait); + }}, + new NamedCall { IsBlocking = true, Name = "Mutex.WaitOne", Call = delegate (CancellationToken ct) { + using var mr = new ManualResetEventSlim(false); + var mutex = new Mutex(); + var thread = new Thread(() => { + mutex.WaitOne(); + mr.Set(); + Thread.Sleep(50); + mutex.ReleaseMutex(); + }); + thread.Start(); + Thread.ForceBlockingWait(static (b) => ((ManualResetEventSlim)b).Wait(), mr); + mutex.WaitOne(); + }}, }; public static IEnumerable GetTargetThreadsAndBlockingCalls() diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/WebWorkerTestHelper.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/WebWorkerTestHelper.cs index 9a1856780d5d5b..fa83846f149291 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/WebWorkerTestHelper.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/tests/System.Runtime.InteropServices.JavaScript.UnitTests/System/Runtime/InteropServices/JavaScript/WebWorkerTestHelper.cs @@ 
-385,10 +385,10 @@ public static Task RunOnTargetAsync(SynchronizationContext ctx, Func job, public class NamedCall { public string Name { get; set; } + public bool IsBlocking { get; set; } public delegate void Method(CancellationToken ct); public Method Call { get; set; } override public string ToString() => Name; } - } diff --git a/src/libraries/System.Threading.Thread/src/CompatibilitySuppressions.Threading.xml b/src/libraries/System.Threading.Thread/src/CompatibilitySuppressions.Threading.xml index 5fc41e30ed4415..8eb2f78e6a7066 100644 --- a/src/libraries/System.Threading.Thread/src/CompatibilitySuppressions.Threading.xml +++ b/src/libraries/System.Threading.Thread/src/CompatibilitySuppressions.Threading.xml @@ -20,6 +20,10 @@ CP0002 F:System.Threading.Thread.ThrowOnBlockingWaitOnJSInteropThread + + CP0002 + F:System.Threading.Thread.WarnOnBlockingWaitOnJSInteropThread + CP0002 M:System.Threading.Thread.AssureBlockingPossible diff --git a/src/mono/browser/runtime/corebindings.c b/src/mono/browser/runtime/corebindings.c index 37a8598ec6c4eb..485ba3189af6e2 100644 --- a/src/mono/browser/runtime/corebindings.c +++ b/src/mono/browser/runtime/corebindings.c @@ -51,6 +51,7 @@ void mono_wasm_invoke_js_function_send (pthread_t target_tid, int function_js_ha extern void mono_threads_wasm_async_run_in_target_thread_vi (pthread_t target_thread, void (*func) (gpointer), gpointer user_data1); extern void mono_threads_wasm_async_run_in_target_thread_vii (pthread_t target_thread, void (*func) (gpointer, gpointer), gpointer user_data1, gpointer user_data2); extern void mono_threads_wasm_sync_run_in_target_thread_vii (pthread_t target_thread, void (*func) (gpointer, gpointer), gpointer user_data1, gpointer args); +extern void mono_wasm_warn_about_blocking_wait (void* ptr, int32_t length); #else extern void* mono_wasm_bind_js_import_ST (void *signature); extern void mono_wasm_invoke_jsimport_ST (int function_handle, void *args); @@ -86,6 +87,7 @@ void 
bindings_initialize_internals (void) mono_add_internal_call ("Interop/Runtime::InvokeJSImportAsyncPost", mono_wasm_invoke_jsimport_async_post); mono_add_internal_call ("Interop/Runtime::InvokeJSFunctionSend", mono_wasm_invoke_js_function_send); mono_add_internal_call ("Interop/Runtime::CancelPromisePost", mono_wasm_cancel_promise_post); + mono_add_internal_call ("System.Threading.Thread::WarnAboutBlockingWait", mono_wasm_warn_about_blocking_wait); #else mono_add_internal_call ("Interop/Runtime::BindJSImportST", mono_wasm_bind_js_import_ST); mono_add_internal_call ("Interop/Runtime::InvokeJSImportST", mono_wasm_invoke_jsimport_ST); diff --git a/src/mono/browser/runtime/exports-binding.ts b/src/mono/browser/runtime/exports-binding.ts index bcecfec8b10ea4..f5420a3a729e14 100644 --- a/src/mono/browser/runtime/exports-binding.ts +++ b/src/mono/browser/runtime/exports-binding.ts @@ -29,12 +29,11 @@ import { mono_wasm_cancel_promise } from "./cancelable-promise"; import { mono_wasm_start_deputy_thread_async, mono_wasm_pthread_on_pthread_attached, mono_wasm_pthread_on_pthread_unregistered, - mono_wasm_pthread_on_pthread_registered, mono_wasm_pthread_set_name, mono_wasm_install_js_worker_interop, mono_wasm_uninstall_js_worker_interop, mono_wasm_start_io_thread_async + mono_wasm_pthread_on_pthread_registered, mono_wasm_pthread_set_name, mono_wasm_install_js_worker_interop, mono_wasm_uninstall_js_worker_interop, mono_wasm_start_io_thread_async, mono_wasm_warn_about_blocking_wait } from "./pthreads"; import { mono_wasm_dump_threads } from "./pthreads/ui-thread"; import { mono_wasm_schedule_synchronization_context } from "./pthreads/shared"; - // the JS methods would be visible to EMCC linker and become imports of the WASM module export const mono_wasm_threads_imports = !WasmEnableThreads ? [] : [ @@ -58,6 +57,7 @@ export const mono_wasm_threads_imports = !WasmEnableThreads ? 
[] : [ mono_wasm_install_js_worker_interop, mono_wasm_uninstall_js_worker_interop, mono_wasm_invoke_jsimport_MT, + mono_wasm_warn_about_blocking_wait, ]; export const mono_wasm_imports = [ diff --git a/src/mono/browser/runtime/interp-pgo.ts b/src/mono/browser/runtime/interp-pgo.ts index 0a5a6e6c728534..14697385800b16 100644 --- a/src/mono/browser/runtime/interp-pgo.ts +++ b/src/mono/browser/runtime/interp-pgo.ts @@ -206,9 +206,7 @@ export async function getCacheKey (prefix: string): Promise { delete inputs.enableDownloadRetry; delete inputs.extensions; delete inputs.runtimeId; - delete inputs.mainThreadingMode; delete inputs.jsThreadBlockingMode; - delete inputs.jsThreadInteropMode; inputs.GitHash = loaderHelpers.gitHash; inputs.ProductVersion = ProductVersion; diff --git a/src/mono/browser/runtime/loader/config.ts b/src/mono/browser/runtime/loader/config.ts index 8fd7f00fe3152d..5ff185827c106b 100644 --- a/src/mono/browser/runtime/loader/config.ts +++ b/src/mono/browser/runtime/loader/config.ts @@ -4,7 +4,7 @@ import BuildConfiguration from "consts:configuration"; import WasmEnableThreads from "consts:wasmEnableThreads"; -import { MainThreadingMode, type DotnetModuleInternal, type MonoConfigInternal, JSThreadBlockingMode, JSThreadInteropMode } from "../types/internal"; +import { type DotnetModuleInternal, type MonoConfigInternal, JSThreadBlockingMode } from "../types/internal"; import type { DotnetModuleConfig, MonoConfig, ResourceGroups, ResourceList } from "../types"; import { exportedRuntimeAPI, loaderHelpers, runtimeHelpers } from "./globals"; import { mono_log_error, mono_log_debug } from "./logging"; @@ -12,7 +12,6 @@ import { importLibraryInitializers, invokeLibraryInitializers } from "./libraryI import { mono_exit } from "./exit"; import { makeURLAbsoluteWithApplicationBase } from "./polyfills"; import { appendUniqueQuery } from "./assets"; -import { mono_log_warn } from "./logging"; export function deep_merge_config (target: MonoConfigInternal, source: 
MonoConfigInternal): MonoConfigInternal { // no need to merge the same object @@ -198,38 +197,8 @@ export function normalizeConfig () { if (!Number.isInteger(config.finalizerThreadStartDelayMs)) { config.finalizerThreadStartDelayMs = 200; } - if (config.mainThreadingMode == undefined) { - config.mainThreadingMode = MainThreadingMode.DeputyAndIOThreads; - } if (config.jsThreadBlockingMode == undefined) { - config.jsThreadBlockingMode = JSThreadBlockingMode.AllowBlockingWaitInAsyncCode; - } - if (config.jsThreadInteropMode == undefined) { - config.jsThreadInteropMode = JSThreadInteropMode.SimpleSynchronousJSInterop; - } - let validModes = false; - if (config.mainThreadingMode == MainThreadingMode.DeputyThread - && config.jsThreadBlockingMode == JSThreadBlockingMode.NoBlockingWait - && config.jsThreadInteropMode == JSThreadInteropMode.SimpleSynchronousJSInterop - ) { - validModes = true; - } else if (config.mainThreadingMode == MainThreadingMode.DeputyAndIOThreads - && config.jsThreadBlockingMode == JSThreadBlockingMode.AllowBlockingWaitInAsyncCode - && config.jsThreadInteropMode == JSThreadInteropMode.SimpleSynchronousJSInterop - ) { - validModes = true; - } else if (config.mainThreadingMode == MainThreadingMode.DeputyThread - && config.jsThreadBlockingMode == JSThreadBlockingMode.AllowBlockingWait - && config.jsThreadInteropMode == JSThreadInteropMode.SimpleSynchronousJSInterop - ) { - validModes = true; - } - if (!validModes) { - mono_log_warn("Unsupported threading configuration", { - mainThreadingMode: config.mainThreadingMode, - jsThreadBlockingMode: config.jsThreadBlockingMode, - jsThreadInteropMode: config.jsThreadInteropMode - }); + config.jsThreadBlockingMode = JSThreadBlockingMode.PreventSynchronousJSExport; } } diff --git a/src/mono/browser/runtime/managed-exports.ts b/src/mono/browser/runtime/managed-exports.ts index f90fbb049d1504..065136faaba476 100644 --- a/src/mono/browser/runtime/managed-exports.ts +++ b/src/mono/browser/runtime/managed-exports.ts @@ 
-3,7 +3,7 @@ import WasmEnableThreads from "consts:wasmEnableThreads"; -import { GCHandle, GCHandleNull, JSMarshalerArguments, JSThreadInteropMode, MarshalerToCs, MarshalerToJs, MarshalerType, MonoMethod, PThreadPtr } from "./types/internal"; +import { GCHandle, GCHandleNull, JSMarshalerArguments, JSThreadBlockingMode, MarshalerToCs, MarshalerToJs, MarshalerType, MonoMethod, PThreadPtr } from "./types/internal"; import cwraps, { threads_c_functions as twraps } from "./cwraps"; import { runtimeHelpers, Module, loaderHelpers, mono_assert } from "./globals"; import { JavaScriptMarshalerArgSize, alloc_stack_frame, get_arg, get_arg_gc_handle, is_args_exception, set_arg_i32, set_arg_intptr, set_arg_type, set_gc_handle, set_receiver_should_free } from "./marshal"; @@ -165,10 +165,12 @@ export function complete_task (holder_gc_handle: GCHandle, error?: any, data?: a export function call_delegate (callback_gc_handle: GCHandle, arg1_js: any, arg2_js: any, arg3_js: any, res_converter?: MarshalerToJs, arg1_converter?: MarshalerToCs, arg2_converter?: MarshalerToCs, arg3_converter?: MarshalerToCs) { loaderHelpers.assert_runtime_running(); if (WasmEnableThreads) { - if (runtimeHelpers.config.jsThreadInteropMode == JSThreadInteropMode.NoSyncJSInterop) { - throw new Error("Cannot call synchronous C# methods."); - } else if (runtimeHelpers.isPendingSynchronousCall) { - throw new Error("Cannot call synchronous C# method from inside a synchronous call to a JS method."); + if (monoThreadInfo.isUI) { + if (runtimeHelpers.config.jsThreadBlockingMode == JSThreadBlockingMode.PreventSynchronousJSExport) { + throw new Error("Cannot call synchronous C# methods."); + } else if (runtimeHelpers.isPendingSynchronousCall) { + throw new Error("Cannot call synchronous C# method from inside a synchronous call to a JS method."); + } } } const sp = Module.stackSave(); @@ -225,26 +227,39 @@ export function get_managed_stack_trace (exception_gc_handle: GCHandle) { } } -// GCHandle 
InstallMainSynchronizationContext(nint jsNativeTID, JSThreadBlockingMode jsThreadBlockingMode, JSThreadInteropMode jsThreadInteropMode, MainThreadingMode mainThreadingMode) -export function install_main_synchronization_context (jsThreadBlockingMode: number, jsThreadInteropMode: number, mainThreadingMode: number): GCHandle { +// GCHandle InstallMainSynchronizationContext(nint jsNativeTID, JSThreadBlockingMode jsThreadBlockingMode) +export function install_main_synchronization_context (jsThreadBlockingMode: JSThreadBlockingMode): GCHandle { if (!WasmEnableThreads) return GCHandleNull; assert_c_interop(); try { // this block is like alloc_stack_frame() but without set_args_context() - const bytes = JavaScriptMarshalerArgSize * 6; + const bytes = JavaScriptMarshalerArgSize * 4; const args = Module.stackAlloc(bytes) as any; _zero_region(args, bytes); const res = get_arg(args, 1); const arg1 = get_arg(args, 2); const arg2 = get_arg(args, 3); - const arg3 = get_arg(args, 4); - const arg4 = get_arg(args, 5); set_arg_intptr(arg1, mono_wasm_main_thread_ptr() as any); - set_arg_i32(arg2, jsThreadBlockingMode); - set_arg_i32(arg3, jsThreadInteropMode); - set_arg_i32(arg4, mainThreadingMode); + + // sync with JSHostImplementation.Types.cs + switch (jsThreadBlockingMode) { + case JSThreadBlockingMode.PreventSynchronousJSExport: + set_arg_i32(arg2, 0); + break; + case JSThreadBlockingMode.ThrowWhenBlockingWait: + set_arg_i32(arg2, 1); + break; + case JSThreadBlockingMode.WarnWhenBlockingWait: + set_arg_i32(arg2, 2); + break; + case JSThreadBlockingMode.DangerousAllowBlockingWait: + set_arg_i32(arg2, 100); + break; + default: + throw new Error("Invalid jsThreadBlockingMode"); + } // this block is like invoke_sync_jsexport() but without assert_js_interop() cwraps.mono_wasm_invoke_jsexport(managedExports.InstallMainSynchronizationContext!, args); @@ -281,10 +296,12 @@ export function invoke_sync_jsexport (method: MonoMethod, args: JSMarshalerArgum if (!WasmEnableThreads) { 
cwraps.mono_wasm_invoke_jsexport(method, args as any); } else { - if (runtimeHelpers.config.jsThreadInteropMode == JSThreadInteropMode.NoSyncJSInterop) { - throw new Error("Cannot call synchronous C# methods."); - } else if (runtimeHelpers.isPendingSynchronousCall) { - throw new Error("Cannot call synchronous C# method from inside a synchronous call to a JS method."); + if (monoThreadInfo.isUI) { + if (runtimeHelpers.config.jsThreadBlockingMode == JSThreadBlockingMode.PreventSynchronousJSExport) { + throw new Error("Cannot call synchronous C# methods."); + } else if (runtimeHelpers.isPendingSynchronousCall) { + throw new Error("Cannot call synchronous C# method from inside a synchronous call to a JS method."); + } } if (runtimeHelpers.isManagedRunningOnCurrentThread) { twraps.mono_wasm_invoke_jsexport_sync(method, args as any); diff --git a/src/mono/browser/runtime/multi-threading.md b/src/mono/browser/runtime/multi-threading.md deleted file mode 100644 index e4b3985923d503..00000000000000 --- a/src/mono/browser/runtime/multi-threading.md +++ /dev/null @@ -1,52 +0,0 @@ -# Multi-threading with JavaScript interop - -## Meaningful configurations are: - - * Single-threaded mode as you know it since .Net 6 - - default, safe, tested, supported - - from .Net 8 it could be easily started also as a web worker, but you need your own messaging between main and worker - * `MainThreadingMode.DeputyThread` + `JSThreadBlockingMode.NoBlockingWait` + `JSThreadInteropMode.SimpleSynchronousJSInterop` - + **default threading**, safe, tested, supported - + blocking `.Wait` is allowed on thread pool and new threads - - blocking `.Wait` throws `PlatformNotSupportedException` on `JSWebWorker` and main thread - - DOM events like `onClick` need to be asynchronous, if the handler needs use synchronous `[JSImport]` - - synchronous calls to `[JSImport]`/`[JSExport]` can't synchronously call back - - * `MainThreadingMode.DeputyAndIOThreads` + `JSThreadBlockingMode.AllowBlockingWaitInAsyncCode` 
+ `JSThreadInteropMode.SimpleSynchronousJSInterop` - + **default threading**, safe, tested, supported - + blocking `.Wait` is allowed on thread pool and new threads - - blocking `.Wait` throws `PlatformNotSupportedException` on `JSWebWorker` and main thread only when they are called from JS via synchronous `JSExport` - - DOM events like `onClick` need to be asynchronous, if the handler needs use synchronous `[JSImport]` - - synchronous calls to `[JSImport]`/`[JSExport]` can't synchronously call back - - * `MainThreadingMode.DeputyThread` + `JSThreadBlockingMode.AllowBlockingWait` + `JSThreadInteropMode.SimpleSynchronousJSInterop` - + pragmatic for legacy codebase, which contains blocking code and can't be fully executed on thread pool or new threads - - ** could cause deadlocks !!!** - - Use your own judgment before you opt in. - - blocking .Wait is allowed on all threads! - - blocking .Wait on pending JS `Task`/`Promise` (like HTTP/WS requests) could cause deadlocks! - - reason is that blocked thread can't process the browser event loop - - so it can't resolve the promises - - even when it's longer `Promise`/`Task` chain - - DOM events like `onClick` need to be asynchronous, if the handler needs use synchronous `[JSImport]` - - synchronous calls to `[JSImport]`/`[JSExport]` can't synchronously call back - -## Unsupported combinations are: - * `MainThreadingMode.DeputyThread` + `JSThreadBlockingMode.NoBlockingWait` + `JSThreadInteropMode.NoSyncJSInterop` - + very safe - - HTTP/WS requests are not possible because it currently uses synchronous JS interop - - Blazor doesn't work because it currently uses synchronous JS interop - * `MainThreadingMode.UIThread` - - not recommended, not tested, not supported! - - can deadlock on creating new threads - - can deadlock on blocking `.Wait` for a pending JS `Promise`/`Task`, including HTTP/WS requests - - .Wait is spin-waiting - it blocks debugger, network, UI rendering, ... 
- + JS interop to UI is faster, synchronous and re-entrant - -### There could be more JSThreadInteropModes: - - allow re-entrant synchronous JS interop on `JSWebWorker`. - - This is possible because managed code is running on same thread as JS. - - But it's nuanced to debug it, when things go wrong. - - allow re-entrant synchronous JS interop also on deputy thread. - - This is not possible for deputy, because it would deadlock on call back to different thread. - - The thread receiving the callback is still blocked waiting for the first synchronous call to finish. diff --git a/src/mono/browser/runtime/pthreads/index.ts b/src/mono/browser/runtime/pthreads/index.ts index 195df3e126ab65..3678d709d1050b 100644 --- a/src/mono/browser/runtime/pthreads/index.ts +++ b/src/mono/browser/runtime/pthreads/index.ts @@ -1,6 +1,9 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +import { mono_log_warn } from "../logging"; +import { utf16ToString } from "../strings"; + export { mono_wasm_main_thread_ptr, mono_wasm_install_js_worker_interop, mono_wasm_uninstall_js_worker_interop, mono_wasm_pthread_ptr, update_thread_info, isMonoThreadMessage, monoThreadInfo, @@ -18,3 +21,8 @@ export { export { mono_wasm_start_deputy_thread_async } from "./deputy-thread"; export { mono_wasm_start_io_thread_async } from "./io-thread"; + +export function mono_wasm_warn_about_blocking_wait (ptr: number, length: number) { + const warning = utf16ToString(ptr, ptr + (length * 2)); + mono_log_warn(warning); +} diff --git a/src/mono/browser/runtime/startup.ts b/src/mono/browser/runtime/startup.ts index 24fbb1d16a04f2..c3958289627cf3 100644 --- a/src/mono/browser/runtime/startup.ts +++ b/src/mono/browser/runtime/startup.ts @@ -4,7 +4,7 @@ import WasmEnableThreads from "consts:wasmEnableThreads"; import BuildConfiguration from "consts:configuration"; -import { DotnetModuleInternal, CharPtrNull, MainThreadingMode } from 
"./types/internal"; +import { DotnetModuleInternal, CharPtrNull } from "./types/internal"; import { exportedRuntimeAPI, INTERNAL, loaderHelpers, Module, runtimeHelpers, createPromiseController, mono_assert } from "./globals"; import cwraps, { init_c_exports, threads_c_functions as tcwraps } from "./cwraps"; import { mono_wasm_raise_debug_event, mono_wasm_runtime_ready } from "./debug"; @@ -274,19 +274,14 @@ async function onRuntimeInitializedAsync (userOnRuntimeInitialized: () => void) mono_log_info("UI thread is alive!"); }, 3000); - if (WasmEnableThreads && - (runtimeHelpers.config.mainThreadingMode == MainThreadingMode.DeputyThread - || runtimeHelpers.config.mainThreadingMode == MainThreadingMode.DeputyAndIOThreads)) { + if (WasmEnableThreads) { // this will create thread and call start_runtime() on it runtimeHelpers.monoThreadInfo = monoThreadInfo; runtimeHelpers.isManagedRunningOnCurrentThread = false; update_thread_info(); runtimeHelpers.managedThreadTID = tcwraps.mono_wasm_create_deputy_thread(); runtimeHelpers.proxyGCHandle = await runtimeHelpers.afterMonoStarted.promise; - - if (WasmEnableThreads && runtimeHelpers.config.mainThreadingMode == MainThreadingMode.DeputyAndIOThreads) { - runtimeHelpers.ioThreadTID = tcwraps.mono_wasm_create_io_thread(); - } + runtimeHelpers.ioThreadTID = tcwraps.mono_wasm_create_io_thread(); // TODO make UI thread not managed/attached https://github.com/dotnet/runtime/issues/100411 tcwraps.mono_wasm_register_ui_thread(); @@ -303,7 +298,7 @@ async function onRuntimeInitializedAsync (userOnRuntimeInitialized: () => void) await start_runtime(); } - if (WasmEnableThreads && runtimeHelpers.config.mainThreadingMode == MainThreadingMode.DeputyAndIOThreads) { + if (WasmEnableThreads) { await runtimeHelpers.afterIOStarted.promise; } @@ -541,10 +536,7 @@ export async function start_runtime () { monoThreadInfo.isRegistered = true; runtimeHelpers.currentThreadTID = monoThreadInfo.pthreadId = runtimeHelpers.managedThreadTID = 
mono_wasm_pthread_ptr(); update_thread_info(); - runtimeHelpers.proxyGCHandle = install_main_synchronization_context( - runtimeHelpers.config.jsThreadBlockingMode!, - runtimeHelpers.config.jsThreadInteropMode!, - runtimeHelpers.config.mainThreadingMode!); + runtimeHelpers.proxyGCHandle = install_main_synchronization_context(runtimeHelpers.config.jsThreadBlockingMode!); runtimeHelpers.isManagedRunningOnCurrentThread = true; // start finalizer thread, lazy diff --git a/src/mono/browser/runtime/types/internal.ts b/src/mono/browser/runtime/types/internal.ts index 2017a86922423e..0e564f8c872827 100644 --- a/src/mono/browser/runtime/types/internal.ts +++ b/src/mono/browser/runtime/types/internal.ts @@ -95,9 +95,7 @@ export type MonoConfigInternal = MonoConfig & { GitHash?: string, ProductVersion?: string, - mainThreadingMode?: MainThreadingMode, jsThreadBlockingMode?: JSThreadBlockingMode, - jsThreadInteropMode?: JSThreadInteropMode, }; export type RunArguments = { @@ -570,36 +568,37 @@ export interface MonoThreadMessage { cmd: string; } -// keep in sync with JSHostImplementation.Types.cs -export const enum MainThreadingMode { - // Running the managed main thread on UI thread. - // Managed GC and similar scenarios could be blocking the UI. - // Easy to deadlock. Not recommended for production. - UIThread = 0, - // Running the managed main thread on dedicated WebWorker. Marshaling all JavaScript calls to and from the main thread. - DeputyThread = 1, - // TODO comment - DeputyAndIOThreads = 2, -} - // keep in sync with JSHostImplementation.Types.cs export const enum JSThreadBlockingMode { - // throw PlatformNotSupportedException if blocking .Wait is called on threads with JS interop, like JSWebWorker and Main thread. - // Avoids deadlocks (typically with pending JS promises on the same thread) by throwing exceptions. - NoBlockingWait = 0, - // TODO comment - AllowBlockingWaitInAsyncCode = 1, - // allow .Wait on all threads. 
- // Could cause deadlocks with blocking .Wait on a pending JS Task/Promise on the same thread or similar Task/Promise chain. - AllowBlockingWait = 100, -} - -// keep in sync with JSHostImplementation.Types.cs -export const enum JSThreadInteropMode { - // throw PlatformNotSupportedException if synchronous JSImport/JSExport is called on threads with JS interop, like JSWebWorker and Main thread. - // calling synchronous JSImport on thread pool or new threads is allowed. - NoSyncJSInterop = 0, - // allow non-re-entrant synchronous blocking calls to and from JS on JSWebWorker on threads with JS interop, like JSWebWorker and Main thread. - // calling synchronous JSImport on thread pool or new threads is allowed. - SimpleSynchronousJSInterop = 1, + /** + * Prevents synchronous JSExport from being called from JavaScript code in UI thread. + * On JSWebWorker synchronous JSExport always works. + * On JSWebWorker blocking .Wait always warns. + * This is the default mode. + */ + PreventSynchronousJSExport = "PreventSynchronousJSExport", + /** + * Allows synchronous JSExport to be called from JavaScript code also in UI thread. + * Inside of that call blocking .Wait throws PNSE. + * Inside of that call nested call back to synchronous JSImport throws PNSE (because it would deadlock otherwise in 100% cases). + * On JSWebWorker synchronous JSExport always works. + * On JSWebWorker blocking .Wait always throws PNSE. + */ + ThrowWhenBlockingWait = "ThrowWhenBlockingWait", + /** + * Allows synchronous JSExport to be called from JavaScript code also in UI thread. + * Inside of that call blocking .Wait warns. + * Inside of that call nested call back to synchronous JSImport throws PNSE (because it would deadlock otherwise in 100% cases). + * On JSWebWorker synchronous JSExport always works. + * On JSWebWorker blocking .Wait always warns. 
+ */ + WarnWhenBlockingWait = "WarnWhenBlockingWait", + /** + * Allows synchronous JSExport to be called from JavaScript code, and allows managed code to use blocking .Wait + * .Wait on Promise/Task chains could lead to deadlock because JS event loop is not processed and it can't resolve JS promises. + * This mode is dangerous and not supported. + * Allows synchronous JSExport to be called from JavaScript code also in Main thread. + * Inside of that call nested call back to synchronous JSImport throws PNSE (because it would deadlock otherwise in 100% cases). + */ + DangerousAllowBlockingWait = "DangerousAllowBlockingWait", } diff --git a/src/mono/browser/test-main.js b/src/mono/browser/test-main.js index 3aacd8e2c67d68..1feb21ef2f796b 100644 --- a/src/mono/browser/test-main.js +++ b/src/mono/browser/test-main.js @@ -252,7 +252,8 @@ function configureRuntime(dotnet, runArgs) { .withInteropCleanupOnExit() .withDumpThreadsOnNonZeroExit() .withConfig({ - loadAllSatelliteResources: true + loadAllSatelliteResources: true, + jsThreadBlockingMode: "ThrowWhenBlockingWait", }); if (ENVIRONMENT_IS_NODE) { diff --git a/src/mono/sample/wasm/browser-threads/Program.cs b/src/mono/sample/wasm/browser-threads/Program.cs index 331f8e35de3ccf..8783ace18be7b2 100644 --- a/src/mono/sample/wasm/browser-threads/Program.cs +++ b/src/mono/sample/wasm/browser-threads/Program.cs @@ -38,9 +38,14 @@ public static async Task Main(string[] args) public static void Progress2() { // both calls here are sync POSIX calls dispatched to UI thread, which is already blocked because this is synchronous method on deputy thread - // in should not deadlock anyway, see also invoke_later_when_on_ui_thread_sync and emscripten_yield + // it should not deadlock anyway, see also invoke_later_when_on_ui_thread_sync and emscripten_yield var cwd = Directory.GetCurrentDirectory(); Console.WriteLine("Progress! 
"+ cwd); + + // below is blocking call, which means that UI will spin-lock little longer + // it will warn about blocking wait because of jsThreadBlockingMode: "WarnWhenBlockingWait" + // but it will not deadlock because underlying task chain is not JS promise + Task.Delay(10).Wait(); } [JSExport] diff --git a/src/mono/sample/wasm/browser-threads/main.js b/src/mono/sample/wasm/browser-threads/main.js index 8da1e4fb608e6d..ea97a5ce200c8c 100644 --- a/src/mono/sample/wasm/browser-threads/main.js +++ b/src/mono/sample/wasm/browser-threads/main.js @@ -17,6 +17,9 @@ try { .withElementOnExit() .withExitCodeLogging() .withExitOnUnhandledError() + .withConfig({ + jsThreadBlockingMode: "WarnWhenBlockingWait", + }) .create(); setModuleImports("main.js", { From e61064949214b9eebdbf9faad58b71afd240ff15 Mon Sep 17 00:00:00 2001 From: Pavel Savara Date: Thu, 4 Apr 2024 15:05:53 +0200 Subject: [PATCH 088/132] [browser][MT] fix void Main (#100629) --- .../Runtime/InteropServices/JavaScript/JSHostImplementation.cs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSHostImplementation.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSHostImplementation.cs index b62f347a00d05f..f4653c5e35041f 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSHostImplementation.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/src/System/Runtime/InteropServices/JavaScript/JSHostImplementation.cs @@ -229,6 +229,9 @@ public static void LoadSatelliteAssembly(byte[] dllBytes) if (method.ReturnType == typeof(void)) { method.Invoke(null, argsToPass); +#if FEATURE_WASM_MANAGED_THREADS + result = Task.FromResult(0); +#endif } else if (method.ReturnType == typeof(int)) { From cf9b28c670d387303da54ff5d4bee208a74a28bc Mon Sep 17 00:00:00 2001 From: Qiao Pengcheng Date: 
Thu, 4 Apr 2024 21:55:07 +0800 Subject: [PATCH 089/132] [LoongArch64] amend the disasmbly formate of lu52id and branches. (#100568) Also modify `STACKWALK_CONTROLPC_ADJUST_OFFSET` as there is no branch delay. --- .../debug/inc/loongarch64/primitives.h | 2 +- src/coreclr/jit/emitloongarch64.cpp | 21 ++++++++++++------- src/coreclr/vm/loongarch64/cgencpu.h | 2 +- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/src/coreclr/debug/inc/loongarch64/primitives.h b/src/coreclr/debug/inc/loongarch64/primitives.h index b30e7dcdd2ea91..750f8a617c17e1 100644 --- a/src/coreclr/debug/inc/loongarch64/primitives.h +++ b/src/coreclr/debug/inc/loongarch64/primitives.h @@ -20,7 +20,7 @@ typedef DPTR(CORDB_ADDRESS_TYPE) PTR_CORDB_ADDRESS_TYPE; // Given a return address retrieved during stackwalk, // this is the offset by which it should be decremented to land at the call instruction. -#define STACKWALK_CONTROLPC_ADJUST_OFFSET 8 +#define STACKWALK_CONTROLPC_ADJUST_OFFSET 4 #define PRD_TYPE LONG #define CORDbg_BREAK_INSTRUCTION_SIZE 4 diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp index 24b408b4b8db38..539e07a1136541 100644 --- a/src/coreclr/jit/emitloongarch64.cpp +++ b/src/coreclr/jit/emitloongarch64.cpp @@ -4039,15 +4039,15 @@ void emitter::emitDisInsName(code_t code, const BYTE* addr, instrDesc* id) { printf("%s, %s, 0x%lx\n", RegNames[regd], RegNames[regj], offs16); } - else if (INS_OPTS_NONE == id->idInsOpt()) + else if ((unsigned)(addr - emitCodeBlock) < emitPrologIG->igSize) // only for prolog { if (offs16 < 0) { - printf("-%d ins\n", -offs16 >> 2); + printf("%s, %s, -%d ins\n", RegNames[regj], RegNames[regd], -offs16 >> 2); } else { - printf("+%d ins\n", offs16 >> 2); + printf("%s, %s, +%d ins\n", RegNames[regj], RegNames[regd], offs16 >> 2); } } else @@ -4060,12 +4060,12 @@ void emitter::emitDisInsName(code_t code, const BYTE* addr, instrDesc* id) { tmp = (((code >> 10) & 0xffff) | ((code & 0x1f) << 16)) << 11; tmp >>= 9; 
- if (INS_OPTS_NONE == id->idInsOpt()) + if ((unsigned)(addr - emitCodeBlock) < emitPrologIG->igSize) // only for prolog { tmp >>= 2; if (tmp < 0) { - printf("%s, -%d ins\n", RegNames[regj], tmp); + printf("%s, -%d ins\n", RegNames[regj], -tmp); } else { @@ -4089,12 +4089,12 @@ void emitter::emitDisInsName(code_t code, const BYTE* addr, instrDesc* id) methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie); printf("# %s\n", methodName); } - else if (INS_OPTS_NONE == id->idInsOpt()) + else if ((unsigned)(addr - emitCodeBlock) < emitPrologIG->igSize) // only for prolog { tmp >>= 2; if (tmp < 0) { - printf("-%d ins\n", tmp); + printf("-%d ins\n", -tmp); } else { @@ -4134,7 +4134,12 @@ void emitter::emitDisInsName(code_t code, const BYTE* addr, instrDesc* id) tmp >>= 20; if (ins == INS_preld) { - printf("0x%x, %s, 0x%x\n", regd, RegNames[regj], tmp); + printf("0x%x, %s, %d\n", regd, RegNames[regj], tmp); + return; + } + else if (ins == INS_lu52i_d) + { + printf("%s, %s, 0x%x\n", RegNames[regd], RegNames[regj], tmp & 0xfff); return; } printf("%s, %s, %d\n", RegNames[regd], RegNames[regj], tmp); diff --git a/src/coreclr/vm/loongarch64/cgencpu.h b/src/coreclr/vm/loongarch64/cgencpu.h index 21bf41f150cc5d..129d3a35589d04 100644 --- a/src/coreclr/vm/loongarch64/cgencpu.h +++ b/src/coreclr/vm/loongarch64/cgencpu.h @@ -81,7 +81,7 @@ extern PCODE GetPreStubEntryPoint(); // Given a return address retrieved during stackwalk, // this is the offset by which it should be decremented to arrive at the callsite. 
-#define STACKWALK_CONTROLPC_ADJUST_OFFSET 8 +#define STACKWALK_CONTROLPC_ADJUST_OFFSET 4 //********************************************************************** // Parameter size From 195b7a88ec5cee386bbec4c8696d27c00897662e Mon Sep 17 00:00:00 2001 From: Jakob Botsch Nielsen Date: Thu, 4 Apr 2024 16:18:54 +0200 Subject: [PATCH 090/132] Remove true randomness from a libraries tests (#100633) --- .../tests/Xslt/XslCompiledTransformApi/Errata4.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libraries/System.Private.Xml/tests/Xslt/XslCompiledTransformApi/Errata4.cs b/src/libraries/System.Private.Xml/tests/Xslt/XslCompiledTransformApi/Errata4.cs index 68e0964199f160..0b5c13f8d95c0a 100644 --- a/src/libraries/System.Private.Xml/tests/Xslt/XslCompiledTransformApi/Errata4.cs +++ b/src/libraries/System.Private.Xml/tests/Xslt/XslCompiledTransformApi/Errata4.cs @@ -22,7 +22,7 @@ public Errata4(ITestOutputHelper output) : base(output) _output = output; } - private Random _rand = new Random(unchecked((int)DateTime.Now.Ticks)); + private Random _rand = new Random(12345678); #region private const string xmlDocTemplate = ... 
From e629779764abd70bde54e780eeeb8fd5bee84fac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aleksey=20Kliger=20=28=CE=BBgeek=29?= Date: Thu, 4 Apr 2024 10:39:49 -0400 Subject: [PATCH 091/132] [cdac] Physical contract descriptor spec (#100365) Building on #100253 , describe an in-memory representation of the toplevel contract descriptor, comprisied of: * some target architecture properties * a data descriptor * a collection of compatible contracts Contributes to #99298 Fixes https://github.com/dotnet/runtime/issues/99299 --- * [cdac] Physical contract descriptor spec * Add "contracts" to the data descriptor * one runtime per module if there are multiple hosted runtimes, diagnostic tooling should look in each loaded module to discover the contract descriptor * Apply suggestions from code review * Review feedback - put the aux data and descriptor sizes closer to the pointers - Don't include trailing nul `descriptor_size`. Clarify it is counting bytes and that `descriptor` is in UTF-8 - Simplify `DotNetRuntimeContractDescriptor` naming discussion --------- Co-authored-by: Elinor Fung --- .../datacontracts/contract-descriptor.md | 100 ++++++++++++++++++ docs/design/datacontracts/data_descriptor.md | 9 +- .../datacontracts/datacontracts_design.md | 2 +- 3 files changed, 107 insertions(+), 4 deletions(-) create mode 100644 docs/design/datacontracts/contract-descriptor.md diff --git a/docs/design/datacontracts/contract-descriptor.md b/docs/design/datacontracts/contract-descriptor.md new file mode 100644 index 00000000000000..1e3ddabd6dd735 --- /dev/null +++ b/docs/design/datacontracts/contract-descriptor.md @@ -0,0 +1,100 @@ +# Contract Descriptor + +## Summary + +The [data contracts design](./datacontracts_design.md) is a mechanism that allows diagnostic tooling +to understand the behavior of certain .NET runtime subsystems and data structures. 
In a typical +scenario, a diagnostic tool such as a debugger may have access to a target .NET process (or a memory +dump of such a process) from which it may request to read and write certain regions of memory. + +This document describes a mechanism by which a diagnostic tool may acquire the following information: +* some details about the target process' architecture +* a collection of types and their sizes and/or the offsets of certain fields within each type +* a collection of global values +* a collection of /algorithmic contracts/ that are satisfied by the target process + +## Contract descriptor + +The contract descriptor consists of the follow structure. All multi-byte values are in target architecture endianness. + +```c +struct DotNetRuntimeContractDescriptor +{ + uint64_t magic; + uint32_t flags; + uint32_t descriptor_size; + const char *descriptor; + uint32_t aux_data_count; + uint32_t pad0; + uintptr_t *aux_data; +}; +``` + +The `magic` is `0x44_4e_43_43_44_41_43_00` ("DNCCDAC\0") stored using the target architecture +endianness. This is sufficient to discover the target architecture endianness by comparing the +value in memory to `0x44_4e_43_43_44_41_43_00` and to `0x00_43_41_44_43_43_4e_44`. + +The following `flags` bits are defined: + +| Bits 31-2 | Bit 1 | Bit 0 | +| --------- | ------- | ----- | +| Reserved | ptrSize | 1 | + +If `ptrSize` is 0, the architecture is 64-bit. If it is 1, the architecture is 32-bit. The +reserved bits should be written as zero. Diagnostic tooling may ignore non-zero reserved bits. + +The `descriptor` is a pointer to a UTF-8 JSON string described in [data descriptor physical layout](./data_descriptor.md#Physical_JSON_descriptor). The total number of bytes is given by `descriptor_size`. + +The auxiliary data for the JSON descriptor is stored at the location `aux_data` in `aux_data_count` pointer-sized slots. 
+ +### Architecture properties + +Although `DotNetRuntimeContractDescriptor` contains enough information to discover the target +architecture endianness pointer size, it is expected that in all scenarios diagnostic tooling will +already have this information available through other channels. Diagnostic tools may use the +information derived from `DotNetRuntimeContractDescriptor` for validation. + +### Compatible contracts + +The `descriptor` is a JSON dictionary that is used for storing the [in-memory data descriptor](./data_descriptor.md#Physical_JSON_Descriptor) +and the [compatible contracts](./datacontracts_design.md#Compatible_Contract). + +The compatible contracts are stored in the top-level key `"contracts"`. The value will be a +dictionary that contains each contract name as a key. Each value is the version of the contract as +a JSON integer constant. + +**Contract example**: + +``` jsonc +{"Thread":1,"GCHandle":1,...} +``` + +**Complete in-memory data descriptor example**: + +``` jsonc +{ + "version": "0", + "baseline": "example-64", + "types": + { + "Thread": { "ThreadId": 32, "ThreadState": 0, "Next": 128 }, + "ThreadStore": { "ThreadCount": 32, "ThreadList": 8 } + }, + "globals": + { + "FEATURE_COMINTEROP": 0, + "s_pThreadStore": [ 0 ] // indirect from aux data offset 0 + }, + "contracts": {"Thread": 1, "GCHandle": 1, "ThreadStore": 1} +} +``` + +## Contract symbol + +To aid in discovery, the contract descriptor should be exported by the module hosting the .NET +runtime with the name `DotNetRuntimeContractDescriptor` using the C symbol naming conventions of the +target platform. + +In scenarios where multiple .NET runtimes may be present in a single process, diagnostic tooling +should look for the symbol in each loaded module to discover all the runtimes. 
+ diff --git a/docs/design/datacontracts/data_descriptor.md b/docs/design/datacontracts/data_descriptor.md index cd0d5ce92e82c5..1338e1ae87aa60 100644 --- a/docs/design/datacontracts/data_descriptor.md +++ b/docs/design/datacontracts/data_descriptor.md @@ -130,6 +130,10 @@ The toplevel dictionary will contain: * `"types": TYPES_DESCRIPTOR` see below * `"globals": GLOBALS_DESCRIPTOR` see below +Additional toplevel keys may be present. For example, the in-memory data descriptor will contain a +`"contracts"` key (see [contract descriptor](./contract_descriptor.md#Compatible_contracts)) for the +set of compatible contracts. + ### Baseline data descriptor identifier The in-memory descriptor may contain an optional string identifying a well-known baseline @@ -243,9 +247,8 @@ Rationale: This allows tooling to generate the in-memory data descriptor as a si string. For pointers, the address can be stored at a known offset in an in-proc array of pointers and the offset written into the constant JSON string. -The indirection array is not part of the data descriptor spec. It is expected that the data -contract descriptor will include it. (The data contract descriptor must contain: the data -descriptor, the set of compatible algorithmic contracts, the aux array of globals). +The indirection array is not part of the data descriptor spec. It is part of the [contract +descriptor](./contract_descriptor.md#Contract_descriptor). diff --git a/docs/design/datacontracts/datacontracts_design.md b/docs/design/datacontracts/datacontracts_design.md index f88e0abfd06e5a..630dc9fc5639e1 100644 --- a/docs/design/datacontracts/datacontracts_design.md +++ b/docs/design/datacontracts/datacontracts_design.md @@ -12,7 +12,7 @@ Diagnostic data contract addressed these challenges by eliminating the need for Data contracts represent the manner in which a tool which is not the runtime can reliably understand and observe the behavior of the runtime. 
Contracts are defined by their documentation, and the runtime describes what contracts are applicable to understanding that runtime. ## Data Contract Descriptor -The physical layout of this data is not defined in this document, but its practical effects are. +The physical layout of this data is defined in [the contract descriptor](./contract_descriptor.md) doc. Its practical effects are discussed here. The Data Contract Descriptor has a set of records of the following forms. From b5fbdeb8edce5213b2fa3fdccc203ad675e29528 Mon Sep 17 00:00:00 2001 From: "dotnet-maestro[bot]" <42748379+dotnet-maestro[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 09:47:07 -0500 Subject: [PATCH 092/132] Update dependencies from https://github.com/dotnet/source-build-externals build 20240403.1 (#100635) Microsoft.SourceBuild.Intermediate.source-build-externals From Version 9.0.0-alpha.1.24201.3 -> To Version 9.0.0-alpha.1.24203.1 Co-authored-by: dotnet-maestro[bot] --- eng/Version.Details.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml index 8d15853b82f753..d77a5d23a33418 100644 --- a/eng/Version.Details.xml +++ b/eng/Version.Details.xml @@ -85,9 +85,9 @@ - + https://github.com/dotnet/source-build-externals - bcd44732882bc2b81b30146c778eb6ccb7fea793 + 1e2e91d2544726b2cf68109f946178ef6bef3ad9 From c3a2141905add546bce436d74f9496ee528fdfdf Mon Sep 17 00:00:00 2001 From: "dotnet-maestro[bot]" <42748379+dotnet-maestro[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 10:42:27 -0500 Subject: [PATCH 093/132] [main] Update dependencies from dotnet/arcade (#100401) * Update dependencies from https://github.com/dotnet/arcade build 20240327.2 Microsoft.SourceBuild.Intermediate.arcade , Microsoft.DotNet.Arcade.Sdk , Microsoft.DotNet.Build.Tasks.Archives , Microsoft.DotNet.Build.Tasks.Feed , Microsoft.DotNet.Build.Tasks.Installers , Microsoft.DotNet.Build.Tasks.Packaging , Microsoft.DotNet.Build.Tasks.TargetFramework 
, Microsoft.DotNet.Build.Tasks.Templating , Microsoft.DotNet.Build.Tasks.Workloads , Microsoft.DotNet.CodeAnalysis , Microsoft.DotNet.GenAPI , Microsoft.DotNet.GenFacades , Microsoft.DotNet.Helix.Sdk , Microsoft.DotNet.PackageTesting , Microsoft.DotNet.RemoteExecutor , Microsoft.DotNet.SharedFramework.Sdk , Microsoft.DotNet.VersionTools.Tasks , Microsoft.DotNet.XliffTasks , Microsoft.DotNet.XUnitAssert , Microsoft.DotNet.XUnitConsoleRunner , Microsoft.DotNet.XUnitExtensions From Version 9.0.0-beta.24176.6 -> To Version 9.0.0-beta.24177.2 * Update dependencies from https://github.com/dotnet/arcade build 20240328.6 Microsoft.SourceBuild.Intermediate.arcade , Microsoft.DotNet.Arcade.Sdk , Microsoft.DotNet.Build.Tasks.Archives , Microsoft.DotNet.Build.Tasks.Feed , Microsoft.DotNet.Build.Tasks.Installers , Microsoft.DotNet.Build.Tasks.Packaging , Microsoft.DotNet.Build.Tasks.TargetFramework , Microsoft.DotNet.Build.Tasks.Templating , Microsoft.DotNet.Build.Tasks.Workloads , Microsoft.DotNet.CodeAnalysis , Microsoft.DotNet.GenAPI , Microsoft.DotNet.GenFacades , Microsoft.DotNet.Helix.Sdk , Microsoft.DotNet.PackageTesting , Microsoft.DotNet.RemoteExecutor , Microsoft.DotNet.SharedFramework.Sdk , Microsoft.DotNet.VersionTools.Tasks , Microsoft.DotNet.XliffTasks , Microsoft.DotNet.XUnitAssert , Microsoft.DotNet.XUnitConsoleRunner , Microsoft.DotNet.XUnitExtensions From Version 9.0.0-beta.24176.6 -> To Version 9.0.0-beta.24178.6 * Update dependencies from https://github.com/dotnet/arcade build 20240329.6 Microsoft.SourceBuild.Intermediate.arcade , Microsoft.DotNet.Arcade.Sdk , Microsoft.DotNet.Build.Tasks.Archives , Microsoft.DotNet.Build.Tasks.Feed , Microsoft.DotNet.Build.Tasks.Installers , Microsoft.DotNet.Build.Tasks.Packaging , Microsoft.DotNet.Build.Tasks.TargetFramework , Microsoft.DotNet.Build.Tasks.Templating , Microsoft.DotNet.Build.Tasks.Workloads , Microsoft.DotNet.CodeAnalysis , Microsoft.DotNet.GenAPI , Microsoft.DotNet.GenFacades , Microsoft.DotNet.Helix.Sdk , 
Microsoft.DotNet.PackageTesting , Microsoft.DotNet.RemoteExecutor , Microsoft.DotNet.SharedFramework.Sdk , Microsoft.DotNet.VersionTools.Tasks , Microsoft.DotNet.XliffTasks , Microsoft.DotNet.XUnitAssert , Microsoft.DotNet.XUnitConsoleRunner , Microsoft.DotNet.XUnitExtensions From Version 9.0.0-beta.24176.6 -> To Version 9.0.0-beta.24179.6 * Update dependencies from https://github.com/dotnet/arcade build 20240329.6 Microsoft.SourceBuild.Intermediate.arcade , Microsoft.DotNet.Arcade.Sdk , Microsoft.DotNet.Build.Tasks.Archives , Microsoft.DotNet.Build.Tasks.Feed , Microsoft.DotNet.Build.Tasks.Installers , Microsoft.DotNet.Build.Tasks.Packaging , Microsoft.DotNet.Build.Tasks.TargetFramework , Microsoft.DotNet.Build.Tasks.Templating , Microsoft.DotNet.Build.Tasks.Workloads , Microsoft.DotNet.CodeAnalysis , Microsoft.DotNet.GenAPI , Microsoft.DotNet.GenFacades , Microsoft.DotNet.Helix.Sdk , Microsoft.DotNet.PackageTesting , Microsoft.DotNet.RemoteExecutor , Microsoft.DotNet.SharedFramework.Sdk , Microsoft.DotNet.VersionTools.Tasks , Microsoft.DotNet.XliffTasks , Microsoft.DotNet.XUnitAssert , Microsoft.DotNet.XUnitConsoleRunner , Microsoft.DotNet.XUnitExtensions From Version 9.0.0-beta.24176.6 -> To Version 9.0.0-beta.24179.6 * Update dependencies from https://github.com/dotnet/arcade build 20240329.6 Microsoft.SourceBuild.Intermediate.arcade , Microsoft.DotNet.Arcade.Sdk , Microsoft.DotNet.Build.Tasks.Archives , Microsoft.DotNet.Build.Tasks.Feed , Microsoft.DotNet.Build.Tasks.Installers , Microsoft.DotNet.Build.Tasks.Packaging , Microsoft.DotNet.Build.Tasks.TargetFramework , Microsoft.DotNet.Build.Tasks.Templating , Microsoft.DotNet.Build.Tasks.Workloads , Microsoft.DotNet.CodeAnalysis , Microsoft.DotNet.GenAPI , Microsoft.DotNet.GenFacades , Microsoft.DotNet.Helix.Sdk , Microsoft.DotNet.PackageTesting , Microsoft.DotNet.RemoteExecutor , Microsoft.DotNet.SharedFramework.Sdk , Microsoft.DotNet.VersionTools.Tasks , Microsoft.DotNet.XliffTasks , 
Microsoft.DotNet.XUnitAssert , Microsoft.DotNet.XUnitConsoleRunner , Microsoft.DotNet.XUnitExtensions From Version 9.0.0-beta.24176.6 -> To Version 9.0.0-beta.24179.6 * Update dependencies from https://github.com/dotnet/arcade build 20240401.3 Microsoft.SourceBuild.Intermediate.arcade , Microsoft.DotNet.Arcade.Sdk , Microsoft.DotNet.Build.Tasks.Archives , Microsoft.DotNet.Build.Tasks.Feed , Microsoft.DotNet.Build.Tasks.Installers , Microsoft.DotNet.Build.Tasks.Packaging , Microsoft.DotNet.Build.Tasks.TargetFramework , Microsoft.DotNet.Build.Tasks.Templating , Microsoft.DotNet.Build.Tasks.Workloads , Microsoft.DotNet.CodeAnalysis , Microsoft.DotNet.GenAPI , Microsoft.DotNet.GenFacades , Microsoft.DotNet.Helix.Sdk , Microsoft.DotNet.PackageTesting , Microsoft.DotNet.RemoteExecutor , Microsoft.DotNet.SharedFramework.Sdk , Microsoft.DotNet.VersionTools.Tasks , Microsoft.DotNet.XliffTasks , Microsoft.DotNet.XUnitAssert , Microsoft.DotNet.XUnitConsoleRunner , Microsoft.DotNet.XUnitExtensions From Version 9.0.0-beta.24176.6 -> To Version 9.0.0-beta.24201.3 * Update dependencies from https://github.com/dotnet/arcade build 20240403.1 Microsoft.SourceBuild.Intermediate.arcade , Microsoft.DotNet.Arcade.Sdk , Microsoft.DotNet.Build.Tasks.Archives , Microsoft.DotNet.Build.Tasks.Feed , Microsoft.DotNet.Build.Tasks.Installers , Microsoft.DotNet.Build.Tasks.Packaging , Microsoft.DotNet.Build.Tasks.TargetFramework , Microsoft.DotNet.Build.Tasks.Templating , Microsoft.DotNet.Build.Tasks.Workloads , Microsoft.DotNet.CodeAnalysis , Microsoft.DotNet.GenAPI , Microsoft.DotNet.GenFacades , Microsoft.DotNet.Helix.Sdk , Microsoft.DotNet.PackageTesting , Microsoft.DotNet.RemoteExecutor , Microsoft.DotNet.SharedFramework.Sdk , Microsoft.DotNet.VersionTools.Tasks , Microsoft.DotNet.XliffTasks , Microsoft.DotNet.XUnitAssert , Microsoft.DotNet.XUnitConsoleRunner , Microsoft.DotNet.XUnitExtensions From Version 9.0.0-beta.24176.6 -> To Version 9.0.0-beta.24203.1 * Update dependencies from 
https://github.com/dotnet/arcade build 20240403.1 Microsoft.SourceBuild.Intermediate.arcade , Microsoft.DotNet.Arcade.Sdk , Microsoft.DotNet.Build.Tasks.Archives , Microsoft.DotNet.Build.Tasks.Feed , Microsoft.DotNet.Build.Tasks.Installers , Microsoft.DotNet.Build.Tasks.Packaging , Microsoft.DotNet.Build.Tasks.TargetFramework , Microsoft.DotNet.Build.Tasks.Templating , Microsoft.DotNet.Build.Tasks.Workloads , Microsoft.DotNet.CodeAnalysis , Microsoft.DotNet.GenAPI , Microsoft.DotNet.GenFacades , Microsoft.DotNet.Helix.Sdk , Microsoft.DotNet.PackageTesting , Microsoft.DotNet.RemoteExecutor , Microsoft.DotNet.SharedFramework.Sdk , Microsoft.DotNet.VersionTools.Tasks , Microsoft.DotNet.XliffTasks , Microsoft.DotNet.XUnitAssert , Microsoft.DotNet.XUnitConsoleRunner , Microsoft.DotNet.XUnitExtensions From Version 9.0.0-beta.24176.6 -> To Version 9.0.0-beta.24203.1 --------- Co-authored-by: dotnet-maestro[bot] --- eng/Version.Details.xml | 84 +++++++++---------- eng/Versions.props | 32 +++---- .../templates-official/job/onelocbuild.yml | 2 +- .../job/publish-build-assets.yml | 4 +- .../templates-official/job/source-build.yml | 2 +- .../post-build/post-build.yml | 10 +-- .../variables/pool-providers.yml | 2 +- global.json | 6 +- 8 files changed, 71 insertions(+), 71 deletions(-) diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml index d77a5d23a33418..b7cbff725f6d4e 100644 --- a/eng/Version.Details.xml +++ b/eng/Version.Details.xml @@ -92,87 +92,87 @@ - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + 
https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 
https://github.com/dotnet/runtime-assets @@ -332,9 +332,9 @@ https://github.com/dotnet/xharness 006ea312a94e8b7f5b7ae47a6470f733ddd1738a - + https://github.com/dotnet/arcade - b6fada3ec4fa37e08dcbafaa6ddf59213f3f8687 + 532f956a119bce77ca279994054d08dbc24418f7 https://dev.azure.com/dnceng/internal/_git/dotnet-optimization diff --git a/eng/Versions.props b/eng/Versions.props index acc78b3a7b26bb..c9ff6989f03a28 100644 --- a/eng/Versions.props +++ b/eng/Versions.props @@ -83,22 +83,22 @@ 9.0.100-preview.4.24175.4 - 9.0.0-beta.24176.6 - 9.0.0-beta.24176.6 - 9.0.0-beta.24176.6 - 9.0.0-beta.24176.6 - 2.6.7-beta.24176.6 - 9.0.0-beta.24176.6 - 2.6.7-beta.24176.6 - 9.0.0-beta.24176.6 - 9.0.0-beta.24176.6 - 9.0.0-beta.24176.6 - 9.0.0-beta.24176.6 - 9.0.0-beta.24176.6 - 9.0.0-beta.24176.6 - 9.0.0-beta.24176.6 - 9.0.0-beta.24176.6 - 9.0.0-beta.24176.6 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 2.6.7-beta.24203.1 + 9.0.0-beta.24203.1 + 2.6.7-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 1.4.0 diff --git a/eng/common/templates-official/job/onelocbuild.yml b/eng/common/templates-official/job/onelocbuild.yml index ba9ba49303292a..52b4d05d3f8dd6 100644 --- a/eng/common/templates-official/job/onelocbuild.yml +++ b/eng/common/templates-official/job/onelocbuild.yml @@ -56,7 +56,7 @@ jobs: # If it's not devdiv, it's dnceng ${{ if ne(variables['System.TeamProject'], 'DevDiv') }}: name: $(DncEngInternalBuildPool) - image: 1es-windows-2022-pt + image: 1es-windows-2022 os: windows steps: diff --git a/eng/common/templates-official/job/publish-build-assets.yml b/eng/common/templates-official/job/publish-build-assets.yml index d72e4ea6d9f293..38340d3e38614a 100644 --- a/eng/common/templates-official/job/publish-build-assets.yml +++ b/eng/common/templates-official/job/publish-build-assets.yml @@ 
-60,8 +60,8 @@ jobs: os: windows # If it's not devdiv, it's dnceng ${{ if ne(variables['System.TeamProject'], 'DevDiv') }}: - name: $(DncEngInternalBuildPool) - image: 1es-windows-2022-pt + name: NetCore1ESPool-Publishing-Internal + image: windows.vs2019.amd64 os: windows steps: - ${{ if and(eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}: diff --git a/eng/common/templates-official/job/source-build.yml b/eng/common/templates-official/job/source-build.yml index 50f04e642a3543..50d4b98e201a31 100644 --- a/eng/common/templates-official/job/source-build.yml +++ b/eng/common/templates-official/job/source-build.yml @@ -52,7 +52,7 @@ jobs: ${{ if eq(variables['System.TeamProject'], 'internal') }}: name: $[replace(replace(eq(contains(coalesce(variables['System.PullRequest.TargetBranch'], variables['Build.SourceBranch'], 'refs/heads/main'), 'release'), 'true'), True, 'NetCore1ESPool-Svc-Internal'), False, 'NetCore1ESPool-Internal')] - image: 1es-mariner-2-pt + image: 1es-mariner-2 os: linux ${{ if ne(parameters.platform.pool, '') }}: diff --git a/eng/common/templates-official/post-build/post-build.yml b/eng/common/templates-official/post-build/post-build.yml index 5c98fe1c0f3a96..da1f40958b450d 100644 --- a/eng/common/templates-official/post-build/post-build.yml +++ b/eng/common/templates-official/post-build/post-build.yml @@ -110,7 +110,7 @@ stages: # If it's not devdiv, it's dnceng ${{ else }}: name: $(DncEngInternalBuildPool) - image: 1es-windows-2022-pt + image: 1es-windows-2022 os: windows steps: @@ -150,7 +150,7 @@ stages: # If it's not devdiv, it's dnceng ${{ else }}: name: $(DncEngInternalBuildPool) - image: 1es-windows-2022-pt + image: 1es-windows-2022 os: windows steps: - template: setup-maestro-vars.yml @@ -208,7 +208,7 @@ stages: # If it's not devdiv, it's dnceng ${{ else }}: name: $(DncEngInternalBuildPool) - image: 1es-windows-2022-pt + image: 1es-windows-2022 os: windows 
steps: - template: setup-maestro-vars.yml @@ -261,8 +261,8 @@ stages: os: windows # If it's not devdiv, it's dnceng ${{ else }}: - name: $(DncEngInternalBuildPool) - image: 1es-windows-2022-pt + name: NetCore1ESPool-Publishing-Internal + image: windows.vs2019.amd64 os: windows steps: - template: setup-maestro-vars.yml diff --git a/eng/common/templates-official/variables/pool-providers.yml b/eng/common/templates-official/variables/pool-providers.yml index beab7d1bfba062..1f308b24efc43d 100644 --- a/eng/common/templates-official/variables/pool-providers.yml +++ b/eng/common/templates-official/variables/pool-providers.yml @@ -23,7 +23,7 @@ # # pool: # name: $(DncEngInternalBuildPool) -# image: 1es-windows-2022-pt +# image: 1es-windows-2022 variables: # Coalesce the target and source branches so we know when a PR targets a release branch diff --git a/global.json b/global.json index c6da44ecdbcdf2..3f9d14a6e2e518 100644 --- a/global.json +++ b/global.json @@ -8,9 +8,9 @@ "dotnet": "9.0.100-preview.1.24101.2" }, "msbuild-sdks": { - "Microsoft.DotNet.Arcade.Sdk": "9.0.0-beta.24176.6", - "Microsoft.DotNet.Helix.Sdk": "9.0.0-beta.24176.6", - "Microsoft.DotNet.SharedFramework.Sdk": "9.0.0-beta.24176.6", + "Microsoft.DotNet.Arcade.Sdk": "9.0.0-beta.24203.1", + "Microsoft.DotNet.Helix.Sdk": "9.0.0-beta.24203.1", + "Microsoft.DotNet.SharedFramework.Sdk": "9.0.0-beta.24203.1", "Microsoft.Build.NoTargets": "3.7.0", "Microsoft.Build.Traversal": "3.4.0", "Microsoft.NET.Sdk.IL": "9.0.0-preview.4.24175.1" From cf382659e228e0e7c62e12557ede1fbec3bdc619 Mon Sep 17 00:00:00 2001 From: Ahmet Ibrahim Aksoy Date: Thu, 4 Apr 2024 18:17:04 +0200 Subject: [PATCH 094/132] [QUIC] Delete the ByteMixing MsQuic test (#100640) --- .../tests/FunctionalTests/MsQuicTests.cs | 55 ------------------- 1 file changed, 55 deletions(-) diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs index 
4b1f07a2188e83..b2042adffe33b4 100644 --- a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs +++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs @@ -1204,61 +1204,6 @@ public BufferSegment Append(ReadOnlyMemory memory) } } - [Fact] - [OuterLoop("May take several seconds")] - [ActiveIssue("https://github.com/dotnet/runtime/issues/85331", typeof(PlatformDetection), nameof(PlatformDetection.IsWindows10Version20348OrLower))] - public async Task ByteMixingOrNativeAVE_MinimalFailingTest() - { - const int writeSize = 64 * 1024; - const int NumberOfWrites = 512; - byte[] data1 = new byte[writeSize * NumberOfWrites]; - byte[] data2 = new byte[writeSize * NumberOfWrites]; - Array.Fill(data1, (byte)1); - Array.Fill(data2, (byte)2); - - Task t1 = RunTest(data1); - Task t2 = RunTest(data2); - - async Task RunTest(byte[] data) - { - await RunClientServer( - iterations: 20, - serverFunction: async connection => - { - await using QuicStream stream = await connection.AcceptInboundStreamAsync(); - - byte[] buffer = new byte[data.Length]; - int bytesRead = await ReadAll(stream, buffer); - Assert.Equal(data.Length, bytesRead); - AssertExtensions.SequenceEqual(data, buffer); - - for (int pos = 0; pos < data.Length; pos += writeSize) - { - await stream.WriteAsync(data[pos..(pos + writeSize)]); - } - await stream.WriteAsync(Memory.Empty, completeWrites: true); - }, - clientFunction: async connection => - { - await using QuicStream stream = await connection.OpenOutboundStreamAsync(QuicStreamType.Bidirectional); - - for (int pos = 0; pos < data.Length; pos += writeSize) - { - await stream.WriteAsync(data[pos..(pos + writeSize)]); - } - await stream.WriteAsync(Memory.Empty, completeWrites: true); - - byte[] buffer = new byte[data.Length]; - int bytesRead = await ReadAll(stream, buffer); - Assert.Equal(data.Length, bytesRead); - AssertExtensions.SequenceEqual(data, buffer); - } - ); - } - - await (new[] { t1, t2 
}).WhenAllOrAnyFailed(millisecondsTimeout: 1000000); - } - [Fact] public async Task ManagedAVE_MinimalFailingTest() { From 9a1da4d9d0e669ce278ca98316200f193136b144 Mon Sep 17 00:00:00 2001 From: skyoxZ Date: Fri, 5 Apr 2024 02:33:05 +0800 Subject: [PATCH 095/132] Fix `Int128` checked-convert to signed IntX (#100342) * Fix Int128 checked-convert to signed IntX * Simplify implementation --- .../src/System/Int128.cs | 55 +++++-------------- .../System/Int128Tests.cs | 45 +++++++++++++++ 2 files changed, 60 insertions(+), 40 deletions(-) diff --git a/src/libraries/System.Private.CoreLib/src/System/Int128.cs b/src/libraries/System.Private.CoreLib/src/System/Int128.cs index 12cc6c7dec5622..c1e6c459f7b543 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Int128.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Int128.cs @@ -262,17 +262,12 @@ public static explicit operator Half(Int128 value) /// is not representable by . public static explicit operator checked short(Int128 value) { - if (~value._upper == 0) - { - long lower = (long)value._lower; - return checked((short)lower); - } - - if (value._upper != 0) + long lower = (long)value._lower; + if ((long)value._upper != lower >> 63) { ThrowHelper.ThrowOverflowException(); } - return checked((short)value._lower); + return checked((short)lower); } /// Explicitly converts a 128-bit signed integer to a value. @@ -286,17 +281,12 @@ public static explicit operator checked short(Int128 value) /// is not representable by . public static explicit operator checked int(Int128 value) { - if (~value._upper == 0) - { - long lower = (long)value._lower; - return checked((int)lower); - } - - if (value._upper != 0) + long lower = (long)value._lower; + if ((long)value._upper != lower >> 63) { ThrowHelper.ThrowOverflowException(); } - return checked((int)value._lower); + return checked((int)lower); } /// Explicitly converts a 128-bit signed integer to a value. 
@@ -310,17 +300,12 @@ public static explicit operator checked int(Int128 value) /// is not representable by . public static explicit operator checked long(Int128 value) { - if (~value._upper == 0) - { - long lower = (long)value._lower; - return lower; - } - - if (value._upper != 0) + long lower = (long)value._lower; + if ((long)value._upper != lower >> 63) { ThrowHelper.ThrowOverflowException(); } - return checked((long)value._lower); + return lower; } /// Explicitly converts a 128-bit signed integer to a value. @@ -334,17 +319,12 @@ public static explicit operator checked long(Int128 value) /// is not representable by . public static explicit operator checked nint(Int128 value) { - if (~value._upper == 0) - { - long lower = (long)value._lower; - return checked((nint)lower); - } - - if (value._upper != 0) + long lower = (long)value._lower; + if ((long)value._upper != lower >> 63) { ThrowHelper.ThrowOverflowException(); } - return checked((nint)value._lower); + return checked((nint)lower); } /// Explicitly converts a 128-bit signed integer to a value. @@ -360,17 +340,12 @@ public static explicit operator checked nint(Int128 value) [CLSCompliant(false)] public static explicit operator checked sbyte(Int128 value) { - if (~value._upper == 0) - { - long lower = (long)value._lower; - return checked((sbyte)lower); - } - - if (value._upper != 0) + long lower = (long)value._lower; + if ((long)value._upper != lower >> 63) { ThrowHelper.ThrowOverflowException(); } - return checked((sbyte)value._lower); + return checked((sbyte)lower); } /// Explicitly converts a 128-bit signed integer to a value. 
diff --git a/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Int128Tests.cs b/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Int128Tests.cs index 8c1910e2fbd8dc..9b9103b56ec20c 100644 --- a/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Int128Tests.cs +++ b/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Int128Tests.cs @@ -107,6 +107,51 @@ public static void EqualsTest(Int128 i1, object obj, bool expected) Assert.Equal(expected, i1.Equals(obj)); } + [Fact] + public static void CheckedConvertToInt64() + { + Assert.Equal(123L, checked((long)new Int128(0, 123))); + Assert.Equal(-123L, checked((long)(Int128)(-123))); + Assert.Throws(() => checked((long)new Int128(1, 1))); + Assert.Throws(() => checked((long)new Int128(ulong.MaxValue, 42))); + } + + [Fact] + public static void CheckedConvertToInt32() + { + Assert.Equal(123, checked((int)new Int128(0, 123))); + Assert.Equal(-123, checked((int)(Int128)(-123))); + Assert.Throws(() => checked((int)new Int128(1, 1))); + Assert.Throws(() => checked((int)new Int128(ulong.MaxValue, 42))); + } + + [Fact] + public static void CheckedConvertToInt16() + { + Assert.Equal((short)123, checked((short)new Int128(0, 123))); + Assert.Equal((short)(-123), checked((short)(Int128)(-123))); + Assert.Throws(() => checked((short)new Int128(1, 1))); + Assert.Throws(() => checked((short)new Int128(ulong.MaxValue, 42))); + } + + [Fact] + public static void CheckedConvertToSByte() + { + Assert.Equal((sbyte)123, checked((sbyte)new Int128(0, 123))); + Assert.Equal((sbyte)(-123), checked((sbyte)(Int128)(-123))); + Assert.Throws(() => checked((sbyte)new Int128(1, 1))); + Assert.Throws(() => checked((sbyte)new Int128(ulong.MaxValue, 42))); + } + + [Fact] + public static void CheckedConvertToIntPtr() + { + Assert.Equal((nint)123, checked((nint)new Int128(0, 123))); + Assert.Equal((nint)(-123), checked((nint)(Int128)(-123))); + Assert.Throws(() => checked((nint)new Int128(1, 1))); + 
Assert.Throws(() => checked((nint)new Int128(ulong.MaxValue, 42))); + } + public static IEnumerable ToString_TestData() { foreach (NumberFormatInfo defaultFormat in new[] { null, NumberFormatInfo.CurrentInfo }) From 9f3058ff2a62424591e5342c933b7bb3c03ed2db Mon Sep 17 00:00:00 2001 From: Radek Zikmund <32671551+rzikm@users.noreply.github.com> Date: Thu, 4 Apr 2024 20:47:13 +0200 Subject: [PATCH 096/132] Add log to SendAsync_SlowServerRespondsAfterDefaultReceiveTimeout_ThrowsHttpRequestException. (#100525) --- .../tests/FunctionalTests/WinHttpHandlerTest.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libraries/System.Net.Http.WinHttpHandler/tests/FunctionalTests/WinHttpHandlerTest.cs b/src/libraries/System.Net.Http.WinHttpHandler/tests/FunctionalTests/WinHttpHandlerTest.cs index 08e2850e5b0f54..358344d6708a0c 100644 --- a/src/libraries/System.Net.Http.WinHttpHandler/tests/FunctionalTests/WinHttpHandlerTest.cs +++ b/src/libraries/System.Net.Http.WinHttpHandler/tests/FunctionalTests/WinHttpHandlerTest.cs @@ -122,6 +122,7 @@ await LoopbackServer.CreateServerAsync(async (server, url) => await triggerRequestWait.Task; var _ = await t; }); + _output.WriteLine($"ex: {ex}"); Assert.IsType(ex.InnerException); Assert.NotNull(ex.InnerException.InnerException); Assert.Contains("The operation timed out", ex.InnerException.InnerException.Message); From 48aa85fa8fdc274b0b8f6f81223857d8ab386656 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Thu, 4 Apr 2024 11:48:37 -0700 Subject: [PATCH 097/132] Rebase the installer build jobs on the global build job template (#100110) --- eng/pipelines/common/global-build-job.yml | 15 +- .../common/templates/global-build-step.yml | 5 +- eng/pipelines/installer/jobs/build-job.yml | 248 ------------------ .../jobs/steps/build-linux-package.yml | 31 --- .../jobs/steps/upload-job-artifacts.yml | 65 ----- .../installer/steps/build-linux-package.yml | 33 +++ .../installer/steps/upload-job-artifacts.yml | 26 ++ 
eng/pipelines/runtime-official.yml | 12 +- eng/pipelines/runtime.yml | 85 +++++- 9 files changed, 151 insertions(+), 369 deletions(-) delete mode 100644 eng/pipelines/installer/jobs/build-job.yml delete mode 100644 eng/pipelines/installer/jobs/steps/build-linux-package.yml delete mode 100644 eng/pipelines/installer/jobs/steps/upload-job-artifacts.yml create mode 100644 eng/pipelines/installer/steps/build-linux-package.yml create mode 100644 eng/pipelines/installer/steps/upload-job-artifacts.yml diff --git a/eng/pipelines/common/global-build-job.yml b/eng/pipelines/common/global-build-job.yml index aac4e64b80fb41..402e9271dfafe0 100644 --- a/eng/pipelines/common/global-build-job.yml +++ b/eng/pipelines/common/global-build-job.yml @@ -12,6 +12,13 @@ parameters: targetRid: '' timeoutInMinutes: '' dependsOn: [] + # The following parameter is used to specify dependencies on other global build for the same platform. + # We provide this mechanism to allow for global builds to depend on other global builds and use the multiplexing + # that platform-matrix.yml enables. + # Each item can have the following properties: + # - nameSuffix: The suffix of the job name to depend on. + # - buildConfig: The configuration of the job to depend on. 
+ dependsOnGlobalBuilds: [] pool: '' platform: '' condition: true @@ -62,8 +69,12 @@ jobs: workspace: clean: all - ${{ if ne(parameters.dependsOn,'') }}: - dependsOn: ${{ parameters.dependsOn }} + ${{ if or(ne(parameters.dependsOn,''), ne(parameters.dependsOnGlobalBuilds,'')) }}: + dependsOn: + - ${{ each build in parameters.dependsOn }}: + - ${{ build }} + - ${{ each globalBuild in parameters.dependsOnGlobalBuilds }}: + - ${{ format('build_{0}{1}_{2}_{3}_{4}', parameters.osGroup, parameters.osSubgroup, parameters.archType, coalesce(globalBuild.buildConfig, parameters.buildConfig), globalBuild.nameSuffix) }} variables: - ${{ if eq(variables['System.TeamProject'], 'internal') }}: diff --git a/eng/pipelines/common/templates/global-build-step.yml b/eng/pipelines/common/templates/global-build-step.yml index 6597ac4feaefcd..b88207c516c19b 100644 --- a/eng/pipelines/common/templates/global-build-step.yml +++ b/eng/pipelines/common/templates/global-build-step.yml @@ -3,13 +3,16 @@ parameters: useContinueOnErrorDuringBuild: false shouldContinueOnError: false archParameter: $(_archParameter) + crossArg: $(crossArg) displayName: Build product container: '' + condition: succeeded() steps: - - script: $(Build.SourcesDirectory)$(dir)build$(scriptExt) -ci ${{ parameters.archParameter }} $(_osParameter) $(crossArg) ${{ parameters.buildArgs }} $(_officialBuildParameter) $(_buildDarwinFrameworksParameter) $(_overrideTestScriptWindowsCmdParameter) + - script: $(Build.SourcesDirectory)$(dir)build$(scriptExt) -ci ${{ parameters.archParameter }} $(_osParameter) ${{ parameters.crossArg }} ${{ parameters.buildArgs }} $(_officialBuildParameter) $(_buildDarwinFrameworksParameter) $(_overrideTestScriptWindowsCmdParameter) displayName: ${{ parameters.displayName }} ${{ if eq(parameters.useContinueOnErrorDuringBuild, true) }}: continueOnError: ${{ parameters.shouldContinueOnError }} ${{ if ne(parameters.container, '') }}: target: ${{ parameters.container }} + condition: ${{ 
parameters.condition }} diff --git a/eng/pipelines/installer/jobs/build-job.yml b/eng/pipelines/installer/jobs/build-job.yml deleted file mode 100644 index 9a444b2bde170d..00000000000000 --- a/eng/pipelines/installer/jobs/build-job.yml +++ /dev/null @@ -1,248 +0,0 @@ -parameters: - buildConfig: '' - osGroup: '' - archType: '' - osSubgroup: '' - platform: '' - crossBuild: false - timeoutInMinutes: 120 - condition: true - shouldContinueOnError: false - container: '' - buildSteps: [] - dependsOn: [] - globalBuildSuffix: '' - variables: [] - name: '' - displayName: '' - runtimeVariant: '' - pool: '' - - # The target names here should match container names in the resources section in our pipelines, like runtime.yml - packageDistroList: - - target: debpkg - packageType: deb - packagingArgs: /p:BuildDebPackage=true - - target: rpmpkg - packageType: rpm - packagingArgs: /p:BuildRpmPackage=true - - isOfficialBuild: false - buildFullPlatformManifest: false - - liveRuntimeBuildConfig: '' - liveLibrariesBuildConfig: '' - runtimeFlavor: 'coreclr' - unifiedArtifactsName: '' - unifiedBuildNameSuffix: '' - unifiedBuildConfigOverride: '' - -### Product build -jobs: -- template: /eng/common/templates/job/job.yml - parameters: - buildConfig: ${{ parameters.buildConfig }} - archType: ${{ parameters.archType }} - osGroup: ${{ parameters.osGroup }} - osSubgroup: ${{ parameters.osSubgroup }} - runtimeVariant: ${{ parameters.runtimeVariant }} - testGroup: ${{ parameters.testGroup }} - helixType: 'build/product/' - enableMicrobuild: true - pool: ${{ parameters.pool }} - - # Compute job name from template parameters - name: ${{ format('installer_{0}_{1}', coalesce(parameters.name, parameters.platform), parameters.buildConfig) }} - displayName: ${{ format('Installer Build and Test {0} {1}', coalesce(parameters.name, parameters.platform), parameters.buildConfig) }} - - # Run all steps in the container. 
- # Note that the containers are defined in platform-matrix.yml - container: ${{ parameters.container }} - - timeoutInMinutes: ${{ parameters.timeoutInMinutes }} - - crossBuild: ${{ parameters.crossBuild }} - - gatherAssetManifests: true - - # Component governance does not work on musl machines - ${{ if eq(parameters.osSubGroup, '_musl') }}: - disableComponentGovernance: true - - variables: - - ${{ each variable in parameters.variables }}: - - ${{ variable }} - - - name: BuildAction - value: -test - - - name: buildCommandSourcesDirectory - value: '$(Build.SourcesDirectory)/' - - - name: librariesBuildConfigArg - value: -lc ${{ coalesce(parameters.liveLibrariesBuildConfig, parameters.buildConfig) }} - - - name: runtimeBuildConfigArg - value: -rc ${{ coalesce(parameters.liveRuntimeBuildConfig, parameters.buildConfig) }} - - ### - ### Platform-specific variable setup - ### - - - ${{ if eq(parameters.osGroup, 'windows') }}: - - - name: CommonMSBuildArgs - value: >- - /p:TargetArchitecture=${{ parameters.archType }} - /p:PortableBuild=true - /p:RuntimeFlavor=${{ parameters.runtimeFlavor }} - - name: TargetArchitecture - value: ${{ parameters.archType }} - - - name: BaseJobBuildCommand - value: >- - build.cmd -subset host+packs -ci - $(BuildAction) - -configuration $(_BuildConfig) - $(librariesBuildConfigArg) - $(runtimeBuildConfigArg) - $(CommonMSBuildArgs) - - - ${{ elseif eq(parameters.osGroup, 'osx') }}: - - - name: CommonMSBuildArgs - value: >- - /p:PortableBuild=true - /p:RuntimeFlavor=${{ parameters.runtimeFlavor }} - /p:TargetArchitecture=${{ parameters.archType }} - /p:CrossBuild=${{ parameters.crossBuild }} - - - name: BaseJobBuildCommand - value: >- - $(Build.SourcesDirectory)/build.sh -subset host+packs -ci - $(BuildAction) - -configuration $(_BuildConfig) - -arch ${{ parameters.archType }} - $(librariesBuildConfigArg) - $(runtimeBuildConfigArg) - $(CommonMSBuildArgs) - - - ${{ elseif in(parameters.osGroup, 'linux', 'freebsd') }}: - - name: CommonMSBuildArgs 
- value: >- - /p:Configuration=$(_BuildConfig) - /p:TargetOS=${{ parameters.osGroup }} - /p:TargetArchitecture=${{ parameters.archType }} - /p:RuntimeFlavor=${{ parameters.runtimeFlavor }} - - - name: _PortableBuild - value: ${{ eq(parameters.osSubgroup, '') }} - - - ${{ if and(eq(parameters.osSubgroup, '_musl'), eq(parameters.osGroup, 'linux')) }}: - - name: _PortableBuild - value: true - - - ${{ if eq(parameters.crossBuild, true) }}: - - name: ArchArg - value: -arch ${{ parameters.archType }} - - - name: BaseJobBuildCommand - value: >- - $(Build.SourcesDirectory)/build.sh -subset host+packs -ci - $(BuildAction) - $(librariesBuildConfigArg) - $(runtimeBuildConfigArg) - $(ArchArg) - /p:CrossBuild=${{ parameters.crossBuild }} - /p:PortableBuild=$(_PortableBuild) - $(CommonMSBuildArgs) - - - name: installersSubsetArg - value: --subset packs.installers - - dependsOn: - - 'build_${{ parameters.osGroup }}${{ parameters.osSubgroup }}_${{ parameters.archType }}_${{ coalesce(parameters.unifiedBuildConfigOverride, parameters.buildConfig) }}_${{ parameters.unifiedBuildNameSuffix }}' - - ${{ parameters.dependsOn }} - steps: - - ${{ if eq(parameters.osGroup, 'windows') }}: - - template: /eng/pipelines/common/templates/disable-vsupdate-or-failfast.yml - - checkout: self - clean: true - fetchDepth: $(checkoutFetchDepth) - - - ${{ if ne(variables['System.TeamProject'], 'public') }}: - - ${{ if ne(parameters.osGroup, 'windows') }}: - - task: Bash@3 - displayName: Setup Private Feeds Credentials - inputs: - filePath: $(Build.SourcesDirectory)/eng/common/SetupNugetSources.sh - arguments: $(Build.SourcesDirectory)/NuGet.config $Token - env: - Token: $(dn-bot-dnceng-artifact-feeds-rw) - - ${{ else }}: - - task: PowerShell@2 - displayName: Setup Private Feeds Credentials - inputs: - filePath: $(Build.SourcesDirectory)/eng/common/SetupNugetSources.ps1 - arguments: -ConfigFile $(Build.SourcesDirectory)/NuGet.config -Password $Env:Token - env: - Token: $(dn-bot-dnceng-artifact-feeds-rw) - 
- - template: /eng/pipelines/common/download-artifact-step.yml - parameters: - unpackFolder: $(Build.SourcesDirectory)/artifacts/bin - artifactFileName: '${{ parameters.unifiedArtifactsName }}$(archiveExtension)' - artifactName: '${{ parameters.unifiedArtifactsName }}' - displayName: 'unified artifacts' - - - ${{ if in(parameters.osGroup, 'osx', 'maccatalyst', 'ios', 'iossimulator', 'tvos', 'tvossimulator') }}: - - script: $(Build.SourcesDirectory)/eng/install-native-dependencies.sh ${{ parameters.osGroup }} - displayName: Install Build Dependencies - - - script: | - du -sh $(Build.SourcesDirectory)/* - df -h - displayName: Disk Usage before Build - - - script: $(BaseJobBuildCommand) - displayName: Build - continueOnError: ${{ eq(parameters.shouldContinueOnError, true) }} - - - ${{ if and(eq(parameters.isOfficialBuild, true), eq(parameters.osGroup, 'windows')) }}: - - powershell: ./eng/collect_vsinfo.ps1 -ArchiveRunName postbuild_log - displayName: Collect vslogs on exit - condition: always() - - - ${{ if in(parameters.osGroup, 'osx', 'ios', 'tvos') }}: - - script: | - du -sh $(Build.SourcesDirectory)/* - df -h - displayName: Disk Usage after Build - - # Only in glibc leg, we produce RPMs and Debs - - ${{ if and(eq(parameters.runtimeFlavor, 'coreclr'), or(eq(parameters.platform, 'linux_x64'), eq(parameters.platform, 'linux_arm64')), eq(parameters.osSubgroup, ''))}}: - - ${{ each packageBuild in parameters.packageDistroList }}: - # This leg's RID matches the build image. Build its distro-dependent packages, as well as - # the distro-independent installers. (There's no particular reason to build the distro- - # independent installers on this leg, but we need to do it somewhere.) - # Currently, linux_arm64 supports 'rpm' type only. 
- - ${{ if or(not(eq(parameters.platform, 'linux_arm64')), eq(packageBuild.packageType, 'rpm')) }}: - - template: /eng/pipelines/installer/jobs/steps/build-linux-package.yml - parameters: - packageType: ${{ packageBuild.packageType }} - target: ${{ packageBuild.target }} - packageStepDescription: Runtime Deps, Runtime, Framework Packs installers - subsetArg: $(installersSubsetArg) - packagingArgs: ${{ packageBuild.packagingArgs }} - - - template: /eng/pipelines/installer/jobs/steps/upload-job-artifacts.yml - parameters: - name: ${{ coalesce(parameters.name, parameters.platform) }} - runtimeFlavor: ${{ parameters.runtimeFlavor }} - runtimeVariant: ${{ parameters.runtimeVariant }} - - - ${{ if ne(parameters.osGroup, 'windows') }}: - - script: set -x && df -h - displayName: Check remaining storage space - condition: always() - continueOnError: true diff --git a/eng/pipelines/installer/jobs/steps/build-linux-package.yml b/eng/pipelines/installer/jobs/steps/build-linux-package.yml deleted file mode 100644 index 7f8320b55dfe39..00000000000000 --- a/eng/pipelines/installer/jobs/steps/build-linux-package.yml +++ /dev/null @@ -1,31 +0,0 @@ -parameters: - packageType: null - target: '' - packageStepDescription: null - packagingArgs: '' - subsetArg: '' - condition: succeeded() - -steps: -## Run NuGet Authentication for each of the side containers -- ${{ if ne(variables['System.TeamProject'], 'public') }}: - - task: NuGetAuthenticate@1 - target: ${{ parameters.target }} -- script: | - $(Build.SourcesDirectory)/build.sh \ - --ci \ - ${{ parameters.subsetArg }} \ - ${{ parameters.packagingArgs }} \ - $(CommonMSBuildArgs) \ - $(librariesBuildConfigArg) \ - $(runtimeBuildConfigArg) \ - /bl:artifacts/log/$(_BuildConfig)/msbuild.${{ parameters.packageType }}.installers.binlog - displayName: Package ${{ parameters.packageStepDescription }} - ${{ parameters.packageType }} - target: ${{ parameters.target }} - condition: ${{ parameters.condition }} -# Broken symbolic links break the 
SBOM processing -# We make some symlinks during the installer generation process, -# but they aren't always valid on disk afterwards. Some of our tooling, -# in particular the SBOM tooling, breaks on broken symlinks. -- script: find . -xtype l -delete - displayName: Remove broken symbolic links diff --git a/eng/pipelines/installer/jobs/steps/upload-job-artifacts.yml b/eng/pipelines/installer/jobs/steps/upload-job-artifacts.yml deleted file mode 100644 index 8cdf6e8fb77787..00000000000000 --- a/eng/pipelines/installer/jobs/steps/upload-job-artifacts.yml +++ /dev/null @@ -1,65 +0,0 @@ -parameters: - name: '' - -steps: -- task: PublishTestResults@2 - displayName: Publish Test Results - inputs: - testResultsFormat: 'VSTest' - testResultsFiles: '*.trx' - searchFolder: '$(Build.SourcesDirectory)/artifacts/TestResults/$(_BuildConfig)' - mergeTestResults: true - testRunTitle: Installer-${{ parameters.name }}-$(_BuildConfig) - continueOnError: true - condition: always() - -# Upload binaries and symbols on failure to allow debugging issues -- task: CopyFiles@2 - displayName: Prepare binaries to publish - inputs: - SourceFolder: '$(Build.SourcesDirectory)/artifacts/bin' - Contents: | - */corehost/** - */corehost_test/** - TargetFolder: '$(Build.StagingDirectory)/Binaries' - continueOnError: true - condition: failed() - -- task: ArchiveFiles@2 - displayName: Zip binaries - inputs: - rootFolderOrFile: '$(Build.StagingDirectory)/Binaries' - archiveFile: '$(Build.StagingDirectory)/corehost-bin-${{ parameters.name }}-$(_BuildConfig)$(archiveExtension)' - archiveType: $(archiveType) - tarCompression: $(tarCompression) - includeRootFolder: false - continueOnError: true - condition: failed() - -- task: PublishBuildArtifacts@1 - displayName: Publish binaries - inputs: - pathtoPublish: '$(Build.StagingDirectory)/corehost-bin-${{ parameters.name }}-$(_BuildConfig)$(archiveExtension)' - artifactName: Installer-Binaries-${{ parameters.name }}-$(_BuildConfig) - continueOnError: true - 
condition: failed() - -- task: CopyFiles@2 - displayName: Prepare BuildLogs staging directory - inputs: - SourceFolder: '$(Build.SourcesDirectory)' - Contents: | - **/*.log - **/*.binlog - TargetFolder: '$(Build.StagingDirectory)/BuildLogs' - CleanTargetFolder: true - continueOnError: true - condition: always() - -- task: PublishPipelineArtifact@1 - displayName: Publish BuildLogs - inputs: - targetPath: '$(Build.StagingDirectory)/BuildLogs' - artifactName: Installer-Logs_Attempt$(System.JobAttempt)-${{ parameters.runtimeFlavor }}-${{ parameters.runtimeVariant }}-${{ parameters.name }}-$(_BuildConfig) - continueOnError: true - condition: always() diff --git a/eng/pipelines/installer/steps/build-linux-package.yml b/eng/pipelines/installer/steps/build-linux-package.yml new file mode 100644 index 00000000000000..ef905d6c164657 --- /dev/null +++ b/eng/pipelines/installer/steps/build-linux-package.yml @@ -0,0 +1,33 @@ +parameters: + osGroup: '' + osSubgroup: '' + packageType: null + target: '' + packageStepDescription: null + packagingArgs: '' + condition: succeeded() + +steps: +- ${{ if and(eq(parameters.osGroup, 'linux'), eq(parameters.osSubgroup, '')) }}: + ## Run NuGet Authentication for each of the side containers + - ${{ if and(ne(variables['System.TeamProject'], 'public'), ne(parameters.target, '')) }}: + - task: NuGetAuthenticate@1 + target: ${{ parameters.target }} + condition: ${{ parameters.condition }} + + - template: /eng/pipelines/common/templates/global-build-step.yml + parameters: + buildArgs: -s packs.installers ${{ parameters.packagingArgs }} /bl:artifacts/log/$(_BuildConfig)/msbuild.${{ parameters.packageType }}.installers.binlog + container: ${{ parameters.target }} + displayName: Package Runtime Deps, Runtime, Framework Packs - ${{ parameters.packageType }} packages + # Even for cross-build targets, our installer build steps are not cross-builds + crossArg: '' + condition: ${{ parameters.condition }} + + # Broken symbolic links break the SBOM 
processing + # We make some symlinks during the installer generation process, + # but they aren't always valid on disk afterwards. Some of our tooling, + # in particular the SBOM tooling, breaks on broken symlinks. + - script: find . -xtype l -delete + displayName: Remove broken symbolic links + condition: ${{ parameters.condition }} diff --git a/eng/pipelines/installer/steps/upload-job-artifacts.yml b/eng/pipelines/installer/steps/upload-job-artifacts.yml new file mode 100644 index 00000000000000..076f989780bbfe --- /dev/null +++ b/eng/pipelines/installer/steps/upload-job-artifacts.yml @@ -0,0 +1,26 @@ +parameters: + name: '' + +steps: +# Upload binaries and symbols on failure to allow debugging issues +- task: CopyFiles@2 + displayName: Prepare binaries to publish + inputs: + SourceFolder: '$(Build.SourcesDirectory)/artifacts/bin' + Contents: | + */corehost/** + */corehost_test/** + TargetFolder: '$(Build.StagingDirectory)/Binaries' + continueOnError: true + condition: failed() + +- template: /eng/pipelines/common/upload-artifact-step.yml + parameters: + rootFolder: '$(Build.StagingDirectory)/Binaries' + includeRootFolder: false + archiveType: $(archiveType) + archiveExtension: $(archiveExtension) + tarCompression: $(tarCompression) + artifactName: 'Installer-Binaries-${{ parameters.name }}-$(_BuildConfig)' + displayName: 'Binaries' + condition: failed() diff --git a/eng/pipelines/runtime-official.yml b/eng/pipelines/runtime-official.yml index b8e74630b856ec..ba09a957605eb9 100644 --- a/eng/pipelines/runtime-official.yml +++ b/eng/pipelines/runtime-official.yml @@ -179,21 +179,17 @@ extends: PublishLocation: Container ArtifactName: CoreCLRCrossDacArtifacts # Create RPMs and DEBs - - template: /eng/pipelines/installer/jobs/steps/build-linux-package.yml + - template: /eng/pipelines/installer/steps/build-linux-package.yml parameters: packageType: deb target: debpkg - packageStepDescription: Runtime Deps, Runtime, Framework Packs Deb installers - subsetArg: -s 
packs.installers - packagingArgs: -c $(_BuildConfig) --arch $(archType) --os $(osGroup) --ci /p:OfficialBuildId=$(Build.BuildNumber) /p:BuildDebPackage=true + packagingArgs: -c $(_BuildConfig) /p:BuildDebPackage=true condition: and(succeeded(), eq(variables.osSubgroup, ''), eq(variables.archType, 'x64')) - - template: /eng/pipelines/installer/jobs/steps/build-linux-package.yml + - template: /eng/pipelines/installer/steps/build-linux-package.yml parameters: packageType: rpm target: rpmpkg - packageStepDescription: Runtime Deps, Runtime, Framework Packs RPM installers - subsetArg: -s packs.installers - packagingArgs: -c $(_BuildConfig) --arch $(archType) --os $(osGroup) --ci /p:OfficialBuildId=$(Build.BuildNumber) /p:BuildRpmPackage=true + packagingArgs: -c $(_BuildConfig) /p:BuildRpmPackage=true condition: and(succeeded(), eq(variables.osSubgroup, ''), in(variables.archType, 'x64', 'arm64')) # Upload the results. diff --git a/eng/pipelines/runtime.yml b/eng/pipelines/runtime.yml index b81da3971a1559..98dc5285250f45 100644 --- a/eng/pipelines/runtime.yml +++ b/eng/pipelines/runtime.yml @@ -95,12 +95,33 @@ extends: buildConfig: ${{ variables.debugOnPrReleaseOnRolling }} platforms: - osx_arm64 + jobParameters: + nameSuffix: AllSubsets_CoreCLR + buildArgs: -s clr+libs+host+packs -rc Release -c Release -lc $(_BuildConfig) + timeoutInMinutes: 120 + condition: >- + or( + eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_non_mono_and_wasm.containsChange'], true), + eq(variables['isRollingBuild'], true)) + + - template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/common/global-build-job.yml + buildConfig: ${{ variables.debugOnPrReleaseOnRolling }} + platforms: - linux_arm64 - linux_musl_x64 jobParameters: nameSuffix: AllSubsets_CoreCLR buildArgs: -s clr+libs+host+packs -rc Release -c Release -lc $(_BuildConfig) timeoutInMinutes: 120 + postBuildSteps: + - template: 
/eng/pipelines/installer/steps/build-linux-package.yml + parameters: + packageType: rpm + target: rpmpkg + packagingArgs: -c Release -lc $(_BuildConfig) /p:BuildRpmPackage=true + condition: and(succeeded(), eq(variables.osSubgroup, '')) condition: >- or( eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_non_mono_and_wasm.containsChange'], true), @@ -1353,16 +1374,29 @@ extends: # - template: /eng/pipelines/common/platform-matrix.yml parameters: - jobTemplate: /eng/pipelines/installer/jobs/build-job.yml + jobTemplate: /eng/pipelines/common/global-build-job.yml buildConfig: ${{ variables.debugOnPrReleaseOnRolling }} platforms: - windows_x86 jobParameters: - liveRuntimeBuildConfig: release - liveLibrariesBuildConfig: Release - unifiedArtifactsName: CoreCLR_Libraries_BuildArtifacts_$(osGroup)$(osSubgroup)_$(archType)_Release - unifiedBuildConfigOverride: release - unifiedBuildNameSuffix: CoreCLR_Libraries + nameSuffix: Installer_Build_And_Test + buildArgs: -s host+packs -c $(_BuildConfig) -lc Release -rc Release -test + dependsOnGlobalBuilds: + - nameSuffix: CoreCLR_Libraries + buildConfig: release + preBuildSteps: + - template: /eng/pipelines/common/download-artifact-step.yml + parameters: + artifactName: CoreCLR_Libraries_BuildArtifacts_$(osGroup)$(osSubgroup)_$(archType)_Release + artifactFileName: CoreCLR_Libraries_BuildArtifacts_$(osGroup)$(osSubgroup)_$(archType)_Release$(archiveExtension) + unpackFolder: $(Build.SourcesDirectory)/artifacts/bin + displayName: 'unified artifacts' + enablePublishTestResults: true + testRunTitle: Installer-$(osGroup)$(osSubgroup)_$(archType) + postBuildSteps: + - template: /eng/pipelines/installer/steps/upload-job-artifacts.yml + parameters: + name: $(osGroup)$(osSubgroup)_$(archType) condition: or( eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_non_mono_and_wasm.containsChange'], true), @@ -1370,18 +1404,41 @@ extends: - template: /eng/pipelines/common/platform-matrix.yml parameters: - 
jobTemplate: /eng/pipelines/installer/jobs/build-job.yml - buildConfig: Release + jobTemplate: /eng/pipelines/common/global-build-job.yml + buildConfig: release platforms: + - windows_x64 - osx_x64 - linux_x64 - - windows_x64 jobParameters: - liveRuntimeBuildConfig: release - liveLibrariesBuildConfig: ${{ variables.debugOnPrReleaseOnRolling }} - unifiedArtifactsName: CoreCLR_Libraries_BuildArtifacts_$(osGroup)$(osSubgroup)_$(archType)_$(debugOnPrReleaseOnRolling) - unifiedBuildConfigOverride: ${{ variables.debugOnPrReleaseOnRolling }} - unifiedBuildNameSuffix: CoreCLR_Libraries + nameSuffix: Installer_Build_And_Test + buildArgs: -s host+packs -c $(_BuildConfig) -lc ${{ variables.debugOnPrReleaseOnRolling }} -rc Release -test + dependsOnGlobalBuilds: + - nameSuffix: CoreCLR_Libraries + buildConfig: ${{ variables.debugOnPrReleaseOnRolling }} + preBuildSteps: + - template: /eng/pipelines/common/download-artifact-step.yml + parameters: + artifactName: CoreCLR_Libraries_BuildArtifacts_$(osGroup)$(osSubgroup)_$(archType)_$(debugOnPrReleaseOnRolling) + artifactFileName: CoreCLR_Libraries_BuildArtifacts_$(osGroup)$(osSubgroup)_$(archType)_$(debugOnPrReleaseOnRolling)$(archiveExtension) + unpackFolder: $(Build.SourcesDirectory)/artifacts/bin + displayName: 'unified artifacts' + enablePublishTestResults: true + testRunTitle: Installer-$(osGroup)$(osSubgroup)_$(archType) + postBuildSteps: + - template: /eng/pipelines/installer/steps/upload-job-artifacts.yml + parameters: + name: $(osGroup)$(osSubgroup)_$(archType) + - template: /eng/pipelines/installer/steps/build-linux-package.yml + parameters: + packageType: deb + target: debpkg + packagingArgs: -c $(_BuildConfig) -lc ${{ variables.debugOnPrReleaseOnRolling }} /p:BuildDebPackage=true + - template: /eng/pipelines/installer/steps/build-linux-package.yml + parameters: + packageType: rpm + target: rpmpkg + packagingArgs: -c $(_BuildConfig) -lc ${{ variables.debugOnPrReleaseOnRolling }} /p:BuildRpmPackage=true condition: or( 
eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_non_mono_and_wasm.containsChange'], true), From 16492b919d553f5d39e58db17d23119b981421c3 Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Thu, 4 Apr 2024 19:04:03 +0000 Subject: [PATCH 098/132] JIT: Support Swift error handling for reverse P/Invokes (#100429) --- src/coreclr/jit/abi.cpp | 11 +++ src/coreclr/jit/codegenarm.cpp | 2 +- src/coreclr/jit/codegenarm64.cpp | 2 +- src/coreclr/jit/codegenarmarch.cpp | 6 +- src/coreclr/jit/codegencommon.cpp | 30 ++++++- src/coreclr/jit/codegenloongarch64.cpp | 4 +- src/coreclr/jit/codegenriscv64.cpp | 4 +- src/coreclr/jit/codegenxarch.cpp | 10 +-- src/coreclr/jit/compiler.cpp | 2 +- src/coreclr/jit/compiler.h | 10 ++- src/coreclr/jit/gtlist.h | 7 +- src/coreclr/jit/importer.cpp | 15 ++++ src/coreclr/jit/importercalls.cpp | 87 +++++++++---------- src/coreclr/jit/lclvars.cpp | 74 ++++++++++++++-- src/coreclr/jit/regset.cpp | 15 ++++ src/coreclr/jit/regset.h | 34 ++++++++ src/coreclr/jit/target.h | 6 ++ src/coreclr/jit/targetamd64.h | 2 +- .../SwiftErrorHandling/SwiftErrorHandling.cs | 68 +++++++++++++++ .../SwiftErrorHandling.swift | 8 ++ 20 files changed, 321 insertions(+), 76 deletions(-) diff --git a/src/coreclr/jit/abi.cpp b/src/coreclr/jit/abi.cpp index 3dd8fcec32fcc9..d54243aa47614e 100644 --- a/src/coreclr/jit/abi.cpp +++ b/src/coreclr/jit/abi.cpp @@ -242,6 +242,17 @@ ABIPassingInformation SwiftABIClassifier::Classify(Compiler* comp, TARGET_POINTER_SIZE)); } + if (wellKnownParam == WellKnownArg::SwiftError) + { + // We aren't actually going to pass the SwiftError* parameter in REG_SWIFT_ERROR. + // We won't be using this parameter at all, and shouldn't allocate registers/stack space for it, + // as that will mess with other args. + // Quirk: To work around the JIT for now, "pass" it in REG_SWIFT_ERROR, + // and let CodeGen::genFnProlog handle the rest. 
+ return ABIPassingInformation::FromSegment(comp, ABIPassingSegment::InRegister(REG_SWIFT_ERROR, 0, + TARGET_POINTER_SIZE)); + } + return m_classifier.Classify(comp, type, structLayout, wellKnownParam); } #endif diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index 65ba1bf5913c69..bddc03e0b41a54 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -2149,7 +2149,7 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) { assert(compiler->compGeneratingEpilog); - regMaskTP maskPopRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; + regMaskTP maskPopRegs = regSet.rsGetModifiedCalleeSavedRegsMask(); regMaskTP maskPopRegsFloat = maskPopRegs & RBM_ALLFLOAT; regMaskTP maskPopRegsInt = maskPopRegs & ~maskPopRegsFloat; diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index ca9ab73224d7a8..74258cdd55a73a 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -36,7 +36,7 @@ void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog) { assert(compiler->compGeneratingEpilog); - regMaskTP rsRestoreRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; + regMaskTP rsRestoreRegs = regSet.rsGetModifiedCalleeSavedRegsMask(); if (isFramePointerUsed()) { diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index 965b72721aaaa0..12512a958e08e4 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -4893,7 +4893,7 @@ void CodeGen::genPushCalleeSavedRegisters() intRegState.rsCalleeRegArgMaskLiveIn); #endif - regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; + regMaskTP rsPushRegs = regSet.rsGetModifiedCalleeSavedRegsMask(); #if ETW_EBP_FRAMED if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE)) @@ -5540,8 +5540,8 @@ void CodeGen::genFnEpilog(BasicBlock* block) compiler->unwindSetFrameReg(REG_SAVED_LOCALLOC_SP, 0); } - if (jmpEpilog || 
genStackAllocRegisterMask(compiler->compLclFrameSize, - regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED) == RBM_NONE) + if (jmpEpilog || + genStackAllocRegisterMask(compiler->compLclFrameSize, regSet.rsGetModifiedFltCalleeSavedRegsMask()) == RBM_NONE) { genFreeLclFrame(compiler->compLclFrameSize, &unwindStarted); } diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 5c733c5457d946..0502339718f8fc 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -2980,6 +2980,17 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere { continue; } + + // On a similar note, the SwiftError* parameter is not a real argument, + // and should not be allocated any registers/stack space. + // We mark it as being passed in REG_SWIFT_ERROR so it won't interfere with other args. + // In genFnProlog, we should have removed this callee-save register from intRegState.rsCalleeRegArgMaskLiveIn. + // TODO-CQ: Fix this. 
+ if (varNum == compiler->lvaSwiftErrorArg) + { + assert((intRegState.rsCalleeRegArgMaskLiveIn & RBM_SWIFT_ERROR) == 0); + continue; + } #endif var_types regType = compiler->mangleVarArgsType(varDsc->TypeGet()); @@ -5382,7 +5393,7 @@ void CodeGen::genFinalizeFrame() noway_assert(!regSet.rsRegsModified(RBM_FPBASE)); #endif - regMaskTP maskCalleeRegsPushed = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; + regMaskTP maskCalleeRegsPushed = regSet.rsGetModifiedCalleeSavedRegsMask(); #ifdef TARGET_ARMARCH if (isFramePointerUsed()) @@ -6062,7 +6073,7 @@ void CodeGen::genFnProlog() #ifdef TARGET_ARM maskStackAlloc = genStackAllocRegisterMask(compiler->compLclFrameSize + extraFrameSize, - regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED); + regSet.rsGetModifiedFltCalleeSavedRegsMask()); #endif // TARGET_ARM if (maskStackAlloc == RBM_NONE) @@ -6137,6 +6148,10 @@ void CodeGen::genFnProlog() GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SWIFT_SELF, compiler->lvaSwiftSelfArg, 0); intRegState.rsCalleeRegArgMaskLiveIn &= ~RBM_SWIFT_SELF; } + else if (compiler->lvaSwiftErrorArg != BAD_VAR_NUM) + { + intRegState.rsCalleeRegArgMaskLiveIn &= ~RBM_SWIFT_ERROR; + } #endif // @@ -7820,6 +7835,17 @@ void CodeGen::genReturn(GenTree* treeNode) genStackPointerCheck(doStackPointerCheck, compiler->lvaReturnSpCheck); #endif // defined(DEBUG) && defined(TARGET_XARCH) + +#ifdef SWIFT_SUPPORT + // If this method has a SwiftError* out parameter, load the SwiftError pseudolocal value into the error register. + // TODO-CQ: Introduce GenTree node that models returning a normal and Swift error value. 
+ if (compiler->lvaSwiftErrorArg != BAD_VAR_NUM) + { + assert(compiler->info.compCallConv == CorInfoCallConvExtension::Swift); + assert(compiler->lvaSwiftErrorLocal != BAD_VAR_NUM); + GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_SWIFT_ERROR, compiler->lvaSwiftErrorLocal, 0); + } +#endif // SWIFT_SUPPORT } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index ec27d2ff8ab4da..0762291af9da01 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -7714,7 +7714,7 @@ void CodeGen::genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroe { assert(compiler->compGeneratingProlog); - regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; + regMaskTP rsPushRegs = regSet.rsGetModifiedCalleeSavedRegsMask(); #if ETW_EBP_FRAMED if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE)) @@ -7879,7 +7879,7 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) { assert(compiler->compGeneratingEpilog); - regMaskTP regsToRestoreMask = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; + regMaskTP regsToRestoreMask = regSet.rsGetModifiedCalleeSavedRegsMask(); assert(isFramePointerUsed()); diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 1d48582c6c316e..0b0199b0646150 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -7792,7 +7792,7 @@ void CodeGen::genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroe // beforehand. We don't care if REG_SCRATCH will be overwritten, so we'll skip 'RegZeroed check'. 
// // Unlike on x86/x64, we can also push float registers to stack - regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; + regMaskTP rsPushRegs = regSet.rsGetModifiedCalleeSavedRegsMask(); #if ETW_EBP_FRAMED if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE)) @@ -7955,7 +7955,7 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) { assert(compiler->compGeneratingEpilog); - regMaskTP regsToRestoreMask = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; + regMaskTP regsToRestoreMask = regSet.rsGetModifiedCalleeSavedRegsMask(); // On RV64 we always use the FP (frame-pointer) assert(isFramePointerUsed()); diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index ede5df1bea39d1..132d4b01ccd240 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -9756,7 +9756,7 @@ void CodeGen::genOSRSaveRemainingCalleeSavedRegisters() // x86/x64 doesn't support push of xmm/ymm regs, therefore consider only integer registers for pushing onto stack // here. Space for float registers to be preserved is stack allocated and saved as part of prolog sequence and not // here. - regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED; + regMaskTP rsPushRegs = regSet.rsGetModifiedOsrIntCalleeSavedRegsMask(); #if ETW_EBP_FRAMED if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE)) @@ -9837,7 +9837,7 @@ void CodeGen::genPushCalleeSavedRegisters() // x86/x64 doesn't support push of xmm/ymm regs, therefore consider only integer registers for pushing onto stack // here. Space for float registers to be preserved is stack allocated and saved as part of prolog sequence and not // here. 
- regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_INT_CALLEE_SAVED; + regMaskTP rsPushRegs = regSet.rsGetModifiedIntCalleeSavedRegsMask(); #if ETW_EBP_FRAMED if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE)) @@ -9895,7 +9895,7 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) // if (doesSupersetOfNormalPops) { - regMaskTP rsPopRegs = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED; + regMaskTP rsPopRegs = regSet.rsGetModifiedOsrIntCalleeSavedRegsMask(); regMaskTP tier0CalleeSaves = ((regMaskTP)compiler->info.compPatchpointInfo->CalleeSaveRegisters()) & RBM_OSR_INT_CALLEE_SAVED; regMaskTP additionalCalleeSaves = rsPopRegs & ~tier0CalleeSaves; @@ -9915,7 +9915,7 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) // Registers saved by a normal prolog // - regMaskTP rsPopRegs = regSet.rsGetModifiedRegsMask() & RBM_INT_CALLEE_SAVED; + regMaskTP rsPopRegs = regSet.rsGetModifiedIntCalleeSavedRegsMask(); const unsigned popCount = genPopCalleeSavedRegistersFromMask(rsPopRegs); noway_assert(compiler->compCalleeRegsPushed == popCount); } @@ -10102,7 +10102,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters(); regMaskTP const tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED; - regMaskTP const osrIntCalleeSaves = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED; + regMaskTP const osrIntCalleeSaves = regSet.rsGetModifiedOsrIntCalleeSavedRegsMask(); regMaskTP const allIntCalleeSaves = osrIntCalleeSaves | tier0IntCalleeSaves; unsigned const tier0FrameSize = patchpointInfo->TotalFrameSize() + REGSIZE_BYTES; unsigned const tier0IntCalleeSaveUsedSize = genCountBits(allIntCalleeSaves) * REGSIZE_BYTES; diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 10dc0ef5f07e5d..3661e8005cb4cd 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5925,7 +5925,7 @@ void 
Compiler::generatePatchpointInfo() // Record callee save registers. // Currently only needed for x64. // - regMaskTP rsPushRegs = codeGen->regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; + regMaskTP rsPushRegs = codeGen->regSet.rsGetModifiedCalleeSavedRegsMask(); rsPushRegs |= RBM_FPBASE; patchpointInfo->SetCalleeSaveRegisters((uint64_t)rsPushRegs); JITDUMP("--OSR-- Tier0 callee saves: "); diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 30fedcd9cd56cc..6ad178b32feedc 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -3876,6 +3876,8 @@ class Compiler #ifdef SWIFT_SUPPORT unsigned lvaSwiftSelfArg; + unsigned lvaSwiftErrorArg; + unsigned lvaSwiftErrorLocal; #endif #if defined(DEBUG) && defined(TARGET_XARCH) @@ -4005,7 +4007,7 @@ class Compiler void lvaClassifyParameterABI(); - bool lvaInitSpecialSwiftParam(InitVarDscInfo* varDscInfo, CorInfoType type, CORINFO_CLASS_HANDLE typeHnd); + bool lvaInitSpecialSwiftParam(CORINFO_ARG_LIST_HANDLE argHnd, InitVarDscInfo* varDscInfo, CorInfoType type, CORINFO_CLASS_HANDLE typeHnd); var_types lvaGetActualType(unsigned lclNum); var_types lvaGetRealType(unsigned lclNum); @@ -4420,12 +4422,12 @@ class Compiler void impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block); GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo()); - void impPopArgsForUnmanagedCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, CallArg** swiftErrorArg); - void impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, CallArg** swiftErrorArg); + void impPopArgsForUnmanagedCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, GenTree** swiftErrorNode); + void impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, GenTree** swiftErrorNode); void impRetypeUnmanagedCallArgs(GenTreeCall* call); #ifdef SWIFT_SUPPORT - void impAppendSwiftErrorStore(GenTreeCall* call, CallArg* const 
swiftErrorArg); + void impAppendSwiftErrorStore(GenTree* const swiftErrorNode); #endif // SWIFT_SUPPORT void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall); diff --git a/src/coreclr/jit/gtlist.h b/src/coreclr/jit/gtlist.h index 00c117f8eb83c3..817b27a936a561 100644 --- a/src/coreclr/jit/gtlist.h +++ b/src/coreclr/jit/gtlist.h @@ -37,7 +37,6 @@ GTNODE(LABEL , GenTree ,0,0,GTK_LEAF) // Jump- GTNODE(JMP , GenTreeVal ,0,0,GTK_LEAF|GTK_NOVALUE) // Jump to another function GTNODE(FTN_ADDR , GenTreeFptrVal ,0,0,GTK_LEAF) // Address of a function GTNODE(RET_EXPR , GenTreeRetExpr ,0,0,GTK_LEAF|DBK_NOTLIR) // Place holder for the return expression from an inline candidate -GTNODE(SWIFT_ERROR , GenTree ,0,0,GTK_LEAF) // Error register value post-Swift call //----------------------------------------------------------------------------- // Constant nodes: @@ -287,6 +286,12 @@ GTNODE(RETFILT , GenTreeOp ,0,1,GTK_UNOP|GTK_NOVALUE) // End f GTNODE(END_LFIN , GenTreeVal ,0,0,GTK_LEAF|GTK_NOVALUE) // End locally-invoked finally. 
#endif // !FEATURE_EH_FUNCLETS +//----------------------------------------------------------------------------- +// Swift interop-specific nodes: +//----------------------------------------------------------------------------- + +GTNODE(SWIFT_ERROR , GenTree ,0,0,GTK_LEAF) // Error register value post-Swift call + //----------------------------------------------------------------------------- // Nodes used by Lower to generate a closer CPU representation of other nodes //----------------------------------------------------------------------------- diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index bb5231b11ce63f..4a1590fb7e0bfb 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -10462,6 +10462,21 @@ void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset) { lclNum = lvaArg0Var; } +#ifdef SWIFT_SUPPORT + else if (lclNum == lvaSwiftErrorArg) + { + // Convert any usages of the SwiftError pointer/ref parameter to pointers/refs to the SwiftError pseudolocal + // (set side effect flags so usages of references to pseudolocal aren't removed) + assert(info.compCallConv == CorInfoCallConvExtension::Swift); + assert(lvaSwiftErrorArg != BAD_VAR_NUM); + assert(lvaSwiftErrorLocal != BAD_VAR_NUM); + const var_types type = lvaGetDesc(lvaSwiftErrorArg)->TypeGet(); + GenTree* const swiftErrorLocalRef = gtNewLclVarAddrNode(lvaSwiftErrorLocal, type); + impPushOnStack(swiftErrorLocalRef, typeInfo(type)); + JITDUMP("\nCreated GT_LCL_ADDR of SwiftError pseudolocal\n"); + return; + } +#endif // SWIFT_SUPPORT impLoadVar(lclNum, offset); } diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index 52fdf5ab3cd476..24d7370b8f985a 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -100,7 +100,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // Swift calls that might throw use a SwiftError* arg that requires additional IR to handle, // so if we're importing a 
Swift call, look for this type in the signature - CallArg* swiftErrorArg = nullptr; + GenTree* swiftErrorNode = nullptr; /*------------------------------------------------------------------------- * First create the call node @@ -669,7 +669,7 @@ var_types Compiler::impImportCall(OPCODE opcode, checkForSmallType = true; - impPopArgsForUnmanagedCall(call->AsCall(), sig, &swiftErrorArg); + impPopArgsForUnmanagedCall(call->AsCall(), sig, &swiftErrorNode); goto DONE; } @@ -1502,9 +1502,9 @@ var_types Compiler::impImportCall(OPCODE opcode, #ifdef SWIFT_SUPPORT // If call is a Swift call with error handling, append additional IR // to handle storing the error register's value post-call. - if (swiftErrorArg != nullptr) + if (swiftErrorNode != nullptr) { - impAppendSwiftErrorStore(call->AsCall(), swiftErrorArg); + impAppendSwiftErrorStore(swiftErrorNode); } #endif // SWIFT_SUPPORT @@ -1844,17 +1844,18 @@ GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugI // Arguments: // call - The unmanaged call // sig - The signature of the call site -// swiftErrorArg - [out] If this is a Swift call with a SwiftError* argument, then the argument is returned here. -// Otherwise left at its existing value. +// swiftErrorNode - [out] If this is a Swift call with a SwiftError* argument, +// then swiftErrorNode points to the node. +// Otherwise left at its existing value. 
// -void Compiler::impPopArgsForUnmanagedCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, CallArg** swiftErrorArg) +void Compiler::impPopArgsForUnmanagedCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, GenTree** swiftErrorNode) { assert(call->gtFlags & GTF_CALL_UNMANAGED); #ifdef SWIFT_SUPPORT if (call->unmgdCallConv == CorInfoCallConvExtension::Swift) { - impPopArgsForSwiftCall(call, sig, swiftErrorArg); + impPopArgsForSwiftCall(call, sig, swiftErrorNode); return; } #endif @@ -2000,12 +2001,12 @@ const CORINFO_SWIFT_LOWERING* Compiler::GetSwiftLowering(CORINFO_CLASS_HANDLE hC // impPopArgsForSwiftCall: Pop arguments from IL stack to a Swift pinvoke node. // // Arguments: -// call - The Swift call -// sig - The signature of the call site -// swiftErrorArg - [out] An argument that represents the SwiftError* -// argument. Left at its existing value if no such argument exists. +// call - The Swift call +// sig - The signature of the call site +// swiftErrorNode - [out] Pointer to the SwiftError* argument. +// Left at its existing value if no such argument exists. 
// -void Compiler::impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, CallArg** swiftErrorArg) +void Compiler::impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, GenTree** swiftErrorNode) { JITDUMP("Creating args for Swift call [%06u]\n", dspTreeID(call)); @@ -2023,13 +2024,12 @@ void Compiler::impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, { CORINFO_CLASS_HANDLE argClass; CorInfoType argType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass)); - bool argIsByrefOrPtr = false; + const bool argIsByrefOrPtr = (argType == CORINFO_TYPE_BYREF) || (argType == CORINFO_TYPE_PTR); - if ((argType == CORINFO_TYPE_BYREF) || (argType == CORINFO_TYPE_PTR)) + if (argIsByrefOrPtr) { - argClass = info.compCompHnd->getArgClass(sig, sigArg); - argType = info.compCompHnd->getChildType(argClass, &argClass); - argIsByrefOrPtr = true; + argClass = info.compCompHnd->getArgClass(sig, sigArg); + argType = info.compCompHnd->getChildType(argClass, &argClass); } if (argType != CORINFO_TYPE_VALUECLASS) @@ -2115,10 +2115,9 @@ void Compiler::impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, DISPTREE(call); JITDUMP("\n"); - if (swiftErrorIndex != sig->numArgs) - { - *swiftErrorArg = call->gtArgs.GetArgByIndex(swiftErrorIndex); - } + // Get SwiftError* arg (if it exists) before modifying the arg list + CallArg* const swiftErrorArg = + (swiftErrorIndex != sig->numArgs) ? call->gtArgs.GetArgByIndex(swiftErrorIndex) : nullptr; // Now expand struct args that must be lowered into primitives unsigned argIndex = 0; @@ -2261,6 +2260,23 @@ void Compiler::impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, arg = insertAfter->GetNext(); } + if (swiftErrorArg != nullptr) + { + // Before calling a Swift method that may throw, the error register must be cleared, + // as we will check for a nonzero error value after the call returns. 
+ // By adding a well-known "sentinel" argument that uses the error register, + // the JIT will emit code for clearing the error register before the call, + // and will mark the error register as busy so it isn't used to hold the function call's address. + GenTree* const errorSentinelValueNode = gtNewIconNode(0); + call->gtArgs.InsertAfter(this, swiftErrorArg, + NewCallArg::Primitive(errorSentinelValueNode).WellKnown(WellKnownArg::SwiftError)); + + // Swift call isn't going to use the SwiftError* arg, so don't bother emitting it + assert(swiftErrorNode != nullptr); + *swiftErrorNode = swiftErrorArg->GetNode(); + call->gtArgs.Remove(swiftErrorArg); + } + #ifdef DEBUG if (verbose && call->TypeIs(TYP_STRUCT) && (sig->retTypeClass != NO_CLASS_HANDLE)) { @@ -2291,39 +2307,22 @@ void Compiler::impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, //------------------------------------------------------------------------ // impAppendSwiftErrorStore: Append IR to store the Swift error register value -// to the SwiftError* argument specified by swiftErrorArg, post-Swift call +// to the SwiftError* argument represented by swiftErrorNode, post-Swift call // // Arguments: -// call - the Swift call -// swiftErrorArg - the SwiftError* argument passed to call +// swiftErrorNode - the SwiftError* argument // -void Compiler::impAppendSwiftErrorStore(GenTreeCall* call, CallArg* const swiftErrorArg) +void Compiler::impAppendSwiftErrorStore(GenTree* const swiftErrorNode) { - assert(call != nullptr); - assert(call->unmgdCallConv == CorInfoCallConvExtension::Swift); - assert(swiftErrorArg != nullptr); - - GenTree* const argNode = swiftErrorArg->GetNode(); - assert(argNode != nullptr); + assert(swiftErrorNode != nullptr); // Store the error register value to where the SwiftError* points to GenTree* errorRegNode = new (this, GT_SWIFT_ERROR) GenTree(GT_SWIFT_ERROR, TYP_I_IMPL); errorRegNode->SetHasOrderingSideEffect(); errorRegNode->gtFlags |= (GTF_CALL | GTF_GLOB_REF); - 
GenTreeStoreInd* swiftErrorStore = gtNewStoreIndNode(argNode->TypeGet(), argNode, errorRegNode); + GenTreeStoreInd* swiftErrorStore = gtNewStoreIndNode(swiftErrorNode->TypeGet(), swiftErrorNode, errorRegNode); impAppendTree(swiftErrorStore, CHECK_SPILL_ALL, impCurStmtDI, false); - - // Before calling a Swift method that may throw, the error register must be cleared for the error check to work. - // By adding a well-known "sentinel" argument that uses the error register, - // the JIT will emit code for clearing the error register before the call, and will mark the error register as busy - // so that it isn't used to hold the function call's address. - GenTree* errorSentinelValueNode = gtNewIconNode(0); - call->gtArgs.InsertAfter(this, swiftErrorArg, - NewCallArg::Primitive(errorSentinelValueNode).WellKnown(WellKnownArg::SwiftError)); - - // Swift call isn't going to use the SwiftError* arg, so don't bother emitting it - call->gtArgs.Remove(swiftErrorArg); } #endif // SWIFT_SUPPORT diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 0de4f52eee7560..6b3650416574c0 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -72,7 +72,8 @@ void Compiler::lvaInit() lvaRetAddrVar = BAD_VAR_NUM; #ifdef SWIFT_SUPPORT - lvaSwiftSelfArg = BAD_VAR_NUM; + lvaSwiftSelfArg = BAD_VAR_NUM; + lvaSwiftErrorArg = BAD_VAR_NUM; #endif lvaInlineeReturnSpillTemp = BAD_VAR_NUM; @@ -662,7 +663,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un #ifdef SWIFT_SUPPORT if ((info.compCallConv == CorInfoCallConvExtension::Swift) && - lvaInitSpecialSwiftParam(varDscInfo, strip(corInfoType), typeHnd)) + lvaInitSpecialSwiftParam(argLst, varDscInfo, strip(corInfoType), typeHnd)) { continue; } @@ -1366,19 +1367,32 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un #ifdef SWIFT_SUPPORT //----------------------------------------------------------------------------- -// lvaInitSpecialSwiftParam: 
-// If the parameter is a special Swift parameter then initialize it and return true. +// lvaInitSpecialSwiftParam: Initialize SwiftSelf/SwiftError* parameters. // // Parameters: +// argHnd - Handle for this parameter in the method's signature // varDsc - LclVarDsc* for the parameter // type - Type of the parameter // typeHnd - Class handle for the type of the parameter // -// Remarks: -// Handles SwiftSelf. +// Returns: +// true if parameter was initialized // -bool Compiler::lvaInitSpecialSwiftParam(InitVarDscInfo* varDscInfo, CorInfoType type, CORINFO_CLASS_HANDLE typeHnd) +bool Compiler::lvaInitSpecialSwiftParam(CORINFO_ARG_LIST_HANDLE argHnd, + InitVarDscInfo* varDscInfo, + CorInfoType type, + CORINFO_CLASS_HANDLE typeHnd) { + const bool argIsByrefOrPtr = (type == CORINFO_TYPE_BYREF) || (type == CORINFO_TYPE_PTR); + + if (argIsByrefOrPtr) + { + // For primitive types, we don't expect to be passed a CORINFO_CLASS_HANDLE; look up the actual handle + assert(typeHnd == nullptr); + CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getArgClass(&info.compMethodInfo->args, argHnd); + type = info.compCompHnd->getChildType(clsHnd, &typeHnd); + } + if (type != CORINFO_TYPE_VALUECLASS) { return false; @@ -1393,7 +1407,17 @@ bool Compiler::lvaInitSpecialSwiftParam(InitVarDscInfo* varDscInfo, CorInfoType const char* className = info.compCompHnd->getClassNameFromMetadata(typeHnd, &namespaceName); if ((strcmp(className, "SwiftSelf") == 0) && (strcmp(namespaceName, "System.Runtime.InteropServices.Swift") == 0)) { - LclVarDsc* varDsc = varDscInfo->varDsc; + if (argIsByrefOrPtr) + { + BADCODE("Expected SwiftSelf struct, got pointer/reference"); + } + + if (lvaSwiftSelfArg != BAD_VAR_NUM) + { + BADCODE("Duplicate SwiftSelf parameter"); + } + + LclVarDsc* const varDsc = varDscInfo->varDsc; varDsc->SetArgReg(REG_SWIFT_SELF); varDsc->SetOtherArgReg(REG_NA); varDsc->lvIsRegArg = true; @@ -1405,6 +1429,34 @@ bool Compiler::lvaInitSpecialSwiftParam(InitVarDscInfo* varDscInfo, CorInfoType 
return true; } + if ((strcmp(className, "SwiftError") == 0) && (strcmp(namespaceName, "System.Runtime.InteropServices.Swift") == 0)) + { + if (!argIsByrefOrPtr) + { + BADCODE("Expected SwiftError pointer/reference, got struct"); + } + + if (lvaSwiftErrorArg != BAD_VAR_NUM) + { + BADCODE("Duplicate SwiftError* parameter"); + } + + // We won't actually be passing this SwiftError* in REG_SWIFT_ERROR (or any register, for that matter). + // We will check for this quirk when generating the prolog, + // and ensure this fake parameter doesn't take any registers/stack space + LclVarDsc* const varDsc = varDscInfo->varDsc; + varDsc->SetArgReg(REG_SWIFT_ERROR); + varDsc->SetOtherArgReg(REG_NA); + varDsc->lvIsRegArg = true; + lvaSwiftErrorArg = varDscInfo->varNum; + + // Instead, all usages of the SwiftError* parameter will be redirected to this pseudolocal. + lvaSwiftErrorLocal = lvaGrabTempWithImplicitUse(false DEBUGARG("SwiftError pseudolocal")); + lvaSetStruct(lvaSwiftErrorLocal, typeHnd, false); + lvaSetVarAddrExposed(lvaSwiftErrorLocal DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS)); + return true; + } + return false; } #endif @@ -1644,6 +1696,10 @@ void Compiler::lvaClassifyParameterABI(Classifier& classifier) { wellKnownArg = WellKnownArg::SwiftSelf; } + else if (i == lvaSwiftErrorArg) + { + wellKnownArg = WellKnownArg::SwiftError; + } #endif lvaParameterPassingInfo[i] = classifier.Classify(this, dsc->TypeGet(), structLayout, wellKnownArg); @@ -1761,7 +1817,7 @@ void Compiler::lvaClassifyParameterABI() } } } -#endif +#endif // DEBUG } /***************************************************************************** diff --git a/src/coreclr/jit/regset.cpp b/src/coreclr/jit/regset.cpp index 5f5c80a4a19d6c..12975850a404ba 100644 --- a/src/coreclr/jit/regset.cpp +++ b/src/coreclr/jit/regset.cpp @@ -117,6 +117,16 @@ void RegSet::rsClearRegsModified() #endif // DEBUG rsModifiedRegsMask = RBM_NONE; + +#ifdef SWIFT_SUPPORT + // If this method has a SwiftError* parameter, we will 
return SwiftError::Value in REG_SWIFT_ERROR, + // so don't treat it as callee-save. + if (m_rsCompiler->lvaSwiftErrorArg != BAD_VAR_NUM) + { + rsAllCalleeSavedMask &= ~RBM_SWIFT_ERROR; + rsIntCalleeSavedMask &= ~RBM_SWIFT_ERROR; + } +#endif // SWIFT_SUPPORT } void RegSet::rsSetRegsModified(regMaskTP mask DEBUGARG(bool suppressDump)) @@ -258,6 +268,11 @@ RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo) rsMaskPreSpillAlign = RBM_NONE; #endif +#ifdef SWIFT_SUPPORT + rsAllCalleeSavedMask = RBM_CALLEE_SAVED; + rsIntCalleeSavedMask = RBM_INT_CALLEE_SAVED; +#endif // SWIFT_SUPPORT + #ifdef DEBUG rsModifiedRegsMaskInitialized = false; #endif // DEBUG diff --git a/src/coreclr/jit/regset.h b/src/coreclr/jit/regset.h index 0924c410e3b85b..dae93baebad306 100644 --- a/src/coreclr/jit/regset.h +++ b/src/coreclr/jit/regset.h @@ -74,6 +74,14 @@ class RegSet bool rsModifiedRegsMaskInitialized; // Has rsModifiedRegsMask been initialized? Guards against illegal use. #endif // DEBUG +#ifdef SWIFT_SUPPORT + regMaskTP rsAllCalleeSavedMask; + regMaskTP rsIntCalleeSavedMask; +#else // !SWIFT_SUPPORT + static constexpr regMaskTP rsAllCalleeSavedMask = RBM_CALLEE_SAVED; + static constexpr regMaskTP rsIntCalleeSavedMask = RBM_INT_CALLEE_SAVED; +#endif // !SWIFT_SUPPORT + public: regMaskTP rsGetModifiedRegsMask() const { @@ -81,6 +89,32 @@ class RegSet return rsModifiedRegsMask; } + regMaskTP rsGetModifiedCalleeSavedRegsMask() const + { + assert(rsModifiedRegsMaskInitialized); + return (rsModifiedRegsMask & rsAllCalleeSavedMask); + } + + regMaskTP rsGetModifiedIntCalleeSavedRegsMask() const + { + assert(rsModifiedRegsMaskInitialized); + return (rsModifiedRegsMask & rsIntCalleeSavedMask); + } + +#ifdef TARGET_AMD64 + regMaskTP rsGetModifiedOsrIntCalleeSavedRegsMask() const + { + assert(rsModifiedRegsMaskInitialized); + return (rsModifiedRegsMask & (rsIntCalleeSavedMask | RBM_EBP)); + } +#endif // TARGET_AMD64 + + regMaskTP rsGetModifiedFltCalleeSavedRegsMask() const + { + 
assert(rsModifiedRegsMaskInitialized); + return (rsModifiedRegsMask & RBM_FLT_CALLEE_SAVED); + } + void rsClearRegsModified(); void rsSetRegsModified(regMaskTP mask DEBUGARG(bool suppressDump = false)); diff --git a/src/coreclr/jit/target.h b/src/coreclr/jit/target.h index 4b1461efde0fe3..06777fa9d5f709 100644 --- a/src/coreclr/jit/target.h +++ b/src/coreclr/jit/target.h @@ -498,6 +498,12 @@ inline regMaskTP fullIntArgRegMask(CorInfoCallConvExtension callConv) if (callConv == CorInfoCallConvExtension::Swift) { result |= RBM_SWIFT_SELF; + + // We don't pass any arguments in REG_SWIFT_ERROR, but as a quirk, + // we set the SwiftError* parameter to be passed in this register, + // and later ensure the parameter isn't given any registers/stack space + // to avoid interfering with other arguments. + result |= RBM_SWIFT_ERROR; } #endif diff --git a/src/coreclr/jit/targetamd64.h b/src/coreclr/jit/targetamd64.h index a15029c3c39249..5d37870d03b03a 100644 --- a/src/coreclr/jit/targetamd64.h +++ b/src/coreclr/jit/targetamd64.h @@ -575,6 +575,6 @@ #define REG_SWIFT_ARG_RET_BUFF REG_RAX #define RBM_SWIFT_ARG_RET_BUFF RBM_RAX #define SWIFT_RET_BUFF_ARGNUM MAX_REG_ARG -#endif +#endif // UNIX_AMD64_ABI // clang-format on diff --git a/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs b/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs index d4b81bafcd4c8a..b1575e04deabd1 100644 --- a/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs +++ b/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs @@ -23,6 +23,40 @@ public class ErrorHandlingTests [DllImport(SwiftLib, EntryPoint = "$s18SwiftErrorHandling018conditionallyThrowB004willE0s5Int32VAE_tKF")] public static extern nint conditionallyThrowErrorOnStack(int willThrow, int dummy1, int dummy2, int dummy3, int dummy4, int dummy5, int dummy6, int dummy7, int dummy8, int dummy9, ref SwiftError error); + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + 
[DllImport(SwiftLib, EntryPoint = "$s18SwiftErrorHandling26nativeFunctionWithCallback03setB0_ys5Int32V_yAEXEtF")] + public static extern unsafe void NativeFunctionWithCallback(int setError, delegate* unmanaged[Swift] callback, SwiftError* error); + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s18SwiftErrorHandling26nativeFunctionWithCallback5value03setB0_s5Int32VAF_A3F_AFtXEtF")] + public static extern unsafe int NativeFunctionWithCallback(int value, int setError, delegate* unmanaged[Swift] callback, SwiftError* error); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static unsafe void ConditionallySetErrorTo21(SwiftError* error, int setError) { + if (setError != 0) + { + *error = new SwiftError((void*)21); + } + else + { + *error = new SwiftError(null); + } + } + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static unsafe int ConditionallySetErrorAndReturn(SwiftError* error, int value, int setError) { + if (setError != 0) + { + *error = new SwiftError((void*)value); + } + else + { + *error = new SwiftError(null); + } + + return (value * 2); + } + [DllImport(SwiftLib, EntryPoint = "$s18SwiftErrorHandling05getMyB7Message4from13messageLengthSPys6UInt16VGSgs0B0_p_s5Int32VztF")] public unsafe static extern void* GetErrorMessage(void* handle, out int length); @@ -92,6 +126,40 @@ public unsafe static void TestSwiftErrorOnStackNotThrown() Assert.True(error.Value == null, "No Swift error was expected to be thrown."); Assert.True(result == 42, "The result from Swift does not match the expected value."); } + + [Fact] + [SkipOnMono("needs reverse P/Invoke support")] + public static unsafe void TestUnmanagedCallersOnly() + { + SwiftError error; + int expectedValue = 21; + NativeFunctionWithCallback(1, &ConditionallySetErrorTo21, &error); + + int value = (int)error.Value; + Assert.True(value == expectedValue, string.Format("The value 
retrieved does not match the expected value. Expected: {0}, Actual: {1}", expectedValue, value)); + + NativeFunctionWithCallback(0, &ConditionallySetErrorTo21, &error); + + Assert.True(error.Value == null, "Expected SwiftError value to be null."); + } + + [Fact] + [SkipOnMono("needs reverse P/Invoke support")] + public static unsafe void TestUnmanagedCallersOnlyWithReturn() + { + SwiftError error; + int expectedValue = 42; + int retValue = NativeFunctionWithCallback(expectedValue, 1, &ConditionallySetErrorAndReturn, &error); + + int value = (int)error.Value; + Assert.True(value == expectedValue, string.Format("The value retrieved does not match the expected value. Expected: {0}, Actual: {1}", expectedValue, value)); + Assert.True(retValue == (expectedValue * 2), string.Format("Return value does not match expected value. Expected: {0}, Actual: {1}", (expectedValue * 2), retValue)); + + retValue = NativeFunctionWithCallback(expectedValue, 0, &ConditionallySetErrorAndReturn, &error); + + Assert.True(error.Value == null, "Expected SwiftError value to be null."); + Assert.True(retValue == (expectedValue * 2), string.Format("Return value does not match expected value. 
Expected: {0}, Actual: {1}", (expectedValue * 2), retValue)); + } private static void SetErrorMessageForSwift(string message) { diff --git a/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.swift b/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.swift index 5058014a42ce3a..9067ea2372dba3 100644 --- a/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.swift +++ b/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.swift @@ -37,3 +37,11 @@ public func getMyErrorMessage(from error: Error, messageLength: inout Int32) -> public func freeStringBuffer(buffer: UnsafeMutablePointer) { buffer.deallocate() } + +public func nativeFunctionWithCallback(setError: Int32, _ callback: (Int32) -> Void) { + callback(setError) +} + +public func nativeFunctionWithCallback(value: Int32, setError: Int32, _ callback: (Int32, Int32) -> Int32) -> Int32 { + return callback(value, setError) +} From 6ae3df61746d18680b72a3b98c9873beec160a77 Mon Sep 17 00:00:00 2001 From: Bruce Forstall Date: Thu, 4 Apr 2024 12:15:08 -0700 Subject: [PATCH 099/132] Remove CLANG_FORMAT_COMMENT_ANCHOR (#100615) * Remove CLANG_FORMAT_COMMENT_ANCHOR Not needed after #100498 * Fix build * Formatting --- src/coreclr/jit/codegen.h | 2 -- src/coreclr/jit/codegenarmarch.cpp | 1 - src/coreclr/jit/codegencommon.cpp | 40 ++++++++--------------- src/coreclr/jit/codegenlinear.cpp | 2 -- src/coreclr/jit/codegenloongarch64.cpp | 1 - src/coreclr/jit/codegenriscv64.cpp | 1 - src/coreclr/jit/codegenxarch.cpp | 5 --- src/coreclr/jit/compiler.cpp | 6 ---- src/coreclr/jit/compiler.h | 4 --- src/coreclr/jit/compiler.hpp | 3 -- src/coreclr/jit/decomposelongs.cpp | 1 - src/coreclr/jit/ee_il_dll.cpp | 2 -- src/coreclr/jit/emit.cpp | 10 ------ src/coreclr/jit/emit.h | 21 ++++-------- src/coreclr/jit/emitloongarch64.cpp | 2 -- src/coreclr/jit/emitriscv64.cpp | 2 -- src/coreclr/jit/emitxarch.cpp | 4 --- src/coreclr/jit/fgbasic.cpp | 2 -- src/coreclr/jit/fginline.cpp | 1 - 
src/coreclr/jit/fgopt.cpp | 2 -- src/coreclr/jit/flowgraph.cpp | 1 - src/coreclr/jit/gcencode.cpp | 7 ---- src/coreclr/jit/gentree.cpp | 3 -- src/coreclr/jit/gentree.h | 4 --- src/coreclr/jit/importer.cpp | 24 +++++++------- src/coreclr/jit/importercalls.cpp | 3 -- src/coreclr/jit/inductionvariableopts.cpp | 1 - src/coreclr/jit/jit.h | 4 --- src/coreclr/jit/jiteh.cpp | 6 ---- src/coreclr/jit/jitgcinfo.h | 1 - src/coreclr/jit/lclvars.cpp | 9 ----- src/coreclr/jit/liveness.cpp | 1 - src/coreclr/jit/lower.cpp | 8 ----- src/coreclr/jit/lsra.cpp | 6 ---- src/coreclr/jit/lsra.h | 1 - src/coreclr/jit/lsrabuild.cpp | 1 - src/coreclr/jit/lsraxarch.cpp | 4 --- src/coreclr/jit/morph.cpp | 13 -------- src/coreclr/jit/optimizer.cpp | 2 -- src/coreclr/jit/stacklevelsetter.cpp | 1 - src/coreclr/jit/targetx86.h | 1 - src/coreclr/jit/unwindarmarch.cpp | 2 -- src/coreclr/jit/unwindloongarch64.cpp | 2 -- src/coreclr/jit/unwindriscv64.cpp | 2 -- src/coreclr/jit/utils.cpp | 2 -- 45 files changed, 31 insertions(+), 190 deletions(-) diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h index 0ab8a81d89ef93..c5e65b081583df 100644 --- a/src/coreclr/jit/codegen.h +++ b/src/coreclr/jit/codegen.h @@ -173,7 +173,6 @@ class CodeGen final : public CodeGenInterface // the GC info. Requires "codeSize" to be the size of the generated code, "prologSize" and "epilogSize" // to be the sizes of the prolog and epilog, respectively. In DEBUG, makes a check involving the // "codePtr", assumed to be a pointer to the start of the generated code. 
- CLANG_FORMAT_COMMENT_ANCHOR; #ifdef JIT32_GCENCODER void* genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)); @@ -529,7 +528,6 @@ class CodeGen final : public CodeGenInterface // // Epilog functions // - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM) bool genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog); diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index 12512a958e08e4..a9e2a41f73f945 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -5708,7 +5708,6 @@ void CodeGen::genFnEpilog(BasicBlock* block) 0, // disp true); // isJump // clang-format on - CLANG_FORMAT_COMMENT_ANCHOR; #endif // TARGET_ARMARCH } #if FEATURE_FASTTAILCALL diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 0502339718f8fc..eed9a96a981724 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -43,7 +43,6 @@ void CodeGenInterface::setFramePointerRequiredEH(bool value) // if they are fully-interruptible. So if we have a catch // or finally that will keep frame-vars alive, we need to // force fully-interruptible. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) @@ -392,26 +391,25 @@ void CodeGen::genMarkLabelsForCodegen() case BBJ_CALLFINALLY: // The finally target itself will get marked by walking the EH table, below, and marking // all handler begins. - CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_EH_CALLFINALLY_THUNKS + { + // For callfinally thunks, we need to mark the block following the callfinally/callfinallyret pair, + // as that's needed for identifying the range of the "duplicate finally" region in EH data. 
+ BasicBlock* bbToLabel = block->Next(); + if (block->isBBCallFinallyPair()) { - // For callfinally thunks, we need to mark the block following the callfinally/callfinallyret pair, - // as that's needed for identifying the range of the "duplicate finally" region in EH data. - BasicBlock* bbToLabel = block->Next(); - if (block->isBBCallFinallyPair()) - { - bbToLabel = bbToLabel->Next(); // skip the BBJ_CALLFINALLYRET - } - if (bbToLabel != nullptr) - { - JITDUMP(" " FMT_BB " : callfinally thunk region end\n", bbToLabel->bbNum); - bbToLabel->SetFlags(BBF_HAS_LABEL); - } + bbToLabel = bbToLabel->Next(); // skip the BBJ_CALLFINALLYRET } + if (bbToLabel != nullptr) + { + JITDUMP(" " FMT_BB " : callfinally thunk region end\n", bbToLabel->bbNum); + bbToLabel->SetFlags(BBF_HAS_LABEL); + } + } #endif // FEATURE_EH_CALLFINALLY_THUNKS - break; + break; case BBJ_CALLFINALLYRET: JITDUMP(" " FMT_BB " : finally continuation\n", block->GetFinallyContinuation()->bbNum); @@ -932,7 +930,6 @@ void CodeGen::genAdjustStackLevel(BasicBlock* block) { #if !FEATURE_FIXED_OUT_ARGS // Check for inserted throw blocks and adjust genStackLevel. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(UNIX_X86_ABI) if (isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block)) @@ -1081,7 +1078,6 @@ bool CodeGen::genCreateAddrMode(GenTree* addr, constant, or we have gone through a GT_NOP or GT_COMMA node. We never come back here if we find a scaled index. */ - CLANG_FORMAT_COMMENT_ANCHOR; assert(mul == 0); @@ -3436,7 +3432,6 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere /* At this point, everything that has the "circular" flag * set to "true" forms a circular dependency */ - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (regArgMaskLive) @@ -4504,7 +4499,6 @@ void CodeGen::genCheckUseBlockInit() // find structs that are guaranteed to be block initialized. // If this logic changes, Compiler::fgVarNeedsExplicitZeroInit needs // to be modified. 
- CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT #if defined(TARGET_AMD64) @@ -5311,7 +5305,6 @@ void CodeGen::genFinalizeFrame() genCheckUseBlockInit(); // Set various registers as "modified" for special code generation scenarios: Edit & Continue, P/Invoke calls, etc. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) @@ -5778,7 +5771,6 @@ void CodeGen::genFnProlog() // If there is a frame pointer used, due to frame pointer chaining it will point to the stored value of the // previous frame pointer. Thus, stkOffs can't be zero. - CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_AMD64) // However, on amd64 there is no requirement to chain frame pointers. @@ -6066,7 +6058,6 @@ void CodeGen::genFnProlog() // Subtract the local frame size from SP. // //------------------------------------------------------------------------- - CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) regMaskTP maskStackAlloc = RBM_NONE; @@ -6256,8 +6247,7 @@ void CodeGen::genFnProlog() // we've set the live-in regs with values from the Tier0 frame. // // Otherwise we'll do some of these fetches twice. - // - CLANG_FORMAT_COMMENT_ANCHOR; + #if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) genEnregisterOSRArgsAndLocals(initReg, &initRegZeroed); #else @@ -6648,7 +6638,6 @@ void CodeGen::genGeneratePrologsAndEpilogs() genFnProlog(); // Generate all the prologs and epilogs. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(FEATURE_EH_FUNCLETS) @@ -8487,7 +8476,6 @@ void CodeGen::genPoisonFrame(regMaskTP regLiveIn) if ((size / TARGET_POINTER_SIZE) > 16) { // This will require more than 16 instructions, switch to rep stosd/memset call. 
- CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_XARCH) GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_EDI, (int)varNum, 0); assert(size % 4 == 0); diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index 2d8a2093454f86..038f9fea696bbf 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -396,7 +396,6 @@ void CodeGen::genCodeForBBlist() // Traverse the block in linear order, generating code for each node as we // as we encounter it. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG // Set the use-order numbers for each node. @@ -1780,7 +1779,6 @@ void CodeGen::genConsumePutStructArgStk(GenTreePutArgStk* putArgNode, // If the op1 is already in the dstReg - nothing to do. // Otherwise load the op1 (the address) into the dstReg to copy the struct on the stack by value. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 assert(dstReg != REG_SPBASE); diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index 0762291af9da01..d6f18005c767c5 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1307,7 +1307,6 @@ void CodeGen::genFnEpilog(BasicBlock* block) 0, // disp true); // isJump // clang-format on - CLANG_FORMAT_COMMENT_ANCHOR; } #if FEATURE_FASTTAILCALL else diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 0b0199b0646150..17d30e5ada2754 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -1294,7 +1294,6 @@ void CodeGen::genFnEpilog(BasicBlock* block) 0, // disp true); // isJump // clang-format on - CLANG_FORMAT_COMMENT_ANCHOR; } #if FEATURE_FASTTAILCALL else diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 132d4b01ccd240..77045cce0875ad 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -1719,7 +1719,6 @@ void CodeGen::inst_JMP(emitJumpKind jmp, BasicBlock* 
tgtBlock, bool isRemovableJ // // Thus only on x86 do we need to assert that the stack level at the target block matches the current stack level. // - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef UNIX_X86_ABI // bbTgtStkDepth is a (pure) argument count (stack alignment padding should be excluded). @@ -6638,7 +6637,6 @@ void CodeGen::genJmpMethod(GenTree* jmp) #endif // !defined(UNIX_AMD64_ABI) { // Register argument - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 noway_assert(isRegParamType(genActualType(varDsc->TypeGet())) || ((varDsc->TypeGet() == TYP_STRUCT) && @@ -9125,7 +9123,6 @@ void CodeGen::genAmd64EmitterUnitTestsSse2() // // Loads // - CLANG_FORMAT_COMMENT_ANCHOR; genDefineTempLabel(genCreateTempLabel()); @@ -10742,7 +10739,6 @@ void CodeGen::genFuncletProlog(BasicBlock* block) compiler->unwindEndProlog(); // TODO We may need EBP restore sequence here if we introduce PSPSym - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef UNIX_X86_ABI // Add a padding for 16-byte alignment @@ -10890,7 +10886,6 @@ void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNu else { // Grab a non-argument, non-callee saved XMM reg - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef UNIX_AMD64_ABI // System V x64 first temp reg is xmm8 regNumber zeroSIMDReg = genRegNumFromMask(RBM_XMM8); diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 3661e8005cb4cd..697c76527afe55 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -758,7 +758,6 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be passed in multiple registers - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) @@ -1908,7 +1907,6 @@ void Compiler::compInit(ArenaAllocator* pAlloc, // // Initialize all the per-method statistics gathering data structures. 
// - CLANG_FORMAT_COMMENT_ANCHOR; #if LOOP_HOIST_STATS m_loopsConsidered = 0; m_curLoopHasHoistedExpression = false; @@ -2279,7 +2277,6 @@ void Compiler::compSetProcessor() // // Processor specific optimizations // - CLANG_FORMAT_COMMENT_ANCHOR; CORINFO_InstructionSetFlags instructionSetFlags = jitFlags.GetInstructionSetFlags(); opts.compSupportsISA.Reset(); @@ -2880,7 +2877,6 @@ void Compiler::compInitOptions(JitFlags* jitFlags) // The rest of the opts fields that we initialize here // should only be used when we generate code for the method // They should not be used when importing or inlining - CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_TAILCALL_OPT opts.compTailCallLoopOpt = true; @@ -5825,7 +5821,6 @@ void Compiler::generatePatchpointInfo() // // For arm64, if the frame pointer is not at the top of the frame, we need to adjust the // offset. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // We add +TARGET_POINTER_SIZE here is to account for the slot that Jit_Patchpoint @@ -9866,7 +9861,6 @@ JITDBGAPI void __cdecl cTreeFlags(Compiler* comp, GenTree* tree) chars += printf("flags="); // Node flags - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 6ad178b32feedc..7e6b2c57c89dc4 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -8945,7 +8945,6 @@ class Compiler // We need to report the ISA dependency to the VM so that scenarios // such as R2R work correctly for larger vector sizes, so we always // do `compExactlyDependsOn` for such cases. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_XARCH) if (compExactlyDependsOn(InstructionSet_VectorT512)) @@ -10364,7 +10363,6 @@ class Compiler // There are cases where implicit RetBuf argument should be explicitly returned in a register. // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated. 
// These cases are: - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 // 1. on x64 Windows and Unix the address of RetBuf needs to be returned by // methods with hidden RetBufArg in RAX. In such case GT_RETURN is of TYP_BYREF, @@ -10383,7 +10381,6 @@ class Compiler #endif // 3. Windows ARM64 native instance calling convention requires the address of RetBuff // to be returned in x0. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { @@ -10395,7 +10392,6 @@ class Compiler } #endif // TARGET_ARM64 // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) { diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index b1329e88b0436b..62efd5282a16a1 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -2594,7 +2594,6 @@ inline if (!FPbased) { // Worst case stack based offset. - CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_FIXED_OUT_ARGS int outGoingArgSpaceSize = lvaOutgoingArgSpaceSize; #else @@ -2606,7 +2605,6 @@ inline else { // Worst case FP based offset. 
- CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM varOffset = codeGen->genCallerSPtoInitialSPdelta() - codeGen->genCallerSPtoFPdelta(); @@ -2694,7 +2692,6 @@ inline bool Compiler::lvaIsOriginalThisArg(unsigned varNum) { LclVarDsc* varDsc = lvaGetDesc(varNum); // Should never write to or take the address of the original 'this' arg - CLANG_FORMAT_COMMENT_ANCHOR; #ifndef JIT32_GCENCODER // With the general encoder/decoder, when the original 'this' arg is needed as a generics context param, we diff --git a/src/coreclr/jit/decomposelongs.cpp b/src/coreclr/jit/decomposelongs.cpp index 2f05779f9ff137..ea87a996dbb1aa 100644 --- a/src/coreclr/jit/decomposelongs.cpp +++ b/src/coreclr/jit/decomposelongs.cpp @@ -2169,7 +2169,6 @@ void DecomposeLongs::TryPromoteLongVar(unsigned lclNum) for (unsigned index = 0; index < 2; ++index) { // Grab the temp for the field local. - CLANG_FORMAT_COMMENT_ANCHOR; // Lifetime of field locals might span multiple BBs, so they are long lifetime temps. unsigned fieldLclNum = m_compiler->lvaGrabTemp( diff --git a/src/coreclr/jit/ee_il_dll.cpp b/src/coreclr/jit/ee_il_dll.cpp index b33e6eed17bbc3..2f86c8b5274536 100644 --- a/src/coreclr/jit/ee_il_dll.cpp +++ b/src/coreclr/jit/ee_il_dll.cpp @@ -367,7 +367,6 @@ unsigned Compiler::eeGetArgSize(CorInfoType corInfoType, CORINFO_CLASS_HANDLE ty // Everything fits into a single 'slot' size // to accommodate irregular sized structs, they are passed byref - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef UNIX_AMD64_ABI if (varTypeIsStruct(argType)) @@ -396,7 +395,6 @@ unsigned Compiler::eeGetArgSize(CorInfoType corInfoType, CORINFO_CLASS_HANDLE ty // For each target that supports passing struct args in multiple registers // apply the target specific rules for them here: - CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_MULTIREG_ARGS #if defined(TARGET_ARM64) diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp index cabad877f83835..85bbda9f3cfbc6 100644 --- a/src/coreclr/jit/emit.cpp +++ b/src/coreclr/jit/emit.cpp 
@@ -4667,7 +4667,6 @@ void emitter::emitRemoveJumpToNextInst() // the last instruction in the group is the jmp we're looking for // and it jumps to the next instruction group so we don't need it - CLANG_FORMAT_COMMENT_ANCHOR #ifdef DEBUG unsigned instructionCount = jmpGroup->igInsCnt; @@ -5079,7 +5078,6 @@ void emitter::emitJumpDistBind() jmp->idjOffs -= adjLJ; // If this is a jump via register, the instruction size does not change, so we are done. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) // JIT code and data will be allocated together for arm64 so the relative offset to JIT data is known. @@ -5145,7 +5143,6 @@ void emitter::emitJumpDistBind() else { /* First time we've seen this label, convert its target */ - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (EMITVERBOSE) @@ -5554,7 +5551,6 @@ void emitter::emitJumpDistBind() #endif /* Is there a chance of other jumps becoming short? */ - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG #if defined(TARGET_ARM) if (EMITVERBOSE) @@ -5881,7 +5877,6 @@ unsigned emitter::getLoopSize(insGroup* igLoopHeader, // jne IG06 // // - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if ((igInLoop->igLoopBackEdge != nullptr) && (igInLoop->igLoopBackEdge != igLoopHeader)) @@ -6949,7 +6944,6 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, *consAddrRW = consBlockRW; /* Nothing has been pushed on the stack */ - CLANG_FORMAT_COMMENT_ANCHOR; #if EMIT_TRACK_STACK_DEPTH emitCurStackLvl = 0; @@ -7612,7 +7606,6 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, if (jmp->idjShort) { // Patch Forward Short Jump - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_XARCH) *(BYTE*)(adr + writeableOffset) -= (BYTE)adj; #elif defined(TARGET_ARM) @@ -7632,7 +7625,6 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, else { // Patch Forward non-Short Jump - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_XARCH) *(int*)(adr + writeableOffset) -= adj; #elif defined(TARGET_ARMARCH) @@ -8611,7 +8603,6 @@ void emitter::emitGCvarLiveSet(int offs, GCtype 
gcType, BYTE* addr, ssize_t disp desc->vpdNext = nullptr; /* the lower 2 bits encode props about the stk ptr */ - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(JIT32_GCENCODER) && !defined(FEATURE_EH_FUNCLETS) if (offs == emitSyncThisObjOffs) @@ -10069,7 +10060,6 @@ void emitter::emitStackPopLargeStk(BYTE* addr, bool isCall, unsigned char callIn Or do we have a partially interruptible EBP-less frame, and any of EDI,ESI,EBX,EBP are live, or is there an outer/pending call? */ - CLANG_FORMAT_COMMENT_ANCHOR; #if !FPO_INTERRUPTIBLE if (emitFullyInt || (gcrefRegs == 0 && byrefRegs == 0 && u2.emitGcArgTrackCnt == 0)) diff --git a/src/coreclr/jit/emit.h b/src/coreclr/jit/emit.h index 094720597ead70..4e37226e2b5816 100644 --- a/src/coreclr/jit/emit.h +++ b/src/coreclr/jit/emit.h @@ -328,7 +328,6 @@ struct insGroup #endif // !FEATURE_EH_FUNCLETS // Try to do better packing based on how large regMaskSmall is (8, 16, or 64 bits). - CLANG_FORMAT_COMMENT_ANCHOR; #if !(REGMASK_BITS <= 32) regMaskSmall igGCregs; // set of registers with live GC refs @@ -807,17 +806,16 @@ class emitter #endif // TARGET_XARCH #ifdef TARGET_ARM64 - unsigned _idLclVar : 1; // access a local on stack - unsigned _idLclVarPair : 1 // carries information for 2 GC lcl vars. + unsigned _idLclVarPair : 1; // carries information for 2 GC lcl vars. #endif #ifdef TARGET_LOONGARCH64 - // TODO-LoongArch64: maybe delete on future. - opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16 - insOpts _idInsOpt : 6; // loongarch options for special: placeholders. e.g emitIns_R_C, also identifying the - // accessing a local on stack. - unsigned _idLclVar : 1; // access a local on stack. + // TODO-LoongArch64: maybe delete on future. + opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16 + insOpts _idInsOpt : 6; // loongarch options for special: placeholders. e.g emitIns_R_C, also identifying the + // accessing a local on stack. + unsigned _idLclVar : 1; // access a local on stack. 
#endif #ifdef TARGET_RISCV64 @@ -848,7 +846,6 @@ class emitter // How many bits have been used beyond the first 32? // Define ID_EXTRA_BITFIELD_BITS to that number. // - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM) #define ID_EXTRA_BITFIELD_BITS (16) @@ -876,7 +873,6 @@ class emitter // All instrDesc types are <= 56 bytes, but we also need m_debugInfoSize, // which is pointer sized, so 5 bits are required on 64-bit and 4 bits // on 32-bit. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef HOST_64BIT unsigned _idScaledPrevOffset : 5; @@ -898,7 +894,6 @@ class emitter // arm64: 60/55 bits // loongarch64: 53/48 bits // risc-v: 53/48 bits - CLANG_FORMAT_COMMENT_ANCHOR; #define ID_EXTRA_BITS (ID_EXTRA_RELOC_BITS + ID_EXTRA_BITFIELD_BITS + ID_EXTRA_PREV_OFFSET_BITS) @@ -915,7 +910,6 @@ class emitter // arm64: 4/9 bits // loongarch64: 11/16 bits // risc-v: 11/16 bits - CLANG_FORMAT_COMMENT_ANCHOR; #define ID_ADJ_SMALL_CNS (int)(1 << (ID_BIT_SMALL_CNS - 1)) #define ID_CNT_SMALL_CNS (int)(1 << ID_BIT_SMALL_CNS) @@ -940,7 +934,6 @@ class emitter // // SMALL_IDSC_SIZE is this size, in bytes. // - CLANG_FORMAT_COMMENT_ANCHOR; #define SMALL_IDSC_SIZE 8 @@ -957,7 +950,6 @@ class emitter } private: - CLANG_FORMAT_COMMENT_ANCHOR; void checkSizes(); @@ -2571,7 +2563,6 @@ class emitter // instruction group depends on the instruction mix as well as DEBUG/non-DEBUG build type. See the // EMITTER_STATS output for various statistics related to this. // - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // ARM32/64, LoongArch and RISC-V can require a bigger prolog instruction group. 
One scenario diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp index 539e07a1136541..c69ea7c5a36e6f 100644 --- a/src/coreclr/jit/emitloongarch64.cpp +++ b/src/coreclr/jit/emitloongarch64.cpp @@ -2873,7 +2873,6 @@ void emitter::emitJumpDistBind() jmp->idjOffs += adjSJ; // If this is a jump via register, the instruction size does not change, so we are done. - CLANG_FORMAT_COMMENT_ANCHOR; /* Have we bound this jump's target already? */ @@ -2894,7 +2893,6 @@ void emitter::emitJumpDistBind() else { /* First time we've seen this label, convert its target */ - CLANG_FORMAT_COMMENT_ANCHOR; tgtIG = (insGroup*)emitCodeGetCookie(jmp->idAddr()->iiaBBlabel); diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp index 525d5e5274ba74..533d26ef2307cf 100644 --- a/src/coreclr/jit/emitriscv64.cpp +++ b/src/coreclr/jit/emitriscv64.cpp @@ -1847,7 +1847,6 @@ void emitter::emitJumpDistBind() jmp->idjOffs += adjSJ; // If this is a jump via register, the instruction size does not change, so we are done. - CLANG_FORMAT_COMMENT_ANCHOR; /* Have we bound this jump's target already? 
*/ @@ -1868,7 +1867,6 @@ void emitter::emitJumpDistBind() else { /* First time we've seen this label, convert its target */ - CLANG_FORMAT_COMMENT_ANCHOR; tgtIG = (insGroup*)emitCodeGetCookie(jmp->idAddr()->iiaBBlabel); diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index e356ab8b3d1132..848ec0f479edd0 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -3863,7 +3863,6 @@ inline UNATIVE_OFFSET emitter::emitInsSizeSVCalcDisp(instrDesc* id, code_t code, #endif { // Dev10 804810 - failing this assert can lead to bad codegen and runtime crashes - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef UNIX_AMD64_ABI const LclVarDsc* varDsc = emitComp->lvaGetDesc(var); @@ -4146,7 +4145,6 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, code_t code) if (reg == REG_NA) { /* The address is of the form "[disp]" */ - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 // Special case: "mov eax, [disp]" and "mov [disp], eax" can use a smaller 1-byte encoding. 
@@ -15770,7 +15768,6 @@ BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i) if (dstOffs <= srcOffs) { // This is a backward jump - distance is known at this point - CLANG_FORMAT_COMMENT_ANCHOR; #if DEBUG_EMIT if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0) @@ -17205,7 +17202,6 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) if (ins == INS_pop) { // The offset in "pop [ESP+xxx]" is relative to the new ESP value - CLANG_FORMAT_COMMENT_ANCHOR; #if !FEATURE_FIXED_OUT_ARGS emitCurStackLvl -= sizeof(int); diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index a650ae437fccc6..48d8765857fd0b 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -5635,7 +5635,6 @@ bool Compiler::fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter) void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk) { /* We have decided to insert the block(s) after 'insertAfterBlk' */ - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) @@ -5742,7 +5741,6 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r // 1. Verify that all the blocks in the range are either all rarely run or not rarely run. // When creating funclets, we ignore the run rarely flag, as we need to be able to move any blocks // in the range. 
- CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(FEATURE_EH_FUNCLETS) bool isRare; diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index ba5ed96610dd32..7450df323aea0c 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -1582,7 +1582,6 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) } // Update optMethodFlags - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG unsigned optMethodFlagsBefore = optMethodFlags; diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 9a98ea7f619cd2..f5ca834696ce5b 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -1897,7 +1897,6 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) if (block->NumSucc(this) == 1) { // Use BBJ_ALWAYS for a switch with only a default clause, or with only one unique successor. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) @@ -2007,7 +2006,6 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) // replace it with a COMMA node. In such a case we will end up with GT_JTRUE node pointing to // a COMMA node which results in noway asserts in fgMorphSmpOp(), optAssertionGen() and rpPredictTreeRegUse(). // For the same reason fgMorphSmpOp() marks GT_JTRUE nodes with RELOP children as GTF_DONT_CSE. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 96e0a3e785f1fc..92b84e31aa72bb 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -310,7 +310,6 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) // Create a GT_EQ node that checks against g_TrapReturningThreads. True jumps to Bottom, // false falls through to poll. Add this to the end of Top. Top is now BBJ_COND. 
Bottom is // now a jump target - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef ENABLE_FAST_GCPOLL_HELPER // Prefer the fast gc poll helepr over the double indirection diff --git a/src/coreclr/jit/gcencode.cpp b/src/coreclr/jit/gcencode.cpp index 9d521ebef799cd..a093d8a20e5981 100644 --- a/src/coreclr/jit/gcencode.cpp +++ b/src/coreclr/jit/gcencode.cpp @@ -134,7 +134,6 @@ void GCInfo::gcMarkFilterVarsPinned() // (2) a regular one for after the filter // and then adjust the original lifetime to end before // the filter. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (compiler->verbose) @@ -177,7 +176,6 @@ void GCInfo::gcMarkFilterVarsPinned() // somewhere inside it, so we only create 1 new lifetime, // and then adjust the original lifetime to end before // the filter. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (compiler->verbose) @@ -216,7 +214,6 @@ void GCInfo::gcMarkFilterVarsPinned() // lifetime for the part inside the filter and adjust // the start of the original lifetime to be the end // of the filter - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (compiler->verbose) { @@ -259,7 +256,6 @@ void GCInfo::gcMarkFilterVarsPinned() { // The variable lifetime is completely within the filter, // so just add the pinned flag. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (compiler->verbose) { @@ -1463,7 +1459,6 @@ size_t GCInfo::gcInfoBlockHdrSave( #endif /* Write the method size first (using between 1 and 5 bytes) */ - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (compiler->verbose) @@ -4112,7 +4107,6 @@ void GCInfo::gcMakeRegPtrTable( // pointers" section of the GC info even if lvTracked==true // Has this argument been fully enregistered? - CLANG_FORMAT_COMMENT_ANCHOR; if (!varDsc->lvOnFrame) { @@ -4141,7 +4135,6 @@ void GCInfo::gcMakeRegPtrTable( } // If we haven't continued to the next variable, we should report this as an untracked local. 
- CLANG_FORMAT_COMMENT_ANCHOR; GcSlotFlags flags = GC_SLOT_UNTRACKED; diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 5125c3ad42fcfe..9cf06c7bb1fcb5 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -891,7 +891,6 @@ int GenTree::GetRegisterDstCount(Compiler* compiler) const // A MultiRegOp is a GT_MUL_LONG, GT_PUTARG_REG, or GT_BITCAST. // For the latter two (ARM-only), they only have multiple registers if they produce a long value // (GT_MUL_LONG always produces a long value). - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM return (TypeGet() == TYP_LONG) ? 2 : 1; #else @@ -4882,7 +4881,6 @@ bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_typ // [base + idx * mul + cns] // mul can be 0, 2, 4, or 8 // Note that mul == 0 is semantically equivalent to mul == 1. // Note that cns can be zero. - CLANG_FORMAT_COMMENT_ANCHOR; assert((base != nullptr) || (idx != nullptr && mul >= 2)); @@ -9450,7 +9448,6 @@ GenTree* Compiler::gtCloneExpr(GenTree* tree) if (kind & GTK_SMPOP) { /* If necessary, make sure we allocate a "fat" tree node */ - CLANG_FORMAT_COMMENT_ANCHOR; switch (oper) { diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index 7fbcf5471103ea..a0e8eb7242e6cf 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -826,7 +826,6 @@ struct GenTree // // Register or register pair number of the node. // - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG @@ -858,7 +857,6 @@ struct GenTree public: // The register number is stored in a small format (8 bits), but the getters return and the setters take // a full-size (unsigned) format, to localize the casts here. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG bool canBeContained() const; @@ -1309,7 +1307,6 @@ struct GenTree { // Note that only GT_EQ to GT_GT are HIR nodes, GT_TEST and GT_BITTEST // nodes are backend nodes only. 
- CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_XARCH static_assert_no_msg(AreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE, GT_BITTEST_EQ, GT_BITTEST_NE)); @@ -1999,7 +1996,6 @@ struct GenTree // These are only used for dumping. // The GetRegNum() is only valid in LIR, but the dumping methods are not easily // modified to check this. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG bool InReg() const diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 4a1590fb7e0bfb..0d1df79812f03a 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -5238,7 +5238,6 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr // VSW 318822 // // So here we decide to make the resulting type to be a native int. - CLANG_FORMAT_COMMENT_ANCHOR; // Insert an explicit upcast if needed. op1 = *pOp1 = impImplicitIorI4Cast(op1, TYP_I_IMPL, fUnsigned); @@ -9167,20 +9166,19 @@ void Compiler::impImportBlockCode(BasicBlock* block) assert(!"Unexpected fieldAccessor"); } - /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full - trust apps). The reason this works is that JIT stores an i4 constant in GenTree union during - importation and reads from the union as if it were a long during code generation. Though this - can potentially read garbage, one can get lucky to have this working correctly. + /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full + trust apps). The reason this works is that JIT stores an i4 constant in GenTree union during + importation and reads from the union as if it were a long during code generation. Though this + can potentially read garbage, one can get lucky to have this working correctly. 
- This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with - /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a - dependency on it. To be backward compatible, we will explicitly add an upward cast here so that - it works correctly always. + This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with + /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a + dependency on it. To be backward compatible, we will explicitly add an upward cast here so that + it works correctly always. - Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT - for V4.0. - */ - CLANG_FORMAT_COMMENT_ANCHOR; + Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT + for V4.0. + */ #ifndef TARGET_64BIT // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index 24d7370b8f985a..0974d654ffb309 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -587,7 +587,6 @@ var_types Compiler::impImportCall(OPCODE opcode, tailcall to a function with a different number of arguments, we are hosed. There are ways around this (caller remembers esp value, varargs is not caller-pop, etc), but not worth it. */ - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 if (canTailCall) @@ -9781,8 +9780,6 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) // be behind a relevant IsSupported check and will never be hit and the // software fallback will be executed instead. 
- CLANG_FORMAT_COMMENT_ANCHOR; - #ifdef FEATURE_HW_INTRINSICS namespaceName += 10; const char* platformNamespaceName; diff --git a/src/coreclr/jit/inductionvariableopts.cpp b/src/coreclr/jit/inductionvariableopts.cpp index 19755c312de350..a1ab0c58ecd976 100644 --- a/src/coreclr/jit/inductionvariableopts.cpp +++ b/src/coreclr/jit/inductionvariableopts.cpp @@ -481,7 +481,6 @@ PhaseStatus Compiler::optInductionVariables() // Currently we only do IV widening which generally is only profitable for // x64 because arm64 addressing modes can include the zero/sign-extension // of the index for free. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_XARCH) && defined(TARGET_64BIT) m_dfsTree = fgComputeDfs(); m_loops = FlowGraphNaturalLoops::Find(m_dfsTree); diff --git a/src/coreclr/jit/jit.h b/src/coreclr/jit/jit.h index 1094740a8e25d0..cc8c8cb717d9ab 100644 --- a/src/coreclr/jit/jit.h +++ b/src/coreclr/jit/jit.h @@ -17,10 +17,6 @@ #endif #endif -// Clang-format messes with the indentation of comments if they directly precede an -// ifdef. This macro allows us to anchor the comments to the regular flow of code. -#define CLANG_FORMAT_COMMENT_ANCHOR ; - // Clang-tidy replaces 0 with nullptr in some templated functions, causing a build // break. Replacing those instances with ZERO avoids this change #define ZERO 0 diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 573191fecb38c3..329f1c602cf989 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -1361,7 +1361,6 @@ void Compiler::fgAllocEHTable() // twice the number of EH clauses in the IL, which should be good in practice. // In extreme cases, we might need to abandon this and reallocate. See // fgAddEHTableEntry() for more details. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG compHndBBtabAllocCount = info.compXcptnsCount; // force the resizing code to hit more frequently in DEBUG @@ -1682,7 +1681,6 @@ void Compiler::fgSortEHTable() // but ARM did. 
It turns out not sorting the table can cause the EH table to incorrectly // set the bbHndIndex value in some nested cases, and that can lead to a security exploit // that allows the execution of arbitrary code. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) @@ -2542,7 +2540,6 @@ bool Compiler::fgNormalizeEHCase3() if (EHblkDsc::ebdIsSameTry(ehOuter, ehInner)) { // We can't touch this 'try', since it's mutual protect. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { @@ -2729,7 +2726,6 @@ bool Compiler::fgNormalizeEHCase3() if (innerIsTryRegion && ehOuter->ebdIsSameTry(mutualTryBeg, mutualTryLast)) { // We can't touch this 'try', since it's mutual protect. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) @@ -3195,7 +3191,6 @@ void Compiler::fgVerifyHandlerTab() // blocks in the nested EH region. However, if funclets have been created, this is no longer true, since // this 'try' might be in a handler that is pulled out to the funclet region, while the outer 'try' // remains in the main function region. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) @@ -4203,7 +4198,6 @@ bool Compiler::fgRelocateEHRegions() // Currently it is not good to move the rarely run handler regions to the end of the method // because fgDetermineFirstColdBlock() must put the start of any handler region in the hot // section. - CLANG_FORMAT_COMMENT_ANCHOR; #if 0 // Now try to move the entire handler region if it can be moved. 
diff --git a/src/coreclr/jit/jitgcinfo.h b/src/coreclr/jit/jitgcinfo.h index 288042d4c6b1e4..2258903a0603eb 100644 --- a/src/coreclr/jit/jitgcinfo.h +++ b/src/coreclr/jit/jitgcinfo.h @@ -340,7 +340,6 @@ class GCInfo // // These record the info about the procedure in the info-block // - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef JIT32_GCENCODER private: diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 6b3650416574c0..042c411b306d0d 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -2590,7 +2590,6 @@ bool Compiler::StructPromotionHelper::ShouldPromoteStructVar(unsigned lclNum) // In that case, we would like to avoid promortion. // However we haven't yet computed the lvRefCnt values so we can't do that. // - CLANG_FORMAT_COMMENT_ANCHOR; return shouldPromote; } @@ -4449,7 +4448,6 @@ void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, { // Variables can be marked as DoNotEngister in earlier stages like LocalAddressVisitor. // No need to track them for single-def. - CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // TODO-CQ: If the varType needs partial callee save, conservatively do not enregister @@ -6067,7 +6065,6 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, { /* Argument is passed in a register, don't count it * when updating the current offset on the stack */ - CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) #if DEBUG @@ -6254,7 +6251,6 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, // r3 int a2 --> pushed (not pre-spilled) for alignment of a0 by lvaInitUserArgs. // r2 struct { int } a1 // r0-r1 struct { long } a0 - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef PROFILING_SUPPORTED // On Arm under profiler, r0-r3 are always prespilled on stack. 
@@ -6318,7 +6314,6 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, // For struct promoted parameters we need to set the offsets for both LclVars. // // For a dependent promoted struct we also assign the struct fields stack offset - CLANG_FORMAT_COMMENT_ANCHOR; if (varDsc->lvPromoted) { @@ -7385,7 +7380,6 @@ void Compiler::lvaAlignFrame() // If this isn't the final frame layout, assume we have to push an extra QWORD // Just so the offsets are true upper limits. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef UNIX_AMD64_ABI // The compNeedToAlignFrame flag is indicating if there is a need to align the frame. @@ -7507,8 +7501,6 @@ void Compiler::lvaAssignFrameOffsetsToPromotedStructs() // assign their offsets in lvaAssignVirtualFrameOffsetToArg(). // This is not true for the System V systems since there is no // outgoing args space. Assign the dependently promoted fields properly. - // - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM) || defined(TARGET_X86) // ARM: lo/hi parts of a promoted long arg need to be updated. @@ -7584,7 +7576,6 @@ int Compiler::lvaAllocateTemps(int stkOffs, bool mustDoubleAlign) /* Figure out and record the stack offset of the temp */ /* Need to align the offset? */ - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (varTypeIsGC(tempType) && ((stkOffs % TARGET_POINTER_SIZE) != 0)) diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 7f413b75d6649e..05c65d2de3450c 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -498,7 +498,6 @@ void Compiler::fgPerBlockLocalVarLiveness() // 32-bit targets always pop the frame in the epilog. // For 64-bit targets, we only do this in the epilog for IL stubs; // for non-IL stubs the frame is popped after every PInvoke call. 
- CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) #endif diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 5d3a504175ee9e..0cd0500a9d1630 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -1633,7 +1633,6 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, CallArg* callArg, // Mark this one as tail call arg if it is a fast tail call. // This provides the info to put this argument in in-coming arg area slot // instead of in out-going arg area slot. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG // Make sure state is correct. The PUTARG_STK has TYP_VOID, as it doesn't produce @@ -5667,7 +5666,6 @@ void Lowering::InsertPInvokeMethodProlog() // On 32-bit targets, CORINFO_HELP_INIT_PINVOKE_FRAME initializes the PInvoke frame and then pushes it onto // the current thread's Frame stack. On 64-bit targets, it only initializes the PInvoke frame. // As a result, don't push the frame onto the frame stack here for any 64-bit targets - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT #ifdef USE_PER_FRAME_PINVOKE_INIT @@ -5732,7 +5730,6 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* // Pop the frame if necessary. This always happens in the epilog on 32-bit targets. For 64-bit targets, we only do // this in the epilog for IL stubs; for non-IL stubs the frame is popped after every PInvoke call. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef USE_PER_FRAME_PINVOKE_INIT // For IL stubs, we push the frame once even when we're doing per-pinvoke init @@ -5882,7 +5879,6 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call) // Push the PInvoke frame if necessary. On 32-bit targets this only happens in the method prolog if a method // contains PInvokes; on 64-bit targets this is necessary in non-stubs. 
- CLANG_FORMAT_COMMENT_ANCHOR; #ifdef USE_PER_FRAME_PINVOKE_INIT if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) @@ -5960,7 +5956,6 @@ void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call) // Pop the frame if necessary. On 32-bit targets this only happens in the method epilog; on 64-bit targets // this happens after every PInvoke call in non-stubs. 32-bit targets instead mark the frame as inactive. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef USE_PER_FRAME_PINVOKE_INIT if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) @@ -6954,7 +6949,6 @@ bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod) // On ARM64 we will use a 32x32->64 bit multiply instead of a 64x64->64 one. bool widenToNativeIntForMul = (type != TYP_I_IMPL) && !simpleMul; #else - CLANG_FORMAT_COMMENT_ANCHOR; bool widenToNativeIntForMul = (type != TYP_I_IMPL); #endif @@ -8856,7 +8850,6 @@ GenTree* Lowering::LowerIndir(GenTreeIndir* ind) // TODO-Cleanup: We're passing isContainable = true but ContainCheckIndir rejects // address containment in some cases so we end up creating trivial (reg + offfset) // or (reg + reg) LEAs that are not necessary. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) // Verify containment safety before creating an LEA that must be contained. @@ -9528,7 +9521,6 @@ void Lowering::TryRetypingFloatingPointStoreToIntegerStore(GenTree* store) // section and it is not a clear win to switch them to inline integers. // ARM: FP constants are assembled from integral ones, so it is always profitable // to directly use the integers as it avoids the int -> float conversion. 
- CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_XARCH) || defined(TARGET_ARM) bool shouldSwitchToInteger = true; diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index 50652ca075254d..ebedf7849004df 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -815,7 +815,6 @@ LinearScan::LinearScan(Compiler* theCompiler) #endif // Initialize the availableRegs to use for each TYP_* - CLANG_FORMAT_COMMENT_ANCHOR; #define DEF_TP(tn, nm, jitType, sz, sze, asze, st, al, regTyp, regFld, csr, ctr, tf) \ availableRegs[static_cast(TYP_##tn)] = ®Fld; @@ -2003,7 +2002,6 @@ void LinearScan::identifyCandidates() // We maintain two sets of FP vars - those that meet the first threshold of weighted ref Count, // and those that meet the second (see the definitions of thresholdFPRefCntWtd and maybeFPRefCntWtd // above). - CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // Additionally, when we are generating code for a target with partial SIMD callee-save @@ -2080,7 +2078,6 @@ void LinearScan::identifyCandidates() // registers current include the number of fp vars, whether there are loops, and whether there are // multiple exits. These have been selected somewhat empirically, but there is probably room for // more tuning. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (VERBOSE) @@ -3023,7 +3020,6 @@ regNumber LinearScan::allocateReg(Interval* currentInterval, if (regSelector->isSpilling()) { // We're spilling. 
- CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM if (currentInterval->registerType == TYP_DOUBLE) @@ -5455,7 +5451,6 @@ void LinearScan::allocateRegistersMinimal() } // Free registers to clear associated intervals for resolution phase - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (getLsraExtendLifeTimes()) @@ -6756,7 +6751,6 @@ void LinearScan::allocateRegisters() #endif // JIT32_GCENCODER // Free registers to clear associated intervals for resolution phase - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (getLsraExtendLifeTimes()) diff --git a/src/coreclr/jit/lsra.h b/src/coreclr/jit/lsra.h index e038b4e8243a57..2705a93188dad6 100644 --- a/src/coreclr/jit/lsra.h +++ b/src/coreclr/jit/lsra.h @@ -773,7 +773,6 @@ class LinearScan : public LinearScanInterface // At least for x86 and AMD64, and potentially other architecture that will support SIMD, // we need a minimum of 5 fp regs in order to support the InitN intrinsic for Vector4. // Hence the "SmallFPSet" has 5 elements. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp index 4f3d39c76d3ad1..86abef939a9d8c 100644 --- a/src/coreclr/jit/lsrabuild.cpp +++ b/src/coreclr/jit/lsrabuild.cpp @@ -2533,7 +2533,6 @@ void LinearScan::buildIntervals() // is at a new location and doesn't interfere with the uses. // For multi-reg local stores, the 'BuildMultiRegStoreLoc' method will further increment the // location by 2 for each destination register beyond the first. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG node->gtSeqNum = currentLoc; diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp index ad7d25709ee303..f380daeab59ac2 100644 --- a/src/coreclr/jit/lsraxarch.cpp +++ b/src/coreclr/jit/lsraxarch.cpp @@ -710,7 +710,6 @@ bool LinearScan::isRMWRegOper(GenTree* tree) { // TODO-XArch-CQ: Make this more accurate. // For now, We assume that most binary operators are of the RMW form. 
- CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_HW_INTRINSICS assert(tree->OperIsBinary() || (tree->OperIsMultiOp() && (tree->AsMultiOp()->GetOperandCount() <= 2))); @@ -1069,7 +1068,6 @@ int LinearScan::BuildShiftRotate(GenTree* tree) // TODO-CQ-XARCH: We can optimize generating 'test' instruction for GT_EQ/NE(shift, 0) // if the shift count is known to be non-zero and in the range depending on the // operand size. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 // The first operand of a GT_LSH_HI and GT_RSH_LO oper is a GT_LONG so that @@ -1169,7 +1167,6 @@ int LinearScan::BuildCall(GenTreeCall* call) RegisterType registerType = regType(call); // Set destination candidates for return value of the call. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME)) @@ -3019,7 +3016,6 @@ int LinearScan::BuildMul(GenTree* tree) // three-op form: reg = r/m * imm // This special widening 32x32->64 MUL is not used on x64 - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (tree->OperGet() != GT_MUL_LONG) #endif diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 2144869b1ce40e..4b301696a1eeb3 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -1681,7 +1681,6 @@ void CallArgs::EvalArgsToTemps(Compiler* comp, GenTreeCall* call) { // Create a temp assignment for the argument // Put the temp in the gtCallLateArgs list - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (comp->verbose) @@ -1984,7 +1983,6 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call // The logic here must remain in sync with GetNonStandardAddedArgCount(), which is used to map arguments // in the implementation of fast tail call. 
// *********** END NOTE ********* - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM) // A non-standard calling convention using wrapper delegate invoke is used on ARM, only, for wrapper @@ -2030,8 +2028,6 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call // We are allowed to have a ret buffer argument combined // with any of the remaining non-standard arguments - // - CLANG_FORMAT_COMMENT_ANCHOR; if (call->IsVirtualStub() && addStubCellArg) { @@ -2158,7 +2154,6 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call #endif // TARGET_X86 /* Morph the user arguments */ - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM) @@ -2729,7 +2724,6 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call } // Now we know if the argument goes in registers or not and how big it is. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM // If we ever allocate a floating point argument to the stack, then all @@ -5317,7 +5311,6 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason) // To reach here means that the return types of the caller and callee are tail call compatible. // In the case of structs that can be returned in a register, compRetNativeType is set to the actual return type. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (callee->IsTailPrefixedCall()) @@ -5460,7 +5453,6 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason) // We will currently decide to not fast tail call on Windows armarch if the caller or callee is a vararg // method. This is due to the ABI differences for native vararg methods for these platforms. There is // work required to shuffle arguments to the correct locations. 
- CLANG_FORMAT_COMMENT_ANCHOR; if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs())) { @@ -7376,7 +7368,6 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa // Local copy for implicit byref promotion that was undone. Do // not introduce new references to it, all uses have been // morphed to access the parameter. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG LclVarDsc* param = lvaGetDesc(firstField->lvParentLcl); @@ -7643,7 +7634,6 @@ GenTree* Compiler::fgMorphCall(GenTreeCall* call) // In the event the call indicates the block isn't a GC safe point // and the call is unmanaged with a GC transition suppression request // then insert a GC poll. - CLANG_FORMAT_COMMENT_ANCHOR; if (IsGcSafePoint(call)) { @@ -12390,7 +12380,6 @@ GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree) // (x >>> y) | (x << (-y + N)) // where N == bitsize(x), M is const, and // M & (N - 1) == N - 1 - CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64)) @@ -13591,7 +13580,6 @@ void Compiler::fgMorphStmts(BasicBlock* block) /* This must be a tailcall that caused a GCPoll to get injected. We haven't actually morphed the call yet but the flag still got set, clear it here... */ - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG morphedTree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; @@ -14065,7 +14053,6 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block) else { // We'll jump to the genReturnBB. 
- CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_X86) if (info.compFlags & CORINFO_FLG_SYNCH) diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 289e37b16fc4e8..7daf7104271fdc 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -1693,7 +1693,6 @@ bool Compiler::optTryUnrollLoop(FlowGraphNaturalLoop* loop, bool* changedIR) // The old loop body is unreachable now, but we will remove those // blocks after we finish unrolling. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) @@ -3418,7 +3417,6 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu switch (oper) { /* Constants can usually be narrowed by changing their value */ - CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT __int64 lval; diff --git a/src/coreclr/jit/stacklevelsetter.cpp b/src/coreclr/jit/stacklevelsetter.cpp index db97352d5e6977..d25f2683ca302d 100644 --- a/src/coreclr/jit/stacklevelsetter.cpp +++ b/src/coreclr/jit/stacklevelsetter.cpp @@ -287,7 +287,6 @@ void StackLevelSetter::SetThrowHelperBlock(SpecialCodeKind kind, BasicBlock* blo // or generate all required helpers after all stack alignment // has been added, and the stack level at each call to fgAddCodeRef() // is known, or can be recalculated. - CLANG_FORMAT_COMMENT_ANCHOR; #if defined(UNIX_X86_ABI) framePointerRequired = true; #else // !defined(UNIX_X86_ABI) diff --git a/src/coreclr/jit/targetx86.h b/src/coreclr/jit/targetx86.h index 08a4ab996bd1bf..3a861c3d7ef35a 100644 --- a/src/coreclr/jit/targetx86.h +++ b/src/coreclr/jit/targetx86.h @@ -232,7 +232,6 @@ // Registers killed by CORINFO_HELP_ASSIGN_REF and CORINFO_HELP_CHECKED_ASSIGN_REF. // Note that x86 normally emits an optimized (source-register-specific) write barrier, but can emit // a call to a "general" write barrier. 
- CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_USE_ASM_GC_WRITE_BARRIERS #define RBM_CALLEE_TRASH_WRITEBARRIER (RBM_EAX | RBM_EDX) diff --git a/src/coreclr/jit/unwindarmarch.cpp b/src/coreclr/jit/unwindarmarch.cpp index bdc7663bde7ed1..b292d74968f6ac 100644 --- a/src/coreclr/jit/unwindarmarch.cpp +++ b/src/coreclr/jit/unwindarmarch.cpp @@ -847,7 +847,6 @@ void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes) &upcMem[upcCodeSlot], prologBytes); // Note that the three UWC_END padding bytes still exist at the end of the array. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG // Zero out the epilog codes memory, to ensure we've copied the right bytes. Don't zero the padding bytes. @@ -1909,7 +1908,6 @@ void UnwindInfo::Split() // the actual offsets of the splits since we haven't issued the instructions yet, so store // an emitter location instead of an offset, and "finalize" the offset in the unwindEmit() phase, // like we do for the function length and epilog offsets. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (uwiComp->verbose) diff --git a/src/coreclr/jit/unwindloongarch64.cpp b/src/coreclr/jit/unwindloongarch64.cpp index 1b561eaaaae669..e46d3ec60e0794 100644 --- a/src/coreclr/jit/unwindloongarch64.cpp +++ b/src/coreclr/jit/unwindloongarch64.cpp @@ -1112,7 +1112,6 @@ void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes) &upcMem[upcCodeSlot], prologBytes); // Note that the three UWC_END padding bytes still exist at the end of the array. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG // Zero out the epilog codes memory, to ensure we've copied the right bytes. Don't zero the padding bytes. @@ -2139,7 +2138,6 @@ void UnwindInfo::Split() // the actual offsets of the splits since we haven't issued the instructions yet, so store // an emitter location instead of an offset, and "finalize" the offset in the unwindEmit() phase, // like we do for the function length and epilog offsets. 
- CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (uwiComp->verbose) diff --git a/src/coreclr/jit/unwindriscv64.cpp b/src/coreclr/jit/unwindriscv64.cpp index f9db0d433c6f13..05648c481744ea 100644 --- a/src/coreclr/jit/unwindriscv64.cpp +++ b/src/coreclr/jit/unwindriscv64.cpp @@ -923,7 +923,6 @@ void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes) &upcMem[upcCodeSlot], prologBytes); // Note that the three UWC_END padding bytes still exist at the end of the array. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG // Zero out the epilog codes memory, to ensure we've copied the right bytes. Don't zero the padding bytes. @@ -1946,7 +1945,6 @@ void UnwindInfo::Split() // the actual offsets of the splits since we haven't issued the instructions yet, so store // an emitter location instead of an offset, and "finalize" the offset in the unwindEmit() phase, // like we do for the function length and epilog offsets. - CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (uwiComp->verbose) diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp index c3234e5524deaa..ea33f1d14fb035 100644 --- a/src/coreclr/jit/utils.cpp +++ b/src/coreclr/jit/utils.cpp @@ -323,7 +323,6 @@ const char* dspRegRange(regMaskTP regMask, size_t& minSiz, const char* sep, regN minSiz -= strlen(sep) + strlen(nam); // What kind of separator should we use for this range (if it is indeed going to be a range)? 
- CLANG_FORMAT_COMMENT_ANCHOR; if (genIsValidIntReg(regNum)) { @@ -355,7 +354,6 @@ const char* dspRegRange(regMaskTP regMask, size_t& minSiz, const char* sep, regN } #elif defined(TARGET_X86) // No register ranges - CLANG_FORMAT_COMMENT_ANCHOR; #elif defined(TARGET_LOONGARCH64) if (REG_A0 <= regNum && regNum <= REG_T8) { From 96d5a66d300251ee2f32edc8cd7ffe849a3df667 Mon Sep 17 00:00:00 2001 From: Jakob Botsch Nielsen Date: Thu, 4 Apr 2024 21:33:54 +0200 Subject: [PATCH 100/132] JIT: Fix profiler enter callback init reg trash logic (#100637) During prolog generation we sometimes generate code to call the profiler enter callback. This may trash the "initReg" that we expect to keep zeroed during the prolog. The logic to check if the initReg was being trashed was wrong in a couple of cases: - Most backends did not take into account that the logic also trashes the registers used for arguments to the enter callback - SysV x64 thought that the enter callback trashed the parameter registers, but it does not This generally did not cause issues because `genFnPrologCalleeRegArgs` is unnecessarily conservative around whether or not it trashes `initReg`, and it comes after the profiler callback in the prolog. However, with the rewrite of the homing function that is not going to be the case anymore. --- src/coreclr/jit/codegenarm.cpp | 5 ++++- src/coreclr/jit/codegenarm64.cpp | 6 +++++- src/coreclr/jit/codegenloongarch64.cpp | 6 +++++- src/coreclr/jit/codegenriscv64.cpp | 6 +++++- src/coreclr/jit/codegenxarch.cpp | 12 ++++++++---- src/coreclr/jit/targetamd64.h | 15 +++++++++------ 6 files changed, 36 insertions(+), 14 deletions(-) diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index bddc03e0b41a54..2c010f116a2657 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -1710,7 +1710,10 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed) 0, // argSize. 
Again, we have to lie about it EA_UNKNOWN); // retSize - if (initReg == argReg) + // If initReg is trashed, either because it was an arg to the enter + // callback, or because the enter callback itself trashes it, then it needs + // to be zero'ed again before using. + if (((RBM_PROFILER_ENTER_TRASH | RBM_PROFILER_ENTER_ARG) & genRegMask(initReg)) != RBM_NONE) { *pInitRegZeroed = false; } diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 74258cdd55a73a..cd1b1558d93e64 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -5455,7 +5455,11 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed) genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN); - if ((genRegMask(initReg) & RBM_PROFILER_ENTER_TRASH) != RBM_NONE) + // If initReg is trashed, either because it was an arg to the enter + // callback, or because the enter callback itself trashes it, then it needs + // to be zero'ed again before using. + if (((RBM_PROFILER_ENTER_TRASH | RBM_PROFILER_ENTER_ARG_FUNC_ID | RBM_PROFILER_ENTER_ARG_CALLER_SP) & + genRegMask(initReg)) != RBM_NONE) { *pInitRegZeroed = false; } diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index d6f18005c767c5..94329e3486100a 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -8551,7 +8551,11 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed) genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN); - if ((genRegMask(initReg) & RBM_PROFILER_ENTER_TRASH) != RBM_NONE) + // If initReg is trashed, either because it was an arg to the enter + // callback, or because the enter callback itself trashes it, then it needs + // to be zero'ed again before using. 
+ if (((RBM_PROFILER_ENTER_TRASH | RBM_PROFILER_ENTER_ARG_FUNC_ID | RBM_PROFILER_ENTER_ARG_CALLER_SP) & + genRegMask(initReg)) != RBM_NONE) { *pInitRegZeroed = false; } diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 17d30e5ada2754..0df6f56c5b76de 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -8541,7 +8541,11 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed) genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN); - if ((genRegMask(initReg) & RBM_PROFILER_ENTER_TRASH)) + // If initReg is trashed, either because it was an arg to the enter + // callback, or because the enter callback itself trashes it, then it needs + // to be zero'ed again before using. + if (((RBM_PROFILER_ENTER_TRASH | RBM_PROFILER_ENTER_ARG_FUNC_ID | RBM_PROFILER_ENTER_ARG_CALLER_SP) & + genRegMask(initReg)) != RBM_NONE) { *pInitRegZeroed = false; } diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 77045cce0875ad..f25e5bb046d29d 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -9491,8 +9491,10 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed) } } - // If initReg is one of RBM_CALLEE_TRASH, then it needs to be zero'ed before using. - if ((RBM_CALLEE_TRASH & genRegMask(initReg)) != 0) + // If initReg is trashed, either because it was an arg to the enter + // callback, or because the enter callback itself trashes it, then it needs + // to be zero'ed again before using. 
+ if (((RBM_PROFILER_ENTER_TRASH | RBM_ARG_0 | RBM_ARG_1) & genRegMask(initReg)) != 0) { *pInitRegZeroed = false; } @@ -9528,8 +9530,10 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed) // "mov r11, helper addr; call r11" genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN, REG_DEFAULT_PROFILER_CALL_TARGET); - // If initReg is one of RBM_CALLEE_TRASH, then it needs to be zero'ed before using. - if ((RBM_CALLEE_TRASH & genRegMask(initReg)) != 0) + // If initReg is trashed, either because it was an arg to the enter + // callback, or because the enter callback itself trashes it, then it needs + // to be zero'ed again before using. + if (((RBM_PROFILER_ENTER_TRASH | RBM_PROFILER_ENTER_ARG_0 | RBM_PROFILER_ENTER_ARG_1) & genRegMask(initReg)) != 0) { *pInitRegZeroed = false; } diff --git a/src/coreclr/jit/targetamd64.h b/src/coreclr/jit/targetamd64.h index 5d37870d03b03a..7d1a2c8f08039f 100644 --- a/src/coreclr/jit/targetamd64.h +++ b/src/coreclr/jit/targetamd64.h @@ -511,12 +511,6 @@ #define RBM_FLTARG_REGS (RBM_FLTARG_0|RBM_FLTARG_1|RBM_FLTARG_2|RBM_FLTARG_3) #endif // !UNIX_AMD64_ABI - // The registers trashed by profiler enter/leave/tailcall hook - // See vm\amd64\asmhelpers.asm for more details. - #define RBM_PROFILER_ENTER_TRASH RBM_CALLEE_TRASH - - #define RBM_PROFILER_TAILCALL_TRASH RBM_PROFILER_LEAVE_TRASH - // The registers trashed by the CORINFO_HELP_STOP_FOR_GC helper. #ifdef UNIX_AMD64_ABI // See vm\amd64\unixasmhelpers.S for more details. @@ -525,11 +519,20 @@ // The return registers could be any two from the set { RAX, RDX, XMM0, XMM1 }. // STOP_FOR_GC helper preserves all the 4 possible return registers. #define RBM_STOP_FOR_GC_TRASH (RBM_CALLEE_TRASH & ~(RBM_FLOATRET | RBM_INTRET | RBM_FLOATRET_1 | RBM_INTRET_1)) + + // The registers trashed by profiler enter/leave/tailcall hook + // See vm\amd64\asmhelpers.S for more details. 
+ #define RBM_PROFILER_ENTER_TRASH (RBM_CALLEE_TRASH & ~(RBM_ARG_REGS|RBM_FLTARG_REGS)) #define RBM_PROFILER_LEAVE_TRASH (RBM_CALLEE_TRASH & ~(RBM_FLOATRET | RBM_INTRET | RBM_FLOATRET_1 | RBM_INTRET_1)) + #define RBM_PROFILER_TAILCALL_TRASH RBM_PROFILER_LEAVE_TRASH + #else // See vm\amd64\asmhelpers.asm for more details. #define RBM_STOP_FOR_GC_TRASH (RBM_CALLEE_TRASH & ~(RBM_FLOATRET | RBM_INTRET)) + + #define RBM_PROFILER_ENTER_TRASH RBM_CALLEE_TRASH #define RBM_PROFILER_LEAVE_TRASH (RBM_CALLEE_TRASH & ~(RBM_FLOATRET | RBM_INTRET)) + #define RBM_PROFILER_TAILCALL_TRASH RBM_PROFILER_LEAVE_TRASH #endif // The registers trashed by the CORINFO_HELP_INIT_PINVOKE_FRAME helper. From 09e1418200ac0de4d18ec2e9eea86755c0db3a7c Mon Sep 17 00:00:00 2001 From: "Mukund Raghav Sharma (Moko)" <68247673+mrsharm@users.noreply.github.com> Date: Thu, 4 Apr 2024 13:19:15 -0700 Subject: [PATCH 101/132] Clean up macro in GetLogicalProcessorCacheSizeFromOS to include cacheSize as a parameter via a new argument CURRENT_CACHE_SIZE (#100596) * Clean up macro * Addressed feedback. 
* Removed redundant print * Update src/coreclr/gc/unix/gcenv.unix.cpp Co-authored-by: Jan Kotas * Inlined function call * Update src/coreclr/gc/unix/gcenv.unix.cpp Co-authored-by: Jan Kotas * Update src/coreclr/gc/unix/gcenv.unix.cpp Co-authored-by: Jan Kotas * Update src/coreclr/gc/unix/gcenv.unix.cpp Co-authored-by: Jan Kotas * Update src/coreclr/gc/unix/gcenv.unix.cpp Co-authored-by: Jan Kotas * Update src/coreclr/gc/unix/gcenv.unix.cpp --------- Co-authored-by: Jan Kotas --- src/coreclr/gc/unix/gcenv.unix.cpp | 53 +++++++++++++----------------- 1 file changed, 23 insertions(+), 30 deletions(-) diff --git a/src/coreclr/gc/unix/gcenv.unix.cpp b/src/coreclr/gc/unix/gcenv.unix.cpp index b50297e25b25a6..88dd8aabb030ad 100644 --- a/src/coreclr/gc/unix/gcenv.unix.cpp +++ b/src/coreclr/gc/unix/gcenv.unix.cpp @@ -862,31 +862,30 @@ bool ReadMemoryValueFromFile(const char* filename, uint64_t* val) return result; } -#define UPDATE_CACHE_SIZE_AND_LEVEL(NEW_CACHE_SIZE, NEW_CACHE_LEVEL) if (NEW_CACHE_SIZE > ((long)cacheSize)) { cacheSize = NEW_CACHE_SIZE; cacheLevel = NEW_CACHE_LEVEL; } - static size_t GetLogicalProcessorCacheSizeFromOS() { size_t cacheLevel = 0; size_t cacheSize = 0; - long size; - // sysconf can return -1 if the cache size is unavailable in some distributions and 0 in others. - // UPDATE_CACHE_SIZE_AND_LEVEL should handle both the cases by not updating cacheSize if either of cases are met. 
-#ifdef _SC_LEVEL1_DCACHE_SIZE - size = sysconf(_SC_LEVEL1_DCACHE_SIZE); - UPDATE_CACHE_SIZE_AND_LEVEL(size, 1) -#endif -#ifdef _SC_LEVEL2_CACHE_SIZE - size = sysconf(_SC_LEVEL2_CACHE_SIZE); - UPDATE_CACHE_SIZE_AND_LEVEL(size, 2) -#endif -#ifdef _SC_LEVEL3_CACHE_SIZE - size = sysconf(_SC_LEVEL3_CACHE_SIZE); - UPDATE_CACHE_SIZE_AND_LEVEL(size, 3) -#endif -#ifdef _SC_LEVEL4_CACHE_SIZE - size = sysconf(_SC_LEVEL4_CACHE_SIZE); - UPDATE_CACHE_SIZE_AND_LEVEL(size, 4) +#if defined(_SC_LEVEL1_DCACHE_SIZE) || defined(_SC_LEVEL2_CACHE_SIZE) || defined(_SC_LEVEL3_CACHE_SIZE) || defined(_SC_LEVEL4_CACHE_SIZE) + const int cacheLevelNames[] = + { + _SC_LEVEL1_DCACHE_SIZE, + _SC_LEVEL2_CACHE_SIZE, + _SC_LEVEL3_CACHE_SIZE, + _SC_LEVEL4_CACHE_SIZE, + }; + + for (int i = ARRAY_SIZE(cacheLevelNames) - 1; i >= 0; i--) + { + long size = sysconf(cacheLevelNames[i]); + if (size > 0) + { + cacheSize = (size_t)size; + cacheLevel = i + 1; + break; + } + } #endif #if defined(TARGET_LINUX) && !defined(HOST_ARM) && !defined(HOST_X86) @@ -912,18 +911,12 @@ static size_t GetLogicalProcessorCacheSizeFromOS() if (ReadMemoryValueFromFile(path_to_size_file, &cache_size_from_sys_file)) { - // uint64_t to long conversion as ReadMemoryValueFromFile takes a uint64_t* as an argument for the val argument. 
- size = (long)cache_size_from_sys_file; - path_to_level_file[index] = (char)(48 + i); + cacheSize = std::max(cacheSize, (size_t)cache_size_from_sys_file); + path_to_level_file[index] = (char)(48 + i); if (ReadMemoryValueFromFile(path_to_level_file, &level)) { - UPDATE_CACHE_SIZE_AND_LEVEL(size, level) - } - - else - { - cacheSize = std::max((long)cacheSize, size); + cacheLevel = level; } } } @@ -975,7 +968,7 @@ static size_t GetLogicalProcessorCacheSizeFromOS() if (success) { assert(cacheSizeFromSysctl > 0); - cacheSize = ( size_t) cacheSizeFromSysctl; + cacheSize = (size_t) cacheSizeFromSysctl; } } #endif From 7f2be68d3386b788e54a609d1f5b20b67571fc3f Mon Sep 17 00:00:00 2001 From: Jacques Eloff Date: Thu, 4 Apr 2024 14:13:37 -0700 Subject: [PATCH 102/132] Add workload metadata for automation (#100651) --- src/workloads/workloads.csproj | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/workloads/workloads.csproj b/src/workloads/workloads.csproj index e8f9b0726f4433..2f91a14f59c620 100644 --- a/src/workloads/workloads.csproj +++ b/src/workloads/workloads.csproj @@ -187,6 +187,15 @@ + + + + + + + + + Date: Thu, 4 Apr 2024 15:08:13 -0700 Subject: [PATCH 103/132] Make host tests use single copy of shared framework (#100588) - Only create one .NET install layout to be shared by all host tests - Add `pretest.proj` for `host.pretest` subset that builds all test project assets and creates the single .NET install layout - Fix `NativeHostApis` tests that were editing the .NET install layout directly (instead of creating a copy to edit) - Remove some unnecessary copying/creating of SDKs and frameworks by sharing the fixture across tests - Update host testing doc with simpler setup instructions and more details around investigating test failures --- docs/workflow/testing/host/testing.md | 21 +- eng/Subsets.props | 2 +- src/installer/tests/Directory.Build.targets | 20 -- .../HostActivation.Tests/NativeHostApis.cs | 224 +++++++++--------- 
.../Assertions/CommandResultAssertions.cs | 21 +- src/installer/tests/TestUtils/TestContext.cs | 3 +- src/installer/tests/pretest.proj | 22 ++ 7 files changed, 167 insertions(+), 146 deletions(-) create mode 100644 src/installer/tests/pretest.proj diff --git a/docs/workflow/testing/host/testing.md b/docs/workflow/testing/host/testing.md index 46f7761be1fa11..a217d1dd0ab984 100644 --- a/docs/workflow/testing/host/testing.md +++ b/docs/workflow/testing/host/testing.md @@ -37,16 +37,16 @@ dotnet build src\installer\tests\HostActivation.Tests The host tests depend on: 1. Pre-built [test project](/src/installer/tests/Assets/Projects) output which will be copied and run by the tests. The `host.pretest` subset builds these projects. - 2. Product binaries in a directory layout matching that of a .NET install - 3. TestContextVariables.txt file with property and value pairs which will be read by the tests + 2. Product binaries in a directory layout matching that of a .NET install. The `host.pretest` subset creates this layout. + 3. TestContextVariables.txt files with property and value pairs which will be read by the tests. The `host.tests` subset creates these files as part of building the tests. When [running all tests](#running-all-tests), the build is configured such that these are created/performed before the start of the test run. In order to create (or update) these dependencies without running all tests: - 1. Build the `host.pretest` subset. By default, this is included in the `host` subset. This corresponds to (1) above. - 2. Run the `SetUpSharedFrameworkPublish` and `SetupTestContextVariables` targets for the desired test project. This corresponds to (2) and (3) above. For example: + 1. Build the `host.pretest` subset. By default, this is included in the `host` subset. This corresponds to (1) and (2) above. + 2. Build the desired test project. This corresponds to (3) above. 
Building the test itself will run the `SetupTestContextVariables` target, but it can also be run independently - for example: ``` - dotnet build src\installer\tests\HostActivation.Tests -t:SetUpSharedFrameworkPublish;SetupTestContextVariables -p:RuntimeConfiguration=Release -p:LibrariesConfiguration=Release + dotnet build src\installer\tests\HostActivation.Tests -t:SetupTestContextVariables -p:RuntimeConfiguration=Release -p:LibrariesConfiguration=Release ``` ## Running tests @@ -86,6 +86,15 @@ If you built the runtime or libraries with a different configuration from the ho build.cmd -vs Microsoft.DotNet.CoreSetup -rc Release -lc Release ``` +## Investigating failures + +When [running all tests](#running-all-tests), reports with results will be generated under `\artifacts\TestResults`. When [running individual tests](#running-specific-tests), results will be output to the console by default and can be configured via [`dotnet test` options](https://learn.microsoft.com/dotnet/core/tools/dotnet-test#options). + +In order to test the hosting components, the tests launch a separate process (e.g. `dotnet`, apphost, native host) and validate the expected output (standard output and error) of the launched process. This usually involves copying or creating test artifacts in the form of an application to run or a .NET install to run against. + +On failure, tests will report the file, arguments, and environment for the launched process that failed validation. With [preserved test artifacts](#preserving-test-artifacts), this information can be used to directly debug the specific scenario that the test was running. + ### Preserving test artifacts -In order to test the hosting components, the tests launch a separate process (e.g. `dotnet`, apphost, native host) and validate the expected output (standard output and error) of the launched process. This usually involves copying or creating test artifacts in the form of an application to run or a .NET install to run against. 
The tests will delete these artifacts after the test finishes. To allow inspection or usage after the test finishes, set the environment variable `PRESERVE_TEST_RUNS=1` to avoid deleting the test artifacts. +The tests will delete any generated test artifacts after the test finishes. To allow inspection or usage after the test finishes, set the environment variable `PRESERVE_TEST_RUNS=1` to avoid deleting the test artifacts. + diff --git a/eng/Subsets.props b/eng/Subsets.props index 63aca146463a5e..499e9c3cb645fe 100644 --- a/eng/Subsets.props +++ b/eng/Subsets.props @@ -500,7 +500,7 @@ - + diff --git a/src/installer/tests/Directory.Build.targets b/src/installer/tests/Directory.Build.targets index 5e952873f92e4c..9bfb4ffbea286e 100644 --- a/src/installer/tests/Directory.Build.targets +++ b/src/installer/tests/Directory.Build.targets @@ -1,25 +1,5 @@ - - - - - - - - Path.Combine(ExeDir, "pf"); - public string SelfRegistered => Path.Combine(ExeDir, "sr"); - public string WorkingDir => Path.Combine(_app.Location, "wd"); - public string ProgramFilesGlobalSdkDir => Path.Combine(ProgramFiles, "dotnet", "sdk"); - public string ProgramFilesGlobalFrameworksDir => Path.Combine(ProgramFiles, "dotnet", "shared"); - public string SelfRegisteredGlobalSdkDir => Path.Combine(SelfRegistered, "sdk"); + private readonly TestArtifact _artifact; + + public string EmptyGlobalJsonDir => Path.Combine(_artifact.Location, "wd"); + + public string ExeDir => Path.Combine(_artifact.Location, "ed"); public string LocalSdkDir => Path.Combine(ExeDir, "sdk"); public string LocalFrameworksDir => Path.Combine(ExeDir, "shared"); + public string[] LocalSdks = new[] { "0.1.2", "5.6.7-preview", "1.2.3" }; + public List<(string fwName, string[] fwVersions)> LocalFrameworks = + new List<(string fwName, string[] fwVersions)>() + { + ("HostFxr.Test.B", new[] { "4.0.0", "5.6.7-A" }), + ("HostFxr.Test.C", new[] { "3.0.0" }) + }; + + public string ProgramFiles => Path.Combine(_artifact.Location, "pf"); + public 
string ProgramFilesGlobalSdkDir => Path.Combine(ProgramFiles, "dotnet", "sdk"); + public string ProgramFilesGlobalFrameworksDir => Path.Combine(ProgramFiles, "dotnet", "shared"); public string[] ProgramFilesGlobalSdks = new[] { "4.5.6", "1.2.3", "2.3.4-preview" }; public List<(string fwName, string[] fwVersions)> ProgramFilesGlobalFrameworks = new List<(string fwName, string[] fwVersions)>() @@ -49,26 +54,20 @@ private class SdkResolutionFixture ("HostFxr.Test.A", new[] { "1.2.3", "3.0.0" }), ("HostFxr.Test.B", new[] { "5.6.7-A" }) }; + + public string SelfRegistered => Path.Combine(_artifact.Location, "sr"); + public string SelfRegisteredGlobalSdkDir => Path.Combine(SelfRegistered, "sdk"); public string[] SelfRegisteredGlobalSdks = new[] { "3.0.0", "15.1.4-preview", "5.6.7" }; - public string[] LocalSdks = new[] { "0.1.2", "5.6.7-preview", "1.2.3" }; - public List<(string fwName, string[] fwVersions)> LocalFrameworks = - new List<(string fwName, string[] fwVersions)>() - { - ("HostFxr.Test.B", new[] { "4.0.0", "5.6.7-A" }), - ("HostFxr.Test.C", new[] { "3.0.0" }) - }; - public SdkResolutionFixture(SharedTestState state) + public SdkAndFrameworkFixture() { - Dotnet = TestContext.BuiltDotNet; - - _app = state.HostApiInvokerApp.Copy(); + _artifact = TestArtifact.Create(nameof(SdkAndFrameworkFixture)); - Directory.CreateDirectory(WorkingDir); + Directory.CreateDirectory(EmptyGlobalJsonDir); // start with an empty global.json, it will be ignored, but prevent one lying on disk // on a given machine from impacting the test. 
- GlobalJson.CreateEmpty(WorkingDir); + GlobalJson.CreateEmpty(EmptyGlobalJsonDir); foreach (string sdk in ProgramFilesGlobalSdks) { @@ -114,16 +113,20 @@ static void AddFrameworkDirectory(string frameworkDir, string name, string versi File.WriteAllText(Path.Combine(versionDir, $"{name}.deps.json"), string.Empty); } } + + public void Dispose() + { + _artifact.Dispose(); + } } [Fact] [PlatformSpecific(TestPlatforms.Windows)] // The test setup only works on Windows (and MLL was Windows-only anyway) public void Hostfxr_get_available_sdks_with_multilevel_lookup() { - var f = new SdkResolutionFixture(sharedTestState); - // Starting with .NET 7, multi-level lookup is completely disabled for hostfxr API calls. // This test is still valuable to validate that it is in fact disabled + var f = sharedTestState.SdkAndFrameworkFixture; string expectedList = string.Join(';', new[] { Path.Combine(f.LocalSdkDir, "0.1.2"), @@ -132,26 +135,22 @@ public void Hostfxr_get_available_sdks_with_multilevel_lookup() }); string api = ApiNames.hostfxr_get_available_sdks; - using (TestOnlyProductBehavior.Enable(f.Dotnet.GreatestVersionHostFxrFilePath)) - { - f.Dotnet.Exec(f.AppDll, api, f.ExeDir) - .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_PROGRAM_FILES", f.ProgramFiles) - .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_SELF_REGISTERED", f.SelfRegistered) - .EnableTracingAndCaptureOutputs() - .Execute() - .Should().Pass() - .And.ReturnStatusCode(api, Constants.ErrorCode.Success) - .And.HaveStdOutContaining($"{api} sdks:[{expectedList}]"); - } + sharedTestState.TestBehaviorEnabledDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, api, f.ExeDir) + .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_PROGRAM_FILES", f.ProgramFiles) + .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_SELF_REGISTERED", f.SelfRegistered) + .EnableTracingAndCaptureOutputs() + .Execute() + .Should().Pass() + .And.ReturnStatusCode(api, Constants.ErrorCode.Success) + .And.HaveStdOutContaining($"{api} sdks:[{expectedList}]"); } 
[Fact] - public void Hostfxr_get_available_sdks_without_multilevel_lookup() + public void Hostfxr_get_available_sdks() { - // Without multi-level lookup: get only sdks sorted by ascending version - - var f = new SdkResolutionFixture(sharedTestState); + // Get SDKs sorted by ascending version + var f = sharedTestState.SdkAndFrameworkFixture; string expectedList = string.Join(';', new[] { Path.Combine(f.LocalSdkDir, "0.1.2"), @@ -160,7 +159,7 @@ public void Hostfxr_get_available_sdks_without_multilevel_lookup() }); string api = ApiNames.hostfxr_get_available_sdks; - f.Dotnet.Exec(f.AppDll, api, f.ExeDir) + TestContext.BuiltDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, api, f.ExeDir) .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() @@ -173,15 +172,14 @@ public void Hostfxr_resolve_sdk2_without_global_json_or_flags() { // with no global.json and no flags, pick latest SDK - var f = new SdkResolutionFixture(sharedTestState); - + var f = sharedTestState.SdkAndFrameworkFixture; string expectedData = string.Join(';', new[] { ("resolved_sdk_dir", Path.Combine(f.LocalSdkDir, "5.6.7-preview")), }); string api = ApiNames.hostfxr_resolve_sdk2; - f.Dotnet.Exec(f.AppDll, api, f.ExeDir, f.WorkingDir, "0") + TestContext.BuiltDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, api, f.ExeDir, f.EmptyGlobalJsonDir, "0") .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() @@ -194,15 +192,14 @@ public void Hostfxr_resolve_sdk2_without_global_json_and_disallowing_previews() { // Without global.json and disallowing previews, pick latest non-preview - var f = new SdkResolutionFixture(sharedTestState); - + var f = sharedTestState.SdkAndFrameworkFixture; string expectedData = string.Join(';', new[] { ("resolved_sdk_dir", Path.Combine(f.LocalSdkDir, "1.2.3")) }); string api = ApiNames.hostfxr_resolve_sdk2; - f.Dotnet.Exec(f.AppDll, api, f.ExeDir, f.WorkingDir, "disallow_prerelease") + TestContext.BuiltDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, api, 
f.ExeDir, f.EmptyGlobalJsonDir, "disallow_prerelease") .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() @@ -217,24 +214,26 @@ public void Hostfxr_resolve_sdk2_with_global_json_and_disallowing_previews() // since flag has no impact if global.json specifies a preview. // Also check that global.json that impacted resolution is reported. - var f = new SdkResolutionFixture(sharedTestState); - - string requestedVersion = "5.6.6-preview"; - string globalJson = GlobalJson.CreateWithVersion(f.WorkingDir, requestedVersion); - string expectedData = string.Join(';', new[] + var f = sharedTestState.SdkAndFrameworkFixture; + using (TestArtifact workingDir = TestArtifact.Create(nameof(workingDir))) { - ("resolved_sdk_dir", Path.Combine(f.LocalSdkDir, "5.6.7-preview")), - ("global_json_path", globalJson), - ("requested_version", requestedVersion), - }); + string requestedVersion = "5.6.6-preview"; + string globalJson = GlobalJson.CreateWithVersion(workingDir.Location, requestedVersion); + string expectedData = string.Join(';', new[] + { + ("resolved_sdk_dir", Path.Combine(f.LocalSdkDir, "5.6.7-preview")), + ("global_json_path", globalJson), + ("requested_version", requestedVersion), + }); - string api = ApiNames.hostfxr_resolve_sdk2; - f.Dotnet.Exec(f.AppDll, api, f.ExeDir, f.WorkingDir, "disallow_prerelease") - .EnableTracingAndCaptureOutputs() - .Execute() - .Should().Pass() - .And.ReturnStatusCode(api, Constants.ErrorCode.Success) - .And.HaveStdOutContaining($"{api} data:[{expectedData}]"); + string api = ApiNames.hostfxr_resolve_sdk2; + TestContext.BuiltDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, api, f.ExeDir, workingDir.Location, "disallow_prerelease") + .EnableTracingAndCaptureOutputs() + .Execute() + .Should().Pass() + .And.ReturnStatusCode(api, Constants.ErrorCode.Success) + .And.HaveStdOutContaining($"{api} data:[{expectedData}]"); + } } [Fact] @@ -249,7 +248,7 @@ public void Hostfxr_corehost_set_error_writer_test() [Fact] public void 
Hostfxr_get_dotnet_environment_info_dotnet_root_only() { - var f = new SdkResolutionFixture(sharedTestState); + var f = sharedTestState.SdkAndFrameworkFixture; string expectedSdkVersions = string.Join(";", new[] { "0.1.2", @@ -286,7 +285,7 @@ public void Hostfxr_get_dotnet_environment_info_dotnet_root_only() }); string api = ApiNames.hostfxr_get_dotnet_environment_info; - f.Dotnet.Exec(f.AppDll, api, f.ExeDir) + TestContext.BuiltDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, api, f.ExeDir) .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() @@ -302,7 +301,7 @@ public void Hostfxr_get_dotnet_environment_info_dotnet_root_only() [PlatformSpecific(TestPlatforms.Windows)] // The test setup only works on Windows (and MLL was Windows-only anyway) public void Hostfxr_get_dotnet_environment_info_with_multilevel_lookup_with_dotnet_root() { - var f = new SdkResolutionFixture(sharedTestState); + var f = sharedTestState.SdkAndFrameworkFixture; string expectedSdkVersions = string.Join(';', new[] { "0.1.2", @@ -338,58 +337,49 @@ public void Hostfxr_get_dotnet_environment_info_with_multilevel_lookup_with_dotn Path.Combine(f.LocalFrameworksDir, "HostFxr.Test.C") }); - using (TestOnlyProductBehavior.Enable(f.Dotnet.GreatestVersionHostFxrFilePath)) - { - string api = ApiNames.hostfxr_get_dotnet_environment_info; - f.Dotnet.Exec(f.AppDll, new[] { api, f.ExeDir }) - .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_PROGRAM_FILES", f.ProgramFiles) - .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_SELF_REGISTERED", f.SelfRegistered) - .EnableTracingAndCaptureOutputs() - .Execute() - .Should().Pass() - .And.ReturnStatusCode(api, Constants.ErrorCode.Success) - .And.HaveStdOutContaining($"{api} sdk versions:[{expectedSdkVersions}]") - .And.HaveStdOutContaining($"{api} sdk paths:[{expectedSdkPaths}]") - .And.HaveStdOutContaining($"{api} framework names:[{expectedFrameworkNames}]") - .And.HaveStdOutContaining($"{api} framework versions:[{expectedFrameworkVersions}]") - 
.And.HaveStdOutContaining($"{api} framework paths:[{expectedFrameworkPaths}]"); - } + string api = ApiNames.hostfxr_get_dotnet_environment_info; + sharedTestState.TestBehaviorEnabledDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, new[] { api, f.ExeDir }) + .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_PROGRAM_FILES", f.ProgramFiles) + .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_SELF_REGISTERED", f.SelfRegistered) + .EnableTracingAndCaptureOutputs() + .Execute() + .Should().Pass() + .And.ReturnStatusCode(api, Constants.ErrorCode.Success) + .And.HaveStdOutContaining($"{api} sdk versions:[{expectedSdkVersions}]") + .And.HaveStdOutContaining($"{api} sdk paths:[{expectedSdkPaths}]") + .And.HaveStdOutContaining($"{api} framework names:[{expectedFrameworkNames}]") + .And.HaveStdOutContaining($"{api} framework versions:[{expectedFrameworkVersions}]") + .And.HaveStdOutContaining($"{api} framework paths:[{expectedFrameworkPaths}]"); } [Fact] [PlatformSpecific(TestPlatforms.Windows)] // The test setup only works on Windows (and MLL was Windows-only anyway) public void Hostfxr_get_dotnet_environment_info_with_multilevel_lookup_only() { - var f = new SdkResolutionFixture(sharedTestState); + var f = sharedTestState.SdkAndFrameworkFixture; // Multi-level lookup is completely disabled on 7+ // The test runs the API with the dotnet root directory set to a location which doesn't have any SDKs or frameworks - using (TestOnlyProductBehavior.Enable(f.Dotnet.GreatestVersionHostFxrFilePath)) - { - // We pass f.WorkingDir so that we don't resolve dotnet_dir to the global installation - // in the native side. 
- string api = ApiNames.hostfxr_get_dotnet_environment_info; - f.Dotnet.Exec(f.AppDll, api, f.WorkingDir) - .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_PROGRAM_FILES", f.ProgramFiles) - .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_SELF_REGISTERED", f.SelfRegistered) - .EnableTracingAndCaptureOutputs() - .Execute() - .Should().Pass() - .And.ReturnStatusCode(api, Constants.ErrorCode.Success) - .And.HaveStdOutContaining($"{api} sdk versions:[]") - .And.HaveStdOutContaining($"{api} sdk paths:[]") - .And.HaveStdOutContaining($"{api} framework names:[]") - .And.HaveStdOutContaining($"{api} framework versions:[]") - .And.HaveStdOutContaining($"{api} framework paths:[]"); - } + string api = ApiNames.hostfxr_get_dotnet_environment_info; + sharedTestState.TestBehaviorEnabledDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, api, sharedTestState.HostApiInvokerApp.Location) + .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_PROGRAM_FILES", f.ProgramFiles) + .EnvironmentVariable("TEST_MULTILEVEL_LOOKUP_SELF_REGISTERED", f.SelfRegistered) + .EnableTracingAndCaptureOutputs() + .Execute() + .Should().Pass() + .And.ReturnStatusCode(api, Constants.ErrorCode.Success) + .And.HaveStdOutContaining($"{api} sdk versions:[]") + .And.HaveStdOutContaining($"{api} sdk paths:[]") + .And.HaveStdOutContaining($"{api} framework names:[]") + .And.HaveStdOutContaining($"{api} framework versions:[]") + .And.HaveStdOutContaining($"{api} framework paths:[]"); } [Fact] public void Hostfxr_get_dotnet_environment_info_global_install_path() { string api = ApiNames.hostfxr_get_dotnet_environment_info; - var f = new SdkResolutionFixture(sharedTestState); - f.Dotnet.Exec(f.AppDll, api) + TestContext.BuiltDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, api) .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() @@ -399,9 +389,8 @@ public void Hostfxr_get_dotnet_environment_info_global_install_path() [Fact] public void Hostfxr_get_dotnet_environment_info_result_is_nullptr_fails() { - var f = new 
SdkResolutionFixture(sharedTestState); string api = ApiNames.hostfxr_get_dotnet_environment_info; - f.Dotnet.Exec(f.AppDll, api, "test_invalid_result_ptr") + TestContext.BuiltDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, api, "test_invalid_result_ptr") .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() @@ -412,13 +401,11 @@ public void Hostfxr_get_dotnet_environment_info_result_is_nullptr_fails() [Fact] public void Hostfxr_get_dotnet_environment_info_reserved_is_not_nullptr_fails() { - var f = new SdkResolutionFixture(sharedTestState); string api = ApiNames.hostfxr_get_dotnet_environment_info; - f.Dotnet.Exec(f.AppDll, api, "test_invalid_reserved_ptr") + TestContext.BuiltDotNet.Exec(sharedTestState.HostApiInvokerApp.AppDll, api, "test_invalid_reserved_ptr") .EnableTracingAndCaptureOutputs() .Execute() .Should().Pass() - // 0x80008081 (InvalidArgFailure) .And.ReturnStatusCode(api, Constants.ErrorCode.InvalidArgFailure) .And.HaveStdErrContaining($"{api} received an invalid argument: reserved should be null."); } @@ -460,6 +447,11 @@ public class SharedTestState : IDisposable { public TestApp HostApiInvokerApp { get; } + public DotNetCli TestBehaviorEnabledDotNet { get; } + private readonly TestArtifact copiedDotnet; + + internal SdkAndFrameworkFixture SdkAndFrameworkFixture { get; } + public SharedTestState() { HostApiInvokerApp = TestApp.CreateFromBuiltAssets("HostApiInvokerApp"); @@ -469,11 +461,23 @@ public SharedTestState() // On non-Windows, we can't just P/Invoke to already loaded hostfxr, so copy it next to the app dll. File.Copy(Binaries.HostFxr.FilePath, Path.Combine(HostApiInvokerApp.Location, Binaries.HostFxr.FileName)); } + + // Make a copy of the built .NET, as we will enable test-only behaviour + copiedDotnet = TestArtifact.CreateFromCopy(nameof(NativeHostApis), TestContext.BuiltDotNet.BinPath); + TestBehaviorEnabledDotNet = new DotNetCli(copiedDotnet.Location); + + // Enable test-only behavior for the copied .NET. 
We don't bother disabling the behaviour later, + // as we just delete the entire copy after the tests run. + _ = TestOnlyProductBehavior.Enable(TestBehaviorEnabledDotNet.GreatestVersionHostFxrFilePath); + + SdkAndFrameworkFixture = new SdkAndFrameworkFixture(); } public void Dispose() { HostApiInvokerApp?.Dispose(); + copiedDotnet.Dispose(); + SdkAndFrameworkFixture.Dispose(); } } } diff --git a/src/installer/tests/TestUtils/Assertions/CommandResultAssertions.cs b/src/installer/tests/TestUtils/Assertions/CommandResultAssertions.cs index a43b1406189509..63f369b2a214da 100644 --- a/src/installer/tests/TestUtils/Assertions/CommandResultAssertions.cs +++ b/src/installer/tests/TestUtils/Assertions/CommandResultAssertions.cs @@ -2,6 +2,7 @@ // The .NET Foundation licenses this file to you under the MIT license. using System; +using System.Linq; using System.Text.RegularExpressions; using FluentAssertions; using FluentAssertions.Execution; @@ -147,13 +148,17 @@ public AndConstraint NotFileContains(string path, strin } public string GetDiagnosticsInfo() - { - return $"{Environment.NewLine}" + - $"File Name: {Result.StartInfo.FileName}{Environment.NewLine}" + - $"Arguments: {Result.StartInfo.Arguments}{Environment.NewLine}" + - $"Exit Code: {Result.ExitCode}{Environment.NewLine}" + - $"StdOut:{Environment.NewLine}{Result.StdOut}{Environment.NewLine}" + - $"StdErr:{Environment.NewLine}{Result.StdErr}{Environment.NewLine}"; - } + => $""" + + File Name: {Result.StartInfo.FileName} + Arguments: {Result.StartInfo.Arguments} + Environment: + {string.Join(Environment.NewLine, Result.StartInfo.Environment.Where(i => i.Key.StartsWith(Constants.DotnetRoot.EnvironmentVariable)).Select(i => $" {i.Key} = {i.Value}"))} + Exit Code: 0x{Result.ExitCode:x} + StdOut: + {Result.StdOut} + StdErr: + {Result.StdErr} + """; } } diff --git a/src/installer/tests/TestUtils/TestContext.cs b/src/installer/tests/TestUtils/TestContext.cs index 74bcf5c4f23928..4c2f7994c98906 100644 --- 
a/src/installer/tests/TestUtils/TestContext.cs +++ b/src/installer/tests/TestUtils/TestContext.cs @@ -43,8 +43,9 @@ static TestContext() TestAssetsOutput = GetTestContextVariable("TEST_ASSETS_OUTPUT"); TestArtifactsPath = GetTestContextVariable("TEST_ARTIFACTS"); + Directory.CreateDirectory(TestArtifactsPath); - BuiltDotNet = new DotNetCli(Path.Combine(TestArtifactsPath, "sharedFrameworkPublish")); + BuiltDotNet = new DotNetCli(Path.Combine(TestAssetsOutput, "sharedFrameworkPublish")); } public static string GetTestContextVariable(string name) diff --git a/src/installer/tests/pretest.proj b/src/installer/tests/pretest.proj new file mode 100644 index 00000000000000..b97a2e77c2e131 --- /dev/null +++ b/src/installer/tests/pretest.proj @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + From d86b84f2c8df7928388bcd6e23e2539a16623f36 Mon Sep 17 00:00:00 2001 From: "dotnet-maestro[bot]" <42748379+dotnet-maestro[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 20:43:18 -0400 Subject: [PATCH 104/132] [main] Update dependencies from dotnet/roslyn (#98559) * Update dependencies from https://github.com/dotnet/roslyn build 20240215.8 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-2.24115.8 * Update dependencies from https://github.com/dotnet/roslyn build 20240216.4 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-2.24116.4 * Update dependencies from https://github.com/dotnet/roslyn build 20240216.4 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-2.24116.4 * Update dependencies from https://github.com/dotnet/roslyn build 20240216.4 Microsoft.SourceBuild.Intermediate.roslyn , 
Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-2.24116.4 * Update dependencies from https://github.com/dotnet/roslyn build 20240220.1 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-2.24120.1 * Update dependencies from https://github.com/dotnet/roslyn build 20240221.2 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-2.24121.2 * Update dependencies from https://github.com/dotnet/roslyn build 20240221.4 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-2.24121.4 * Update dependencies from https://github.com/dotnet/roslyn build 20240222.5 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-2.24122.5 * Update dependencies from https://github.com/dotnet/roslyn build 20240223.6 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-2.24123.6 * Update dependencies from https://github.com/dotnet/roslyn build 20240224.2 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-2.24124.2 * Update dependencies from https://github.com/dotnet/roslyn build 20240222.4 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From 
Version 4.10.0-2.24114.13 -> To Version 4.10.0-2.24122.4 * Avoid/suppress Lock-to-object conversion warnings * Update dependencies from https://github.com/dotnet/roslyn build 20240227.1 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-2.24127.1 * Update dependencies from https://github.com/dotnet/roslyn build 20240227.10 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24127.10 * Update dependencies from https://github.com/dotnet/roslyn build 20240228.4 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24128.4 * Update dependencies from https://github.com/dotnet/roslyn build 20240301.3 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24151.3 * Update dependencies from https://github.com/dotnet/roslyn build 20240301.8 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24151.8 * Update dependencies from https://github.com/dotnet/roslyn build 20240301.8 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24151.8 * Update dependencies from https://github.com/dotnet/roslyn build 20240301.8 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 
4.10.0-3.24151.8 * Update dependencies from https://github.com/dotnet/roslyn build 20240305.1 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24155.1 * Update dependencies from https://github.com/dotnet/roslyn build 20240305.13 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24155.13 * Suppress more Lock-to-object warnings * Remove invalid source build element * Update dependencies from https://github.com/dotnet/roslyn build 20240306.11 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24156.11 * Update dependencies from https://github.com/dotnet/roslyn build 20240307.14 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24157.14 * Update dependencies from https://github.com/dotnet/roslyn build 20240308.5 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24158.5 * Update dependencies from https://github.com/dotnet/roslyn build 20240308.5 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24158.5 * Update dependencies from https://github.com/dotnet/roslyn build 20240311.2 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 
4.10.0-3.24161.2 * Update dependencies from https://github.com/dotnet/roslyn build 20240311.10 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24161.10 * Update dependencies from https://github.com/dotnet/roslyn build Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24172.1 * Update dependencies from https://github.com/dotnet/roslyn build Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24175.2 * Workaround compiler changes related to dynamic * Work around Microsoft.CSharp test failure due to dynamic changes * Update dependencies from https://github.com/dotnet/roslyn build Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.11.0-1.24176.2 * Update dependencies from https://github.com/dotnet/roslyn build 20240330.2 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.11.0-1.24180.2 * Convert ParallelTheory to Theory in InteropService test suites. The ParallelTheory creates an environment that overwhelms the GC and Finalizer thread on x86. The result is OOMs in the 32-bit memory address space. 
* Fix deadlocking in regex tests * Update dependencies from https://github.com/dotnet/roslyn build 20240402.15 Microsoft.SourceBuild.Intermediate.roslyn , Microsoft.CodeAnalysis , Microsoft.CodeAnalysis.CSharp , Microsoft.Net.Compilers.Toolset From Version 4.10.0-2.24114.13 -> To Version 4.10.0-3.24202.15 * Fix wasm * Comment out test for InvokeConstructor Roslyn no longer generates a call to InvokeConstructor. --------- Co-authored-by: dotnet-maestro[bot] Co-authored-by: Larry Ewing Co-authored-by: Jan Jones Co-authored-by: Pavel Savara Co-authored-by: Stephen Toub Co-authored-by: Aaron R Robinson Co-authored-by: Sven Boemer --- eng/Version.Details.xml | 16 +++++----- eng/Versions.props | 6 ++-- .../src/System/Threading/Condition.cs | 2 ++ .../src/System/Threading/SyncTable.cs | 2 +- .../Microsoft.CSharp/tests/BindingErrors.cs | 4 +++ ...untimeEventSource.Threading.NativeSinks.cs | 9 +++++- .../NativeRuntimeEventSource.Threading.cs | 9 +++++- .../src/System/Threading/Lock.cs | 2 +- .../CompileFails.cs | 16 +++++----- .../Compiles.cs | 4 +-- .../CompileFails.cs | 4 +-- .../Compiles.cs | 14 ++++----- ...mance.dynamic.context.operator.regclass.cs | 4 +++ ...ynamic.declarations.returnType.indexers.cs | 8 +++++ .../tests/FunctionalTests/AttRegexTests.cs | 2 +- .../tests/FunctionalTests/MonoRegexTests.cs | 2 +- .../FunctionalTests/Regex.Groups.Tests.cs | 2 +- .../Regex.KnownPattern.Tests.cs | 8 ++--- .../FunctionalTests/Regex.Match.Tests.cs | 2 +- .../FunctionalTests/Regex.Tests.Common.cs | 17 ++++++++++ .../tests/FunctionalTests/RegexPcreTests.cs | 2 +- .../tests/FunctionalTests/RegexRustTests.cs | 2 +- .../DataFlow/DynamicObjects.cs | 31 ++++++++++--------- 23 files changed, 109 insertions(+), 59 deletions(-) diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml index b7cbff725f6d4e..dfb1ea462c5c89 100644 --- a/eng/Version.Details.xml +++ b/eng/Version.Details.xml @@ -360,17 +360,17 @@ https://github.com/dotnet/runtime-assets 
f282faa0ddd1b3672a3cba54518943fb1d0b4e36 - + https://github.com/dotnet/roslyn - 77372c66fd54927312b5b0a2e399e192f74445c9 + cbca41cad4e21c29548e9e57d7135740b6f78df9 - + https://github.com/dotnet/roslyn - 77372c66fd54927312b5b0a2e399e192f74445c9 + cbca41cad4e21c29548e9e57d7135740b6f78df9 - + https://github.com/dotnet/roslyn - 77372c66fd54927312b5b0a2e399e192f74445c9 + cbca41cad4e21c29548e9e57d7135740b6f78df9 https://github.com/dotnet/roslyn-analyzers @@ -381,9 +381,9 @@ ad732e236e7ffcb66de4b45a1b736aad4ccdcd83 - + https://github.com/dotnet/roslyn - 77372c66fd54927312b5b0a2e399e192f74445c9 + cbca41cad4e21c29548e9e57d7135740b6f78df9 diff --git a/eng/Versions.props b/eng/Versions.props index c9ff6989f03a28..8b862d54f60efa 100644 --- a/eng/Versions.props +++ b/eng/Versions.props @@ -42,9 +42,9 @@ Any tools that contribute to the design-time experience should use the MicrosoftCodeAnalysisVersion_LatestVS property above to ensure they do not break the local dev experience. --> - 4.10.0-2.24114.13 - 4.10.0-2.24114.13 - 4.10.0-2.24114.13 + 4.10.0-3.24202.15 + 4.10.0-3.24202.15 + 4.10.0-3.24202.15 + + // Replace QNaN and SNaN with Zero + op1 = Avx512F.Fixup(op1, op1, Vector128.Create(0x88), 0); + + // Convert from double to long, replacing any values that were greater than or equal to MaxValue + with MaxValue + // Values that were less than or equal to MinValue will already be MinValue + return Vector128.ConditionalSelect( + Vector128.LessThan(op1, Vector128.Create(long.MaxValue)).AsInt64(), + Avx512DQ.VL.ConvertToVector128Int64(op1), + Vector128.Create(long.MaxValue) + ); + */ + if (comp->IsBaselineVector512IsaSupportedOpportunistically()) + { + // Clone the cast operand for usage. + GenTree* op1Clone1 = comp->gtClone(castOp); + BlockRange().InsertAfter(castOp, op1Clone1); + + // Generate the control table for VFIXUPIMMSD + // The behavior we want is to saturate negative values to 0. 
+ GenTreeVecCon* tbl = comp->gtNewVconNode(TYP_SIMD16); + tbl->gtSimdVal.i32[0] = (varTypeIsUnsigned(dstType)) ? 0x08080088 : 0x00000088; + BlockRange().InsertAfter(op1Clone1, tbl); + + // get a zero int node for control table + GenTree* ctrlByte = comp->gtNewIconNode(0); + BlockRange().InsertAfter(tbl, ctrlByte); + + if (varTypeIsUnsigned(dstType)) + { + // run vfixupimmsd base on table and no flags reporting + GenTree* oper1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, castOp, op1Clone1, tbl, ctrlByte, + NI_AVX512F_FixupScalar, fieldType, 16); + BlockRange().InsertAfter(ctrlByte, oper1); + LowerNode(oper1); + + // Convert to scalar + // Here, we try to insert a Vector128 to Scalar node so that the input + // can be provided to the scalar cast + GenTree* oper2 = comp->gtNewSimdHWIntrinsicNode(srcType, oper1, NI_Vector128_ToScalar, fieldType, 16); + BlockRange().InsertAfter(oper1, oper2); + LowerNode(oper2); + + castOutput = comp->gtNewCastNode(genActualType(dstType), oper2, false, dstType); + BlockRange().InsertAfter(oper2, castOutput); + } + else + { + CorInfoType destFieldType = (dstType == TYP_INT) ? CORINFO_TYPE_INT : CORINFO_TYPE_LONG; + + ssize_t actualMaxVal = (dstType == TYP_INT) ? INT32_MAX : INT64_MAX; + + // run vfixupimmsd base on table and no flags reporting + GenTree* fixupVal = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, castOp, op1Clone1, tbl, ctrlByte, + NI_AVX512F_FixupScalar, fieldType, 16); + BlockRange().InsertAfter(ctrlByte, fixupVal); + LowerNode(fixupVal); + + // get the max value vector + GenTree* maxValScalar = (srcType == TYP_DOUBLE) + ? comp->gtNewDconNodeD(static_cast(actualMaxVal)) + : comp->gtNewDconNodeF(static_cast(actualMaxVal)); + GenTree* maxVal = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, maxValScalar, fieldType, 16); + BlockRange().InsertAfter(fixupVal, maxVal); + + GenTree* maxValDstTypeScalar = (dstType == TYP_INT) ? 
comp->gtNewIconNode(actualMaxVal, dstType) + : comp->gtNewLconNode(actualMaxVal); + GenTree* maxValDstType = + comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, maxValDstTypeScalar, destFieldType, 16); + BlockRange().InsertAfter(maxVal, maxValDstType); + + // usage 1 --> compare with max value of integer + GenTree* compMask = comp->gtNewSimdCmpOpNode(GT_GE, TYP_SIMD16, fixupVal, maxVal, fieldType, 16); + BlockRange().InsertAfter(maxValDstType, compMask); + + // convert fixupVal to local variable and clone it for further use + LIR::Use fixupValUse(BlockRange(), &(compMask->AsHWIntrinsic()->Op(1)), compMask); + ReplaceWithLclVar(fixupValUse); + fixupVal = compMask->AsHWIntrinsic()->Op(1); + GenTree* fixupValClone = comp->gtClone(fixupVal); + LowerNode(compMask); + BlockRange().InsertAfter(fixupVal, fixupValClone); + + GenTree* FixupValCloneScalar = + comp->gtNewSimdHWIntrinsicNode(srcType, fixupValClone, NI_Vector128_ToScalar, fieldType, 16); + BlockRange().InsertAfter(compMask, FixupValCloneScalar); + LowerNode(FixupValCloneScalar); + + // cast it + GenTreeCast* newCast = comp->gtNewCastNode(dstType, FixupValCloneScalar, false, dstType); + BlockRange().InsertAfter(FixupValCloneScalar, newCast); + + GenTree* newTree = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, newCast, destFieldType, 16); + BlockRange().InsertAfter(newCast, newTree); + LowerNode(newTree); + + // usage 2 --> use thecompared mask with input value and max value to blend + GenTree* control = comp->gtNewIconNode(0xCA); // (B & A) | (C & ~A) + BlockRange().InsertAfter(newTree, control); + GenTree* cndSelect = comp->gtNewSimdTernaryLogicNode(TYP_SIMD16, compMask, maxValDstType, newTree, + control, destFieldType, 16); + BlockRange().InsertAfter(control, cndSelect); + LowerNode(cndSelect); + + castOutput = + comp->gtNewSimdHWIntrinsicNode(dstType, cndSelect, NI_Vector128_ToScalar, destFieldType, 16); + BlockRange().InsertAfter(cndSelect, castOutput); + LowerNode(castOutput); + } + } + else if 
(varTypeIsSigned(dstType) && comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) + { + CorInfoType destFieldType = (dstType == TYP_INT) ? CORINFO_TYPE_INT : CORINFO_TYPE_LONG; + + ssize_t actualMaxVal = (dstType == TYP_INT) ? INT32_MAX : INT64_MAX; + + // create clones for usage + GenTree* castOpClone1 = comp->gtClone(castOp); + GenTree* castOpClone2 = comp->gtClone(castOp); + BlockRange().InsertAfter(castOp, castOpClone1); + BlockRange().InsertAfter(castOpClone1, castOpClone2); + + GenTree* oper = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, castOp, fieldType, 16); + BlockRange().InsertAfter(castOpClone2, oper); + LowerNode(oper); + GenTree* op1Clone1 = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, castOpClone1, fieldType, 16); + BlockRange().InsertAfter(oper, op1Clone1); + LowerNode(op1Clone1); + GenTree* op1Clone2 = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, castOpClone2, fieldType, 16); + BlockRange().InsertAfter(op1Clone1, op1Clone2); + LowerNode(op1Clone2); + + // check NaN + GenTree* mask1 = comp->gtNewSimdCmpOpNode(GT_EQ, TYP_SIMD16, oper, op1Clone1, fieldType, 16); + BlockRange().InsertAfter(op1Clone2, mask1); + LowerNode(mask1); + // inp = inp & mask + GenTree* maskNaN = comp->gtNewSimdBinOpNode(GT_AND, TYP_SIMD16, op1Clone2, mask1, fieldType, 16); + BlockRange().InsertAfter(mask1, maskNaN); + LowerNode(maskNaN); + + // get the max value vector + GenTree* maxVal = (srcType == TYP_DOUBLE) ? comp->gtNewDconNodeD(static_cast(actualMaxVal)) + : comp->gtNewDconNodeF(static_cast(actualMaxVal)); + GenTree* maxValDup = + (dstType == TYP_INT) ? 
comp->gtNewIconNode(actualMaxVal) : comp->gtNewLconNode(actualMaxVal); + maxVal = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, maxVal, fieldType, 16); + BlockRange().InsertAfter(maskNaN, maxVal); + maxValDup = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, maxValDup, destFieldType, 16); + BlockRange().InsertAfter(maxVal, maxValDup); + + // usage 1 --> compare with max value of integer + GenTree* compMask = comp->gtNewSimdCmpOpNode(GT_GE, TYP_SIMD16, maskNaN, maxVal, fieldType, 16); + BlockRange().InsertAfter(maxValDup, compMask); + + // we will be using the maskNaN value twice + LIR::Use maskNaNUse(BlockRange(), &(compMask->AsHWIntrinsic()->Op(1)), compMask); + ReplaceWithLclVar(maskNaNUse); + maskNaN = compMask->AsHWIntrinsic()->Op(1); + GenTree* maskNaNClone = comp->gtClone(maskNaN); + LowerNode(compMask); + BlockRange().InsertAfter(maskNaN, maskNaNClone); + + // convert to scalar for conversion + GenTree* maskNaNCloneScalar = + comp->gtNewSimdHWIntrinsicNode(srcType, maskNaNClone, NI_Vector128_ToScalar, fieldType, 16); + BlockRange().InsertAfter(compMask, maskNaNCloneScalar); + LowerNode(maskNaNCloneScalar); + + // cast it + GenTreeCast* newCast = comp->gtNewCastNode(dstType, maskNaNCloneScalar, false, dstType); + BlockRange().InsertAfter(maskNaNCloneScalar, newCast); + GenTree* newTree = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, newCast, destFieldType, 16); + BlockRange().InsertAfter(newCast, newTree); + LowerNode(newTree); + + // usage 2 --> use thecompared mask with input value and max value to blend + GenTree* cndSelect = comp->gtNewSimdCndSelNode(TYP_SIMD16, compMask, maxValDup, newTree, destFieldType, 16); + BlockRange().InsertAfter(newTree, cndSelect); + LowerNode(cndSelect); + + castOutput = comp->gtNewSimdHWIntrinsicNode(dstType, cndSelect, NI_Vector128_ToScalar, destFieldType, 16); + BlockRange().InsertAfter(cndSelect, castOutput); + LowerNode(castOutput); + } + else + { + // The remaining case not handled above should be conversion + // to 
TYP_UINT in case where SSE41 is supported. + // We should have converted float -> uint conversion to + // float -> double -> uint during morph. + assert((dstType == TYP_UINT) && comp->compIsaSupportedDebugOnly(InstructionSet_SSE41) && + (srcType != TYP_FLOAT)); + + ssize_t actualMaxVal = UINT32_MAX; + CorInfoType destFieldType = CORINFO_TYPE_LONG; + + GenTree* castOpClone1 = comp->gtClone(castOp); + GenTree* castOpClone2 = comp->gtClone(castOp); + GenTree* castOpClone3 = comp->gtClone(castOp); + BlockRange().InsertAfter(castOp, castOpClone1); + BlockRange().InsertAfter(castOpClone1, castOpClone2); + BlockRange().InsertAfter(castOpClone2, castOpClone3); + + GenTree* oper = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, castOp, fieldType, 16); + BlockRange().InsertAfter(castOpClone3, oper); + LowerNode(oper); + GenTree* op1Clone1 = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, castOpClone1, fieldType, 16); + BlockRange().InsertAfter(oper, op1Clone1); + LowerNode(op1Clone1); + GenTree* op1Clone2 = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, castOpClone2, fieldType, 16); + BlockRange().InsertAfter(op1Clone1, op1Clone2); + LowerNode(op1Clone2); + GenTree* op1Clone3 = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, castOpClone3, fieldType, 16); + BlockRange().InsertAfter(op1Clone2, op1Clone3); + LowerNode(op1Clone3); + + // get the max/min value vector + GenTree* minVal = comp->gtNewDconNodeD(static_cast(0)); + minVal = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, minVal, fieldType, 16); + BlockRange().InsertAfter(op1Clone3, minVal); + GenTree* maxVal = comp->gtNewDconNodeD(static_cast(actualMaxVal)); + maxVal = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, maxVal, fieldType, 16); + BlockRange().InsertAfter(minVal, maxVal); + + // check NaN + GenTree* mask1 = comp->gtNewSimdCmpOpNode(GT_EQ, TYP_SIMD16, oper, op1Clone1, fieldType, 16); + BlockRange().InsertAfter(maxVal, mask1); + LowerNode(mask1); + + // check negative + GenTree* mask2 = 
comp->gtNewSimdCmpOpNode(GT_GE, TYP_SIMD16, op1Clone2, minVal, fieldType, 16); + BlockRange().InsertAfter(mask1, mask2); + LowerNode(mask2); + + // and mask + GenTree* mask12 = comp->gtNewSimdBinOpNode(GT_AND, TYP_SIMD16, mask1, mask2, fieldType, 16); + BlockRange().InsertAfter(mask2, mask12); + LowerNode(mask12); + + // inp = inp & mask + GenTree* saturatedVal = comp->gtNewSimdBinOpNode(GT_AND, TYP_SIMD16, op1Clone3, mask12, fieldType, 16); + BlockRange().InsertAfter(mask12, saturatedVal); + LowerNode(saturatedVal); + + // compare with max value of uint + GenTree* mask3 = comp->gtNewSimdCmpOpNode(GT_GE, TYP_SIMD16, saturatedVal, maxVal, fieldType, 16); + BlockRange().InsertAfter(saturatedVal, mask3); + + // Convert both the operands of mask3 to local variables for reusage + LIR::Use saturatedValUse(BlockRange(), &(mask3->AsHWIntrinsic()->Op(1)), mask3); + ReplaceWithLclVar(saturatedValUse); + saturatedVal = mask3->AsHWIntrinsic()->Op(1); + GenTree* saturatedValDup = comp->gtClone(saturatedVal); + BlockRange().InsertAfter(saturatedVal, saturatedValDup); + + LIR::Use maxValUse(BlockRange(), &(mask3->AsHWIntrinsic()->Op(2)), mask3); + ReplaceWithLclVar(maxValUse); + maxVal = mask3->AsHWIntrinsic()->Op(2); + GenTree* maxValDup = comp->gtClone(maxVal); + LowerNode(mask3); + BlockRange().InsertAfter(maxVal, maxValDup); + + // Select based on mask3 + GenTree* castOpVal = + comp->gtNewSimdCndSelNode(TYP_SIMD16, mask3, maxValDup, saturatedValDup, fieldType, 16); + BlockRange().InsertAfter(mask3, castOpVal); + LowerNode(castOpVal); + + // scalar + GenTree* castOpValScalar = + comp->gtNewSimdHWIntrinsicNode(srcType, castOpVal, NI_Vector128_ToScalar, fieldType, 16); + BlockRange().InsertAfter(castOpVal, castOpValScalar); + LowerNode(castOpValScalar); + + // cast it + castOutput = comp->gtNewCastNode(TYP_INT, castOpValScalar, false, dstType); + BlockRange().InsertAfter(castOpValScalar, castOutput); + } + assert(castOutput != nullptr); + LIR::Use use; + if 
(BlockRange().TryGetUse(tree, &use)) + { + use.ReplaceWith(castOutput); + } + else + { + castOutput->SetUnusedValue(); + } + BlockRange().Remove(tree); + return castOutput->gtNext; + } +#endif // TARGET_AMD64 + // Case of src is a small type and dst is a floating point type. if (varTypeIsSmall(srcType) && varTypeIsFloating(castToType)) { @@ -880,6 +1194,7 @@ void Lowering::LowerCast(GenTree* tree) // Now determine if we have operands that should be contained. ContainCheckCast(tree->AsCast()); + return nullptr; } #ifdef FEATURE_HW_INTRINSICS diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 4b301696a1eeb3..e140c395505330 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -335,14 +335,20 @@ GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) && tree->gtOverflow() #elif defined(TARGET_AMD64) // Amd64: src = float, dst = uint64 or overflow conversion. - // This goes through helper and hence src needs to be converted to double. - && (tree->gtOverflow() || (dstType == TYP_ULONG)) + // src needs to be converted to double except for the following cases + // dstType = int/uint/ulong for AVX512F + // dstType = int for SSE41 + // For pre-SSE41, the all src is converted to TYP_DOUBLE + // and goes through helpers. + && (tree->gtOverflow() || (dstType == TYP_LONG) || + !(compOpportunisticallyDependsOn(InstructionSet_AVX512F) || + (dstType == TYP_INT && compOpportunisticallyDependsOn(InstructionSet_SSE41)))) #elif defined(TARGET_ARM) // Arm: src = float, dst = int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType)) #else // x86: src = float, dst = uint32/int64/uint64 or overflow conversion. 
- && (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT)) + && (tree->gtOverflow() || varTypeIsIntegral(dstType)) #endif ) { @@ -368,25 +374,39 @@ GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) #if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) return nullptr; #else +#if defined(TARGET_AMD64) + // Following nodes are handled when lowering the nodes + // float -> ulong/uint/int for AVX512F + // double -> ulong/uint/long/int for AVX512F + // float -> int for SSE41 + // double -> int/uint/long for SSE41 + // For all other conversions, we use helper functions. + if (compOpportunisticallyDependsOn(InstructionSet_AVX512F) || + ((dstType != TYP_ULONG) && compOpportunisticallyDependsOn(InstructionSet_SSE41))) + { + if (tree->CastOp() != oper) + { + tree->CastOp() = oper; + } + return nullptr; + } +#endif // TARGET_AMD64 switch (dstType) { case TYP_INT: +#ifdef TARGET_XARCH + return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT, oper); +#endif // TARGET_XARCH return nullptr; case TYP_UINT: -#if defined(TARGET_ARM) || defined(TARGET_AMD64) +#if defined(TARGET_ARM) return nullptr; -#else // TARGET_X86 +#endif return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper); -#endif // TARGET_X86 case TYP_LONG: -#ifdef TARGET_AMD64 - // SSE2 has instructions to convert a float/double directly to a long - return nullptr; -#else // !TARGET_AMD64 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper); -#endif // !TARGET_AMD64 case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper); diff --git a/src/coreclr/jit/simdashwintrinsic.cpp b/src/coreclr/jit/simdashwintrinsic.cpp index c22ebc7b635440..9ffd3b7b011d55 100644 --- a/src/coreclr/jit/simdashwintrinsic.cpp +++ b/src/coreclr/jit/simdashwintrinsic.cpp @@ -513,23 +513,44 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, switch (intrinsic) { #if defined(TARGET_XARCH) + case NI_VectorT_ConvertToDouble: + { + if 
(IsBaselineVector512IsaSupportedOpportunistically()) + { + break; + } + return nullptr; + } + case NI_VectorT_ConvertToInt64: case NI_VectorT_ConvertToUInt32: case NI_VectorT_ConvertToUInt64: { - // TODO-XARCH-CQ: These intrinsics should be accelerated + if (IsBaselineVector512IsaSupportedOpportunistically()) + { + break; + } + return nullptr; + } + + case NI_VectorT_ConvertToInt32: + { + if (compOpportunisticallyDependsOn(InstructionSet_SSE41)) + { + break; + } return nullptr; } case NI_VectorT_ConvertToSingle: { - if (simdBaseType == TYP_UINT) + if ((simdBaseType == TYP_INT) || + (simdBaseType == TYP_UINT && IsBaselineVector512IsaSupportedOpportunistically())) { - // TODO-XARCH-CQ: These intrinsics should be accelerated - return nullptr; + break; } - break; + return nullptr; } #endif // TARGET_XARCH @@ -1154,50 +1175,95 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, } #if defined(TARGET_XARCH) + + case NI_VectorT_ConvertToInt64: + { + assert(sig->numArgs == 1); + assert(simdBaseType == TYP_DOUBLE); + return gtNewSimdCvtNode(retType, op1, CORINFO_TYPE_LONG, simdBaseJitType, simdSize); + } + + case NI_VectorT_ConvertToUInt32: + { + assert(sig->numArgs == 1); + assert(simdBaseType == TYP_FLOAT); + return gtNewSimdCvtNode(retType, op1, CORINFO_TYPE_UINT, simdBaseJitType, simdSize); + } + + case NI_VectorT_ConvertToUInt64: + { + assert(sig->numArgs == 1); + assert(simdBaseType == TYP_DOUBLE); + return gtNewSimdCvtNode(retType, op1, CORINFO_TYPE_ULONG, simdBaseJitType, simdSize); + } + case NI_VectorT_ConvertToInt32: { assert(simdBaseType == TYP_FLOAT); - NamedIntrinsic convert; + return gtNewSimdCvtNode(retType, op1, CORINFO_TYPE_INT, simdBaseJitType, simdSize); + } - switch (simdSize) + case NI_VectorT_ConvertToDouble: + { + assert(sig->numArgs == 1); + assert(varTypeIsLong(simdBaseType)); + NamedIntrinsic intrinsic = NI_Illegal; + if (simdSize == 64) { - case 16: - convert = NI_SSE2_ConvertToVector128Int32WithTruncation; - break; - case 
32: - convert = NI_AVX_ConvertToVector256Int32WithTruncation; - break; - case 64: - convert = NI_AVX512F_ConvertToVector512Int32WithTruncation; - break; - default: - unreached(); + intrinsic = NI_AVX512DQ_ConvertToVector512Double; } - - return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize); + else if (simdSize == 32) + { + intrinsic = NI_AVX512DQ_VL_ConvertToVector256Double; + } + else + { + assert(simdSize == 16); + intrinsic = NI_AVX512DQ_VL_ConvertToVector128Double; + } + return gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); } case NI_VectorT_ConvertToSingle: { - assert(simdBaseType == TYP_INT); - NamedIntrinsic convert; - - switch (simdSize) + assert(varTypeIsInt(simdBaseType)); + NamedIntrinsic intrinsic = NI_Illegal; + if (simdBaseType == TYP_INT) { - case 16: - convert = NI_SSE2_ConvertToVector128Single; - break; - case 32: - convert = NI_AVX_ConvertToVector256Single; - break; - case 64: - convert = NI_AVX512F_ConvertToVector512Single; - break; - default: - unreached(); + switch (simdSize) + { + case 16: + intrinsic = NI_SSE2_ConvertToVector128Single; + break; + case 32: + intrinsic = NI_AVX_ConvertToVector256Single; + break; + case 64: + intrinsic = NI_AVX512F_ConvertToVector512Single; + break; + default: + unreached(); + } } - - return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize); + else if (simdBaseType == TYP_UINT) + { + switch (simdSize) + { + case 16: + intrinsic = NI_AVX512F_VL_ConvertToVector128Single; + break; + case 32: + intrinsic = NI_AVX512F_VL_ConvertToVector256Single; + break; + case 64: + intrinsic = NI_AVX512F_ConvertToVector512Single; + break; + default: + unreached(); + } + } + assert(intrinsic != NI_Illegal); + return gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); } #elif defined(TARGET_ARM64) case NI_VectorT_ConvertToDouble: diff --git a/src/coreclr/nativeaot/Runtime/MathHelpers.cpp 
b/src/coreclr/nativeaot/Runtime/MathHelpers.cpp index b5c4a251c82ba0..73a5aa924794dd 100644 --- a/src/coreclr/nativeaot/Runtime/MathHelpers.cpp +++ b/src/coreclr/nativeaot/Runtime/MathHelpers.cpp @@ -11,6 +11,10 @@ FCIMPL1_D(uint64_t, RhpDbl2ULng, double val) { +#if defined(HOST_X86) || defined(HOST_AMD64) + const double uint64_max_plus_1 = 4294967296.0 * 4294967296.0; + return (val > 0) ? ((val >= uint64_max_plus_1) ? UINT64_MAX : (uint64_t)val) : 0; +#else const double two63 = 2147483648.0 * 4294967296.0; uint64_t ret; if (val < two63) @@ -23,6 +27,42 @@ FCIMPL1_D(uint64_t, RhpDbl2ULng, double val) ret = (int64_t)(val - two63) + I64(0x8000000000000000); } return ret; +#endif //HOST_X86 || HOST_AMD64 +} +FCIMPLEND + +FCIMPL1_D(int64_t, RhpDbl2Lng, double val) +{ +#if defined(HOST_X86) || defined(HOST_AMD64) + const double int64_min = -2147483648.0 * 4294967296.0; + const double int64_max = 2147483648.0 * 4294967296.0; + return (val != val) ? 0 : (val <= int64_min) ? INT64_MIN : (val >= int64_max) ? INT64_MAX : (int64_t)val; +#else + return (int64_t)val; +#endif //HOST_X86 || HOST_AMD64 +} +FCIMPLEND + +FCIMPL1_D(int32_t, RhpDbl2Int, double val) +{ +#if defined(HOST_X86) || defined(HOST_AMD64) + const double int32_min = -2147483648.0; + const double int32_max_plus_1 = 2147483648.0; + return (val != val) ? 0 : (val <= int32_min) ? INT32_MIN : (val >= int32_max_plus_1) ? INT32_MAX : (int32_t)val; +#else + return (int32_t)val; +#endif //HOST_X86 || HOST_AMD64 +} +FCIMPLEND + +FCIMPL1_D(uint32_t, RhpDbl2UInt, double val) +{ +#if defined(HOST_X86) || defined(HOST_AMD64) + const double uint_max = 4294967295.0; + return (val > 0) ? ((val >= uint_max) ? 
UINT32_MAX : (uint32_t)val) : 0; +#else + return (uint32_t)val; +#endif //HOST_X86 || HOST_AMD64 } FCIMPLEND @@ -51,24 +91,6 @@ EXTERN_C uint64_t QCALLTYPE RhpULMod(uint64_t i, uint64_t j) return i % j; } -FCIMPL1_D(int64_t, RhpDbl2Lng, double val) -{ - return (int64_t)val; -} -FCIMPLEND - -FCIMPL1_D(int32_t, RhpDbl2Int, double val) -{ - return (int32_t)val; -} -FCIMPLEND - -FCIMPL1_D(uint32_t, RhpDbl2UInt, double val) -{ - return (uint32_t)val; -} -FCIMPLEND - FCIMPL1_L(double, RhpLng2Dbl, int64_t val) { return (double)val; @@ -336,4 +358,4 @@ FCIMPL2_FI(float, modff, float x, float* intptr) return std::modff(x, intptr); FCIMPLEND -#endif +#endif \ No newline at end of file diff --git a/src/coreclr/vm/i386/jithelp.S b/src/coreclr/vm/i386/jithelp.S index c1da6f4dcb8014..d0275252027814 100644 --- a/src/coreclr/vm/i386/jithelp.S +++ b/src/coreclr/vm/i386/jithelp.S @@ -551,87 +551,6 @@ LOCAL_LABEL(LRszMORE32): ret LEAF_END JIT_LRsz, _TEXT -// *********************************************************************/ -// JIT_Dbl2LngP4x87 -// -// Purpose: -// converts a double to a long truncating toward zero (C semantics) -// -// uses stdcall calling conventions -// -// This code is faster on a P4 than the Dbl2Lng code above, but is -// slower on a PIII. Hence we choose this code when on a P4 or above. 
-// -LEAF_ENTRY JIT_Dbl2LngP4x87, _TEXT - // get some local space - sub esp, 8 - - #define arg1 [esp + 0x0C] - fld QWORD PTR arg1 // fetch arg - fnstcw WORD PTR arg1 // store FPCW - movzx eax, WORD PTR arg1 // zero extend - wide - or ah, 0x0C // turn on OE and DE flags - mov DWORD PTR [esp], eax // store new FPCW bits - fldcw WORD PTR [esp] // reload FPCW with new bits - fistp QWORD PTR [esp] // convert - - // reload FP result - mov eax, DWORD PTR [esp] - mov edx, DWORD PTR [esp + 4] - - // reload original FPCW value - fldcw WORD PTR arg1 - #undef arg1 - - // restore stack - add esp, 8 - - ret -LEAF_END JIT_Dbl2LngP4x87, _TEXT - -// *********************************************************************/ -// JIT_Dbl2LngSSE3 -// -// Purpose: -// converts a double to a long truncating toward zero (C semantics) -// -// uses stdcall calling conventions -// -// This code is faster than the above P4 x87 code for Intel processors -// equal or later than Core2 and Atom that have SSE3 support -// -LEAF_ENTRY JIT_Dbl2LngSSE3, _TEXT - // get some local space - sub esp, 8 - - fld QWORD PTR [esp + 0x0C] // fetch arg - fisttp QWORD PTR [esp] // convert - mov eax, DWORD PTR [esp] // reload FP result - mov edx, DWORD PTR [esp + 4] - - // restore stack - add esp, 8 - - ret -LEAF_END JIT_Dbl2LngSSE3, _TEXT - -// *********************************************************************/ -// JIT_Dbl2IntSSE2 -// -// Purpose: -// converts a double to a long truncating toward zero (C semantics) -// -// uses stdcall calling conventions -// -// This code is even faster than the P4 x87 code for Dbl2LongP4x87, -// but only returns a 32 bit value (only good for int). 
-// -LEAF_ENTRY JIT_Dbl2IntSSE2, _TEXT - movsd xmm0, [esp + 4] - cvttsd2si eax, xmm0 - ret -LEAF_END JIT_Dbl2IntSSE2, _TEXT - // *********************************************************************/ // JIT_StackProbe // diff --git a/src/coreclr/vm/i386/jithelp.asm b/src/coreclr/vm/i386/jithelp.asm index 0faf7cde0e0b26..c2011190abc3fb 100644 --- a/src/coreclr/vm/i386/jithelp.asm +++ b/src/coreclr/vm/i386/jithelp.asm @@ -36,11 +36,6 @@ JIT_LLsh TEXTEQU <_JIT_LLsh@0> JIT_LRsh TEXTEQU <_JIT_LRsh@0> JIT_LRsz TEXTEQU <_JIT_LRsz@0> JIT_LMul TEXTEQU <@JIT_LMul@16> -JIT_Dbl2LngOvf TEXTEQU <@JIT_Dbl2LngOvf@8> -JIT_Dbl2Lng TEXTEQU <@JIT_Dbl2Lng@8> -JIT_Dbl2IntSSE2 TEXTEQU <@JIT_Dbl2IntSSE2@8> -JIT_Dbl2LngP4x87 TEXTEQU <@JIT_Dbl2LngP4x87@8> -JIT_Dbl2LngSSE3 TEXTEQU <@JIT_Dbl2LngSSE3@8> JIT_InternalThrowFromHelper TEXTEQU <@JIT_InternalThrowFromHelper@4> JIT_WriteBarrierReg_PreGrow TEXTEQU <_JIT_WriteBarrierReg_PreGrow@0> JIT_WriteBarrierReg_PostGrow TEXTEQU <_JIT_WriteBarrierReg_PostGrow@0> @@ -637,182 +632,6 @@ LMul_hard: JIT_LMul ENDP -;*********************************************************************/ -; JIT_Dbl2LngOvf - -;Purpose: -; converts a double to a long truncating toward zero (C semantics) -; with check for overflow -; -; uses stdcall calling conventions -; -PUBLIC JIT_Dbl2LngOvf -JIT_Dbl2LngOvf PROC - fnclex - fld qword ptr [esp+4] - push ecx - push ecx - fstp qword ptr [esp] - call JIT_Dbl2Lng - mov ecx,eax - fnstsw ax - test ax,01h - jnz Dbl2LngOvf_throw - mov eax,ecx - ret 8 - -Dbl2LngOvf_throw: - mov ECX, CORINFO_OverflowException_ASM - call JIT_InternalThrowFromHelper - ret 8 -JIT_Dbl2LngOvf ENDP - -;*********************************************************************/ -; JIT_Dbl2Lng - -;Purpose: -; converts a double to a long truncating toward zero (C semantics) -; -; uses stdcall calling conventions -; -; note that changing the rounding mode is very expensive. 
This -; routine basiclly does the truncation semantics without changing -; the rounding mode, resulting in a win. -; -PUBLIC JIT_Dbl2Lng -JIT_Dbl2Lng PROC - fld qword ptr[ESP+4] ; fetch arg - lea ecx,[esp-8] - sub esp,16 ; allocate frame - and ecx,-8 ; align pointer on boundary of 8 - fld st(0) ; duplciate top of stack - fistp qword ptr[ecx] ; leave arg on stack, also save in temp - fild qword ptr[ecx] ; arg, round(arg) now on stack - mov edx,[ecx+4] ; high dword of integer - mov eax,[ecx] ; low dword of integer - test eax,eax - je integer_QNaN_or_zero - -arg_is_not_integer_QNaN: - fsubp st(1),st ; TOS=d-round(d), - ; { st(1)=st(1)-st & pop ST } - test edx,edx ; what's sign of integer - jns positive - ; number is negative - ; dead cycle - ; dead cycle - fstp dword ptr[ecx] ; result of subtraction - mov ecx,[ecx] ; dword of difference(single precision) - add esp,16 - xor ecx,80000000h - add ecx,7fffffffh ; if difference>0 then increment integer - adc eax,0 ; inc eax (add CARRY flag) - adc edx,0 ; propagate carry flag to upper bits - ret 8 - -positive: - fstp dword ptr[ecx] ;17-18 ; result of subtraction - mov ecx,[ecx] ; dword of difference (single precision) - add esp,16 - add ecx,7fffffffh ; if difference<0 then decrement integer - sbb eax,0 ; dec eax (subtract CARRY flag) - sbb edx,0 ; propagate carry flag to upper bits - ret 8 - -integer_QNaN_or_zero: - test edx,7fffffffh - jnz arg_is_not_integer_QNaN - fstp st(0) ;; pop round(arg) - fstp st(0) ;; arg - add esp,16 - ret 8 -JIT_Dbl2Lng ENDP - -;*********************************************************************/ -; JIT_Dbl2LngP4x87 - -;Purpose: -; converts a double to a long truncating toward zero (C semantics) -; -; uses stdcall calling conventions -; -; This code is faster on a P4 than the Dbl2Lng code above, but is -; slower on a PIII. Hence we choose this code when on a P4 or above. 
-; -PUBLIC JIT_Dbl2LngP4x87 -JIT_Dbl2LngP4x87 PROC -arg1 equ <[esp+0Ch]> - - sub esp, 8 ; get some local space - - fld qword ptr arg1 ; fetch arg - fnstcw word ptr arg1 ; store FPCW - movzx eax, word ptr arg1 ; zero extend - wide - or ah, 0Ch ; turn on OE and DE flags - mov dword ptr [esp], eax ; store new FPCW bits - fldcw word ptr [esp] ; reload FPCW with new bits - fistp qword ptr [esp] ; convert - mov eax, dword ptr [esp] ; reload FP result - mov edx, dword ptr [esp+4] ; - fldcw word ptr arg1 ; reload original FPCW value - - add esp, 8 ; restore stack - - ret 8 -JIT_Dbl2LngP4x87 ENDP - -;*********************************************************************/ -; JIT_Dbl2LngSSE3 - -;Purpose: -; converts a double to a long truncating toward zero (C semantics) -; -; uses stdcall calling conventions -; -; This code is faster than the above P4 x87 code for Intel processors -; equal or later than Core2 and Atom that have SSE3 support -; -.686P -.XMM -PUBLIC JIT_Dbl2LngSSE3 -JIT_Dbl2LngSSE3 PROC -arg1 equ <[esp+0Ch]> - - sub esp, 8 ; get some local space - - fld qword ptr arg1 ; fetch arg - fisttp qword ptr [esp] ; convert - mov eax, dword ptr [esp] ; reload FP result - mov edx, dword ptr [esp+4] - - add esp, 8 ; restore stack - - ret 8 -JIT_Dbl2LngSSE3 ENDP -.586 - -;*********************************************************************/ -; JIT_Dbl2IntSSE2 - -;Purpose: -; converts a double to a long truncating toward zero (C semantics) -; -; uses stdcall calling conventions -; -; This code is even faster than the P4 x87 code for Dbl2LongP4x87, -; but only returns a 32 bit value (only good for int). -; -.686P -.XMM -PUBLIC JIT_Dbl2IntSSE2 -JIT_Dbl2IntSSE2 PROC - $movsd xmm0, [esp+4] - cvttsd2si eax, xmm0 - ret 8 -JIT_Dbl2IntSSE2 ENDP -.586 - - ;*********************************************************************/ ; This is the small write barrier thunk we use when we know the ; ephemeral generation is higher in memory than older generations. 
@@ -1214,39 +1033,6 @@ JIT_TailCallVSDLeave: JIT_TailCall ENDP - -;------------------------------------------------------------------------------ - -; HCIMPL2_VV(float, JIT_FltRem, float dividend, float divisor) -@JIT_FltRem@8 proc public - fld dword ptr [esp+4] ; divisor - fld dword ptr [esp+8] ; dividend -fremloop: - fprem - fstsw ax - fwait - sahf - jp fremloop ; Continue while the FPU status bit C2 is set - fxch ; swap, so divisor is on top and result is in st(1) - fstp ST(0) ; Pop the divisor from the FP stack - retn 8 ; Return value is in st(0) -@JIT_FltRem@8 endp - -; HCIMPL2_VV(float, JIT_DblRem, float dividend, float divisor) -@JIT_DblRem@16 proc public - fld qword ptr [esp+4] ; divisor - fld qword ptr [esp+12] ; dividend -fremloopd: - fprem - fstsw ax - fwait - sahf - jp fremloopd ; Continue while the FPU status bit C2 is set - fxch ; swap, so divisor is on top and result is in st(1) - fstp ST(0) ; Pop the divisor from the FP stack - retn 16 ; Return value is in st(0) -@JIT_DblRem@16 endp - ;------------------------------------------------------------------------------ ; PatchedCodeStart and PatchedCodeEnd are used to determine bounds of patched code. diff --git a/src/coreclr/vm/i386/jitinterfacex86.cpp b/src/coreclr/vm/i386/jitinterfacex86.cpp index 08360e9ff0c060..bfc7c0abc674b8 100644 --- a/src/coreclr/vm/i386/jitinterfacex86.cpp +++ b/src/coreclr/vm/i386/jitinterfacex86.cpp @@ -96,26 +96,6 @@ extern "C" void STDCALL WriteBarrierAssert(BYTE* ptr, Object* obj) #endif // _DEBUG -#ifndef TARGET_UNIX - -HCIMPL1_V(INT32, JIT_Dbl2IntOvf, double val) -{ - FCALL_CONTRACT; - - INT64 ret = HCCALL1_V(JIT_Dbl2Lng, val); - - if (ret != (INT32) ret) - goto THROW; - - return (INT32) ret; - -THROW: - FCThrow(kOverflowException); -} -HCIMPLEND -#endif // TARGET_UNIX - - FCDECL1(Object*, JIT_New, CORINFO_CLASS_HANDLE typeHnd_); @@ -961,32 +941,6 @@ void InitJITHelpers1() JIT_TrialAlloc::Flags flags = GCHeapUtilities::UseThreadAllocationContexts() ? 
JIT_TrialAlloc::MP_ALLOCATOR : JIT_TrialAlloc::NORMAL; - // Get CPU features and check for SSE2 support. - // This code should eventually probably be moved into codeman.cpp, - // where we set the cpu feature flags for the JIT based on CPU type and features. - int cpuFeatures[4]; - __cpuid(cpuFeatures, 1); - - DWORD dwCPUFeaturesECX = cpuFeatures[2]; - DWORD dwCPUFeaturesEDX = cpuFeatures[3]; - - // If bit 26 (SSE2) is set, then we can use the SSE2 flavors - // and faster x87 implementation for the P4 of Dbl2Lng. - if (dwCPUFeaturesEDX & (1<<26)) - { - SetJitHelperFunction(CORINFO_HELP_DBL2INT, JIT_Dbl2IntSSE2); - if (dwCPUFeaturesECX & 1) // check SSE3 - { - SetJitHelperFunction(CORINFO_HELP_DBL2UINT, JIT_Dbl2LngSSE3); - SetJitHelperFunction(CORINFO_HELP_DBL2LNG, JIT_Dbl2LngSSE3); - } - else - { - SetJitHelperFunction(CORINFO_HELP_DBL2UINT, JIT_Dbl2LngP4x87); // SSE2 only for signed - SetJitHelperFunction(CORINFO_HELP_DBL2LNG, JIT_Dbl2LngP4x87); - } - } - if (!(TrackAllocationsEnabled() || LoggingOn(LF_GCALLOC, LL_INFO10) #ifdef _DEBUG diff --git a/src/coreclr/vm/jithelpers.cpp b/src/coreclr/vm/jithelpers.cpp index 18629a0da24141..4614a89f403c49 100644 --- a/src/coreclr/vm/jithelpers.cpp +++ b/src/coreclr/vm/jithelpers.cpp @@ -514,46 +514,60 @@ HCIMPL1_V(double, JIT_Lng2Dbl, INT64 val) HCIMPLEND /*********************************************************************/ -// Call fast Dbl2Lng conversion - used by functions below -FORCEINLINE INT64 FastDbl2Lng(double val) +HCIMPL1_V(INT64, JIT_Dbl2Lng, double val) { -#ifdef TARGET_X86 FCALL_CONTRACT; - return HCCALL1_V(JIT_Dbl2Lng, val); + +#if defined(TARGET_X86) || defined(TARGET_AMD64) + const double int64_min = -2147483648.0 * 4294967296.0; + const double int64_max = 2147483648.0 * 4294967296.0; + return (val != val) ? 0 : (val <= int64_min) ? INT64_MIN : (val >= int64_max) ? 
INT64_MAX : (INT64)val; #else - FCALL_CONTRACT; - return((__int64) val); -#endif + return((INT64)val); +#endif // TARGET_X86 || TARGET_AMD64 } +HCIMPLEND /*********************************************************************/ HCIMPL1_V(UINT32, JIT_Dbl2UIntOvf, double val) { FCALL_CONTRACT; - // Note that this expression also works properly for val = NaN case + // Note that this expression also works properly for val = NaN case if (val > -1.0 && val < 4294967296.0) - return((UINT32)FastDbl2Lng(val)); + return((UINT32)val); FCThrow(kOverflowException); } HCIMPLEND /*********************************************************************/ -HCIMPL1_V(UINT64, JIT_Dbl2ULng, double val) +HCIMPL1_V(int, JIT_Dbl2IntOvf, double val) +{ + FCALL_CONTRACT; + + const double two31 = 2147483648.0; + // Note that this expression also works properly for val = NaN case + if (val > -two31 - 1 && val < two31) + return((INT32)val); + + FCThrow(kOverflowException); +} +HCIMPLEND + +/*********************************************************************/ +HCIMPL1_V(INT64, JIT_Dbl2LngOvf, double val) { FCALL_CONTRACT; const double two63 = 2147483648.0 * 4294967296.0; - UINT64 ret; - if (val < two63) { - ret = FastDbl2Lng(val); - } - else { - // subtract 0x8000000000000000, do the convert then add it back again - ret = FastDbl2Lng(val - two63) + I64(0x8000000000000000); - } - return ret; + + // Note that this expression also works properly for val = NaN case + // We need to compare with the very next double to two63. 0x402 is epsilon to get us there. 
+ if (val > -two63 - 0x402 && val < two63) + return((INT64)val); + + FCThrow(kOverflowException); } HCIMPLEND @@ -563,69 +577,69 @@ HCIMPL1_V(UINT64, JIT_Dbl2ULngOvf, double val) FCALL_CONTRACT; const double two64 = 4294967296.0 * 4294967296.0; - // Note that this expression also works properly for val = NaN case - if (val > -1.0 && val < two64) { - const double two63 = 2147483648.0 * 4294967296.0; - UINT64 ret; - if (val < two63) { - ret = FastDbl2Lng(val); - } - else { - // subtract 0x8000000000000000, do the convert then add it back again - ret = FastDbl2Lng(val - two63) + I64(0x8000000000000000); - } -#ifdef _DEBUG - // since no overflow can occur, the value always has to be within 1 - double roundTripVal = HCCALL1_V(JIT_ULng2Dbl, ret); - _ASSERTE(val - 1.0 <= roundTripVal && roundTripVal <= val + 1.0); -#endif // _DEBUG - return ret; - } + // Note that this expression also works properly for val = NaN case + if (val > -1.0 && val < two64) + return (UINT64)val; FCThrow(kOverflowException); } HCIMPLEND - -#if !defined(TARGET_X86) || defined(TARGET_UNIX) - -HCIMPL1_V(INT64, JIT_Dbl2Lng, double val) +HCIMPL1_V(UINT32, JIT_Dbl2UInt, double val) { FCALL_CONTRACT; - return((INT64)val); +#if defined(TARGET_X86) || defined(TARGET_AMD64) + const double uint_max = 4294967295.0; + // Note that this expression also works properly for val = NaN case + return (val >= 0) ? ((val >= uint_max) ? 
UINT32_MAX : (UINT32)val) : 0; +#else + return((UINT32)val); +#endif //TARGET_X86 || TARGET_AMD64 } HCIMPLEND -HCIMPL1_V(int, JIT_Dbl2IntOvf, double val) +/*********************************************************************/ +HCIMPL1_V(INT32, JIT_Dbl2Int, double val) { FCALL_CONTRACT; - const double two31 = 2147483648.0; - - // Note that this expression also works properly for val = NaN case - if (val > -two31 - 1 && val < two31) - return((INT32)val); - - FCThrow(kOverflowException); +#if defined(TARGET_X86) || defined(TARGET_AMD64) + const double int32_min = -2147483648.0; + const double int32_max_plus_1 = 2147483648.0; + return (val != val) ? 0 : (val <= int32_min) ? INT32_MIN : (val >= int32_max_plus_1) ? INT32_MAX : (INT32)val; +#else + return((INT32)val); +#endif // TARGET_X86 || TARGET_AMD64 } HCIMPLEND -HCIMPL1_V(INT64, JIT_Dbl2LngOvf, double val) +/*********************************************************************/ +HCIMPL1_V(UINT64, JIT_Dbl2ULng, double val) { FCALL_CONTRACT; - const double two63 = 2147483648.0 * 4294967296.0; - +#if defined(TARGET_X86) || defined(TARGET_AMD64) + const double uint64_max_plus_1 = 4294967296.0 * 4294967296.0; // Note that this expression also works properly for val = NaN case - // We need to compare with the very next double to two63. 0x402 is epsilon to get us there. - if (val > -two63 - 0x402 && val < two63) - return((INT64)val); + return (val >= 0) ? ((val >= uint64_max_plus_1) ? 
UINT64_MAX : (UINT64)val) : 0; - FCThrow(kOverflowException); +#else + const double two63 = 2147483648.0 * 4294967296.0; + UINT64 ret; + if (val < two63) { + ret = (INT64)(val); + } + else { + // subtract 0x8000000000000000, do the convert then add it back again + ret = (INT64)(val - two63) + I64(0x8000000000000000); + } + return ret; +#endif // TARGET_X86 || TARGET_AMD64 } HCIMPLEND +/*********************************************************************/ HCIMPL2_VV(float, JIT_FltRem, float dividend, float divisor) { FCALL_CONTRACT; @@ -634,6 +648,7 @@ HCIMPL2_VV(float, JIT_FltRem, float dividend, float divisor) } HCIMPLEND +/*********************************************************************/ HCIMPL2_VV(double, JIT_DblRem, double dividend, double divisor) { FCALL_CONTRACT; @@ -642,8 +657,6 @@ HCIMPL2_VV(double, JIT_DblRem, double dividend, double divisor) } HCIMPLEND -#endif // !TARGET_X86 || TARGET_UNIX - #include diff --git a/src/coreclr/vm/jitinterface.h b/src/coreclr/vm/jitinterface.h index 90c3fdcc9f4d01..7429352a47de6e 100644 --- a/src/coreclr/vm/jitinterface.h +++ b/src/coreclr/vm/jitinterface.h @@ -325,17 +325,6 @@ EXTERN_C FCDECL2(Object*, JIT_NewArr1OBJ_MP_InlineGetThread, CORINFO_CLASS_HANDL EXTERN_C FCDECL2_VV(INT64, JIT_LMul, INT64 val1, INT64 val2); -EXTERN_C FCDECL1_V(INT64, JIT_Dbl2Lng, double val); -EXTERN_C FCDECL1_V(INT64, JIT_Dbl2IntSSE2, double val); -EXTERN_C FCDECL1_V(INT64, JIT_Dbl2LngP4x87, double val); -EXTERN_C FCDECL1_V(INT64, JIT_Dbl2LngSSE3, double val); -EXTERN_C FCDECL1_V(INT64, JIT_Dbl2LngOvf, double val); - -EXTERN_C FCDECL1_V(INT32, JIT_Dbl2IntOvf, double val); - -EXTERN_C FCDECL2_VV(float, JIT_FltRem, float dividend, float divisor); -EXTERN_C FCDECL2_VV(double, JIT_DblRem, double dividend, double divisor); - #ifndef HOST_64BIT #ifdef TARGET_X86 // JIThelp.asm diff --git a/src/tests/JIT/Directed/Convert/out_of_range_fp_to_int_conversions.cpp b/src/tests/JIT/Directed/Convert/out_of_range_fp_to_int_conversions.cpp index 
eaf7f2fa1a9daa..a16932e8a78a45 100644 --- a/src/tests/JIT/Directed/Convert/out_of_range_fp_to_int_conversions.cpp +++ b/src/tests/JIT/Directed/Convert/out_of_range_fp_to_int_conversions.cpp @@ -17,7 +17,6 @@ typedef enum { CONVERT_SENTINEL, CONVERT_SATURATING, CONVERT_NATIVECOMPILERBEHAVIOR, - CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64, CONVERT_MANAGED_BACKWARD_COMPATIBLE_ARM32, } FPtoIntegerConversionType; @@ -30,7 +29,6 @@ extern "C" DLLEXPORT int32_t ConvertDoubleToInt32(double x, FPtoIntegerConversio switch (t) { case CONVERT_BACKWARD_COMPATIBLE: - case CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64: case CONVERT_SENTINEL: return ((x != x) || (x < INT32_MIN) || (x > INT32_MAX)) ? INT32_MIN : (int32_t)x; @@ -53,7 +51,6 @@ extern "C" DLLEXPORT uint32_t ConvertDoubleToUInt32(double x, FPtoIntegerConvers const double int64_max_plus_1 = 0x1.p63; // 0x43e0000000000000 // (uint64_t)INT64_MAX + 1; switch (t) { - case CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64: case CONVERT_BACKWARD_COMPATIBLE: return ((x != x) || (x < INT64_MIN) || (x >= int64_max_plus_1)) ? 0 : (uint32_t)(int64_t)x; @@ -95,7 +92,6 @@ extern "C" DLLEXPORT int64_t ConvertDoubleToInt64(double x, FPtoIntegerConversio const double int32_max_plus1 = ((double)INT32_MAX) + 1; switch (t) { - case CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64: case CONVERT_BACKWARD_COMPATIBLE: case CONVERT_SENTINEL: return ((x != x) || (x < INT64_MIN) || (x >= int64_max_plus_1)) ? INT64_MIN : (int64_t)x; @@ -154,17 +150,6 @@ extern "C" DLLEXPORT uint64_t ConvertDoubleToUInt64(double x, FPtoIntegerConver } } - case CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64: - if (x < int64_max_plus_1) - { - return (x < INT64_MIN) ? (uint64_t)INT64_MIN : (uint64_t)(int64_t)x; - } - else - { - x -= int64_max_plus_1; - x = trunc(x); - return (uint64_t)(((x != x) || (x >= int64_max_plus_1)) ? 
INT64_MIN : (int64_t)x) + (0x8000000000000000); - } case CONVERT_NATIVECOMPILERBEHAVIOR: // handled above, but add case to silence warning return 0; } diff --git a/src/tests/JIT/Directed/Convert/out_of_range_fp_to_int_conversions.cs b/src/tests/JIT/Directed/Convert/out_of_range_fp_to_int_conversions.cs index 5b78783c09e4ca..e61078a0e05016 100644 --- a/src/tests/JIT/Directed/Convert/out_of_range_fp_to_int_conversions.cs +++ b/src/tests/JIT/Directed/Convert/out_of_range_fp_to_int_conversions.cs @@ -19,7 +19,6 @@ public enum FPtoIntegerConversionType CONVERT_SENTINEL, CONVERT_SATURATING, CONVERT_NATIVECOMPILERBEHAVIOR, - CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64, CONVERT_MANAGED_BACKWARD_COMPATIBLE_ARM32, } @@ -87,7 +86,6 @@ public static int ConvertDoubleToInt32(double x, FPtoIntegerConversionType t) switch (t) { - case FPtoIntegerConversionType.CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64: case FPtoIntegerConversionType.CONVERT_BACKWARD_COMPATIBLE: case FPtoIntegerConversionType.CONVERT_SENTINEL: return (Double.IsNaN(x) || (x int.MaxValue)) ? int.MinValue: (int) x; @@ -109,7 +107,6 @@ public static uint ConvertDoubleToUInt32(double x, FPtoIntegerConversionType t) switch (t) { - case FPtoIntegerConversionType.CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64: case FPtoIntegerConversionType.CONVERT_BACKWARD_COMPATIBLE: return (Double.IsNaN(x) || (x < long.MinValue) || (x >= llong_max_plus_1)) ? 0 : (uint)(long)x; @@ -136,7 +133,6 @@ public static long ConvertDoubleToInt64(double x, FPtoIntegerConversionType t) switch (t) { - case FPtoIntegerConversionType.CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64: case FPtoIntegerConversionType.CONVERT_BACKWARD_COMPATIBLE: case FPtoIntegerConversionType.CONVERT_SENTINEL: return (Double.IsNaN(x) || (x < long.MinValue) || (x >= llong_max_plus_1)) ? 
long.MinValue : (long)x; @@ -199,21 +195,6 @@ public static ulong ConvertDoubleToUInt64(double x, FPtoIntegerConversionType t) return (ulong)ConvertDoubleToInt64(x - two63, FPtoIntegerConversionType.CONVERT_MANAGED_BACKWARD_COMPATIBLE_ARM32) + (0x8000000000000000); } } - - case FPtoIntegerConversionType.CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64: - - if (x < two63) - { - return (x < long.MinValue) ? unchecked((ulong)long.MinValue) : (ulong)(long)x; - } - else - { - // (double)LLONG_MAX cannot be represented exactly as double - const double llong_max_plus_1 = (double)((ulong)long.MaxValue + 1); - x -= two63; - x = Math.Truncate(x); - return (ulong)((Double.IsNaN(x) || (x >= llong_max_plus_1)) ? long.MinValue : (long)x) + (0x8000000000000000); - } } return 0; @@ -263,7 +244,7 @@ public static Vector ConvertToVectorUInt64(Vector vFloat, FPtoInt public class Program { static int failures = 0; - static FPtoIntegerConversionType ManagedConversionRule = FPtoIntegerConversionType.CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64; + static FPtoIntegerConversionType ManagedConversionRule = FPtoIntegerConversionType.CONVERT_SATURATING; static void TestBitValue(uint value, double? dblValNullable = null, FPtoIntegerConversionType? tValue = null) { @@ -280,7 +261,6 @@ static void TestBitValue(uint value, double? 
dblValNullable = null, FPtoIntegerC if (!tValue.HasValue) { - TestBitValue(value, dblVal, FPtoIntegerConversionType.CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64); TestBitValue(value, dblVal, FPtoIntegerConversionType.CONVERT_MANAGED_BACKWARD_COMPATIBLE_ARM32); TestBitValue(value, dblVal, FPtoIntegerConversionType.CONVERT_BACKWARD_COMPATIBLE); TestBitValue(value, dblVal, FPtoIntegerConversionType.CONVERT_SATURATING); @@ -377,15 +357,12 @@ public static int TestEntryPoint() { switch (RuntimeInformation.ProcessArchitecture) { - case Architecture.X86: - case Architecture.X64: - Program.ManagedConversionRule = FPtoIntegerConversionType.CONVERT_MANAGED_BACKWARD_COMPATIBLE_X86_X64; - break; - case Architecture.Arm: Program.ManagedConversionRule = FPtoIntegerConversionType.CONVERT_MANAGED_BACKWARD_COMPATIBLE_ARM32; break; + case Architecture.X86: + case Architecture.X64: case Architecture.Arm64: Program.ManagedConversionRule = FPtoIntegerConversionType.CONVERT_SATURATING; break; diff --git a/src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b28598/b28598.il b/src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b28598/b28598.il index b8ccece0a1d6f2..ff132dd8685969 100644 --- a/src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b28598/b28598.il +++ b/src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b28598/b28598.il @@ -48,6 +48,9 @@ End_Orphan_3: } catch [mscorlib]System.OverflowException { pop leave the_end +} catch [mscorlib]System.DivideByZeroException { + pop + leave the_end } the_end: ldc.i4 100 diff --git a/src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b50027/b50027.il b/src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b50027/b50027.il index 65f3bc2af34f66..0422a59b020521 100644 --- a/src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b50027/b50027.il +++ b/src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b50027/b50027.il @@ -684,6 +684,9 @@ leave END } catch [mscorlib]System.OverflowException { pop leave END +} catch [mscorlib]System.DivideByZeroException { + pop + 
leave END } END: ldc.i4 100 diff --git a/src/tests/JIT/Regression/JitBlue/Runtime_62692/Runtime_62692.cs b/src/tests/JIT/Regression/JitBlue/Runtime_62692/Runtime_62692.cs index 5b85cbb0115a0c..fe5105d7a91fb8 100644 --- a/src/tests/JIT/Regression/JitBlue/Runtime_62692/Runtime_62692.cs +++ b/src/tests/JIT/Regression/JitBlue/Runtime_62692/Runtime_62692.cs @@ -5,6 +5,7 @@ using System; using System.Runtime.Intrinsics.X86; using Xunit; +using System.Runtime.InteropServices; public unsafe class Runtime_62692 { @@ -39,8 +40,8 @@ public static int TestEntryPoint() AssertEqual(Problem2(1111, 0xFFFF_FFFF_0000_0001), 3414328792); AssertEqual(Problem3(1, 0xFFFF_0001), 0); AssertEqual(Problem4(1111, 0xFFFF_FFFF_0000_0001), 3414328792); - AssertEqual(Problem5(1111, double.MaxValue), 3307008522); - AssertEqual(Problem6(1111, float.MaxValue), 3307008522); + AssertEqual(Problem5(1111, double.MaxValue), 1921271346); + AssertEqual(Problem6(1111, float.MaxValue), 1921271346); AssertEqual(Problem5(1111, double.MinValue), 3307008522); AssertEqual(Problem6(1111, float.MinValue), 3307008522); AssertEqual(Problem5(1111, -0.0), 3307008522); diff --git a/src/tests/issues.targets b/src/tests/issues.targets index 886d6dd9f743e8..f0b1baef76e00f 100644 --- a/src/tests/issues.targets +++ b/src/tests/issues.targets @@ -1166,6 +1166,9 @@ + + https://github.com/dotnet/runtime/issues/100368 + https://github.com/dotnet/runtime/issues/88775 From a9717630e4e72fd86690762c1b8bf089a5e91f28 Mon Sep 17 00:00:00 2001 From: Jakob Botsch Nielsen Date: Fri, 5 Apr 2024 10:04:08 +0200 Subject: [PATCH 107/132] JIT: Add support for frozen structs in Swift reverse pinvokes (#100344) This adds the final support for frozen structs in UCO methods. This passes 2500 auto generated tests locally on both macOS x64 and arm64. This PR includes only 100 tests. Frozen struct parameters are handled via the new ABI representation added in #100138. 
When such a parameter exists we always allocate space for it on the local stack frame. The struct is then reassembled from its passed constituents as the first thing in the codegen. One complication is that there can be an arbitrary amount of codegen to handle this reassembling. We cannot handle an arbitrary amount of codegen in the prolog, so the reassembling is handled in two places. First, since the amount of register passed data is limited, we handle those in the prolog (which frees them up to be used for other things). If some pieces were passed on the stack the JIT then ensures that there is a scratch BB and generates the code to reassemble the remaining parts as the first thing in the scratch BB. Since Swift structs can be passed by reference in certain cases this PR also enables `FEATURE_IMPLICIT_BYREFS` for SysV x64 to handle those cases. Depending on the TP impact we can refine some of the ifdefs around this. --- src/coreclr/jit/abi.cpp | 152 + src/coreclr/jit/abi.h | 7 + src/coreclr/jit/codegen.h | 3 + src/coreclr/jit/codegencommon.cpp | 135 +- src/coreclr/jit/codegenlinear.cpp | 10 + src/coreclr/jit/compiler.h | 2 + src/coreclr/jit/emitxarch.cpp | 6 +- src/coreclr/jit/flowgraph.cpp | 5 +- src/coreclr/jit/lclvars.cpp | 249 +- src/coreclr/jit/lsrabuild.cpp | 41 +- src/coreclr/jit/morph.cpp | 27 +- src/coreclr/jit/targetamd64.h | 2 +- .../SwiftCallbackAbiStress.cs | 8552 ++++++++++++++++- .../SwiftCallbackAbiStress.swift | 3881 +++++++- 14 files changed, 12637 insertions(+), 435 deletions(-) diff --git a/src/coreclr/jit/abi.cpp b/src/coreclr/jit/abi.cpp index d54243aa47614e..91f74fca03c8fd 100644 --- a/src/coreclr/jit/abi.cpp +++ b/src/coreclr/jit/abi.cpp @@ -41,6 +41,28 @@ regNumber ABIPassingSegment::GetRegister() const return m_register; } +//----------------------------------------------------------------------------- +// GetRegisterMask: +// Get the mask of registers that this segment is passed in. +// +// Return Value: +// The register mask. 
+// +regMaskTP ABIPassingSegment::GetRegisterMask() const +{ + assert(IsPassedInRegister()); + regMaskTP reg = genRegMask(m_register); + +#ifdef TARGET_ARM + if (genIsValidFloatReg(m_register) && (Size == 8)) + { + reg |= genRegMask(REG_NEXT(m_register)); + } +#endif + + return reg; +} + //----------------------------------------------------------------------------- // GetStackOffset: // Get the stack offset where this segment is passed. @@ -54,6 +76,53 @@ unsigned ABIPassingSegment::GetStackOffset() const return m_stackOffset; } +//----------------------------------------------------------------------------- +// GetRegisterStoreType: +// Return a type that can be used to store from the register this segment is +// in, taking the segment's size into account. +// +// Return Value: +// A type that matches ABIPassingSegment::Size and the register type. +// +var_types ABIPassingSegment::GetRegisterStoreType() const +{ + assert(IsPassedInRegister()); + if (genIsValidFloatReg(m_register)) + { + switch (Size) + { + case 4: + return TYP_FLOAT; + case 8: + return TYP_DOUBLE; +#ifdef FEATURE_SIMD + case 16: + return TYP_SIMD16; +#endif + default: + return TYP_UNDEF; + } + } + else + { + switch (Size) + { + case 1: + return TYP_UBYTE; + case 2: + return TYP_USHORT; + case 4: + return TYP_INT; +#ifdef TARGET_64BIT + case 8: + return TYP_LONG; +#endif + default: + return TYP_UNDEF; + } + } +} + //----------------------------------------------------------------------------- // InRegister: // Create an ABIPassingSegment representing that a segment is passed in a @@ -101,6 +170,56 @@ ABIPassingSegment ABIPassingSegment::OnStack(unsigned stackOffset, unsigned offs return segment; } +//----------------------------------------------------------------------------- +// HasAnyRegisterSegment: +// Check if any part of this value is passed in a register. +// +// Return Value: +// True if so. 
+// +bool ABIPassingInformation::HasAnyRegisterSegment() const +{ + for (unsigned i = 0; i < NumSegments; i++) + { + if (Segments[i].IsPassedInRegister()) + { + return true; + } + } + return false; +} + +//----------------------------------------------------------------------------- +// HasAnyStackSegment: +// Check if any part of this value is passed on the stack. +// +// Return Value: +// True if so. +// +bool ABIPassingInformation::HasAnyStackSegment() const +{ + for (unsigned i = 0; i < NumSegments; i++) + { + if (Segments[i].IsPassedOnStack()) + { + return true; + } + } + return false; +} + +//----------------------------------------------------------------------------- +// HasExactlyOneStackSegment: +// Check if this value is passed as a single stack segment. +// +// Return Value: +// True if so. +// +bool ABIPassingInformation::HasExactlyOneStackSegment() const +{ + return (NumSegments == 1) && Segments[0].IsPassedOnStack(); +} + //----------------------------------------------------------------------------- // IsSplitAcrossRegistersAndStack: // Check if this ABIPassingInformation represents passing a value in both @@ -253,6 +372,39 @@ ABIPassingInformation SwiftABIClassifier::Classify(Compiler* comp, TARGET_POINTER_SIZE)); } + if (type == TYP_STRUCT) + { + const CORINFO_SWIFT_LOWERING* lowering = comp->GetSwiftLowering(structLayout->GetClassHandle()); + if (lowering->byReference) + { + return m_classifier.Classify(comp, TYP_I_IMPL, nullptr, WellKnownArg::None); + } + + ArrayStack segments(comp->getAllocator(CMK_ABI)); + for (unsigned i = 0; i < lowering->numLoweredElements; i++) + { + var_types elemType = JITtype2varType(lowering->loweredElements[i]); + ABIPassingInformation elemInfo = m_classifier.Classify(comp, elemType, nullptr, WellKnownArg::None); + + for (unsigned j = 0; j < elemInfo.NumSegments; j++) + { + ABIPassingSegment newSegment = elemInfo.Segments[j]; + newSegment.Offset += lowering->offsets[i]; + segments.Push(newSegment); + } + } + + 
ABIPassingInformation result; + result.NumSegments = static_cast(segments.Height()); + result.Segments = new (comp, CMK_ABI) ABIPassingSegment[result.NumSegments]; + for (int i = 0; i < segments.Height(); i++) + { + result.Segments[i] = segments.Bottom(i); + } + + return result; + } + return m_classifier.Classify(comp, type, structLayout, wellKnownParam); } #endif diff --git a/src/coreclr/jit/abi.h b/src/coreclr/jit/abi.h index 82ec58b5d807f0..7236627d375d48 100644 --- a/src/coreclr/jit/abi.h +++ b/src/coreclr/jit/abi.h @@ -25,10 +25,14 @@ class ABIPassingSegment // If this segment is passed in a register, return the particular register. regNumber GetRegister() const; + regMaskTP GetRegisterMask() const; + // If this segment is passed on the stack then return the particular stack // offset, relative to the first stack argument's offset. unsigned GetStackOffset() const; + var_types GetRegisterStoreType() const; + static ABIPassingSegment InRegister(regNumber reg, unsigned offset, unsigned size); static ABIPassingSegment OnStack(unsigned stackOffset, unsigned offset, unsigned size); }; @@ -47,6 +51,9 @@ struct ABIPassingInformation unsigned NumSegments = 0; ABIPassingSegment* Segments = nullptr; + bool HasAnyRegisterSegment() const; + bool HasAnyStackSegment() const; + bool HasExactlyOneStackSegment() const; bool IsSplitAcrossRegistersAndStack() const; static ABIPassingInformation FromSegment(Compiler* comp, const ABIPassingSegment& segment); diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h index c5e65b081583df..ae97be76fbe5a2 100644 --- a/src/coreclr/jit/codegen.h +++ b/src/coreclr/jit/codegen.h @@ -273,6 +273,9 @@ class CodeGen final : public CodeGenInterface #else void genEnregisterOSRArgsAndLocals(); #endif + + void genHomeSwiftStructParameters(bool handleStack); + void genCheckUseBlockInit(); #if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD) void genClearStackVec3ArgUpperBits(); diff --git a/src/coreclr/jit/codegencommon.cpp 
b/src/coreclr/jit/codegencommon.cpp index eed9a96a981724..2cee578a47bdc3 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -4963,6 +4963,110 @@ void CodeGen::genEnregisterOSRArgsAndLocals() } } +#ifdef SWIFT_SUPPORT + +//----------------------------------------------------------------------------- +// genHomeSwiftStructParameters: +// Reassemble Swift struct parameters if necessary. +// +// Parameters: +// handleStack - If true, reassemble the segments that were passed on the stack. +// If false, reassemble the segments that were passed in registers. +// +void CodeGen::genHomeSwiftStructParameters(bool handleStack) +{ + for (unsigned lclNum = 0; lclNum < compiler->info.compArgsCount; lclNum++) + { + if (lclNum == compiler->lvaSwiftSelfArg) + { + continue; + } + + LclVarDsc* dsc = compiler->lvaGetDesc(lclNum); + if ((dsc->TypeGet() != TYP_STRUCT) || compiler->lvaIsImplicitByRefLocal(lclNum) || !dsc->lvOnFrame) + { + continue; + } + + JITDUMP("Homing Swift parameter V%02u: ", lclNum); + const ABIPassingInformation& abiInfo = compiler->lvaParameterPassingInfo[lclNum]; + DBEXEC(VERBOSE, abiInfo.Dump()); + + for (unsigned i = 0; i < abiInfo.NumSegments; i++) + { + const ABIPassingSegment& seg = abiInfo.Segments[i]; + if (seg.IsPassedOnStack() != handleStack) + { + continue; + } + + if (seg.IsPassedInRegister()) + { + RegState* regState = genIsValidFloatReg(seg.GetRegister()) ? 
&floatRegState : &intRegState; + regMaskTP regs = seg.GetRegisterMask(); + + if ((regState->rsCalleeRegArgMaskLiveIn & regs) != RBM_NONE) + { + var_types storeType = seg.GetRegisterStoreType(); + assert(storeType != TYP_UNDEF); + GetEmitter()->emitIns_S_R(ins_Store(storeType), emitTypeSize(storeType), seg.GetRegister(), lclNum, + seg.Offset); + + regState->rsCalleeRegArgMaskLiveIn &= ~regs; + } + } + else + { + var_types loadType = TYP_UNDEF; + switch (seg.Size) + { + case 1: + loadType = TYP_UBYTE; + break; + case 2: + loadType = TYP_USHORT; + break; + case 4: + loadType = TYP_INT; + break; + case 8: + loadType = TYP_LONG; + break; + default: + assert(!"Unexpected segment size for struct parameter not passed implicitly by ref"); + continue; + } + + int offset; + if (isFramePointerUsed()) + { + offset = -genCallerSPtoFPdelta(); + } + else + { + offset = -genCallerSPtoInitialSPdelta(); + } + + offset += (int)seg.GetStackOffset(); + + // Move the incoming segment to the local stack frame. We can + // use REG_SCRATCH as a temporary register here as we ensured + // that during LSRA build. +#ifdef TARGET_XARCH + GetEmitter()->emitIns_R_AR(ins_Load(loadType), emitTypeSize(loadType), REG_SCRATCH, + genFramePointerReg(), offset); +#else + genInstrWithConstant(ins_Load(loadType), emitTypeSize(loadType), REG_SCRATCH, genFramePointerReg(), + offset, REG_SCRATCH); +#endif + + GetEmitter()->emitIns_S_R(ins_Store(loadType), emitTypeSize(loadType), REG_SCRATCH, lclNum, seg.Offset); + } + } + } +} +#endif + /*----------------------------------------------------------------------------- * * Save the generic context argument. 
@@ -6133,18 +6237,6 @@ void CodeGen::genFnProlog() intRegState.rsCalleeRegArgMaskLiveIn &= ~RBM_SECRET_STUB_PARAM; } -#ifdef SWIFT_SUPPORT - if ((compiler->lvaSwiftSelfArg != BAD_VAR_NUM) && ((intRegState.rsCalleeRegArgMaskLiveIn & RBM_SWIFT_SELF) != 0)) - { - GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SWIFT_SELF, compiler->lvaSwiftSelfArg, 0); - intRegState.rsCalleeRegArgMaskLiveIn &= ~RBM_SWIFT_SELF; - } - else if (compiler->lvaSwiftErrorArg != BAD_VAR_NUM) - { - intRegState.rsCalleeRegArgMaskLiveIn &= ~RBM_SWIFT_ERROR; - } -#endif - // // Zero out the frame as needed // @@ -6236,6 +6328,25 @@ void CodeGen::genFnProlog() * Take care of register arguments first */ +#ifdef SWIFT_SUPPORT + if (compiler->info.compCallConv == CorInfoCallConvExtension::Swift) + { + if ((compiler->lvaSwiftSelfArg != BAD_VAR_NUM) && + ((intRegState.rsCalleeRegArgMaskLiveIn & RBM_SWIFT_SELF) != 0)) + { + GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SWIFT_SELF, compiler->lvaSwiftSelfArg, 0); + intRegState.rsCalleeRegArgMaskLiveIn &= ~RBM_SWIFT_SELF; + } + + if (compiler->lvaSwiftErrorArg != BAD_VAR_NUM) + { + intRegState.rsCalleeRegArgMaskLiveIn &= ~RBM_SWIFT_ERROR; + } + + genHomeSwiftStructParameters(/* handleStack */ false); + } +#endif + // Home incoming arguments and generate any required inits. // OSR handles this by moving the values from the original frame. // diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index 038f9fea696bbf..cdfd7b1666c460 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -387,6 +387,16 @@ void CodeGen::genCodeForBBlist() compiler->compCurStmt = nullptr; compiler->compCurLifeTree = nullptr; +#ifdef SWIFT_SUPPORT + // Reassemble Swift struct parameters on the local stack frame in the + // scratch BB right after the prolog. There can be arbitrary amounts of + // codegen related to doing this, so it cannot be done in the prolog. 
+ if (compiler->fgBBisScratch(block) && compiler->lvaHasAnySwiftStackParamToReassemble()) + { + genHomeSwiftStructParameters(/* handleStack */ true); + } +#endif + // Emit poisoning into scratch BB that comes right after prolog. // We cannot emit this code in the prolog as it might make the prolog too large. if (compiler->compShouldPoisonFrame() && compiler->fgBBisScratch(block)) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 6d2a6068d11e53..f27618af986f09 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -3952,6 +3952,7 @@ class Compiler int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs); #endif // !UNIX_AMD64_ABI void lvaAssignVirtualFrameOffsetsToLocals(); + bool lvaParamHasLocalStackSpace(unsigned lclNum); int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs); #ifdef TARGET_AMD64 // Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even. @@ -4016,6 +4017,7 @@ class Compiler void lvaClassifyParameterABI(); bool lvaInitSpecialSwiftParam(CORINFO_ARG_LIST_HANDLE argHnd, InitVarDscInfo* varDscInfo, CorInfoType type, CORINFO_CLASS_HANDLE typeHnd); + bool lvaHasAnySwiftStackParamToReassemble(); var_types lvaGetActualType(unsigned lclNum); var_types lvaGetRealType(unsigned lclNum); diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index 424d93d865b7e0..b1cc6a78108b52 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -3831,11 +3831,7 @@ inline UNATIVE_OFFSET emitter::emitInsSizeSVCalcDisp(instrDesc* id, code_t code, /* Is this a stack parameter reference? 
*/ - if ((emitComp->lvaIsParameter(var) -#if !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI) - && !emitComp->lvaIsRegArgument(var) -#endif // !TARGET_AMD64 || UNIX_AMD64_ABI - ) || + if ((emitComp->lvaIsParameter(var) && !emitComp->lvaParamHasLocalStackSpace(var)) || (static_cast(var) == emitComp->lvaRetAddrVar)) { /* If no EBP frame, arguments and ret addr are off of ESP, above temps */ diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 92b84e31aa72bb..7cedd596b12e8b 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -2293,8 +2293,9 @@ PhaseStatus Compiler::fgAddInternal() madeChanges |= fgCreateFiltersForGenericExceptions(); // The backend requires a scratch BB into which it can safely insert a P/Invoke method prolog if one is - // required. Similarly, we need a scratch BB for poisoning. Create it here. - if (compMethodRequiresPInvokeFrame() || compShouldPoisonFrame()) + // required. Similarly, we need a scratch BB for poisoning and when we have Swift parameters to reassemble. + // Create it here. + if (compMethodRequiresPInvokeFrame() || compShouldPoisonFrame() || lvaHasAnySwiftStackParamToReassemble()) { madeChanges |= fgEnsureFirstBBisScratch(); fgFirstBB->SetFlags(BBF_DONT_REMOVE); diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 042c411b306d0d..3e182e0820a1c5 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -470,14 +470,14 @@ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo) // We have set info.compArgsCount in compCompile() noway_assert(varDscInfo->varNum == info.compArgsCount); - // Now we have parameters created in the right order. Figure out how they're passed. 
- lvaClassifyParameterABI(); - assert(varDscInfo->intRegArgNum <= MAX_REG_ARG); codeGen->intRegState.rsCalleeRegArgCount = varDscInfo->intRegArgNum; codeGen->floatRegState.rsCalleeRegArgCount = varDscInfo->floatRegArgNum; + // Now we have parameters created in the right order. Figure out how they're passed. + lvaClassifyParameterABI(); + #if FEATURE_FASTTAILCALL // Save the stack usage information // We can get register usage information using codeGen->intRegState and @@ -662,10 +662,26 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un varDsc->lvOnFrame = true; #ifdef SWIFT_SUPPORT - if ((info.compCallConv == CorInfoCallConvExtension::Swift) && - lvaInitSpecialSwiftParam(argLst, varDscInfo, strip(corInfoType), typeHnd)) + if (info.compCallConv == CorInfoCallConvExtension::Swift) { - continue; + if (varTypeIsSIMD(varDsc)) + { + IMPL_LIMITATION("SIMD types are currently unsupported in Swift reverse pinvokes"); + } + + if (lvaInitSpecialSwiftParam(argLst, varDscInfo, strip(corInfoType), typeHnd)) + { + continue; + } + + if (varDsc->TypeGet() == TYP_STRUCT) + { + // Struct parameters are lowered to separate primitives in the + // Swift calling convention. We cannot handle these patterns + // efficiently, so we always DNER them and home them to stack + // in the prolog. + lvaSetVarDoNotEnregister(varDscInfo->varNum DEBUGARG(DoNotEnregisterReason::IsStructArg)); + } } #endif @@ -1736,6 +1752,59 @@ void Compiler::lvaClassifyParameterABI() { SwiftABIClassifier classifier(cInfo); lvaClassifyParameterABI(classifier); + + regMaskTP argRegs = RBM_NONE; + + // The calling convention details computed by the old ABI classifier + // are wrong since it does not handle the Swift ABI for structs + // appropriately. Grab them from the new ABI information. 
+ for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) + { + LclVarDsc* dsc = lvaGetDesc(lclNum); + const ABIPassingInformation& abiInfo = lvaParameterPassingInfo[lclNum]; + + if (dsc->TypeGet() == TYP_STRUCT) + { + const CORINFO_SWIFT_LOWERING* lowering = GetSwiftLowering(dsc->GetLayout()->GetClassHandle()); + dsc->lvIsImplicitByRef = lowering->byReference; + } + + if ((dsc->TypeGet() == TYP_STRUCT) && !lvaIsImplicitByRefLocal(lclNum) && + !abiInfo.HasExactlyOneStackSegment()) + { + dsc->lvIsRegArg = false; + } + else + { + assert(abiInfo.NumSegments == 1); + if (abiInfo.Segments[0].IsPassedInRegister()) + { + dsc->lvIsRegArg = true; + dsc->SetArgReg(abiInfo.Segments[0].GetRegister()); + dsc->SetOtherArgReg(REG_NA); + } + else + { + dsc->lvIsRegArg = false; + dsc->SetArgReg(REG_STK); + dsc->SetOtherArgReg(REG_NA); + dsc->SetStackOffset(abiInfo.Segments[0].GetStackOffset()); + } + } + + for (unsigned i = 0; i < abiInfo.NumSegments; i++) + { + const ABIPassingSegment& segment = abiInfo.Segments[i]; + if (segment.IsPassedInRegister()) + { + argRegs |= segment.GetRegisterMask(); + } + } + } + + // genFnPrologCalleeRegArgs expect these to be the counts of registers it knows how to handle. 
+ codeGen->intRegState.rsCalleeRegArgCount = genCountBits(argRegs & RBM_ARG_REGS); + codeGen->floatRegState.rsCalleeRegArgCount = genCountBits(argRegs & RBM_FLTARG_REGS); } else #endif @@ -1759,6 +1828,11 @@ void Compiler::lvaClassifyParameterABI() assert(abiInfo.NumSegments > 0); + if ((dsc->TypeGet() == TYP_STRUCT) && (info.compCallConv == CorInfoCallConvExtension::Swift)) + { + continue; + } + unsigned numSegmentsToCompare = abiInfo.NumSegments; if (dsc->lvIsHfa()) { @@ -1820,6 +1894,35 @@ void Compiler::lvaClassifyParameterABI() #endif // DEBUG } +//-------------------------------------------------------------------------------------------- +// lvaHaveSwiftStructStackParamsToReassemble: +// Check if this compilation has any Swift parameters that are passed on the +// stack and that need to be reassembled on the local stack frame. +// +// Return value: +// True if so. +// +bool Compiler::lvaHasAnySwiftStackParamToReassemble() +{ +#ifdef SWIFT_SUPPORT + if (info.compCallConv != CorInfoCallConvExtension::Swift) + { + return false; + } + + for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) + { + const ABIPassingInformation& abiInfo = lvaParameterPassingInfo[lclNum]; + if (abiInfo.HasAnyStackSegment() && !abiInfo.HasExactlyOneStackSegment()) + { + return true; + } + } +#endif + + return false; +} + /***************************************************************************** * Returns our internal varNum for a given IL variable. * Asserts assume it is called after lvaTable[] has been set up. @@ -2387,6 +2490,17 @@ bool Compiler::StructPromotionHelper::CanPromoteStructVar(unsigned lclNum) return false; } +#ifdef SWIFT_SUPPORT + // Swift structs are not passed in a way that match their layout and + // require reassembling on the local stack frame. Skip promotion for these + // (which would result in dependent promotion anyway). 
+ if ((compiler->info.compCallConv == CorInfoCallConvExtension::Swift) && varDsc->lvIsParam) + { + JITDUMP(" struct promotion of V%02u is disabled because it is a parameter to a Swift function"); + return false; + } +#endif + CORINFO_CLASS_HANDLE typeHnd = varDsc->GetLayout()->GetClassHandle(); assert(typeHnd != NO_CLASS_HANDLE); @@ -3187,7 +3301,7 @@ void Compiler::lvaSetStruct(unsigned varNum, ClassLayout* layout, bool unsafeVal if (varDsc->lvIsParam && !varDsc->lvIsStructField) { structPassingKind howToReturnStruct; - getArgTypeForStruct(layout->GetClassHandle(), &howToReturnStruct, this->info.compIsVarArgs, + getArgTypeForStruct(layout->GetClassHandle(), &howToReturnStruct, info.compIsVarArgs, varDsc->lvExactSize()); if (howToReturnStruct == SPK_ByReference) @@ -5580,17 +5694,7 @@ void Compiler::lvaFixVirtualFrameOffsets() if (!varDsc->lvOnFrame) { - if (!varDsc->lvIsParam -#if !defined(TARGET_AMD64) - || (varDsc->lvIsRegArg -#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) - && compIsProfilerHookNeeded() && - !lvaIsPreSpilled(lclNum, codeGen->regSet.rsMaskPreSpillRegs(false)) // We need assign stack offsets - // for prespilled arguments -#endif - ) -#endif // !defined(TARGET_AMD64) - ) + if (!varDsc->lvIsParam || lvaParamHasLocalStackSpace(lclNum)) { doAssignStkOffs = false; // Not on frame or an incoming stack arg } @@ -5764,6 +5868,27 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() // Update the arg initial register locations. lvaUpdateArgsWithInitialReg(); +#ifdef SWIFT_SUPPORT + if (info.compCallConv == CorInfoCallConvExtension::Swift) + { + // We already assigned argument offsets in lvaClassifyParameterABI. + // Just get them from there. + // TODO-Cleanup: We can use similar logic for all backends once we have + // the new ABI info for all targets. 
+ for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) + { + LclVarDsc* dsc = lvaGetDesc(lclNum); + const ABIPassingInformation& abiInfo = lvaParameterPassingInfo[lclNum]; + + if (abiInfo.HasExactlyOneStackSegment()) + { + dsc->SetStackOffset(abiInfo.Segments[0].GetStackOffset()); + } + } + return; + } +#endif + /* Is there a "this" argument? */ if (!info.compIsStatic) @@ -6942,45 +7067,21 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() if (varDsc->lvIsParam) { -#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) - - // On Windows AMD64 we can use the caller-reserved stack area that is already setup - assert(varDsc->GetStackOffset() != BAD_STK_OFFS); - continue; - -#else // !TARGET_AMD64 - - // A register argument that is not enregistered ends up as - // a local variable which will need stack frame space. - // - if (!varDsc->lvIsRegArg) - { - continue; - } - #ifdef TARGET_ARM64 - if (info.compIsVarArgs && (varDsc->GetArgReg() != theFixedRetBuffReg(info.compCallConv))) + if (info.compIsVarArgs && varDsc->lvIsRegArg && + (varDsc->GetArgReg() != theFixedRetBuffReg(info.compCallConv))) { // Stack offset to varargs (parameters) should point to home area which will be preallocated. const unsigned regArgNum = genMapIntRegNumToRegArgNum(varDsc->GetArgReg(), info.compCallConv); varDsc->SetStackOffset(-initialStkOffs + regArgNum * REGSIZE_BYTES); continue; } - #endif -#ifdef TARGET_ARM - // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg - // in the prolog, thus they don't need stack frame space. 
- // - if ((codeGen->regSet.rsMaskPreSpillRegs(false) & genRegMask(varDsc->GetArgReg())) != 0) + if (!lvaParamHasLocalStackSpace(lclNum)) { - assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; } -#endif - -#endif // !TARGET_AMD64 } /* Make sure the type is appropriate */ @@ -7234,6 +7335,58 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() (unsigned)-(stkOffs + (pushedCount * (int)TARGET_POINTER_SIZE))); } +//------------------------------------------------------------------------ +// lvaParamHasLocalStackSpace: Check if a local that represents a parameter has +// space allocated for it in the local stack frame. +// +// Arguments: +// lclNum - the variable number +// +// Return Value: +// true if the local does not have reusable stack space created by the caller +// already. +// +bool Compiler::lvaParamHasLocalStackSpace(unsigned lclNum) +{ + LclVarDsc* varDsc = lvaGetDesc(lclNum); + +#ifdef SWIFT_SUPPORT + if ((info.compCallConv == CorInfoCallConvExtension::Swift) && !lvaIsImplicitByRefLocal(lclNum) && + !lvaParameterPassingInfo[lclNum].HasExactlyOneStackSegment()) + { + return true; + } +#endif + +#if defined(WINDOWS_AMD64_ABI) + // On Windows AMD64 we can use the caller-reserved stack area that is already setup + return false; +#else // !WINDOWS_AMD64_ABI + + // A register argument that is not enregistered ends up as + // a local variable which will need stack frame space. + // + if (!varDsc->lvIsRegArg) + { + return false; + } + +#ifdef TARGET_ARM + // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg + // in the prolog, thus they don't need stack frame space. 
+ // + if ((codeGen->regSet.rsMaskPreSpillRegs(false) & genRegMask(varDsc->GetArgReg())) != 0) + { + assert(varDsc->GetStackOffset() != BAD_STK_OFFS); + return false; + } +#endif + +#endif // !WINDOWS_AMD64_ABI + + return true; +} + int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs) { noway_assert(lclNum != BAD_VAR_NUM); @@ -7513,9 +7666,9 @@ void Compiler::lvaAssignFrameOffsetsToPromotedStructs() // const bool mustProcessParams = true; #else - // OSR must also assign offsets here. + // OSR/Swift must also assign offsets here. // - const bool mustProcessParams = opts.IsOSR(); + const bool mustProcessParams = opts.IsOSR() || (info.compCallConv == CorInfoCallConvExtension::Swift); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_ARM) || defined(TARGET_X86) if (varDsc->lvIsStructField && (!varDsc->lvIsParam || mustProcessParams)) @@ -8124,7 +8277,7 @@ unsigned Compiler::lvaFrameSize(FrameLayoutState curState) // // Return Value: // The offset. - +// int Compiler::lvaGetSPRelativeOffset(unsigned varNum) { assert(!compLocallocUsed); diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp index 86abef939a9d8c..62532de8d66f0c 100644 --- a/src/coreclr/jit/lsrabuild.cpp +++ b/src/coreclr/jit/lsrabuild.cpp @@ -2291,6 +2291,32 @@ void LinearScan::buildIntervals() regsInUseThisLocation = RBM_NONE; regsInUseNextLocation = RBM_NONE; +#ifdef SWIFT_SUPPORT + if (compiler->info.compCallConv == CorInfoCallConvExtension::Swift) + { + for (unsigned lclNum = 0; lclNum < compiler->info.compArgsCount; lclNum++) + { + LclVarDsc* argDsc = compiler->lvaGetDesc(lclNum); + + if ((argDsc->lvRefCnt() == 0) && !compiler->opts.compDbgCode) + { + continue; + } + + const ABIPassingInformation& abiInfo = compiler->lvaParameterPassingInfo[lclNum]; + for (unsigned i = 0; i < abiInfo.NumSegments; i++) + { + const ABIPassingSegment& seg = abiInfo.Segments[i]; + if (seg.IsPassedInRegister()) + { + RegState* regState = 
genIsValidFloatReg(seg.GetRegister()) ? floatRegState : intRegState; + regState->rsCalleeRegArgMaskLiveIn |= seg.GetRegisterMask(); + } + } + } + } +#endif + for (unsigned int varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++) { LclVarDsc* argDsc = compiler->lvaGetDescByTrackedIndex(varIndex); @@ -2509,11 +2535,24 @@ void LinearScan::buildIntervals() // assert(block->isRunRarely()); } + // For Swift calls there can be an arbitrary amount of codegen related + // to homing of decomposed struct parameters passed on stack. We cannot + // do that in the prolog. We handle registers in the prolog and the + // stack args in the scratch BB that we have ensured exists. The + // handling clobbers REG_SCRATCH, so kill it here. + if ((block == compiler->fgFirstBB) && compiler->lvaHasAnySwiftStackParamToReassemble()) + { + assert(compiler->fgFirstBBisScratch()); + addRefsForPhysRegMask(genRegMask(REG_SCRATCH), currentLoc + 1, RefTypeKill, true); + currentLoc += 2; + } + // For frame poisoning we generate code into scratch BB right after prolog since // otherwise the prolog might become too large. In this case we will put the poison immediate // into the scratch register, so it will be killed here. - if (compiler->compShouldPoisonFrame() && compiler->fgFirstBBisScratch() && block == compiler->fgFirstBB) + if (compiler->compShouldPoisonFrame() && (block == compiler->fgFirstBB)) { + assert(compiler->fgFirstBBisScratch()); regMaskTP killed; #if defined(TARGET_XARCH) // Poisoning uses EAX for small vars and rep stosd that kills edi, ecx and eax for large vars. 
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index e140c395505330..eb22f9d8f9ce5a 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -4636,17 +4636,15 @@ GenTree* Compiler::fgMorphExpandStackArgForVarArgs(GenTreeLclVarCommon* lclNode) // GenTree* Compiler::fgMorphExpandImplicitByRefArg(GenTreeLclVarCommon* lclNode) { - if (!fgGlobalMorph) - { - return nullptr; - } - unsigned lclNum = lclNode->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); unsigned fieldOffset = 0; unsigned newLclNum = BAD_VAR_NUM; bool isStillLastUse = false; + assert(lvaIsImplicitByRefLocal(lclNum) || + (varDsc->lvIsStructField && lvaIsImplicitByRefLocal(varDsc->lvParentLcl))); + if (lvaIsImplicitByRefLocal(lclNum)) { // The SIMD transformation to coalesce contiguous references to SIMD vector fields will re-invoke @@ -4696,16 +4694,12 @@ GenTree* Compiler::fgMorphExpandImplicitByRefArg(GenTreeLclVarCommon* lclNode) } } } - else if (varDsc->lvIsStructField && lvaIsImplicitByRefLocal(varDsc->lvParentLcl)) + else { // This was a field reference to an implicit-by-reference struct parameter that was dependently promoted. newLclNum = varDsc->lvParentLcl; fieldOffset = varDsc->lvFldOffset; } - else - { - return nullptr; - } // Add a level of indirection to this node. The "base" will be a local node referring to "newLclNum". // We will also add an offset, and, if the original "lclNode" represents a location, a dereference. 
@@ -4767,7 +4761,16 @@ GenTree* Compiler::fgMorphExpandLocal(GenTreeLclVarCommon* lclNode) #ifdef TARGET_X86 expandedTree = fgMorphExpandStackArgForVarArgs(lclNode); #else - expandedTree = fgMorphExpandImplicitByRefArg(lclNode); +#if FEATURE_IMPLICIT_BYREFS + if (fgGlobalMorph) + { + LclVarDsc* dsc = lvaGetDesc(lclNode); + if (dsc->lvIsImplicitByRef || (dsc->lvIsStructField && lvaIsImplicitByRefLocal(dsc->lvParentLcl))) + { + expandedTree = fgMorphExpandImplicitByRefArg(lclNode); + } + } +#endif #endif if (expandedTree != nullptr) @@ -14972,7 +14975,7 @@ PhaseStatus Compiler::fgPromoteStructs() // PhaseStatus Compiler::fgMarkImplicitByRefCopyOmissionCandidates() { -#if FEATURE_IMPLICIT_BYREFS +#if FEATURE_IMPLICIT_BYREFS && !defined(UNIX_AMD64_ABI) if (!fgDidEarlyLiveness) { return PhaseStatus::MODIFIED_NOTHING; diff --git a/src/coreclr/jit/targetamd64.h b/src/coreclr/jit/targetamd64.h index 7d1a2c8f08039f..ba2109b9cb8b28 100644 --- a/src/coreclr/jit/targetamd64.h +++ b/src/coreclr/jit/targetamd64.h @@ -32,7 +32,7 @@ #define FEATURE_SET_FLAGS 0 // Set to true to force the JIT to mark the trees with GTF_SET_FLAGS when the flags need to be set #define MAX_PASS_SINGLEREG_BYTES 8 // Maximum size of a struct passed in a single register (double). 
#ifdef UNIX_AMD64_ABI - #define FEATURE_IMPLICIT_BYREFS 0 // Support for struct parameters passed via pointers to shadow copies + #define FEATURE_IMPLICIT_BYREFS 1 // Support for struct parameters passed via pointers to shadow copies #define FEATURE_MULTIREG_ARGS_OR_RET 1 // Support for passing and/or returning single values in more than one register #define FEATURE_MULTIREG_ARGS 1 // Support for passing a single argument in more than one register #define FEATURE_MULTIREG_RET 1 // Support for returning a single value in more than one register diff --git a/src/tests/Interop/Swift/SwiftCallbackAbiStress/SwiftCallbackAbiStress.cs b/src/tests/Interop/Swift/SwiftCallbackAbiStress/SwiftCallbackAbiStress.cs index 33e8dac7a184c0..cd00caec4667f4 100644 --- a/src/tests/Interop/Swift/SwiftCallbackAbiStress/SwiftCallbackAbiStress.cs +++ b/src/tests/Interop/Swift/SwiftCallbackAbiStress/SwiftCallbackAbiStress.cs @@ -14,456 +14,8406 @@ public unsafe class SwiftCallbackAbiStress { private const string SwiftLib = "libSwiftCallbackAbiStress.dylib"; + [StructLayout(LayoutKind.Sequential, Size = 14)] + struct F0_S0 + { + public double F0; + public uint F1; + public ushort F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F0_S1 + { + public ulong F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F0_S2 + { + public float F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func01fs5Int32VAEs5Int16V_AEs6UInt64Vs6UInt16VAA5F0_S0VAA0K3_S1Vs5UInt8VAA0K3_S2VtXE_tF")] + private static extern int SwiftCallbackFunc0(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static int SwiftCallbackFunc0Callback(short a0, int a1, ulong a2, ushort a3, F0_S0 a4, F0_S1 a5, byte a6, F0_S2 a7, SwiftSelf self) + { + try + { + Assert.Equal((short)-17813, a0); + 
Assert.Equal((int)318006528, a1); + Assert.Equal((ulong)1195162122024233590, a2); + Assert.Equal((ushort)60467, a3); + Assert.Equal((double)2239972725713766, a4.F0); + Assert.Equal((uint)1404066621, a4.F1); + Assert.Equal((ushort)29895, a4.F2); + Assert.Equal((ulong)7923486769850554262, a5.F0); + Assert.Equal((byte)217, a6); + Assert.Equal((float)2497655, a7.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 1579768470; + } + + [Fact] + public static void TestSwiftCallbackFunc0() + { + Console.Write("Running SwiftCallbackFunc0: "); + ExceptionDispatchInfo ex = null; + int val = SwiftCallbackFunc0(&SwiftCallbackFunc0Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((int)1579768470, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 3)] + struct F1_S0 + { + public ushort F0; + public byte F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 28)] + struct F1_S1 + { + public byte F0; + public ulong F1; + public short F2; + public float F3; + public float F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F1_S2_S0 + { + public uint F0; + public double F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 40)] + struct F1_S2 + { + public sbyte F0; + public nuint F1; + public F1_S2_S0 F2; + public nint F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F1_S3 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F1_S4 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F1_S5_S0 + { + public uint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F1_S5 + { + public F1_S5_S0 F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = 
"$s22SwiftCallbackAbiStress05swiftB5Func11fs5UInt8VAEs5Int64V_Sds4Int8VAA5F1_S0VAA0J3_S1VAA0J3_S2VAeigA0J3_S3VSuAA0J3_S4VAA0J3_S5VSitXE_tF")] + private static extern byte SwiftCallbackFunc1(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static byte SwiftCallbackFunc1Callback(long a0, double a1, sbyte a2, F1_S0 a3, F1_S1 a4, F1_S2 a5, byte a6, sbyte a7, long a8, F1_S3 a9, nuint a10, F1_S4 a11, F1_S5 a12, nint a13, SwiftSelf self) + { + try + { + Assert.Equal((long)7920511243396412395, a0); + Assert.Equal((double)1396130721334528, a1); + Assert.Equal((sbyte)-55, a2); + Assert.Equal((ushort)33758, a3.F0); + Assert.Equal((byte)103, a3.F1); + Assert.Equal((byte)201, a4.F0); + Assert.Equal((ulong)7390774039746135757, a4.F1); + Assert.Equal((short)14699, a4.F2); + Assert.Equal((float)7235330, a4.F3); + Assert.Equal((float)7189013, a4.F4); + Assert.Equal((sbyte)37, a5.F0); + Assert.Equal((nuint)unchecked((nuint)3310322731568932038), a5.F1); + Assert.Equal((uint)1100328218, a5.F2.F0); + Assert.Equal((double)1060779460203640, a5.F2.F1); + Assert.Equal((nint)unchecked((nint)8325292022909418877), a5.F3); + Assert.Equal((byte)137, a6); + Assert.Equal((sbyte)82, a7); + Assert.Equal((long)1197537325837505041, a8); + Assert.Equal((ushort)46950, a9.F0); + Assert.Equal((nuint)unchecked((nuint)8181828233622947597), a10); + Assert.Equal((nint)unchecked((nint)1851182205030289056), a11.F0); + Assert.Equal((uint)1971014225, a12.F0.F0); + Assert.Equal((nint)unchecked((nint)6437995407675718392), a13); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 248; + } + + [Fact] + public static void TestSwiftCallbackFunc1() + { + Console.Write("Running SwiftCallbackFunc1: "); + ExceptionDispatchInfo ex = null; + byte val = SwiftCallbackFunc1(&SwiftCallbackFunc1Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((byte)248, 
val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 20)] + struct F2_S0 + { + public int F0; + public nuint F1; + public float F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F2_S1_S0 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F2_S1 + { + public long F0; + public ushort F1; + public F2_S1_S0 F2; + public nint F3; + public double F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 11)] + struct F2_S2 + { + public float F0; + public int F1; + public ushort F2; + public sbyte F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F2_S3_S0 + { + public sbyte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F2_S3 + { + public F2_S3_S0 F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func21fs4Int8VAeA5F2_S0V_AA0H3_S1VAA0H3_S2VSfs6UInt64VAA0H3_S3VtXE_tF")] + private static extern sbyte SwiftCallbackFunc2(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static sbyte SwiftCallbackFunc2Callback(F2_S0 a0, F2_S1 a1, F2_S2 a2, float a3, ulong a4, F2_S3 a5, SwiftSelf self) + { + try + { + Assert.Equal((int)1860840185, a0.F0); + Assert.Equal((nuint)unchecked((nuint)5407074783834178811), a0.F1); + Assert.Equal((float)6261766, a0.F2); + Assert.Equal((long)4033972792915237065, a1.F0); + Assert.Equal((ushort)22825, a1.F1); + Assert.Equal((ushort)44574, a1.F2.F0); + Assert.Equal((nint)unchecked((nint)4536911485304731630), a1.F3); + Assert.Equal((double)4282944015147385, a1.F4); + Assert.Equal((float)2579193, a2.F0); + Assert.Equal((int)586252933, a2.F1); + Assert.Equal((ushort)47002, a2.F2); + Assert.Equal((sbyte)71, a2.F3); + Assert.Equal((float)3225929, a3); + Assert.Equal((ulong)3599444831393612282, a4); + Assert.Equal((sbyte)13, a5.F0.F0); + } + catch 
(Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 115; + } + + [Fact] + public static void TestSwiftCallbackFunc2() + { + Console.Write("Running SwiftCallbackFunc2: "); + ExceptionDispatchInfo ex = null; + sbyte val = SwiftCallbackFunc2(&SwiftCallbackFunc2Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((sbyte)115, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F3_S0_S0 + { + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F3_S0 + { + public F3_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F3_S1 + { + public uint F0; + public long F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 3)] + struct F3_S2_S0 + { + public short F0; + public byte F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 5)] + struct F3_S2 + { + public F3_S2_S0 F0; + public sbyte F1; + public byte F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F3_S3 + { + public ulong F0; + public long F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F3_S4 + { + public short F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F3_Ret + { + public ushort F0; + public byte F1; + public ushort F2; + public float F3; + + public F3_Ret(ushort f0, byte f1, ushort f2, float f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func31fAA6F3_RetVAeA0G3_S0V_Sfs6UInt16VAA0G3_S1VAIs5Int32VAA0G3_S2VSiAA0G3_S3VAA0G3_S4VtXE_tF")] + private static extern F3_Ret SwiftCallbackFunc3(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F3_Ret SwiftCallbackFunc3Callback(F3_S0 a0, float a1, ushort a2, F3_S1 a3, ushort a4, int a5, F3_S2 a6, 
nint a7, F3_S3 a8, F3_S4 a9, SwiftSelf self) + { + try + { + Assert.Equal((nuint)unchecked((nuint)5610153900386943274), a0.F0.F0); + Assert.Equal((float)7736836, a1); + Assert.Equal((ushort)31355, a2); + Assert.Equal((uint)1159208572, a3.F0); + Assert.Equal((long)2707818827451590538, a3.F1); + Assert.Equal((ushort)37580, a4); + Assert.Equal((int)1453603418, a5); + Assert.Equal((short)699, a6.F0.F0); + Assert.Equal((byte)46, a6.F0.F1); + Assert.Equal((sbyte)-125, a6.F1); + Assert.Equal((byte)92, a6.F2); + Assert.Equal((nint)unchecked((nint)94557706586779834), a7); + Assert.Equal((ulong)2368015527878194540, a8.F0); + Assert.Equal((long)5026404532195049271, a8.F1); + Assert.Equal((short)21807, a9.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F3_Ret(51293, 217, 64666, 5667425); + } + + [Fact] + public static void TestSwiftCallbackFunc3() + { + Console.Write("Running SwiftCallbackFunc3: "); + ExceptionDispatchInfo ex = null; + F3_Ret val = SwiftCallbackFunc3(&SwiftCallbackFunc3Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ushort)51293, val.F0); + Assert.Equal((byte)217, val.F1); + Assert.Equal((ushort)64666, val.F2); + Assert.Equal((float)5667425, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F4_S0_S0 + { + public uint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F4_S0 + { + public F4_S0_S0 F0; + public float F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F4_Ret_S0 + { + public nint F0; + + public F4_Ret_S0(nint f0) + { + F0 = f0; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 44)] + struct F4_Ret + { + public int F0; + public F4_Ret_S0 F1; + public nint F2; + public short F3; + public nint F4; + public uint F5; + + public F4_Ret(int f0, F4_Ret_S0 f1, nint f2, short f3, nint f4, uint f5) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + F5 = f5; + } 
+ } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func41fAA6F4_RetVAESd_AA0G3_S0Vs5UInt8Vs5Int32Vs6UInt32VtXE_tF")] + private static extern F4_Ret SwiftCallbackFunc4(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F4_Ret SwiftCallbackFunc4Callback(double a0, F4_S0 a1, byte a2, int a3, uint a4, SwiftSelf self) + { + try + { + Assert.Equal((double)4282972206489588, a0); + Assert.Equal((uint)611688063, a1.F0.F0); + Assert.Equal((float)877466, a1.F1); + Assert.Equal((byte)53, a2); + Assert.Equal((int)965123506, a3); + Assert.Equal((uint)1301067653, a4); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F4_Ret(2069454428, new F4_Ret_S0(unchecked((nint)5483154806067048127)), unchecked((nint)2342208892279753870), -21578, unchecked((nint)4641984012938514811), 1691113876); + } + + [Fact] + public static void TestSwiftCallbackFunc4() + { + Console.Write("Running SwiftCallbackFunc4: "); + ExceptionDispatchInfo ex = null; + F4_Ret val = SwiftCallbackFunc4(&SwiftCallbackFunc4Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((int)2069454428, val.F0); + Assert.Equal((nint)unchecked((nint)5483154806067048127), val.F1.F0); + Assert.Equal((nint)unchecked((nint)2342208892279753870), val.F2); + Assert.Equal((short)-21578, val.F3); + Assert.Equal((nint)unchecked((nint)4641984012938514811), val.F4); + Assert.Equal((uint)1691113876, val.F5); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F5_S0 + { + public nuint F0; + public uint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F5_S1_S0 + { + public nint F0; + public uint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F5_S1_S1 + { + public float F0; + } + + 
[StructLayout(LayoutKind.Sequential, Size = 16)] + struct F5_S1 + { + public F5_S1_S0 F0; + public F5_S1_S1 F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F5_S2 + { + public double F0; + public sbyte F1; + public nint F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F5_S3 + { + public long F0; + public double F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F5_S4 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 26)] + struct F5_Ret + { + public short F0; + public int F1; + public int F2; + public ulong F3; + public short F4; + + public F5_Ret(short f0, int f1, int f2, ulong f3, short f4) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func51fAA6F5_RetVAEs5UInt8V_s5Int16Vs6UInt64VS2uAkgA0G3_S0Vs4Int8VAoA0G3_S1VAA0G3_S2VAA0G3_S3VSdAA0G3_S4Vs6UInt16VS2fAYtXE_tF")] + private static extern F5_Ret SwiftCallbackFunc5(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F5_Ret SwiftCallbackFunc5Callback(byte a0, short a1, ulong a2, nuint a3, nuint a4, ulong a5, byte a6, F5_S0 a7, sbyte a8, sbyte a9, F5_S1 a10, F5_S2 a11, F5_S3 a12, double a13, F5_S4 a14, ushort a15, float a16, float a17, ushort a18, SwiftSelf self) + { + try + { + Assert.Equal((byte)42, a0); + Assert.Equal((short)18727, a1); + Assert.Equal((ulong)3436765034579128495, a2); + Assert.Equal((nuint)unchecked((nuint)6305137336506323506), a3); + Assert.Equal((nuint)unchecked((nuint)6280137078630028944), a4); + Assert.Equal((ulong)6252650621827449809, a5); + Assert.Equal((byte)129, a6); + Assert.Equal((nuint)unchecked((nuint)6879980973426111678), a7.F0); + Assert.Equal((uint)1952654577, a7.F1); + Assert.Equal((sbyte)-34, a8); + Assert.Equal((sbyte)102, a9); + 
Assert.Equal((nint)unchecked((nint)8389143657021522019), a10.F0.F0); + Assert.Equal((uint)437030241, a10.F0.F1); + Assert.Equal((float)7522798, a10.F1.F0); + Assert.Equal((double)523364011167530, a11.F0); + Assert.Equal((sbyte)16, a11.F1); + Assert.Equal((nint)unchecked((nint)3823439046574037759), a11.F2); + Assert.Equal((long)3767260839267771462, a12.F0); + Assert.Equal((double)1181031208183008, a12.F1); + Assert.Equal((double)2338830539621828, a13); + Assert.Equal((ushort)36276, a14.F0); + Assert.Equal((ushort)41286, a15); + Assert.Equal((float)6683955, a16); + Assert.Equal((float)6399917, a17); + Assert.Equal((ushort)767, a18); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F5_Ret(-23277, 1015782032, 83490460, 2747931081050267058, -10369); + } + + [Fact] + public static void TestSwiftCallbackFunc5() + { + Console.Write("Running SwiftCallbackFunc5: "); + ExceptionDispatchInfo ex = null; + F5_Ret val = SwiftCallbackFunc5(&SwiftCallbackFunc5Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((short)-23277, val.F0); + Assert.Equal((int)1015782032, val.F1); + Assert.Equal((int)83490460, val.F2); + Assert.Equal((ulong)2747931081050267058, val.F3); + Assert.Equal((short)-10369, val.F4); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F6_S0_S0 + { + public float F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F6_S0 + { + public sbyte F0; + public sbyte F1; + public int F2; + public F6_S0_S0 F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 28)] + struct F6_S1 + { + public int F0; + public ulong F1; + public ulong F2; + public uint F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 11)] + struct F6_S2 + { + public long F0; + public short F1; + public sbyte F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F6_S3 + { + public float F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 
12)] + struct F6_Ret_S0 + { + public long F0; + public uint F1; + + public F6_Ret_S0(long f0, uint f1) + { + F0 = f0; + F1 = f1; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 29)] + struct F6_Ret + { + public F6_Ret_S0 F0; + public ulong F1; + public float F2; + public sbyte F3; + + public F6_Ret(F6_Ret_S0 f0, ulong f1, float f2, sbyte f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func61fAA6F6_RetVAESf_AA0G3_S0Vs5Int64Vs4Int8Vs6UInt16VSuAMs6UInt64VAA0G3_S1Vs5Int16VAA0G3_S2VAA0G3_S3VAMtXE_tF")] + private static extern F6_Ret SwiftCallbackFunc6(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F6_Ret SwiftCallbackFunc6Callback(float a0, F6_S0 a1, long a2, sbyte a3, ushort a4, nuint a5, ushort a6, ulong a7, F6_S1 a8, short a9, F6_S2 a10, F6_S3 a11, ushort a12, SwiftSelf self) + { + try + { + Assert.Equal((float)2905241, a0); + Assert.Equal((sbyte)-27, a1.F0); + Assert.Equal((sbyte)-77, a1.F1); + Assert.Equal((int)1315779092, a1.F2); + Assert.Equal((float)5373970, a1.F3.F0); + Assert.Equal((long)7022244764256789748, a2); + Assert.Equal((sbyte)-110, a3); + Assert.Equal((ushort)2074, a4); + Assert.Equal((nuint)unchecked((nuint)3560129042279209151), a5); + Assert.Equal((ushort)2200, a6); + Assert.Equal((ulong)5730241035812482149, a7); + Assert.Equal((int)18625011, a8.F0); + Assert.Equal((ulong)242340713355417257, a8.F1); + Assert.Equal((ulong)6962175160124965670, a8.F2); + Assert.Equal((uint)1983617839, a8.F3); + Assert.Equal((short)-28374, a9); + Assert.Equal((long)6355748563312062178, a10.F0); + Assert.Equal((short)-23189, a10.F1); + Assert.Equal((sbyte)81, a10.F2); + Assert.Equal((float)4547677, a11.F0); + Assert.Equal((ushort)6397, a12); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value 
= ExceptionDispatchInfo.Capture(ex); + } + + return new F6_Ret(new F6_Ret_S0(3036123356548380503, 653452587), 4787954187933165977, 5060002, -68); + } + + [Fact] + public static void TestSwiftCallbackFunc6() + { + Console.Write("Running SwiftCallbackFunc6: "); + ExceptionDispatchInfo ex = null; + F6_Ret val = SwiftCallbackFunc6(&SwiftCallbackFunc6Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((long)3036123356548380503, val.F0.F0); + Assert.Equal((uint)653452587, val.F0.F1); + Assert.Equal((ulong)4787954187933165977, val.F1); + Assert.Equal((float)5060002, val.F2); + Assert.Equal((sbyte)-68, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F7_S0 + { + public float F0; + public long F1; + public nuint F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F7_S1 + { + public short F0; + public uint F1; + public uint F2; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func71fs6UInt16VAEs5Int64V_s5UInt8VSdAeA5F7_S0VAISds6UInt32VAA0J3_S1Vs5Int32VAQSis5Int16VAESis6UInt64VAiStXE_tF")] + private static extern ushort SwiftCallbackFunc7(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static ushort SwiftCallbackFunc7Callback(long a0, byte a1, double a2, ushort a3, F7_S0 a4, byte a5, double a6, uint a7, F7_S1 a8, int a9, int a10, nint a11, short a12, ushort a13, nint a14, ulong a15, byte a16, short a17, SwiftSelf self) + { + try + { + Assert.Equal((long)7625368278886567558, a0); + Assert.Equal((byte)70, a1); + Assert.Equal((double)2146971972122530, a2); + Assert.Equal((ushort)54991, a3); + Assert.Equal((float)1072132, a4.F0); + Assert.Equal((long)3890459003549150599, a4.F1); + Assert.Equal((nuint)unchecked((nuint)56791000421908673), a4.F2); + Assert.Equal((byte)227, a5); + 
Assert.Equal((double)3248250571953113, a6); + Assert.Equal((uint)1138780108, a7); + Assert.Equal((short)-22670, a8.F0); + Assert.Equal((uint)1796712687, a8.F1); + Assert.Equal((uint)304251857, a8.F2); + Assert.Equal((int)1288765591, a9); + Assert.Equal((int)1382721790, a10); + Assert.Equal((nint)unchecked((nint)6746417265635727373), a11); + Assert.Equal((short)-15600, a12); + Assert.Equal((ushort)47575, a13); + Assert.Equal((nint)unchecked((nint)7200793040165597188), a14); + Assert.Equal((ulong)2304985873826892392, a15); + Assert.Equal((byte)99, a16); + Assert.Equal((short)-9993, a17); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 31412; + } + + [Fact] + public static void TestSwiftCallbackFunc7() + { + Console.Write("Running SwiftCallbackFunc7: "); + ExceptionDispatchInfo ex = null; + ushort val = SwiftCallbackFunc7(&SwiftCallbackFunc7Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ushort)31412, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F8_S0 + { + public short F0; + public short F1; + public nuint F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F8_S1 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F8_Ret_S0 + { + public int F0; + public nuint F1; + public nint F2; + + public F8_Ret_S0(int f0, nuint f1, nint f2) + { + F0 = f0; + F1 = f1; + F2 = f2; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 44)] + struct F8_Ret + { + public long F0; + public F8_Ret_S0 F1; + public nint F2; + public uint F3; + + public F8_Ret(long f0, F8_Ret_S0 f1, nint f2, uint f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func81fAA6F8_RetVAeA0G3_S0V_AA0G3_S1VtXE_tF")] + private static extern F8_Ret 
SwiftCallbackFunc8(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F8_Ret SwiftCallbackFunc8Callback(F8_S0 a0, F8_S1 a1, SwiftSelf self) + { + try + { + Assert.Equal((short)16278, a0.F0); + Assert.Equal((short)-31563, a0.F1); + Assert.Equal((nuint)unchecked((nuint)2171308312325435543), a0.F2); + Assert.Equal((long)8923668560896309835, a1.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F8_Ret(4170441467272673523, new F8_Ret_S0(1940721160, unchecked((nuint)6524670832376567295), unchecked((nint)4210781401091965722)), unchecked((nint)3245727696885859461), 855061841); + } + + [Fact] + public static void TestSwiftCallbackFunc8() + { + Console.Write("Running SwiftCallbackFunc8: "); + ExceptionDispatchInfo ex = null; + F8_Ret val = SwiftCallbackFunc8(&SwiftCallbackFunc8Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((long)4170441467272673523, val.F0); + Assert.Equal((int)1940721160, val.F1.F0); + Assert.Equal((nuint)unchecked((nuint)6524670832376567295), val.F1.F1); + Assert.Equal((nint)unchecked((nint)4210781401091965722), val.F1.F2); + Assert.Equal((nint)unchecked((nint)3245727696885859461), val.F2); + Assert.Equal((uint)855061841, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F9_S0_S0 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F9_S0 + { + public F9_S0_S0 F0; + public short F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F9_S1_S0 + { + public long F0; + public long F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 28)] + struct F9_S1 + { + public nint F0; + public F9_S1_S0 F1; + public float F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 19)] + struct F9_S2 + { + public ulong F0; + public double F1; + public short F2; + public sbyte F3; + } + 
+ [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F9_S3_S0_S0 + { + public ulong F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F9_S3_S0 + { + public F9_S3_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F9_S3 + { + public sbyte F0; + public F9_S3_S0 F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F9_S4_S0 + { + public ulong F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 9)] + struct F9_S4 + { + public F9_S4_S0 F0; + public sbyte F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F9_S5_S0 + { + public uint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F9_S5 + { + public uint F0; + public F9_S5_S0 F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F9_S6 + { + public double F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func91fs6UInt16VAEs4Int8V_s5UInt8Vs5Int64VAA5F9_S0VAA0K3_S1VAA0K3_S2VSdAA0K3_S3VAA0K3_S4VSdAA0K3_S5VAA0K3_S6VtXE_tF")] + private static extern ushort SwiftCallbackFunc9(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static ushort SwiftCallbackFunc9Callback(sbyte a0, byte a1, long a2, F9_S0 a3, F9_S1 a4, F9_S2 a5, double a6, F9_S3 a7, F9_S4 a8, double a9, F9_S5 a10, F9_S6 a11, SwiftSelf self) + { + try + { + Assert.Equal((sbyte)17, a0); + Assert.Equal((byte)104, a1); + Assert.Equal((long)8922699691031703191, a2); + Assert.Equal((byte)123, a3.F0.F0); + Assert.Equal((short)31706, a3.F1); + Assert.Equal((nint)unchecked((nint)1804058604961822948), a4.F0); + Assert.Equal((long)8772179036715198777, a4.F1.F0); + Assert.Equal((long)3320511540592563328, a4.F1.F1); + Assert.Equal((float)679540, a4.F2); + Assert.Equal((ulong)8642590829466497926, a5.F0); + Assert.Equal((double)4116322155252965, a5.F1); + 
Assert.Equal((short)17992, a5.F2); + Assert.Equal((sbyte)-48, a5.F3); + Assert.Equal((double)414017537937894, a6); + Assert.Equal((sbyte)47, a7.F0); + Assert.Equal((ulong)7576380984563129085, a7.F1.F0.F0); + Assert.Equal((ulong)1356827400304742803, a8.F0.F0); + Assert.Equal((sbyte)-17, a8.F1); + Assert.Equal((double)4458031413035521, a9); + Assert.Equal((uint)352075098, a10.F0); + Assert.Equal((uint)1840980094, a10.F1.F0); + Assert.Equal((double)396957263013930, a11.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 5567; + } + + [Fact] + public static void TestSwiftCallbackFunc9() + { + Console.Write("Running SwiftCallbackFunc9: "); + ExceptionDispatchInfo ex = null; + ushort val = SwiftCallbackFunc9(&SwiftCallbackFunc9Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ushort)5567, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 20)] + struct F10_Ret + { + public long F0; + public uint F1; + public ushort F2; + public uint F3; + + public F10_Ret(long f0, uint f1, ushort f2, uint f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func101fAA7F10_RetVAEs5Int16VXE_tF")] + private static extern F10_Ret SwiftCallbackFunc10(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F10_Ret SwiftCallbackFunc10Callback(short a0, SwiftSelf self) + { + try + { + Assert.Equal((short)-7168, a0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F10_Ret(7820305774933543349, 1501926289, 39078, 661487951); + } + + [Fact] + public static void TestSwiftCallbackFunc10() + { + Console.Write("Running SwiftCallbackFunc10: "); + ExceptionDispatchInfo 
ex = null; + F10_Ret val = SwiftCallbackFunc10(&SwiftCallbackFunc10Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((long)7820305774933543349, val.F0); + Assert.Equal((uint)1501926289, val.F1); + Assert.Equal((ushort)39078, val.F2); + Assert.Equal((uint)661487951, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F11_S0_S0 + { + public sbyte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F11_S0 + { + public uint F0; + public F11_S0_S0 F1; + public nuint F2; + public int F3; + public long F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F11_S1_S0 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 10)] + struct F11_S1 + { + public F11_S1_S0 F0; + public short F1; + public uint F2; + public short F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F11_S2 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F11_Ret + { + public short F0; + public short F1; + public byte F2; + public long F3; + + public F11_Ret(short f0, short f1, byte f2, long f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func111fAA7F11_RetVAEs6UInt32V_Sus6UInt64Vs5Int16VAA0G3_S0VSfs4Int8Vs6UInt16VAA0G3_S1VAGs5Int64VAgA0G3_S2VtXE_tF")] + private static extern F11_Ret SwiftCallbackFunc11(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F11_Ret SwiftCallbackFunc11Callback(uint a0, nuint a1, ulong a2, short a3, F11_S0 a4, float a5, sbyte a6, ushort a7, F11_S1 a8, uint a9, long a10, uint a11, F11_S2 a12, SwiftSelf self) + { + try + { + Assert.Equal((uint)454751144, a0); + Assert.Equal((nuint)unchecked((nuint)1696592254558667577), a1); + 
Assert.Equal((ulong)5831587230944972245, a2); + Assert.Equal((short)15352, a3); + Assert.Equal((uint)1306601347, a4.F0); + Assert.Equal((sbyte)123, a4.F1.F0); + Assert.Equal((nuint)unchecked((nuint)3064471520018434938), a4.F2); + Assert.Equal((int)272956246, a4.F3); + Assert.Equal((long)3683518307106722029, a4.F4); + Assert.Equal((float)5606122, a5); + Assert.Equal((sbyte)-126, a6); + Assert.Equal((ushort)50801, a7); + Assert.Equal((ushort)63467, a8.F0.F0); + Assert.Equal((short)-31828, a8.F1); + Assert.Equal((uint)2117176776, a8.F2); + Assert.Equal((short)-27265, a8.F3); + Assert.Equal((uint)1879606687, a9); + Assert.Equal((long)4981244336430926707, a10); + Assert.Equal((uint)1159924856, a11); + Assert.Equal((byte)29, a12.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F11_Ret(7934, -24509, 20, 5470383170748296608); + } + + [Fact] + public static void TestSwiftCallbackFunc11() + { + Console.Write("Running SwiftCallbackFunc11: "); + ExceptionDispatchInfo ex = null; + F11_Ret val = SwiftCallbackFunc11(&SwiftCallbackFunc11Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((short)7934, val.F0); + Assert.Equal((short)-24509, val.F1); + Assert.Equal((byte)20, val.F2); + Assert.Equal((long)5470383170748296608, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 9)] + struct F12_S0 + { + public ulong F0; + public sbyte F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F12_S1_S0_S0 + { + public ulong F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F12_S1_S0 + { + public F12_S1_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F12_S1 + { + public ushort F0; + public uint F1; + public F12_S1_S0 F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F12_Ret + { + public ulong F0; + public nint F1; + + public F12_Ret(ulong f0, nint f1) + { + F0 = f0; + F1 = f1; + } + 
} + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func121fAA7F12_RetVAeA0G3_S0V_s5Int16Vs6UInt64VAA0G3_S1Vs4Int8VtXE_tF")] + private static extern F12_Ret SwiftCallbackFunc12(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F12_Ret SwiftCallbackFunc12Callback(F12_S0 a0, short a1, ulong a2, F12_S1 a3, sbyte a4, SwiftSelf self) + { + try + { + Assert.Equal((ulong)3236871137735400659, a0.F0); + Assert.Equal((sbyte)-123, a0.F1); + Assert.Equal((short)-22828, a1); + Assert.Equal((ulong)2132557792366642035, a2); + Assert.Equal((ushort)42520, a3.F0); + Assert.Equal((uint)879349060, a3.F1); + Assert.Equal((ulong)5694370973277919380, a3.F2.F0.F0); + Assert.Equal((sbyte)-75, a4); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F12_Ret(4675419585914412295, unchecked((nint)1931022181202552704)); + } + + [Fact] + public static void TestSwiftCallbackFunc12() + { + Console.Write("Running SwiftCallbackFunc12: "); + ExceptionDispatchInfo ex = null; + F12_Ret val = SwiftCallbackFunc12(&SwiftCallbackFunc12Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ulong)4675419585914412295, val.F0); + Assert.Equal((nint)unchecked((nint)1931022181202552704), val.F1); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F13_S0_S0 + { + public long F0; + public long F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 22)] + struct F13_S0 + { + public F13_S0_S0 F0; + public float F1; + public short F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F13_S1 + { + public nint F0; + public ulong F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F13_S2_S0 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + 
struct F13_S2 + { + public F13_S2_S0 F0; + public double F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 5)] + struct F13_S3 + { + public float F0; + public sbyte F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F13_S4 + { + public nint F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func131fS2dAA6F13_S0V_s5Int32VSis6UInt16VSuAA0G3_S1VAA0G3_S2VSiSds4Int8VSfSiAA0G3_S3VSuAA0G3_S4VtXE_tF")] + private static extern double SwiftCallbackFunc13(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static double SwiftCallbackFunc13Callback(F13_S0 a0, int a1, nint a2, ushort a3, nuint a4, F13_S1 a5, F13_S2 a6, nint a7, double a8, sbyte a9, float a10, nint a11, F13_S3 a12, nuint a13, F13_S4 a14, SwiftSelf self) + { + try + { + Assert.Equal((long)9003727031576598067, a0.F0.F0); + Assert.Equal((long)8527798284445940986, a0.F0.F1); + Assert.Equal((float)3585628, a0.F1); + Assert.Equal((short)-12520, a0.F2); + Assert.Equal((int)1510815104, a1); + Assert.Equal((nint)unchecked((nint)5883331525294982326), a2); + Assert.Equal((ushort)60738, a3); + Assert.Equal((nuint)unchecked((nuint)5291799143932627546), a4); + Assert.Equal((nint)unchecked((nint)1949276559361384602), a5.F0); + Assert.Equal((ulong)876048527237138968, a5.F1); + Assert.Equal((byte)67, a6.F0.F0); + Assert.Equal((double)2455575228564859, a6.F1); + Assert.Equal((nint)unchecked((nint)2321408806345977320), a7); + Assert.Equal((double)12750323283778, a8); + Assert.Equal((sbyte)46, a9); + Assert.Equal((float)6774339, a10); + Assert.Equal((nint)unchecked((nint)5121910967292140178), a11); + Assert.Equal((float)8254279, a12.F0); + Assert.Equal((sbyte)-7, a12.F1); + Assert.Equal((nuint)unchecked((nuint)7533347207018595125), a13); + Assert.Equal((nint)unchecked((nint)6605448167191082938), a14.F0); + } + catch 
(Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 2798050901932855; + } + + [Fact] + public static void TestSwiftCallbackFunc13() + { + Console.Write("Running SwiftCallbackFunc13: "); + ExceptionDispatchInfo ex = null; + double val = SwiftCallbackFunc13(&SwiftCallbackFunc13Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((double)2798050901932855, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 10)] + struct F14_S0 + { + public sbyte F0; + public float F1; + public ushort F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F14_S1 + { + public ulong F0; + public ulong F1; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func141fs5Int64VA2E_AA6F14_S0Vs4Int8Vs6UInt64VAA0H3_S1VSitXE_tF")] + private static extern long SwiftCallbackFunc14(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static long SwiftCallbackFunc14Callback(long a0, F14_S0 a1, sbyte a2, ulong a3, F14_S1 a4, nint a5, SwiftSelf self) + { + try + { + Assert.Equal((long)5547219684656041875, a0); + Assert.Equal((sbyte)-39, a1.F0); + Assert.Equal((float)5768837, a1.F1); + Assert.Equal((ushort)53063, a1.F2); + Assert.Equal((sbyte)-102, a2); + Assert.Equal((ulong)5745438709817040873, a3); + Assert.Equal((ulong)2178706453119907411, a4.F0); + Assert.Equal((ulong)4424726479787355131, a4.F1); + Assert.Equal((nint)unchecked((nint)5693881223150438553), a5); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 5130561516716417305; + } + + [Fact] + public static void TestSwiftCallbackFunc14() + { + Console.Write("Running SwiftCallbackFunc14: "); + ExceptionDispatchInfo ex = null; + long val = 
SwiftCallbackFunc14(&SwiftCallbackFunc14Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((long)5130561516716417305, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F15_S0 + { + public uint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F15_S1 + { + public nint F0; + public uint F1; + public byte F2; + public short F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 25)] + struct F15_S2 + { + public sbyte F0; + public ulong F1; + public long F2; + public byte F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F15_S3 + { + public double F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func151fS2is5UInt8V_s6UInt16Vs6UInt64VAIs4Int8VSuSdSfSiAA6F15_S0VAA0K3_S1VAgA0K3_S2VAeA0K3_S3VtXE_tF")] + private static extern nint SwiftCallbackFunc15(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static nint SwiftCallbackFunc15Callback(byte a0, ushort a1, ulong a2, ulong a3, sbyte a4, nuint a5, double a6, float a7, nint a8, F15_S0 a9, F15_S1 a10, ushort a11, F15_S2 a12, byte a13, F15_S3 a14, SwiftSelf self) + { + try + { + Assert.Equal((byte)0, a0); + Assert.Equal((ushort)31081, a1); + Assert.Equal((ulong)8814881608835743979, a2); + Assert.Equal((ulong)4283853687332682681, a3); + Assert.Equal((sbyte)80, a4); + Assert.Equal((nuint)unchecked((nuint)7895994601265649979), a5); + Assert.Equal((double)1855521542692398, a6); + Assert.Equal((float)3235683, a7); + Assert.Equal((nint)unchecked((nint)215122646177738904), a8); + Assert.Equal((uint)2044750195, a9.F0); + Assert.Equal((nint)unchecked((nint)1772412898183620625), a10.F0); + Assert.Equal((uint)131256973, a10.F1); + Assert.Equal((byte)153, a10.F2); + Assert.Equal((short)25281, a10.F3); + Assert.Equal((ushort)50965, a11); + 
Assert.Equal((sbyte)-83, a12.F0); + Assert.Equal((ulong)7751486385861474282, a12.F1); + Assert.Equal((long)3744400479301818340, a12.F2); + Assert.Equal((byte)150, a12.F3); + Assert.Equal((byte)179, a13); + Assert.Equal((double)3108143600787174, a14.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return unchecked((nint)2326283264176371053); + } + + [Fact] + public static void TestSwiftCallbackFunc15() + { + Console.Write("Running SwiftCallbackFunc15: "); + ExceptionDispatchInfo ex = null; + nint val = SwiftCallbackFunc15(&SwiftCallbackFunc15Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nint)unchecked((nint)2326283264176371053), val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F16_S0 + { + public sbyte F0; + public int F1; + public ushort F2; + public ushort F3; + public uint F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F16_S1 + { + public ushort F0; + public sbyte F1; + public byte F2; + public nint F3; + public nint F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F16_S2_S0 + { + public sbyte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 14)] + struct F16_S2 + { + public int F0; + public int F1; + public uint F2; + public byte F3; + public F16_S2_S0 F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 28)] + struct F16_S3 + { + public short F0; + public double F1; + public double F2; + public int F3; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func161fs4Int8VAeA6F16_S0V_s5Int16VSfAA0H3_S1VAA0H3_S2Vs6UInt64VAA0H3_S3VSutXE_tF")] + private static extern sbyte SwiftCallbackFunc16(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static sbyte SwiftCallbackFunc16Callback(F16_S0 a0, 
short a1, float a2, F16_S1 a3, F16_S2 a4, ulong a5, F16_S3 a6, nuint a7, SwiftSelf self) + { + try + { + Assert.Equal((sbyte)-59, a0.F0); + Assert.Equal((int)1181591186, a0.F1); + Assert.Equal((ushort)44834, a0.F2); + Assert.Equal((ushort)28664, a0.F3); + Assert.Equal((uint)404461767, a0.F4); + Assert.Equal((short)2482, a1); + Assert.Equal((float)2997348, a2); + Assert.Equal((ushort)22423, a3.F0); + Assert.Equal((sbyte)-106, a3.F1); + Assert.Equal((byte)182, a3.F2); + Assert.Equal((nint)unchecked((nint)3784074551275084420), a3.F3); + Assert.Equal((nint)unchecked((nint)7092934571108982079), a3.F4); + Assert.Equal((int)1835134709, a4.F0); + Assert.Equal((int)246067261, a4.F1); + Assert.Equal((uint)1986526591, a4.F2); + Assert.Equal((byte)24, a4.F3); + Assert.Equal((sbyte)-112, a4.F4.F0); + Assert.Equal((ulong)1465053746911704089, a5); + Assert.Equal((short)-27636, a6.F0); + Assert.Equal((double)1896887612303356, a6.F1); + Assert.Equal((double)4263157082840190, a6.F2); + Assert.Equal((int)774653659, a6.F3); + Assert.Equal((nuint)unchecked((nuint)3755775782607884861), a7); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 103; + } + + [Fact] + public static void TestSwiftCallbackFunc16() + { + Console.Write("Running SwiftCallbackFunc16: "); + ExceptionDispatchInfo ex = null; + sbyte val = SwiftCallbackFunc16(&SwiftCallbackFunc16Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((sbyte)103, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F17_S0 + { + public int F0; + public nuint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F17_S1_S0 + { + public double F0; + public uint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 17)] + struct F17_S1 + { + public F17_S1_S0 F0; + public int F1; + public byte F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F17_S2 + { + public uint F0; + } + + 
[UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func171fS2ds6UInt32V_AA6F17_S0VAA0H3_S1VSds6UInt64VAA0H3_S2VtXE_tF")] + private static extern double SwiftCallbackFunc17(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static double SwiftCallbackFunc17Callback(uint a0, F17_S0 a1, F17_S1 a2, double a3, ulong a4, F17_S2 a5, SwiftSelf self) + { + try + { + Assert.Equal((uint)201081002, a0); + Assert.Equal((int)2018751226, a1.F0); + Assert.Equal((nuint)unchecked((nuint)8488544433072104028), a1.F1); + Assert.Equal((double)1190765430157980, a2.F0.F0); + Assert.Equal((uint)70252071, a2.F0.F1); + Assert.Equal((int)1297775609, a2.F1); + Assert.Equal((byte)160, a2.F2); + Assert.Equal((double)4290084351352688, a3); + Assert.Equal((ulong)4738339757002694731, a4); + Assert.Equal((uint)1829312773, a5.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 4214404512040467; + } + + [Fact] + public static void TestSwiftCallbackFunc17() + { + Console.Write("Running SwiftCallbackFunc17: "); + ExceptionDispatchInfo ex = null; + double val = SwiftCallbackFunc17(&SwiftCallbackFunc17Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((double)4214404512040467, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F18_S0 + { + public sbyte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F18_S1 + { + public ushort F0; + public short F1; + public double F2; + public nuint F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F18_S2 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F18_Ret_S0 + { + public short F0; + + public F18_Ret_S0(short f0) + { + F0 = f0; + } + } + + [StructLayout(LayoutKind.Sequential, Size 
= 2)] + struct F18_Ret + { + public F18_Ret_S0 F0; + + public F18_Ret(F18_Ret_S0 f0) + { + F0 = f0; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func181fAA7F18_RetVAeA0G3_S0V_AA0G3_S1VAA0G3_S2VSus6UInt32Vs5Int64Vs5Int16VSdtXE_tF")] + private static extern F18_Ret SwiftCallbackFunc18(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F18_Ret SwiftCallbackFunc18Callback(F18_S0 a0, F18_S1 a1, F18_S2 a2, nuint a3, uint a4, long a5, short a6, double a7, SwiftSelf self) + { + try + { + Assert.Equal((sbyte)106, a0.F0); + Assert.Equal((ushort)21619, a1.F0); + Assert.Equal((short)-4350, a1.F1); + Assert.Equal((double)3457288266203248, a1.F2); + Assert.Equal((nuint)unchecked((nuint)9020447812661292883), a1.F3); + Assert.Equal((nint)unchecked((nint)2317132584983719004), a2.F0); + Assert.Equal((nuint)unchecked((nuint)7379425918918939512), a3); + Assert.Equal((uint)2055208746, a4); + Assert.Equal((long)1042861174364145790, a5); + Assert.Equal((short)28457, a6); + Assert.Equal((double)1799004152435515, a7); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F18_Ret(new F18_Ret_S0(-2080)); + } + + [Fact] + public static void TestSwiftCallbackFunc18() + { + Console.Write("Running SwiftCallbackFunc18: "); + ExceptionDispatchInfo ex = null; + F18_Ret val = SwiftCallbackFunc18(&SwiftCallbackFunc18Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((short)-2080, val.F0.F0); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F19_S0 + { + public short F0; + public sbyte F1; + public float F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 10)] + struct F19_S1 + { + public long F0; + public ushort F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 
16)] + struct F19_S2 + { + public ulong F0; + public long F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F19_S3 + { + public uint F0; + public int F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F19_Ret_S0 + { + public long F0; + + public F19_Ret_S0(long f0) + { + F0 = f0; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 56)] + struct F19_Ret + { + public uint F0; + public long F1; + public ushort F2; + public F19_Ret_S0 F3; + public double F4; + public double F5; + public double F6; + + public F19_Ret(uint f0, long f1, ushort f2, F19_Ret_S0 f3, double f4, double f5, double f6) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + F5 = f5; + F6 = f6; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func191fAA7F19_RetVAEs5Int64V_s5UInt8VAA0G3_S0VSiAA0G3_S1Vs5Int32VAOSus6UInt64VAA0G3_S2Vs6UInt16VAA0G3_S3Vs4Int8VAGtXE_tF")] + private static extern F19_Ret SwiftCallbackFunc19(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F19_Ret SwiftCallbackFunc19Callback(long a0, byte a1, F19_S0 a2, nint a3, F19_S1 a4, int a5, int a6, nuint a7, ulong a8, F19_S2 a9, ushort a10, F19_S3 a11, sbyte a12, long a13, SwiftSelf self) + { + try + { + Assert.Equal((long)7456120134117592143, a0); + Assert.Equal((byte)114, a1); + Assert.Equal((short)-7583, a2.F0); + Assert.Equal((sbyte)97, a2.F1); + Assert.Equal((float)2768322, a2.F2); + Assert.Equal((nint)unchecked((nint)3605245176125291560), a3); + Assert.Equal((long)4445885313084714470, a4.F0); + Assert.Equal((ushort)15810, a4.F1); + Assert.Equal((int)1179699879, a5); + Assert.Equal((int)109603412, a6); + Assert.Equal((nuint)unchecked((nuint)6521628547431964799), a7); + Assert.Equal((ulong)7687430644226018854, a8); + Assert.Equal((ulong)8464855230956039883, a9.F0); + 
Assert.Equal((long)861462819289140037, a9.F1); + Assert.Equal((ushort)26519, a10); + Assert.Equal((uint)1864602741, a11.F0); + Assert.Equal((int)397176384, a11.F1); + Assert.Equal((sbyte)81, a12); + Assert.Equal((long)4909173176891211442, a13); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F19_Ret(301901837, 5183322153843416979, 16744, new F19_Ret_S0(4587948079871666183), 341974742264104, 750011710367955, 681779256292286); + } + + [Fact] + public static void TestSwiftCallbackFunc19() + { + Console.Write("Running SwiftCallbackFunc19: "); + ExceptionDispatchInfo ex = null; + F19_Ret val = SwiftCallbackFunc19(&SwiftCallbackFunc19Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((uint)301901837, val.F0); + Assert.Equal((long)5183322153843416979, val.F1); + Assert.Equal((ushort)16744, val.F2); + Assert.Equal((long)4587948079871666183, val.F3.F0); + Assert.Equal((double)341974742264104, val.F4); + Assert.Equal((double)750011710367955, val.F5); + Assert.Equal((double)681779256292286, val.F6); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F20_S0_S0 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 18)] + struct F20_S0 + { + public short F0; + public nuint F1; + public F20_S0_S0 F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F20_S1_S0 + { + public float F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 36)] + struct F20_S1 + { + public long F0; + public nuint F1; + public F20_S1_S0 F2; + public long F3; + public int F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F20_S2 + { + public uint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F20_Ret + { + public ushort F0; + public ushort F1; + public double F2; + public short F3; + public double F4; + + public F20_Ret(ushort f0, ushort f1, double f2, short f3, double f4) + { + F0 = f0; + F1 = f1; 
+ F2 = f2; + F3 = f3; + F4 = f4; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func201fAA7F20_RetVAeA0G3_S0V_AA0G3_S1VS2fs4Int8VAA0G3_S2VSftXE_tF")] + private static extern F20_Ret SwiftCallbackFunc20(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F20_Ret SwiftCallbackFunc20Callback(F20_S0 a0, F20_S1 a1, float a2, float a3, sbyte a4, F20_S2 a5, float a6, SwiftSelf self) + { + try + { + Assert.Equal((short)28858, a0.F0); + Assert.Equal((nuint)unchecked((nuint)7024100299344418039), a0.F1); + Assert.Equal((ushort)13025, a0.F2.F0); + Assert.Equal((long)7900431324553135989, a1.F0); + Assert.Equal((nuint)unchecked((nuint)8131425055682506706), a1.F1); + Assert.Equal((float)3884322, a1.F2.F0); + Assert.Equal((long)605453501265278638, a1.F3); + Assert.Equal((int)353756684, a1.F4); + Assert.Equal((float)622319, a2); + Assert.Equal((float)1401604, a3); + Assert.Equal((sbyte)-101, a4); + Assert.Equal((uint)1355570413, a5.F0); + Assert.Equal((float)2912776, a6); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F20_Ret(53384, 55736, 105589186779121, -24217, 2181722329638192); + } + + [Fact] + public static void TestSwiftCallbackFunc20() + { + Console.Write("Running SwiftCallbackFunc20: "); + ExceptionDispatchInfo ex = null; + F20_Ret val = SwiftCallbackFunc20(&SwiftCallbackFunc20Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ushort)53384, val.F0); + Assert.Equal((ushort)55736, val.F1); + Assert.Equal((double)105589186779121, val.F2); + Assert.Equal((short)-24217, val.F3); + Assert.Equal((double)2181722329638192, val.F4); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F21_S0 + { + public double F0; + public ulong F1; + } + + 
[StructLayout(LayoutKind.Sequential, Size = 2)] + struct F21_S1 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F21_Ret + { + public ushort F0; + public uint F1; + public long F2; + + public F21_Ret(ushort f0, uint f1, long f2) + { + F0 = f0; + F1 = f1; + F2 = f2; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func211fAA7F21_RetVAEs5Int32V_s5Int16VAA0G3_S0VAgA0G3_S1Vs5Int64Vs6UInt32VAOs5UInt8Vs6UInt16VtXE_tF")] + private static extern F21_Ret SwiftCallbackFunc21(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F21_Ret SwiftCallbackFunc21Callback(int a0, short a1, F21_S0 a2, int a3, F21_S1 a4, long a5, uint a6, long a7, byte a8, ushort a9, SwiftSelf self) + { + try + { + Assert.Equal((int)256017319, a0); + Assert.Equal((short)14555, a1); + Assert.Equal((double)2102091966108033, a2.F0); + Assert.Equal((ulong)8617538752301505079, a2.F1); + Assert.Equal((int)834677431, a3); + Assert.Equal((ushort)7043, a4.F0); + Assert.Equal((long)7166819734655141128, a5); + Assert.Equal((uint)965538086, a6); + Assert.Equal((long)3827752442102685645, a7); + Assert.Equal((byte)110, a8); + Assert.Equal((ushort)33646, a9); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F21_Ret(13904, 1020161192, 7669588951617295307); + } + + [Fact] + public static void TestSwiftCallbackFunc21() + { + Console.Write("Running SwiftCallbackFunc21: "); + ExceptionDispatchInfo ex = null; + F21_Ret val = SwiftCallbackFunc21(&SwiftCallbackFunc21Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ushort)13904, val.F0); + Assert.Equal((uint)1020161192, val.F1); + Assert.Equal((long)7669588951617295307, val.F2); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, 
Size = 24)] + struct F22_S0 + { + public nint F0; + public float F1; + public double F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F22_S1 + { + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F22_S2 + { + public int F0; + public double F1; + public float F2; + public short F3; + public ushort F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 10)] + struct F22_S3 + { + public long F0; + public ushort F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 10)] + struct F22_S4 + { + public double F0; + public ushort F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 6)] + struct F22_S5 + { + public uint F0; + public short F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F22_S6 + { + public float F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F22_Ret + { + public ushort F0; + public short F1; + public nuint F2; + + public F22_Ret(ushort f0, short f1, nuint f2) + { + F0 = f0; + F1 = f1; + F2 = f2; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func221fAA7F22_RetVAEs5Int32V_AA0G3_S0VAA0G3_S1VAA0G3_S2VAA0G3_S3Vs4Int8VAA0G3_S4Vs5UInt8Vs6UInt16Vs5Int64VAA0G3_S5VAYSfAA0G3_S6VAWtXE_tF")] + private static extern F22_Ret SwiftCallbackFunc22(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F22_Ret SwiftCallbackFunc22Callback(int a0, F22_S0 a1, F22_S1 a2, F22_S2 a3, F22_S3 a4, sbyte a5, F22_S4 a6, byte a7, ushort a8, long a9, F22_S5 a10, long a11, float a12, F22_S6 a13, ushort a14, SwiftSelf self) + { + try + { + Assert.Equal((int)640156952, a0); + Assert.Equal((nint)unchecked((nint)824774470287401457), a1.F0); + Assert.Equal((float)6163704, a1.F1); + Assert.Equal((double)54328782764685, a1.F2); + Assert.Equal((nuint)unchecked((nuint)1679730195865415747), a2.F0); + 
Assert.Equal((int)1462995665, a3.F0); + Assert.Equal((double)2554087365600344, a3.F1); + Assert.Equal((float)8193295, a3.F2); + Assert.Equal((short)16765, a3.F3); + Assert.Equal((ushort)45388, a3.F4); + Assert.Equal((long)5560492364570389430, a4.F0); + Assert.Equal((ushort)48308, a4.F1); + Assert.Equal((sbyte)71, a5); + Assert.Equal((double)1639169280741045, a6.F0); + Assert.Equal((ushort)12045, a6.F1); + Assert.Equal((byte)217, a7); + Assert.Equal((ushort)62917, a8); + Assert.Equal((long)1465918945905384332, a9); + Assert.Equal((uint)1364750179, a10.F0); + Assert.Equal((short)3311, a10.F1); + Assert.Equal((long)9003480567517966914, a11); + Assert.Equal((float)2157327, a12); + Assert.Equal((float)6647392, a13.F0); + Assert.Equal((ushort)1760, a14); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F22_Ret(39726, 21753, unchecked((nuint)5706055053768469840)); + } + + [Fact] + public static void TestSwiftCallbackFunc22() + { + Console.Write("Running SwiftCallbackFunc22: "); + ExceptionDispatchInfo ex = null; + F22_Ret val = SwiftCallbackFunc22(&SwiftCallbackFunc22Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ushort)39726, val.F0); + Assert.Equal((short)21753, val.F1); + Assert.Equal((nuint)unchecked((nuint)5706055053768469840), val.F2); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F23_S0 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F23_S1 + { + public nint F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func231fS2dSu_s5UInt8Vs4Int8VA2eA6F23_S0VSuAA0I3_S1VSdtXE_tF")] + private static extern double SwiftCallbackFunc23(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static double 
SwiftCallbackFunc23Callback(nuint a0, byte a1, sbyte a2, byte a3, byte a4, F23_S0 a5, nuint a6, F23_S1 a7, double a8, SwiftSelf self) + { + try + { + Assert.Equal((nuint)unchecked((nuint)5779410841248940897), a0); + Assert.Equal((byte)192, a1); + Assert.Equal((sbyte)-128, a2); + Assert.Equal((byte)133, a3); + Assert.Equal((byte)20, a4); + Assert.Equal((nint)unchecked((nint)2959916071636885436), a5.F0); + Assert.Equal((nuint)unchecked((nuint)3651155214497129159), a6); + Assert.Equal((nint)unchecked((nint)8141565342203061885), a7.F0); + Assert.Equal((double)1465425469608034, a8); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 893532429511039; + } + + [Fact] + public static void TestSwiftCallbackFunc23() + { + Console.Write("Running SwiftCallbackFunc23: "); + ExceptionDispatchInfo ex = null; + double val = SwiftCallbackFunc23(&SwiftCallbackFunc23Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((double)893532429511039, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 20)] + struct F24_S0 + { + public sbyte F0; + public byte F1; + public ulong F2; + public uint F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F24_S1 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F24_S2_S0 + { + public ushort F0; + public uint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F24_S2_S1 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F24_S2 + { + public nint F0; + public uint F1; + public F24_S2_S0 F2; + public F24_S2_S1 F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F24_S3 + { + public short F0; + public float F1; + public long F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F24_S4 + { + public byte F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, 
EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func241fS2fs5Int32V_SuAA6F24_S0Vs6UInt16VAA0H3_S1Vs4Int8VAA0H3_S2Vs6UInt64VAqA0H3_S3VSdAA0H3_S4VtXE_tF")] + private static extern float SwiftCallbackFunc24(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static float SwiftCallbackFunc24Callback(int a0, nuint a1, F24_S0 a2, ushort a3, F24_S1 a4, sbyte a5, F24_S2 a6, ulong a7, ulong a8, F24_S3 a9, double a10, F24_S4 a11, SwiftSelf self) + { + try + { + Assert.Equal((int)1710754874, a0); + Assert.Equal((nuint)unchecked((nuint)6447433131978039331), a1); + Assert.Equal((sbyte)-92, a2.F0); + Assert.Equal((byte)181, a2.F1); + Assert.Equal((ulong)3710374263631495948, a2.F2); + Assert.Equal((uint)257210428, a2.F3); + Assert.Equal((ushort)6631, a3); + Assert.Equal((ushort)2303, a4.F0); + Assert.Equal((sbyte)15, a5); + Assert.Equal((nint)unchecked((nint)2509049432824972381), a6.F0); + Assert.Equal((uint)616918672, a6.F1); + Assert.Equal((ushort)50635, a6.F2.F0); + Assert.Equal((uint)1337844540, a6.F2.F1); + Assert.Equal((long)335964796567786281, a6.F3.F0); + Assert.Equal((ulong)1114365571136806382, a7); + Assert.Equal((ulong)8988425145801188208, a8); + Assert.Equal((short)31969, a9.F0); + Assert.Equal((float)3008861, a9.F1); + Assert.Equal((long)5466306080595269107, a9.F2); + Assert.Equal((double)2027780227887952, a10); + Assert.Equal((byte)234, a11.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 3470219; + } + + [Fact] + public static void TestSwiftCallbackFunc24() + { + Console.Write("Running SwiftCallbackFunc24: "); + ExceptionDispatchInfo ex = null; + float val = SwiftCallbackFunc24(&SwiftCallbackFunc24Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((float)3470219, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F25_S0 + { + public nuint F0; 
+ } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F25_S1 + { + public float F0; + public sbyte F1; + public float F2; + public nint F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 25)] + struct F25_S2 + { + public nuint F0; + public nuint F1; + public long F2; + public byte F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F25_S3 + { + public float F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F25_S4 + { + public sbyte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 20)] + struct F25_Ret + { + public ulong F0; + public long F1; + public byte F2; + public ushort F3; + + public F25_Ret(ulong f0, long f1, byte f2, ushort f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func251fAA7F25_RetVAeA0G3_S0V_s6UInt16VSuAA0G3_S1Vs5Int16VAA0G3_S2Vs6UInt64VA2qA0G3_S3VAA0G3_S4VtXE_tF")] + private static extern F25_Ret SwiftCallbackFunc25(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F25_Ret SwiftCallbackFunc25Callback(F25_S0 a0, ushort a1, nuint a2, F25_S1 a3, short a4, F25_S2 a5, ulong a6, ulong a7, ulong a8, F25_S3 a9, F25_S4 a10, SwiftSelf self) + { + try + { + Assert.Equal((nuint)unchecked((nuint)6077761381429658786), a0.F0); + Assert.Equal((ushort)2300, a1); + Assert.Equal((nuint)unchecked((nuint)3498354181807010234), a2); + Assert.Equal((float)5360721, a3.F0); + Assert.Equal((sbyte)-40, a3.F1); + Assert.Equal((float)109485, a3.F2); + Assert.Equal((nint)unchecked((nint)2311625789899959825), a3.F3); + Assert.Equal((short)-28395, a4); + Assert.Equal((nuint)unchecked((nuint)8729509817732080529), a5.F0); + Assert.Equal((nuint)unchecked((nuint)860365359368130822), a5.F1); + Assert.Equal((long)7498894262834346040, a5.F2); + Assert.Equal((byte)218, a5.F3); + 
Assert.Equal((ulong)961687210282504701, a6); + Assert.Equal((ulong)7184177441364400868, a7); + Assert.Equal((ulong)8389319500274436977, a8); + Assert.Equal((float)4437173, a9.F0); + Assert.Equal((sbyte)-107, a10.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F25_Ret(8006862079710523876, 7879510716857855733, 114, 3220); + } + + [Fact] + public static void TestSwiftCallbackFunc25() + { + Console.Write("Running SwiftCallbackFunc25: "); + ExceptionDispatchInfo ex = null; + F25_Ret val = SwiftCallbackFunc25(&SwiftCallbackFunc25Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ulong)8006862079710523876, val.F0); + Assert.Equal((long)7879510716857855733, val.F1); + Assert.Equal((byte)114, val.F2); + Assert.Equal((ushort)3220, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 18)] + struct F26_S0 + { + public sbyte F0; + public nint F1; + public byte F2; + public byte F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F26_S1_S0 + { + public ulong F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F26_S1 + { + public sbyte F0; + public int F1; + public short F2; + public F26_S1_S0 F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F26_S2 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F26_S3 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 9)] + struct F26_Ret + { + public nuint F0; + public byte F1; + + public F26_Ret(nuint f0, byte f1) + { + F0 = f0; + F1 = f1; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func261fAA7F26_RetVAEs4Int8V_s5UInt8Vs6UInt32VAA0G3_S0VAA0G3_S1VAA0G3_S2VAA0G3_S3VtXE_tF")] + private static extern F26_Ret SwiftCallbackFunc26(delegate* unmanaged[Swift] func, void* funcContext); + + 
[UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F26_Ret SwiftCallbackFunc26Callback(sbyte a0, byte a1, uint a2, F26_S0 a3, F26_S1 a4, F26_S2 a5, F26_S3 a6, SwiftSelf self) + { + try + { + Assert.Equal((sbyte)-16, a0); + Assert.Equal((byte)220, a1); + Assert.Equal((uint)72386567, a2); + Assert.Equal((sbyte)-33, a3.F0); + Assert.Equal((nint)unchecked((nint)6488877286424796715), a3.F1); + Assert.Equal((byte)143, a3.F2); + Assert.Equal((byte)74, a3.F3); + Assert.Equal((sbyte)104, a4.F0); + Assert.Equal((int)1719453315, a4.F1); + Assert.Equal((short)20771, a4.F2); + Assert.Equal((ulong)3636117595999837800, a4.F3.F0); + Assert.Equal((long)2279530426119665839, a5.F0); + Assert.Equal((byte)207, a6.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F26_Ret(unchecked((nuint)1050319650554930471), 89); + } + + [Fact] + public static void TestSwiftCallbackFunc26() + { + Console.Write("Running SwiftCallbackFunc26: "); + ExceptionDispatchInfo ex = null; + F26_Ret val = SwiftCallbackFunc26(&SwiftCallbackFunc26Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nuint)unchecked((nuint)1050319650554930471), val.F0); + Assert.Equal((byte)89, val.F1); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F27_S0 + { + public short F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 3)] + struct F27_S1_S0 + { + public ushort F0; + public sbyte F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F27_S1 + { + public long F0; + public F27_S1_S0 F1; + public float F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F27_S2 + { + public ulong F0; + public sbyte F1; + public uint F2; + public long F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F27_S3_S0 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F27_S3 + { + public 
F27_S3_S0 F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func271fS2fs6UInt64V_s5UInt8VAA6F27_S0VA2gA0I3_S1Vs5Int32VAA0I3_S2VSis6UInt32VAA0I3_S3VtXE_tF")] + private static extern float SwiftCallbackFunc27(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static float SwiftCallbackFunc27Callback(ulong a0, byte a1, F27_S0 a2, byte a3, byte a4, F27_S1 a5, int a6, F27_S2 a7, nint a8, uint a9, F27_S3 a10, SwiftSelf self) + { + try + { + Assert.Equal((ulong)4847421047018330189, a0); + Assert.Equal((byte)214, a1); + Assert.Equal((short)31313, a2.F0); + Assert.Equal((byte)207, a3); + Assert.Equal((byte)174, a4); + Assert.Equal((long)4476120319602257660, a5.F0); + Assert.Equal((ushort)26662, a5.F1.F0); + Assert.Equal((sbyte)-55, a5.F1.F1); + Assert.Equal((float)70666, a5.F2); + Assert.Equal((int)1340306103, a6); + Assert.Equal((ulong)2772939788297637999, a7.F0); + Assert.Equal((sbyte)-65, a7.F1); + Assert.Equal((uint)7500441, a7.F2); + Assert.Equal((long)4926907273817562134, a7.F3); + Assert.Equal((nint)unchecked((nint)5862689255099071258), a8); + Assert.Equal((uint)1077270996, a9); + Assert.Equal((ushort)35167, a10.F0.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 8117856; + } + + [Fact] + public static void TestSwiftCallbackFunc27() + { + Console.Write("Running SwiftCallbackFunc27: "); + ExceptionDispatchInfo ex = null; + float val = SwiftCallbackFunc27(&SwiftCallbackFunc27Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((float)8117856, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 9)] + struct F28_S0 + { + public ulong F0; + public sbyte F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 28)] + struct F28_S1 + { + public long F0; + public 
nuint F1; + public nint F2; + public int F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F28_S2 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F28_S3 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F28_Ret_S0 + { + public float F0; + + public F28_Ret_S0(float f0) + { + F0 = f0; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 6)] + struct F28_Ret + { + public F28_Ret_S0 F0; + public ushort F1; + + public F28_Ret(F28_Ret_S0 f0, ushort f1) + { + F0 = f0; + F1 = f1; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func281fAA7F28_RetVAEs6UInt32V_s6UInt16Vs4Int8VAkISfAA0G3_S0VSds6UInt64VAA0G3_S1VAA0G3_S2VAA0G3_S3VtXE_tF")] + private static extern F28_Ret SwiftCallbackFunc28(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F28_Ret SwiftCallbackFunc28Callback(uint a0, ushort a1, sbyte a2, sbyte a3, ushort a4, float a5, F28_S0 a6, double a7, ulong a8, F28_S1 a9, F28_S2 a10, F28_S3 a11, SwiftSelf self) + { + try + { + Assert.Equal((uint)893827094, a0); + Assert.Equal((ushort)38017, a1); + Assert.Equal((sbyte)-90, a2); + Assert.Equal((sbyte)-1, a3); + Assert.Equal((ushort)16109, a4); + Assert.Equal((float)5844449, a5); + Assert.Equal((ulong)176269147098539470, a6.F0); + Assert.Equal((sbyte)23, a6.F1); + Assert.Equal((double)1431426259441210, a7); + Assert.Equal((ulong)6103261251702315645, a8); + Assert.Equal((long)3776818122826483419, a9.F0); + Assert.Equal((nuint)unchecked((nuint)9181420263296840471), a9.F1); + Assert.Equal((nint)unchecked((nint)3281861424961082542), a9.F2); + Assert.Equal((int)1442905253, a9.F3); + Assert.Equal((nint)unchecked((nint)8760009193798370900), a10.F0); + Assert.Equal((long)7119917900929398683, a11.F0); + } + catch (Exception ex) + { + 
*(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F28_Ret(new F28_Ret_S0(4515425), 25944); + } + + [Fact] + public static void TestSwiftCallbackFunc28() + { + Console.Write("Running SwiftCallbackFunc28: "); + ExceptionDispatchInfo ex = null; + F28_Ret val = SwiftCallbackFunc28(&SwiftCallbackFunc28Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((float)4515425, val.F0.F0); + Assert.Equal((ushort)25944, val.F1); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 18)] + struct F29_S0 + { + public byte F0; + public double F1; + public ushort F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 28)] + struct F29_S1 + { + public uint F0; + public nint F1; + public ulong F2; + public uint F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F29_S2 + { + public int F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F29_S3 + { + public uint F0; + public uint F1; + public float F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F29_S4 + { + public int F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F29_Ret_S0 + { + public nint F0; + public ulong F1; + + public F29_Ret_S0(nint f0, ulong f1) + { + F0 = f0; + F1 = f1; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 52)] + struct F29_Ret + { + public nuint F0; + public nuint F1; + public nuint F2; + public F29_Ret_S0 F3; + public ulong F4; + public uint F5; + + public F29_Ret(nuint f0, nuint f1, nuint f2, F29_Ret_S0 f3, ulong f4, uint f5) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + F5 = f5; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func291fAA7F29_RetVAeA0G3_S0V_Sis6UInt64Vs5UInt8Vs5Int64VAKSiAA0G3_S1Vs5Int32Vs4Int8VAkiA0G3_S2VAA0G3_S3Vs5Int16VAA0G3_S4Vs6UInt32VtXE_tF")] + private static extern F29_Ret SwiftCallbackFunc29(delegate* 
unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F29_Ret SwiftCallbackFunc29Callback(F29_S0 a0, nint a1, ulong a2, byte a3, long a4, byte a5, nint a6, F29_S1 a7, int a8, sbyte a9, byte a10, ulong a11, F29_S2 a12, F29_S3 a13, short a14, F29_S4 a15, uint a16, SwiftSelf self) + { + try + { + Assert.Equal((byte)152, a0.F0); + Assert.Equal((double)737900189383874, a0.F1); + Assert.Equal((ushort)33674, a0.F2); + Assert.Equal((nint)unchecked((nint)5162040247631126074), a1); + Assert.Equal((ulong)6524156301721885895, a2); + Assert.Equal((byte)129, a3); + Assert.Equal((long)6661424933974053497, a4); + Assert.Equal((byte)145, a5); + Assert.Equal((nint)unchecked((nint)7521422786615537370), a6); + Assert.Equal((uint)1361601345, a7.F0); + Assert.Equal((nint)unchecked((nint)3366726213840694614), a7.F1); + Assert.Equal((ulong)7767610514138029164, a7.F2); + Assert.Equal((uint)1266864987, a7.F3); + Assert.Equal((int)1115803878, a8); + Assert.Equal((sbyte)5, a9); + Assert.Equal((byte)80, a10); + Assert.Equal((ulong)2041754562738600205, a11); + Assert.Equal((int)1492686870, a12.F0); + Assert.Equal((uint)142491811, a13.F0); + Assert.Equal((uint)1644962309, a13.F1); + Assert.Equal((float)1905811, a13.F2); + Assert.Equal((short)-3985, a14); + Assert.Equal((int)1921386549, a15.F0); + Assert.Equal((uint)1510666400, a16); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F29_Ret(unchecked((nuint)1866868811776234672), unchecked((nuint)8169323498884891375), unchecked((nuint)2528257272266524428), new F29_Ret_S0(unchecked((nint)4705260670026405131), 8299241689326234556), 4459635217352912270, 188636136); + } + + [Fact] + public static void TestSwiftCallbackFunc29() + { + Console.Write("Running SwiftCallbackFunc29: "); + ExceptionDispatchInfo ex = null; + F29_Ret val = SwiftCallbackFunc29(&SwiftCallbackFunc29Callback, &ex); + if (ex != 
null) + ex.Throw(); + + Assert.Equal((nuint)unchecked((nuint)1866868811776234672), val.F0); + Assert.Equal((nuint)unchecked((nuint)8169323498884891375), val.F1); + Assert.Equal((nuint)unchecked((nuint)2528257272266524428), val.F2); + Assert.Equal((nint)unchecked((nint)4705260670026405131), val.F3.F0); + Assert.Equal((ulong)8299241689326234556, val.F3.F1); + Assert.Equal((ulong)4459635217352912270, val.F4); + Assert.Equal((uint)188636136, val.F5); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 7)] + struct F30_S0 + { + public ushort F0; + public short F1; + public short F2; + public sbyte F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F30_S1 + { + public ushort F0; + public nuint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F30_S2 + { + public long F0; + public sbyte F1; + public ushort F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F30_S3 + { + public sbyte F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func301fS2fAA6F30_S0V_AA0G3_S1VAA0G3_S2VAA0G3_S3VSitXE_tF")] + private static extern float SwiftCallbackFunc30(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static float SwiftCallbackFunc30Callback(F30_S0 a0, F30_S1 a1, F30_S2 a2, F30_S3 a3, nint a4, SwiftSelf self) + { + try + { + Assert.Equal((ushort)50723, a0.F0); + Assert.Equal((short)19689, a0.F1); + Assert.Equal((short)-6469, a0.F2); + Assert.Equal((sbyte)83, a0.F3); + Assert.Equal((ushort)51238, a1.F0); + Assert.Equal((nuint)unchecked((nuint)5879147675377398012), a1.F1); + Assert.Equal((long)7909999288286190848, a2.F0); + Assert.Equal((sbyte)-99, a2.F1); + Assert.Equal((ushort)61385, a2.F2); + Assert.Equal((sbyte)48, a3.F0); + Assert.Equal((nint)unchecked((nint)2980085298293056148), a4); + } + catch (Exception 
ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 289587; + } + + [Fact] + public static void TestSwiftCallbackFunc30() + { + Console.Write("Running SwiftCallbackFunc30: "); + ExceptionDispatchInfo ex = null; + float val = SwiftCallbackFunc30(&SwiftCallbackFunc30Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((float)289587, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F31_S0 + { + public int F0; + public ulong F1; + public nuint F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F31_Ret_S0 + { + public uint F0; + public float F1; + public ushort F2; + public short F3; + public float F4; + + public F31_Ret_S0(uint f0, float f1, ushort f2, short f3, float f4) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 18)] + struct F31_Ret + { + public F31_Ret_S0 F0; + public ushort F1; + + public F31_Ret(F31_Ret_S0 f0, ushort f1) + { + F0 = f0; + F1 = f1; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func311fAA7F31_RetVAeA0G3_S0V_SdtXE_tF")] + private static extern F31_Ret SwiftCallbackFunc31(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F31_Ret SwiftCallbackFunc31Callback(F31_S0 a0, double a1, SwiftSelf self) + { + try + { + Assert.Equal((int)1072945099, a0.F0); + Assert.Equal((ulong)5760996810500287322, a0.F1); + Assert.Equal((nuint)unchecked((nuint)3952909367135409979), a0.F2); + Assert.Equal((double)2860786541632685, a1); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F31_Ret(new F31_Ret_S0(1236856932, 1761447, 1260, 25704, 6212541), 44632); + } + + [Fact] + public static void 
TestSwiftCallbackFunc31() + { + Console.Write("Running SwiftCallbackFunc31: "); + ExceptionDispatchInfo ex = null; + F31_Ret val = SwiftCallbackFunc31(&SwiftCallbackFunc31Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((uint)1236856932, val.F0.F0); + Assert.Equal((float)1761447, val.F0.F1); + Assert.Equal((ushort)1260, val.F0.F2); + Assert.Equal((short)25704, val.F0.F3); + Assert.Equal((float)6212541, val.F0.F4); + Assert.Equal((ushort)44632, val.F1); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F32_Ret + { + public nuint F0; + public double F1; + public nint F2; + + public F32_Ret(nuint f0, double f1, nint f2) + { + F0 = f0; + F1 = f1; + F2 = f2; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func321fAA7F32_RetVAEs6UInt16V_s5Int16VtXE_tF")] + private static extern F32_Ret SwiftCallbackFunc32(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F32_Ret SwiftCallbackFunc32Callback(ushort a0, short a1, SwiftSelf self) + { + try + { + Assert.Equal((ushort)21020, a0); + Assert.Equal((short)7462, a1); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F32_Ret(unchecked((nuint)868833742355713000), 411817582525317, unchecked((nint)3926422244180816571)); + } + + [Fact] + public static void TestSwiftCallbackFunc32() + { + Console.Write("Running SwiftCallbackFunc32: "); + ExceptionDispatchInfo ex = null; + F32_Ret val = SwiftCallbackFunc32(&SwiftCallbackFunc32Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nuint)unchecked((nuint)868833742355713000), val.F0); + Assert.Equal((double)411817582525317, val.F1); + Assert.Equal((nint)unchecked((nint)3926422244180816571), val.F2); + Console.WriteLine("OK"); + } + + 
[StructLayout(LayoutKind.Sequential, Size = 16)] + struct F33_S0 + { + public short F0; + public ulong F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F33_S1_S0 + { + public short F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F33_S1 + { + public F33_S1_S0 F0; + public uint F1; + public nuint F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F33_S2 + { + public uint F0; + public ulong F1; + public sbyte F2; + public sbyte F3; + public nuint F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F33_S3_S0_S0 + { + public short F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F33_S3_S0 + { + public F33_S3_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F33_S3 + { + public F33_S3_S0 F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func331fS2uAA6F33_S0V_SfAA0G3_S1Vs6UInt32VSis4Int8VAKSfs5UInt8VSfAkA0G3_S2VSiAA0G3_S3VSiAItXE_tF")] + private static extern nuint SwiftCallbackFunc33(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static nuint SwiftCallbackFunc33Callback(F33_S0 a0, float a1, F33_S1 a2, uint a3, nint a4, sbyte a5, sbyte a6, float a7, byte a8, float a9, sbyte a10, F33_S2 a11, nint a12, F33_S3 a13, nint a14, uint a15, SwiftSelf self) + { + try + { + Assert.Equal((short)-23471, a0.F0); + Assert.Equal((ulong)2736941806609505888, a0.F1); + Assert.Equal((float)6930550, a1); + Assert.Equal((short)32476, a2.F0.F0); + Assert.Equal((uint)165441961, a2.F1); + Assert.Equal((nuint)unchecked((nuint)3890227499323387948), a2.F2); + Assert.Equal((uint)591524870, a3); + Assert.Equal((nint)unchecked((nint)1668420058132495503), a4); + Assert.Equal((sbyte)-67, a5); + Assert.Equal((sbyte)94, a6); + Assert.Equal((float)3180786, a7); + Assert.Equal((byte)42, a8); + 
Assert.Equal((float)7674952, a9); + Assert.Equal((sbyte)43, a10); + Assert.Equal((uint)771356149, a11.F0); + Assert.Equal((ulong)3611576949210389997, a11.F1); + Assert.Equal((sbyte)-15, a11.F2); + Assert.Equal((sbyte)7, a11.F3); + Assert.Equal((nuint)unchecked((nuint)2577587324978560192), a11.F4); + Assert.Equal((nint)unchecked((nint)8266150294848599489), a12); + Assert.Equal((short)9216, a13.F0.F0.F0); + Assert.Equal((nint)unchecked((nint)710302565025364450), a14); + Assert.Equal((uint)1060812904, a15); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return unchecked((nuint)8322391372382633712); + } + + [Fact] + public static void TestSwiftCallbackFunc33() + { + Console.Write("Running SwiftCallbackFunc33: "); + ExceptionDispatchInfo ex = null; + nuint val = SwiftCallbackFunc33(&SwiftCallbackFunc33Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nuint)unchecked((nuint)8322391372382633712), val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F34_S0_S0 + { + public uint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F34_S0 + { + public F34_S0_S0 F0; + public nuint F1; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func341fs6UInt16VAEs6UInt32V_AA6F34_S0VSus5Int16VtXE_tF")] + private static extern ushort SwiftCallbackFunc34(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static ushort SwiftCallbackFunc34Callback(uint a0, F34_S0 a1, nuint a2, short a3, SwiftSelf self) + { + try + { + Assert.Equal((uint)2068009847, a0); + Assert.Equal((uint)845123292, a1.F0.F0); + Assert.Equal((nuint)unchecked((nuint)5148244462913472487), a1.F1); + Assert.Equal((nuint)unchecked((nuint)8632568386462910655), a2); + Assert.Equal((short)7058, a3); + } 
+ catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 20647; + } + + [Fact] + public static void TestSwiftCallbackFunc34() + { + Console.Write("Running SwiftCallbackFunc34: "); + ExceptionDispatchInfo ex = null; + ushort val = SwiftCallbackFunc34(&SwiftCallbackFunc34Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ushort)20647, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F35_S0_S0_S0 + { + public int F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F35_S0_S0 + { + public long F0; + public F35_S0_S0_S0 F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F35_S0_S1 + { + public double F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F35_S0 + { + public F35_S0_S0 F0; + public int F1; + public F35_S0_S1 F2; + public nint F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F35_S1 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F35_S2_S0 + { + public double F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F35_S2 + { + public F35_S2_S0 F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func351fs6UInt64VAEs5UInt8V_s4Int8VSfs5Int64VSiAA6F35_S0VAA0K3_S1VAA0K3_S2VtXE_tF")] + private static extern ulong SwiftCallbackFunc35(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static ulong SwiftCallbackFunc35Callback(byte a0, sbyte a1, float a2, long a3, nint a4, F35_S0 a5, F35_S1 a6, F35_S2 a7, SwiftSelf self) + { + try + { + Assert.Equal((byte)182, a0); + Assert.Equal((sbyte)-16, a1); + Assert.Equal((float)7763558, a2); + Assert.Equal((long)5905028570860904693, a3); + Assert.Equal((nint)unchecked((nint)5991001624972063224), 
a4); + Assert.Equal((long)6663912001709962059, a5.F0.F0); + Assert.Equal((int)1843939591, a5.F0.F1.F0); + Assert.Equal((int)1095170337, a5.F1); + Assert.Equal((double)3908756332193409, a5.F2.F0); + Assert.Equal((nint)unchecked((nint)8246190362462442203), a5.F3); + Assert.Equal((ushort)52167, a6.F0); + Assert.Equal((double)283499999631068, a7.F0.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 4329482286317894385; + } + + [Fact] + public static void TestSwiftCallbackFunc35() + { + Console.Write("Running SwiftCallbackFunc35: "); + ExceptionDispatchInfo ex = null; + ulong val = SwiftCallbackFunc35(&SwiftCallbackFunc35Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ulong)4329482286317894385, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F36_S0 + { + public uint F0; + public long F1; + public byte F2; + public nuint F3; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func361fS2iSu_SdSus5UInt8Vs5Int64VAA6F36_S0Vs4Int8VtXE_tF")] + private static extern nint SwiftCallbackFunc36(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static nint SwiftCallbackFunc36Callback(nuint a0, double a1, nuint a2, byte a3, long a4, F36_S0 a5, sbyte a6, SwiftSelf self) + { + try + { + Assert.Equal((nuint)unchecked((nuint)5079603407518207003), a0); + Assert.Equal((double)2365862518115571, a1); + Assert.Equal((nuint)unchecked((nuint)6495651757722767835), a2); + Assert.Equal((byte)46, a3); + Assert.Equal((long)1550138390178394449, a4); + Assert.Equal((uint)1858960269, a5.F0); + Assert.Equal((long)1925263848394986294, a5.F1); + Assert.Equal((byte)217, a5.F2); + Assert.Equal((nuint)unchecked((nuint)8520779488644482307), a5.F3); + Assert.Equal((sbyte)-83, a6); + } 
+ catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return unchecked((nint)2889858798271230534); + } + + [Fact] + public static void TestSwiftCallbackFunc36() + { + Console.Write("Running SwiftCallbackFunc36: "); + ExceptionDispatchInfo ex = null; + nint val = SwiftCallbackFunc36(&SwiftCallbackFunc36Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nint)unchecked((nint)2889858798271230534), val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F37_S0_S0 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 28)] + struct F37_S0 + { + public nuint F0; + public uint F1; + public F37_S0_S0 F2; + public float F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F37_S1 + { + public nuint F0; + public uint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F37_S2 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F37_Ret + { + public float F0; + public byte F1; + public short F2; + public ulong F3; + + public F37_Ret(float f0, byte f1, short f2, ulong f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func371fAA7F37_RetVAEs6UInt64V_AA0G3_S0VSds6UInt16VAA0G3_S1VAA0G3_S2VtXE_tF")] + private static extern F37_Ret SwiftCallbackFunc37(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F37_Ret SwiftCallbackFunc37Callback(ulong a0, F37_S0 a1, double a2, ushort a3, F37_S1 a4, F37_S2 a5, SwiftSelf self) + { + try + { + Assert.Equal((ulong)1623104856688575867, a0); + Assert.Equal((nuint)unchecked((nuint)3785544303342575322), a1.F0); + Assert.Equal((uint)717682682, a1.F1); + 
Assert.Equal((nint)unchecked((nint)2674933748436691896), a1.F2.F0); + Assert.Equal((float)3211458, a1.F3); + Assert.Equal((double)996705046384579, a2); + Assert.Equal((ushort)8394, a3); + Assert.Equal((nuint)unchecked((nuint)1048947722954084863), a4.F0); + Assert.Equal((uint)252415487, a4.F1); + Assert.Equal((ushort)3664, a5.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F37_Ret(433224, 163, -5538, 4525229514824359136); + } + + [Fact] + public static void TestSwiftCallbackFunc37() + { + Console.Write("Running SwiftCallbackFunc37: "); + ExceptionDispatchInfo ex = null; + F37_Ret val = SwiftCallbackFunc37(&SwiftCallbackFunc37Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((float)433224, val.F0); + Assert.Equal((byte)163, val.F1); + Assert.Equal((short)-5538, val.F2); + Assert.Equal((ulong)4525229514824359136, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F38_S0_S0 + { + public nint F0; + public float F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F38_S0 + { + public F38_S0_S0 F0; + public ushort F1; + public int F2; + public float F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F38_S1 + { + public short F0; + public int F1; + public uint F2; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func381fS2dAA6F38_S0V_AA0G3_S1VSds5Int16Vs4Int8Vs6UInt32VAISfSiSfAMs5UInt8VSdAKtXE_tF")] + private static extern double SwiftCallbackFunc38(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static double SwiftCallbackFunc38Callback(F38_S0 a0, F38_S1 a1, double a2, short a3, sbyte a4, uint a5, short a6, float a7, nint a8, float a9, uint a10, byte a11, double a12, sbyte a13, SwiftSelf self) + { + 
try + { + Assert.Equal((nint)unchecked((nint)7389960750529773276), a0.F0.F0); + Assert.Equal((float)4749108, a0.F0.F1); + Assert.Equal((ushort)54323, a0.F1); + Assert.Equal((int)634649910, a0.F2); + Assert.Equal((float)83587, a0.F3); + Assert.Equal((short)-15547, a1.F0); + Assert.Equal((int)1747384081, a1.F1); + Assert.Equal((uint)851987981, a1.F2); + Assert.Equal((double)3543874366683681, a2); + Assert.Equal((short)5045, a3); + Assert.Equal((sbyte)-32, a4); + Assert.Equal((uint)2084540698, a5); + Assert.Equal((short)25583, a6); + Assert.Equal((float)3158067, a7); + Assert.Equal((nint)unchecked((nint)1655263182833369283), a8); + Assert.Equal((float)829404, a9); + Assert.Equal((uint)1888859844, a10); + Assert.Equal((byte)153, a11); + Assert.Equal((double)222366180309763, a12); + Assert.Equal((sbyte)61, a13); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 2529010496939244; + } + + [Fact] + public static void TestSwiftCallbackFunc38() + { + Console.Write("Running SwiftCallbackFunc38: "); + ExceptionDispatchInfo ex = null; + double val = SwiftCallbackFunc38(&SwiftCallbackFunc38Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((double)2529010496939244, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F39_S0_S0 + { + public short F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F39_S0_S1 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F39_S0 + { + public F39_S0_S0 F0; + public int F1; + public F39_S0_S1 F2; + public nuint F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F39_S1 + { + public ushort F0; + public byte F1; + public float F2; + public long F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F39_S2 + { + public int F0; + public float F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F39_S3 + { + public uint 
F0; + public nint F1; + public nint F2; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func391fS2iAA6F39_S0V_Sus6UInt32VSdAA0G3_S1VAA0G3_S2Vs4Int8VAA0G3_S3Vs5Int32Vs6UInt64Vs5UInt8VtXE_tF")] + private static extern nint SwiftCallbackFunc39(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static nint SwiftCallbackFunc39Callback(F39_S0 a0, nuint a1, uint a2, double a3, F39_S1 a4, F39_S2 a5, sbyte a6, F39_S3 a7, int a8, ulong a9, byte a10, SwiftSelf self) + { + try + { + Assert.Equal((short)-31212, a0.F0.F0); + Assert.Equal((int)1623216479, a0.F1); + Assert.Equal((ushort)7181, a0.F2.F0); + Assert.Equal((nuint)unchecked((nuint)8643545152918150186), a0.F3); + Assert.Equal((nuint)unchecked((nuint)799631211988519637), a1); + Assert.Equal((uint)94381581, a2); + Assert.Equal((double)761127371030426, a3); + Assert.Equal((ushort)417, a4.F0); + Assert.Equal((byte)85, a4.F1); + Assert.Equal((float)1543931, a4.F2); + Assert.Equal((long)3918460222899735322, a4.F3); + Assert.Equal((int)883468300, a5.F0); + Assert.Equal((float)2739152, a5.F1); + Assert.Equal((sbyte)-94, a6); + Assert.Equal((uint)1374766954, a7.F0); + Assert.Equal((nint)unchecked((nint)2042223450490396789), a7.F1); + Assert.Equal((nint)unchecked((nint)2672454113535023130), a7.F2); + Assert.Equal((int)946259065, a8); + Assert.Equal((ulong)6805548458517673751, a9); + Assert.Equal((byte)61, a10); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return unchecked((nint)3023907365579871618); + } + + [Fact] + public static void TestSwiftCallbackFunc39() + { + Console.Write("Running SwiftCallbackFunc39: "); + ExceptionDispatchInfo ex = null; + nint val = SwiftCallbackFunc39(&SwiftCallbackFunc39Callback, &ex); + if (ex != null) + ex.Throw(); + + 
Assert.Equal((nint)unchecked((nint)3023907365579871618), val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F40_S0 + { + public short F0; + public int F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F40_S1 + { + public int F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 25)] + struct F40_S2 + { + public long F0; + public ushort F1; + public nint F2; + public byte F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F40_S3_S0 + { + public float F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F40_S3 + { + public nuint F0; + public double F1; + public F40_S3_S0 F2; + public double F3; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func401fS2uAA6F40_S0V_s6UInt32Vs5UInt8VAA0G3_S1VAA0G3_S2Vs6UInt64VSuAOSis6UInt16VAgA0G3_S3VSutXE_tF")] + private static extern nuint SwiftCallbackFunc40(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static nuint SwiftCallbackFunc40Callback(F40_S0 a0, uint a1, byte a2, F40_S1 a3, F40_S2 a4, ulong a5, nuint a6, ulong a7, nint a8, ushort a9, uint a10, F40_S3 a11, nuint a12, SwiftSelf self) + { + try + { + Assert.Equal((short)22601, a0.F0); + Assert.Equal((int)312892872, a0.F1); + Assert.Equal((uint)1040102825, a1); + Assert.Equal((byte)56, a2); + Assert.Equal((int)101203812, a3.F0); + Assert.Equal((long)4298883321494088257, a4.F0); + Assert.Equal((ushort)2095, a4.F1); + Assert.Equal((nint)unchecked((nint)1536552108568739270), a4.F2); + Assert.Equal((byte)220, a4.F3); + Assert.Equal((ulong)2564624804830565018, a5); + Assert.Equal((nuint)unchecked((nuint)173855559108584219), a6); + Assert.Equal((ulong)6222832940831380264, a7); + Assert.Equal((nint)unchecked((nint)1898370824516510398), a8); + Assert.Equal((ushort)3352, a9); + 
Assert.Equal((uint)1643571476, a10); + Assert.Equal((nuint)unchecked((nuint)7940054758811932961), a11.F0); + Assert.Equal((double)246670432251533, a11.F1); + Assert.Equal((float)7890596, a11.F2.F0); + Assert.Equal((double)1094140965415232, a11.F3); + Assert.Equal((nuint)unchecked((nuint)2081923113238309816), a12); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return unchecked((nuint)4616766375038360400); + } + + [Fact] + public static void TestSwiftCallbackFunc40() + { + Console.Write("Running SwiftCallbackFunc40: "); + ExceptionDispatchInfo ex = null; + nuint val = SwiftCallbackFunc40(&SwiftCallbackFunc40Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nuint)unchecked((nuint)4616766375038360400), val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F41_S0 + { + public uint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F41_Ret + { + public ulong F0; + public double F1; + public uint F2; + public uint F3; + + public F41_Ret(ulong f0, double f1, uint f2, uint f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func411fAA7F41_RetVAeA0G3_S0VXE_tF")] + private static extern F41_Ret SwiftCallbackFunc41(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F41_Ret SwiftCallbackFunc41Callback(F41_S0 a0, SwiftSelf self) + { + try + { + Assert.Equal((uint)1430200072, a0.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F41_Ret(5150172797708870426, 3489330932479773, 833949606, 2098665090); + } + + [Fact] + public static void TestSwiftCallbackFunc41() + { + Console.Write("Running SwiftCallbackFunc41: "); 
+ ExceptionDispatchInfo ex = null; + F41_Ret val = SwiftCallbackFunc41(&SwiftCallbackFunc41Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ulong)5150172797708870426, val.F0); + Assert.Equal((double)3489330932479773, val.F1); + Assert.Equal((uint)833949606, val.F2); + Assert.Equal((uint)2098665090, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F42_S0_S0 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F42_S0 + { + public F42_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F42_S1 + { + public uint F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func421fS2is5Int32V_s6UInt32VAA6F42_S0VSfs5UInt8VAA0I3_S1VtXE_tF")] + private static extern nint SwiftCallbackFunc42(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static nint SwiftCallbackFunc42Callback(int a0, uint a1, F42_S0 a2, float a3, byte a4, F42_S1 a5, SwiftSelf self) + { + try + { + Assert.Equal((int)1046060439, a0); + Assert.Equal((uint)1987212952, a1); + Assert.Equal((nint)unchecked((nint)4714080408858753964), a2.F0.F0); + Assert.Equal((float)2364146, a3); + Assert.Equal((byte)25, a4); + Assert.Equal((uint)666986488, a5.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return unchecked((nint)4147856807670154637); + } + + [Fact] + public static void TestSwiftCallbackFunc42() + { + Console.Write("Running SwiftCallbackFunc42: "); + ExceptionDispatchInfo ex = null; + nint val = SwiftCallbackFunc42(&SwiftCallbackFunc42Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nint)unchecked((nint)4147856807670154637), val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct 
F43_S0 + { + public int F0; + public int F1; + public nint F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F43_S1 + { + public sbyte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F43_Ret + { + public ushort F0; + + public F43_Ret(ushort f0) + { + F0 = f0; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func431fAA7F43_RetVAeA0G3_S0V_AA0G3_S1VtXE_tF")] + private static extern F43_Ret SwiftCallbackFunc43(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F43_Ret SwiftCallbackFunc43Callback(F43_S0 a0, F43_S1 a1, SwiftSelf self) + { + try + { + Assert.Equal((int)406102630, a0.F0); + Assert.Equal((int)1946236062, a0.F1); + Assert.Equal((nint)unchecked((nint)663606396354980308), a0.F2); + Assert.Equal((sbyte)-8, a1.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F43_Ret(18672); + } + + [Fact] + public static void TestSwiftCallbackFunc43() + { + Console.Write("Running SwiftCallbackFunc43: "); + ExceptionDispatchInfo ex = null; + F43_Ret val = SwiftCallbackFunc43(&SwiftCallbackFunc43Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ushort)18672, val.F0); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F44_S0 + { + public uint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F44_S1_S0 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F44_S1_S1 + { + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F44_S1 + { + public short F0; + public short F1; + public F44_S1_S0 F2; + public F44_S1_S1 F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F44_S2 + { + public nuint F0; + } + + 
[StructLayout(LayoutKind.Sequential, Size = 1)] + struct F44_S3 + { + public sbyte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F44_Ret_S0 + { + public nuint F0; + + public F44_Ret_S0(nuint f0) + { + F0 = f0; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F44_Ret + { + public nint F0; + public F44_Ret_S0 F1; + public double F2; + + public F44_Ret(nint f0, F44_Ret_S0 f1, double f2) + { + F0 = f0; + F1 = f1; + F2 = f2; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func441fAA7F44_RetVAESd_AA0G3_S0VAA0G3_S1VAA0G3_S2VAA0G3_S3VtXE_tF")] + private static extern F44_Ret SwiftCallbackFunc44(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F44_Ret SwiftCallbackFunc44Callback(double a0, F44_S0 a1, F44_S1 a2, F44_S2 a3, F44_S3 a4, SwiftSelf self) + { + try + { + Assert.Equal((double)4281406007431544, a0); + Assert.Equal((uint)2097291497, a1.F0); + Assert.Equal((short)-10489, a2.F0); + Assert.Equal((short)-9573, a2.F1); + Assert.Equal((ushort)62959, a2.F2.F0); + Assert.Equal((nuint)unchecked((nuint)7144119809173057975), a2.F3.F0); + Assert.Equal((nuint)unchecked((nuint)168733393207234277), a3.F0); + Assert.Equal((sbyte)64, a4.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F44_Ret(unchecked((nint)7157474620613398513), new F44_Ret_S0(unchecked((nuint)8272092288451488897)), 8724612718809); + } + + [Fact] + public static void TestSwiftCallbackFunc44() + { + Console.Write("Running SwiftCallbackFunc44: "); + ExceptionDispatchInfo ex = null; + F44_Ret val = SwiftCallbackFunc44(&SwiftCallbackFunc44Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nint)unchecked((nint)7157474620613398513), val.F0); + 
Assert.Equal((nuint)unchecked((nuint)8272092288451488897), val.F1.F0); + Assert.Equal((double)8724612718809, val.F2); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F45_S0 + { + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 10)] + struct F45_S1 + { + public nuint F0; + public short F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F45_Ret_S0 + { + public float F0; + + public F45_Ret_S0(float f0) + { + F0 = f0; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 48)] + struct F45_Ret + { + public double F0; + public F45_Ret_S0 F1; + public long F2; + public double F3; + public ulong F4; + public sbyte F5; + public int F6; + + public F45_Ret(double f0, F45_Ret_S0 f1, long f2, double f3, ulong f4, sbyte f5, int f6) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + F5 = f5; + F6 = f6; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func451fAA7F45_RetVAeA0G3_S0V_AA0G3_S1Vs5UInt8VtXE_tF")] + private static extern F45_Ret SwiftCallbackFunc45(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F45_Ret SwiftCallbackFunc45Callback(F45_S0 a0, F45_S1 a1, byte a2, SwiftSelf self) + { + try + { + Assert.Equal((nuint)unchecked((nuint)5311803360204128233), a0.F0); + Assert.Equal((nuint)unchecked((nuint)2204790044275015546), a1.F0); + Assert.Equal((short)8942, a1.F1); + Assert.Equal((byte)207, a2); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F45_Ret(262658215125446, new F45_Ret_S0(3145713), 4924669542959578265, 2052183120467519, 3135406744871464298, 81, 1000720476); + } + + [Fact] + public static void TestSwiftCallbackFunc45() + { + Console.Write("Running SwiftCallbackFunc45: "); + ExceptionDispatchInfo 
ex = null; + F45_Ret val = SwiftCallbackFunc45(&SwiftCallbackFunc45Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((double)262658215125446, val.F0); + Assert.Equal((float)3145713, val.F1.F0); + Assert.Equal((long)4924669542959578265, val.F2); + Assert.Equal((double)2052183120467519, val.F3); + Assert.Equal((ulong)3135406744871464298, val.F4); + Assert.Equal((sbyte)81, val.F5); + Assert.Equal((int)1000720476, val.F6); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 26)] + struct F46_Ret + { + public nuint F0; + public double F1; + public long F2; + public ushort F3; + + public F46_Ret(nuint f0, double f1, long f2, ushort f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func461fAA7F46_RetVAESi_Sus6UInt16VAGs5Int64VtXE_tF")] + private static extern F46_Ret SwiftCallbackFunc46(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F46_Ret SwiftCallbackFunc46Callback(nint a0, nuint a1, ushort a2, ushort a3, long a4, SwiftSelf self) + { + try + { + Assert.Equal((nint)unchecked((nint)1855296013283572041), a0); + Assert.Equal((nuint)unchecked((nuint)1145047910516899437), a1); + Assert.Equal((ushort)20461, a2); + Assert.Equal((ushort)58204, a3); + Assert.Equal((long)1923767011143317115, a4); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F46_Ret(unchecked((nuint)4268855101008870857), 2061088094528291, 541679466428431692, 30655); + } + + [Fact] + public static void TestSwiftCallbackFunc46() + { + Console.Write("Running SwiftCallbackFunc46: "); + ExceptionDispatchInfo ex = null; + F46_Ret val = SwiftCallbackFunc46(&SwiftCallbackFunc46Callback, &ex); + if (ex != null) + ex.Throw(); + + 
Assert.Equal((nuint)unchecked((nuint)4268855101008870857), val.F0); + Assert.Equal((double)2061088094528291, val.F1); + Assert.Equal((long)541679466428431692, val.F2); + Assert.Equal((ushort)30655, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F47_S0 + { + public byte F0; + public int F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 13)] + struct F47_S1 + { + public nint F0; + public uint F1; + public sbyte F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F47_S2_S0 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 17)] + struct F47_S2 + { + public sbyte F0; + public float F1; + public int F2; + public float F3; + public F47_S2_S0 F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F47_S3 + { + public ulong F0; + public long F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F47_S4 + { + public ulong F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F47_Ret + { + public short F0; + public short F1; + public long F2; + + public F47_Ret(short f0, short f1, long f2) + { + F0 = f0; + F1 = f1; + F2 = f2; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func471fAA7F47_RetVAESi_Sfs6UInt32VAA0G3_S0VAA0G3_S1Vs6UInt16VSfS2iS2us5Int16VAA0G3_S2VAA0G3_S3VAA0G3_S4VtXE_tF")] + private static extern F47_Ret SwiftCallbackFunc47(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F47_Ret SwiftCallbackFunc47Callback(nint a0, float a1, uint a2, F47_S0 a3, F47_S1 a4, ushort a5, float a6, nint a7, nint a8, nuint a9, nuint a10, short a11, F47_S2 a12, F47_S3 a13, F47_S4 a14, SwiftSelf self) + { + try + { + Assert.Equal((nint)unchecked((nint)6545360066379352091), a0); + Assert.Equal((float)1240616, a1); + Assert.Equal((uint)575670382, 
a2); + Assert.Equal((byte)27, a3.F0); + Assert.Equal((int)1769677101, a3.F1); + Assert.Equal((nint)unchecked((nint)4175209822525678639), a4.F0); + Assert.Equal((uint)483151627, a4.F1); + Assert.Equal((sbyte)-41, a4.F2); + Assert.Equal((ushort)20891, a5); + Assert.Equal((float)1011044, a6); + Assert.Equal((nint)unchecked((nint)8543308148327168378), a7); + Assert.Equal((nint)unchecked((nint)9126721646663585297), a8); + Assert.Equal((nuint)unchecked((nuint)5438914191614359864), a9); + Assert.Equal((nuint)unchecked((nuint)5284613245897089025), a10); + Assert.Equal((short)-9227, a11); + Assert.Equal((sbyte)-23, a12.F0); + Assert.Equal((float)1294109, a12.F1); + Assert.Equal((int)411726757, a12.F2); + Assert.Equal((float)6621598, a12.F3); + Assert.Equal((byte)249, a12.F4.F0); + Assert.Equal((ulong)5281612261430853979, a13.F0); + Assert.Equal((long)7161295082465816089, a13.F1); + Assert.Equal((ulong)1995556861952451598, a14.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F47_Ret(32110, 21949, 479980404077668674); + } + + [Fact] + public static void TestSwiftCallbackFunc47() + { + Console.Write("Running SwiftCallbackFunc47: "); + ExceptionDispatchInfo ex = null; + F47_Ret val = SwiftCallbackFunc47(&SwiftCallbackFunc47Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((short)32110, val.F0); + Assert.Equal((short)21949, val.F1); + Assert.Equal((long)479980404077668674, val.F2); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F48_S0 + { + public ulong F0; + public short F1; + public ulong F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F48_S1_S0 + { + public float F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F48_S1 + { + public double F0; + public int F1; + public int F2; + public F48_S1_S0 F3; + public nuint F4; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + 
[DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func481fs5Int64VAEs4Int8V_s5Int16VAIs6UInt32VAA6F48_S0VAkA0K3_S1Vs5Int32VAQs6UInt16VAeKtXE_tF")] + private static extern long SwiftCallbackFunc48(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static long SwiftCallbackFunc48Callback(sbyte a0, short a1, short a2, uint a3, F48_S0 a4, uint a5, F48_S1 a6, int a7, int a8, ushort a9, long a10, uint a11, SwiftSelf self) + { + try + { + Assert.Equal((sbyte)-34, a0); + Assert.Equal((short)11634, a1); + Assert.Equal((short)-27237, a2); + Assert.Equal((uint)1039294154, a3); + Assert.Equal((ulong)1367847206719062131, a4.F0); + Assert.Equal((short)22330, a4.F1); + Assert.Equal((ulong)689282484471011648, a4.F2); + Assert.Equal((uint)1572626904, a5); + Assert.Equal((double)3054128759424009, a6.F0); + Assert.Equal((int)1677338134, a6.F1); + Assert.Equal((int)1257237843, a6.F2); + Assert.Equal((float)6264494, a6.F3.F0); + Assert.Equal((nuint)unchecked((nuint)8397097040610783205), a6.F4); + Assert.Equal((int)1060447208, a7); + Assert.Equal((int)269785114, a8); + Assert.Equal((ushort)20635, a9); + Assert.Equal((long)7679010342730986048, a10); + Assert.Equal((uint)1362633148, a11); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 1864372483209206459; + } + + [Fact] + public static void TestSwiftCallbackFunc48() + { + Console.Write("Running SwiftCallbackFunc48: "); + ExceptionDispatchInfo ex = null; + long val = SwiftCallbackFunc48(&SwiftCallbackFunc48Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((long)1864372483209206459, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F49_S0_S0 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F49_S0 + { + public F49_S0_S0 F0; + public ulong F1; + } + + 
[StructLayout(LayoutKind.Sequential, Size = 24)] + struct F49_Ret + { + public int F0; + public short F1; + public byte F2; + public byte F3; + public sbyte F4; + public long F5; + + public F49_Ret(int f0, short f1, byte f2, byte f3, sbyte f4, long f5) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + F5 = f5; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func491fAA7F49_RetVAeA0G3_S0V_s5Int64VtXE_tF")] + private static extern F49_Ret SwiftCallbackFunc49(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F49_Ret SwiftCallbackFunc49Callback(F49_S0 a0, long a1, SwiftSelf self) + { + try + { + Assert.Equal((byte)48, a0.F0.F0); + Assert.Equal((ulong)7563394992711018452, a0.F1); + Assert.Equal((long)4358370311341042916, a1); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F49_Ret(1638493854, -13624, 61, 236, -97, 3942201385605817844); + } + + [Fact] + public static void TestSwiftCallbackFunc49() + { + Console.Write("Running SwiftCallbackFunc49: "); + ExceptionDispatchInfo ex = null; + F49_Ret val = SwiftCallbackFunc49(&SwiftCallbackFunc49Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((int)1638493854, val.F0); + Assert.Equal((short)-13624, val.F1); + Assert.Equal((byte)61, val.F2); + Assert.Equal((byte)236, val.F3); + Assert.Equal((sbyte)-97, val.F4); + Assert.Equal((long)3942201385605817844, val.F5); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F50_S0_S0 + { + public double F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F50_S0 + { + public ushort F0; + public F50_S0_S0 F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F50_S1 + { + public double F0; + public ushort F1; + public 
int F2; + public nint F3; + public double F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F50_S2 + { + public int F0; + public float F1; + public uint F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 17)] + struct F50_S3 + { + public long F0; + public int F1; + public float F2; + public sbyte F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F50_S4 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F50_S5_S0 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F50_S5 + { + public F50_S5_S0 F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func501fs5UInt8VAeA6F50_S0V_AA0H3_S1VAeA0H3_S2Vs5Int32Vs6UInt64Vs4Int8VAQSfAA0H3_S3VAA0H3_S4VAA0H3_S5VSftXE_tF")] + private static extern byte SwiftCallbackFunc50(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static byte SwiftCallbackFunc50Callback(F50_S0 a0, F50_S1 a1, byte a2, F50_S2 a3, int a4, ulong a5, sbyte a6, sbyte a7, float a8, F50_S3 a9, F50_S4 a10, F50_S5 a11, float a12, SwiftSelf self) + { + try + { + Assert.Equal((ushort)31857, a0.F0); + Assert.Equal((double)1743417849706254, a0.F1.F0); + Assert.Equal((double)4104577461772135, a1.F0); + Assert.Equal((ushort)13270, a1.F1); + Assert.Equal((int)2072598986, a1.F2); + Assert.Equal((nint)unchecked((nint)9056978834867675248), a1.F3); + Assert.Equal((double)844742439929087, a1.F4); + Assert.Equal((byte)87, a2); + Assert.Equal((int)1420884537, a3.F0); + Assert.Equal((float)78807, a3.F1); + Assert.Equal((uint)1081688273, a3.F2); + Assert.Equal((int)336878110, a4); + Assert.Equal((ulong)1146514566942283069, a5); + Assert.Equal((sbyte)-93, a6); + Assert.Equal((sbyte)73, a7); + Assert.Equal((float)2321639, a8); + Assert.Equal((long)1940888991336881606, a9.F0); + 
Assert.Equal((int)688345394, a9.F1); + Assert.Equal((float)712275, a9.F2); + Assert.Equal((sbyte)-128, a9.F3); + Assert.Equal((long)2638503583829414770, a10.F0); + Assert.Equal((ushort)23681, a11.F0.F0); + Assert.Equal((float)8223218, a12); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 252; + } + + [Fact] + public static void TestSwiftCallbackFunc50() + { + Console.Write("Running SwiftCallbackFunc50: "); + ExceptionDispatchInfo ex = null; + byte val = SwiftCallbackFunc50(&SwiftCallbackFunc50Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((byte)252, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F51_S0 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F51_Ret + { + public ushort F0; + public sbyte F1; + public nint F2; + public ushort F3; + public ulong F4; + + public F51_Ret(ushort f0, sbyte f1, nint f2, ushort f3, ulong f4) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func511fAA7F51_RetVAEs5Int16V_SuAA0G3_S0Vs6UInt64VtXE_tF")] + private static extern F51_Ret SwiftCallbackFunc51(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F51_Ret SwiftCallbackFunc51Callback(short a0, nuint a1, F51_S0 a2, ulong a3, SwiftSelf self) + { + try + { + Assert.Equal((short)10812, a0); + Assert.Equal((nuint)unchecked((nuint)470861239714315155), a1); + Assert.Equal((long)5415660333180374788, a2.F0); + Assert.Equal((ulong)2389942629143476149, a3); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F51_Ret(28396, 23, unchecked((nint)4042678034578400305), 16166, 
8390419605778076733); + } + + [Fact] + public static void TestSwiftCallbackFunc51() + { + Console.Write("Running SwiftCallbackFunc51: "); + ExceptionDispatchInfo ex = null; + F51_Ret val = SwiftCallbackFunc51(&SwiftCallbackFunc51Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ushort)28396, val.F0); + Assert.Equal((sbyte)23, val.F1); + Assert.Equal((nint)unchecked((nint)4042678034578400305), val.F2); + Assert.Equal((ushort)16166, val.F3); + Assert.Equal((ulong)8390419605778076733, val.F4); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F52_S0 + { + public float F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F52_S1 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 33)] + struct F52_Ret + { + public float F0; + public ushort F1; + public long F2; + public short F3; + public ulong F4; + public sbyte F5; + + public F52_Ret(float f0, ushort f1, long f2, short f3, ulong f4, sbyte f5) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + F5 = f5; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func521fAA7F52_RetVAESi_AA0G3_S0Vs5Int16VAiA0G3_S1VtXE_tF")] + private static extern F52_Ret SwiftCallbackFunc52(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F52_Ret SwiftCallbackFunc52Callback(nint a0, F52_S0 a1, short a2, short a3, F52_S1 a4, SwiftSelf self) + { + try + { + Assert.Equal((nint)unchecked((nint)3233654765973602550), a0); + Assert.Equal((float)5997729, a1.F0); + Assert.Equal((short)-7404, a2); + Assert.Equal((short)-20804, a3); + Assert.Equal((ushort)17231, a4.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F52_Ret(3003005, 4886, 1846269873983567093, 24151, 
1408198981123859746, -41); + } + + [Fact] + public static void TestSwiftCallbackFunc52() + { + Console.Write("Running SwiftCallbackFunc52: "); + ExceptionDispatchInfo ex = null; + F52_Ret val = SwiftCallbackFunc52(&SwiftCallbackFunc52Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((float)3003005, val.F0); + Assert.Equal((ushort)4886, val.F1); + Assert.Equal((long)1846269873983567093, val.F2); + Assert.Equal((short)24151, val.F3); + Assert.Equal((ulong)1408198981123859746, val.F4); + Assert.Equal((sbyte)-41, val.F5); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F53_S0_S0_S0 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F53_S0_S0 + { + public F53_S0_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 40)] + struct F53_S0 + { + public sbyte F0; + public F53_S0_S0 F1; + public byte F2; + public nuint F3; + public long F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 5)] + struct F53_S1 + { + public float F0; + public byte F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F53_S2 + { + public sbyte F0; + public long F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F53_S3_S0 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 10)] + struct F53_S3 + { + public int F0; + public uint F1; + public F53_S3_S0 F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F53_S4 + { + public short F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F53_S5_S0 + { + public uint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F53_S5_S1_S0 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F53_S5_S1 + { + public F53_S5_S1_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 20)] + struct F53_S5 + { + public F53_S5_S0 F0; + public nuint F1; + public ushort F2; + public F53_S5_S1 F3; + public sbyte F4; + } + + 
[StructLayout(LayoutKind.Sequential, Size = 8)] + struct F53_S6 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F53_Ret + { + public nint F0; + + public F53_Ret(nint f0) + { + F0 = f0; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func531fAA7F53_RetVAeA0G3_S0V_s5UInt8Vs5Int64VAA0G3_S1VAA0G3_S2VAA0G3_S3VAkA0G3_S4VAA0G3_S5VAA0G3_S6VtXE_tF")] + private static extern F53_Ret SwiftCallbackFunc53(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F53_Ret SwiftCallbackFunc53Callback(F53_S0 a0, byte a1, long a2, F53_S1 a3, F53_S2 a4, F53_S3 a5, long a6, F53_S4 a7, F53_S5 a8, F53_S6 a9, SwiftSelf self) + { + try + { + Assert.Equal((sbyte)-123, a0.F0); + Assert.Equal((long)3494916243607193741, a0.F1.F0.F0); + Assert.Equal((byte)167, a0.F2); + Assert.Equal((nuint)unchecked((nuint)4018943158751734338), a0.F3); + Assert.Equal((long)6768175524813742847, a0.F4); + Assert.Equal((byte)207, a1); + Assert.Equal((long)8667995458064724392, a2); + Assert.Equal((float)492157, a3.F0); + Assert.Equal((byte)175, a3.F1); + Assert.Equal((sbyte)76, a4.F0); + Assert.Equal((long)5794486968525461488, a4.F1); + Assert.Equal((int)2146070335, a5.F0); + Assert.Equal((uint)1109141712, a5.F1); + Assert.Equal((ushort)44270, a5.F2.F0); + Assert.Equal((long)3581380181786253859, a6); + Assert.Equal((short)23565, a7.F0); + Assert.Equal((uint)1995174927, a8.F0.F0); + Assert.Equal((nuint)unchecked((nuint)5025417700244056666), a8.F1); + Assert.Equal((ushort)1847, a8.F2); + Assert.Equal((byte)6, a8.F3.F0.F0); + Assert.Equal((sbyte)-87, a8.F4); + Assert.Equal((nint)unchecked((nint)5737280129078653969), a9.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new 
F53_Ret(unchecked((nint)3955567540648861371)); + } + + [Fact] + public static void TestSwiftCallbackFunc53() + { + Console.Write("Running SwiftCallbackFunc53: "); + ExceptionDispatchInfo ex = null; + F53_Ret val = SwiftCallbackFunc53(&SwiftCallbackFunc53Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nint)unchecked((nint)3955567540648861371), val.F0); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 17)] + struct F54_S0 + { + public int F0; + public float F1; + public nuint F2; + public byte F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F54_S1 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F54_S2_S0_S0 + { + public double F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F54_S2_S0 + { + public short F0; + public F54_S2_S0_S0 F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 40)] + struct F54_S2 + { + public double F0; + public F54_S2_S0 F1; + public long F2; + public ulong F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F54_S3 + { + public float F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 3)] + struct F54_S4 + { + public ushort F0; + public sbyte F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F54_S5 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F54_Ret + { + public short F0; + public nint F1; + + public F54_Ret(short f0, nint f1) + { + F0 = f0; + F1 = f1; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func541fAA7F54_RetVAEs6UInt16V_AA0G3_S0VSfAA0G3_S1Vs5Int64Vs5Int32VAA0G3_S2VAA0G3_S3VAA0G3_S4VSfAA0G3_S5VtXE_tF")] + private static extern F54_Ret SwiftCallbackFunc54(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F54_Ret 
SwiftCallbackFunc54Callback(ushort a0, F54_S0 a1, float a2, F54_S1 a3, long a4, int a5, F54_S2 a6, F54_S3 a7, F54_S4 a8, float a9, F54_S5 a10, SwiftSelf self) + { + try + { + Assert.Equal((ushort)16440, a0); + Assert.Equal((int)922752112, a1.F0); + Assert.Equal((float)7843043, a1.F1); + Assert.Equal((nuint)unchecked((nuint)1521939500434086364), a1.F2); + Assert.Equal((byte)50, a1.F3); + Assert.Equal((float)3111108, a2); + Assert.Equal((ushort)50535, a3.F0); + Assert.Equal((long)4761507229870258916, a4); + Assert.Equal((int)1670668155, a5); + Assert.Equal((double)432665443852892, a6.F0); + Assert.Equal((short)13094, a6.F1.F0); + Assert.Equal((double)669143993481144, a6.F1.F1.F0); + Assert.Equal((long)30067117315069590, a6.F2); + Assert.Equal((ulong)874012622621600805, a6.F3); + Assert.Equal((float)7995066, a7.F0); + Assert.Equal((ushort)48478, a8.F0); + Assert.Equal((sbyte)23, a8.F1); + Assert.Equal((float)4383787, a9); + Assert.Equal((ushort)61633, a10.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F54_Ret(924, unchecked((nint)7680560643733996038)); + } + + [Fact] + public static void TestSwiftCallbackFunc54() + { + Console.Write("Running SwiftCallbackFunc54: "); + ExceptionDispatchInfo ex = null; + F54_Ret val = SwiftCallbackFunc54(&SwiftCallbackFunc54Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((short)924, val.F0); + Assert.Equal((nint)unchecked((nint)7680560643733996038), val.F1); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F55_S0_S0 + { + public double F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 17)] + struct F55_S0 + { + public nuint F0; + public F55_S0_S0 F1; + public sbyte F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F55_S1 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F55_S2 + { + public ulong F0; + } + + 
[StructLayout(LayoutKind.Sequential, Size = 8)] + struct F55_Ret_S0 + { + public short F0; + public int F1; + + public F55_Ret_S0(short f0, int f1) + { + F0 = f0; + F1 = f1; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 40)] + struct F55_Ret + { + public nuint F0; + public nint F1; + public double F2; + public F55_Ret_S0 F3; + public ulong F4; + + public F55_Ret(nuint f0, nint f1, double f2, F55_Ret_S0 f3, ulong f4) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func551fAA7F55_RetVAeA0G3_S0V_s5Int64VAA0G3_S1Vs4Int8VAA0G3_S2VSftXE_tF")] + private static extern F55_Ret SwiftCallbackFunc55(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F55_Ret SwiftCallbackFunc55Callback(F55_S0 a0, long a1, F55_S1 a2, sbyte a3, F55_S2 a4, float a5, SwiftSelf self) + { + try + { + Assert.Equal((nuint)unchecked((nuint)2856661562863799725), a0.F0); + Assert.Equal((double)1260582440479139, a0.F1.F0); + Assert.Equal((sbyte)5, a0.F2); + Assert.Equal((long)7945068527720423751, a1); + Assert.Equal((nint)unchecked((nint)4321616441998677375), a2.F0); + Assert.Equal((sbyte)-68, a3); + Assert.Equal((ulong)3311106172201778367, a4.F0); + Assert.Equal((float)5600069, a5); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F55_Ret(unchecked((nuint)6916953478574785342), unchecked((nint)6448649235859031640), 1920468532326411, new F55_Ret_S0(30394, 40356024), 6146457824330132360); + } + + [Fact] + public static void TestSwiftCallbackFunc55() + { + Console.Write("Running SwiftCallbackFunc55: "); + ExceptionDispatchInfo ex = null; + F55_Ret val = SwiftCallbackFunc55(&SwiftCallbackFunc55Callback, &ex); + if (ex != null) + ex.Throw(); + + 
Assert.Equal((nuint)unchecked((nuint)6916953478574785342), val.F0); + Assert.Equal((nint)unchecked((nint)6448649235859031640), val.F1); + Assert.Equal((double)1920468532326411, val.F2); + Assert.Equal((short)30394, val.F3.F0); + Assert.Equal((int)40356024, val.F3.F1); + Assert.Equal((ulong)6146457824330132360, val.F4); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F56_S0 + { + public double F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func561fs6UInt32VAeA6F56_S0VXE_tF")] + private static extern uint SwiftCallbackFunc56(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static uint SwiftCallbackFunc56Callback(F56_S0 a0, SwiftSelf self) + { + try + { + Assert.Equal((double)3082602006731666, a0.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 1601166926; + } + + [Fact] + public static void TestSwiftCallbackFunc56() + { + Console.Write("Running SwiftCallbackFunc56: "); + ExceptionDispatchInfo ex = null; + uint val = SwiftCallbackFunc56(&SwiftCallbackFunc56Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((uint)1601166926, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F57_S0 + { + public long F0; + public int F1; + public ulong F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F57_S1 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F57_S2 + { + public float F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F57_Ret_S0 + { + public long F0; + public byte F1; + public short F2; + + public F57_Ret_S0(long f0, byte f1, short f2) + { + F0 = f0; + F1 = f1; + F2 = f2; + } + } + + [StructLayout(LayoutKind.Sequential, Size 
= 13)] + struct F57_Ret + { + public F57_Ret_S0 F0; + public byte F1; + + public F57_Ret(F57_Ret_S0 f0, byte f1) + { + F0 = f0; + F1 = f1; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func571fAA7F57_RetVAEs4Int8V_Sus6UInt32Vs5Int64Vs6UInt64Vs5Int16VAkA0G3_S0VAA0G3_S1VAA0G3_S2VtXE_tF")] + private static extern F57_Ret SwiftCallbackFunc57(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F57_Ret SwiftCallbackFunc57Callback(sbyte a0, nuint a1, uint a2, long a3, ulong a4, short a5, long a6, F57_S0 a7, F57_S1 a8, F57_S2 a9, SwiftSelf self) + { + try + { + Assert.Equal((sbyte)54, a0); + Assert.Equal((nuint)unchecked((nuint)753245150862584974), a1); + Assert.Equal((uint)1470962934, a2); + Assert.Equal((long)1269392070140776313, a3); + Assert.Equal((ulong)2296560034524654667, a4); + Assert.Equal((short)12381, a5); + Assert.Equal((long)198893062684618980, a6); + Assert.Equal((long)1310571041794038100, a7.F0); + Assert.Equal((int)18741662, a7.F1); + Assert.Equal((ulong)7855196891704523814, a7.F2); + Assert.Equal((byte)156, a8.F0); + Assert.Equal((float)72045, a9.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F57_Ret(new F57_Ret_S0(3441370978522907304, 105, 24446), 200); + } + + [Fact] + public static void TestSwiftCallbackFunc57() + { + Console.Write("Running SwiftCallbackFunc57: "); + ExceptionDispatchInfo ex = null; + F57_Ret val = SwiftCallbackFunc57(&SwiftCallbackFunc57Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((long)3441370978522907304, val.F0.F0); + Assert.Equal((byte)105, val.F0.F1); + Assert.Equal((short)24446, val.F0.F2); + Assert.Equal((byte)200, val.F1); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F58_S0 + { + 
public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 6)] + struct F58_S1 + { + public float F0; + public ushort F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F58_S2_S0_S0 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F58_S2_S0 + { + public F58_S2_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F58_S2 + { + public F58_S2_S0 F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func581fS2is6UInt64V_s4Int8VSiAA6F58_S0VAA0I3_S1Vs5Int64VAA0I3_S2Vs5Int32VtXE_tF")] + private static extern nint SwiftCallbackFunc58(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static nint SwiftCallbackFunc58Callback(ulong a0, sbyte a1, nint a2, F58_S0 a3, F58_S1 a4, long a5, F58_S2 a6, int a7, SwiftSelf self) + { + try + { + Assert.Equal((ulong)4612004722568513699, a0); + Assert.Equal((sbyte)-96, a1); + Assert.Equal((nint)unchecked((nint)1970590839325113617), a2); + Assert.Equal((byte)211, a3.F0); + Assert.Equal((float)5454927, a4.F0); + Assert.Equal((ushort)48737, a4.F1); + Assert.Equal((long)921570327236881486, a5); + Assert.Equal((nint)unchecked((nint)7726203059421444802), a6.F0.F0.F0); + Assert.Equal((int)491616915, a7); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return unchecked((nint)5337995302960578101); + } + + [Fact] + public static void TestSwiftCallbackFunc58() + { + Console.Write("Running SwiftCallbackFunc58: "); + ExceptionDispatchInfo ex = null; + nint val = SwiftCallbackFunc58(&SwiftCallbackFunc58Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nint)unchecked((nint)5337995302960578101), val); + Console.WriteLine("OK"); + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] 
+ [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func591fs6UInt64VAEs6UInt16V_s5Int64VSitXE_tF")] + private static extern ulong SwiftCallbackFunc59(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static ulong SwiftCallbackFunc59Callback(ushort a0, long a1, nint a2, SwiftSelf self) + { + try + { + Assert.Equal((ushort)9232, a0); + Assert.Equal((long)7281011081566942937, a1); + Assert.Equal((nint)unchecked((nint)8203439771560005792), a2); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 7843473552989551261; + } + + [Fact] + public static void TestSwiftCallbackFunc59() + { + Console.Write("Running SwiftCallbackFunc59: "); + ExceptionDispatchInfo ex = null; + ulong val = SwiftCallbackFunc59(&SwiftCallbackFunc59Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ulong)7843473552989551261, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F60_S0 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F60_S1 + { + public ulong F0; + public int F1; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func601fs6UInt64VAESf_Sds5Int64Vs6UInt16VS2fAA6F60_S0Vs5Int16VAA0J3_S1VAmGtXE_tF")] + private static extern ulong SwiftCallbackFunc60(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static ulong SwiftCallbackFunc60Callback(float a0, double a1, long a2, ushort a3, float a4, float a5, F60_S0 a6, short a7, F60_S1 a8, short a9, long a10, SwiftSelf self) + { + try + { + Assert.Equal((float)2682255, a0); + Assert.Equal((double)2041676057169359, a1); + Assert.Equal((long)5212916666940122160, a2); + 
Assert.Equal((ushort)64444, a3); + Assert.Equal((float)6372882, a4); + Assert.Equal((float)8028835, a5); + Assert.Equal((nint)unchecked((nint)6629286640024570381), a6.F0); + Assert.Equal((short)1520, a7); + Assert.Equal((ulong)8398497739914283366, a8.F0); + Assert.Equal((int)1882981891, a8.F1); + Assert.Equal((short)7716, a9); + Assert.Equal((long)6631047215535600409, a10); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 1713850918199577358; + } + + [Fact] + public static void TestSwiftCallbackFunc60() + { + Console.Write("Running SwiftCallbackFunc60: "); + ExceptionDispatchInfo ex = null; + ulong val = SwiftCallbackFunc60(&SwiftCallbackFunc60Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ulong)1713850918199577358, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F61_S0_S0 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 20)] + struct F61_S0 + { + public F61_S0_S0 F0; + public long F1; + public uint F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F61_S1 + { + public sbyte F0; + public float F1; + public nint F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F61_S2_S0_S0 + { + public ulong F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F61_S2_S0 + { + public F61_S2_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F61_S2_S1 + { + public sbyte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 9)] + struct F61_S2 + { + public F61_S2_S0 F0; + public F61_S2_S1 F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F61_S3 + { + public ulong F0; + public nint F1; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func611fs6UInt32VA2E_AeA6F61_S0VAA0H3_S1VAA0H3_S2Vs4Int8Vs5Int16VAA0H3_S3Vs5Int32VAEtXE_tF")] 
+ private static extern uint SwiftCallbackFunc61(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static uint SwiftCallbackFunc61Callback(uint a0, uint a1, F61_S0 a2, F61_S1 a3, F61_S2 a4, sbyte a5, short a6, F61_S3 a7, int a8, uint a9, SwiftSelf self) + { + try + { + Assert.Equal((uint)1070797065, a0); + Assert.Equal((uint)135220309, a1); + Assert.Equal((long)6475887024664217162, a2.F0.F0); + Assert.Equal((long)563444654083452485, a2.F1); + Assert.Equal((uint)1748956360, a2.F2); + Assert.Equal((sbyte)-112, a3.F0); + Assert.Equal((float)3433396, a3.F1); + Assert.Equal((nint)unchecked((nint)8106074956722850624), a3.F2); + Assert.Equal((ulong)2318628619979263858, a4.F0.F0.F0); + Assert.Equal((sbyte)-93, a4.F1.F0); + Assert.Equal((sbyte)-122, a5); + Assert.Equal((short)-11696, a6); + Assert.Equal((ulong)5229393236090246212, a7.F0); + Assert.Equal((nint)unchecked((nint)4021449757638811198), a7.F1); + Assert.Equal((int)689517945, a8); + Assert.Equal((uint)657677740, a9); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 138627237; + } + + [Fact] + public static void TestSwiftCallbackFunc61() + { + Console.Write("Running SwiftCallbackFunc61: "); + ExceptionDispatchInfo ex = null; + uint val = SwiftCallbackFunc61(&SwiftCallbackFunc61Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((uint)138627237, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F62_S0 + { + public float F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F62_Ret + { + public ushort F0; + public long F1; + public nint F2; + public long F3; + + public F62_Ret(ushort f0, long f1, nint f2, long f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = 
"$s22SwiftCallbackAbiStress05swiftB6Func621fAA7F62_RetVAeA0G3_S0VXE_tF")] + private static extern F62_Ret SwiftCallbackFunc62(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F62_Ret SwiftCallbackFunc62Callback(F62_S0 a0, SwiftSelf self) + { + try + { + Assert.Equal((float)6500993, a0.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F62_Ret(63013, 4076138842444340990, unchecked((nint)6876195265868121021), 223819901796794423); + } + + [Fact] + public static void TestSwiftCallbackFunc62() + { + Console.Write("Running SwiftCallbackFunc62: "); + ExceptionDispatchInfo ex = null; + F62_Ret val = SwiftCallbackFunc62(&SwiftCallbackFunc62Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ushort)63013, val.F0); + Assert.Equal((long)4076138842444340990, val.F1); + Assert.Equal((nint)unchecked((nint)6876195265868121021), val.F2); + Assert.Equal((long)223819901796794423, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F63_S0 + { + public nint F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func631fS2fAA6F63_S0V_s5Int16VtXE_tF")] + private static extern float SwiftCallbackFunc63(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static float SwiftCallbackFunc63Callback(F63_S0 a0, short a1, SwiftSelf self) + { + try + { + Assert.Equal((nint)unchecked((nint)8391317504019075904), a0.F0); + Assert.Equal((short)11218, a1); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 1458978; + } + + [Fact] + public static void TestSwiftCallbackFunc63() + { + Console.Write("Running 
SwiftCallbackFunc63: "); + ExceptionDispatchInfo ex = null; + float val = SwiftCallbackFunc63(&SwiftCallbackFunc63Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((float)1458978, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F64_S0 + { + public int F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F64_S1 + { + public ulong F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F64_S2 + { + public uint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F64_Ret_S0 + { + public ushort F0; + public nuint F1; + public ulong F2; + + public F64_Ret_S0(ushort f0, nuint f1, ulong f2) + { + F0 = f0; + F1 = f1; + F2 = f2; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 40)] + struct F64_Ret + { + public nuint F0; + public F64_Ret_S0 F1; + public double F2; + + public F64_Ret(nuint f0, F64_Ret_S0 f1, double f2) + { + F0 = f0; + F1 = f1; + F2 = f2; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func641fAA7F64_RetVAEs4Int8V_AA0G3_S0VAA0G3_S1VSuAA0G3_S2VtXE_tF")] + private static extern F64_Ret SwiftCallbackFunc64(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F64_Ret SwiftCallbackFunc64Callback(sbyte a0, F64_S0 a1, F64_S1 a2, nuint a3, F64_S2 a4, SwiftSelf self) + { + try + { + Assert.Equal((sbyte)-22, a0); + Assert.Equal((int)1591678205, a1.F0); + Assert.Equal((ulong)8355549563000003325, a2.F0); + Assert.Equal((nuint)unchecked((nuint)5441989206466502201), a3); + Assert.Equal((uint)2097092811, a4.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F64_Ret(unchecked((nuint)7966680593035770540), new F64_Ret_S0(20244, unchecked((nuint)7259704667595065333), 
1039021449222712763), 594768504899138); + } + + [Fact] + public static void TestSwiftCallbackFunc64() + { + Console.Write("Running SwiftCallbackFunc64: "); + ExceptionDispatchInfo ex = null; + F64_Ret val = SwiftCallbackFunc64(&SwiftCallbackFunc64Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nuint)unchecked((nuint)7966680593035770540), val.F0); + Assert.Equal((ushort)20244, val.F1.F0); + Assert.Equal((nuint)unchecked((nuint)7259704667595065333), val.F1.F1); + Assert.Equal((ulong)1039021449222712763, val.F1.F2); + Assert.Equal((double)594768504899138, val.F2); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F65_S0 + { + public double F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F65_S1 + { + public ushort F0; + public nint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F65_S2 + { + public short F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F65_S3 + { + public int F0; + public uint F1; + public sbyte F2; + public nuint F3; + public double F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 28)] + struct F65_Ret + { + public nint F0; + public nint F1; + public nint F2; + public float F3; + + public F65_Ret(nint f0, nint f1, nint f2, float f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func651fAA7F65_RetVAeA0G3_S0V_s5Int16VSdSuAA0G3_S1Vs6UInt64VAA0G3_S2VSiAA0G3_S3Vs5Int32Vs5Int64Vs6UInt32VSdtXE_tF")] + private static extern F65_Ret SwiftCallbackFunc65(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F65_Ret SwiftCallbackFunc65Callback(F65_S0 a0, short a1, double a2, nuint a3, F65_S1 a4, ulong a5, F65_S2 a6, nint a7, F65_S3 a8, int a9, long a10, uint a11, double a12, SwiftSelf self) 
+ { + try + { + Assert.Equal((double)2969223123583220, a0.F0); + Assert.Equal((short)-10269, a1); + Assert.Equal((double)3909264978196109, a2); + Assert.Equal((nuint)unchecked((nuint)522883062031213707), a3); + Assert.Equal((ushort)37585, a4.F0); + Assert.Equal((nint)unchecked((nint)5879827541057349126), a4.F1); + Assert.Equal((ulong)1015270399093748716, a5); + Assert.Equal((short)19670, a6.F0); + Assert.Equal((nint)unchecked((nint)1900026319968050423), a7); + Assert.Equal((int)1440511399, a8.F0); + Assert.Equal((uint)1203865685, a8.F1); + Assert.Equal((sbyte)12, a8.F2); + Assert.Equal((nuint)unchecked((nuint)4061296318630567634), a8.F3); + Assert.Equal((double)2406524883317724, a8.F4); + Assert.Equal((int)1594888000, a9); + Assert.Equal((long)2860599972459787263, a10); + Assert.Equal((uint)1989052358, a11); + Assert.Equal((double)1036075606072593, a12); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F65_Ret(unchecked((nint)7810903219784151958), unchecked((nint)8310527878848492866), unchecked((nint)1357258266300958550), 5970506); + } + + [Fact] + public static void TestSwiftCallbackFunc65() + { + Console.Write("Running SwiftCallbackFunc65: "); + ExceptionDispatchInfo ex = null; + F65_Ret val = SwiftCallbackFunc65(&SwiftCallbackFunc65Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nint)unchecked((nint)7810903219784151958), val.F0); + Assert.Equal((nint)unchecked((nint)8310527878848492866), val.F1); + Assert.Equal((nint)unchecked((nint)1357258266300958550), val.F2); + Assert.Equal((float)5970506, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 5)] + struct F66_Ret_S0 + { + public float F0; + public byte F1; + + public F66_Ret_S0(float f0, byte f1) + { + F0 = f0; + F1 = f1; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F66_Ret + { + public uint F0; + public int F1; + public uint F2; + public F66_Ret_S0 F3; + 
public nint F4; + + public F66_Ret(uint f0, int f1, uint f2, F66_Ret_S0 f3, nint f4) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func661fAA7F66_RetVAEs5Int64VXE_tF")] + private static extern F66_Ret SwiftCallbackFunc66(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F66_Ret SwiftCallbackFunc66Callback(long a0, SwiftSelf self) + { + try + { + Assert.Equal((long)8300712022174991120, a0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F66_Ret(1855065799, 2029697750, 149423164, new F66_Ret_S0(4327716, 116), unchecked((nint)5847795120921557969)); + } + + [Fact] + public static void TestSwiftCallbackFunc66() + { + Console.Write("Running SwiftCallbackFunc66: "); + ExceptionDispatchInfo ex = null; + F66_Ret val = SwiftCallbackFunc66(&SwiftCallbackFunc66Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((uint)1855065799, val.F0); + Assert.Equal((int)2029697750, val.F1); + Assert.Equal((uint)149423164, val.F2); + Assert.Equal((float)4327716, val.F3.F0); + Assert.Equal((byte)116, val.F3.F1); + Assert.Equal((nint)unchecked((nint)5847795120921557969), val.F4); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F67_S0 + { + public uint F0; + public byte F1; + public byte F2; + public int F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F67_S1 + { + public uint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F67_S2_S0 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 40)] + struct F67_S2 + { + public ulong F0; + public uint F1; + public nint F2; + public uint F3; + public F67_S2_S0 F4; + } + + 
[StructLayout(LayoutKind.Sequential, Size = 28)] + struct F67_S3 + { + public short F0; + public ulong F1; + public ulong F2; + public float F3; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func671fs5Int32VAESd_AA6F67_S0VSfAA0H3_S1Vs5Int16VSuAA0H3_S2Vs6UInt16VS2uAA0H3_S3Vs6UInt64VtXE_tF")] + private static extern int SwiftCallbackFunc67(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static int SwiftCallbackFunc67Callback(double a0, F67_S0 a1, float a2, F67_S1 a3, short a4, nuint a5, F67_S2 a6, ushort a7, nuint a8, nuint a9, F67_S3 a10, ulong a11, SwiftSelf self) + { + try + { + Assert.Equal((double)2365334314089079, a0); + Assert.Equal((uint)1133369490, a1.F0); + Assert.Equal((byte)54, a1.F1); + Assert.Equal((byte)244, a1.F2); + Assert.Equal((int)411611102, a1.F3); + Assert.Equal((float)4453912, a2); + Assert.Equal((uint)837821989, a3.F0); + Assert.Equal((short)-3824, a4); + Assert.Equal((nuint)unchecked((nuint)2394019088612006082), a5); + Assert.Equal((ulong)2219661088889353540, a6.F0); + Assert.Equal((uint)294254132, a6.F1); + Assert.Equal((nint)unchecked((nint)5363897228951721947), a6.F2); + Assert.Equal((uint)2038380379, a6.F3); + Assert.Equal((nint)unchecked((nint)8364879421385869437), a6.F4.F0); + Assert.Equal((ushort)27730, a7); + Assert.Equal((nuint)unchecked((nuint)1854446871602777695), a8); + Assert.Equal((nuint)unchecked((nuint)5020910156102352016), a9); + Assert.Equal((short)-2211, a10.F0); + Assert.Equal((ulong)5910581461792482729, a10.F1); + Assert.Equal((ulong)9095210648679611609, a10.F2); + Assert.Equal((float)6138428, a10.F3); + Assert.Equal((ulong)4274242076331880276, a11); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 391983354; + } + + [Fact] + public static void 
TestSwiftCallbackFunc67() + { + Console.Write("Running SwiftCallbackFunc67: "); + ExceptionDispatchInfo ex = null; + int val = SwiftCallbackFunc67(&SwiftCallbackFunc67Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((int)391983354, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F68_S0_S0 + { + public sbyte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 9)] + struct F68_S0 + { + public long F0; + public F68_S0_S0 F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F68_S1 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F68_S2_S0 + { + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F68_S2_S1_S0 + { + public ulong F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F68_S2_S1 + { + public F68_S2_S1_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F68_S2 + { + public F68_S2_S0 F0; + public F68_S2_S1 F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F68_S3 + { + public short F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F68_Ret + { + public ushort F0; + public long F1; + + public F68_Ret(ushort f0, long f1) + { + F0 = f0; + F1 = f1; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func681fAA7F68_RetVAEs5UInt8V_Sfs5Int32VSiAA0G3_S0Vs5Int16VSiAISiAA0G3_S1VSdAA0G3_S2VAA0G3_S3VtXE_tF")] + private static extern F68_Ret SwiftCallbackFunc68(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F68_Ret SwiftCallbackFunc68Callback(byte a0, float a1, int a2, nint a3, F68_S0 a4, short a5, nint a6, int a7, nint a8, F68_S1 a9, double a10, F68_S2 a11, F68_S3 a12, SwiftSelf self) + { + try + { + Assert.Equal((byte)203, a0); + 
Assert.Equal((float)7725681, a1); + Assert.Equal((int)323096997, a2); + Assert.Equal((nint)unchecked((nint)7745650233784541800), a3); + Assert.Equal((long)4103074885750473230, a4.F0); + Assert.Equal((sbyte)12, a4.F1.F0); + Assert.Equal((short)28477, a5); + Assert.Equal((nint)unchecked((nint)3772772447290536725), a6); + Assert.Equal((int)1075348149, a7); + Assert.Equal((nint)unchecked((nint)2017898311184593242), a8); + Assert.Equal((ushort)60280, a9.F0); + Assert.Equal((double)4052387873895590, a10); + Assert.Equal((nuint)unchecked((nuint)1321857087602747558), a11.F0.F0); + Assert.Equal((ulong)9011155097138053416, a11.F1.F0.F0); + Assert.Equal((short)8332, a12.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F68_Ret(64088, 8144208533922264568); + } + + [Fact] + public static void TestSwiftCallbackFunc68() + { + Console.Write("Running SwiftCallbackFunc68: "); + ExceptionDispatchInfo ex = null; + F68_Ret val = SwiftCallbackFunc68(&SwiftCallbackFunc68Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ushort)64088, val.F0); + Assert.Equal((long)8144208533922264568, val.F1); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F69_S0_S0 + { + public ulong F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F69_S0 + { + public F69_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F69_S1 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F69_S2 + { + public int F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F69_S3 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F69_S4_S0 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F69_S4 + { + public F69_S4_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 20)] + struct F69_Ret + { + public byte F0; + public long F1; + 
public uint F2; + + public F69_Ret(byte f0, long f1, uint f2) + { + F0 = f0; + F1 = f1; + F2 = f2; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func691fAA7F69_RetVAeA0G3_S0V_Sis5Int32VAA0G3_S1Vs6UInt32Vs4Int8VAA0G3_S2VSiAA0G3_S3VAA0G3_S4VtXE_tF")] + private static extern F69_Ret SwiftCallbackFunc69(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F69_Ret SwiftCallbackFunc69Callback(F69_S0 a0, nint a1, int a2, F69_S1 a3, uint a4, sbyte a5, F69_S2 a6, nint a7, F69_S3 a8, F69_S4 a9, SwiftSelf self) + { + try + { + Assert.Equal((ulong)7154553222175076145, a0.F0.F0); + Assert.Equal((nint)unchecked((nint)6685908100026425691), a1); + Assert.Equal((int)1166526155, a2); + Assert.Equal((long)6042278185730963289, a3.F0); + Assert.Equal((uint)182060391, a4); + Assert.Equal((sbyte)45, a5); + Assert.Equal((int)1886331345, a6.F0); + Assert.Equal((nint)unchecked((nint)485542148877875333), a7); + Assert.Equal((byte)209, a8.F0); + Assert.Equal((long)6856847647688321191, a9.F0.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F69_Ret(52, 5510942427596951043, 1854355776); + } + + [Fact] + public static void TestSwiftCallbackFunc69() + { + Console.Write("Running SwiftCallbackFunc69: "); + ExceptionDispatchInfo ex = null; + F69_Ret val = SwiftCallbackFunc69(&SwiftCallbackFunc69Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((byte)52, val.F0); + Assert.Equal((long)5510942427596951043, val.F1); + Assert.Equal((uint)1854355776, val.F2); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F70_S0 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 18)] + struct F70_S1 + { + public nint F0; + public double F1; + public short F2; + } 
+ + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F70_S2 + { + public uint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 36)] + struct F70_S3 + { + public ushort F0; + public double F1; + public byte F2; + public ulong F3; + public int F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F70_S4_S0 + { + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F70_S4 + { + public F70_S4_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 20)] + struct F70_Ret + { + public sbyte F0; + public uint F1; + public ulong F2; + public short F3; + public short F4; + + public F70_Ret(sbyte f0, uint f1, ulong f2, short f3, short f4) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func701fAA7F70_RetVAEs5Int16V_s5UInt8VSis6UInt32VAA0G3_S0Vs5Int32VAA0G3_S1VAA0G3_S2VAA0G3_S3Vs5Int64VAOs6UInt16VS2iSuAA0G3_S4VtXE_tF")] + private static extern F70_Ret SwiftCallbackFunc70(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F70_Ret SwiftCallbackFunc70Callback(short a0, byte a1, nint a2, uint a3, F70_S0 a4, int a5, F70_S1 a6, F70_S2 a7, F70_S3 a8, long a9, int a10, ushort a11, nint a12, nint a13, nuint a14, F70_S4 a15, SwiftSelf self) + { + try + { + Assert.Equal((short)-13167, a0); + Assert.Equal((byte)126, a1); + Assert.Equal((nint)unchecked((nint)3641983584484741827), a2); + Assert.Equal((uint)1090448265, a3); + Assert.Equal((long)3696858216713616004, a4.F0); + Assert.Equal((int)1687025402, a5); + Assert.Equal((nint)unchecked((nint)714916953527626038), a6.F0); + Assert.Equal((double)459810445900614, a6.F1); + Assert.Equal((short)4276, a6.F2); + Assert.Equal((uint)529194028, a7.F0); + Assert.Equal((ushort)40800, a8.F0); + Assert.Equal((double)3934985905568056, 
a8.F1); + Assert.Equal((byte)230, a8.F2); + Assert.Equal((ulong)7358783417346157372, a8.F3); + Assert.Equal((int)187926922, a8.F4); + Assert.Equal((long)228428560763393434, a9); + Assert.Equal((int)146501405, a10); + Assert.Equal((ushort)58804, a11); + Assert.Equal((nint)unchecked((nint)7098488973446286248), a12); + Assert.Equal((nint)unchecked((nint)1283658442251334575), a13); + Assert.Equal((nuint)unchecked((nuint)3644681944588099582), a14); + Assert.Equal((nuint)unchecked((nuint)8197135412164695911), a15.F0.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F70_Ret(45, 460004173, 7766748067698372018, 27369, 16509); + } + + [Fact] + public static void TestSwiftCallbackFunc70() + { + Console.Write("Running SwiftCallbackFunc70: "); + ExceptionDispatchInfo ex = null; + F70_Ret val = SwiftCallbackFunc70(&SwiftCallbackFunc70Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((sbyte)45, val.F0); + Assert.Equal((uint)460004173, val.F1); + Assert.Equal((ulong)7766748067698372018, val.F2); + Assert.Equal((short)27369, val.F3); + Assert.Equal((short)16509, val.F4); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F71_S0_S0 + { + public int F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F71_S0 + { + public F71_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F71_S1 + { + public long F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func711fs6UInt64VAeA6F71_S0V_AA0H3_S1VtXE_tF")] + private static extern ulong SwiftCallbackFunc71(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static ulong SwiftCallbackFunc71Callback(F71_S0 a0, F71_S1 a1, SwiftSelf self) + { + try + { + Assert.Equal((int)258165353, 
a0.F0.F0); + Assert.Equal((long)8603744544763953916, a1.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 8460721064583106347; + } + + [Fact] + public static void TestSwiftCallbackFunc71() + { + Console.Write("Running SwiftCallbackFunc71: "); + ExceptionDispatchInfo ex = null; + ulong val = SwiftCallbackFunc71(&SwiftCallbackFunc71Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ulong)8460721064583106347, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F72_S0 + { + public int F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F72_Ret + { + public uint F0; + public float F1; + public float F2; + public long F3; + + public F72_Ret(uint f0, float f1, float f2, long f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func721fAA7F72_RetVAeA0G3_S0V_s5Int64Vs4Int8VtXE_tF")] + private static extern F72_Ret SwiftCallbackFunc72(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F72_Ret SwiftCallbackFunc72Callback(F72_S0 a0, long a1, sbyte a2, SwiftSelf self) + { + try + { + Assert.Equal((int)2021509367, a0.F0); + Assert.Equal((long)2480039820482100351, a1); + Assert.Equal((sbyte)91, a2); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F72_Ret(1583929847, 2026234, 8092211, 445254465286132488); + } + + [Fact] + public static void TestSwiftCallbackFunc72() + { + Console.Write("Running SwiftCallbackFunc72: "); + ExceptionDispatchInfo ex = null; + F72_Ret val = SwiftCallbackFunc72(&SwiftCallbackFunc72Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((uint)1583929847, val.F0); + 
Assert.Equal((float)2026234, val.F1); + Assert.Equal((float)8092211, val.F2); + Assert.Equal((long)445254465286132488, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F73_S0 + { + public int F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F73_S1_S0 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F73_S1 + { + public F73_S1_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F73_S2 + { + public int F0; + public float F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 11)] + struct F73_S3 + { + public nuint F0; + public short F1; + public sbyte F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F73_S4 + { + public short F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F73_S5 + { + public uint F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func731fs4Int8VAESd_SfAA6F73_S0Vs5Int64VAA0H3_S1VAA0H3_S2Vs5Int16VSdAEs5Int32VAiA0H3_S3VSus6UInt64VAqA0H3_S4Vs5UInt8VAA0H3_S5VtXE_tF")] + private static extern sbyte SwiftCallbackFunc73(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static sbyte SwiftCallbackFunc73Callback(double a0, float a1, F73_S0 a2, long a3, F73_S1 a4, F73_S2 a5, short a6, double a7, sbyte a8, int a9, long a10, F73_S3 a11, nuint a12, ulong a13, int a14, F73_S4 a15, byte a16, F73_S5 a17, SwiftSelf self) + { + try + { + Assert.Equal((double)3038361048801008, a0); + Assert.Equal((float)7870661, a1); + Assert.Equal((int)1555231180, a2.F0); + Assert.Equal((long)7433951069104961, a3); + Assert.Equal((ushort)63298, a4.F0.F0); + Assert.Equal((int)1759846580, a5.F0); + Assert.Equal((float)1335901, a5.F1); + Assert.Equal((short)11514, a6); + Assert.Equal((double)695278874601974, a7); + 
Assert.Equal((sbyte)108, a8); + Assert.Equal((int)48660527, a9); + Assert.Equal((long)7762050749172332624, a10); + Assert.Equal((nuint)unchecked((nuint)7486686356276472663), a11.F0); + Assert.Equal((short)11622, a11.F1); + Assert.Equal((sbyte)112, a11.F2); + Assert.Equal((nuint)unchecked((nuint)884183974530885885), a12); + Assert.Equal((ulong)7434462110419085390, a13); + Assert.Equal((int)170242607, a14); + Assert.Equal((short)-26039, a15.F0); + Assert.Equal((byte)41, a16); + Assert.Equal((uint)191302504, a17.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 76; + } + + [Fact] + public static void TestSwiftCallbackFunc73() + { + Console.Write("Running SwiftCallbackFunc73: "); + ExceptionDispatchInfo ex = null; + sbyte val = SwiftCallbackFunc73(&SwiftCallbackFunc73Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((sbyte)76, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 17)] + struct F74_S0_S0 + { + public ushort F0; + public nuint F1; + public sbyte F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F74_S0 + { + public F74_S0_S0 F0; + public nint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F74_S1 + { + public float F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func741fs5Int64VAeA6F74_S0V_AA0H3_S1Vs5Int16VtXE_tF")] + private static extern long SwiftCallbackFunc74(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static long SwiftCallbackFunc74Callback(F74_S0 a0, F74_S1 a1, short a2, SwiftSelf self) + { + try + { + Assert.Equal((ushort)59883, a0.F0.F0); + Assert.Equal((nuint)unchecked((nuint)5554216411943233256), a0.F0.F1); + Assert.Equal((sbyte)126, a0.F0.F2); + 
Assert.Equal((nint)unchecked((nint)724541378819571203), a0.F1); + Assert.Equal((float)172601, a1.F0); + Assert.Equal((short)27932, a2); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 7382123574052120438; + } + + [Fact] + public static void TestSwiftCallbackFunc74() + { + Console.Write("Running SwiftCallbackFunc74: "); + ExceptionDispatchInfo ex = null; + long val = SwiftCallbackFunc74(&SwiftCallbackFunc74Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((long)7382123574052120438, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F75_S0 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F75_S1_S0 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F75_S1 + { + public F75_S1_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F75_S2 + { + public sbyte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F75_S3_S0 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F75_S3 + { + public F75_S3_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 36)] + struct F75_Ret + { + public byte F0; + public double F1; + public double F2; + public long F3; + public uint F4; + + public F75_Ret(byte f0, double f1, double f2, long f3, uint f4) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func751fAA7F75_RetVAEs4Int8V_A2gA0G3_S0VAA0G3_S1VAA0G3_S2VAA0G3_S3VtXE_tF")] + private static extern F75_Ret SwiftCallbackFunc75(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F75_Ret SwiftCallbackFunc75Callback(sbyte a0, sbyte a1, sbyte a2, F75_S0 a3, 
F75_S1 a4, F75_S2 a5, F75_S3 a6, SwiftSelf self) + { + try + { + Assert.Equal((sbyte)-105, a0); + Assert.Equal((sbyte)71, a1); + Assert.Equal((sbyte)108, a2); + Assert.Equal((long)7224638108479292438, a3.F0); + Assert.Equal((byte)126, a4.F0.F0); + Assert.Equal((sbyte)-88, a5.F0); + Assert.Equal((ushort)4934, a6.F0.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F75_Ret(8, 494440474432982, 3322048351205313, 7525253715666045341, 7365589); + } + + [Fact] + public static void TestSwiftCallbackFunc75() + { + Console.Write("Running SwiftCallbackFunc75: "); + ExceptionDispatchInfo ex = null; + F75_Ret val = SwiftCallbackFunc75(&SwiftCallbackFunc75Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((byte)8, val.F0); + Assert.Equal((double)494440474432982, val.F1); + Assert.Equal((double)3322048351205313, val.F2); + Assert.Equal((long)7525253715666045341, val.F3); + Assert.Equal((uint)7365589, val.F4); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F76_S0 + { + public ushort F0; + public nint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F76_S1_S0 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F76_S1 + { + public F76_S1_S0 F0; + public nuint F1; + public double F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 18)] + struct F76_S2 + { + public ulong F0; + public nint F1; + public ushort F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F76_S3_S0 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F76_S3 + { + public F76_S3_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F76_S4 + { + public long F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F76_S5 + { + public nuint F0; + public double F1; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + 
[DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func761fs6UInt64VAEs5UInt8V_AA6F76_S0Vs4Int8VAA0I3_S1VAA0I3_S2VAA0I3_S3Vs6UInt32VAA0I3_S4VAgA0I3_S5VSds5Int16VtXE_tF")] + private static extern ulong SwiftCallbackFunc76(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static ulong SwiftCallbackFunc76Callback(byte a0, F76_S0 a1, sbyte a2, F76_S1 a3, F76_S2 a4, F76_S3 a5, uint a6, F76_S4 a7, byte a8, F76_S5 a9, double a10, short a11, SwiftSelf self) + { + try + { + Assert.Equal((byte)69, a0); + Assert.Equal((ushort)25503, a1.F0); + Assert.Equal((nint)unchecked((nint)4872234474620951743), a1.F1); + Assert.Equal((sbyte)43, a2); + Assert.Equal((nint)unchecked((nint)1199076663426903579), a3.F0.F0); + Assert.Equal((nuint)unchecked((nuint)4639522222462236688), a3.F1); + Assert.Equal((double)4082956091930029, a3.F2); + Assert.Equal((ulong)5171821618947987626, a4.F0); + Assert.Equal((nint)unchecked((nint)3369410144919558564), a4.F1); + Assert.Equal((ushort)5287, a4.F2); + Assert.Equal((long)929854460912895550, a5.F0.F0); + Assert.Equal((uint)1208311201, a6); + Assert.Equal((long)7033993025788649145, a7.F0); + Assert.Equal((byte)58, a8); + Assert.Equal((nuint)unchecked((nuint)1401399014740601512), a9.F0); + Assert.Equal((double)2523645319232571, a9.F1); + Assert.Equal((double)230232835550369, a10); + Assert.Equal((short)-22975, a11); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 2608582352406315143; + } + + [Fact] + public static void TestSwiftCallbackFunc76() + { + Console.Write("Running SwiftCallbackFunc76: "); + ExceptionDispatchInfo ex = null; + ulong val = SwiftCallbackFunc76(&SwiftCallbackFunc76Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ulong)2608582352406315143, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct 
F77_S0 + { + public long F0; + public double F1; + public nuint F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F77_S1 + { + public short F0; + public float F1; + public float F2; + public long F3; + public long F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F77_S2 + { + public ushort F0; + public sbyte F1; + public int F2; + public float F3; + public float F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F77_Ret + { + public double F0; + public ushort F1; + public sbyte F2; + public nuint F3; + + public F77_Ret(double f0, ushort f1, sbyte f2, nuint f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func771fAA7F77_RetVAESd_AA0G3_S0VAA0G3_S1VAA0G3_S2Vs6UInt32VtXE_tF")] + private static extern F77_Ret SwiftCallbackFunc77(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F77_Ret SwiftCallbackFunc77Callback(double a0, F77_S0 a1, F77_S1 a2, F77_S2 a3, uint a4, SwiftSelf self) + { + try + { + Assert.Equal((double)1623173949127682, a0); + Assert.Equal((long)5204451347781433070, a1.F0); + Assert.Equal((double)3469485630755805, a1.F1); + Assert.Equal((nuint)unchecked((nuint)7586276835848725004), a1.F2); + Assert.Equal((short)2405, a2.F0); + Assert.Equal((float)2419792, a2.F1); + Assert.Equal((float)6769317, a2.F2); + Assert.Equal((long)1542327522833750776, a2.F3); + Assert.Equal((long)1297586130846695275, a2.F4); + Assert.Equal((ushort)10102, a3.F0); + Assert.Equal((sbyte)-48, a3.F1); + Assert.Equal((int)14517107, a3.F2); + Assert.Equal((float)4856023, a3.F3); + Assert.Equal((float)2681358, a3.F4); + Assert.Equal((uint)1463251524, a4); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new 
F77_Ret(1601613740657843, 14373, -17, unchecked((nuint)274065318894652498)); + } + + [Fact] + public static void TestSwiftCallbackFunc77() + { + Console.Write("Running SwiftCallbackFunc77: "); + ExceptionDispatchInfo ex = null; + F77_Ret val = SwiftCallbackFunc77(&SwiftCallbackFunc77Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((double)1601613740657843, val.F0); + Assert.Equal((ushort)14373, val.F1); + Assert.Equal((sbyte)-17, val.F2); + Assert.Equal((nuint)unchecked((nuint)274065318894652498), val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F78_S0 + { + public nuint F0; + public nint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F78_S1_S0 + { + public sbyte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F78_S1 + { + public short F0; + public ulong F1; + public F78_S1_S0 F2; + public int F3; + public nint F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F78_S2 + { + public nuint F0; + public ulong F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F78_S3 + { + public ulong F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F78_S4 + { + public ulong F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func781fS2ds6UInt64V_AA6F78_S0VAeA0H3_S1VAA0H3_S2Vs5Int32VAEs5Int64VAA0H3_S3VS2fs6UInt16VAA0H3_S4VSdtXE_tF")] + private static extern double SwiftCallbackFunc78(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static double SwiftCallbackFunc78Callback(ulong a0, F78_S0 a1, ulong a2, F78_S1 a3, F78_S2 a4, int a5, ulong a6, long a7, F78_S3 a8, float a9, float a10, ushort a11, F78_S4 a12, double a13, SwiftSelf self) + { + try + { + Assert.Equal((ulong)6780767594736146373, a0); + 
Assert.Equal((nuint)unchecked((nuint)6264193481541646332), a1.F0); + Assert.Equal((nint)unchecked((nint)6600856439035088503), a1.F1); + Assert.Equal((ulong)1968254881389492170, a2); + Assert.Equal((short)-17873, a3.F0); + Assert.Equal((ulong)5581169895682201971, a3.F1); + Assert.Equal((sbyte)127, a3.F2.F0); + Assert.Equal((int)1942346704, a3.F3); + Assert.Equal((nint)unchecked((nint)118658265323815307), a3.F4); + Assert.Equal((nuint)unchecked((nuint)1489326778640378879), a4.F0); + Assert.Equal((ulong)1427061853707270770, a4.F1); + Assert.Equal((int)858391966, a5); + Assert.Equal((ulong)5830110056171302270, a6); + Assert.Equal((long)2953614358173898788, a7); + Assert.Equal((ulong)6761452244699684409, a8.F0); + Assert.Equal((float)3452451, a9); + Assert.Equal((float)3507119, a10); + Assert.Equal((ushort)40036, a11); + Assert.Equal((ulong)4800085294404376817, a12.F0); + Assert.Equal((double)780368756754436, a13); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 1088544646657969; + } + + [Fact] + public static void TestSwiftCallbackFunc78() + { + Console.Write("Running SwiftCallbackFunc78: "); + ExceptionDispatchInfo ex = null; + double val = SwiftCallbackFunc78(&SwiftCallbackFunc78Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((double)1088544646657969, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F79_S0_S0 + { + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F79_S0 + { + public F79_S0_S0 F0; + public nint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F79_Ret + { + public uint F0; + public ulong F1; + public double F2; + + public F79_Ret(uint f0, ulong f1, double f2) + { + F0 = f0; + F1 = f1; + F2 = f2; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = 
"$s22SwiftCallbackAbiStress05swiftB6Func791fAA7F79_RetVAeA0G3_S0V_SftXE_tF")] + private static extern F79_Ret SwiftCallbackFunc79(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F79_Ret SwiftCallbackFunc79Callback(F79_S0 a0, float a1, SwiftSelf self) + { + try + { + Assert.Equal((nuint)unchecked((nuint)1013911700897046117), a0.F0.F0); + Assert.Equal((nint)unchecked((nint)7323935615297665289), a0.F1); + Assert.Equal((float)5159506, a1); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F79_Ret(895629788, 4824209192377460356, 2599150646028906); + } + + [Fact] + public static void TestSwiftCallbackFunc79() + { + Console.Write("Running SwiftCallbackFunc79: "); + ExceptionDispatchInfo ex = null; + F79_Ret val = SwiftCallbackFunc79(&SwiftCallbackFunc79Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((uint)895629788, val.F0); + Assert.Equal((ulong)4824209192377460356, val.F1); + Assert.Equal((double)2599150646028906, val.F2); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F80_S0 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F80_S1_S0_S0 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F80_S1_S0 + { + public F80_S1_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 9)] + struct F80_S1 + { + public nint F0; + public F80_S1_S0 F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F80_S2 + { + public ulong F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func801fS2fs6UInt64V_Sis5Int32Vs5Int16VSuAA6F80_S0VAISis4Int8VAGs6UInt32VAA0J3_S1VAA0J3_S2VAEtXE_tF")] + private static extern float SwiftCallbackFunc80(delegate* unmanaged[Swift] func, 
void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static float SwiftCallbackFunc80Callback(ulong a0, nint a1, int a2, short a3, nuint a4, F80_S0 a5, short a6, nint a7, sbyte a8, int a9, uint a10, F80_S1 a11, F80_S2 a12, ulong a13, SwiftSelf self) + { + try + { + Assert.Equal((ulong)4470427843910624516, a0); + Assert.Equal((nint)unchecked((nint)8383677749057878551), a1); + Assert.Equal((int)2017117925, a2); + Assert.Equal((short)-10531, a3); + Assert.Equal((nuint)unchecked((nuint)3438375001906177611), a4); + Assert.Equal((ushort)65220, a5.F0); + Assert.Equal((short)7107, a6); + Assert.Equal((nint)unchecked((nint)7315288835693680178), a7); + Assert.Equal((sbyte)-48, a8); + Assert.Equal((int)813870434, a9); + Assert.Equal((uint)1092037477, a10); + Assert.Equal((nint)unchecked((nint)7104962838387954470), a11.F0); + Assert.Equal((byte)236, a11.F1.F0.F0); + Assert.Equal((ulong)7460392384225808790, a12.F0); + Assert.Equal((ulong)364121728483540667, a13); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 5169959; + } + + [Fact] + public static void TestSwiftCallbackFunc80() + { + Console.Write("Running SwiftCallbackFunc80: "); + ExceptionDispatchInfo ex = null; + float val = SwiftCallbackFunc80(&SwiftCallbackFunc80Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((float)5169959, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F81_S0 + { + public float F0; + public float F1; + public nint F2; + public nint F3; + public nint F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F81_Ret + { + public nint F0; + + public F81_Ret(nint f0) + { + F0 = f0; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func811fAA7F81_RetVAEs5UInt8V_s6UInt32VAgA0G3_S0Vs4Int8VtXE_tF")] + 
private static extern F81_Ret SwiftCallbackFunc81(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F81_Ret SwiftCallbackFunc81Callback(byte a0, uint a1, byte a2, F81_S0 a3, sbyte a4, SwiftSelf self) + { + try + { + Assert.Equal((byte)53, a0); + Assert.Equal((uint)57591489, a1); + Assert.Equal((byte)19, a2); + Assert.Equal((float)5675845, a3.F0); + Assert.Equal((float)6469988, a3.F1); + Assert.Equal((nint)unchecked((nint)5775316279348621124), a3.F2); + Assert.Equal((nint)unchecked((nint)7699091894067057939), a3.F3); + Assert.Equal((nint)unchecked((nint)1049086627558950131), a3.F4); + Assert.Equal((sbyte)15, a4); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F81_Ret(unchecked((nint)1055606720535823947)); + } + + [Fact] + public static void TestSwiftCallbackFunc81() + { + Console.Write("Running SwiftCallbackFunc81: "); + ExceptionDispatchInfo ex = null; + F81_Ret val = SwiftCallbackFunc81(&SwiftCallbackFunc81Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nint)unchecked((nint)1055606720535823947), val.F0); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 18)] + struct F82_S0_S0 + { + public float F0; + public nuint F1; + public ushort F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 28)] + struct F82_S0 + { + public nuint F0; + public F82_S0_S0 F1; + public ushort F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F82_S1 + { + public int F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F82_S2 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F82_S3_S0 + { + public int F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 20)] + struct F82_S3 + { + public double F0; + public nuint F1; + public F82_S3_S0 F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + 
struct F82_S4 + { + public ulong F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func821fS2fs5Int64V_AA6F82_S0Vs5Int16Vs4Int8Vs6UInt32VAA0H3_S1Vs5Int32VAeKSdAA0H3_S2VAA0H3_S3VAA0H3_S4VtXE_tF")] + private static extern float SwiftCallbackFunc82(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static float SwiftCallbackFunc82Callback(long a0, F82_S0 a1, short a2, sbyte a3, uint a4, F82_S1 a5, int a6, long a7, sbyte a8, double a9, F82_S2 a10, F82_S3 a11, F82_S4 a12, SwiftSelf self) + { + try + { + Assert.Equal((long)6454754584537364459, a0); + Assert.Equal((nuint)unchecked((nuint)6703634779264968131), a1.F0); + Assert.Equal((float)1010059, a1.F1.F0); + Assert.Equal((nuint)unchecked((nuint)4772968591609202284), a1.F1.F1); + Assert.Equal((ushort)64552, a1.F1.F2); + Assert.Equal((ushort)47126, a1.F2); + Assert.Equal((short)9869, a2); + Assert.Equal((sbyte)-8, a3); + Assert.Equal((uint)1741550381, a4); + Assert.Equal((int)705741282, a5.F0); + Assert.Equal((int)1998781399, a6); + Assert.Equal((long)7787961471254401526, a7); + Assert.Equal((sbyte)-27, a8); + Assert.Equal((double)4429830670351707, a9); + Assert.Equal((nint)unchecked((nint)4975772762589349422), a10.F0); + Assert.Equal((double)1423948098664774, a11.F0); + Assert.Equal((nuint)unchecked((nuint)504607538824251986), a11.F1); + Assert.Equal((int)1940911018, a11.F2.F0); + Assert.Equal((ulong)2988623645681463667, a12.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 7514083; + } + + [Fact] + public static void TestSwiftCallbackFunc82() + { + Console.Write("Running SwiftCallbackFunc82: "); + ExceptionDispatchInfo ex = null; + float val = SwiftCallbackFunc82(&SwiftCallbackFunc82Callback, &ex); + if (ex != null) + ex.Throw(); + + 
Assert.Equal((float)7514083, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F83_S0 + { + public int F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F83_Ret + { + public short F0; + + public F83_Ret(short f0) + { + F0 = f0; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func831fAA7F83_RetVAEs4Int8V_AA0G3_S0Vs5Int16VtXE_tF")] + private static extern F83_Ret SwiftCallbackFunc83(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F83_Ret SwiftCallbackFunc83Callback(sbyte a0, F83_S0 a1, short a2, SwiftSelf self) + { + try + { + Assert.Equal((sbyte)17, a0); + Assert.Equal((int)530755056, a1.F0); + Assert.Equal((short)-11465, a2); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F83_Ret(-32475); + } + + [Fact] + public static void TestSwiftCallbackFunc83() + { + Console.Write("Running SwiftCallbackFunc83: "); + ExceptionDispatchInfo ex = null; + F83_Ret val = SwiftCallbackFunc83(&SwiftCallbackFunc83Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((short)-32475, val.F0); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 36)] + struct F84_S0 + { + public nuint F0; + public uint F1; + public nuint F2; + public ulong F3; + public int F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F84_S1 + { + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F84_S2 + { + public float F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F84_S3 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F84_S4 + { + public short F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 10)] + struct F84_S5 + { + 
public nint F0; + public short F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F84_S6 + { + public short F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F84_S7 + { + public int F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func841fS2is5Int32V_AA6F84_S0VAA0H3_S1VSdAEs5Int16VSdAA0H3_S2VAA0H3_S3VSdAA0H3_S4VAA0H3_S5VAA0H3_S6VAA0H3_S7VSutXE_tF")] + private static extern nint SwiftCallbackFunc84(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static nint SwiftCallbackFunc84Callback(int a0, F84_S0 a1, F84_S1 a2, double a3, int a4, short a5, double a6, F84_S2 a7, F84_S3 a8, double a9, F84_S4 a10, F84_S5 a11, F84_S6 a12, F84_S7 a13, nuint a14, SwiftSelf self) + { + try + { + Assert.Equal((int)1605022009, a0); + Assert.Equal((nuint)unchecked((nuint)6165049220831866664), a1.F0); + Assert.Equal((uint)1235491183, a1.F1); + Assert.Equal((nuint)unchecked((nuint)7926620970405586826), a1.F2); + Assert.Equal((ulong)2633248816907294140, a1.F3); + Assert.Equal((int)2012834055, a1.F4); + Assert.Equal((nuint)unchecked((nuint)2881830362339122988), a2.F0); + Assert.Equal((double)4065309434963087, a3); + Assert.Equal((int)1125165825, a4); + Assert.Equal((short)-32360, a5); + Assert.Equal((double)1145602045200029, a6); + Assert.Equal((float)5655563, a7.F0); + Assert.Equal((byte)14, a8.F0); + Assert.Equal((double)3919593995303128, a9); + Assert.Equal((short)26090, a10.F0); + Assert.Equal((nint)unchecked((nint)8584898862398781737), a11.F0); + Assert.Equal((short)-5185, a11.F1); + Assert.Equal((short)144, a12.F0); + Assert.Equal((int)2138004352, a13.F0); + Assert.Equal((nuint)unchecked((nuint)9102562043027810686), a14); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 
unchecked((nint)2320162198211027422); + } + + [Fact] + public static void TestSwiftCallbackFunc84() + { + Console.Write("Running SwiftCallbackFunc84: "); + ExceptionDispatchInfo ex = null; + nint val = SwiftCallbackFunc84(&SwiftCallbackFunc84Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((nint)unchecked((nint)2320162198211027422), val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F85_S0 + { + public double F0; + public double F1; + public sbyte F2; + public int F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F85_S1 + { + public long F0; + public ushort F1; + public ulong F2; + public nuint F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F85_S2 + { + public float F0; + public float F1; + public uint F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F85_S3 + { + public byte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F85_S4 + { + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F85_S5 + { + public double F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 48)] + struct F85_Ret + { + public uint F0; + public ushort F1; + public int F2; + public double F3; + public nint F4; + public ulong F5; + public long F6; + + public F85_Ret(uint f0, ushort f1, int f2, double f3, nint f4, ulong f5, long f6) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + F4 = f4; + F5 = f5; + F6 = f6; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func851fAA7F85_RetVAeA0G3_S0V_AA0G3_S1Vs6UInt32VAA0G3_S2Vs5Int64VAA0G3_S3VAoA0G3_S4Vs6UInt16Vs5UInt8Vs5Int32VAkYSfAA0G3_S5VAOtXE_tF")] + private static extern F85_Ret SwiftCallbackFunc85(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F85_Ret 
SwiftCallbackFunc85Callback(F85_S0 a0, F85_S1 a1, uint a2, F85_S2 a3, long a4, F85_S3 a5, long a6, F85_S4 a7, ushort a8, byte a9, int a10, uint a11, int a12, float a13, F85_S5 a14, long a15, SwiftSelf self) + { + try + { + Assert.Equal((double)4325646965362202, a0.F0); + Assert.Equal((double)3313084380250914, a0.F1); + Assert.Equal((sbyte)42, a0.F2); + Assert.Equal((int)2034100272, a0.F3); + Assert.Equal((long)1365643665271339575, a1.F0); + Assert.Equal((ushort)25442, a1.F1); + Assert.Equal((ulong)3699631470459352980, a1.F2); + Assert.Equal((nuint)unchecked((nuint)7611776251925132200), a1.F3); + Assert.Equal((uint)911446742, a2); + Assert.Equal((float)352423, a3.F0); + Assert.Equal((float)7150341, a3.F1); + Assert.Equal((uint)2090089360, a3.F2); + Assert.Equal((long)5731257538910387688, a4); + Assert.Equal((byte)171, a5.F0); + Assert.Equal((long)5742887585483060342, a6); + Assert.Equal((nuint)unchecked((nuint)1182236975680416316), a7.F0); + Assert.Equal((ushort)32137, a8); + Assert.Equal((byte)44, a9); + Assert.Equal((int)2143531010, a10); + Assert.Equal((uint)1271996557, a11); + Assert.Equal((int)1035188446, a12); + Assert.Equal((float)1925443, a13); + Assert.Equal((double)2591574394337603, a14.F0); + Assert.Equal((long)721102428782331317, a15); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F85_Ret(1768798158, 27348, 1836190158, 2058478254572549, unchecked((nint)7881716796049851507), 5099946246805224241, 1499623158991084417); + } + + [Fact] + public static void TestSwiftCallbackFunc85() + { + Console.Write("Running SwiftCallbackFunc85: "); + ExceptionDispatchInfo ex = null; + F85_Ret val = SwiftCallbackFunc85(&SwiftCallbackFunc85Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((uint)1768798158, val.F0); + Assert.Equal((ushort)27348, val.F1); + Assert.Equal((int)1836190158, val.F2); + Assert.Equal((double)2058478254572549, val.F3); + 
Assert.Equal((nint)unchecked((nint)7881716796049851507), val.F4); + Assert.Equal((ulong)5099946246805224241, val.F5); + Assert.Equal((long)1499623158991084417, val.F6); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 15)] + struct F86_S0 + { + public nint F0; + public float F1; + public short F2; + public sbyte F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F86_S1 + { + public double F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F86_S2 + { + public nint F0; + public float F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F86_S3 + { + public ushort F0; + public float F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 17)] + struct F86_Ret + { + public short F0; + public uint F1; + public double F2; + public byte F3; + + public F86_Ret(short f0, uint f1, double f2, byte f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func861fAA7F86_RetVAESf_s5Int16VSiAGSfAA0G3_S0VAA0G3_S1VAA0G3_S2VSis6UInt32VS2uSfs5Int64VAA0G3_S3VSutXE_tF")] + private static extern F86_Ret SwiftCallbackFunc86(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F86_Ret SwiftCallbackFunc86Callback(float a0, short a1, nint a2, short a3, float a4, F86_S0 a5, F86_S1 a6, F86_S2 a7, nint a8, uint a9, nuint a10, nuint a11, float a12, long a13, F86_S3 a14, nuint a15, SwiftSelf self) + { + try + { + Assert.Equal((float)2913632, a0); + Assert.Equal((short)3735, a1); + Assert.Equal((nint)unchecked((nint)2773655476379499086), a2); + Assert.Equal((short)22973, a3); + Assert.Equal((float)8292778, a4); + Assert.Equal((nint)unchecked((nint)5562042565258891920), a5.F0); + Assert.Equal((float)8370233, a5.F1); + Assert.Equal((short)18292, a5.F2); + Assert.Equal((sbyte)-32, 
a5.F3); + Assert.Equal((double)486951152980016, a6.F0); + Assert.Equal((nint)unchecked((nint)170033426151098456), a7.F0); + Assert.Equal((float)3867810, a7.F1); + Assert.Equal((nint)unchecked((nint)7390780928011218856), a8); + Assert.Equal((uint)1504267943, a9); + Assert.Equal((nuint)unchecked((nuint)2046987193814931100), a10); + Assert.Equal((nuint)unchecked((nuint)4860202472307588968), a11); + Assert.Equal((float)1644019, a12); + Assert.Equal((long)8084012412562897328, a13); + Assert.Equal((ushort)46301, a14.F0); + Assert.Equal((float)5633701, a14.F1); + Assert.Equal((nuint)unchecked((nuint)1911608136082175332), a15); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F86_Ret(23398, 842205070, 544883763911905, 215); + } + + [Fact] + public static void TestSwiftCallbackFunc86() + { + Console.Write("Running SwiftCallbackFunc86: "); + ExceptionDispatchInfo ex = null; + F86_Ret val = SwiftCallbackFunc86(&SwiftCallbackFunc86Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((short)23398, val.F0); + Assert.Equal((uint)842205070, val.F1); + Assert.Equal((double)544883763911905, val.F2); + Assert.Equal((byte)215, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F87_S0 + { + public int F0; + public short F1; + public int F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F87_S1 + { + public float F0; + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func871fs6UInt64VAESf_SiAA6F87_S0VAA0H3_S1VtXE_tF")] + private static extern ulong SwiftCallbackFunc87(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static ulong SwiftCallbackFunc87Callback(float a0, nint a1, F87_S0 a2, F87_S1 a3, SwiftSelf self) + { + try + { + 
Assert.Equal((float)1413086, a0); + Assert.Equal((nint)unchecked((nint)4206825694012787823), a1); + Assert.Equal((int)70240457, a2.F0); + Assert.Equal((short)30503, a2.F1); + Assert.Equal((int)671751848, a2.F2); + Assert.Equal((float)6641304, a3.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return 7817329728997505478; + } + + [Fact] + public static void TestSwiftCallbackFunc87() + { + Console.Write("Running SwiftCallbackFunc87: "); + ExceptionDispatchInfo ex = null; + ulong val = SwiftCallbackFunc87(&SwiftCallbackFunc87Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((ulong)7817329728997505478, val); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 18)] + struct F88_S0 + { + public sbyte F0; + public short F1; + public byte F2; + public double F3; + public ushort F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 9)] + struct F88_S1 + { + public double F0; + public byte F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F88_S2 + { + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F88_S3 + { + public sbyte F0; + public uint F1; + } + [StructLayout(LayoutKind.Sequential, Size = 24)] - struct F0_Ret + struct F88_Ret + { + public int F0; + public uint F1; + public nint F2; + public ulong F3; + + public F88_Ret(int f0, uint f1, nint f2, ulong f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func881fAA7F88_RetVAeA0G3_S0V_AA0G3_S1VSfSuSfSiAA0G3_S2Vs6UInt64VAA0G3_S3VAMtXE_tF")] + private static extern F88_Ret SwiftCallbackFunc88(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F88_Ret SwiftCallbackFunc88Callback(F88_S0 a0, F88_S1 a1, float a2, nuint 
a3, float a4, nint a5, F88_S2 a6, ulong a7, F88_S3 a8, ulong a9, SwiftSelf self) + { + try + { + Assert.Equal((sbyte)125, a0.F0); + Assert.Equal((short)-10705, a0.F1); + Assert.Equal((byte)21, a0.F2); + Assert.Equal((double)361845689097003, a0.F3); + Assert.Equal((ushort)41749, a0.F4); + Assert.Equal((double)1754583995806427, a1.F0); + Assert.Equal((byte)178, a1.F1); + Assert.Equal((float)4705205, a2); + Assert.Equal((nuint)unchecked((nuint)5985040566226273121), a3); + Assert.Equal((float)2484194, a4); + Assert.Equal((nint)unchecked((nint)1904196135427766362), a5); + Assert.Equal((nuint)unchecked((nuint)5436710892090266406), a6.F0); + Assert.Equal((ulong)4250368992471675181, a7); + Assert.Equal((sbyte)-87, a8.F0); + Assert.Equal((uint)362108395, a8.F1); + Assert.Equal((ulong)3388632419732870796, a9); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F88_Ret(46260161, 1256453227, unchecked((nint)1136413683894590872), 5467618237876965483); + } + + [Fact] + public static void TestSwiftCallbackFunc88() + { + Console.Write("Running SwiftCallbackFunc88: "); + ExceptionDispatchInfo ex = null; + F88_Ret val = SwiftCallbackFunc88(&SwiftCallbackFunc88Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((int)46260161, val.F0); + Assert.Equal((uint)1256453227, val.F1); + Assert.Equal((nint)unchecked((nint)1136413683894590872), val.F2); + Assert.Equal((ulong)5467618237876965483, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F89_S0 + { + public double F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F89_Ret_S0 + { + public double F0; + + public F89_Ret_S0(double f0) + { + F0 = f0; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F89_Ret + { + public int F0; + public F89_Ret_S0 F1; + public nuint F2; + public long F3; + + public F89_Ret(int f0, F89_Ret_S0 f1, nuint f2, long f3) + { + F0 = f0; + F1 = 
f1; + F2 = f2; + F3 = f3; + } + } + + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func891fAA7F89_RetVAeA0G3_S0VXE_tF")] + private static extern F89_Ret SwiftCallbackFunc89(delegate* unmanaged[Swift] func, void* funcContext); + + [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] + private static F89_Ret SwiftCallbackFunc89Callback(F89_S0 a0, SwiftSelf self) + { + try + { + Assert.Equal((double)2137010348736191, a0.F0); + } + catch (Exception ex) + { + *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); + } + + return new F89_Ret(891143792, new F89_Ret_S0(3363709596088133), unchecked((nuint)18782615486598250), 1765451025668395967); + } + + [Fact] + public static void TestSwiftCallbackFunc89() + { + Console.Write("Running SwiftCallbackFunc89: "); + ExceptionDispatchInfo ex = null; + F89_Ret val = SwiftCallbackFunc89(&SwiftCallbackFunc89Callback, &ex); + if (ex != null) + ex.Throw(); + + Assert.Equal((int)891143792, val.F0); + Assert.Equal((double)3363709596088133, val.F1.F0); + Assert.Equal((nuint)unchecked((nuint)18782615486598250), val.F2); + Assert.Equal((long)1765451025668395967, val.F3); + Console.WriteLine("OK"); + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F90_S0_S0_S0 + { + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F90_S0_S0 + { + public F90_S0_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 34)] + struct F90_S0 + { + public F90_S0_S0 F0; + public nuint F1; + public uint F2; + public long F3; + public short F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F90_S1 + { + public ushort F0; + public short F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F90_S2 + { + public nint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F90_S3 + { + public nuint F0; + } + + 
[StructLayout(LayoutKind.Sequential, Size = 8)] + struct F90_S4 { - public ushort F0; - public float F1; - public int F2; - public ulong F3; + public ulong F0; + } - public F0_Ret(ushort f0, float f1, int f2, ulong f3) + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F90_Ret + { + public short F0; + public nint F1; + + public F90_Ret(short f0, nint f1) { F0 = f0; F1 = f1; - F2 = f2; - F3 = f3; } } [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] - [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func01fAA6F0_RetVAEs5Int16V_s5Int32Vs6UInt64Vs6UInt16Vs5Int64VSds6UInt32VAMSiAKtXE_tF")] - private static extern F0_Ret SwiftCallbackFunc0(delegate* unmanaged[Swift] func, void* funcContext); + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func901fAA7F90_RetVAEs5Int64V_SfAA0G3_S0Vs6UInt32Vs6UInt16VAA0G3_S1VAA0G3_S2VAA0G3_S3VAA0G3_S4VtXE_tF")] + private static extern F90_Ret SwiftCallbackFunc90(delegate* unmanaged[Swift] func, void* funcContext); [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] - private static F0_Ret SwiftCallbackFunc0Callback(short a0, int a1, ulong a2, ushort a3, long a4, double a5, uint a6, ushort a7, nint a8, ulong a9, SwiftSelf self) + private static F90_Ret SwiftCallbackFunc90Callback(long a0, float a1, F90_S0 a2, uint a3, ushort a4, F90_S1 a5, F90_S2 a6, F90_S3 a7, F90_S4 a8, SwiftSelf self) { try { - Assert.Equal((short)-17813, a0); - Assert.Equal((int)318006528, a1); - Assert.Equal((ulong)1195162122024233590, a2); - Assert.Equal((ushort)60467, a3); - Assert.Equal((long)4587464142261794085, a4); - Assert.Equal((double)2686980744237725, a5); - Assert.Equal((uint)331986645, a6); - Assert.Equal((ushort)56299, a7); - Assert.Equal((nint)unchecked((nint)6785053689615432643), a8); - Assert.Equal((ulong)6358078381523084952, a9); + Assert.Equal((long)920081051198141017, a0); + Assert.Equal((float)661904, a1); + 
Assert.Equal((nuint)unchecked((nuint)3898354148166517637), a2.F0.F0.F0); + Assert.Equal((nuint)unchecked((nuint)1003118682503285076), a2.F1); + Assert.Equal((uint)1418362079, a2.F2); + Assert.Equal((long)3276689793574299746, a2.F3); + Assert.Equal((short)-18559, a2.F4); + Assert.Equal((uint)1773011602, a3); + Assert.Equal((ushort)32638, a4); + Assert.Equal((ushort)47129, a5.F0); + Assert.Equal((short)-31849, a5.F1); + Assert.Equal((nint)unchecked((nint)4795020225668482328), a6.F0); + Assert.Equal((nuint)unchecked((nuint)5307513663902191175), a7.F0); + Assert.Equal((ulong)7057074401404034083, a8.F0); } catch (Exception ex) { *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); } - return new F0_Ret(65117, 981990, 1192391225, 7001579272668151908); + return new F90_Ret(25416, unchecked((nint)5015525780568020281)); } [Fact] - public static void TestSwiftCallbackFunc0() + public static void TestSwiftCallbackFunc90() { - Console.Write("Running SwiftCallbackFunc0: "); + Console.Write("Running SwiftCallbackFunc90: "); ExceptionDispatchInfo ex = null; - F0_Ret val = SwiftCallbackFunc0(&SwiftCallbackFunc0Callback, &ex); + F90_Ret val = SwiftCallbackFunc90(&SwiftCallbackFunc90Callback, &ex); if (ex != null) ex.Throw(); - Assert.Equal((ushort)65117, val.F0); - Assert.Equal((float)981990, val.F1); - Assert.Equal((int)1192391225, val.F2); - Assert.Equal((ulong)7001579272668151908, val.F3); + Assert.Equal((short)25416, val.F0); + Assert.Equal((nint)unchecked((nint)5015525780568020281), val.F1); Console.WriteLine("OK"); } + [StructLayout(LayoutKind.Sequential, Size = 20)] + struct F91_S0 + { + public sbyte F0; + public nint F1; + public ushort F2; + public ushort F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 36)] + struct F91_S1 + { + public double F0; + public ulong F1; + public sbyte F2; + public long F3; + public float F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F91_S2_S0_S0 + { + public long F0; + } + + 
[StructLayout(LayoutKind.Sequential, Size = 8)] + struct F91_S2_S0 + { + public F91_S2_S0_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 18)] + struct F91_S2 + { + public double F0; + public F91_S2_S0 F1; + public short F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F91_S3_S0 + { + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F91_S3 + { + public F91_S3_S0 F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 24)] + struct F91_Ret + { + public long F0; + public ulong F1; + public short F2; + public uint F3; + + public F91_Ret(long f0, ulong f1, short f2, uint f3) + { + F0 = f0; + F1 = f1; + F2 = f2; + F3 = f3; + } + } + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] - [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func11fS2uSd_s4Int8Vs5Int32Vs6UInt16Vs5UInt8VSdAKs6UInt64Vs5Int16VS2fAmEtXE_tF")] - private static extern nuint SwiftCallbackFunc1(delegate* unmanaged[Swift] func, void* funcContext); + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func911fAA7F91_RetVAeA0G3_S0V_s5Int16Vs6UInt32VSdAA0G3_S1Vs5Int64Vs6UInt64VSfAA0G3_S2VSiAA0G3_S3VtXE_tF")] + private static extern F91_Ret SwiftCallbackFunc91(delegate* unmanaged[Swift] func, void* funcContext); [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] - private static nuint SwiftCallbackFunc1Callback(double a0, sbyte a1, int a2, ushort a3, byte a4, double a5, byte a6, ulong a7, short a8, float a9, float a10, ulong a11, sbyte a12, SwiftSelf self) + private static F91_Ret SwiftCallbackFunc91Callback(F91_S0 a0, short a1, uint a2, double a3, F91_S1 a4, long a5, ulong a6, float a7, F91_S2 a8, nint a9, F91_S3 a10, SwiftSelf self) { try { - Assert.Equal((double)3867437130564654, a0); - Assert.Equal((sbyte)-64, a1); - Assert.Equal((int)31081182, a2); - Assert.Equal((ushort)20316, a3); - Assert.Equal((byte)73, a4); - Assert.Equal((double)3543740592144911, a5); - 
Assert.Equal((byte)250, a6); - Assert.Equal((ulong)6680393408153342744, a7); - Assert.Equal((short)23758, a8); - Assert.Equal((float)7189013, a9); - Assert.Equal((float)5438196, a10); - Assert.Equal((ulong)3310322731568932038, a11); - Assert.Equal((sbyte)3, a12); + Assert.Equal((sbyte)-117, a0.F0); + Assert.Equal((nint)unchecked((nint)6851485542307521521), a0.F1); + Assert.Equal((ushort)23224, a0.F2); + Assert.Equal((ushort)28870, a0.F3); + Assert.Equal((short)-26318, a1); + Assert.Equal((uint)874052395, a2); + Assert.Equal((double)3651199868446152, a3); + Assert.Equal((double)3201729800438540, a4.F0); + Assert.Equal((ulong)7737032265509566019, a4.F1); + Assert.Equal((sbyte)123, a4.F2); + Assert.Equal((long)7508633930609553617, a4.F3); + Assert.Equal((float)8230501, a4.F4); + Assert.Equal((long)2726677037673277403, a5); + Assert.Equal((ulong)4990410590084533996, a6); + Assert.Equal((float)3864639, a7); + Assert.Equal((double)1763083442463892, a8.F0); + Assert.Equal((long)6783710957456602933, a8.F1.F0.F0); + Assert.Equal((short)2927, a8.F2); + Assert.Equal((nint)unchecked((nint)3359440517385934325), a9); + Assert.Equal((nuint)unchecked((nuint)3281136825102667421), a10.F0.F0); } catch (Exception ex) { *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); } - return unchecked((nuint)2172476334497055933); + return new F91_Ret(8703949006228331232, 4839530995689756024, 14798, 1337111683); } [Fact] - public static void TestSwiftCallbackFunc1() + public static void TestSwiftCallbackFunc91() { - Console.Write("Running SwiftCallbackFunc1: "); + Console.Write("Running SwiftCallbackFunc91: "); ExceptionDispatchInfo ex = null; - nuint val = SwiftCallbackFunc1(&SwiftCallbackFunc1Callback, &ex); + F91_Ret val = SwiftCallbackFunc91(&SwiftCallbackFunc91Callback, &ex); if (ex != null) ex.Throw(); - Assert.Equal((nuint)unchecked((nuint)2172476334497055933), val); + Assert.Equal((long)8703949006228331232, val.F0); + Assert.Equal((ulong)4839530995689756024, val.F1); 
+ Assert.Equal((short)14798, val.F2); + Assert.Equal((uint)1337111683, val.F3); Console.WriteLine("OK"); } - [StructLayout(LayoutKind.Sequential, Size = 12)] - struct F2_Ret_S0 + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F92_S0 { - public long F0; - public int F1; + public double F0; + public double F1; + } - public F2_Ret_S0(long f0, int f1) - { - F0 = f0; - F1 = f1; - } + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F92_S1 + { + public uint F0; + public long F1; + public uint F2; + public short F3; + public ulong F4; } - [StructLayout(LayoutKind.Sequential, Size = 14)] - struct F2_Ret + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F92_S2_S0 { - public F2_Ret_S0 F0; - public short F1; + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 18)] + struct F92_S2 + { + public uint F0; + public long F1; + public F92_S2_S0 F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F92_Ret + { + public int F0; - public F2_Ret(F2_Ret_S0 f0, short f1) + public F92_Ret(int f0) { F0 = f0; - F1 = f1; } } [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] - [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func21fAA6F2_RetVAESu_s5UInt8VtXE_tF")] - private static extern F2_Ret SwiftCallbackFunc2(delegate* unmanaged[Swift] func, void* funcContext); + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func921fAA7F92_RetVAEs6UInt32V_s5Int64VAA0G3_S0VSis5UInt8VAA0G3_S1VAA0G3_S2VAMSis5Int32VtXE_tF")] + private static extern F92_Ret SwiftCallbackFunc92(delegate* unmanaged[Swift] func, void* funcContext); [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] - private static F2_Ret SwiftCallbackFunc2Callback(nuint a0, byte a1, SwiftSelf self) + private static F92_Ret SwiftCallbackFunc92Callback(uint a0, long a1, F92_S0 a2, nint a3, byte a4, F92_S1 a5, F92_S2 a6, byte a7, nint a8, int a9, SwiftSelf self) { try { - 
Assert.Equal((nuint)unchecked((nuint)2153637757371267722), a0); - Assert.Equal((byte)150, a1); + Assert.Equal((uint)479487770, a0); + Assert.Equal((long)3751818229732502126, a1); + Assert.Equal((double)3486664439392893, a2.F0); + Assert.Equal((double)1451061144702448, a2.F1); + Assert.Equal((nint)unchecked((nint)1103649059951788126), a3); + Assert.Equal((byte)17, a4); + Assert.Equal((uint)1542537473, a5.F0); + Assert.Equal((long)2256304993713022795, a5.F1); + Assert.Equal((uint)1773847876, a5.F2); + Assert.Equal((short)-4712, a5.F3); + Assert.Equal((ulong)2811859744132572185, a5.F4); + Assert.Equal((uint)290315682, a6.F0); + Assert.Equal((long)4847587202070249866, a6.F1); + Assert.Equal((ushort)20774, a6.F2.F0); + Assert.Equal((byte)8, a7); + Assert.Equal((nint)unchecked((nint)2206063999764082749), a8); + Assert.Equal((int)1481391120, a9); } catch (Exception ex) { *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); } - return new F2_Ret(new F2_Ret_S0(5628852360797741825, 939232542), -9943); + return new F92_Ret(2031462105); } [Fact] - public static void TestSwiftCallbackFunc2() + public static void TestSwiftCallbackFunc92() { - Console.Write("Running SwiftCallbackFunc2: "); + Console.Write("Running SwiftCallbackFunc92: "); ExceptionDispatchInfo ex = null; - F2_Ret val = SwiftCallbackFunc2(&SwiftCallbackFunc2Callback, &ex); + F92_Ret val = SwiftCallbackFunc92(&SwiftCallbackFunc92Callback, &ex); if (ex != null) ex.Throw(); - Assert.Equal((long)5628852360797741825, val.F0.F0); - Assert.Equal((int)939232542, val.F0.F1); - Assert.Equal((short)-9943, val.F1); + Assert.Equal((int)2031462105, val.F0); Console.WriteLine("OK"); } - [StructLayout(LayoutKind.Sequential, Size = 10)] - struct F3_Ret_S0 + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F93_S0 { - public short F0; - public int F1; - public ushort F2; + public sbyte F0; + public uint F1; + } - public F3_Ret_S0(short f0, int f1, ushort f2) - { - F0 = f0; - F1 = f1; - F2 = f2; - } + 
[StructLayout(LayoutKind.Sequential, Size = 4)] + struct F93_S1 + { + public uint F0; } - [StructLayout(LayoutKind.Sequential, Size = 33)] - struct F3_Ret + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F93_Ret { public nint F0; - public F3_Ret_S0 F1; - public nuint F2; - public sbyte F3; + public ulong F1; - public F3_Ret(nint f0, F3_Ret_S0 f1, nuint f2, sbyte f3) + public F93_Ret(nint f0, ulong f1) { F0 = f0; F1 = f1; - F2 = f2; - F3 = f3; } } [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] - [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func31fAA6F3_RetVAEs6UInt16V_S2uSiSfAGtXE_tF")] - private static extern F3_Ret SwiftCallbackFunc3(delegate* unmanaged[Swift] func, void* funcContext); + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func931fAA7F93_RetVAESu_s6UInt16VSdAA0G3_S0VAA0G3_S1VtXE_tF")] + private static extern F93_Ret SwiftCallbackFunc93(delegate* unmanaged[Swift] func, void* funcContext); [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] - private static F3_Ret SwiftCallbackFunc3Callback(ushort a0, nuint a1, nuint a2, nint a3, float a4, ushort a5, SwiftSelf self) + private static F93_Ret SwiftCallbackFunc93Callback(nuint a0, ushort a1, double a2, F93_S0 a3, F93_S1 a4, SwiftSelf self) { try { - Assert.Equal((ushort)45065, a0); - Assert.Equal((nuint)unchecked((nuint)8506742096411295359), a1); - Assert.Equal((nuint)unchecked((nuint)8619375465417625458), a2); - Assert.Equal((nint)unchecked((nint)5288917394772427257), a3); - Assert.Equal((float)5678138, a4); - Assert.Equal((ushort)33467, a5); + Assert.Equal((nuint)unchecked((nuint)5170226481546239050), a0); + Assert.Equal((ushort)2989, a1); + Assert.Equal((double)1630717078645270, a2); + Assert.Equal((sbyte)-46, a3.F0); + Assert.Equal((uint)859171256, a3.F1); + Assert.Equal((uint)254449240, a4.F0); } catch (Exception ex) { *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); } - return 
new F3_Ret(unchecked((nint)3330016214205716187), new F3_Ret_S0(-29819, 2075852318, 671), unchecked((nuint)2368015527878194540), -79); + return new F93_Ret(unchecked((nint)7713003294977630041), 4769707787914611024); } [Fact] - public static void TestSwiftCallbackFunc3() + public static void TestSwiftCallbackFunc93() { - Console.Write("Running SwiftCallbackFunc3: "); + Console.Write("Running SwiftCallbackFunc93: "); ExceptionDispatchInfo ex = null; - F3_Ret val = SwiftCallbackFunc3(&SwiftCallbackFunc3Callback, &ex); + F93_Ret val = SwiftCallbackFunc93(&SwiftCallbackFunc93Callback, &ex); if (ex != null) ex.Throw(); - Assert.Equal((nint)unchecked((nint)3330016214205716187), val.F0); - Assert.Equal((short)-29819, val.F1.F0); - Assert.Equal((int)2075852318, val.F1.F1); - Assert.Equal((ushort)671, val.F1.F2); - Assert.Equal((nuint)unchecked((nuint)2368015527878194540), val.F2); - Assert.Equal((sbyte)-79, val.F3); + Assert.Equal((nint)unchecked((nint)7713003294977630041), val.F0); + Assert.Equal((ulong)4769707787914611024, val.F1); Console.WriteLine("OK"); } - [StructLayout(LayoutKind.Sequential, Size = 24)] - struct F4_Ret + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F94_S0 { - public ulong F0; + public nuint F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F94_S1 + { + public int F0; + public nuint F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 14)] + struct F94_S2 + { + public nint F0; public uint F1; - public ulong F2; + public ushort F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 12)] + struct F94_S3 + { + public byte F0; + public int F1; + public float F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 20)] + struct F94_S4 + { + public int F0; + public long F1; + public float F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 19)] + struct F94_S5 + { + public short F0; + public nuint F1; + public short F2; + public sbyte F3; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F94_Ret + { + 
public long F0; - public F4_Ret(ulong f0, uint f1, ulong f2) + public F94_Ret(long f0) { F0 = f0; - F1 = f1; - F2 = f2; } } [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] - [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func41fAA6F4_RetVAEs5Int64V_s6UInt16Vs5Int32VAISiSdAISfAkIs4Int8VSfs6UInt64Vs5Int16VSdA2mKSiAk2GtXE_tF")] - private static extern F4_Ret SwiftCallbackFunc4(delegate* unmanaged[Swift] func, void* funcContext); + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func941fAA7F94_RetVAeA0G3_S0V_s5Int16VAA0G3_S1VAA0G3_S2VAA0G3_S3VSfAA0G3_S4Vs6UInt32VAA0G3_S5VAItXE_tF")] + private static extern F94_Ret SwiftCallbackFunc94(delegate* unmanaged[Swift] func, void* funcContext); [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] - private static F4_Ret SwiftCallbackFunc4Callback(long a0, ushort a1, int a2, ushort a3, nint a4, double a5, ushort a6, float a7, int a8, ushort a9, sbyte a10, float a11, ulong a12, short a13, double a14, sbyte a15, sbyte a16, int a17, nint a18, int a19, long a20, long a21, SwiftSelf self) + private static F94_Ret SwiftCallbackFunc94Callback(F94_S0 a0, short a1, F94_S1 a2, F94_S2 a3, F94_S3 a4, float a5, F94_S4 a6, uint a7, F94_S5 a8, short a9, SwiftSelf self) { try { - Assert.Equal((long)8771527078890676837, a0); - Assert.Equal((ushort)18667, a1); - Assert.Equal((int)224631333, a2); - Assert.Equal((ushort)13819, a3); - Assert.Equal((nint)unchecked((nint)8888237425788084647), a4); - Assert.Equal((double)2677321682649925, a5); - Assert.Equal((ushort)50276, a6); - Assert.Equal((float)2703201, a7); - Assert.Equal((int)545337834, a8); - Assert.Equal((ushort)11190, a9); - Assert.Equal((sbyte)112, a10); - Assert.Equal((float)4053251, a11); - Assert.Equal((ulong)7107857019164433129, a12); - Assert.Equal((short)-3092, a13); - Assert.Equal((double)2176685406663423, a14); - Assert.Equal((sbyte)57, a15); - Assert.Equal((sbyte)-61, a16); - 
Assert.Equal((int)866840318, a17); - Assert.Equal((nint)unchecked((nint)5927291145767969522), a18); - Assert.Equal((int)1818333546, a19); - Assert.Equal((long)6272248211765159948, a20); - Assert.Equal((long)6555966806846053216, a21); + Assert.Equal((nuint)unchecked((nuint)8626725032375870186), a0.F0); + Assert.Equal((short)-7755, a1); + Assert.Equal((int)544707027, a2.F0); + Assert.Equal((nuint)unchecked((nuint)2251410026467996594), a2.F1); + Assert.Equal((nint)unchecked((nint)2972912419231960385), a3.F0); + Assert.Equal((uint)740529487, a3.F1); + Assert.Equal((ushort)34526, a3.F2); + Assert.Equal((byte)41, a4.F0); + Assert.Equal((int)1598856955, a4.F1); + Assert.Equal((float)5126603, a4.F2); + Assert.Equal((float)7242977, a5); + Assert.Equal((int)473684762, a6.F0); + Assert.Equal((long)4023878650965716094, a6.F1); + Assert.Equal((float)2777693, a6.F2); + Assert.Equal((uint)1612378906, a7); + Assert.Equal((short)-17074, a8.F0); + Assert.Equal((nuint)unchecked((nuint)2666903737827472071), a8.F1); + Assert.Equal((short)418, a8.F2); + Assert.Equal((sbyte)106, a8.F3); + Assert.Equal((short)-14547, a9); } catch (Exception ex) { *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); } - return new F4_Ret(2182947204061522719, 1721424472, 7504841280611598884); + return new F94_Ret(4965341488842559693); } [Fact] - public static void TestSwiftCallbackFunc4() + public static void TestSwiftCallbackFunc94() { - Console.Write("Running SwiftCallbackFunc4: "); + Console.Write("Running SwiftCallbackFunc94: "); ExceptionDispatchInfo ex = null; - F4_Ret val = SwiftCallbackFunc4(&SwiftCallbackFunc4Callback, &ex); + F94_Ret val = SwiftCallbackFunc94(&SwiftCallbackFunc94Callback, &ex); if (ex != null) ex.Throw(); - Assert.Equal((ulong)2182947204061522719, val.F0); - Assert.Equal((uint)1721424472, val.F1); - Assert.Equal((ulong)7504841280611598884, val.F2); + Assert.Equal((long)4965341488842559693, val.F0); Console.WriteLine("OK"); } - 
[StructLayout(LayoutKind.Sequential, Size = 40)] - struct F5_Ret + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F95_S0 { - public ulong F0; - public int F1; - public nint F2; - public float F3; - public short F4; - public ulong F5; + public ushort F0; + public long F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F95_S1 + { + public uint F0; + public short F1; + public double F2; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F95_S2 + { + public ushort F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F95_Ret_S0 + { + public short F0; + + public F95_Ret_S0(short f0) + { + F0 = f0; + } + } + + [StructLayout(LayoutKind.Sequential, Size = 14)] + struct F95_Ret + { + public nint F0; + public short F1; + public sbyte F2; + public byte F3; + public F95_Ret_S0 F4; - public F5_Ret(ulong f0, int f1, nint f2, float f3, short f4, ulong f5) + public F95_Ret(nint f0, short f1, sbyte f2, byte f3, F95_Ret_S0 f4) { F0 = f0; F1 = f1; F2 = f2; F3 = f3; F4 = f4; - F5 = f5; } } [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] - [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func51fAA6F5_RetVAEs5Int32V_s6UInt16VAIs5Int16Vs5UInt8Vs4Int8VAMSis6UInt64VAQs5Int64VA2ksimItXE_tF")] - private static extern F5_Ret SwiftCallbackFunc5(delegate* unmanaged[Swift] func, void* funcContext); + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func951fAA7F95_RetVAeA0G3_S0V_SuAA0G3_S1VAA0G3_S2VtXE_tF")] + private static extern F95_Ret SwiftCallbackFunc95(delegate* unmanaged[Swift] func, void* funcContext); [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] - private static F5_Ret SwiftCallbackFunc5Callback(int a0, ushort a1, ushort a2, short a3, byte a4, sbyte a5, byte a6, nint a7, ulong a8, ulong a9, long a10, short a11, short a12, long a13, ushort a14, byte a15, ushort a16, SwiftSelf self) + private static F95_Ret 
SwiftCallbackFunc95Callback(F95_S0 a0, nuint a1, F95_S1 a2, F95_S2 a3, SwiftSelf self) { try { - Assert.Equal((int)359602150, a0); - Assert.Equal((ushort)51495, a1); - Assert.Equal((ushort)37765, a2); - Assert.Equal((short)29410, a3); - Assert.Equal((byte)95, a4); - Assert.Equal((sbyte)-104, a5); - Assert.Equal((byte)32, a6); - Assert.Equal((nint)unchecked((nint)8530952551906271255), a7); - Assert.Equal((ulong)706266487837805024, a8); - Assert.Equal((ulong)707905209555595641, a9); - Assert.Equal((long)8386588676727568762, a10); - Assert.Equal((short)-8624, a11); - Assert.Equal((short)26113, a12); - Assert.Equal((long)8389143657021522019, a13); - Assert.Equal((ushort)13337, a14); - Assert.Equal((byte)229, a15); - Assert.Equal((ushort)51876, a16); + Assert.Equal((ushort)45388, a0.F0); + Assert.Equal((long)6620047889014935849, a0.F1); + Assert.Equal((nuint)unchecked((nuint)97365157264460373), a1); + Assert.Equal((uint)357234637, a2.F0); + Assert.Equal((short)-13720, a2.F1); + Assert.Equal((double)3313430568949662, a2.F2); + Assert.Equal((ushort)14248, a3.F0); } catch (Exception ex) { *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); } - return new F5_Ret(5224035852455624489, 493616651, unchecked((nint)3355493231962241213), 8151117, -6001, 2418751914358801711); + return new F95_Ret(unchecked((nint)6503817931835164175), 1481, 117, 79, new F95_Ret_S0(-2735)); } [Fact] - public static void TestSwiftCallbackFunc5() + public static void TestSwiftCallbackFunc95() { - Console.Write("Running SwiftCallbackFunc5: "); + Console.Write("Running SwiftCallbackFunc95: "); ExceptionDispatchInfo ex = null; - F5_Ret val = SwiftCallbackFunc5(&SwiftCallbackFunc5Callback, &ex); + F95_Ret val = SwiftCallbackFunc95(&SwiftCallbackFunc95Callback, &ex); if (ex != null) ex.Throw(); - Assert.Equal((ulong)5224035852455624489, val.F0); - Assert.Equal((int)493616651, val.F1); - Assert.Equal((nint)unchecked((nint)3355493231962241213), val.F2); - Assert.Equal((float)8151117, 
val.F3); - Assert.Equal((short)-6001, val.F4); - Assert.Equal((ulong)2418751914358801711, val.F5); + Assert.Equal((nint)unchecked((nint)6503817931835164175), val.F0); + Assert.Equal((short)1481, val.F1); + Assert.Equal((sbyte)117, val.F2); + Assert.Equal((byte)79, val.F3); + Assert.Equal((short)-2735, val.F4.F0); Console.WriteLine("OK"); } + [StructLayout(LayoutKind.Sequential, Size = 32)] + struct F96_S0 + { + public long F0; + public uint F1; + public short F2; + public double F3; + public double F4; + } + + [StructLayout(LayoutKind.Sequential, Size = 8)] + struct F96_S1 + { + public ulong F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F96_S2 + { + public float F0; + } + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] - [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func61fs6UInt16VAEs5Int32V_s6UInt32Vs6UInt64VAGs4Int8VS2is5Int16VSiAi2Ks5Int64VAItXE_tF")] - private static extern ushort SwiftCallbackFunc6(delegate* unmanaged[Swift] func, void* funcContext); + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func961fs6UInt64VAEs6UInt32V_AA6F96_S0VSfAe2gA0I3_S1VAA0I3_S2Vs5Int64VtXE_tF")] + private static extern ulong SwiftCallbackFunc96(delegate* unmanaged[Swift] func, void* funcContext); [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] - private static ushort SwiftCallbackFunc6Callback(int a0, uint a1, ulong a2, int a3, sbyte a4, nint a5, nint a6, short a7, nint a8, uint a9, ulong a10, ulong a11, long a12, uint a13, SwiftSelf self) + private static ulong SwiftCallbackFunc96Callback(uint a0, F96_S0 a1, float a2, ulong a3, uint a4, uint a5, F96_S1 a6, F96_S2 a7, long a8, SwiftSelf self) { try { - Assert.Equal((int)743741783, a0); - Assert.Equal((uint)850236948, a1); - Assert.Equal((ulong)5908745692727636656, a2); - Assert.Equal((int)2106839818, a3); - Assert.Equal((sbyte)77, a4); - Assert.Equal((nint)unchecked((nint)291907785975160065), a5); - 
Assert.Equal((nint)unchecked((nint)3560129042279209151), a6); - Assert.Equal((short)-30568, a7); - Assert.Equal((nint)unchecked((nint)5730241035812482149), a8); - Assert.Equal((uint)18625011, a9); - Assert.Equal((ulong)242340713355417257, a10); - Assert.Equal((ulong)6962175160124965670, a11); - Assert.Equal((long)2935089705514798822, a12); - Assert.Equal((uint)2051956645, a13); + Assert.Equal((uint)1103144790, a0); + Assert.Equal((long)496343164737276588, a1.F0); + Assert.Equal((uint)1541085564, a1.F1); + Assert.Equal((short)-16271, a1.F2); + Assert.Equal((double)1062575289573718, a1.F3); + Assert.Equal((double)570255786498865, a1.F4); + Assert.Equal((float)7616839, a2); + Assert.Equal((ulong)7370881799887414383, a3); + Assert.Equal((uint)390392554, a4); + Assert.Equal((uint)1492692139, a5); + Assert.Equal((ulong)1666031716012978365, a6.F0); + Assert.Equal((float)3427394, a7.F0); + Assert.Equal((long)4642371619161527189, a8); } catch (Exception ex) { *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); } - return 45160; + return 8803914823303717324; } [Fact] - public static void TestSwiftCallbackFunc6() + public static void TestSwiftCallbackFunc96() { - Console.Write("Running SwiftCallbackFunc6: "); + Console.Write("Running SwiftCallbackFunc96: "); ExceptionDispatchInfo ex = null; - ushort val = SwiftCallbackFunc6(&SwiftCallbackFunc6Callback, &ex); + ulong val = SwiftCallbackFunc96(&SwiftCallbackFunc96Callback, &ex); if (ex != null) ex.Throw(); - Assert.Equal((ushort)45160, val); + Assert.Equal((ulong)8803914823303717324, val); Console.WriteLine("OK"); } + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F97_S0 + { + public sbyte F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F97_S1 + { + public long F0; + public ulong F1; + } + + [StructLayout(LayoutKind.Sequential, Size = 16)] + struct F97_S2 + { + public byte F0; + public long F1; + } + [StructLayout(LayoutKind.Sequential, Size = 8)] - struct F7_Ret_S0 + struct 
F97_S3 { - public nint F0; + public double F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F97_Ret_S0 + { + public int F0; - public F7_Ret_S0(nint f0) + public F97_Ret_S0(int f0) { F0 = f0; } } - [StructLayout(LayoutKind.Sequential, Size = 20)] - struct F7_Ret + [StructLayout(LayoutKind.Sequential, Size = 28)] + struct F97_Ret { - public sbyte F0; - public sbyte F1; - public byte F2; - public F7_Ret_S0 F3; + public double F0; + public nuint F1; + public F97_Ret_S0 F2; + public ushort F3; public uint F4; - public F7_Ret(sbyte f0, sbyte f1, byte f2, F7_Ret_S0 f3, uint f4) + public F97_Ret(double f0, nuint f1, F97_Ret_S0 f2, ushort f3, uint f4) { F0 = f0; F1 = f1; @@ -474,150 +8424,150 @@ public F7_Ret(sbyte f0, sbyte f1, byte f2, F7_Ret_S0 f3, uint f4) } [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] - [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func71fAA6F7_RetVAEs6UInt64V_s5UInt8Vs5Int16VSutXE_tF")] - private static extern F7_Ret SwiftCallbackFunc7(delegate* unmanaged[Swift] func, void* funcContext); + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func971fAA7F97_RetVAeA0G3_S0V_AA0G3_S1VAA0G3_S2VAA0G3_S3VtXE_tF")] + private static extern F97_Ret SwiftCallbackFunc97(delegate* unmanaged[Swift] func, void* funcContext); [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] - private static F7_Ret SwiftCallbackFunc7Callback(ulong a0, byte a1, short a2, nuint a3, SwiftSelf self) + private static F97_Ret SwiftCallbackFunc97Callback(F97_S0 a0, F97_S1 a1, F97_S2 a2, F97_S3 a3, SwiftSelf self) { try { - Assert.Equal((ulong)7625368278886567558, a0); - Assert.Equal((byte)70, a1); - Assert.Equal((short)26780, a2); - Assert.Equal((nuint)unchecked((nuint)7739343395912136630), a3); + Assert.Equal((sbyte)-87, a0.F0); + Assert.Equal((long)1414208343412494909, a1.F0); + Assert.Equal((ulong)453284654311256466, a1.F1); + Assert.Equal((byte)224, a2.F0); + 
Assert.Equal((long)1712859616922087053, a2.F1); + Assert.Equal((double)3987671154739178, a3.F0); } catch (Exception ex) { *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); } - return new F7_Ret(-96, -93, 251, new F7_Ret_S0(unchecked((nint)3590193056511262571)), 13223810); + return new F97_Ret(3262802544778620, unchecked((nuint)988644880611380240), new F97_Ret_S0(1818371708), 15694, 2068394006); } [Fact] - public static void TestSwiftCallbackFunc7() + public static void TestSwiftCallbackFunc97() { - Console.Write("Running SwiftCallbackFunc7: "); + Console.Write("Running SwiftCallbackFunc97: "); ExceptionDispatchInfo ex = null; - F7_Ret val = SwiftCallbackFunc7(&SwiftCallbackFunc7Callback, &ex); + F97_Ret val = SwiftCallbackFunc97(&SwiftCallbackFunc97Callback, &ex); if (ex != null) ex.Throw(); - Assert.Equal((sbyte)-96, val.F0); - Assert.Equal((sbyte)-93, val.F1); - Assert.Equal((byte)251, val.F2); - Assert.Equal((nint)unchecked((nint)3590193056511262571), val.F3.F0); - Assert.Equal((uint)13223810, val.F4); + Assert.Equal((double)3262802544778620, val.F0); + Assert.Equal((nuint)unchecked((nuint)988644880611380240), val.F1); + Assert.Equal((int)1818371708, val.F2.F0); + Assert.Equal((ushort)15694, val.F3); + Assert.Equal((uint)2068394006, val.F4); Console.WriteLine("OK"); } + [StructLayout(LayoutKind.Sequential, Size = 4)] + struct F98_S0 + { + public int F0; + } + [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] - [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func81fs5UInt8VAESf_SutXE_tF")] - private static extern byte SwiftCallbackFunc8(delegate* unmanaged[Swift] func, void* funcContext); + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func981fS2iSf_s6UInt16VAA6F98_S0VAEtXE_tF")] + private static extern nint SwiftCallbackFunc98(delegate* unmanaged[Swift] func, void* funcContext); [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] - private static byte 
SwiftCallbackFunc8Callback(float a0, nuint a1, SwiftSelf self) + private static nint SwiftCallbackFunc98Callback(float a0, ushort a1, F98_S0 a2, ushort a3, SwiftSelf self) { try { - Assert.Equal((float)6278007, a0); - Assert.Equal((nuint)unchecked((nuint)1620979945874429615), a1); + Assert.Equal((float)2863898, a0); + Assert.Equal((ushort)37573, a1); + Assert.Equal((int)1073068257, a2.F0); + Assert.Equal((ushort)53560, a3); } catch (Exception ex) { *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); } - return 60; + return unchecked((nint)6686142382639170849); } [Fact] - public static void TestSwiftCallbackFunc8() + public static void TestSwiftCallbackFunc98() { - Console.Write("Running SwiftCallbackFunc8: "); + Console.Write("Running SwiftCallbackFunc98: "); ExceptionDispatchInfo ex = null; - byte val = SwiftCallbackFunc8(&SwiftCallbackFunc8Callback, &ex); + nint val = SwiftCallbackFunc98(&SwiftCallbackFunc98Callback, &ex); if (ex != null) ex.Throw(); - Assert.Equal((byte)60, val); + Assert.Equal((nint)unchecked((nint)6686142382639170849), val); Console.WriteLine("OK"); } - [StructLayout(LayoutKind.Sequential, Size = 26)] - struct F9_Ret + [StructLayout(LayoutKind.Sequential, Size = 20)] + struct F99_S0 { - public uint F0; - public long F1; - public ulong F2; - public ushort F3; + public nint F0; + public uint F1; + public int F2; + public uint F3; + } - public F9_Ret(uint f0, long f1, ulong f2, ushort f3) - { - F0 = f0; - F1 = f1; - F2 = f2; - F3 = f3; - } + [StructLayout(LayoutKind.Sequential, Size = 2)] + struct F99_S1 + { + public short F0; + } + + [StructLayout(LayoutKind.Sequential, Size = 1)] + struct F99_S2 + { + public byte F0; } [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvSwift) })] - [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB5Func91fAA6F9_RetVAEs4Int8V_Sis5Int16Vs5Int64VS2dSis6UInt16VAMS2fAMs6UInt32VAIs5Int32VAQs6UInt64VAiKSis5UInt8VAmISiAItXE_tF")] - private static extern F9_Ret 
SwiftCallbackFunc9(delegate* unmanaged[Swift] func, void* funcContext); + [DllImport(SwiftLib, EntryPoint = "$s22SwiftCallbackAbiStress05swiftB6Func991fs6UInt64VAEs5Int64V_SuSfs6UInt16VAA6F99_S0Vs5UInt8VSfAMs4Int8VAA0J3_S1VAA0J3_S2VtXE_tF")] + private static extern ulong SwiftCallbackFunc99(delegate* unmanaged[Swift] func, void* funcContext); [UnmanagedCallersOnly(CallConvs = new Type[] { typeof(CallConvSwift) })] - private static F9_Ret SwiftCallbackFunc9Callback(sbyte a0, nint a1, short a2, long a3, double a4, double a5, nint a6, ushort a7, ushort a8, float a9, float a10, ushort a11, uint a12, short a13, int a14, int a15, ulong a16, short a17, long a18, nint a19, byte a20, ushort a21, short a22, nint a23, short a24, SwiftSelf self) + private static ulong SwiftCallbackFunc99Callback(long a0, nuint a1, float a2, ushort a3, F99_S0 a4, byte a5, float a6, byte a7, sbyte a8, F99_S1 a9, F99_S2 a10, SwiftSelf self) { try { - Assert.Equal((sbyte)17, a0); - Assert.Equal((nint)unchecked((nint)4720638462358523954), a1); - Assert.Equal((short)30631, a2); - Assert.Equal((long)8206569929240962953, a3); - Assert.Equal((double)1359667226908383, a4); - Assert.Equal((double)3776001892555053, a5); - Assert.Equal((nint)unchecked((nint)747160900180286726), a6); - Assert.Equal((ushort)12700, a7); - Assert.Equal((ushort)53813, a8); - Assert.Equal((float)7860389, a9); - Assert.Equal((float)1879743, a10); - Assert.Equal((ushort)61400, a11); - Assert.Equal((uint)1962814337, a12); - Assert.Equal((short)17992, a13); - Assert.Equal((int)677814589, a14); - Assert.Equal((int)1019483263, a15); - Assert.Equal((ulong)6326265259403184370, a16); - Assert.Equal((short)-14633, a17); - Assert.Equal((long)4127072498763789519, a18); - Assert.Equal((nint)unchecked((nint)4008108205305320386), a19); - Assert.Equal((byte)128, a20); - Assert.Equal((ushort)21189, a21); - Assert.Equal((short)32104, a22); - Assert.Equal((nint)unchecked((nint)384827814282870543), a23); - Assert.Equal((short)20647, a24); + 
Assert.Equal((long)1152281003884062246, a0); + Assert.Equal((nuint)unchecked((nuint)2482384127373829622), a1); + Assert.Equal((float)3361150, a2); + Assert.Equal((ushort)2121, a3); + Assert.Equal((nint)unchecked((nint)4484545590050696958), a4.F0); + Assert.Equal((uint)422528630, a4.F1); + Assert.Equal((int)1418346646, a4.F2); + Assert.Equal((uint)1281567856, a4.F3); + Assert.Equal((byte)223, a5); + Assert.Equal((float)1917656, a6); + Assert.Equal((byte)103, a7); + Assert.Equal((sbyte)-46, a8); + Assert.Equal((short)14554, a9.F0); + Assert.Equal((byte)68, a10.F0); } catch (Exception ex) { *(ExceptionDispatchInfo*)self.Value = ExceptionDispatchInfo.Capture(ex); } - return new F9_Ret(189282789, 114803850982111219, 4506415416389763390, 23584); + return 8220698022338840251; } [Fact] - public static void TestSwiftCallbackFunc9() + public static void TestSwiftCallbackFunc99() { - Console.Write("Running SwiftCallbackFunc9: "); + Console.Write("Running SwiftCallbackFunc99: "); ExceptionDispatchInfo ex = null; - F9_Ret val = SwiftCallbackFunc9(&SwiftCallbackFunc9Callback, &ex); + ulong val = SwiftCallbackFunc99(&SwiftCallbackFunc99Callback, &ex); if (ex != null) ex.Throw(); - Assert.Equal((uint)189282789, val.F0); - Assert.Equal((long)114803850982111219, val.F1); - Assert.Equal((ulong)4506415416389763390, val.F2); - Assert.Equal((ushort)23584, val.F3); + Assert.Equal((ulong)8220698022338840251, val); Console.WriteLine("OK"); } diff --git a/src/tests/Interop/Swift/SwiftCallbackAbiStress/SwiftCallbackAbiStress.swift b/src/tests/Interop/Swift/SwiftCallbackAbiStress/SwiftCallbackAbiStress.swift index 344fd65a5ed2a3..3089ae2e5e0978 100644 --- a/src/tests/Interop/Swift/SwiftCallbackAbiStress/SwiftCallbackAbiStress.swift +++ b/src/tests/Interop/Swift/SwiftCallbackAbiStress/SwiftCallbackAbiStress.swift @@ -4,126 +4,3901 @@ import Foundation @frozen -public struct F0_Ret +public struct F0_S0 +{ + public let f0 : Double; + public let f1 : UInt32; + public let f2 : UInt16; +} + 
+@frozen +public struct F0_S1 +{ + public let f0 : UInt64; +} + +@frozen +public struct F0_S2 +{ + public let f0 : Float; +} + +public func swiftCallbackFunc0(f: (Int16, Int32, UInt64, UInt16, F0_S0, F0_S1, UInt8, F0_S2) -> Int32) -> Int32 { + return f(-17813, 318006528, 1195162122024233590, 60467, F0_S0(f0: 2239972725713766, f1: 1404066621, f2: 29895), F0_S1(f0: 7923486769850554262), 217, F0_S2(f0: 2497655)) +} + +@frozen +public struct F1_S0 { public let f0 : UInt16; - public let f1 : Float; - public let f2 : Int32; - public let f3 : UInt64; + public let f1 : UInt8; +} + +@frozen +public struct F1_S1 +{ + public let f0 : UInt8; + public let f1 : UInt64; + public let f2 : Int16; + public let f3 : Float; + public let f4 : Float; +} + +@frozen +public struct F1_S2_S0 +{ + public let f0 : UInt32; + public let f1 : Double; +} + +@frozen +public struct F1_S2 +{ + public let f0 : Int8; + public let f1 : UInt; + public let f2 : F1_S2_S0; + public let f3 : Int; +} + +@frozen +public struct F1_S3 +{ + public let f0 : UInt16; +} + +@frozen +public struct F1_S4 +{ + public let f0 : Int; +} + +@frozen +public struct F1_S5_S0 +{ + public let f0 : UInt32; +} + +@frozen +public struct F1_S5 +{ + public let f0 : F1_S5_S0; +} + +public func swiftCallbackFunc1(f: (Int64, Double, Int8, F1_S0, F1_S1, F1_S2, UInt8, Int8, Int64, F1_S3, UInt, F1_S4, F1_S5, Int) -> UInt8) -> UInt8 { + return f(7920511243396412395, 1396130721334528, -55, F1_S0(f0: 33758, f1: 103), F1_S1(f0: 201, f1: 7390774039746135757, f2: 14699, f3: 7235330, f4: 7189013), F1_S2(f0: 37, f1: 3310322731568932038, f2: F1_S2_S0(f0: 1100328218, f1: 1060779460203640), f3: 8325292022909418877), 137, 82, 1197537325837505041, F1_S3(f0: 46950), 8181828233622947597, F1_S4(f0: 1851182205030289056), F1_S5(f0: F1_S5_S0(f0: 1971014225)), 6437995407675718392) } -public func swiftCallbackFunc0(f: (Int16, Int32, UInt64, UInt16, Int64, Double, UInt32, UInt16, Int, UInt64) -> F0_Ret) -> F0_Ret { - return f(-17813, 318006528, 
1195162122024233590, 60467, 4587464142261794085, 2686980744237725, 331986645, 56299, 6785053689615432643, 6358078381523084952) +@frozen +public struct F2_S0 +{ + public let f0 : Int32; + public let f1 : UInt; + public let f2 : Float; } -public func swiftCallbackFunc1(f: (Double, Int8, Int32, UInt16, UInt8, Double, UInt8, UInt64, Int16, Float, Float, UInt64, Int8) -> UInt) -> UInt { - return f(3867437130564654, -64, 31081182, 20316, 73, 3543740592144911, 250, 6680393408153342744, 23758, 7189013, 5438196, 3310322731568932038, 3) +@frozen +public struct F2_S1_S0 +{ + public let f0 : UInt16; } @frozen -public struct F2_Ret_S0 +public struct F2_S1 { public let f0 : Int64; + public let f1 : UInt16; + public let f2 : F2_S1_S0; + public let f3 : Int; + public let f4 : Double; +} + +@frozen +public struct F2_S2 +{ + public let f0 : Float; public let f1 : Int32; + public let f2 : UInt16; + public let f3 : Int8; } @frozen -public struct F2_Ret +public struct F2_S3_S0 { - public let f0 : F2_Ret_S0; - public let f1 : Int16; + public let f0 : Int8; +} + +@frozen +public struct F2_S3 +{ + public let f0 : F2_S3_S0; +} + +public func swiftCallbackFunc2(f: (F2_S0, F2_S1, F2_S2, Float, UInt64, F2_S3) -> Int8) -> Int8 { + return f(F2_S0(f0: 1860840185, f1: 5407074783834178811, f2: 6261766), F2_S1(f0: 4033972792915237065, f1: 22825, f2: F2_S1_S0(f0: 44574), f3: 4536911485304731630, f4: 4282944015147385), F2_S2(f0: 2579193, f1: 586252933, f2: 47002, f3: 71), 3225929, 3599444831393612282, F2_S3(f0: F2_S3_S0(f0: 13))) +} + +@frozen +public struct F3_S0_S0 +{ + public let f0 : UInt; +} + +@frozen +public struct F3_S0 +{ + public let f0 : F3_S0_S0; } -public func swiftCallbackFunc2(f: (UInt, UInt8) -> F2_Ret) -> F2_Ret { - return f(2153637757371267722, 150) +@frozen +public struct F3_S1 +{ + public let f0 : UInt32; + public let f1 : Int64; } @frozen -public struct F3_Ret_S0 +public struct F3_S2_S0 +{ + public let f0 : Int16; + public let f1 : UInt8; +} + +@frozen +public struct F3_S2 +{ + 
public let f0 : F3_S2_S0; + public let f1 : Int8; + public let f2 : UInt8; +} + +@frozen +public struct F3_S3 +{ + public let f0 : UInt64; + public let f1 : Int64; +} + +@frozen +public struct F3_S4 { public let f0 : Int16; - public let f1 : Int32; - public let f2 : UInt16; } @frozen public struct F3_Ret { - public let f0 : Int; - public let f1 : F3_Ret_S0; - public let f2 : UInt; - public let f3 : Int8; + public let f0 : UInt16; + public let f1 : UInt8; + public let f2 : UInt16; + public let f3 : Float; } -public func swiftCallbackFunc3(f: (UInt16, UInt, UInt, Int, Float, UInt16) -> F3_Ret) -> F3_Ret { - return f(45065, 8506742096411295359, 8619375465417625458, 5288917394772427257, 5678138, 33467) +public func swiftCallbackFunc3(f: (F3_S0, Float, UInt16, F3_S1, UInt16, Int32, F3_S2, Int, F3_S3, F3_S4) -> F3_Ret) -> F3_Ret { + return f(F3_S0(f0: F3_S0_S0(f0: 5610153900386943274)), 7736836, 31355, F3_S1(f0: 1159208572, f1: 2707818827451590538), 37580, 1453603418, F3_S2(f0: F3_S2_S0(f0: 699, f1: 46), f1: -125, f2: 92), 94557706586779834, F3_S3(f0: 2368015527878194540, f1: 5026404532195049271), F3_S4(f0: 21807)) +} + +@frozen +public struct F4_S0_S0 +{ + public let f0 : UInt32; +} + +@frozen +public struct F4_S0 +{ + public let f0 : F4_S0_S0; + public let f1 : Float; +} + +@frozen +public struct F4_Ret_S0 +{ + public let f0 : Int; } @frozen public struct F4_Ret { - public let f0 : UInt64; + public let f0 : Int32; + public let f1 : F4_Ret_S0; + public let f2 : Int; + public let f3 : Int16; + public let f4 : Int; + public let f5 : UInt32; +} + +public func swiftCallbackFunc4(f: (Double, F4_S0, UInt8, Int32, UInt32) -> F4_Ret) -> F4_Ret { + return f(4282972206489588, F4_S0(f0: F4_S0_S0(f0: 611688063), f1: 877466), 53, 965123506, 1301067653) +} + +@frozen +public struct F5_S0 +{ + public let f0 : UInt; public let f1 : UInt32; - public let f2 : UInt64; } -public func swiftCallbackFunc4(f: (Int64, UInt16, Int32, UInt16, Int, Double, UInt16, Float, Int32, UInt16, Int8, 
Float, UInt64, Int16, Double, Int8, Int8, Int32, Int, Int32, Int64, Int64) -> F4_Ret) -> F4_Ret { - return f(8771527078890676837, 18667, 224631333, 13819, 8888237425788084647, 2677321682649925, 50276, 2703201, 545337834, 11190, 112, 4053251, 7107857019164433129, -3092, 2176685406663423, 57, -61, 866840318, 5927291145767969522, 1818333546, 6272248211765159948, 6555966806846053216) +@frozen +public struct F5_S1_S0 +{ + public let f0 : Int; + public let f1 : UInt32; } @frozen -public struct F5_Ret +public struct F5_S1_S1 { - public let f0 : UInt64; - public let f1 : Int32; + public let f0 : Float; +} + +@frozen +public struct F5_S1 +{ + public let f0 : F5_S1_S0; + public let f1 : F5_S1_S1; +} + +@frozen +public struct F5_S2 +{ + public let f0 : Double; + public let f1 : Int8; public let f2 : Int; - public let f3 : Float; - public let f4 : Int16; - public let f5 : UInt64; } -public func swiftCallbackFunc5(f: (Int32, UInt16, UInt16, Int16, UInt8, Int8, UInt8, Int, UInt64, UInt64, Int64, Int16, Int16, Int64, UInt16, UInt8, UInt16) -> F5_Ret) -> F5_Ret { - return f(359602150, 51495, 37765, 29410, 95, -104, 32, 8530952551906271255, 706266487837805024, 707905209555595641, 8386588676727568762, -8624, 26113, 8389143657021522019, 13337, 229, 51876) +@frozen +public struct F5_S3 +{ + public let f0 : Int64; + public let f1 : Double; +} + +@frozen +public struct F5_S4 +{ + public let f0 : UInt16; +} + +@frozen +public struct F5_Ret +{ + public let f0 : Int16; + public let f1 : Int32; + public let f2 : Int32; + public let f3 : UInt64; + public let f4 : Int16; } -public func swiftCallbackFunc6(f: (Int32, UInt32, UInt64, Int32, Int8, Int, Int, Int16, Int, UInt32, UInt64, UInt64, Int64, UInt32) -> UInt16) -> UInt16 { - return f(743741783, 850236948, 5908745692727636656, 2106839818, 77, 291907785975160065, 3560129042279209151, -30568, 5730241035812482149, 18625011, 242340713355417257, 6962175160124965670, 2935089705514798822, 2051956645) +public func swiftCallbackFunc5(f: (UInt8, 
Int16, UInt64, UInt, UInt, UInt64, UInt8, F5_S0, Int8, Int8, F5_S1, F5_S2, F5_S3, Double, F5_S4, UInt16, Float, Float, UInt16) -> F5_Ret) -> F5_Ret { + return f(42, 18727, 3436765034579128495, 6305137336506323506, 6280137078630028944, 6252650621827449809, 129, F5_S0(f0: 6879980973426111678, f1: 1952654577), -34, 102, F5_S1(f0: F5_S1_S0(f0: 8389143657021522019, f1: 437030241), f1: F5_S1_S1(f0: 7522798)), F5_S2(f0: 523364011167530, f1: 16, f2: 3823439046574037759), F5_S3(f0: 3767260839267771462, f1: 1181031208183008), 2338830539621828, F5_S4(f0: 36276), 41286, 6683955, 6399917, 767) } @frozen -public struct F7_Ret_S0 +public struct F6_S0_S0 { - public let f0 : Int; + public let f0 : Float; } @frozen -public struct F7_Ret +public struct F6_S0 { public let f0 : Int8; public let f1 : Int8; - public let f2 : UInt8; - public let f3 : F7_Ret_S0; - public let f4 : UInt32; + public let f2 : Int32; + public let f3 : F6_S0_S0; +} + +@frozen +public struct F6_S1 +{ + public let f0 : Int32; + public let f1 : UInt64; + public let f2 : UInt64; + public let f3 : UInt32; } -public func swiftCallbackFunc7(f: (UInt64, UInt8, Int16, UInt) -> F7_Ret) -> F7_Ret { - return f(7625368278886567558, 70, 26780, 7739343395912136630) +@frozen +public struct F6_S2 +{ + public let f0 : Int64; + public let f1 : Int16; + public let f2 : Int8; } -public func swiftCallbackFunc8(f: (Float, UInt) -> UInt8) -> UInt8 { - return f(6278007, 1620979945874429615) +@frozen +public struct F6_S3 +{ + public let f0 : Float; } @frozen -public struct F9_Ret +public struct F6_Ret_S0 { - public let f0 : UInt32; + public let f0 : Int64; + public let f1 : UInt32; +} + +@frozen +public struct F6_Ret +{ + public let f0 : F6_Ret_S0; + public let f1 : UInt64; + public let f2 : Float; + public let f3 : Int8; +} + +public func swiftCallbackFunc6(f: (Float, F6_S0, Int64, Int8, UInt16, UInt, UInt16, UInt64, F6_S1, Int16, F6_S2, F6_S3, UInt16) -> F6_Ret) -> F6_Ret { + return f(2905241, F6_S0(f0: -27, f1: -77, f2: 1315779092, 
f3: F6_S0_S0(f0: 5373970)), 7022244764256789748, -110, 2074, 3560129042279209151, 2200, 5730241035812482149, F6_S1(f0: 18625011, f1: 242340713355417257, f2: 6962175160124965670, f3: 1983617839), -28374, F6_S2(f0: 6355748563312062178, f1: -23189, f2: 81), F6_S3(f0: 4547677), 6397) +} + +@frozen +public struct F7_S0 +{ + public let f0 : Float; public let f1 : Int64; - public let f2 : UInt64; - public let f3 : UInt16; + public let f2 : UInt; +} + +@frozen +public struct F7_S1 +{ + public let f0 : Int16; + public let f1 : UInt32; + public let f2 : UInt32; +} + +public func swiftCallbackFunc7(f: (Int64, UInt8, Double, UInt16, F7_S0, UInt8, Double, UInt32, F7_S1, Int32, Int32, Int, Int16, UInt16, Int, UInt64, UInt8, Int16) -> UInt16) -> UInt16 { + return f(7625368278886567558, 70, 2146971972122530, 54991, F7_S0(f0: 1072132, f1: 3890459003549150599, f2: 56791000421908673), 227, 3248250571953113, 1138780108, F7_S1(f0: -22670, f1: 1796712687, f2: 304251857), 1288765591, 1382721790, 6746417265635727373, -15600, 47575, 7200793040165597188, 2304985873826892392, 99, -9993) +} + +@frozen +public struct F8_S0 +{ + public let f0 : Int16; + public let f1 : Int16; + public let f2 : UInt; +} + +@frozen +public struct F8_S1 +{ + public let f0 : Int64; +} + +@frozen +public struct F8_Ret_S0 +{ + public let f0 : Int32; + public let f1 : UInt; + public let f2 : Int; +} + +@frozen +public struct F8_Ret +{ + public let f0 : Int64; + public let f1 : F8_Ret_S0; + public let f2 : Int; + public let f3 : UInt32; +} + +public func swiftCallbackFunc8(f: (F8_S0, F8_S1) -> F8_Ret) -> F8_Ret { + return f(F8_S0(f0: 16278, f1: -31563, f2: 2171308312325435543), F8_S1(f0: 8923668560896309835)) +} + +@frozen +public struct F9_S0_S0 +{ + public let f0 : UInt8; +} + +@frozen +public struct F9_S0 +{ + public let f0 : F9_S0_S0; + public let f1 : Int16; +} + +@frozen +public struct F9_S1_S0 +{ + public let f0 : Int64; + public let f1 : Int64; +} + +@frozen +public struct F9_S1 +{ + public let f0 : Int; + 
public let f1 : F9_S1_S0; + public let f2 : Float; +} + +@frozen +public struct F9_S2 +{ + public let f0 : UInt64; + public let f1 : Double; + public let f2 : Int16; + public let f3 : Int8; +} + +@frozen +public struct F9_S3_S0_S0 +{ + public let f0 : UInt64; +} + +@frozen +public struct F9_S3_S0 +{ + public let f0 : F9_S3_S0_S0; +} + +@frozen +public struct F9_S3 +{ + public let f0 : Int8; + public let f1 : F9_S3_S0; +} + +@frozen +public struct F9_S4_S0 +{ + public let f0 : UInt64; +} + +@frozen +public struct F9_S4 +{ + public let f0 : F9_S4_S0; + public let f1 : Int8; +} + +@frozen +public struct F9_S5_S0 +{ + public let f0 : UInt32; +} + +@frozen +public struct F9_S5 +{ + public let f0 : UInt32; + public let f1 : F9_S5_S0; +} + +@frozen +public struct F9_S6 +{ + public let f0 : Double; +} + +public func swiftCallbackFunc9(f: (Int8, UInt8, Int64, F9_S0, F9_S1, F9_S2, Double, F9_S3, F9_S4, Double, F9_S5, F9_S6) -> UInt16) -> UInt16 { + return f(17, 104, 8922699691031703191, F9_S0(f0: F9_S0_S0(f0: 123), f1: 31706), F9_S1(f0: 1804058604961822948, f1: F9_S1_S0(f0: 8772179036715198777, f1: 3320511540592563328), f2: 679540), F9_S2(f0: 8642590829466497926, f1: 4116322155252965, f2: 17992, f3: -48), 414017537937894, F9_S3(f0: 47, f1: F9_S3_S0(f0: F9_S3_S0_S0(f0: 7576380984563129085))), F9_S4(f0: F9_S4_S0(f0: 1356827400304742803), f1: -17), 4458031413035521, F9_S5(f0: 352075098, f1: F9_S5_S0(f0: 1840980094)), F9_S6(f0: 396957263013930)) +} + +@frozen +public struct F10_Ret +{ + public let f0 : Int64; + public let f1 : UInt32; + public let f2 : UInt16; + public let f3 : UInt32; +} + +public func swiftCallbackFunc10(f: (Int16) -> F10_Ret) -> F10_Ret { + return f(-7168) +} + +@frozen +public struct F11_S0_S0 +{ + public let f0 : Int8; +} + +@frozen +public struct F11_S0 +{ + public let f0 : UInt32; + public let f1 : F11_S0_S0; + public let f2 : UInt; + public let f3 : Int32; + public let f4 : Int64; +} + +@frozen +public struct F11_S1_S0 +{ + public let f0 : UInt16; +} + 
+@frozen +public struct F11_S1 +{ + public let f0 : F11_S1_S0; + public let f1 : Int16; + public let f2 : UInt32; + public let f3 : Int16; +} + +@frozen +public struct F11_S2 +{ + public let f0 : UInt8; +} + +@frozen +public struct F11_Ret +{ + public let f0 : Int16; + public let f1 : Int16; + public let f2 : UInt8; + public let f3 : Int64; +} + +public func swiftCallbackFunc11(f: (UInt32, UInt, UInt64, Int16, F11_S0, Float, Int8, UInt16, F11_S1, UInt32, Int64, UInt32, F11_S2) -> F11_Ret) -> F11_Ret { + return f(454751144, 1696592254558667577, 5831587230944972245, 15352, F11_S0(f0: 1306601347, f1: F11_S0_S0(f0: 123), f2: 3064471520018434938, f3: 272956246, f4: 3683518307106722029), 5606122, -126, 50801, F11_S1(f0: F11_S1_S0(f0: 63467), f1: -31828, f2: 2117176776, f3: -27265), 1879606687, 4981244336430926707, 1159924856, F11_S2(f0: 29)) +} + +@frozen +public struct F12_S0 +{ + public let f0 : UInt64; + public let f1 : Int8; +} + +@frozen +public struct F12_S1_S0_S0 +{ + public let f0 : UInt64; +} + +@frozen +public struct F12_S1_S0 +{ + public let f0 : F12_S1_S0_S0; +} + +@frozen +public struct F12_S1 +{ + public let f0 : UInt16; + public let f1 : UInt32; + public let f2 : F12_S1_S0; +} + +@frozen +public struct F12_Ret +{ + public let f0 : UInt64; + public let f1 : Int; +} + +public func swiftCallbackFunc12(f: (F12_S0, Int16, UInt64, F12_S1, Int8) -> F12_Ret) -> F12_Ret { + return f(F12_S0(f0: 3236871137735400659, f1: -123), -22828, 2132557792366642035, F12_S1(f0: 42520, f1: 879349060, f2: F12_S1_S0(f0: F12_S1_S0_S0(f0: 5694370973277919380))), -75) +} + +@frozen +public struct F13_S0_S0 +{ + public let f0 : Int64; + public let f1 : Int64; +} + +@frozen +public struct F13_S0 +{ + public let f0 : F13_S0_S0; + public let f1 : Float; + public let f2 : Int16; +} + +@frozen +public struct F13_S1 +{ + public let f0 : Int; + public let f1 : UInt64; +} + +@frozen +public struct F13_S2_S0 +{ + public let f0 : UInt8; +} + +@frozen +public struct F13_S2 +{ + public let f0 : 
F13_S2_S0; + public let f1 : Double; +} + +@frozen +public struct F13_S3 +{ + public let f0 : Float; + public let f1 : Int8; +} + +@frozen +public struct F13_S4 +{ + public let f0 : Int; +} + +public func swiftCallbackFunc13(f: (F13_S0, Int32, Int, UInt16, UInt, F13_S1, F13_S2, Int, Double, Int8, Float, Int, F13_S3, UInt, F13_S4) -> Double) -> Double { + return f(F13_S0(f0: F13_S0_S0(f0: 9003727031576598067, f1: 8527798284445940986), f1: 3585628, f2: -12520), 1510815104, 5883331525294982326, 60738, 5291799143932627546, F13_S1(f0: 1949276559361384602, f1: 876048527237138968), F13_S2(f0: F13_S2_S0(f0: 67), f1: 2455575228564859), 2321408806345977320, 12750323283778, 46, 6774339, 5121910967292140178, F13_S3(f0: 8254279, f1: -7), 7533347207018595125, F13_S4(f0: 6605448167191082938)) +} + +@frozen +public struct F14_S0 +{ + public let f0 : Int8; + public let f1 : Float; + public let f2 : UInt16; +} + +@frozen +public struct F14_S1 +{ + public let f0 : UInt64; + public let f1 : UInt64; +} + +public func swiftCallbackFunc14(f: (Int64, F14_S0, Int8, UInt64, F14_S1, Int) -> Int64) -> Int64 { + return f(5547219684656041875, F14_S0(f0: -39, f1: 5768837, f2: 53063), -102, 5745438709817040873, F14_S1(f0: 2178706453119907411, f1: 4424726479787355131), 5693881223150438553) +} + +@frozen +public struct F15_S0 +{ + public let f0 : UInt32; +} + +@frozen +public struct F15_S1 +{ + public let f0 : Int; + public let f1 : UInt32; + public let f2 : UInt8; + public let f3 : Int16; +} + +@frozen +public struct F15_S2 +{ + public let f0 : Int8; + public let f1 : UInt64; + public let f2 : Int64; + public let f3 : UInt8; +} + +@frozen +public struct F15_S3 +{ + public let f0 : Double; +} + +public func swiftCallbackFunc15(f: (UInt8, UInt16, UInt64, UInt64, Int8, UInt, Double, Float, Int, F15_S0, F15_S1, UInt16, F15_S2, UInt8, F15_S3) -> Int) -> Int { + return f(0, 31081, 8814881608835743979, 4283853687332682681, 80, 7895994601265649979, 1855521542692398, 3235683, 215122646177738904, F15_S0(f0: 
2044750195), F15_S1(f0: 1772412898183620625, f1: 131256973, f2: 153, f3: 25281), 50965, F15_S2(f0: -83, f1: 7751486385861474282, f2: 3744400479301818340, f3: 150), 179, F15_S3(f0: 3108143600787174)) +} + +@frozen +public struct F16_S0 +{ + public let f0 : Int8; + public let f1 : Int32; + public let f2 : UInt16; + public let f3 : UInt16; + public let f4 : UInt32; +} + +@frozen +public struct F16_S1 +{ + public let f0 : UInt16; + public let f1 : Int8; + public let f2 : UInt8; + public let f3 : Int; + public let f4 : Int; +} + +@frozen +public struct F16_S2_S0 +{ + public let f0 : Int8; +} + +@frozen +public struct F16_S2 +{ + public let f0 : Int32; + public let f1 : Int32; + public let f2 : UInt32; + public let f3 : UInt8; + public let f4 : F16_S2_S0; +} + +@frozen +public struct F16_S3 +{ + public let f0 : Int16; + public let f1 : Double; + public let f2 : Double; + public let f3 : Int32; +} + +public func swiftCallbackFunc16(f: (F16_S0, Int16, Float, F16_S1, F16_S2, UInt64, F16_S3, UInt) -> Int8) -> Int8 { + return f(F16_S0(f0: -59, f1: 1181591186, f2: 44834, f3: 28664, f4: 404461767), 2482, 2997348, F16_S1(f0: 22423, f1: -106, f2: 182, f3: 3784074551275084420, f4: 7092934571108982079), F16_S2(f0: 1835134709, f1: 246067261, f2: 1986526591, f3: 24, f4: F16_S2_S0(f0: -112)), 1465053746911704089, F16_S3(f0: -27636, f1: 1896887612303356, f2: 4263157082840190, f3: 774653659), 3755775782607884861) +} + +@frozen +public struct F17_S0 +{ + public let f0 : Int32; + public let f1 : UInt; +} + +@frozen +public struct F17_S1_S0 +{ + public let f0 : Double; + public let f1 : UInt32; +} + +@frozen +public struct F17_S1 +{ + public let f0 : F17_S1_S0; + public let f1 : Int32; + public let f2 : UInt8; +} + +@frozen +public struct F17_S2 +{ + public let f0 : UInt32; +} + +public func swiftCallbackFunc17(f: (UInt32, F17_S0, F17_S1, Double, UInt64, F17_S2) -> Double) -> Double { + return f(201081002, F17_S0(f0: 2018751226, f1: 8488544433072104028), F17_S1(f0: F17_S1_S0(f0: 
1190765430157980, f1: 70252071), f1: 1297775609, f2: 160), 4290084351352688, 4738339757002694731, F17_S2(f0: 1829312773)) +} + +@frozen +public struct F18_S0 +{ + public let f0 : Int8; +} + +@frozen +public struct F18_S1 +{ + public let f0 : UInt16; + public let f1 : Int16; + public let f2 : Double; + public let f3 : UInt; +} + +@frozen +public struct F18_S2 +{ + public let f0 : Int; +} + +@frozen +public struct F18_Ret_S0 +{ + public let f0 : Int16; +} + +@frozen +public struct F18_Ret +{ + public let f0 : F18_Ret_S0; +} + +public func swiftCallbackFunc18(f: (F18_S0, F18_S1, F18_S2, UInt, UInt32, Int64, Int16, Double) -> F18_Ret) -> F18_Ret { + return f(F18_S0(f0: 106), F18_S1(f0: 21619, f1: -4350, f2: 3457288266203248, f3: 9020447812661292883), F18_S2(f0: 2317132584983719004), 7379425918918939512, 2055208746, 1042861174364145790, 28457, 1799004152435515) +} + +@frozen +public struct F19_S0 +{ + public let f0 : Int16; + public let f1 : Int8; + public let f2 : Float; +} + +@frozen +public struct F19_S1 +{ + public let f0 : Int64; + public let f1 : UInt16; +} + +@frozen +public struct F19_S2 +{ + public let f0 : UInt64; + public let f1 : Int64; +} + +@frozen +public struct F19_S3 +{ + public let f0 : UInt32; + public let f1 : Int32; +} + +@frozen +public struct F19_Ret_S0 +{ + public let f0 : Int64; +} + +@frozen +public struct F19_Ret +{ + public let f0 : UInt32; + public let f1 : Int64; + public let f2 : UInt16; + public let f3 : F19_Ret_S0; + public let f4 : Double; + public let f5 : Double; + public let f6 : Double; +} + +public func swiftCallbackFunc19(f: (Int64, UInt8, F19_S0, Int, F19_S1, Int32, Int32, UInt, UInt64, F19_S2, UInt16, F19_S3, Int8, Int64) -> F19_Ret) -> F19_Ret { + return f(7456120134117592143, 114, F19_S0(f0: -7583, f1: 97, f2: 2768322), 3605245176125291560, F19_S1(f0: 4445885313084714470, f1: 15810), 1179699879, 109603412, 6521628547431964799, 7687430644226018854, F19_S2(f0: 8464855230956039883, f1: 861462819289140037), 26519, F19_S3(f0: 
1864602741, f1: 397176384), 81, 4909173176891211442) +} + +@frozen +public struct F20_S0_S0 +{ + public let f0 : UInt16; +} + +@frozen +public struct F20_S0 +{ + public let f0 : Int16; + public let f1 : UInt; + public let f2 : F20_S0_S0; +} + +@frozen +public struct F20_S1_S0 +{ + public let f0 : Float; +} + +@frozen +public struct F20_S1 +{ + public let f0 : Int64; + public let f1 : UInt; + public let f2 : F20_S1_S0; + public let f3 : Int64; + public let f4 : Int32; +} + +@frozen +public struct F20_S2 +{ + public let f0 : UInt32; +} + +@frozen +public struct F20_Ret +{ + public let f0 : UInt16; + public let f1 : UInt16; + public let f2 : Double; + public let f3 : Int16; + public let f4 : Double; +} + +public func swiftCallbackFunc20(f: (F20_S0, F20_S1, Float, Float, Int8, F20_S2, Float) -> F20_Ret) -> F20_Ret { + return f(F20_S0(f0: 28858, f1: 7024100299344418039, f2: F20_S0_S0(f0: 13025)), F20_S1(f0: 7900431324553135989, f1: 8131425055682506706, f2: F20_S1_S0(f0: 3884322), f3: 605453501265278638, f4: 353756684), 622319, 1401604, -101, F20_S2(f0: 1355570413), 2912776) +} + +@frozen +public struct F21_S0 +{ + public let f0 : Double; + public let f1 : UInt64; +} + +@frozen +public struct F21_S1 +{ + public let f0 : UInt16; +} + +@frozen +public struct F21_Ret +{ + public let f0 : UInt16; + public let f1 : UInt32; + public let f2 : Int64; +} + +public func swiftCallbackFunc21(f: (Int32, Int16, F21_S0, Int32, F21_S1, Int64, UInt32, Int64, UInt8, UInt16) -> F21_Ret) -> F21_Ret { + return f(256017319, 14555, F21_S0(f0: 2102091966108033, f1: 8617538752301505079), 834677431, F21_S1(f0: 7043), 7166819734655141128, 965538086, 3827752442102685645, 110, 33646) +} + +@frozen +public struct F22_S0 +{ + public let f0 : Int; + public let f1 : Float; + public let f2 : Double; +} + +@frozen +public struct F22_S1 +{ + public let f0 : UInt; +} + +@frozen +public struct F22_S2 +{ + public let f0 : Int32; + public let f1 : Double; + public let f2 : Float; + public let f3 : Int16; + 
public let f4 : UInt16; +} + +@frozen +public struct F22_S3 +{ + public let f0 : Int64; + public let f1 : UInt16; +} + +@frozen +public struct F22_S4 +{ + public let f0 : Double; + public let f1 : UInt16; +} + +@frozen +public struct F22_S5 +{ + public let f0 : UInt32; + public let f1 : Int16; +} + +@frozen +public struct F22_S6 +{ + public let f0 : Float; +} + +@frozen +public struct F22_Ret +{ + public let f0 : UInt16; + public let f1 : Int16; + public let f2 : UInt; +} + +public func swiftCallbackFunc22(f: (Int32, F22_S0, F22_S1, F22_S2, F22_S3, Int8, F22_S4, UInt8, UInt16, Int64, F22_S5, Int64, Float, F22_S6, UInt16) -> F22_Ret) -> F22_Ret { + return f(640156952, F22_S0(f0: 824774470287401457, f1: 6163704, f2: 54328782764685), F22_S1(f0: 1679730195865415747), F22_S2(f0: 1462995665, f1: 2554087365600344, f2: 8193295, f3: 16765, f4: 45388), F22_S3(f0: 5560492364570389430, f1: 48308), 71, F22_S4(f0: 1639169280741045, f1: 12045), 217, 62917, 1465918945905384332, F22_S5(f0: 1364750179, f1: 3311), 9003480567517966914, 2157327, F22_S6(f0: 6647392), 1760) +} + +@frozen +public struct F23_S0 +{ + public let f0 : Int; +} + +@frozen +public struct F23_S1 +{ + public let f0 : Int; +} + +public func swiftCallbackFunc23(f: (UInt, UInt8, Int8, UInt8, UInt8, F23_S0, UInt, F23_S1, Double) -> Double) -> Double { + return f(5779410841248940897, 192, -128, 133, 20, F23_S0(f0: 2959916071636885436), 3651155214497129159, F23_S1(f0: 8141565342203061885), 1465425469608034) +} + +@frozen +public struct F24_S0 +{ + public let f0 : Int8; + public let f1 : UInt8; + public let f2 : UInt64; + public let f3 : UInt32; +} + +@frozen +public struct F24_S1 +{ + public let f0 : UInt16; +} + +@frozen +public struct F24_S2_S0 +{ + public let f0 : UInt16; + public let f1 : UInt32; +} + +@frozen +public struct F24_S2_S1 +{ + public let f0 : Int64; +} + +@frozen +public struct F24_S2 +{ + public let f0 : Int; + public let f1 : UInt32; + public let f2 : F24_S2_S0; + public let f3 : F24_S2_S1; +} + 
+@frozen +public struct F24_S3 +{ + public let f0 : Int16; + public let f1 : Float; + public let f2 : Int64; +} + +@frozen +public struct F24_S4 +{ + public let f0 : UInt8; +} + +public func swiftCallbackFunc24(f: (Int32, UInt, F24_S0, UInt16, F24_S1, Int8, F24_S2, UInt64, UInt64, F24_S3, Double, F24_S4) -> Float) -> Float { + return f(1710754874, 6447433131978039331, F24_S0(f0: -92, f1: 181, f2: 3710374263631495948, f3: 257210428), 6631, F24_S1(f0: 2303), 15, F24_S2(f0: 2509049432824972381, f1: 616918672, f2: F24_S2_S0(f0: 50635, f1: 1337844540), f3: F24_S2_S1(f0: 335964796567786281)), 1114365571136806382, 8988425145801188208, F24_S3(f0: 31969, f1: 3008861, f2: 5466306080595269107), 2027780227887952, F24_S4(f0: 234)) +} + +@frozen +public struct F25_S0 +{ + public let f0 : UInt; +} + +@frozen +public struct F25_S1 +{ + public let f0 : Float; + public let f1 : Int8; + public let f2 : Float; + public let f3 : Int; +} + +@frozen +public struct F25_S2 +{ + public let f0 : UInt; + public let f1 : UInt; + public let f2 : Int64; + public let f3 : UInt8; +} + +@frozen +public struct F25_S3 +{ + public let f0 : Float; +} + +@frozen +public struct F25_S4 +{ + public let f0 : Int8; +} + +@frozen +public struct F25_Ret +{ + public let f0 : UInt64; + public let f1 : Int64; + public let f2 : UInt8; + public let f3 : UInt16; +} + +public func swiftCallbackFunc25(f: (F25_S0, UInt16, UInt, F25_S1, Int16, F25_S2, UInt64, UInt64, UInt64, F25_S3, F25_S4) -> F25_Ret) -> F25_Ret { + return f(F25_S0(f0: 6077761381429658786), 2300, 3498354181807010234, F25_S1(f0: 5360721, f1: -40, f2: 109485, f3: 2311625789899959825), -28395, F25_S2(f0: 8729509817732080529, f1: 860365359368130822, f2: 7498894262834346040, f3: 218), 961687210282504701, 7184177441364400868, 8389319500274436977, F25_S3(f0: 4437173), F25_S4(f0: -107)) +} + +@frozen +public struct F26_S0 +{ + public let f0 : Int8; + public let f1 : Int; + public let f2 : UInt8; + public let f3 : UInt8; +} + +@frozen +public struct F26_S1_S0 
+{ + public let f0 : UInt64; +} + +@frozen +public struct F26_S1 +{ + public let f0 : Int8; + public let f1 : Int32; + public let f2 : Int16; + public let f3 : F26_S1_S0; +} + +@frozen +public struct F26_S2 +{ + public let f0 : Int64; +} + +@frozen +public struct F26_S3 +{ + public let f0 : UInt8; +} + +@frozen +public struct F26_Ret +{ + public let f0 : UInt; + public let f1 : UInt8; +} + +public func swiftCallbackFunc26(f: (Int8, UInt8, UInt32, F26_S0, F26_S1, F26_S2, F26_S3) -> F26_Ret) -> F26_Ret { + return f(-16, 220, 72386567, F26_S0(f0: -33, f1: 6488877286424796715, f2: 143, f3: 74), F26_S1(f0: 104, f1: 1719453315, f2: 20771, f3: F26_S1_S0(f0: 3636117595999837800)), F26_S2(f0: 2279530426119665839), F26_S3(f0: 207)) +} + +@frozen +public struct F27_S0 +{ + public let f0 : Int16; +} + +@frozen +public struct F27_S1_S0 +{ + public let f0 : UInt16; + public let f1 : Int8; +} + +@frozen +public struct F27_S1 +{ + public let f0 : Int64; + public let f1 : F27_S1_S0; + public let f2 : Float; +} + +@frozen +public struct F27_S2 +{ + public let f0 : UInt64; + public let f1 : Int8; + public let f2 : UInt32; + public let f3 : Int64; +} + +@frozen +public struct F27_S3_S0 +{ + public let f0 : UInt16; +} + +@frozen +public struct F27_S3 +{ + public let f0 : F27_S3_S0; +} + +public func swiftCallbackFunc27(f: (UInt64, UInt8, F27_S0, UInt8, UInt8, F27_S1, Int32, F27_S2, Int, UInt32, F27_S3) -> Float) -> Float { + return f(4847421047018330189, 214, F27_S0(f0: 31313), 207, 174, F27_S1(f0: 4476120319602257660, f1: F27_S1_S0(f0: 26662, f1: -55), f2: 70666), 1340306103, F27_S2(f0: 2772939788297637999, f1: -65, f2: 7500441, f3: 4926907273817562134), 5862689255099071258, 1077270996, F27_S3(f0: F27_S3_S0(f0: 35167))) +} + +@frozen +public struct F28_S0 +{ + public let f0 : UInt64; + public let f1 : Int8; +} + +@frozen +public struct F28_S1 +{ + public let f0 : Int64; + public let f1 : UInt; + public let f2 : Int; + public let f3 : Int32; +} + +@frozen +public struct F28_S2 +{ + 
public let f0 : Int; +} + +@frozen +public struct F28_S3 +{ + public let f0 : Int64; +} + +@frozen +public struct F28_Ret_S0 +{ + public let f0 : Float; +} + +@frozen +public struct F28_Ret +{ + public let f0 : F28_Ret_S0; + public let f1 : UInt16; +} + +public func swiftCallbackFunc28(f: (UInt32, UInt16, Int8, Int8, UInt16, Float, F28_S0, Double, UInt64, F28_S1, F28_S2, F28_S3) -> F28_Ret) -> F28_Ret { + return f(893827094, 38017, -90, -1, 16109, 5844449, F28_S0(f0: 176269147098539470, f1: 23), 1431426259441210, 6103261251702315645, F28_S1(f0: 3776818122826483419, f1: 9181420263296840471, f2: 3281861424961082542, f3: 1442905253), F28_S2(f0: 8760009193798370900), F28_S3(f0: 7119917900929398683)) +} + +@frozen +public struct F29_S0 +{ + public let f0 : UInt8; + public let f1 : Double; + public let f2 : UInt16; +} + +@frozen +public struct F29_S1 +{ + public let f0 : UInt32; + public let f1 : Int; + public let f2 : UInt64; + public let f3 : UInt32; +} + +@frozen +public struct F29_S2 +{ + public let f0 : Int32; +} + +@frozen +public struct F29_S3 +{ + public let f0 : UInt32; + public let f1 : UInt32; + public let f2 : Float; +} + +@frozen +public struct F29_S4 +{ + public let f0 : Int32; +} + +@frozen +public struct F29_Ret_S0 +{ + public let f0 : Int; + public let f1 : UInt64; +} + +@frozen +public struct F29_Ret +{ + public let f0 : UInt; + public let f1 : UInt; + public let f2 : UInt; + public let f3 : F29_Ret_S0; + public let f4 : UInt64; + public let f5 : UInt32; +} + +public func swiftCallbackFunc29(f: (F29_S0, Int, UInt64, UInt8, Int64, UInt8, Int, F29_S1, Int32, Int8, UInt8, UInt64, F29_S2, F29_S3, Int16, F29_S4, UInt32) -> F29_Ret) -> F29_Ret { + return f(F29_S0(f0: 152, f1: 737900189383874, f2: 33674), 5162040247631126074, 6524156301721885895, 129, 6661424933974053497, 145, 7521422786615537370, F29_S1(f0: 1361601345, f1: 3366726213840694614, f2: 7767610514138029164, f3: 1266864987), 1115803878, 5, 80, 2041754562738600205, F29_S2(f0: 1492686870), F29_S3(f0: 
142491811, f1: 1644962309, f2: 1905811), -3985, F29_S4(f0: 1921386549), 1510666400) +} + +@frozen +public struct F30_S0 +{ + public let f0 : UInt16; + public let f1 : Int16; + public let f2 : Int16; + public let f3 : Int8; +} + +@frozen +public struct F30_S1 +{ + public let f0 : UInt16; + public let f1 : UInt; +} + +@frozen +public struct F30_S2 +{ + public let f0 : Int64; + public let f1 : Int8; + public let f2 : UInt16; +} + +@frozen +public struct F30_S3 +{ + public let f0 : Int8; +} + +public func swiftCallbackFunc30(f: (F30_S0, F30_S1, F30_S2, F30_S3, Int) -> Float) -> Float { + return f(F30_S0(f0: 50723, f1: 19689, f2: -6469, f3: 83), F30_S1(f0: 51238, f1: 5879147675377398012), F30_S2(f0: 7909999288286190848, f1: -99, f2: 61385), F30_S3(f0: 48), 2980085298293056148) +} + +@frozen +public struct F31_S0 +{ + public let f0 : Int32; + public let f1 : UInt64; + public let f2 : UInt; +} + +@frozen +public struct F31_Ret_S0 +{ + public let f0 : UInt32; + public let f1 : Float; + public let f2 : UInt16; + public let f3 : Int16; + public let f4 : Float; +} + +@frozen +public struct F31_Ret +{ + public let f0 : F31_Ret_S0; + public let f1 : UInt16; +} + +public func swiftCallbackFunc31(f: (F31_S0, Double) -> F31_Ret) -> F31_Ret { + return f(F31_S0(f0: 1072945099, f1: 5760996810500287322, f2: 3952909367135409979), 2860786541632685) +} + +@frozen +public struct F32_Ret +{ + public let f0 : UInt; + public let f1 : Double; + public let f2 : Int; +} + +public func swiftCallbackFunc32(f: (UInt16, Int16) -> F32_Ret) -> F32_Ret { + return f(21020, 7462) +} + +@frozen +public struct F33_S0 +{ + public let f0 : Int16; + public let f1 : UInt64; +} + +@frozen +public struct F33_S1_S0 +{ + public let f0 : Int16; +} + +@frozen +public struct F33_S1 +{ + public let f0 : F33_S1_S0; + public let f1 : UInt32; + public let f2 : UInt; +} + +@frozen +public struct F33_S2 +{ + public let f0 : UInt32; + public let f1 : UInt64; + public let f2 : Int8; + public let f3 : Int8; + public let f4 : 
UInt; +} + +@frozen +public struct F33_S3_S0_S0 +{ + public let f0 : Int16; +} + +@frozen +public struct F33_S3_S0 +{ + public let f0 : F33_S3_S0_S0; +} + +@frozen +public struct F33_S3 +{ + public let f0 : F33_S3_S0; +} + +public func swiftCallbackFunc33(f: (F33_S0, Float, F33_S1, UInt32, Int, Int8, Int8, Float, UInt8, Float, Int8, F33_S2, Int, F33_S3, Int, UInt32) -> UInt) -> UInt { + return f(F33_S0(f0: -23471, f1: 2736941806609505888), 6930550, F33_S1(f0: F33_S1_S0(f0: 32476), f1: 165441961, f2: 3890227499323387948), 591524870, 1668420058132495503, -67, 94, 3180786, 42, 7674952, 43, F33_S2(f0: 771356149, f1: 3611576949210389997, f2: -15, f3: 7, f4: 2577587324978560192), 8266150294848599489, F33_S3(f0: F33_S3_S0(f0: F33_S3_S0_S0(f0: 9216))), 710302565025364450, 1060812904) +} + +@frozen +public struct F34_S0_S0 +{ + public let f0 : UInt32; +} + +@frozen +public struct F34_S0 +{ + public let f0 : F34_S0_S0; + public let f1 : UInt; +} + +public func swiftCallbackFunc34(f: (UInt32, F34_S0, UInt, Int16) -> UInt16) -> UInt16 { + return f(2068009847, F34_S0(f0: F34_S0_S0(f0: 845123292), f1: 5148244462913472487), 8632568386462910655, 7058) +} + +@frozen +public struct F35_S0_S0_S0 +{ + public let f0 : Int32; +} + +@frozen +public struct F35_S0_S0 +{ + public let f0 : Int64; + public let f1 : F35_S0_S0_S0; +} + +@frozen +public struct F35_S0_S1 +{ + public let f0 : Double; +} + +@frozen +public struct F35_S0 +{ + public let f0 : F35_S0_S0; + public let f1 : Int32; + public let f2 : F35_S0_S1; + public let f3 : Int; +} + +@frozen +public struct F35_S1 +{ + public let f0 : UInt16; +} + +@frozen +public struct F35_S2_S0 +{ + public let f0 : Double; +} + +@frozen +public struct F35_S2 +{ + public let f0 : F35_S2_S0; +} + +public func swiftCallbackFunc35(f: (UInt8, Int8, Float, Int64, Int, F35_S0, F35_S1, F35_S2) -> UInt64) -> UInt64 { + return f(182, -16, 7763558, 5905028570860904693, 5991001624972063224, F35_S0(f0: F35_S0_S0(f0: 6663912001709962059, f1: F35_S0_S0_S0(f0: 
1843939591)), f1: 1095170337, f2: F35_S0_S1(f0: 3908756332193409), f3: 8246190362462442203), F35_S1(f0: 52167), F35_S2(f0: F35_S2_S0(f0: 283499999631068))) +} + +@frozen +public struct F36_S0 +{ + public let f0 : UInt32; + public let f1 : Int64; + public let f2 : UInt8; + public let f3 : UInt; +} + +public func swiftCallbackFunc36(f: (UInt, Double, UInt, UInt8, Int64, F36_S0, Int8) -> Int) -> Int { + return f(5079603407518207003, 2365862518115571, 6495651757722767835, 46, 1550138390178394449, F36_S0(f0: 1858960269, f1: 1925263848394986294, f2: 217, f3: 8520779488644482307), -83) +} + +@frozen +public struct F37_S0_S0 +{ + public let f0 : Int; +} + +@frozen +public struct F37_S0 +{ + public let f0 : UInt; + public let f1 : UInt32; + public let f2 : F37_S0_S0; + public let f3 : Float; +} + +@frozen +public struct F37_S1 +{ + public let f0 : UInt; + public let f1 : UInt32; +} + +@frozen +public struct F37_S2 +{ + public let f0 : UInt16; +} + +@frozen +public struct F37_Ret +{ + public let f0 : Float; + public let f1 : UInt8; + public let f2 : Int16; + public let f3 : UInt64; +} + +public func swiftCallbackFunc37(f: (UInt64, F37_S0, Double, UInt16, F37_S1, F37_S2) -> F37_Ret) -> F37_Ret { + return f(1623104856688575867, F37_S0(f0: 3785544303342575322, f1: 717682682, f2: F37_S0_S0(f0: 2674933748436691896), f3: 3211458), 996705046384579, 8394, F37_S1(f0: 1048947722954084863, f1: 252415487), F37_S2(f0: 3664)) +} + +@frozen +public struct F38_S0_S0 +{ + public let f0 : Int; + public let f1 : Float; +} + +@frozen +public struct F38_S0 +{ + public let f0 : F38_S0_S0; + public let f1 : UInt16; + public let f2 : Int32; + public let f3 : Float; +} + +@frozen +public struct F38_S1 +{ + public let f0 : Int16; + public let f1 : Int32; + public let f2 : UInt32; +} + +public func swiftCallbackFunc38(f: (F38_S0, F38_S1, Double, Int16, Int8, UInt32, Int16, Float, Int, Float, UInt32, UInt8, Double, Int8) -> Double) -> Double { + return f(F38_S0(f0: F38_S0_S0(f0: 7389960750529773276, 
f1: 4749108), f1: 54323, f2: 634649910, f3: 83587), F38_S1(f0: -15547, f1: 1747384081, f2: 851987981), 3543874366683681, 5045, -32, 2084540698, 25583, 3158067, 1655263182833369283, 829404, 1888859844, 153, 222366180309763, 61) +} + +@frozen +public struct F39_S0_S0 +{ + public let f0 : Int16; +} + +@frozen +public struct F39_S0_S1 +{ + public let f0 : UInt16; +} + +@frozen +public struct F39_S0 +{ + public let f0 : F39_S0_S0; + public let f1 : Int32; + public let f2 : F39_S0_S1; + public let f3 : UInt; +} + +@frozen +public struct F39_S1 +{ + public let f0 : UInt16; + public let f1 : UInt8; + public let f2 : Float; + public let f3 : Int64; +} + +@frozen +public struct F39_S2 +{ + public let f0 : Int32; + public let f1 : Float; +} + +@frozen +public struct F39_S3 +{ + public let f0 : UInt32; + public let f1 : Int; + public let f2 : Int; +} + +public func swiftCallbackFunc39(f: (F39_S0, UInt, UInt32, Double, F39_S1, F39_S2, Int8, F39_S3, Int32, UInt64, UInt8) -> Int) -> Int { + return f(F39_S0(f0: F39_S0_S0(f0: -31212), f1: 1623216479, f2: F39_S0_S1(f0: 7181), f3: 8643545152918150186), 799631211988519637, 94381581, 761127371030426, F39_S1(f0: 417, f1: 85, f2: 1543931, f3: 3918460222899735322), F39_S2(f0: 883468300, f1: 2739152), -94, F39_S3(f0: 1374766954, f1: 2042223450490396789, f2: 2672454113535023130), 946259065, 6805548458517673751, 61) +} + +@frozen +public struct F40_S0 +{ + public let f0 : Int16; + public let f1 : Int32; +} + +@frozen +public struct F40_S1 +{ + public let f0 : Int32; +} + +@frozen +public struct F40_S2 +{ + public let f0 : Int64; + public let f1 : UInt16; + public let f2 : Int; + public let f3 : UInt8; +} + +@frozen +public struct F40_S3_S0 +{ + public let f0 : Float; +} + +@frozen +public struct F40_S3 +{ + public let f0 : UInt; + public let f1 : Double; + public let f2 : F40_S3_S0; + public let f3 : Double; +} + +public func swiftCallbackFunc40(f: (F40_S0, UInt32, UInt8, F40_S1, F40_S2, UInt64, UInt, UInt64, Int, UInt16, UInt32, F40_S3, 
UInt) -> UInt) -> UInt { + return f(F40_S0(f0: 22601, f1: 312892872), 1040102825, 56, F40_S1(f0: 101203812), F40_S2(f0: 4298883321494088257, f1: 2095, f2: 1536552108568739270, f3: 220), 2564624804830565018, 173855559108584219, 6222832940831380264, 1898370824516510398, 3352, 1643571476, F40_S3(f0: 7940054758811932961, f1: 246670432251533, f2: F40_S3_S0(f0: 7890596), f3: 1094140965415232), 2081923113238309816) +} + +@frozen +public struct F41_S0 +{ + public let f0 : UInt32; +} + +@frozen +public struct F41_Ret +{ + public let f0 : UInt64; + public let f1 : Double; + public let f2 : UInt32; + public let f3 : UInt32; +} + +public func swiftCallbackFunc41(f: (F41_S0) -> F41_Ret) -> F41_Ret { + return f(F41_S0(f0: 1430200072)) +} + +@frozen +public struct F42_S0_S0 +{ + public let f0 : Int; +} + +@frozen +public struct F42_S0 +{ + public let f0 : F42_S0_S0; +} + +@frozen +public struct F42_S1 +{ + public let f0 : UInt32; +} + +public func swiftCallbackFunc42(f: (Int32, UInt32, F42_S0, Float, UInt8, F42_S1) -> Int) -> Int { + return f(1046060439, 1987212952, F42_S0(f0: F42_S0_S0(f0: 4714080408858753964)), 2364146, 25, F42_S1(f0: 666986488)) +} + +@frozen +public struct F43_S0 +{ + public let f0 : Int32; + public let f1 : Int32; + public let f2 : Int; +} + +@frozen +public struct F43_S1 +{ + public let f0 : Int8; +} + +@frozen +public struct F43_Ret +{ + public let f0 : UInt16; +} + +public func swiftCallbackFunc43(f: (F43_S0, F43_S1) -> F43_Ret) -> F43_Ret { + return f(F43_S0(f0: 406102630, f1: 1946236062, f2: 663606396354980308), F43_S1(f0: -8)) +} + +@frozen +public struct F44_S0 +{ + public let f0 : UInt32; +} + +@frozen +public struct F44_S1_S0 +{ + public let f0 : UInt16; +} + +@frozen +public struct F44_S1_S1 +{ + public let f0 : UInt; +} + +@frozen +public struct F44_S1 +{ + public let f0 : Int16; + public let f1 : Int16; + public let f2 : F44_S1_S0; + public let f3 : F44_S1_S1; +} + +@frozen +public struct F44_S2 +{ + public let f0 : UInt; +} + +@frozen +public 
struct F44_S3 +{ + public let f0 : Int8; +} + +@frozen +public struct F44_Ret_S0 +{ + public let f0 : UInt; +} + +@frozen +public struct F44_Ret +{ + public let f0 : Int; + public let f1 : F44_Ret_S0; + public let f2 : Double; +} + +public func swiftCallbackFunc44(f: (Double, F44_S0, F44_S1, F44_S2, F44_S3) -> F44_Ret) -> F44_Ret { + return f(4281406007431544, F44_S0(f0: 2097291497), F44_S1(f0: -10489, f1: -9573, f2: F44_S1_S0(f0: 62959), f3: F44_S1_S1(f0: 7144119809173057975)), F44_S2(f0: 168733393207234277), F44_S3(f0: 64)) +} + +@frozen +public struct F45_S0 +{ + public let f0 : UInt; +} + +@frozen +public struct F45_S1 +{ + public let f0 : UInt; + public let f1 : Int16; +} + +@frozen +public struct F45_Ret_S0 +{ + public let f0 : Float; +} + +@frozen +public struct F45_Ret +{ + public let f0 : Double; + public let f1 : F45_Ret_S0; + public let f2 : Int64; + public let f3 : Double; + public let f4 : UInt64; + public let f5 : Int8; + public let f6 : Int32; +} + +public func swiftCallbackFunc45(f: (F45_S0, F45_S1, UInt8) -> F45_Ret) -> F45_Ret { + return f(F45_S0(f0: 5311803360204128233), F45_S1(f0: 2204790044275015546, f1: 8942), 207) +} + +@frozen +public struct F46_Ret +{ + public let f0 : UInt; + public let f1 : Double; + public let f2 : Int64; + public let f3 : UInt16; +} + +public func swiftCallbackFunc46(f: (Int, UInt, UInt16, UInt16, Int64) -> F46_Ret) -> F46_Ret { + return f(1855296013283572041, 1145047910516899437, 20461, 58204, 1923767011143317115) +} + +@frozen +public struct F47_S0 +{ + public let f0 : UInt8; + public let f1 : Int32; +} + +@frozen +public struct F47_S1 +{ + public let f0 : Int; + public let f1 : UInt32; + public let f2 : Int8; +} + +@frozen +public struct F47_S2_S0 +{ + public let f0 : UInt8; +} + +@frozen +public struct F47_S2 +{ + public let f0 : Int8; + public let f1 : Float; + public let f2 : Int32; + public let f3 : Float; + public let f4 : F47_S2_S0; +} + +@frozen +public struct F47_S3 +{ + public let f0 : UInt64; + public let 
f1 : Int64; +} + +@frozen +public struct F47_S4 +{ + public let f0 : UInt64; +} + +@frozen +public struct F47_Ret +{ + public let f0 : Int16; + public let f1 : Int16; + public let f2 : Int64; +} + +public func swiftCallbackFunc47(f: (Int, Float, UInt32, F47_S0, F47_S1, UInt16, Float, Int, Int, UInt, UInt, Int16, F47_S2, F47_S3, F47_S4) -> F47_Ret) -> F47_Ret { + return f(6545360066379352091, 1240616, 575670382, F47_S0(f0: 27, f1: 1769677101), F47_S1(f0: 4175209822525678639, f1: 483151627, f2: -41), 20891, 1011044, 8543308148327168378, 9126721646663585297, 5438914191614359864, 5284613245897089025, -9227, F47_S2(f0: -23, f1: 1294109, f2: 411726757, f3: 6621598, f4: F47_S2_S0(f0: 249)), F47_S3(f0: 5281612261430853979, f1: 7161295082465816089), F47_S4(f0: 1995556861952451598)) +} + +@frozen +public struct F48_S0 +{ + public let f0 : UInt64; + public let f1 : Int16; + public let f2 : UInt64; +} + +@frozen +public struct F48_S1_S0 +{ + public let f0 : Float; +} + +@frozen +public struct F48_S1 +{ + public let f0 : Double; + public let f1 : Int32; + public let f2 : Int32; + public let f3 : F48_S1_S0; + public let f4 : UInt; +} + +public func swiftCallbackFunc48(f: (Int8, Int16, Int16, UInt32, F48_S0, UInt32, F48_S1, Int32, Int32, UInt16, Int64, UInt32) -> Int64) -> Int64 { + return f(-34, 11634, -27237, 1039294154, F48_S0(f0: 1367847206719062131, f1: 22330, f2: 689282484471011648), 1572626904, F48_S1(f0: 3054128759424009, f1: 1677338134, f2: 1257237843, f3: F48_S1_S0(f0: 6264494), f4: 8397097040610783205), 1060447208, 269785114, 20635, 7679010342730986048, 1362633148) +} + +@frozen +public struct F49_S0_S0 +{ + public let f0 : UInt8; +} + +@frozen +public struct F49_S0 +{ + public let f0 : F49_S0_S0; + public let f1 : UInt64; +} + +@frozen +public struct F49_Ret +{ + public let f0 : Int32; + public let f1 : Int16; + public let f2 : UInt8; + public let f3 : UInt8; + public let f4 : Int8; + public let f5 : Int64; +} + +public func swiftCallbackFunc49(f: (F49_S0, Int64) -> 
F49_Ret) -> F49_Ret { + return f(F49_S0(f0: F49_S0_S0(f0: 48), f1: 7563394992711018452), 4358370311341042916) +} + +@frozen +public struct F50_S0_S0 +{ + public let f0 : Double; +} + +@frozen +public struct F50_S0 +{ + public let f0 : UInt16; + public let f1 : F50_S0_S0; +} + +@frozen +public struct F50_S1 +{ + public let f0 : Double; + public let f1 : UInt16; + public let f2 : Int32; + public let f3 : Int; + public let f4 : Double; +} + +@frozen +public struct F50_S2 +{ + public let f0 : Int32; + public let f1 : Float; + public let f2 : UInt32; +} + +@frozen +public struct F50_S3 +{ + public let f0 : Int64; + public let f1 : Int32; + public let f2 : Float; + public let f3 : Int8; +} + +@frozen +public struct F50_S4 +{ + public let f0 : Int64; +} + +@frozen +public struct F50_S5_S0 +{ + public let f0 : UInt16; +} + +@frozen +public struct F50_S5 +{ + public let f0 : F50_S5_S0; +} + +public func swiftCallbackFunc50(f: (F50_S0, F50_S1, UInt8, F50_S2, Int32, UInt64, Int8, Int8, Float, F50_S3, F50_S4, F50_S5, Float) -> UInt8) -> UInt8 { + return f(F50_S0(f0: 31857, f1: F50_S0_S0(f0: 1743417849706254)), F50_S1(f0: 4104577461772135, f1: 13270, f2: 2072598986, f3: 9056978834867675248, f4: 844742439929087), 87, F50_S2(f0: 1420884537, f1: 78807, f2: 1081688273), 336878110, 1146514566942283069, -93, 73, 2321639, F50_S3(f0: 1940888991336881606, f1: 688345394, f2: 712275, f3: -128), F50_S4(f0: 2638503583829414770), F50_S5(f0: F50_S5_S0(f0: 23681)), 8223218) +} + +@frozen +public struct F51_S0 +{ + public let f0 : Int64; +} + +@frozen +public struct F51_Ret +{ + public let f0 : UInt16; + public let f1 : Int8; + public let f2 : Int; + public let f3 : UInt16; + public let f4 : UInt64; +} + +public func swiftCallbackFunc51(f: (Int16, UInt, F51_S0, UInt64) -> F51_Ret) -> F51_Ret { + return f(10812, 470861239714315155, F51_S0(f0: 5415660333180374788), 2389942629143476149) +} + +@frozen +public struct F52_S0 +{ + public let f0 : Float; +} + +@frozen +public struct F52_S1 +{ + public 
let f0 : UInt16; +} + +@frozen +public struct F52_Ret +{ + public let f0 : Float; + public let f1 : UInt16; + public let f2 : Int64; + public let f3 : Int16; + public let f4 : UInt64; + public let f5 : Int8; +} + +public func swiftCallbackFunc52(f: (Int, F52_S0, Int16, Int16, F52_S1) -> F52_Ret) -> F52_Ret { + return f(3233654765973602550, F52_S0(f0: 5997729), -7404, -20804, F52_S1(f0: 17231)) +} + +@frozen +public struct F53_S0_S0_S0 +{ + public let f0 : Int64; +} + +@frozen +public struct F53_S0_S0 +{ + public let f0 : F53_S0_S0_S0; +} + +@frozen +public struct F53_S0 +{ + public let f0 : Int8; + public let f1 : F53_S0_S0; + public let f2 : UInt8; + public let f3 : UInt; + public let f4 : Int64; +} + +@frozen +public struct F53_S1 +{ + public let f0 : Float; + public let f1 : UInt8; +} + +@frozen +public struct F53_S2 +{ + public let f0 : Int8; + public let f1 : Int64; +} + +@frozen +public struct F53_S3_S0 +{ + public let f0 : UInt16; +} + +@frozen +public struct F53_S3 +{ + public let f0 : Int32; + public let f1 : UInt32; + public let f2 : F53_S3_S0; +} + +@frozen +public struct F53_S4 +{ + public let f0 : Int16; +} + +@frozen +public struct F53_S5_S0 +{ + public let f0 : UInt32; +} + +@frozen +public struct F53_S5_S1_S0 +{ + public let f0 : UInt8; +} + +@frozen +public struct F53_S5_S1 +{ + public let f0 : F53_S5_S1_S0; +} + +@frozen +public struct F53_S5 +{ + public let f0 : F53_S5_S0; + public let f1 : UInt; + public let f2 : UInt16; + public let f3 : F53_S5_S1; + public let f4 : Int8; +} + +@frozen +public struct F53_S6 +{ + public let f0 : Int; +} + +@frozen +public struct F53_Ret +{ + public let f0 : Int; +} + +public func swiftCallbackFunc53(f: (F53_S0, UInt8, Int64, F53_S1, F53_S2, F53_S3, Int64, F53_S4, F53_S5, F53_S6) -> F53_Ret) -> F53_Ret { + return f(F53_S0(f0: -123, f1: F53_S0_S0(f0: F53_S0_S0_S0(f0: 3494916243607193741)), f2: 167, f3: 4018943158751734338, f4: 6768175524813742847), 207, 8667995458064724392, F53_S1(f0: 492157, f1: 175), F53_S2(f0: 
76, f1: 5794486968525461488), F53_S3(f0: 2146070335, f1: 1109141712, f2: F53_S3_S0(f0: 44270)), 3581380181786253859, F53_S4(f0: 23565), F53_S5(f0: F53_S5_S0(f0: 1995174927), f1: 5025417700244056666, f2: 1847, f3: F53_S5_S1(f0: F53_S5_S1_S0(f0: 6)), f4: -87), F53_S6(f0: 5737280129078653969)) +} + +@frozen +public struct F54_S0 +{ + public let f0 : Int32; + public let f1 : Float; + public let f2 : UInt; + public let f3 : UInt8; +} + +@frozen +public struct F54_S1 +{ + public let f0 : UInt16; +} + +@frozen +public struct F54_S2_S0_S0 +{ + public let f0 : Double; +} + +@frozen +public struct F54_S2_S0 +{ + public let f0 : Int16; + public let f1 : F54_S2_S0_S0; +} + +@frozen +public struct F54_S2 +{ + public let f0 : Double; + public let f1 : F54_S2_S0; + public let f2 : Int64; + public let f3 : UInt64; +} + +@frozen +public struct F54_S3 +{ + public let f0 : Float; +} + +@frozen +public struct F54_S4 +{ + public let f0 : UInt16; + public let f1 : Int8; +} + +@frozen +public struct F54_S5 +{ + public let f0 : UInt16; +} + +@frozen +public struct F54_Ret +{ + public let f0 : Int16; + public let f1 : Int; +} + +public func swiftCallbackFunc54(f: (UInt16, F54_S0, Float, F54_S1, Int64, Int32, F54_S2, F54_S3, F54_S4, Float, F54_S5) -> F54_Ret) -> F54_Ret { + return f(16440, F54_S0(f0: 922752112, f1: 7843043, f2: 1521939500434086364, f3: 50), 3111108, F54_S1(f0: 50535), 4761507229870258916, 1670668155, F54_S2(f0: 432665443852892, f1: F54_S2_S0(f0: 13094, f1: F54_S2_S0_S0(f0: 669143993481144)), f2: 30067117315069590, f3: 874012622621600805), F54_S3(f0: 7995066), F54_S4(f0: 48478, f1: 23), 4383787, F54_S5(f0: 61633)) +} + +@frozen +public struct F55_S0_S0 +{ + public let f0 : Double; +} + +@frozen +public struct F55_S0 +{ + public let f0 : UInt; + public let f1 : F55_S0_S0; + public let f2 : Int8; +} + +@frozen +public struct F55_S1 +{ + public let f0 : Int; +} + +@frozen +public struct F55_S2 +{ + public let f0 : UInt64; +} + +@frozen +public struct F55_Ret_S0 +{ + public let 
f0 : Int16; + public let f1 : Int32; +} + +@frozen +public struct F55_Ret +{ + public let f0 : UInt; + public let f1 : Int; + public let f2 : Double; + public let f3 : F55_Ret_S0; + public let f4 : UInt64; +} + +public func swiftCallbackFunc55(f: (F55_S0, Int64, F55_S1, Int8, F55_S2, Float) -> F55_Ret) -> F55_Ret { + return f(F55_S0(f0: 2856661562863799725, f1: F55_S0_S0(f0: 1260582440479139), f2: 5), 7945068527720423751, F55_S1(f0: 4321616441998677375), -68, F55_S2(f0: 3311106172201778367), 5600069) +} + +@frozen +public struct F56_S0 +{ + public let f0 : Double; +} + +public func swiftCallbackFunc56(f: (F56_S0) -> UInt32) -> UInt32 { + return f(F56_S0(f0: 3082602006731666)) +} + +@frozen +public struct F57_S0 +{ + public let f0 : Int64; + public let f1 : Int32; + public let f2 : UInt64; +} + +@frozen +public struct F57_S1 +{ + public let f0 : UInt8; +} + +@frozen +public struct F57_S2 +{ + public let f0 : Float; +} + +@frozen +public struct F57_Ret_S0 +{ + public let f0 : Int64; + public let f1 : UInt8; + public let f2 : Int16; +} + +@frozen +public struct F57_Ret +{ + public let f0 : F57_Ret_S0; + public let f1 : UInt8; +} + +public func swiftCallbackFunc57(f: (Int8, UInt, UInt32, Int64, UInt64, Int16, Int64, F57_S0, F57_S1, F57_S2) -> F57_Ret) -> F57_Ret { + return f(54, 753245150862584974, 1470962934, 1269392070140776313, 2296560034524654667, 12381, 198893062684618980, F57_S0(f0: 1310571041794038100, f1: 18741662, f2: 7855196891704523814), F57_S1(f0: 156), F57_S2(f0: 72045)) +} + +@frozen +public struct F58_S0 +{ + public let f0 : UInt8; +} + +@frozen +public struct F58_S1 +{ + public let f0 : Float; + public let f1 : UInt16; +} + +@frozen +public struct F58_S2_S0_S0 +{ + public let f0 : Int; +} + +@frozen +public struct F58_S2_S0 +{ + public let f0 : F58_S2_S0_S0; +} + +@frozen +public struct F58_S2 +{ + public let f0 : F58_S2_S0; +} + +public func swiftCallbackFunc58(f: (UInt64, Int8, Int, F58_S0, F58_S1, Int64, F58_S2, Int32) -> Int) -> Int { + return 
f(4612004722568513699, -96, 1970590839325113617, F58_S0(f0: 211), F58_S1(f0: 5454927, f1: 48737), 921570327236881486, F58_S2(f0: F58_S2_S0(f0: F58_S2_S0_S0(f0: 7726203059421444802))), 491616915) +} + +public func swiftCallbackFunc59(f: (UInt16, Int64, Int) -> UInt64) -> UInt64 { + return f(9232, 7281011081566942937, 8203439771560005792) +} + +@frozen +public struct F60_S0 +{ + public let f0 : Int; +} + +@frozen +public struct F60_S1 +{ + public let f0 : UInt64; + public let f1 : Int32; +} + +public func swiftCallbackFunc60(f: (Float, Double, Int64, UInt16, Float, Float, F60_S0, Int16, F60_S1, Int16, Int64) -> UInt64) -> UInt64 { + return f(2682255, 2041676057169359, 5212916666940122160, 64444, 6372882, 8028835, F60_S0(f0: 6629286640024570381), 1520, F60_S1(f0: 8398497739914283366, f1: 1882981891), 7716, 6631047215535600409) +} + +@frozen +public struct F61_S0_S0 +{ + public let f0 : Int64; +} + +@frozen +public struct F61_S0 +{ + public let f0 : F61_S0_S0; + public let f1 : Int64; + public let f2 : UInt32; +} + +@frozen +public struct F61_S1 +{ + public let f0 : Int8; + public let f1 : Float; + public let f2 : Int; +} + +@frozen +public struct F61_S2_S0_S0 +{ + public let f0 : UInt64; +} + +@frozen +public struct F61_S2_S0 +{ + public let f0 : F61_S2_S0_S0; +} + +@frozen +public struct F61_S2_S1 +{ + public let f0 : Int8; +} + +@frozen +public struct F61_S2 +{ + public let f0 : F61_S2_S0; + public let f1 : F61_S2_S1; +} + +@frozen +public struct F61_S3 +{ + public let f0 : UInt64; + public let f1 : Int; +} + +public func swiftCallbackFunc61(f: (UInt32, UInt32, F61_S0, F61_S1, F61_S2, Int8, Int16, F61_S3, Int32, UInt32) -> UInt32) -> UInt32 { + return f(1070797065, 135220309, F61_S0(f0: F61_S0_S0(f0: 6475887024664217162), f1: 563444654083452485, f2: 1748956360), F61_S1(f0: -112, f1: 3433396, f2: 8106074956722850624), F61_S2(f0: F61_S2_S0(f0: F61_S2_S0_S0(f0: 2318628619979263858)), f1: F61_S2_S1(f0: -93)), -122, -11696, F61_S3(f0: 5229393236090246212, f1: 
4021449757638811198), 689517945, 657677740) +} + +@frozen +public struct F62_S0 +{ + public let f0 : Float; +} + +@frozen +public struct F62_Ret +{ + public let f0 : UInt16; + public let f1 : Int64; + public let f2 : Int; + public let f3 : Int64; +} + +public func swiftCallbackFunc62(f: (F62_S0) -> F62_Ret) -> F62_Ret { + return f(F62_S0(f0: 6500993)) +} + +@frozen +public struct F63_S0 +{ + public let f0 : Int; +} + +public func swiftCallbackFunc63(f: (F63_S0, Int16) -> Float) -> Float { + return f(F63_S0(f0: 8391317504019075904), 11218) +} + +@frozen +public struct F64_S0 +{ + public let f0 : Int32; +} + +@frozen +public struct F64_S1 +{ + public let f0 : UInt64; +} + +@frozen +public struct F64_S2 +{ + public let f0 : UInt32; +} + +@frozen +public struct F64_Ret_S0 +{ + public let f0 : UInt16; + public let f1 : UInt; + public let f2 : UInt64; +} + +@frozen +public struct F64_Ret +{ + public let f0 : UInt; + public let f1 : F64_Ret_S0; + public let f2 : Double; +} + +public func swiftCallbackFunc64(f: (Int8, F64_S0, F64_S1, UInt, F64_S2) -> F64_Ret) -> F64_Ret { + return f(-22, F64_S0(f0: 1591678205), F64_S1(f0: 8355549563000003325), 5441989206466502201, F64_S2(f0: 2097092811)) +} + +@frozen +public struct F65_S0 +{ + public let f0 : Double; +} + +@frozen +public struct F65_S1 +{ + public let f0 : UInt16; + public let f1 : Int; +} + +@frozen +public struct F65_S2 +{ + public let f0 : Int16; +} + +@frozen +public struct F65_S3 +{ + public let f0 : Int32; + public let f1 : UInt32; + public let f2 : Int8; + public let f3 : UInt; + public let f4 : Double; +} + +@frozen +public struct F65_Ret +{ + public let f0 : Int; + public let f1 : Int; + public let f2 : Int; + public let f3 : Float; +} + +public func swiftCallbackFunc65(f: (F65_S0, Int16, Double, UInt, F65_S1, UInt64, F65_S2, Int, F65_S3, Int32, Int64, UInt32, Double) -> F65_Ret) -> F65_Ret { + return f(F65_S0(f0: 2969223123583220), -10269, 3909264978196109, 522883062031213707, F65_S1(f0: 37585, f1: 
5879827541057349126), 1015270399093748716, F65_S2(f0: 19670), 1900026319968050423, F65_S3(f0: 1440511399, f1: 1203865685, f2: 12, f3: 4061296318630567634, f4: 2406524883317724), 1594888000, 2860599972459787263, 1989052358, 1036075606072593) +} + +@frozen +public struct F66_Ret_S0 +{ + public let f0 : Float; + public let f1 : UInt8; +} + +@frozen +public struct F66_Ret +{ + public let f0 : UInt32; + public let f1 : Int32; + public let f2 : UInt32; + public let f3 : F66_Ret_S0; + public let f4 : Int; +} + +public func swiftCallbackFunc66(f: (Int64) -> F66_Ret) -> F66_Ret { + return f(8300712022174991120) +} + +@frozen +public struct F67_S0 +{ + public let f0 : UInt32; + public let f1 : UInt8; + public let f2 : UInt8; + public let f3 : Int32; +} + +@frozen +public struct F67_S1 +{ + public let f0 : UInt32; +} + +@frozen +public struct F67_S2_S0 +{ + public let f0 : Int; +} + +@frozen +public struct F67_S2 +{ + public let f0 : UInt64; + public let f1 : UInt32; + public let f2 : Int; + public let f3 : UInt32; + public let f4 : F67_S2_S0; +} + +@frozen +public struct F67_S3 +{ + public let f0 : Int16; + public let f1 : UInt64; + public let f2 : UInt64; + public let f3 : Float; +} + +public func swiftCallbackFunc67(f: (Double, F67_S0, Float, F67_S1, Int16, UInt, F67_S2, UInt16, UInt, UInt, F67_S3, UInt64) -> Int32) -> Int32 { + return f(2365334314089079, F67_S0(f0: 1133369490, f1: 54, f2: 244, f3: 411611102), 4453912, F67_S1(f0: 837821989), -3824, 2394019088612006082, F67_S2(f0: 2219661088889353540, f1: 294254132, f2: 5363897228951721947, f3: 2038380379, f4: F67_S2_S0(f0: 8364879421385869437)), 27730, 1854446871602777695, 5020910156102352016, F67_S3(f0: -2211, f1: 5910581461792482729, f2: 9095210648679611609, f3: 6138428), 4274242076331880276) +} + +@frozen +public struct F68_S0_S0 +{ + public let f0 : Int8; +} + +@frozen +public struct F68_S0 +{ + public let f0 : Int64; + public let f1 : F68_S0_S0; +} + +@frozen +public struct F68_S1 +{ + public let f0 : UInt16; +} + 
+@frozen +public struct F68_S2_S0 +{ + public let f0 : UInt; +} + +@frozen +public struct F68_S2_S1_S0 +{ + public let f0 : UInt64; +} + +@frozen +public struct F68_S2_S1 +{ + public let f0 : F68_S2_S1_S0; +} + +@frozen +public struct F68_S2 +{ + public let f0 : F68_S2_S0; + public let f1 : F68_S2_S1; +} + +@frozen +public struct F68_S3 +{ + public let f0 : Int16; +} + +@frozen +public struct F68_Ret +{ + public let f0 : UInt16; + public let f1 : Int64; +} + +public func swiftCallbackFunc68(f: (UInt8, Float, Int32, Int, F68_S0, Int16, Int, Int32, Int, F68_S1, Double, F68_S2, F68_S3) -> F68_Ret) -> F68_Ret { + return f(203, 7725681, 323096997, 7745650233784541800, F68_S0(f0: 4103074885750473230, f1: F68_S0_S0(f0: 12)), 28477, 3772772447290536725, 1075348149, 2017898311184593242, F68_S1(f0: 60280), 4052387873895590, F68_S2(f0: F68_S2_S0(f0: 1321857087602747558), f1: F68_S2_S1(f0: F68_S2_S1_S0(f0: 9011155097138053416))), F68_S3(f0: 8332)) +} + +@frozen +public struct F69_S0_S0 +{ + public let f0 : UInt64; +} + +@frozen +public struct F69_S0 +{ + public let f0 : F69_S0_S0; +} + +@frozen +public struct F69_S1 +{ + public let f0 : Int64; +} + +@frozen +public struct F69_S2 +{ + public let f0 : Int32; +} + +@frozen +public struct F69_S3 +{ + public let f0 : UInt8; +} + +@frozen +public struct F69_S4_S0 +{ + public let f0 : Int64; +} + +@frozen +public struct F69_S4 +{ + public let f0 : F69_S4_S0; +} + +@frozen +public struct F69_Ret +{ + public let f0 : UInt8; + public let f1 : Int64; + public let f2 : UInt32; +} + +public func swiftCallbackFunc69(f: (F69_S0, Int, Int32, F69_S1, UInt32, Int8, F69_S2, Int, F69_S3, F69_S4) -> F69_Ret) -> F69_Ret { + return f(F69_S0(f0: F69_S0_S0(f0: 7154553222175076145)), 6685908100026425691, 1166526155, F69_S1(f0: 6042278185730963289), 182060391, 45, F69_S2(f0: 1886331345), 485542148877875333, F69_S3(f0: 209), F69_S4(f0: F69_S4_S0(f0: 6856847647688321191))) +} + +@frozen +public struct F70_S0 +{ + public let f0 : Int64; +} + +@frozen 
+public struct F70_S1 +{ + public let f0 : Int; + public let f1 : Double; + public let f2 : Int16; +} + +@frozen +public struct F70_S2 +{ + public let f0 : UInt32; +} + +@frozen +public struct F70_S3 +{ + public let f0 : UInt16; + public let f1 : Double; + public let f2 : UInt8; + public let f3 : UInt64; + public let f4 : Int32; +} + +@frozen +public struct F70_S4_S0 +{ + public let f0 : UInt; +} + +@frozen +public struct F70_S4 +{ + public let f0 : F70_S4_S0; +} + +@frozen +public struct F70_Ret +{ + public let f0 : Int8; + public let f1 : UInt32; + public let f2 : UInt64; + public let f3 : Int16; + public let f4 : Int16; +} + +public func swiftCallbackFunc70(f: (Int16, UInt8, Int, UInt32, F70_S0, Int32, F70_S1, F70_S2, F70_S3, Int64, Int32, UInt16, Int, Int, UInt, F70_S4) -> F70_Ret) -> F70_Ret { + return f(-13167, 126, 3641983584484741827, 1090448265, F70_S0(f0: 3696858216713616004), 1687025402, F70_S1(f0: 714916953527626038, f1: 459810445900614, f2: 4276), F70_S2(f0: 529194028), F70_S3(f0: 40800, f1: 3934985905568056, f2: 230, f3: 7358783417346157372, f4: 187926922), 228428560763393434, 146501405, 58804, 7098488973446286248, 1283658442251334575, 3644681944588099582, F70_S4(f0: F70_S4_S0(f0: 8197135412164695911))) +} + +@frozen +public struct F71_S0_S0 +{ + public let f0 : Int32; +} + +@frozen +public struct F71_S0 +{ + public let f0 : F71_S0_S0; +} + +@frozen +public struct F71_S1 +{ + public let f0 : Int64; +} + +public func swiftCallbackFunc71(f: (F71_S0, F71_S1) -> UInt64) -> UInt64 { + return f(F71_S0(f0: F71_S0_S0(f0: 258165353)), F71_S1(f0: 8603744544763953916)) +} + +@frozen +public struct F72_S0 +{ + public let f0 : Int32; +} + +@frozen +public struct F72_Ret +{ + public let f0 : UInt32; + public let f1 : Float; + public let f2 : Float; + public let f3 : Int64; +} + +public func swiftCallbackFunc72(f: (F72_S0, Int64, Int8) -> F72_Ret) -> F72_Ret { + return f(F72_S0(f0: 2021509367), 2480039820482100351, 91) +} + +@frozen +public struct F73_S0 +{ + public 
let f0 : Int32; +} + +@frozen +public struct F73_S1_S0 +{ + public let f0 : UInt16; +} + +@frozen +public struct F73_S1 +{ + public let f0 : F73_S1_S0; +} + +@frozen +public struct F73_S2 +{ + public let f0 : Int32; + public let f1 : Float; +} + +@frozen +public struct F73_S3 +{ + public let f0 : UInt; + public let f1 : Int16; + public let f2 : Int8; +} + +@frozen +public struct F73_S4 +{ + public let f0 : Int16; +} + +@frozen +public struct F73_S5 +{ + public let f0 : UInt32; +} + +public func swiftCallbackFunc73(f: (Double, Float, F73_S0, Int64, F73_S1, F73_S2, Int16, Double, Int8, Int32, Int64, F73_S3, UInt, UInt64, Int32, F73_S4, UInt8, F73_S5) -> Int8) -> Int8 { + return f(3038361048801008, 7870661, F73_S0(f0: 1555231180), 7433951069104961, F73_S1(f0: F73_S1_S0(f0: 63298)), F73_S2(f0: 1759846580, f1: 1335901), 11514, 695278874601974, 108, 48660527, 7762050749172332624, F73_S3(f0: 7486686356276472663, f1: 11622, f2: 112), 884183974530885885, 7434462110419085390, 170242607, F73_S4(f0: -26039), 41, F73_S5(f0: 191302504)) +} + +@frozen +public struct F74_S0_S0 +{ + public let f0 : UInt16; + public let f1 : UInt; + public let f2 : Int8; +} + +@frozen +public struct F74_S0 +{ + public let f0 : F74_S0_S0; + public let f1 : Int; +} + +@frozen +public struct F74_S1 +{ + public let f0 : Float; +} + +public func swiftCallbackFunc74(f: (F74_S0, F74_S1, Int16) -> Int64) -> Int64 { + return f(F74_S0(f0: F74_S0_S0(f0: 59883, f1: 5554216411943233256, f2: 126), f1: 724541378819571203), F74_S1(f0: 172601), 27932) +} + +@frozen +public struct F75_S0 +{ + public let f0 : Int64; +} + +@frozen +public struct F75_S1_S0 +{ + public let f0 : UInt8; +} + +@frozen +public struct F75_S1 +{ + public let f0 : F75_S1_S0; +} + +@frozen +public struct F75_S2 +{ + public let f0 : Int8; +} + +@frozen +public struct F75_S3_S0 +{ + public let f0 : UInt16; +} + +@frozen +public struct F75_S3 +{ + public let f0 : F75_S3_S0; +} + +@frozen +public struct F75_Ret +{ + public let f0 : UInt8; + public 
let f1 : Double; + public let f2 : Double; + public let f3 : Int64; + public let f4 : UInt32; +} + +public func swiftCallbackFunc75(f: (Int8, Int8, Int8, F75_S0, F75_S1, F75_S2, F75_S3) -> F75_Ret) -> F75_Ret { + return f(-105, 71, 108, F75_S0(f0: 7224638108479292438), F75_S1(f0: F75_S1_S0(f0: 126)), F75_S2(f0: -88), F75_S3(f0: F75_S3_S0(f0: 4934))) +} + +@frozen +public struct F76_S0 +{ + public let f0 : UInt16; + public let f1 : Int; +} + +@frozen +public struct F76_S1_S0 +{ + public let f0 : Int; +} + +@frozen +public struct F76_S1 +{ + public let f0 : F76_S1_S0; + public let f1 : UInt; + public let f2 : Double; +} + +@frozen +public struct F76_S2 +{ + public let f0 : UInt64; + public let f1 : Int; + public let f2 : UInt16; +} + +@frozen +public struct F76_S3_S0 +{ + public let f0 : Int64; +} + +@frozen +public struct F76_S3 +{ + public let f0 : F76_S3_S0; +} + +@frozen +public struct F76_S4 +{ + public let f0 : Int64; +} + +@frozen +public struct F76_S5 +{ + public let f0 : UInt; + public let f1 : Double; +} + +public func swiftCallbackFunc76(f: (UInt8, F76_S0, Int8, F76_S1, F76_S2, F76_S3, UInt32, F76_S4, UInt8, F76_S5, Double, Int16) -> UInt64) -> UInt64 { + return f(69, F76_S0(f0: 25503, f1: 4872234474620951743), 43, F76_S1(f0: F76_S1_S0(f0: 1199076663426903579), f1: 4639522222462236688, f2: 4082956091930029), F76_S2(f0: 5171821618947987626, f1: 3369410144919558564, f2: 5287), F76_S3(f0: F76_S3_S0(f0: 929854460912895550)), 1208311201, F76_S4(f0: 7033993025788649145), 58, F76_S5(f0: 1401399014740601512, f1: 2523645319232571), 230232835550369, -22975) +} + +@frozen +public struct F77_S0 +{ + public let f0 : Int64; + public let f1 : Double; + public let f2 : UInt; +} + +@frozen +public struct F77_S1 +{ + public let f0 : Int16; + public let f1 : Float; + public let f2 : Float; + public let f3 : Int64; + public let f4 : Int64; +} + +@frozen +public struct F77_S2 +{ + public let f0 : UInt16; + public let f1 : Int8; + public let f2 : Int32; + public let f3 : Float; 
+ public let f4 : Float; +} + +@frozen +public struct F77_Ret +{ + public let f0 : Double; + public let f1 : UInt16; + public let f2 : Int8; + public let f3 : UInt; +} + +public func swiftCallbackFunc77(f: (Double, F77_S0, F77_S1, F77_S2, UInt32) -> F77_Ret) -> F77_Ret { + return f(1623173949127682, F77_S0(f0: 5204451347781433070, f1: 3469485630755805, f2: 7586276835848725004), F77_S1(f0: 2405, f1: 2419792, f2: 6769317, f3: 1542327522833750776, f4: 1297586130846695275), F77_S2(f0: 10102, f1: -48, f2: 14517107, f3: 4856023, f4: 2681358), 1463251524) +} + +@frozen +public struct F78_S0 +{ + public let f0 : UInt; + public let f1 : Int; +} + +@frozen +public struct F78_S1_S0 +{ + public let f0 : Int8; +} + +@frozen +public struct F78_S1 +{ + public let f0 : Int16; + public let f1 : UInt64; + public let f2 : F78_S1_S0; + public let f3 : Int32; + public let f4 : Int; +} + +@frozen +public struct F78_S2 +{ + public let f0 : UInt; + public let f1 : UInt64; +} + +@frozen +public struct F78_S3 +{ + public let f0 : UInt64; +} + +@frozen +public struct F78_S4 +{ + public let f0 : UInt64; +} + +public func swiftCallbackFunc78(f: (UInt64, F78_S0, UInt64, F78_S1, F78_S2, Int32, UInt64, Int64, F78_S3, Float, Float, UInt16, F78_S4, Double) -> Double) -> Double { + return f(6780767594736146373, F78_S0(f0: 6264193481541646332, f1: 6600856439035088503), 1968254881389492170, F78_S1(f0: -17873, f1: 5581169895682201971, f2: F78_S1_S0(f0: 127), f3: 1942346704, f4: 118658265323815307), F78_S2(f0: 1489326778640378879, f1: 1427061853707270770), 858391966, 5830110056171302270, 2953614358173898788, F78_S3(f0: 6761452244699684409), 3452451, 3507119, 40036, F78_S4(f0: 4800085294404376817), 780368756754436) +} + +@frozen +public struct F79_S0_S0 +{ + public let f0 : UInt; +} + +@frozen +public struct F79_S0 +{ + public let f0 : F79_S0_S0; + public let f1 : Int; +} + +@frozen +public struct F79_Ret +{ + public let f0 : UInt32; + public let f1 : UInt64; + public let f2 : Double; +} + +public func 
swiftCallbackFunc79(f: (F79_S0, Float) -> F79_Ret) -> F79_Ret { + return f(F79_S0(f0: F79_S0_S0(f0: 1013911700897046117), f1: 7323935615297665289), 5159506) +} + +@frozen +public struct F80_S0 +{ + public let f0 : UInt16; +} + +@frozen +public struct F80_S1_S0_S0 +{ + public let f0 : UInt8; +} + +@frozen +public struct F80_S1_S0 +{ + public let f0 : F80_S1_S0_S0; +} + +@frozen +public struct F80_S1 +{ + public let f0 : Int; + public let f1 : F80_S1_S0; +} + +@frozen +public struct F80_S2 +{ + public let f0 : UInt64; +} + +public func swiftCallbackFunc80(f: (UInt64, Int, Int32, Int16, UInt, F80_S0, Int16, Int, Int8, Int32, UInt32, F80_S1, F80_S2, UInt64) -> Float) -> Float { + return f(4470427843910624516, 8383677749057878551, 2017117925, -10531, 3438375001906177611, F80_S0(f0: 65220), 7107, 7315288835693680178, -48, 813870434, 1092037477, F80_S1(f0: 7104962838387954470, f1: F80_S1_S0(f0: F80_S1_S0_S0(f0: 236))), F80_S2(f0: 7460392384225808790), 364121728483540667) +} + +@frozen +public struct F81_S0 +{ + public let f0 : Float; + public let f1 : Float; + public let f2 : Int; + public let f3 : Int; + public let f4 : Int; +} + +@frozen +public struct F81_Ret +{ + public let f0 : Int; +} + +public func swiftCallbackFunc81(f: (UInt8, UInt32, UInt8, F81_S0, Int8) -> F81_Ret) -> F81_Ret { + return f(53, 57591489, 19, F81_S0(f0: 5675845, f1: 6469988, f2: 5775316279348621124, f3: 7699091894067057939, f4: 1049086627558950131), 15) +} + +@frozen +public struct F82_S0_S0 +{ + public let f0 : Float; + public let f1 : UInt; + public let f2 : UInt16; +} + +@frozen +public struct F82_S0 +{ + public let f0 : UInt; + public let f1 : F82_S0_S0; + public let f2 : UInt16; +} + +@frozen +public struct F82_S1 +{ + public let f0 : Int32; +} + +@frozen +public struct F82_S2 +{ + public let f0 : Int; +} + +@frozen +public struct F82_S3_S0 +{ + public let f0 : Int32; +} + +@frozen +public struct F82_S3 +{ + public let f0 : Double; + public let f1 : UInt; + public let f2 : F82_S3_S0; +} + 
+@frozen +public struct F82_S4 +{ + public let f0 : UInt64; +} + +public func swiftCallbackFunc82(f: (Int64, F82_S0, Int16, Int8, UInt32, F82_S1, Int32, Int64, Int8, Double, F82_S2, F82_S3, F82_S4) -> Float) -> Float { + return f(6454754584537364459, F82_S0(f0: 6703634779264968131, f1: F82_S0_S0(f0: 1010059, f1: 4772968591609202284, f2: 64552), f2: 47126), 9869, -8, 1741550381, F82_S1(f0: 705741282), 1998781399, 7787961471254401526, -27, 4429830670351707, F82_S2(f0: 4975772762589349422), F82_S3(f0: 1423948098664774, f1: 504607538824251986, f2: F82_S3_S0(f0: 1940911018)), F82_S4(f0: 2988623645681463667)) +} + +@frozen +public struct F83_S0 +{ + public let f0 : Int32; +} + +@frozen +public struct F83_Ret +{ + public let f0 : Int16; +} + +public func swiftCallbackFunc83(f: (Int8, F83_S0, Int16) -> F83_Ret) -> F83_Ret { + return f(17, F83_S0(f0: 530755056), -11465) +} + +@frozen +public struct F84_S0 +{ + public let f0 : UInt; + public let f1 : UInt32; + public let f2 : UInt; + public let f3 : UInt64; + public let f4 : Int32; +} + +@frozen +public struct F84_S1 +{ + public let f0 : UInt; +} + +@frozen +public struct F84_S2 +{ + public let f0 : Float; +} + +@frozen +public struct F84_S3 +{ + public let f0 : UInt8; +} + +@frozen +public struct F84_S4 +{ + public let f0 : Int16; +} + +@frozen +public struct F84_S5 +{ + public let f0 : Int; + public let f1 : Int16; +} + +@frozen +public struct F84_S6 +{ + public let f0 : Int16; +} + +@frozen +public struct F84_S7 +{ + public let f0 : Int32; +} + +public func swiftCallbackFunc84(f: (Int32, F84_S0, F84_S1, Double, Int32, Int16, Double, F84_S2, F84_S3, Double, F84_S4, F84_S5, F84_S6, F84_S7, UInt) -> Int) -> Int { + return f(1605022009, F84_S0(f0: 6165049220831866664, f1: 1235491183, f2: 7926620970405586826, f3: 2633248816907294140, f4: 2012834055), F84_S1(f0: 2881830362339122988), 4065309434963087, 1125165825, -32360, 1145602045200029, F84_S2(f0: 5655563), F84_S3(f0: 14), 3919593995303128, F84_S4(f0: 26090), F84_S5(f0: 
8584898862398781737, f1: -5185), F84_S6(f0: 144), F84_S7(f0: 2138004352), 9102562043027810686) +} + +@frozen +public struct F85_S0 +{ + public let f0 : Double; + public let f1 : Double; + public let f2 : Int8; + public let f3 : Int32; +} + +@frozen +public struct F85_S1 +{ + public let f0 : Int64; + public let f1 : UInt16; + public let f2 : UInt64; + public let f3 : UInt; +} + +@frozen +public struct F85_S2 +{ + public let f0 : Float; + public let f1 : Float; + public let f2 : UInt32; +} + +@frozen +public struct F85_S3 +{ + public let f0 : UInt8; +} + +@frozen +public struct F85_S4 +{ + public let f0 : UInt; +} + +@frozen +public struct F85_S5 +{ + public let f0 : Double; +} + +@frozen +public struct F85_Ret +{ + public let f0 : UInt32; + public let f1 : UInt16; + public let f2 : Int32; + public let f3 : Double; + public let f4 : Int; + public let f5 : UInt64; + public let f6 : Int64; +} + +public func swiftCallbackFunc85(f: (F85_S0, F85_S1, UInt32, F85_S2, Int64, F85_S3, Int64, F85_S4, UInt16, UInt8, Int32, UInt32, Int32, Float, F85_S5, Int64) -> F85_Ret) -> F85_Ret { + return f(F85_S0(f0: 4325646965362202, f1: 3313084380250914, f2: 42, f3: 2034100272), F85_S1(f0: 1365643665271339575, f1: 25442, f2: 3699631470459352980, f3: 7611776251925132200), 911446742, F85_S2(f0: 352423, f1: 7150341, f2: 2090089360), 5731257538910387688, F85_S3(f0: 171), 5742887585483060342, F85_S4(f0: 1182236975680416316), 32137, 44, 2143531010, 1271996557, 1035188446, 1925443, F85_S5(f0: 2591574394337603), 721102428782331317) +} + +@frozen +public struct F86_S0 +{ + public let f0 : Int; + public let f1 : Float; + public let f2 : Int16; + public let f3 : Int8; +} + +@frozen +public struct F86_S1 +{ + public let f0 : Double; +} + +@frozen +public struct F86_S2 +{ + public let f0 : Int; + public let f1 : Float; +} + +@frozen +public struct F86_S3 +{ + public let f0 : UInt16; + public let f1 : Float; +} + +@frozen +public struct F86_Ret +{ + public let f0 : Int16; + public let f1 : UInt32; + 
public let f2 : Double; + public let f3 : UInt8; +} + +public func swiftCallbackFunc86(f: (Float, Int16, Int, Int16, Float, F86_S0, F86_S1, F86_S2, Int, UInt32, UInt, UInt, Float, Int64, F86_S3, UInt) -> F86_Ret) -> F86_Ret { + return f(2913632, 3735, 2773655476379499086, 22973, 8292778, F86_S0(f0: 5562042565258891920, f1: 8370233, f2: 18292, f3: -32), F86_S1(f0: 486951152980016), F86_S2(f0: 170033426151098456, f1: 3867810), 7390780928011218856, 1504267943, 2046987193814931100, 4860202472307588968, 1644019, 8084012412562897328, F86_S3(f0: 46301, f1: 5633701), 1911608136082175332) +} + +@frozen +public struct F87_S0 +{ + public let f0 : Int32; + public let f1 : Int16; + public let f2 : Int32; +} + +@frozen +public struct F87_S1 +{ + public let f0 : Float; +} + +public func swiftCallbackFunc87(f: (Float, Int, F87_S0, F87_S1) -> UInt64) -> UInt64 { + return f(1413086, 4206825694012787823, F87_S0(f0: 70240457, f1: 30503, f2: 671751848), F87_S1(f0: 6641304)) +} + +@frozen +public struct F88_S0 +{ + public let f0 : Int8; + public let f1 : Int16; + public let f2 : UInt8; + public let f3 : Double; + public let f4 : UInt16; +} + +@frozen +public struct F88_S1 +{ + public let f0 : Double; + public let f1 : UInt8; +} + +@frozen +public struct F88_S2 +{ + public let f0 : UInt; +} + +@frozen +public struct F88_S3 +{ + public let f0 : Int8; + public let f1 : UInt32; +} + +@frozen +public struct F88_Ret +{ + public let f0 : Int32; + public let f1 : UInt32; + public let f2 : Int; + public let f3 : UInt64; +} + +public func swiftCallbackFunc88(f: (F88_S0, F88_S1, Float, UInt, Float, Int, F88_S2, UInt64, F88_S3, UInt64) -> F88_Ret) -> F88_Ret { + return f(F88_S0(f0: 125, f1: -10705, f2: 21, f3: 361845689097003, f4: 41749), F88_S1(f0: 1754583995806427, f1: 178), 4705205, 5985040566226273121, 2484194, 1904196135427766362, F88_S2(f0: 5436710892090266406), 4250368992471675181, F88_S3(f0: -87, f1: 362108395), 3388632419732870796) +} + +@frozen +public struct F89_S0 +{ + public let f0 : 
Double; +} + +@frozen +public struct F89_Ret_S0 +{ + public let f0 : Double; +} + +@frozen +public struct F89_Ret +{ + public let f0 : Int32; + public let f1 : F89_Ret_S0; + public let f2 : UInt; + public let f3 : Int64; +} + +public func swiftCallbackFunc89(f: (F89_S0) -> F89_Ret) -> F89_Ret { + return f(F89_S0(f0: 2137010348736191)) +} + +@frozen +public struct F90_S0_S0_S0 +{ + public let f0 : UInt; +} + +@frozen +public struct F90_S0_S0 +{ + public let f0 : F90_S0_S0_S0; +} + +@frozen +public struct F90_S0 +{ + public let f0 : F90_S0_S0; + public let f1 : UInt; + public let f2 : UInt32; + public let f3 : Int64; + public let f4 : Int16; +} + +@frozen +public struct F90_S1 +{ + public let f0 : UInt16; + public let f1 : Int16; +} + +@frozen +public struct F90_S2 +{ + public let f0 : Int; +} + +@frozen +public struct F90_S3 +{ + public let f0 : UInt; +} + +@frozen +public struct F90_S4 +{ + public let f0 : UInt64; +} + +@frozen +public struct F90_Ret +{ + public let f0 : Int16; + public let f1 : Int; +} + +public func swiftCallbackFunc90(f: (Int64, Float, F90_S0, UInt32, UInt16, F90_S1, F90_S2, F90_S3, F90_S4) -> F90_Ret) -> F90_Ret { + return f(920081051198141017, 661904, F90_S0(f0: F90_S0_S0(f0: F90_S0_S0_S0(f0: 3898354148166517637)), f1: 1003118682503285076, f2: 1418362079, f3: 3276689793574299746, f4: -18559), 1773011602, 32638, F90_S1(f0: 47129, f1: -31849), F90_S2(f0: 4795020225668482328), F90_S3(f0: 5307513663902191175), F90_S4(f0: 7057074401404034083)) +} + +@frozen +public struct F91_S0 +{ + public let f0 : Int8; + public let f1 : Int; + public let f2 : UInt16; + public let f3 : UInt16; +} + +@frozen +public struct F91_S1 +{ + public let f0 : Double; + public let f1 : UInt64; + public let f2 : Int8; + public let f3 : Int64; + public let f4 : Float; +} + +@frozen +public struct F91_S2_S0_S0 +{ + public let f0 : Int64; +} + +@frozen +public struct F91_S2_S0 +{ + public let f0 : F91_S2_S0_S0; +} + +@frozen +public struct F91_S2 +{ + public let f0 : Double; + 
public let f1 : F91_S2_S0; + public let f2 : Int16; +} + +@frozen +public struct F91_S3_S0 +{ + public let f0 : UInt; +} + +@frozen +public struct F91_S3 +{ + public let f0 : F91_S3_S0; +} + +@frozen +public struct F91_Ret +{ + public let f0 : Int64; + public let f1 : UInt64; + public let f2 : Int16; + public let f3 : UInt32; +} + +public func swiftCallbackFunc91(f: (F91_S0, Int16, UInt32, Double, F91_S1, Int64, UInt64, Float, F91_S2, Int, F91_S3) -> F91_Ret) -> F91_Ret { + return f(F91_S0(f0: -117, f1: 6851485542307521521, f2: 23224, f3: 28870), -26318, 874052395, 3651199868446152, F91_S1(f0: 3201729800438540, f1: 7737032265509566019, f2: 123, f3: 7508633930609553617, f4: 8230501), 2726677037673277403, 4990410590084533996, 3864639, F91_S2(f0: 1763083442463892, f1: F91_S2_S0(f0: F91_S2_S0_S0(f0: 6783710957456602933)), f2: 2927), 3359440517385934325, F91_S3(f0: F91_S3_S0(f0: 3281136825102667421))) +} + +@frozen +public struct F92_S0 +{ + public let f0 : Double; + public let f1 : Double; +} + +@frozen +public struct F92_S1 +{ + public let f0 : UInt32; + public let f1 : Int64; + public let f2 : UInt32; + public let f3 : Int16; + public let f4 : UInt64; +} + +@frozen +public struct F92_S2_S0 +{ + public let f0 : UInt16; +} + +@frozen +public struct F92_S2 +{ + public let f0 : UInt32; + public let f1 : Int64; + public let f2 : F92_S2_S0; +} + +@frozen +public struct F92_Ret +{ + public let f0 : Int32; +} + +public func swiftCallbackFunc92(f: (UInt32, Int64, F92_S0, Int, UInt8, F92_S1, F92_S2, UInt8, Int, Int32) -> F92_Ret) -> F92_Ret { + return f(479487770, 3751818229732502126, F92_S0(f0: 3486664439392893, f1: 1451061144702448), 1103649059951788126, 17, F92_S1(f0: 1542537473, f1: 2256304993713022795, f2: 1773847876, f3: -4712, f4: 2811859744132572185), F92_S2(f0: 290315682, f1: 4847587202070249866, f2: F92_S2_S0(f0: 20774)), 8, 2206063999764082749, 1481391120) +} + +@frozen +public struct F93_S0 +{ + public let f0 : Int8; + public let f1 : UInt32; +} + +@frozen +public 
struct F93_S1 +{ + public let f0 : UInt32; +} + +@frozen +public struct F93_Ret +{ + public let f0 : Int; + public let f1 : UInt64; +} + +public func swiftCallbackFunc93(f: (UInt, UInt16, Double, F93_S0, F93_S1) -> F93_Ret) -> F93_Ret { + return f(5170226481546239050, 2989, 1630717078645270, F93_S0(f0: -46, f1: 859171256), F93_S1(f0: 254449240)) +} + +@frozen +public struct F94_S0 +{ + public let f0 : UInt; +} + +@frozen +public struct F94_S1 +{ + public let f0 : Int32; + public let f1 : UInt; +} + +@frozen +public struct F94_S2 +{ + public let f0 : Int; + public let f1 : UInt32; + public let f2 : UInt16; +} + +@frozen +public struct F94_S3 +{ + public let f0 : UInt8; + public let f1 : Int32; + public let f2 : Float; +} + +@frozen +public struct F94_S4 +{ + public let f0 : Int32; + public let f1 : Int64; + public let f2 : Float; +} + +@frozen +public struct F94_S5 +{ + public let f0 : Int16; + public let f1 : UInt; + public let f2 : Int16; + public let f3 : Int8; +} + +@frozen +public struct F94_Ret +{ + public let f0 : Int64; +} + +public func swiftCallbackFunc94(f: (F94_S0, Int16, F94_S1, F94_S2, F94_S3, Float, F94_S4, UInt32, F94_S5, Int16) -> F94_Ret) -> F94_Ret { + return f(F94_S0(f0: 8626725032375870186), -7755, F94_S1(f0: 544707027, f1: 2251410026467996594), F94_S2(f0: 2972912419231960385, f1: 740529487, f2: 34526), F94_S3(f0: 41, f1: 1598856955, f2: 5126603), 7242977, F94_S4(f0: 473684762, f1: 4023878650965716094, f2: 2777693), 1612378906, F94_S5(f0: -17074, f1: 2666903737827472071, f2: 418, f3: 106), -14547) +} + +@frozen +public struct F95_S0 +{ + public let f0 : UInt16; + public let f1 : Int64; +} + +@frozen +public struct F95_S1 +{ + public let f0 : UInt32; + public let f1 : Int16; + public let f2 : Double; +} + +@frozen +public struct F95_S2 +{ + public let f0 : UInt16; +} + +@frozen +public struct F95_Ret_S0 +{ + public let f0 : Int16; +} + +@frozen +public struct F95_Ret +{ + public let f0 : Int; + public let f1 : Int16; + public let f2 : Int8; + 
public let f3 : UInt8; + public let f4 : F95_Ret_S0; +} + +public func swiftCallbackFunc95(f: (F95_S0, UInt, F95_S1, F95_S2) -> F95_Ret) -> F95_Ret { + return f(F95_S0(f0: 45388, f1: 6620047889014935849), 97365157264460373, F95_S1(f0: 357234637, f1: -13720, f2: 3313430568949662), F95_S2(f0: 14248)) +} + +@frozen +public struct F96_S0 +{ + public let f0 : Int64; + public let f1 : UInt32; + public let f2 : Int16; + public let f3 : Double; + public let f4 : Double; +} + +@frozen +public struct F96_S1 +{ + public let f0 : UInt64; +} + +@frozen +public struct F96_S2 +{ + public let f0 : Float; +} + +public func swiftCallbackFunc96(f: (UInt32, F96_S0, Float, UInt64, UInt32, UInt32, F96_S1, F96_S2, Int64) -> UInt64) -> UInt64 { + return f(1103144790, F96_S0(f0: 496343164737276588, f1: 1541085564, f2: -16271, f3: 1062575289573718, f4: 570255786498865), 7616839, 7370881799887414383, 390392554, 1492692139, F96_S1(f0: 1666031716012978365), F96_S2(f0: 3427394), 4642371619161527189) +} + +@frozen +public struct F97_S0 +{ + public let f0 : Int8; +} + +@frozen +public struct F97_S1 +{ + public let f0 : Int64; + public let f1 : UInt64; +} + +@frozen +public struct F97_S2 +{ + public let f0 : UInt8; + public let f1 : Int64; +} + +@frozen +public struct F97_S3 +{ + public let f0 : Double; +} + +@frozen +public struct F97_Ret_S0 +{ + public let f0 : Int32; +} + +@frozen +public struct F97_Ret +{ + public let f0 : Double; + public let f1 : UInt; + public let f2 : F97_Ret_S0; + public let f3 : UInt16; + public let f4 : UInt32; +} + +public func swiftCallbackFunc97(f: (F97_S0, F97_S1, F97_S2, F97_S3) -> F97_Ret) -> F97_Ret { + return f(F97_S0(f0: -87), F97_S1(f0: 1414208343412494909, f1: 453284654311256466), F97_S2(f0: 224, f1: 1712859616922087053), F97_S3(f0: 3987671154739178)) +} + +@frozen +public struct F98_S0 +{ + public let f0 : Int32; +} + +public func swiftCallbackFunc98(f: (Float, UInt16, F98_S0, UInt16) -> Int) -> Int { + return f(2863898, 37573, F98_S0(f0: 1073068257), 53560) 
+} + +@frozen +public struct F99_S0 +{ + public let f0 : Int; + public let f1 : UInt32; + public let f2 : Int32; + public let f3 : UInt32; +} + +@frozen +public struct F99_S1 +{ + public let f0 : Int16; +} + +@frozen +public struct F99_S2 +{ + public let f0 : UInt8; } -public func swiftCallbackFunc9(f: (Int8, Int, Int16, Int64, Double, Double, Int, UInt16, UInt16, Float, Float, UInt16, UInt32, Int16, Int32, Int32, UInt64, Int16, Int64, Int, UInt8, UInt16, Int16, Int, Int16) -> F9_Ret) -> F9_Ret { - return f(17, 4720638462358523954, 30631, 8206569929240962953, 1359667226908383, 3776001892555053, 747160900180286726, 12700, 53813, 7860389, 1879743, 61400, 1962814337, 17992, 677814589, 1019483263, 6326265259403184370, -14633, 4127072498763789519, 4008108205305320386, 128, 21189, 32104, 384827814282870543, 20647) +public func swiftCallbackFunc99(f: (Int64, UInt, Float, UInt16, F99_S0, UInt8, Float, UInt8, Int8, F99_S1, F99_S2) -> UInt64) -> UInt64 { + return f(1152281003884062246, 2482384127373829622, 3361150, 2121, F99_S0(f0: 4484545590050696958, f1: 422528630, f2: 1418346646, f3: 1281567856), 223, 1917656, 103, -46, F99_S1(f0: 14554), F99_S2(f0: 68)) } From 997f09cacbc1bbbdd3c83cccbf46eabf8df353d4 Mon Sep 17 00:00:00 2001 From: Vlad Brezae Date: Fri, 5 Apr 2024 11:47:45 +0300 Subject: [PATCH 108/132] [mono][interp] Reduce false pinning from interp stack (#100400) * [mono][interp] Reduce false pinning from interp stack Interpreter opcodes operate on the interp stack, an area of memory separately allocated. Each interp var will have an allocated stack offset in the current interpreter stack frame. When we allocate the storage for an interp var we can take into account the var type. If the type can represent a potential ref to an object or an interior ref then we mark the pointer slot as potentially containing refs, for the method that is being compiled. During GC, we used to conservatively scan the entire interp stack space used by each thread. 
After this change, in the first stage, we do a stack walk where we detect slots in each interp frame where no refs can reside. We mark these slots in a bit array. Afterwards we conservatively scan the interp stack of the thread, while ignoring slots that were previously marked as not containing any refs. System.Runtime.Tests suite was used for testing the effectiveness of the change, by computing the cumulative number of pinned objects throughout all GCs (about 1100). minijit - avg 702000 pinned objects old-interp - avg 641000 pinned objects precise-interp - avg 578000 pinned objects This resulted in 10% reduction in the number of pinned objects during collection. This change is meant to reduce memory usage of apps by making objects die earlier. We could further improve by being more precise. For example, for call sites we could reuse liveness information to precisely know which slots actually contain refs. This is a bit more complex to implement and it is unclear yet how impactful it would be. * [mono][interp] Add option to disable precise scanning of stack * [mono][interp] Fix pushing of byrefs on execution stack A lot of times, when we were pushing a byref type on the stack during compilation, we would first get the mint_type which would be MINT_TYPE_I4/I8. From the mint_type we would then obtain the STACK_TYPE_I4/I8, losing information because it should have been STACK_TYPE_MP. Because of this, the underlying interp var would end up being created as MONO_TYPE_I4/I8 instead of MONO_TYPE_I. Add another method for pushing directly a MonoType, with less confusing indirections. Code around here could further be refactored. This is only relevant for GC stack scanning, since we would want to scan only slots containing MONO_TYPE_I.
--- src/mono/mono/metadata/class-getters.h | 1 + src/mono/mono/mini/interp/interp-internals.h | 4 + src/mono/mono/mini/interp/interp.c | 73 ++++++++++- src/mono/mono/mini/interp/interp.h | 3 +- src/mono/mono/mini/interp/transform-opt.c | 7 +- src/mono/mono/mini/interp/transform.c | 127 +++++++++++++++++-- src/mono/mono/mini/interp/transform.h | 4 + 7 files changed, 203 insertions(+), 16 deletions(-) diff --git a/src/mono/mono/metadata/class-getters.h b/src/mono/mono/metadata/class-getters.h index eb69558a1d01b1..57ff9afefceb37 100644 --- a/src/mono/mono/metadata/class-getters.h +++ b/src/mono/mono/metadata/class-getters.h @@ -39,6 +39,7 @@ MONO_CLASS_GETTER(m_class_is_delegate, gboolean, , MonoClass, delegate) MONO_CLASS_GETTER(m_class_is_gc_descr_inited, gboolean, , MonoClass, gc_descr_inited) MONO_CLASS_GETTER(m_class_has_cctor, gboolean, , MonoClass, has_cctor) MONO_CLASS_GETTER(m_class_has_references, gboolean, , MonoClass, has_references) +MONO_CLASS_GETTER(m_class_has_ref_fields, gboolean, , MonoClass, has_ref_fields) MONO_CLASS_GETTER(m_class_has_static_refs, gboolean, , MonoClass, has_static_refs) MONO_CLASS_GETTER(m_class_has_no_special_static_fields, gboolean, , MonoClass, no_special_static_fields) MONO_CLASS_GETTER(m_class_is_nested_classes_inited, gboolean, , MonoClass, nested_classes_inited) diff --git a/src/mono/mono/mini/interp/interp-internals.h b/src/mono/mono/mini/interp/interp-internals.h index c5f3707ab1ca5f..a815f39f9c8e67 100644 --- a/src/mono/mono/mini/interp/interp-internals.h +++ b/src/mono/mono/mini/interp/interp-internals.h @@ -145,6 +145,7 @@ struct InterpMethod { MonoFtnDesc *ftndesc_unbox; MonoDelegateTrampInfo *del_info; + /* locals_size is equal to the offset of the param_area */ guint32 locals_size; guint32 alloca_size; int num_clauses; // clauses @@ -153,6 +154,7 @@ struct InterpMethod { unsigned int hasthis; // boolean MonoProfilerCallInstrumentationFlags prof_flags; InterpMethodCodeType code_type; + MonoBitSet *ref_slots; #ifdef 
ENABLE_EXPERIMENT_TIERED MiniTieredCounter tiered_counter; #endif @@ -268,6 +270,8 @@ typedef struct { guchar *stack_pointer; /* Used for allocation of localloc regions */ FrameDataAllocator data_stack; + /* If bit n is set, it means that the n-th stack slot (pointer sized) from stack_start doesn't contain any refs */ + guint8 *no_ref_slots; } ThreadContext; typedef struct { diff --git a/src/mono/mono/mini/interp/interp.c b/src/mono/mono/mini/interp/interp.c index ee9566afc23397..f8e2ad02ad95cf 100644 --- a/src/mono/mono/mini/interp/interp.c +++ b/src/mono/mono/mini/interp/interp.c @@ -412,6 +412,9 @@ get_context (void) if (context == NULL) { context = g_new0 (ThreadContext, 1); context->stack_start = (guchar*)mono_valloc_aligned (INTERP_STACK_SIZE, MINT_STACK_ALIGNMENT, MONO_MMAP_READ | MONO_MMAP_WRITE, MONO_MEM_ACCOUNT_INTERP_STACK); + // A bit for every pointer sized slot in the stack. FIXME don't allocate whole bit array + if (mono_interp_opt & INTERP_OPT_PRECISE_GC) + context->no_ref_slots = (guchar*)mono_valloc (NULL, INTERP_STACK_SIZE / (8 * sizeof (gpointer)), MONO_MMAP_READ | MONO_MMAP_WRITE, MONO_MEM_ACCOUNT_INTERP_STACK); context->stack_end = context->stack_start + INTERP_STACK_SIZE - INTERP_REDZONE_SIZE; context->stack_real_end = context->stack_start + INTERP_STACK_SIZE; /* We reserve a stack slot at the top of the interp stack to make temp objects visible to GC */ @@ -8011,6 +8014,8 @@ interp_parse_options (const char *options) #endif else if (strncmp (arg, "ssa", 3) == 0) opt = INTERP_OPT_SSA; + else if (strncmp (arg, "precise", 7) == 0) + opt = INTERP_OPT_PRECISE_GC; else if (strncmp (arg, "all", 3) == 0) opt = ~INTERP_OPT_NONE; @@ -8473,6 +8478,57 @@ interp_stop_single_stepping (void) ss_enabled = FALSE; } + +static void +interp_mark_frame_no_ref_slots (ThreadContext *context, InterpFrame *frame, gpointer *top_limit) +{ + InterpMethod *imethod = frame->imethod; + gpointer *frame_stack = (gpointer*)frame->stack; + gpointer *frame_stack_end = 
(gpointer*)((guchar*)frame->stack + imethod->alloca_size); + // The way interpreter implements calls is by moving arguments to the param area, at the + // top of the stack and then proceed with the call. Up to the moment of the call these slots + // are owned by the calling frame. Once we do the call, the stack pointer of the called + // frame will point inside the param area of the calling frame. + // + // We mark no ref slots from top to bottom and we use the top limit to ignore slots + // that were already handled in the called frame. + if (top_limit && top_limit < frame_stack_end) + frame_stack_end = top_limit; + + for (gpointer *current = frame_stack; current < frame_stack_end; current++) { + gsize slot_index = current - frame_stack; + if (!mono_bitset_test_fast (imethod->ref_slots, slot_index)) { + gsize global_slot_index = current - (gpointer*)context->stack_start; + gsize table_index = global_slot_index / 8; + int bit_index = global_slot_index % 8; + context->no_ref_slots [table_index] |= 1 << bit_index; + } + } +} + +static void +interp_mark_no_ref_slots (ThreadContext *context, MonoLMF* lmf) +{ + memset (context->no_ref_slots, 0, (context->stack_pointer - context->stack_start) / (8 * sizeof (gpointer)) + 1); + while (lmf) { + if ((gsize)lmf->previous_lmf & 2) { + MonoLMFExt *lmf_ext = (MonoLMFExt*) lmf; + if (lmf_ext->kind == MONO_LMFEXT_INTERP_EXIT || lmf_ext->kind == MONO_LMFEXT_INTERP_EXIT_WITH_CTX) { + InterpFrame *frame = (InterpFrame*)lmf_ext->interp_exit_data; + gpointer *top_limit = NULL; + while (frame) { + if (frame->imethod) { + interp_mark_frame_no_ref_slots (context, frame, top_limit); + top_limit = (gpointer*)frame->stack; + } + frame = frame->parent; + } + } + } + lmf = (MonoLMF*)((gsize)lmf->previous_lmf & ~3); + } +} + /* * interp_mark_stack: * @@ -8505,9 +8561,20 @@ interp_mark_stack (gpointer thread_data, GcScanFunc func, gpointer gc_data, gboo if (!context || !context->stack_start) return; - // FIXME: Scan the whole area with 1 call - 
for (gpointer *p = (gpointer*)context->stack_start; p < (gpointer*)context->stack_pointer; p++) - func (p, gc_data); + if (mono_interp_opt & INTERP_OPT_PRECISE_GC) { + MonoLMF **lmf_addr = (MonoLMF**)info->tls [TLS_KEY_LMF_ADDR]; + if (lmf_addr) + interp_mark_no_ref_slots (context, *lmf_addr); + } + + int slot_index = 0; + for (gpointer *p = (gpointer*)context->stack_start; p < (gpointer*)context->stack_pointer; p++) { + if (context->no_ref_slots && (context->no_ref_slots [slot_index / 8] & (1 << (slot_index % 8)))) + ;// This slot is marked as no ref, we don't scan it + else + func (p, gc_data); + slot_index++; + } FrameDataFragment *frag; for (frag = context->data_stack.first; frag; frag = frag->next) { diff --git a/src/mono/mono/mini/interp/interp.h b/src/mono/mono/mini/interp/interp.h index 742e93bf06e59a..a09111c490bec4 100644 --- a/src/mono/mono/mini/interp/interp.h +++ b/src/mono/mono/mini/interp/interp.h @@ -42,7 +42,8 @@ enum { INTERP_OPT_JITERPRETER = 64, #endif INTERP_OPT_SSA = 128, - INTERP_OPT_DEFAULT = INTERP_OPT_INLINE | INTERP_OPT_CPROP | INTERP_OPT_SUPER_INSTRUCTIONS | INTERP_OPT_BBLOCKS | INTERP_OPT_TIERING | INTERP_OPT_SIMD | INTERP_OPT_SSA + INTERP_OPT_PRECISE_GC = 256, + INTERP_OPT_DEFAULT = INTERP_OPT_INLINE | INTERP_OPT_CPROP | INTERP_OPT_SUPER_INSTRUCTIONS | INTERP_OPT_BBLOCKS | INTERP_OPT_TIERING | INTERP_OPT_SIMD | INTERP_OPT_SSA | INTERP_OPT_PRECISE_GC #if HOST_BROWSER | INTERP_OPT_JITERPRETER #endif diff --git a/src/mono/mono/mini/interp/transform-opt.c b/src/mono/mono/mini/interp/transform-opt.c index 88231ac8bd40fa..4ee96b7a541d28 100644 --- a/src/mono/mono/mini/interp/transform-opt.c +++ b/src/mono/mono/mini/interp/transform-opt.c @@ -32,7 +32,9 @@ alloc_var_offset (TransformData *td, int local, gint32 *ptos) int interp_alloc_global_var_offset (TransformData *td, int var) { - return alloc_var_offset (td, var, &td->total_locals_size); + int offset = alloc_var_offset (td, var, &td->total_locals_size); + interp_mark_ref_slots_for_var 
(td, var); + return offset; } static void @@ -464,6 +466,8 @@ interp_alloc_offsets (TransformData *td) add_active_call (td, &ac, td->vars [var].call); } else if (!td->vars [var].global && td->vars [var].offset == -1) { alloc_var_offset (td, var, &current_offset); + interp_mark_ref_slots_for_var (td, var); + if (current_offset > final_total_locals_size) final_total_locals_size = current_offset; @@ -492,6 +496,7 @@ interp_alloc_offsets (TransformData *td) // These are allocated separately at the end of the stack if (td->vars [i].call_args) { td->vars [i].offset += td->param_area_offset; + interp_mark_ref_slots_for_var (td, i); final_total_locals_size = MAX (td->vars [i].offset + td->vars [i].size, final_total_locals_size); } } diff --git a/src/mono/mono/mini/interp/transform.c b/src/mono/mono/mini/interp/transform.c index 144967c4e8806e..70773797a47f79 100644 --- a/src/mono/mono/mini/interp/transform.c +++ b/src/mono/mono/mini/interp/transform.c @@ -551,6 +551,22 @@ set_simple_type_and_var (TransformData *td, StackInfo *sp, int type) set_type_and_var (td, sp, type, NULL); } +static void +push_mono_type (TransformData *td, MonoType *type, int mt, MonoClass *k) +{ + if (mt == -1) + mt = mono_mint_type (type); + if (!k) + k = mono_class_from_mono_type_internal (type); + + g_assert (mt != MINT_TYPE_VT); + + if (m_type_is_byref (type)) + push_type_explicit (td, STACK_TYPE_MP, k, MINT_STACK_SLOT_SIZE); + else + push_type_explicit (td, stack_type [mt], k, MINT_STACK_SLOT_SIZE); +} + +static void push_type (TransformData *td, int type, MonoClass *k) { @@ -1006,7 +1022,7 @@ load_arg(TransformData *td, int n) if (hasthis && n == 0) { mt = MINT_TYPE_I; klass = NULL; - push_type (td, stack_type [mt], klass); + push_type (td, STACK_TYPE_MP, klass); } else { g_assert (size < G_MAXUINT16); push_type_vt (td, klass, size); @@ -1020,7 +1036,7 @@ load_arg(TransformData *td, int n) if (mt == MINT_TYPE_O) klass = mono_class_from_mono_type_internal (type); } - push_type (td, stack_type [mt], 
klass); + push_mono_type (td, type, mt, klass); } interp_add_ins (td, interp_get_mov_for_type (mt, TRUE)); interp_ins_set_sreg (td->last_ins, n); @@ -1069,7 +1085,7 @@ load_local (TransformData *td, int local) MonoClass *klass = NULL; if (mt == MINT_TYPE_O) klass = mono_class_from_mono_type_internal (type); - push_type (td, stack_type [mt], klass); + push_mono_type (td, type, mt, klass); } interp_add_ins (td, interp_get_mov_for_type (mt, TRUE)); interp_ins_set_sreg (td->last_ins, local); @@ -3699,7 +3715,7 @@ interp_transform_call (TransformData *td, MonoMethod *method, MonoMethod *target return FALSE; } } else { - push_type (td, stack_type[mt], klass); + push_mono_type (td, csignature->ret, mt, klass); } dreg = td->sp [-1].var; } else { @@ -4346,6 +4362,7 @@ interp_method_compute_offsets (TransformData *td, InterpMethod *imethod, MonoMet td->vars [i].size = size; offset = ALIGN_TO (offset, align); td->vars [i].offset = offset; + interp_mark_ref_slots_for_var (td, i); offset += size; } offset = ALIGN_TO (offset, MINT_STACK_ALIGNMENT); @@ -4371,6 +4388,7 @@ interp_method_compute_offsets (TransformData *td, InterpMethod *imethod, MonoMet td->vars [index].mt = mono_mint_type (header->locals [i]); td->vars [index].ext_index = -1; td->vars [index].size = size; + interp_mark_ref_slots_for_var (td, index); // Every local takes a MINT_STACK_SLOT_SIZE so IL locals have same behavior as execution locals offset += size; } @@ -4568,7 +4586,7 @@ interp_emit_sfld_access (TransformData *td, MonoClassField *field, MonoClass *fi interp_add_ins (td, interp_get_ldind_for_mt (mt)); interp_ins_set_sreg (td->last_ins, td->sp [-1].var); td->sp--; - push_type (td, stack_type [mt], field_class); + push_mono_type (td, ftype, mt, field_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].var); } } else { @@ -4595,14 +4613,14 @@ interp_emit_sfld_access (TransformData *td, MonoClassField *field, MonoClass *fi if (mt == MINT_TYPE_VT) { push_type_vt (td, field_class, size); } else { - 
push_type (td, stack_type [mt], field_class); + push_mono_type (td, ftype, mt, field_class); } } else if (mt == MINT_TYPE_VT) { interp_add_ins (td, MINT_LDSFLD_VT); push_type_vt (td, field_class, size); } else { interp_add_ins (td, MINT_LDSFLD_I1 + mt - MINT_TYPE_I1); - push_type (td, stack_type [mt], field_class); + push_mono_type (td, ftype, mt, field_class); } interp_ins_set_dreg (td->last_ins, td->sp [-1].var); } else { @@ -6709,7 +6727,7 @@ generate_code (TransformData *td, MonoMethod *method, MonoMethodHeader *header, if (mt == MINT_TYPE_VT) push_type_vt (td, field_klass, field_size); else - push_type (td, stack_type [mt], field_klass); + push_mono_type (td, ftype, mt, field_klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].var); } else { if (G_UNLIKELY (m_field_is_from_update (field))) { @@ -6739,7 +6757,7 @@ generate_code (TransformData *td, MonoMethod *method, MonoMethodHeader *header, if (mt == MINT_TYPE_VT) push_type_vt (td, field_klass, field_size); else - push_type (td, stack_type [mt], field_klass); + push_mono_type (td, ftype, mt, field_klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].var); } } @@ -7695,8 +7713,7 @@ generate_code (TransformData *td, MonoMethod *method, MonoMethodHeader *header, int param_offset = get_tos_offset (td); if (!MONO_TYPE_IS_VOID (info->sig->ret)) { - mt = mono_mint_type (info->sig->ret); - push_simple_type (td, stack_type [mt]); + push_mono_type (td, info->sig->ret, -1, NULL); dreg = td->sp [-1].var; } else { // dummy dreg @@ -8507,6 +8524,78 @@ get_short_brop (int opcode) return opcode; } +static void +interp_mark_ref_slots_for_vt (TransformData *td, int base_offset, MonoClass *klass) +{ + if (!m_class_has_references (klass) && !m_class_has_ref_fields (klass)) + return; + + gpointer iter = NULL; + MonoClassField *field; + while ((field = mono_class_get_fields_internal (klass, &iter))) { + MonoType *ftype = mono_field_get_type_internal (field); + if (ftype->attrs & FIELD_ATTRIBUTE_STATIC) + continue; + int 
offset = base_offset + m_field_get_offset (field) - MONO_ABI_SIZEOF (MonoObject); +retry: + if (mini_type_is_reference (ftype) || ftype->type == MONO_TYPE_I || ftype->type == MONO_TYPE_U || m_type_is_byref (ftype)) { + int index = offset / sizeof (gpointer); + mono_bitset_set_fast (td->ref_slots, index); + if (td->verbose_level) + g_print ("Stack ref slot vt field at off %d\n", offset); + } else if (ftype->type == MONO_TYPE_VALUETYPE || ftype->type == MONO_TYPE_GENERICINST) { + interp_mark_ref_slots_for_vt (td, offset, mono_class_from_mono_type_internal (ftype)); + } + + if (m_class_is_inlinearray (klass)) { + int max_offset = base_offset + m_class_get_instance_size (klass) - MONO_ABI_SIZEOF (MonoObject); + int align; + int field_size = mono_type_size (ftype, &align); + offset += field_size; + offset = ALIGN_TO (offset, align); + if (offset < max_offset) + goto retry; + } + } +} + +void +interp_mark_ref_slots_for_var (TransformData *td, int var) +{ + if (!(mono_interp_opt & INTERP_OPT_PRECISE_GC)) + return; + + g_assert (td->vars [var].offset != -1); + + gsize max_index = (td->vars [var].offset + td->vars [var].size) / sizeof (gpointer); + + if (!td->ref_slots || max_index >= td->ref_slots->size) { + guint32 old_size = td->ref_slots ? (guint32)td->ref_slots->size : 0; + guint32 new_size = old_size ? 
old_size * 2 : 32; + + gpointer mem = mono_mempool_alloc0 (td->mempool, mono_bitset_alloc_size (new_size, 0)); + MonoBitSet *new_ref_slots = mono_bitset_mem_new (mem, new_size, 0); + + if (old_size) + memcpy (&new_ref_slots->data, &td->ref_slots->data, old_size / 8); + td->ref_slots = new_ref_slots; + } + + MonoType *type = td->vars [var].type; + if (td->vars [var].mt == MINT_TYPE_VT) { + MonoClass *klass = mono_class_from_mono_type_internal (type); + interp_mark_ref_slots_for_vt (td, td->vars [var].offset, klass); + } else { + // Managed pointers in interp are normally MONO_TYPE_I + if (mini_type_is_reference (type) || type->type == MONO_TYPE_I || type->type == MONO_TYPE_U || m_type_is_byref (type)) { + int index = td->vars [var].offset / sizeof (gpointer); + mono_bitset_set_fast (td->ref_slots, index); + if (td->verbose_level) + g_print ("Stack ref slot at off %d for var %d\n", index * sizeof (gpointer), var); + } + } +} + static int get_var_offset (TransformData *td, int var) { @@ -8526,6 +8615,7 @@ get_var_offset (TransformData *td, int var) g_assert (td->vars [var].execution_stack); td->vars [var].offset = td->total_locals_size + td->vars [var].stack_offset; + interp_mark_ref_slots_for_var (td, var); return td->vars [var].offset; } @@ -9155,6 +9245,21 @@ generate (MonoMethod *method, MonoMethodHeader *header, InterpMethod *rtm, MonoG mono_interp_register_imethod_data_items (rtm->data_items, td->imethod_items); rtm->patchpoint_data = td->patchpoint_data; + if (td->ref_slots) { + gpointer ref_slots_mem = mono_mem_manager_alloc0 (td->mem_manager, mono_bitset_alloc_size (rtm->alloca_size / sizeof (gpointer), 0)); + rtm->ref_slots = mono_bitset_mem_new (ref_slots_mem, rtm->alloca_size / sizeof (gpointer), 0); + gsize copy_size = rtm->ref_slots->size; + if (td->ref_slots->size < copy_size) + copy_size = td->ref_slots->size; + memcpy (&rtm->ref_slots->data, &td->ref_slots->data, copy_size / 8); + if (!td->optimized) { + // Unoptimized code can have some stack slot 
moving patterns as part of calls. + // Just conservatively mark all these slots as potentially containing refs. + for (guint32 offset = rtm->locals_size; offset < rtm->alloca_size; offset += sizeof (gpointer)) + mono_bitset_set (rtm->ref_slots, offset / sizeof (gpointer)); + } + } + /* Save debug info */ interp_save_debug_info (rtm, header, td, td->line_numbers); diff --git a/src/mono/mono/mini/interp/transform.h b/src/mono/mono/mini/interp/transform.h index f05556a44c7c21..1e6185f8089c80 100644 --- a/src/mono/mono/mini/interp/transform.h +++ b/src/mono/mono/mini/interp/transform.h @@ -340,6 +340,8 @@ typedef struct int inline_depth; int patchpoint_data_n; int *patchpoint_data; + // This marks each stack slot offset that might contain refs throughout the execution of this method + MonoBitSet *ref_slots; guint has_localloc : 1; // If method compilation fails due to certain limits being exceeded, we disable inlining // and retry compilation. @@ -543,6 +545,8 @@ interp_foreach_ins_var (TransformData *td, InterpInst *ins, gpointer data, void void interp_foreach_ins_svar (TransformData *td, InterpInst *ins, gpointer data, void (*callback)(TransformData*, int*, gpointer)); +void +interp_mark_ref_slots_for_var (TransformData *td, int var); /* Forward definitions for simd methods */ static gboolean From 7def0b725852d55741262f43ed36ed501606f89f Mon Sep 17 00:00:00 2001 From: Stephen Toub Date: Fri, 5 Apr 2024 07:33:11 -0400 Subject: [PATCH 109/132] Use \e instead of \u001B or \x1B (#99659) --- .../src/AnsiParser.cs | 56 +++++++++---------- .../AnsiParserTests.cs | 56 +++++++++---------- .../TextWriterExtensionsTests.cs | 8 +-- .../System.Console/src/System/IO/KeyParser.cs | 2 +- .../src/System/TerminalFormatStrings.cs | 10 ++-- .../System.Console/tests/KeyParserTests.cs | 44 +++++++-------- .../System.Console/tests/TermInfo.Unix.cs | 36 ++++++------ .../Strings/StringSearchValues.cs | 2 +- .../Text/RegularExpressions/RegexParser.cs | 2 +- 
.../FunctionalTests/Regex.Groups.Tests.cs | 4 +- .../tests/FunctionalTests/RegexPcreTests.cs | 2 +- 11 files changed, 111 insertions(+), 111 deletions(-) diff --git a/src/libraries/Microsoft.Extensions.Logging.Console/src/AnsiParser.cs b/src/libraries/Microsoft.Extensions.Logging.Console/src/AnsiParser.cs index 4e8725118b1f13..71ea987bff1953 100644 --- a/src/libraries/Microsoft.Extensions.Logging.Console/src/AnsiParser.cs +++ b/src/libraries/Microsoft.Extensions.Logging.Console/src/AnsiParser.cs @@ -50,7 +50,7 @@ public void Parse(string message) ConsoleColor? foreground = null; ConsoleColor? background = null; var span = message.AsSpan(); - const char EscapeChar = '\x1B'; + const char EscapeChar = '\e'; ConsoleColor? color = null; bool isBright = false; for (int i = 0; i < span.Length; i++) @@ -59,7 +59,7 @@ public void Parse(string message) { if (span[i + 3] == 'm') { - // Example: \x1B[1m + // Example: \e[1m if (IsDigit(span[i + 2])) { escapeCode = (int)(span[i + 2] - '0'); @@ -77,7 +77,7 @@ public void Parse(string message) } else if (span.Length >= i + 5 && span[i + 4] == 'm') { - // Example: \x1B[40m + // Example: \e[40m if (IsDigit(span[i + 2]) && IsDigit(span[i + 3])) { escapeCode = (int)(span[i + 2] - '0') * 10 + (int)(span[i + 3] - '0'); @@ -127,28 +127,28 @@ public void Parse(string message) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static bool IsDigit(char c) => (uint)(c - '0') <= ('9' - '0'); - internal const string DefaultForegroundColor = "\x1B[39m\x1B[22m"; // reset to default foreground color - internal const string DefaultBackgroundColor = "\x1B[49m"; // reset to the background color + internal const string DefaultForegroundColor = "\e[39m\e[22m"; // reset to default foreground color + internal const string DefaultBackgroundColor = "\e[49m"; // reset to the background color internal static string GetForegroundColorEscapeCode(ConsoleColor color) { return color switch { - ConsoleColor.Black => "\x1B[30m", - ConsoleColor.DarkRed => 
"\x1B[31m", - ConsoleColor.DarkGreen => "\x1B[32m", - ConsoleColor.DarkYellow => "\x1B[33m", - ConsoleColor.DarkBlue => "\x1B[34m", - ConsoleColor.DarkMagenta => "\x1B[35m", - ConsoleColor.DarkCyan => "\x1B[36m", - ConsoleColor.Gray => "\x1B[37m", - ConsoleColor.Red => "\x1B[1m\x1B[31m", - ConsoleColor.Green => "\x1B[1m\x1B[32m", - ConsoleColor.Yellow => "\x1B[1m\x1B[33m", - ConsoleColor.Blue => "\x1B[1m\x1B[34m", - ConsoleColor.Magenta => "\x1B[1m\x1B[35m", - ConsoleColor.Cyan => "\x1B[1m\x1B[36m", - ConsoleColor.White => "\x1B[1m\x1B[37m", + ConsoleColor.Black => "\e[30m", + ConsoleColor.DarkRed => "\e[31m", + ConsoleColor.DarkGreen => "\e[32m", + ConsoleColor.DarkYellow => "\e[33m", + ConsoleColor.DarkBlue => "\e[34m", + ConsoleColor.DarkMagenta => "\e[35m", + ConsoleColor.DarkCyan => "\e[36m", + ConsoleColor.Gray => "\e[37m", + ConsoleColor.Red => "\e[1m\e[31m", + ConsoleColor.Green => "\e[1m\e[32m", + ConsoleColor.Yellow => "\e[1m\e[33m", + ConsoleColor.Blue => "\e[1m\e[34m", + ConsoleColor.Magenta => "\e[1m\e[35m", + ConsoleColor.Cyan => "\e[1m\e[36m", + ConsoleColor.White => "\e[1m\e[37m", _ => DefaultForegroundColor // default foreground color }; } @@ -157,14 +157,14 @@ internal static string GetBackgroundColorEscapeCode(ConsoleColor color) { return color switch { - ConsoleColor.Black => "\x1B[40m", - ConsoleColor.DarkRed => "\x1B[41m", - ConsoleColor.DarkGreen => "\x1B[42m", - ConsoleColor.DarkYellow => "\x1B[43m", - ConsoleColor.DarkBlue => "\x1B[44m", - ConsoleColor.DarkMagenta => "\x1B[45m", - ConsoleColor.DarkCyan => "\x1B[46m", - ConsoleColor.Gray => "\x1B[47m", + ConsoleColor.Black => "\e[40m", + ConsoleColor.DarkRed => "\e[41m", + ConsoleColor.DarkGreen => "\e[42m", + ConsoleColor.DarkYellow => "\e[43m", + ConsoleColor.DarkBlue => "\e[44m", + ConsoleColor.DarkMagenta => "\e[45m", + ConsoleColor.DarkCyan => "\e[46m", + ConsoleColor.Gray => "\e[47m", _ => DefaultBackgroundColor // Use default background color }; } diff --git 
a/src/libraries/Microsoft.Extensions.Logging.Console/tests/Microsoft.Extensions.Logging.Console.Tests/AnsiParserTests.cs b/src/libraries/Microsoft.Extensions.Logging.Console/tests/Microsoft.Extensions.Logging.Console.Tests/AnsiParserTests.cs index 74d04894eba40c..215535c8efb9e4 100644 --- a/src/libraries/Microsoft.Extensions.Logging.Console/tests/Microsoft.Extensions.Logging.Console.Tests/AnsiParserTests.cs +++ b/src/libraries/Microsoft.Extensions.Logging.Console/tests/Microsoft.Extensions.Logging.Console.Tests/AnsiParserTests.cs @@ -11,12 +11,12 @@ namespace Microsoft.Extensions.Logging.Console.Test { public class AnsiParserTests { - private const char EscapeChar = '\x1B'; + private const char EscapeChar = '\e'; [Theory] [InlineData(1, "No Color", "No Color")] - [InlineData(2, "\x1B[41mColored\x1B[49mNo Color", "No Color")] - [InlineData(2, "\x1B[41m\x1B[1m\x1B[31mmColored\x1B[39m\x1B[49mNo Color", "No Color")] + [InlineData(2, "\e[41mColored\e[49mNo Color", "No Color")] + [InlineData(2, "\e[41m\e[1m\e[31mmColored\e[39m\e[49mNo Color", "No Color")] public void Parse_CheckTimesWrittenToConsole(int numSegments, string message, string lastSegment) { // Arrange @@ -151,33 +151,33 @@ public void Parse_RepeatedColorChange_PicksLastSet() [Theory] // supported - [InlineData("\x1B[77mInfo", "Info")] - [InlineData("\x1B[77m\x1B[1m\x1B[2m\x1B[0mInfo\x1B[1m", "Info")] - [InlineData("\x1B[7mInfo", "Info")] - [InlineData("\x1B[40m\x1B[1m\x1B[33mwarn\x1B[39m\x1B[22m\x1B[49m:", "warn", ":")] + [InlineData("\e[77mInfo", "Info")] + [InlineData("\e[77m\e[1m\e[2m\e[0mInfo\e[1m", "Info")] + [InlineData("\e[7mInfo", "Info")] + [InlineData("\e[40m\e[1m\e[33mwarn\e[39m\e[22m\e[49m:", "warn", ":")] // unsupported: skips - [InlineData("Info\x1B[77m:", "Info", ":")] - [InlineData("Info\x1B[7m:", "Info", ":")] + [InlineData("Info\e[77m:", "Info", ":")] + [InlineData("Info\e[7m:", "Info", ":")] // treats as content - [InlineData("\x1B", "\x1B")] - [InlineData("\x1B ", "\x1B ")] - 
[InlineData("\x1Bm", "\x1Bm")] - [InlineData("\x1B m", "\x1B m")] - [InlineData("\x1Bxym", "\x1Bxym")] - [InlineData("\x1B[", "\x1B[")] - [InlineData("\x1B[m", "\x1B[m")] - [InlineData("\x1B[ ", "\x1B[ ")] - [InlineData("\x1B[ m", "\x1B[ m")] - [InlineData("\x1B[xym", "\x1B[xym")] - [InlineData("\x1B[7777m", "\x1B[7777m")] - [InlineData("\x1B\x1B\x1B", "\x1B\x1B\x1B")] - [InlineData("Message\x1B\x1B\x1B", "Message\x1B\x1B\x1B")] - [InlineData("\x1B\x1BMessage\x1B", "\x1B\x1BMessage\x1B")] - [InlineData("\x1B\x1B\x1BMessage", "\x1B\x1B\x1BMessage")] - [InlineData("Message\x1B ", "Message\x1B ")] - [InlineData("\x1BmMessage", "\x1BmMessage")] - [InlineData("\x1B[77m\x1B m\x1B[40m", "\x1B m")] - [InlineData("\x1B mMessage\x1Bxym", "\x1B mMessage\x1Bxym")] + [InlineData("\e", "\e")] + [InlineData("\e ", "\e ")] + [InlineData("\em", "\em")] + [InlineData("\e m", "\e m")] + [InlineData("\exym", "\exym")] + [InlineData("\e[", "\e[")] + [InlineData("\e[m", "\e[m")] + [InlineData("\e[ ", "\e[ ")] + [InlineData("\e[ m", "\e[ m")] + [InlineData("\e[xym", "\e[xym")] + [InlineData("\e[7777m", "\e[7777m")] + [InlineData("\e\e\e", "\e\e\e")] + [InlineData("Message\e\e\e", "Message\e\e\e")] + [InlineData("\e\eMessage\e", "\e\eMessage\e")] + [InlineData("\e\e\eMessage", "\e\e\eMessage")] + [InlineData("Message\e ", "Message\e ")] + [InlineData("\emMessage", "\emMessage")] + [InlineData("\e[77m\e m\e[40m", "\e m")] + [InlineData("\e mMessage\exym", "\e mMessage\exym")] public void Parse_ValidSupportedOrUnsupportedCodesInMessage_MessageParsedSuccessfully(string messageWithUnsupportedCode, params string[] output) { // Arrange diff --git a/src/libraries/Microsoft.Extensions.Logging.Console/tests/Microsoft.Extensions.Logging.Console.Tests/TextWriterExtensionsTests.cs b/src/libraries/Microsoft.Extensions.Logging.Console/tests/Microsoft.Extensions.Logging.Console.Tests/TextWriterExtensionsTests.cs index 3c6520ebb9bfc5..79c1d6b14ea99b 100644 --- 
a/src/libraries/Microsoft.Extensions.Logging.Console/tests/Microsoft.Extensions.Logging.Console.Tests/TextWriterExtensionsTests.cs +++ b/src/libraries/Microsoft.Extensions.Logging.Console/tests/Microsoft.Extensions.Logging.Console.Tests/TextWriterExtensionsTests.cs @@ -16,7 +16,7 @@ public void WriteColoredMessage_WithForegroundEscapeCode_AndNoBackgroundColorSpe var message = "Request received"; var expectedMessage = AnsiParser.GetForegroundColorEscapeCode(ConsoleColor.DarkGreen) + message - + "\x1B[39m\x1B[22m"; //resets foreground color + + "\e[39m\e[22m"; //resets foreground color var textWriter = new StringWriter(); // Act @@ -33,7 +33,7 @@ public void WriteColoredMessage_WithBackgroundEscapeCode_AndNoForegroundColorSpe var message = "Request received"; var expectedMessage = AnsiParser.GetBackgroundColorEscapeCode(ConsoleColor.Red) + message - + "\x1B[49m"; //resets background color + + "\e[49m"; //resets background color var textWriter = new StringWriter(); // Act @@ -51,8 +51,8 @@ public void WriteColoredMessage_InOrder_WhenBothForegroundOrBackgroundColorsSpec var expectedMessage = AnsiParser.GetBackgroundColorEscapeCode(ConsoleColor.Red) + AnsiParser.GetForegroundColorEscapeCode(ConsoleColor.DarkGreen) + "Request received" - + "\x1B[39m\x1B[22m" //resets foreground color - + "\x1B[49m"; //resets background color + + "\e[39m\e[22m" //resets foreground color + + "\e[49m"; //resets background color var textWriter = new StringWriter(); // Act diff --git a/src/libraries/System.Console/src/System/IO/KeyParser.cs b/src/libraries/System.Console/src/System/IO/KeyParser.cs index fd09e7fe227eea..23326d485d4977 100644 --- a/src/libraries/System.Console/src/System/IO/KeyParser.cs +++ b/src/libraries/System.Console/src/System/IO/KeyParser.cs @@ -8,7 +8,7 @@ namespace System.IO; internal static class KeyParser { - private const char Escape = '\u001B'; + private const char Escape = '\e'; private const char Delete = '\u007F'; private const char VtSequenceEndTag = '~'; 
private const char ModifierSeparator = ';'; diff --git a/src/libraries/System.Console/src/System/TerminalFormatStrings.cs b/src/libraries/System.Console/src/System/TerminalFormatStrings.cs index e4e0392d82b114..a1f194184a09e6 100644 --- a/src/libraries/System.Console/src/System/TerminalFormatStrings.cs +++ b/src/libraries/System.Console/src/System/TerminalFormatStrings.cs @@ -46,7 +46,7 @@ internal sealed class TerminalFormatStrings /// doesn't contain it (as appears to be the case with e.g. screen and tmux on Ubuntu), at the risk /// of outputting the sequence on some terminal that's not compatible. /// - public const string CursorPositionReport = "\x1B[6n"; + public const string CursorPositionReport = "\e[6n"; /// /// The dictionary of keystring to ConsoleKeyInfo. /// Only some members of the ConsoleKeyInfo are used; in particular, the actual char is ignored. @@ -210,13 +210,13 @@ private static string GetTitle(TermInfo.Database db) case "linux": case "rxvt": case "xterm": - return "\x1B]0;%p1%s\x07"; + return "\e]0;%p1%s\x07"; case "cygwin": - return "\x1B];%p1%s\x07"; + return "\e];%p1%s\x07"; case "konsole": - return "\x1B]30;%p1%s\x07"; + return "\e]30;%p1%s\x07"; case "screen": - return "\x1Bk%p1%s\x1B\\"; + return "\ek%p1%s\e\\"; default: return string.Empty; } diff --git a/src/libraries/System.Console/tests/KeyParserTests.cs b/src/libraries/System.Console/tests/KeyParserTests.cs index 557e889d2fb533..a893988995d5ee 100644 --- a/src/libraries/System.Console/tests/KeyParserTests.cs +++ b/src/libraries/System.Console/tests/KeyParserTests.cs @@ -42,7 +42,7 @@ public class KeyParserTests yield return ('.', ConsoleKey.OemPeriod); yield return (',', ConsoleKey.OemComma); - yield return ('\u001B', ConsoleKey.Escape); + yield return ('\e', ConsoleKey.Escape); for (char i = '0'; i <= '9'; i++) { @@ -212,7 +212,7 @@ public void KeysAreProperlyMapped(TerminalData terminalData, byte[] recordedByte yield return (GetString(33), ConsoleKey.F19); yield return 
(GetString(34), ConsoleKey.F20); - static string GetString(int i) => $"\u001B[{i}~"; + static string GetString(int i) => $"\e[{i}~"; } } @@ -223,7 +223,7 @@ public static IEnumerable VTSequencesArguments [MemberData(nameof(VTSequencesArguments))] public void VTSequencesAreProperlyMapped(TerminalData terminalData, string input, ConsoleKey expectedKey) { - if (terminalData is RxvtUnicode && input == "\u001B[4~" && expectedKey == ConsoleKey.End) + if (terminalData is RxvtUnicode && input == "\e[4~" && expectedKey == ConsoleKey.End) { expectedKey = ConsoleKey.Select; // rxvt binds this key to Select in Terminfo and uses "^[[8~" for End key } @@ -239,10 +239,10 @@ public void VTSequencesAreProperlyMapped(TerminalData terminalData, string input { get { - yield return ("\u001BOa", ConsoleKey.UpArrow); - yield return ("\u001BOb", ConsoleKey.DownArrow); - yield return ("\u001BOc", ConsoleKey.RightArrow); - yield return ("\u001BOd", ConsoleKey.LeftArrow); + yield return ("\eOa", ConsoleKey.UpArrow); + yield return ("\eOb", ConsoleKey.DownArrow); + yield return ("\eOc", ConsoleKey.RightArrow); + yield return ("\eOd", ConsoleKey.LeftArrow); } } @@ -272,9 +272,9 @@ public void ExtendedStringCodePath() // Ctrl+Backspace yield return ("\b", new[] { new ConsoleKeyInfo('\b', ConsoleKey.Backspace, false, false, true) }); // Alt+Backspace - yield return ("\u001B\u007F", new[] { new ConsoleKeyInfo((char)0x7F, ConsoleKey.Backspace, false, true, false) }); + yield return ("\e\u007F", new[] { new ConsoleKeyInfo((char)0x7F, ConsoleKey.Backspace, false, true, false) }); // Ctrl+Alt+Backspace - yield return ("\u001B\b", new[] { new ConsoleKeyInfo('\b', ConsoleKey.Backspace, false, true, true) }); + yield return ("\e\b", new[] { new ConsoleKeyInfo('\b', ConsoleKey.Backspace, false, true, true) }); // Enter yield return ("\r", new[] { new ConsoleKeyInfo('\r', ConsoleKey.Enter, false, false, false) }); // Ctrl+Enter @@ -283,18 +283,18 @@ public void ExtendedStringCodePath() // Escape key 
pressed multiple times for (int i = 1; i <= 5; i++) { - yield return (new string('\u001B', i), Enumerable.Repeat(new ConsoleKeyInfo('\u001B', ConsoleKey.Escape, false, false, false), i).ToArray()); + yield return (new string('\e', i), Enumerable.Repeat(new ConsoleKeyInfo('\e', ConsoleKey.Escape, false, false, false), i).ToArray()); } // Home key (^[[H) followed by H key - yield return ("\u001B[HH", new[] + yield return ("\e[HH", new[] { new ConsoleKeyInfo(default, ConsoleKey.Home, false, false, false), new ConsoleKeyInfo('H', ConsoleKey.H, true, false, false) }); // escape sequence (F12 '^[[24~') followed by an extra tylde: - yield return ($"\u001B[24~~", new[] + yield return ($"\e[24~~", new[] { new ConsoleKeyInfo(default, ConsoleKey.F12, false, false, false), new ConsoleKeyInfo('~', default, false, false, false), @@ -304,9 +304,9 @@ public void ExtendedStringCodePath() // Invalid modifiers (valid values are <2, 8>) foreach (int invalidModifier in new[] { 0, 1, 9 }) { - yield return ($"\u001B[1;{invalidModifier}H", new[] + yield return ($"\e[1;{invalidModifier}H", new[] { - new ConsoleKeyInfo('\u001B', ConsoleKey.Escape, false, false, false), + new ConsoleKeyInfo('\e', ConsoleKey.Escape, false, false, false), new ConsoleKeyInfo('[', default, false, false, false), new ConsoleKeyInfo('1', ConsoleKey.D1, false, false, false), new ConsoleKeyInfo(';', default, false, false, false), @@ -317,9 +317,9 @@ public void ExtendedStringCodePath() // Invalid ID (valid values are <1, 34> except of 9, 16, 22, 27, 30 and 35) foreach (int invalidId in new[] { 16, 22, 27, 30, 35, 36, 77, 99 }) { - yield return ($"\u001B[{invalidId}~", new[] + yield return ($"\e[{invalidId}~", new[] { - new ConsoleKeyInfo('\u001B', ConsoleKey.Escape, false, false, false), + new ConsoleKeyInfo('\e', ConsoleKey.Escape, false, false, false), new ConsoleKeyInfo('[', default, false, false, false), new ConsoleKeyInfo((char)('0' + invalidId / 10), ConsoleKey.D0 + invalidId / 10, false, false, false), new 
ConsoleKeyInfo((char)('0' + invalidId % 10), ConsoleKey.D0 + invalidId % 10, false, false, false), @@ -327,9 +327,9 @@ public void ExtendedStringCodePath() }); } // too long ID (more than 2 digits) - yield return ($"\u001B[111~", new[] + yield return ($"\e[111~", new[] { - new ConsoleKeyInfo('\u001B', ConsoleKey.Escape, false, false, false), + new ConsoleKeyInfo('\e', ConsoleKey.Escape, false, false, false), new ConsoleKeyInfo('[', default, false, false, false), new ConsoleKeyInfo('1', ConsoleKey.D1, false, false, false), new ConsoleKeyInfo('1', ConsoleKey.D1, false, false, false), @@ -337,9 +337,9 @@ public void ExtendedStringCodePath() new ConsoleKeyInfo('~', default, false, false, false), }); // missing closing tag (tylde): - yield return ($"\u001B[24", new[] + yield return ($"\e[24", new[] { - new ConsoleKeyInfo('\u001B', ConsoleKey.Escape, false, false, false), + new ConsoleKeyInfo('\e', ConsoleKey.Escape, false, false, false), new ConsoleKeyInfo('[', default, false, false, false), new ConsoleKeyInfo('2', ConsoleKey.D2, false, false, false), new ConsoleKeyInfo('4', ConsoleKey.D4, false, false, false), @@ -386,7 +386,7 @@ public void NewLineEscapeSequenceProducesCharacter() { XTermData xTerm = new(); - ConsoleKeyInfo consoleKeyInfo = Parse("\u001BOM".ToCharArray(), xTerm.TerminalDb, xTerm.Verase, 3); + ConsoleKeyInfo consoleKeyInfo = Parse("\eOM".ToCharArray(), xTerm.TerminalDb, xTerm.Verase, 3); Assert.Equal(ConsoleKey.Enter, consoleKeyInfo.Key); Assert.Equal('\r', consoleKeyInfo.KeyChar); @@ -398,7 +398,7 @@ public void BackTabEscapeSequence() { XTermData xTerm = new(); - ConsoleKeyInfo consoleKeyInfo = Parse("\u001B[Z".ToCharArray(), xTerm.TerminalDb, xTerm.Verase, 3); + ConsoleKeyInfo consoleKeyInfo = Parse("\e[Z".ToCharArray(), xTerm.TerminalDb, xTerm.Verase, 3); Assert.Equal(ConsoleKey.Tab, consoleKeyInfo.Key); Assert.Equal(default, consoleKeyInfo.KeyChar); diff --git a/src/libraries/System.Console/tests/TermInfo.Unix.cs 
b/src/libraries/System.Console/tests/TermInfo.Unix.cs index e2fad1c0f183a5..2ce7c4c9ff204a 100644 --- a/src/libraries/System.Console/tests/TermInfo.Unix.cs +++ b/src/libraries/System.Console/tests/TermInfo.Unix.cs @@ -76,21 +76,21 @@ public void VerifyTermInfoSupportsNewAndLegacyNcurses() [Theory] [PlatformSpecific(TestPlatforms.AnyUnix)] // Tests TermInfo - [InlineData("xterm-256color", "\u001B\u005B\u00330m", "\u001B\u005B\u00340m", 0)] - [InlineData("xterm-256color", "\u001B\u005B\u00331m", "\u001B\u005B\u00341m", 1)] - [InlineData("xterm-256color", "\u001B\u005B90m", "\u001B\u005B100m", 8)] - [InlineData("screen", "\u001B\u005B\u00330m", "\u001B\u005B\u00340m", 0)] - [InlineData("screen", "\u001B\u005B\u00332m", "\u001B\u005B\u00342m", 2)] - [InlineData("screen", "\u001B\u005B\u00339m", "\u001B\u005B\u00349m", 9)] - [InlineData("Eterm", "\u001B\u005B\u00330m", "\u001B\u005B\u00340m", 0)] - [InlineData("Eterm", "\u001B\u005B\u00333m", "\u001B\u005B\u00343m", 3)] - [InlineData("Eterm", "\u001B\u005B\u003310m", "\u001B\u005B\u003410m", 10)] - [InlineData("wsvt25", "\u001B\u005B\u00330m", "\u001B\u005B\u00340m", 0)] - [InlineData("wsvt25", "\u001B\u005B\u00334m", "\u001B\u005B\u00344m", 4)] - [InlineData("wsvt25", "\u001B\u005B\u003311m", "\u001B\u005B\u003411m", 11)] - [InlineData("mach-color", "\u001B\u005B\u00330m", "\u001B\u005B\u00340m", 0)] - [InlineData("mach-color", "\u001B\u005B\u00335m", "\u001B\u005B\u00345m", 5)] - [InlineData("mach-color", "\u001B\u005B\u003312m", "\u001B\u005B\u003412m", 12)] + [InlineData("xterm-256color", "\e\u005B\u00330m", "\e\u005B\u00340m", 0)] + [InlineData("xterm-256color", "\e\u005B\u00331m", "\e\u005B\u00341m", 1)] + [InlineData("xterm-256color", "\e\u005B90m", "\e\u005B100m", 8)] + [InlineData("screen", "\e\u005B\u00330m", "\e\u005B\u00340m", 0)] + [InlineData("screen", "\e\u005B\u00332m", "\e\u005B\u00342m", 2)] + [InlineData("screen", "\e\u005B\u00339m", "\e\u005B\u00349m", 9)] + [InlineData("Eterm", "\e\u005B\u00330m", 
"\e\u005B\u00340m", 0)] + [InlineData("Eterm", "\e\u005B\u00333m", "\e\u005B\u00343m", 3)] + [InlineData("Eterm", "\e\u005B\u003310m", "\e\u005B\u003410m", 10)] + [InlineData("wsvt25", "\e\u005B\u00330m", "\e\u005B\u00340m", 0)] + [InlineData("wsvt25", "\e\u005B\u00334m", "\e\u005B\u00344m", 4)] + [InlineData("wsvt25", "\e\u005B\u003311m", "\e\u005B\u003411m", 11)] + [InlineData("mach-color", "\e\u005B\u00330m", "\e\u005B\u00340m", 0)] + [InlineData("mach-color", "\e\u005B\u00335m", "\e\u005B\u00345m", 5)] + [InlineData("mach-color", "\e\u005B\u003312m", "\e\u005B\u003412m", 12)] public void TermInfoVerification(string termToTest, string expectedForeground, string expectedBackground, int colorValue) { TermInfo.Database db = TermInfo.DatabaseFactory.ReadDatabase(termToTest); @@ -109,8 +109,8 @@ public void TermInfoClearIncludesE3WhenExpected() { // XTerm defines E3 for clearing scrollback buffer and tmux does not. // This can't be added to TermInfoVerification because xterm-256color sometimes has E3 defined (e.g. on Ubuntu but not macOS) - Assert.Equal("\u001B[H\u001B[2J\u001B[3J", new XTermData().TerminalDb.Clear); - Assert.Equal("\u001B[H\u001B[J", new TmuxData().TerminalDb.Clear); + Assert.Equal("\e[H\e[2J\e[3J", new XTermData().TerminalDb.Clear); + Assert.Equal("\e[H\e[J", new TmuxData().TerminalDb.Clear); } [Fact] @@ -119,7 +119,7 @@ public void EmuTermInfoDoesntBreakParser() { // This file (available by default on OS X) is called out specifically since it contains a format where it has %i // but only one variable instead of two. 
Make sure we don't break in this case - TermInfoVerification("emu", "\u001Br1;", "\u001Bs1;", 0); + TermInfoVerification("emu", "\er1;", "\es1;", 0); } [Fact] diff --git a/src/libraries/System.Private.CoreLib/src/System/SearchValues/Strings/StringSearchValues.cs b/src/libraries/System.Private.CoreLib/src/System/SearchValues/Strings/StringSearchValues.cs index 2bff05214c518a..e2ae3c61b04455 100644 --- a/src/libraries/System.Private.CoreLib/src/System/SearchValues/Strings/StringSearchValues.cs +++ b/src/libraries/System.Private.CoreLib/src/System/SearchValues/Strings/StringSearchValues.cs @@ -21,7 +21,7 @@ internal static class StringSearchValues SearchValues.Create("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"); private static readonly SearchValues s_allAsciiExceptLowercase = - SearchValues.Create("\0\u0001\u0002\u0003\u0004\u0005\u0006\a\b\t\n\v\f\r\u000E\u000F\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017\u0018\u0019\u001A\u001B\u001C\u001D\u001E\u001F !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`{|}~\u007F"); + SearchValues.Create("\0\u0001\u0002\u0003\u0004\u0005\u0006\a\b\t\n\v\f\r\u000E\u000F\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017\u0018\u0019\u001A\e\u001C\u001D\u001E\u001F !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`{|}~\u007F"); public static SearchValues Create(ReadOnlySpan values, bool ignoreCase) { diff --git a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexParser.cs b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexParser.cs index 6485f5e04659e5..fc6fa74e114ba2 100644 --- a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexParser.cs +++ b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexParser.cs @@ -1592,7 +1592,7 @@ private char ScanCharEscape() case 'b': return '\b'; case 'e': - return '\u001B'; + return '\e'; case 'f': return '\f'; case 'n': 
diff --git a/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/Regex.Groups.Tests.cs b/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/Regex.Groups.Tests.cs index 42456389f9906c..d3db90dfdfca9e 100644 --- a/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/Regex.Groups.Tests.cs +++ b/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/Regex.Groups.Tests.cs @@ -289,7 +289,7 @@ public static IEnumerable Groups_MemberData() yield return (enUS, @"(cat)([\u0041]*)(dog)", "catAAAdog", RegexOptions.None, new string[] { "catAAAdog", "cat", "AAA", "dog" }); yield return (enUS, @"(cat)([\a]*)(dog)", "cat\a\a\adog", RegexOptions.None, new string[] { "cat\a\a\adog", "cat", "\a\a\a", "dog" }); yield return (enUS, @"(cat)([\b]*)(dog)", "cat\b\b\bdog", RegexOptions.None, new string[] { "cat\b\b\bdog", "cat", "\b\b\b", "dog" }); - yield return (enUS, @"(cat)([\e]*)(dog)", "cat\u001B\u001B\u001Bdog", RegexOptions.None, new string[] { "cat\u001B\u001B\u001Bdog", "cat", "\u001B\u001B\u001B", "dog" }); + yield return (enUS, @"(cat)([\e]*)(dog)", "cat\e\e\edog", RegexOptions.None, new string[] { "cat\e\e\edog", "cat", "\e\e\e", "dog" }); yield return (enUS, @"(cat)([\f]*)(dog)", "cat\f\f\fdog", RegexOptions.None, new string[] { "cat\f\f\fdog", "cat", "\f\f\f", "dog" }); yield return (enUS, @"(cat)([\r]*)(dog)", "cat\r\r\rdog", RegexOptions.None, new string[] { "cat\r\r\rdog", "cat", "\r\r\r", "dog" }); yield return (enUS, @"(cat)([\v]*)(dog)", "cat\v\v\vdog", RegexOptions.None, new string[] { "cat\v\v\vdog", "cat", "\v\v\v", "dog" }); @@ -433,7 +433,7 @@ public static IEnumerable Groups_MemberData() if (!PlatformDetection.IsNetFramework) // `\c[` was not handled in .NET Framework. See https://github.com/dotnet/runtime/issues/24759. 
{ - yield return (enUS, @"(cat)(\c[*)(dog)", "asdlkcat\u001bdogiwod", RegexOptions.None, new string[] { "cat\u001bdog", "cat", "\u001b", "dog" }); + yield return (enUS, @"(cat)(\c[*)(dog)", "asdlkcat\edogiwod", RegexOptions.None, new string[] { "cat\edog", "cat", "\e", "dog" }); } // Atomic Zero-Width Assertions \A \G ^ \Z \z \b \B diff --git a/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/RegexPcreTests.cs b/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/RegexPcreTests.cs index d522ab3bc2ab62..6ced2daba37234 100644 --- a/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/RegexPcreTests.cs +++ b/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/RegexPcreTests.cs @@ -37,7 +37,7 @@ public static IEnumerable PcreTestData() yield return ("The quick brown fox", RegexOptions.IgnoreCase, "The quick brown FOX", true); yield return ("The quick brown fox", RegexOptions.IgnoreCase, "What do you know about the quick brown fox?", true); yield return ("The quick brown fox", RegexOptions.IgnoreCase, "What do you know about THE QUICK BROWN FOX?", true); - yield return ("abcd\\t\\n\\r\\f\\a\\e\\071\\x3b\\$\\\\\\?caxyz", RegexOptions.None, "abcd\t\n\r\f\a\u001b9;$\\?caxyz", true); + yield return ("abcd\\t\\n\\r\\f\\a\\e\\071\\x3b\\$\\\\\\?caxyz", RegexOptions.None, "abcd\t\n\r\f\a\e9;$\\?caxyz", true); yield return ("a*abc?xyz+pqr{3}ab{2,}xy{4,5}pq{0,6}AB{0,}zz", RegexOptions.None, "abxyzpqrrrabbxyyyypqAzz", true); yield return ("a*abc?xyz+pqr{3}ab{2,}xy{4,5}pq{0,6}AB{0,}zz", RegexOptions.None, "aabxyzpqrrrabbxyyyypqAzz", true); yield return ("a*abc?xyz+pqr{3}ab{2,}xy{4,5}pq{0,6}AB{0,}zz", RegexOptions.None, "aaabxyzpqrrrabbxyyyypqAzz", true); From f91b911c812a3c826692702dce62c482975e39cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Strehovsk=C3=BD?= Date: Fri, 5 Apr 2024 23:54:08 +0900 Subject: [PATCH 110/132] Delete odd definition of VolatileLoad/Store (#100660) 
https://github.com/dotnet/runtime/issues/100627#issuecomment-2037392935 --- src/coreclr/nativeaot/Runtime/stressLog.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/coreclr/nativeaot/Runtime/stressLog.cpp b/src/coreclr/nativeaot/Runtime/stressLog.cpp index b99f48bed801b4..a04f2b8169fba7 100644 --- a/src/coreclr/nativeaot/Runtime/stressLog.cpp +++ b/src/coreclr/nativeaot/Runtime/stressLog.cpp @@ -29,9 +29,7 @@ #include "threadstore.h" #include "threadstore.inl" #include "thread.inl" - -template inline T VolatileLoad(T const * pt) { return *(T volatile const *)pt; } -template inline void VolatileStore(T* pt, T val) { *(T volatile *)pt = val; } +#include "volatile.h" #ifdef STRESS_LOG From ddae9bd96d5f89ab94fbed8fbea7309a872a2168 Mon Sep 17 00:00:00 2001 From: Milos Kotlar Date: Fri, 5 Apr 2024 17:15:01 +0200 Subject: [PATCH 111/132] [mono][infra] Enable runtime tests in fullAOT LLVM mode on linux-x64 (#92057) This PR enables jobs to run runtime tests in fullAOT LLVM mode on linux-x64. The jobs utilize the CBL-Mariner docker image, which contains the clang toolchain instead of binutils. Due to OOM and timeout issues, the tests are split into two jobs: intrinsics and other runtime tests. 
--- .../build-runtime-tests-and-send-to-helix.yml | 2 +- .../runtime-extra-platforms-other.yml | 49 ++----- eng/pipelines/runtime-llvm.yml | 134 ++++++++++++------ src/tests/Interop/Interop.csproj | 2 + .../AnsiBSTR/AnsiBStrTest.csproj | 2 +- .../StringMarshalling/BSTR/BSTRTest.csproj | 2 +- .../LPTSTR/LPTSTRTest.csproj | 2 +- .../VBByRefStr/VBByRefStrTest.csproj | 2 +- .../BoxPatternMatchAndSideEffects.csproj | 2 +- .../JIT/Methodical/Methodical_others.csproj | 2 +- .../JitBlue/GitHub_26491/GitHub_26491.ilproj | 2 +- src/tests/JIT/Regression/Regression_3.csproj | 2 +- src/tests/build.proj | 12 +- src/tests/issues.targets | 74 ++++++++++ .../readytorun_coreroot_determinism.csproj | 2 +- src/tests/readytorun/readytorun.csproj | 2 +- 16 files changed, 192 insertions(+), 101 deletions(-) diff --git a/eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml b/eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml index 127ddbcfeb0284..b0a2043bbd5710 100644 --- a/eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml +++ b/eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml @@ -68,7 +68,7 @@ steps: displayName: "AOT compile CoreCLR tests" target: ${{ coalesce(parameters.llvmAotStepContainer, parameters.container) }} - ${{ if in(parameters.runtimeVariant, 'llvmfullaot', 'minifullaot') }}: - - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot mono_fullaot ${{ parameters.buildConfig }} ${{ parameters.archType }} /p:RuntimeVariant=${{ parameters.runtimeVariant }} + - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot mono_fullaot ${{ parameters.buildConfig }} ${{ parameters.archType }} /p:RuntimeVariant=${{ parameters.runtimeVariant }} -maxcpucount:1 displayName: "AOT compile CoreCLR tests" target: ${{ coalesce(parameters.llvmAotStepContainer, parameters.container) }} - ${{ if 
eq(parameters.archType, 'arm64') }}: diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml index c1a3e2cd1f4965..e47cb4996cc704 100644 --- a/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml +++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml @@ -108,47 +108,6 @@ jobs: eq(variables['monoContainsChange'], true), eq(variables['isRollingBuild'], true)) -# -# Mono CoreCLR runtime Test executions using live libraries and LLVM Full AOT -# Only when Mono is changed -# -# Disabled due to OOM issues https://github.com/dotnet/runtime/issues/90427 -# - template: /eng/pipelines/common/platform-matrix.yml -# parameters: -# jobTemplate: /eng/pipelines/common/global-build-job.yml -# helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml -# buildConfig: Release -# runtimeFlavor: mono -# platforms: -# - linux_x64 -# - linux_arm64 -# variables: -# - name: timeoutPerTestInMinutes -# value: 60 -# - name: timeoutPerTestCollectionInMinutes -# value: 180 -# jobParameters: -# testGroup: innerloop -# nameSuffix: AllSubsets_Mono_LLVMFullAot_RuntimeTests -# runtimeVariant: llvmfullaot -# buildArgs: -s mono+libs+clr.hosts+clr.iltools -c Release /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true -# timeoutInMinutes: 300 -# isExtraPlatformsBuild: ${{ parameters.isExtraPlatformsBuild }} - -# condition: >- -# or( -# eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'], true), -# eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_runtimetests.containsChange'], true), -# eq(variables['isRollingBuild'], true)) -# postBuildSteps: -# - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml -# parameters: -# creator: dotnet-bot -# llvmAotStepContainer: linux_x64_llvmaot -# testRunNamePrefixSuffix: Mono_Release -# extraVariablesTemplates: -# - 
template: /eng/pipelines/common/templates/runtimes/test-variables.yml - # # Mono CoreCLR runtime test executions using live libraries and mini Full AOT # Only when Mono is changed @@ -161,6 +120,8 @@ jobs: runtimeFlavor: mono platforms: - linux_x64 + # Tracking issue: https://github.com/dotnet/runtime/issues/90427 + # linux_arm64 variables: - name: timeoutPerTestInMinutes value: 60 @@ -172,6 +133,12 @@ jobs: runtimeVariant: minifullaot buildArgs: -s mono+libs+clr.hosts -c Release timeoutInMinutes: 300 + isExtraPlatformsBuild: ${{ parameters.isExtraPlatformsBuild }} + condition: >- + or( + eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'], true), + eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_runtimetests.containsChange'], true), + eq(variables['isRollingBuild'], true)) postBuildSteps: - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml parameters: diff --git a/eng/pipelines/runtime-llvm.yml b/eng/pipelines/runtime-llvm.yml index 6338116afd4102..ee96cc2a04f555 100644 --- a/eng/pipelines/runtime-llvm.yml +++ b/eng/pipelines/runtime-llvm.yml @@ -28,24 +28,6 @@ schedules: - main always: false # run only if there were changes since the last successful scheduled run. 
-pr: - branches: - include: - - main - - release/*.* - paths: - include: - - '*' - exclude: - - '**.md' - - eng/Version.Details.xml - - .devcontainer/* - - .github/* - - docs/* - - LICENSE.TXT - - PATENTS.TXT - - THIRD-PARTY-NOTICES.TXT - variables: - template: /eng/pipelines/common/variables.yml @@ -117,8 +99,7 @@ extends: jobParameters: testGroup: innerloop nameSuffix: AllSubsets_Mono_LLVMAOT - buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) - /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true + buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true condition: >- or( eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true), @@ -136,8 +117,7 @@ extends: jobParameters: testGroup: innerloop nameSuffix: AllSubsets_Mono_LLVMAOT - buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) - /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true + buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true condition: >- or( eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true), @@ -149,55 +129,123 @@ extends: parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml - buildConfig: release + buildConfig: Release runtimeFlavor: mono platforms: - - linux_x64 - # Disabled pending outcome of https://github.com/dotnet/runtime/issues/60234 investigation - #- linux_arm64 + - linux_x64 + # Disabled pending outcome of https://github.com/dotnet/runtime/issues/60234 investigation + #- linux_arm64 + variables: + - name: timeoutPerTestInMinutes + value: 60 + - name: timeoutPerTestCollectionInMinutes + value: 180 jobParameters: testGroup: innerloop nameSuffix: AllSubsets_Mono_LLVMAOT_RuntimeTests - buildArgs: -s mono+clr.iltools+clr.hosts+libs+host+packs -c 
$(_BuildConfig) -lc ${{ variables.debugOnPrReleaseOnRolling }} - /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true - postBuildSteps: - - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - parameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_Release - llvmAotStepContainer: linux_x64_llvmaot - runtimeVariant: llvmaot + runtimeVariant: llvmaot + buildArgs: -s mono+libs+clr.hosts+clr.iltools -c $(_BuildConfig) -lc ${{ variables.debugOnPrReleaseOnRolling }} /p:MonoEnableLLVM=true + timeoutInMinutes: 360 condition: >- or( eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true), eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'], true), eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_installer.containsChange'], true), eq(variables['isRollingBuild'], true)) + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + llvmAotStepContainer: linux_x64_llvmaot + testRunNamePrefixSuffix: Mono_Release + extraVariablesTemplates: + - template: /eng/pipelines/common/templates/runtimes/test-variables.yml + # + # Mono CoreCLR runtime Test executions using live libraries and LLVM Full AOT + # Only when Mono is changed + # This job runs non-intrinsics runtime tests due to OOM issues + # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml - buildConfig: release + buildConfig: Release runtimeFlavor: mono platforms: - - linux_x64 - - linux_arm64 + - linux_x64 + # Tracking issue: https://github.com/dotnet/runtime/issues/90427 + # - linux_arm64 + variables: + - name: timeoutPerTestInMinutes + value: 60 + - name: 
timeoutPerTestCollectionInMinutes + value: 180 jobParameters: testGroup: innerloop - nameSuffix: AllSubsets_Mono_LLVMFullAOT_RuntimeTests - buildArgs: -s mono+clr.iltools+clr.hosts+libs+host+packs -c $(_BuildConfig) -lc ${{ variables.debugOnPrReleaseOnRolling }} - /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true + nameSuffix: AllSubsets_Mono_LLVMFULLAOT_RuntimeTests + runtimeVariant: llvmfullaot + buildArgs: -s mono+libs+clr.hosts+clr.iltools -c $(_BuildConfig) -lc ${{ variables.debugOnPrReleaseOnRolling }} /p:MonoEnableLLVM=true + timeoutInMinutes: 360 + condition: >- + or( + eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true), + eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'], true), + eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_installer.containsChange'], true), + eq(variables['isRollingBuild'], true)) postBuildSteps: - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml parameters: creator: dotnet-bot - testRunNamePrefixSuffix: Mono_Release llvmAotStepContainer: linux_x64_llvmaot - runtimeVariant: llvmfullaot + testRunNamePrefixSuffix: Mono_Release + testBuildArgs: >- + -tree:CoreMangLib -tree:Exceptions -tree:GC -tree:Interop -tree:Loader -tree:Regressions -tree:baseservices + -tree:ilasm -tree:ilverify -tree:managed -tree:profiler -tree:readytorun -tree:reflection -tree:tracing + -tree:JIT/BBT -tree:JIT/CodeGenBringUpTests -tree:JIT/Directed -tree:JIT/Generics -tree:JIT/IL_Conformance + -tree:JIT/Math -tree:JIT/Methodical -tree:JIT/PGO -tree:JIT/Performance -tree:JIT/Regression -tree:JIT/RyuJIT + -tree:JIT/Stress -tree:JIT/common -tree:JIT/jit64 -tree:JIT/opt -tree:JIT/superpmi + extraVariablesTemplates: + - template: /eng/pipelines/common/templates/runtimes/test-variables.yml + + # + # Mono CoreCLR runtime Test executions using live libraries and 
LLVM Full AOT + # Only when Mono is changed + # This job runs the runtime intrinsics tests due to OOM issues + # + - template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/common/global-build-job.yml + helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml + buildConfig: Release + runtimeFlavor: mono + platforms: + - linux_x64 + # Tracking issue: https://github.com/dotnet/runtime/issues/90427 + # - linux_arm64 + variables: + - name: timeoutPerTestInMinutes + value: 60 + - name: timeoutPerTestCollectionInMinutes + value: 180 + jobParameters: + testGroup: innerloop + nameSuffix: AllSubsets_Mono_LLVMFULLAOT_RuntimeIntrinsicsTests + runtimeVariant: llvmfullaot + buildArgs: -s mono+libs+clr.hosts+clr.iltools -c $(_BuildConfig) -lc ${{ variables.debugOnPrReleaseOnRolling }} /p:MonoEnableLLVM=true + timeoutInMinutes: 360 condition: >- or( eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true), eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'], true), eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_installer.containsChange'], true), eq(variables['isRollingBuild'], true)) + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + llvmAotStepContainer: linux_x64_llvmaot + testRunNamePrefixSuffix: Mono_Release + testBuildArgs: -tree:JIT/Intrinsics -tree:JIT/HardwareIntrinsics -tree:JIT/SIMD + extraVariablesTemplates: + - template: /eng/pipelines/common/templates/runtimes/test-variables.yml diff --git a/src/tests/Interop/Interop.csproj b/src/tests/Interop/Interop.csproj index ccbdc2f5e1e7fd..2a02c40a4fd511 100644 --- a/src/tests/Interop/Interop.csproj +++ b/src/tests/Interop/Interop.csproj @@ -1,5 +1,7 @@ + + true true Debug;Release;Checked diff --git 
a/src/tests/Interop/StringMarshalling/AnsiBSTR/AnsiBStrTest.csproj b/src/tests/Interop/StringMarshalling/AnsiBSTR/AnsiBStrTest.csproj index ae4b7c519db820..cbf26e1293034f 100644 --- a/src/tests/Interop/StringMarshalling/AnsiBSTR/AnsiBStrTest.csproj +++ b/src/tests/Interop/StringMarshalling/AnsiBSTR/AnsiBStrTest.csproj @@ -3,7 +3,7 @@ true $(DefineConstants);ANSIBSTR - true + true diff --git a/src/tests/Interop/StringMarshalling/BSTR/BSTRTest.csproj b/src/tests/Interop/StringMarshalling/BSTR/BSTRTest.csproj index 539f34ced8c87f..c3b9f50d217f7d 100644 --- a/src/tests/Interop/StringMarshalling/BSTR/BSTRTest.csproj +++ b/src/tests/Interop/StringMarshalling/BSTR/BSTRTest.csproj @@ -3,7 +3,7 @@ true $(DefineConstants);BSTR - true + true diff --git a/src/tests/Interop/StringMarshalling/LPTSTR/LPTSTRTest.csproj b/src/tests/Interop/StringMarshalling/LPTSTR/LPTSTRTest.csproj index e436167e972f0a..af1674c301841c 100644 --- a/src/tests/Interop/StringMarshalling/LPTSTR/LPTSTRTest.csproj +++ b/src/tests/Interop/StringMarshalling/LPTSTR/LPTSTRTest.csproj @@ -3,7 +3,7 @@ true $(DefineConstants);LPTSTR - true + true diff --git a/src/tests/Interop/StringMarshalling/VBByRefStr/VBByRefStrTest.csproj b/src/tests/Interop/StringMarshalling/VBByRefStr/VBByRefStrTest.csproj index 9154f936b74255..a3017d0e814515 100644 --- a/src/tests/Interop/StringMarshalling/VBByRefStr/VBByRefStrTest.csproj +++ b/src/tests/Interop/StringMarshalling/VBByRefStr/VBByRefStrTest.csproj @@ -2,7 +2,7 @@ true - true + true diff --git a/src/tests/JIT/Methodical/Boxing/boxunbox/BoxPatternMatchAndSideEffects.csproj b/src/tests/JIT/Methodical/Boxing/boxunbox/BoxPatternMatchAndSideEffects.csproj index e1e12460ae03bc..7df006c9f1a4ec 100644 --- a/src/tests/JIT/Methodical/Boxing/boxunbox/BoxPatternMatchAndSideEffects.csproj +++ b/src/tests/JIT/Methodical/Boxing/boxunbox/BoxPatternMatchAndSideEffects.csproj @@ -2,7 +2,7 @@ PdbOnly - true + true diff --git a/src/tests/JIT/Methodical/Methodical_others.csproj 
b/src/tests/JIT/Methodical/Methodical_others.csproj index 998101aaa4553b..2fd089bdb777f9 100644 --- a/src/tests/JIT/Methodical/Methodical_others.csproj +++ b/src/tests/JIT/Methodical/Methodical_others.csproj @@ -1,7 +1,7 @@ - true + true diff --git a/src/tests/JIT/Regression/JitBlue/GitHub_26491/GitHub_26491.ilproj b/src/tests/JIT/Regression/JitBlue/GitHub_26491/GitHub_26491.ilproj index 439e29a31673e1..623b7601a72ccc 100644 --- a/src/tests/JIT/Regression/JitBlue/GitHub_26491/GitHub_26491.ilproj +++ b/src/tests/JIT/Regression/JitBlue/GitHub_26491/GitHub_26491.ilproj @@ -1,7 +1,7 @@ - true + true diff --git a/src/tests/JIT/Regression/Regression_3.csproj b/src/tests/JIT/Regression/Regression_3.csproj index 0006ba5709b72f..9eeb2fe0500797 100644 --- a/src/tests/JIT/Regression/Regression_3.csproj +++ b/src/tests/JIT/Regression/Regression_3.csproj @@ -1,7 +1,7 @@ - true + true diff --git a/src/tests/build.proj b/src/tests/build.proj index e7886f37d77037..7f2a0b6b38af5b 100644 --- a/src/tests/build.proj +++ b/src/tests/build.proj @@ -115,15 +115,15 @@ - - - + @@ -383,8 +383,8 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + https://github.com/dotnet/runtime/issues/82859 diff --git a/src/tests/readytorun/coreroot_determinism/readytorun_coreroot_determinism.csproj b/src/tests/readytorun/coreroot_determinism/readytorun_coreroot_determinism.csproj index af73a80930400a..56b857d7935688 100644 --- a/src/tests/readytorun/coreroot_determinism/readytorun_coreroot_determinism.csproj +++ b/src/tests/readytorun/coreroot_determinism/readytorun_coreroot_determinism.csproj @@ -1,7 +1,7 @@ - true + true diff --git a/src/tests/readytorun/readytorun.csproj b/src/tests/readytorun/readytorun.csproj index 585b0be3fcdfe9..cb657444dd8d18 100644 --- a/src/tests/readytorun/readytorun.csproj +++ b/src/tests/readytorun/readytorun.csproj @@ -1,7 +1,7 @@ - true + true From 
4c20b21e2b9cae656bc687ca0d0cad229d3b4dfa Mon Sep 17 00:00:00 2001 From: Nikola Milosavljevic Date: Fri, 5 Apr 2024 08:20:38 -0700 Subject: [PATCH 112/132] Azure Linux 3.0 deps package (#100656) --- src/installer/pkg/sfx/installers.proj | 1 + .../dotnet-runtime-deps/dotnet-runtime-deps-azl.3.proj | 10 ++++++++++ 2 files changed, 11 insertions(+) create mode 100644 src/installer/pkg/sfx/installers/dotnet-runtime-deps/dotnet-runtime-deps-azl.3.proj diff --git a/src/installer/pkg/sfx/installers.proj b/src/installer/pkg/sfx/installers.proj index 06e366db911d06..7f4ce6b9c1c409 100644 --- a/src/installer/pkg/sfx/installers.proj +++ b/src/installer/pkg/sfx/installers.proj @@ -10,6 +10,7 @@ + diff --git a/src/installer/pkg/sfx/installers/dotnet-runtime-deps/dotnet-runtime-deps-azl.3.proj b/src/installer/pkg/sfx/installers/dotnet-runtime-deps/dotnet-runtime-deps-azl.3.proj new file mode 100644 index 00000000000000..ee363967c365fd --- /dev/null +++ b/src/installer/pkg/sfx/installers/dotnet-runtime-deps/dotnet-runtime-deps-azl.3.proj @@ -0,0 +1,10 @@ + + + false + azl.3 + + + + + + From b973027549d44fa802a7f5883a9006ab2d4e5c4d Mon Sep 17 00:00:00 2001 From: Elinor Fung Date: Fri, 5 Apr 2024 08:38:11 -0700 Subject: [PATCH 113/132] [cdac] Add basic cdacreader project (#100623) This change adds a basic `cdacreader` project under src/native/managed. 
- Built as part of the clr subset (via runtime-prereqs referencing compile-native.proj) - Not yet integrated into anything in the product (that is, the dac doesn't try to use it yet) - Can return a class that can implement the ISOSDacInterface* interfaces - currently does ISOSDacInterface9 --- .../managed/cdacreader/cmake/CMakeLists.txt | 2 + .../managed/cdacreader/inc/cdac_reader.h | 20 ++++++++ .../managed/cdacreader/src/Entrypoints.cs | 50 +++++++++++++++++++ .../managed/cdacreader/src/SOSDacImpl.cs | 36 +++++++++++++ src/native/managed/cdacreader/src/Target.cs | 11 ++++ .../managed/cdacreader/src/cdacreader.csproj | 17 +++++++ src/native/managed/compile-native.proj | 1 + 7 files changed, 137 insertions(+) create mode 100644 src/native/managed/cdacreader/cmake/CMakeLists.txt create mode 100644 src/native/managed/cdacreader/inc/cdac_reader.h create mode 100644 src/native/managed/cdacreader/src/Entrypoints.cs create mode 100644 src/native/managed/cdacreader/src/SOSDacImpl.cs create mode 100644 src/native/managed/cdacreader/src/Target.cs create mode 100644 src/native/managed/cdacreader/src/cdacreader.csproj diff --git a/src/native/managed/cdacreader/cmake/CMakeLists.txt b/src/native/managed/cdacreader/cmake/CMakeLists.txt new file mode 100644 index 00000000000000..2a7459c37b8516 --- /dev/null +++ b/src/native/managed/cdacreader/cmake/CMakeLists.txt @@ -0,0 +1,2 @@ +add_library(cdacreader_api INTERFACE) +target_include_directories(cdacreader_api INTERFACE ${CLR_SRC_NATIVE_DIR}/managed/cdacreader/inc) diff --git a/src/native/managed/cdacreader/inc/cdac_reader.h b/src/native/managed/cdacreader/inc/cdac_reader.h new file mode 100644 index 00000000000000..b6c71b671a6eda --- /dev/null +++ b/src/native/managed/cdacreader/inc/cdac_reader.h @@ -0,0 +1,20 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +#ifndef CDAC_READER_H +#define CDAC_READER_H + +#ifdef __cplusplus +extern "C" +{ +#endif + +int cdac_reader_init(intptr_t descriptor, intptr_t* handle); +int cdac_reader_free(intptr_t handle); +int cdac_reader_get_sos_interface(intptr_t handle, IUnknown** obj); + +#ifdef __cplusplus +} +#endif + +#endif // CDAC_READER_H diff --git a/src/native/managed/cdacreader/src/Entrypoints.cs b/src/native/managed/cdacreader/src/Entrypoints.cs new file mode 100644 index 00000000000000..a65ba9c5fa5ea8 --- /dev/null +++ b/src/native/managed/cdacreader/src/Entrypoints.cs @@ -0,0 +1,50 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System; +using System.Runtime.InteropServices; +using System.Runtime.InteropServices.Marshalling; + +namespace Microsoft.Diagnostics.DataContractReader; + +internal static class Entrypoints +{ + private const string CDAC = "cdac_reader_"; + + [UnmanagedCallersOnly(EntryPoint = $"{CDAC}init")] + private static unsafe int Init(nint descriptor, IntPtr* handle) + { + Target target = new(descriptor); + GCHandle gcHandle = GCHandle.Alloc(target); + *handle = GCHandle.ToIntPtr(gcHandle); + return 0; + } + + [UnmanagedCallersOnly(EntryPoint = $"{CDAC}free")] + private static unsafe int Free(IntPtr handle) + { + GCHandle h = GCHandle.FromIntPtr(handle); + h.Free(); + return 0; + } + + /// + /// Get the SOS-DAC interface implementation. + /// + /// Handle crated via cdac initialization + /// IUnknown pointer that can be queried for ISOSDacInterface* + /// + [UnmanagedCallersOnly(EntryPoint = $"{CDAC}get_sos_interface")] + private static unsafe int GetSOSInterface(IntPtr handle, nint* obj) + { + ComWrappers cw = new StrategyBasedComWrappers(); + Target? 
target = GCHandle.FromIntPtr(handle).Target as Target; + if (target == null) + return -1; + + SOSDacImpl impl = new(target); + nint ptr = cw.GetOrCreateComInterfaceForObject(impl, CreateComInterfaceFlags.None); + *obj = ptr; + return 0; + } +} diff --git a/src/native/managed/cdacreader/src/SOSDacImpl.cs b/src/native/managed/cdacreader/src/SOSDacImpl.cs new file mode 100644 index 00000000000000..893c39bff8830d --- /dev/null +++ b/src/native/managed/cdacreader/src/SOSDacImpl.cs @@ -0,0 +1,36 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System; +using System.Runtime.InteropServices; +using System.Runtime.InteropServices.Marshalling; + +namespace Microsoft.Diagnostics.DataContractReader; + +[GeneratedComInterface] +[Guid("4eca42d8-7e7b-4c8a-a116-7bfbf6929267")] +internal partial interface ISOSDacInterface9 +{ + int GetBreakingChangeVersion(); +} + +/// +/// Implementation of ISOSDacInterface* interfaces intended to be passed out to consumers +/// interacting with the DAC via those COM interfaces. +/// +[GeneratedComClass] +internal sealed partial class SOSDacImpl : ISOSDacInterface9 +{ + private readonly Target _target; + + public SOSDacImpl(Target target) + { + _target = target; + } + + public int GetBreakingChangeVersion() + { + // TODO: Return non-hard-coded version + return 4; + } +} diff --git a/src/native/managed/cdacreader/src/Target.cs b/src/native/managed/cdacreader/src/Target.cs new file mode 100644 index 00000000000000..1590984f017c30 --- /dev/null +++ b/src/native/managed/cdacreader/src/Target.cs @@ -0,0 +1,11 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +namespace Microsoft.Diagnostics.DataContractReader; + +internal sealed class Target +{ + public Target(nint _) + { + } +} diff --git a/src/native/managed/cdacreader/src/cdacreader.csproj b/src/native/managed/cdacreader/src/cdacreader.csproj new file mode 100644 index 00000000000000..51f87fa8908d9e --- /dev/null +++ b/src/native/managed/cdacreader/src/cdacreader.csproj @@ -0,0 +1,17 @@ + + + + $(NetCoreAppToolCurrent) + enable + true + + false + + + + + + + + + diff --git a/src/native/managed/compile-native.proj b/src/native/managed/compile-native.proj index 4203835936ecbc..453d84ab4dc072 100644 --- a/src/native/managed/compile-native.proj +++ b/src/native/managed/compile-native.proj @@ -12,6 +12,7 @@ + From b76baa856babaac858c44bbcb943b873ba862b41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Petryka?= <35800402+MichalPetryka@users.noreply.github.com> Date: Fri, 5 Apr 2024 17:46:08 +0200 Subject: [PATCH 114/132] Cleanup NativeAOT math helpers (#100375) * Cleanup NativeAOT math helpers * Restore checked cast helpers * Rename variables --------- Co-authored-by: Jan Kotas --- .../Runtime/CompilerHelpers/MathHelpers.cs | 406 ++++++++---------- 1 file changed, 169 insertions(+), 237 deletions(-) diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/CompilerHelpers/MathHelpers.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/CompilerHelpers/MathHelpers.cs index 85fef043acb0e1..42485ea3ef63cd 100644 --- a/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/CompilerHelpers/MathHelpers.cs +++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/CompilerHelpers/MathHelpers.cs @@ -2,20 +2,78 @@ // The .NET Foundation licenses this file to you under the MIT license. 
using System; +using System.Diagnostics; using System.Runtime; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -using Internal.Runtime; - namespace Internal.Runtime.CompilerHelpers { /// - /// Math helpers for generated code. The helpers marked with [RuntimeExport] and the type - /// itself need to be public because they constitute a public contract with the .NET Native toolchain. + /// Math helpers for generated code. The helpers here are referenced by the runtime. /// + [StackTraceHidden] internal static partial class MathHelpers { + private const double Int32MaxValueOffset = (double)int.MaxValue + 1; + private const double UInt32MaxValueOffset = (double)uint.MaxValue + 1; + + [RuntimeExport("Dbl2IntOvf")] + public static int Dbl2IntOvf(double value) + { + // Note that this expression also works properly for val = NaN case + if (value is > -Int32MaxValueOffset - 1 and < Int32MaxValueOffset) + { + return (int)value; + } + + ThrowHelper.ThrowOverflowException(); + return 0; + } + + [RuntimeExport("Dbl2UIntOvf")] + public static uint Dbl2UIntOvf(double value) + { + // Note that this expression also works properly for val = NaN case + if (value is > -1.0 and < UInt32MaxValueOffset) + { + return (uint)value; + } + + ThrowHelper.ThrowOverflowException(); + return 0; + } + + [RuntimeExport("Dbl2LngOvf")] + public static long Dbl2LngOvf(double value) + { + const double two63 = Int32MaxValueOffset * UInt32MaxValueOffset; + + // Note that this expression also works properly for val = NaN case + // We need to compare with the very next double to two63. 0x402 is epsilon to get us there. 
+ if (value is > -two63 - 0x402 and < two63) + { + return (long)value; + } + + ThrowHelper.ThrowOverflowException(); + return 0; + } + + [RuntimeExport("Dbl2ULngOvf")] + public static ulong Dbl2ULngOvf(double value) + { + const double two64 = UInt32MaxValueOffset * UInt32MaxValueOffset; + // Note that this expression also works properly for val = NaN case + if (value is > -1.0 and < two64) + { + return (ulong)value; + } + + ThrowHelper.ThrowOverflowException(); + return 0; + } + #if !TARGET_64BIT // // 64-bit checked multiplication for 32-bit platforms @@ -23,360 +81,234 @@ internal static partial class MathHelpers private const string RuntimeLibrary = "*"; - // Helper to multiply two 32-bit uints - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong Mul32x32To64(uint a, uint b) - { - return a * (ulong)b; - } - - // Helper to get high 32-bit of 64-bit int [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint Hi32Bits(long a) + private static uint High32Bits(ulong a) { return (uint)(a >> 32); } - // Helper to get high 32-bit of 64-bit int [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint Hi32Bits(ulong a) + private static ulong BigMul(uint left, uint right) { - return (uint)(a >> 32); + return (ulong)left * right; } [RuntimeExport("LMulOvf")] - public static long LMulOvf(long i, long j) + public static long LMulOvf(long left, long right) { - long ret; +#if DEBUG + long result = left * right; +#endif // Remember the sign of the result - int sign = (int)(Hi32Bits(i) ^ Hi32Bits(j)); + int sign = (int)(High32Bits((ulong)left) ^ High32Bits((ulong)right)); // Convert to unsigned multiplication - if (i < 0) i = -i; - if (j < 0) j = -j; + if (left < 0) + left = -left; + if (right < 0) + right = -right; // Get the upper 32 bits of the numbers - uint val1High = Hi32Bits(i); - uint val2High = Hi32Bits(j); + uint val1High = High32Bits((ulong)left); + uint val2High = High32Bits((ulong)right); ulong valMid; if 
(val1High == 0) { // Compute the 'middle' bits of the long multiplication - valMid = Mul32x32To64(val2High, (uint)i); + valMid = BigMul(val2High, (uint)left); } else { if (val2High != 0) - goto ThrowExcep; + goto Overflow; // Compute the 'middle' bits of the long multiplication - valMid = Mul32x32To64(val1High, (uint)j); + valMid = BigMul(val1High, (uint)right); } // See if any bits after bit 32 are set - if (Hi32Bits(valMid) != 0) - goto ThrowExcep; + if (High32Bits(valMid) != 0) + goto Overflow; - ret = (long)(Mul32x32To64((uint)i, (uint)j) + (valMid << 32)); + long ret = (long)(BigMul((uint)left, (uint)right) + (valMid << 32)); // check for overflow - if (Hi32Bits(ret) < (uint)valMid) - goto ThrowExcep; + if (High32Bits((ulong)ret) < (uint)valMid) + goto Overflow; if (sign >= 0) { // have we spilled into the sign bit? if (ret < 0) - goto ThrowExcep; + goto Overflow; } else { ret = -ret; // have we spilled into the sign bit? if (ret > 0) - goto ThrowExcep; + goto Overflow; } + +#if DEBUG + Debug.Assert(ret == result, $"Multiply overflow got: {ret}, expected: {result}"); +#endif return ret; - ThrowExcep: - return ThrowLngOvf(); + Overflow: + ThrowHelper.ThrowOverflowException(); + return 0; } [RuntimeExport("ULMulOvf")] - public static ulong ULMulOvf(ulong i, ulong j) + public static ulong ULMulOvf(ulong left, ulong right) { - ulong ret; - // Get the upper 32 bits of the numbers - uint val1High = Hi32Bits(i); - uint val2High = Hi32Bits(j); + uint val1High = High32Bits(left); + uint val2High = High32Bits(right); ulong valMid; if (val1High == 0) { if (val2High == 0) - return Mul32x32To64((uint)i, (uint)j); + return (ulong)(uint)left * (uint)right; // Compute the 'middle' bits of the long multiplication - valMid = Mul32x32To64(val2High, (uint)i); + valMid = BigMul(val2High, (uint)left); } else { if (val2High != 0) - goto ThrowExcep; + goto Overflow; // Compute the 'middle' bits of the long multiplication - valMid = Mul32x32To64(val1High, (uint)j); + valMid = 
BigMul(val1High, (uint)right); } // See if any bits after bit 32 are set - if (Hi32Bits(valMid) != 0) - goto ThrowExcep; + if (High32Bits(valMid) != 0) + goto Overflow; - ret = Mul32x32To64((uint)i, (uint)j) + (valMid << 32); + ulong ret = BigMul((uint)left, (uint)right) + (valMid << 32); // check for overflow - if (Hi32Bits(ret) < (uint)valMid) - goto ThrowExcep; + if (High32Bits(ret) < (uint)valMid) + goto Overflow; + + Debug.Assert(ret == left * right, $"Multiply overflow got: {ret}, expected: {left * right}"); return ret; - ThrowExcep: - return ThrowULngOvf(); + Overflow: + ThrowHelper.ThrowOverflowException(); + return 0; } [LibraryImport(RuntimeLibrary)] [SuppressGCTransition] - private static partial ulong RhpULMod(ulong i, ulong j); + private static partial ulong RhpULMod(ulong dividend, ulong divisor); - public static ulong ULMod(ulong i, ulong j) + public static ulong ULMod(ulong dividend, ulong divisor) { - if (j == 0) - return ThrowULngDivByZero(); - else - return RhpULMod(i, j); - } - - [LibraryImport(RuntimeLibrary)] - [SuppressGCTransition] - private static partial long RhpLMod(long i, long j); + if (divisor == 0) + ThrowHelper.ThrowDivideByZeroException(); - public static long LMod(long i, long j) - { - if (j == 0) - return ThrowLngDivByZero(); - else if (j == -1 && i == long.MinValue) - return ThrowLngOvf(); - else - return RhpLMod(i, j); + return RhpULMod(dividend, divisor); } [LibraryImport(RuntimeLibrary)] [SuppressGCTransition] - private static partial ulong RhpULDiv(ulong i, ulong j); + private static partial long RhpLMod(long dividend, long divisor); - public static ulong ULDiv(ulong i, ulong j) + public static long LMod(long dividend, long divisor) { - if (j == 0) - return ThrowULngDivByZero(); - else - return RhpULDiv(i, j); + if (divisor == 0) + ThrowHelper.ThrowDivideByZeroException(); + if (divisor == -1 && dividend == long.MinValue) + ThrowHelper.ThrowOverflowException(); + + return RhpLMod(dividend, divisor); } 
[LibraryImport(RuntimeLibrary)] [SuppressGCTransition] - private static partial long RhpLDiv(long i, long j); - - public static long LDiv(long i, long j) - { - if (j == 0) - return ThrowLngDivByZero(); - else if (j == -1 && i == long.MinValue) - return ThrowLngOvf(); - else - return RhpLDiv(i, j); - } - - [MethodImpl(MethodImplOptions.NoInlining)] - private static long ThrowLngDivByZero() - { - throw new DivideByZeroException(); - } - - [MethodImpl(MethodImplOptions.NoInlining)] - private static ulong ThrowULngDivByZero() - { - throw new DivideByZeroException(); - } -#endif // TARGET_64BIT + private static partial ulong RhpULDiv(ulong dividend, ulong divisor); - [RuntimeExport("Dbl2IntOvf")] - public static int Dbl2IntOvf(double val) - { - const double two31 = 2147483648.0; - - // Note that this expression also works properly for val = NaN case - if (val > -two31 - 1 && val < two31) - return unchecked((int)val); - - return ThrowIntOvf(); - } - - [RuntimeExport("Dbl2UIntOvf")] - public static uint Dbl2UIntOvf(double val) - { - // Note that this expression also works properly for val = NaN case - if (val > -1.0 && val < 4294967296.0) - return unchecked((uint)val); - - return ThrowUIntOvf(); - } - - [RuntimeExport("Dbl2LngOvf")] - public static long Dbl2LngOvf(double val) - { - const double two63 = 2147483648.0 * 4294967296.0; - - // Note that this expression also works properly for val = NaN case - // We need to compare with the very next double to two63. 0x402 is epsilon to get us there. 
- if (val > -two63 - 0x402 && val < two63) - return unchecked((long)val); - - return ThrowLngOvf(); - } - - [RuntimeExport("Dbl2ULngOvf")] - public static ulong Dbl2ULngOvf(double val) + public static ulong ULDiv(ulong dividend, ulong divisor) { - const double two64 = 2.0 * 2147483648.0 * 4294967296.0; - - // Note that this expression also works properly for val = NaN case - if (val > -1.0 && val < two64) - return unchecked((ulong)val); + if (divisor == 0) + ThrowHelper.ThrowDivideByZeroException(); - return ThrowULngOvf(); + return RhpULDiv(dividend, divisor); } - [RuntimeExport("Flt2IntOvf")] - public static int Flt2IntOvf(float val) - { - const double two31 = 2147483648.0; - - // Note that this expression also works properly for val = NaN case - if (val > -two31 - 1 && val < two31) - return ((int)val); - - return ThrowIntOvf(); - } + [LibraryImport(RuntimeLibrary)] + [SuppressGCTransition] + private static partial long RhpLDiv(long dividend, long divisor); - [RuntimeExport("Flt2LngOvf")] - public static long Flt2LngOvf(float val) + public static long LDiv(long dividend, long divisor) { - const double two63 = 2147483648.0 * 4294967296.0; - - // Note that this expression also works properly for val = NaN case - // We need to compare with the very next double to two63. 0x402 is epsilon to get us there. 
- if (val > -two63 - 0x402 && val < two63) - return ((long)val); + if (divisor == 0) + ThrowHelper.ThrowDivideByZeroException(); + if (divisor == -1 && dividend == long.MinValue) + ThrowHelper.ThrowOverflowException(); - return ThrowIntOvf(); + return RhpLDiv(dividend, divisor); } #if TARGET_ARM [RuntimeImport(RuntimeLibrary, "RhpIDiv")] - [MethodImplAttribute(MethodImplOptions.InternalCall)] - private static extern int RhpIDiv(int i, int j); + [MethodImpl(MethodImplOptions.InternalCall)] + private static extern int RhpIDiv(int dividend, int divisor); - public static int IDiv(int i, int j) + public static int IDiv(int dividend, int divisor) { - if (j == 0) - return ThrowIntDivByZero(); - else if (j == -1 && i == int.MinValue) - return ThrowIntOvf(); - else - return RhpIDiv(i, j); - } + if (divisor == 0) + ThrowHelper.ThrowDivideByZeroException(); + if (divisor == -1 && dividend == int.MinValue) + ThrowHelper.ThrowOverflowException(); - [RuntimeImport(RuntimeLibrary, "RhpUDiv")] - [MethodImplAttribute(MethodImplOptions.InternalCall)] - private static extern uint RhpUDiv(uint i, uint j); - - public static long UDiv(uint i, uint j) - { - if (j == 0) - return ThrowUIntDivByZero(); - else - return RhpUDiv(i, j); + return RhpIDiv(dividend, divisor); } - [RuntimeImport(RuntimeLibrary, "RhpIMod")] - [MethodImplAttribute(MethodImplOptions.InternalCall)] - private static extern int RhpIMod(int i, int j); + [RuntimeImport(RuntimeLibrary, "RhpUDiv")] + [MethodImpl(MethodImplOptions.InternalCall)] + private static extern uint RhpUDiv(uint dividend, uint divisor); - public static int IMod(int i, int j) + public static long UDiv(uint dividend, uint divisor) { - if (j == 0) - return ThrowIntDivByZero(); - else if (j == -1 && i == int.MinValue) - return ThrowIntOvf(); - else - return RhpIMod(i, j); - } + if (divisor == 0) + ThrowHelper.ThrowDivideByZeroException(); - [RuntimeImport(RuntimeLibrary, "RhpUMod")] - [MethodImplAttribute(MethodImplOptions.InternalCall)] - private static 
extern uint RhpUMod(uint i, uint j); - - public static long UMod(uint i, uint j) - { - if (j == 0) - return ThrowUIntDivByZero(); - else - return RhpUMod(i, j); + return RhpUDiv(dividend, divisor); } -#endif // TARGET_ARM - - // - // Matching return types of throw helpers enables tailcalling them. It improves performance - // of the hot path because of it does not need to raise full stackframe. - // - [MethodImpl(MethodImplOptions.NoInlining)] - private static int ThrowIntOvf() - { - throw new OverflowException(); - } + [RuntimeImport(RuntimeLibrary, "RhpIMod")] + [MethodImpl(MethodImplOptions.InternalCall)] + private static extern int RhpIMod(int dividend, int divisor); - [MethodImpl(MethodImplOptions.NoInlining)] - private static uint ThrowUIntOvf() + public static int IMod(int dividend, int divisor) { - throw new OverflowException(); - } + if (divisor == 0) + ThrowHelper.ThrowDivideByZeroException(); + if (divisor == -1 && dividend == int.MinValue) + ThrowHelper.ThrowOverflowException(); - [MethodImpl(MethodImplOptions.NoInlining)] - private static long ThrowLngOvf() - { - throw new OverflowException(); + return RhpIMod(dividend, divisor); } - [MethodImpl(MethodImplOptions.NoInlining)] - private static ulong ThrowULngOvf() - { - throw new OverflowException(); - } + [RuntimeImport(RuntimeLibrary, "RhpUMod")] + [MethodImpl(MethodImplOptions.InternalCall)] + private static extern uint RhpUMod(uint dividend, uint divisor); -#if TARGET_ARM - [MethodImpl(MethodImplOptions.NoInlining)] - private static int ThrowIntDivByZero() + public static long UMod(uint dividend, uint divisor) { - throw new DivideByZeroException(); - } + if (divisor == 0) + ThrowHelper.ThrowDivideByZeroException(); - [MethodImpl(MethodImplOptions.NoInlining)] - private static uint ThrowUIntDivByZero() - { - throw new DivideByZeroException(); + return RhpUMod(dividend, divisor); } #endif // TARGET_ARM +#endif // TARGET_64BIT } } From 068ba8eba9787bb180878561d443abcf6133be9f Mon Sep 17 00:00:00 2001 
From: Kevin Jones Date: Fri, 5 Apr 2024 11:49:43 -0400 Subject: [PATCH 115/132] Improve error when private key is missing in RSABCrypt CNG bcrypt gives an unhelpful error when a public key used in a way that requires the private key. RSABCrypt knows if the key is public or not so we can use this to throw a more helpful exception. --- .../System/Security/Cryptography/RSABCrypt.cs | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/RSABCrypt.cs b/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/RSABCrypt.cs index c5ac24ab029b2e..6e1fbbdb23d919 100644 --- a/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/RSABCrypt.cs +++ b/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/RSABCrypt.cs @@ -20,6 +20,7 @@ internal sealed class RSABCrypt : RSA private SafeBCryptKeyHandle? _key; private int _lastKeySize; + private bool _publicOnly; internal RSABCrypt() { @@ -51,11 +52,11 @@ private SafeBCryptKeyHandle GetKey() SafeBCryptKeyHandle newKey = Interop.BCrypt.BCryptGenerateKeyPair(s_algHandle, keySize); Interop.BCrypt.BCryptFinalizeKeyPair(newKey); - SetKey(newKey); + SetKey(newKey, publicOnly: false); return newKey; } - private void SetKey(SafeBCryptKeyHandle newKey) + private void SetKey(SafeBCryptKeyHandle newKey, bool publicOnly) { Debug.Assert(!newKey.IsInvalid); @@ -65,6 +66,7 @@ private void SetKey(SafeBCryptKeyHandle newKey) SafeBCryptKeyHandle? 
oldKey = Interlocked.Exchange(ref _key, newKey); ForceSetKeySize(keySize); + _publicOnly = publicOnly; oldKey?.Dispose(); } @@ -112,7 +114,7 @@ public override void ImportParameters(RSAParameters parameters) CryptoPool.Return(keyBlob); } - SetKey(newKey); + SetKey(newKey, publicOnly: parameters.D is null); } public override byte[] Encrypt(byte[] data, RSAEncryptionPadding padding) @@ -190,6 +192,8 @@ public override bool TryDecrypt( throw new CryptographicException(SR.Cryptography_RSA_DecryptWrongSize); } + ThrowIfPublicOnly(); + switch (padding.Mode) { case RSAEncryptionPaddingMode.Pkcs1: @@ -261,6 +265,7 @@ public override bool TrySignHash( string? hashAlgorithmName = hashAlgorithm.Name; ArgumentException.ThrowIfNullOrEmpty(hashAlgorithmName, nameof(hashAlgorithm)); ArgumentNullException.ThrowIfNull(padding); + ThrowIfPublicOnly(); SafeBCryptKeyHandle key = GetKey(); @@ -426,5 +431,13 @@ private void ThrowIfDisposed() { ObjectDisposedException.ThrowIf(_lastKeySize < 0, this); } + + private void ThrowIfPublicOnly() + { + if (_publicOnly) + { + throw new CryptographicException(SR.Cryptography_CSP_NoPrivateKey); + } + } } } From d79a753f0b4f78bf809b173018f18c6c98e6953d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20K=C3=B6plinger?= Date: Fri, 5 Apr 2024 18:14:56 +0200 Subject: [PATCH 116/132] Add -cross flag to build.ps1 (#100678) Fixes VMR build error on Windows after https://github.com/dotnet/installer/pull/19321 We haven't used or passed this on Windows before and a bunch of logic is conditioned based on the `CrossBuild` property (which is what `-cross` is turned into in build.sh) so ignore it for now. 
--- eng/build.ps1 | 1 + 1 file changed, 1 insertion(+) diff --git a/eng/build.ps1 b/eng/build.ps1 index 91bba3afc7e620..1f72dabed00e5f 100644 --- a/eng/build.ps1 +++ b/eng/build.ps1 @@ -12,6 +12,7 @@ Param( [string]$testscope, [switch]$testnobuild, [ValidateSet("x86","x64","arm","arm64","wasm")][string[]][Alias('a')]$arch = @([System.Runtime.InteropServices.RuntimeInformation]::ProcessArchitecture.ToString().ToLowerInvariant()), + [switch]$cross = $false, [string][Alias('s')]$subset, [ValidateSet("Debug","Release","Checked")][string][Alias('rc')]$runtimeConfiguration, [ValidateSet("Debug","Release")][string][Alias('lc')]$librariesConfiguration, From 0c92024e9d360b3dc11055326e342a9ead9a820f Mon Sep 17 00:00:00 2001 From: Katelyn Gadd Date: Fri, 5 Apr 2024 10:41:50 -0700 Subject: [PATCH 117/132] [wasm] Improvements to startup performance of mono_wasm_get_assembly_exports (#99924) Change generated JSImport/JSExport initializer to not rely on Environment.Version, for faster startup --- .../gen/JSImportGenerator/Constants.cs | 1 + .../JSImportGenerator/JSExportGenerator.cs | 46 +++++++++++++------ 2 files changed, 34 insertions(+), 13 deletions(-) diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/gen/JSImportGenerator/Constants.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/gen/JSImportGenerator/Constants.cs index 520eeba3ec4e87..a5475e27c70a46 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/gen/JSImportGenerator/Constants.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/gen/JSImportGenerator/Constants.cs @@ -17,6 +17,7 @@ internal static class Constants public const string ModuleInitializerAttributeGlobal = "global::System.Runtime.CompilerServices.ModuleInitializerAttribute"; public const string CompilerGeneratedAttributeGlobal = "global::System.Runtime.CompilerServices.CompilerGeneratedAttribute"; public const string DynamicDependencyAttributeGlobal = 
"global::System.Diagnostics.CodeAnalysis.DynamicDependencyAttribute"; + public const string DynamicallyAccessedMemberTypesGlobal = "global::System.Diagnostics.CodeAnalysis.DynamicallyAccessedMemberTypes"; public const string ThreadStaticGlobal = "global::System.ThreadStaticAttribute"; public const string TaskGlobal = "global::System.Threading.Tasks.Task"; public const string SpanGlobal = "global::System.Span"; diff --git a/src/libraries/System.Runtime.InteropServices.JavaScript/gen/JSImportGenerator/JSExportGenerator.cs b/src/libraries/System.Runtime.InteropServices.JavaScript/gen/JSImportGenerator/JSExportGenerator.cs index ffe0fe6a0f995b..ee7eb94cdd3e51 100644 --- a/src/libraries/System.Runtime.InteropServices.JavaScript/gen/JSImportGenerator/JSExportGenerator.cs +++ b/src/libraries/System.Runtime.InteropServices.JavaScript/gen/JSImportGenerator/JSExportGenerator.cs @@ -215,7 +215,7 @@ private static NamespaceDeclarationSyntax GenerateRegSource( const string generatedNamespace = "System.Runtime.InteropServices.JavaScript"; const string initializerClass = "__GeneratedInitializer"; const string initializerName = "__Register_"; - const string selfInitName = "__Net7SelfInit_"; + const string trimmingPreserveName = "__TrimmingPreserve_"; if (methods.IsEmpty) return NamespaceDeclaration(IdentifierName(generatedNamespace)); @@ -241,22 +241,42 @@ private static NamespaceDeclarationSyntax GenerateRegSource( .WithModifiers(TokenList(new[] { Token(SyntaxKind.StaticKeyword) })) .WithBody(Block(registerStatements)); - // when we are running code generated by .NET8 on .NET7 runtime we need to auto initialize the assembly, because .NET7 doesn't call the registration from JS - // this also keeps the code protected from trimming - MemberDeclarationSyntax initializerMethod = MethodDeclaration(PredefinedType(Token(SyntaxKind.VoidKeyword)), Identifier(selfInitName)) - .WithAttributeLists(List(new[]{ - 
AttributeList(SingletonSeparatedList(Attribute(IdentifierName(Constants.ModuleInitializerAttributeGlobal)))), - })) + // HACK: protect the code from trimming with DynamicDependency attached to a ModuleInitializer + MemberDeclarationSyntax initializerMethod = MethodDeclaration(PredefinedType(Token(SyntaxKind.VoidKeyword)), Identifier(trimmingPreserveName)) + .WithAttributeLists( + SingletonList( + AttributeList( + SeparatedList( + new SyntaxNodeOrToken[]{ + Attribute( + IdentifierName(Constants.ModuleInitializerAttributeGlobal)), + Token(SyntaxKind.CommaToken), + Attribute( + IdentifierName(Constants.DynamicDependencyAttributeGlobal)) + .WithArgumentList( + AttributeArgumentList( + SeparatedList( + new SyntaxNodeOrToken[]{ + AttributeArgument( + BinaryExpression( + SyntaxKind.BitwiseOrExpression, + MemberAccessExpression( + SyntaxKind.SimpleMemberAccessExpression, + IdentifierName(Constants.DynamicallyAccessedMemberTypesGlobal), + IdentifierName("PublicMethods")), + MemberAccessExpression( + SyntaxKind.SimpleMemberAccessExpression, + IdentifierName(Constants.DynamicallyAccessedMemberTypesGlobal), + IdentifierName("NonPublicMethods")))), + Token(SyntaxKind.CommaToken), + AttributeArgument( + TypeOfExpression( + IdentifierName(initializerClass)))})))})))) .WithModifiers(TokenList(new[] { Token(SyntaxKind.StaticKeyword), Token(SyntaxKind.InternalKeyword) })) - .WithBody(Block( - IfStatement(BinaryExpression(SyntaxKind.EqualsExpression, - IdentifierName("Environment.Version.Major"), - LiteralExpression(SyntaxKind.NumericLiteralExpression, Literal(7))), - Block(SingletonList( - ExpressionStatement(InvocationExpression(IdentifierName(initializerName)))))))); + .WithBody(Block()); var ns = NamespaceDeclaration(IdentifierName(generatedNamespace)) .WithMembers( From afbebfe01026d9fc719f85a28d6e15da09150adc Mon Sep 17 00:00:00 2001 From: Lakshan Fernando Date: Fri, 5 Apr 2024 10:42:03 -0700 Subject: [PATCH 118/132] Remove DAM annotations from enum converter (#100347) * Remove 
DAM annotations from enum converter * expand the debug assert for enum types * FB * FB2 --- .../System.ComponentModel.TypeConverter.cs | 3 +- .../src/Resources/Strings.resx | 3 ++ .../System/ComponentModel/EnumConverter.cs | 36 ++++++++++++++++--- .../ReflectTypeDescriptionProvider.cs | 10 +----- ...iCompatBaseline.NetCoreAppLatestStable.xml | 36 +++++++++++++++++++ 5 files changed, 72 insertions(+), 16 deletions(-) diff --git a/src/libraries/System.ComponentModel.TypeConverter/ref/System.ComponentModel.TypeConverter.cs b/src/libraries/System.ComponentModel.TypeConverter/ref/System.ComponentModel.TypeConverter.cs index 5b3f3fdef12ff4..59c3284a7fabf0 100644 --- a/src/libraries/System.ComponentModel.TypeConverter/ref/System.ComponentModel.TypeConverter.cs +++ b/src/libraries/System.ComponentModel.TypeConverter/ref/System.ComponentModel.TypeConverter.cs @@ -428,9 +428,8 @@ public DoubleConverter() { } } public partial class EnumConverter : System.ComponentModel.TypeConverter { - public EnumConverter([System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute(System.Diagnostics.CodeAnalysis.DynamicallyAccessedMemberTypes.PublicFields | System.Diagnostics.CodeAnalysis.DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] System.Type type) { } + public EnumConverter(System.Type type) { } protected virtual System.Collections.IComparer Comparer { get { throw null; } } - [System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute(System.Diagnostics.CodeAnalysis.DynamicallyAccessedMemberTypes.PublicFields | System.Diagnostics.CodeAnalysis.DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] protected System.Type EnumType { get { throw null; } } protected System.ComponentModel.TypeConverter.StandardValuesCollection? Values { get { throw null; } set { } } public override bool CanConvertFrom(System.ComponentModel.ITypeDescriptorContext? 
context, System.Type sourceType) { throw null; } diff --git a/src/libraries/System.ComponentModel.TypeConverter/src/Resources/Strings.resx b/src/libraries/System.ComponentModel.TypeConverter/src/Resources/Strings.resx index 6930398b398144..ea75a801456861 100644 --- a/src/libraries/System.ComponentModel.TypeConverter/src/Resources/Strings.resx +++ b/src/libraries/System.ComponentModel.TypeConverter/src/Resources/Strings.resx @@ -76,6 +76,9 @@ The value '{0}' is not a valid value for the enum '{1}'. + + Type provided must be an Enum. + Invalid event handler for the {0} event. diff --git a/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/EnumConverter.cs b/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/EnumConverter.cs index 5dcc263e0d0c0e..c9d6d5a13a384f 100644 --- a/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/EnumConverter.cs +++ b/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/EnumConverter.cs @@ -4,6 +4,7 @@ using System.Collections; using System.Collections.Generic; using System.ComponentModel.Design.Serialization; +using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Reflection; @@ -20,12 +21,16 @@ public class EnumConverter : TypeConverter /// Initializes a new instance of the class for the given /// type. /// - public EnumConverter([DynamicallyAccessedMembers(TypeDescriptor.ReflectTypesDynamicallyAccessedMembers)] Type type) + public EnumConverter(Type type) { + if (!type.IsEnum && !type.Equals(typeof(Enum))) + { + throw new ArgumentException(SR.EnumInvalidValue); + } + EnumType = type; } - [DynamicallyAccessedMembers(TypeDescriptor.ReflectTypesDynamicallyAccessedMembers)] protected Type EnumType { get; } protected StandardValuesCollection? Values { get; set; } @@ -156,7 +161,10 @@ private static long GetEnumValue(bool isUnderlyingTypeUInt64, object enumVal, Cu } else { - FieldInfo? 
info = EnumType.GetField(enumName); + [UnconditionalSuppressMessage("Trimming", "IL2075:", Justification = "Trimmer does not trim Enums")] + FieldInfo? GetEnumField(string name) => EnumType.GetField(name); + + FieldInfo? info = GetEnumField(enumName); if (info != null) { return new InstanceDescriptor(info, null); @@ -227,9 +235,27 @@ public override StandardValuesCollection GetStandardValues(ITypeDescriptorContex // We need to get the enum values in this rather round-about way so we can filter // out fields marked Browsable(false). Note that if multiple fields have the same value, // the behavior is undefined, since what we return are just enum values, not names. - Type reflectType = TypeDescriptor.GetReflectionType(EnumType) ?? EnumType; + // Given that EnumType is constrained to be an enum, we suppress calls for reflection with Enum. + + [UnconditionalSuppressMessage("Trimming", "IL2067:", Justification = "Trimmer does not trim Enums")] + [return: DynamicallyAccessedMembers(TypeDescriptor.ReflectTypesDynamicallyAccessedMembers)] + static Type GetTypeDescriptorReflectionType(Type enumType) => TypeDescriptor.GetReflectionType(enumType); + + Type _reflectType = GetTypeDescriptorReflectionType(EnumType); + FieldInfo[]? fields; + + if (_reflectType == null) + { + [UnconditionalSuppressMessage("Trimming", "IL2070:", Justification = "Trimmer does not trim Enums")] + static FieldInfo[]? GetPublicStaticEnumFields(Type type) => type.GetFields(BindingFlags.Public | BindingFlags.Static); + + fields = GetPublicStaticEnumFields(EnumType); + } + else + { + fields = _reflectType.GetFields(BindingFlags.Public | BindingFlags.Static); + } - FieldInfo[]? fields = reflectType.GetFields(BindingFlags.Public | BindingFlags.Static); ArrayList? 
objValues = null; if (fields != null && fields.Length > 0) diff --git a/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/ReflectTypeDescriptionProvider.cs b/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/ReflectTypeDescriptionProvider.cs index 5dacbaa8a25260..5fc0d064f81098 100644 --- a/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/ReflectTypeDescriptionProvider.cs +++ b/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/ReflectTypeDescriptionProvider.cs @@ -193,7 +193,7 @@ private static Dictionary IntrinsicTypeConve // [typeof(Array)] = new IntrinsicTypeConverterData((type) => new ArrayConverter()), [typeof(ICollection)] = new IntrinsicTypeConverterData((type) => new CollectionConverter()), - [typeof(Enum)] = new IntrinsicTypeConverterData((type) => CreateEnumConverter(type), cacheConverterInstance: false), + [typeof(Enum)] = new IntrinsicTypeConverterData((type) => new EnumConverter(type), cacheConverterInstance: false), [s_intrinsicNullableKey] = new IntrinsicTypeConverterData((type) => CreateNullableConverter(type), cacheConverterInstance: false), [s_intrinsicReferenceKey] = new IntrinsicTypeConverterData((type) => new ReferenceConverter(type), cacheConverterInstance: false), }); @@ -204,14 +204,6 @@ private static Dictionary IntrinsicTypeConve Justification = "IntrinsicTypeConverters is marked with RequiresUnreferencedCode. 
It is the only place that should call this.")] private static NullableConverter CreateNullableConverter(Type type) => new NullableConverter(type); - [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2067:UnrecognizedReflectionPattern", - Justification = "Trimmer does not trim enums")] - private static EnumConverter CreateEnumConverter(Type type) - { - Debug.Assert(type.IsEnum || type == typeof(Enum)); - return new EnumConverter(type); - } - private static Hashtable PropertyCache => LazyInitializer.EnsureInitialized(ref s_propertyCache, () => new Hashtable()); private static Hashtable EventCache => LazyInitializer.EnsureInitialized(ref s_eventCache, () => new Hashtable()); diff --git a/src/libraries/apicompat/ApiCompatBaseline.NetCoreAppLatestStable.xml b/src/libraries/apicompat/ApiCompatBaseline.NetCoreAppLatestStable.xml index b908b6cbab0714..09cfe417e3812f 100644 --- a/src/libraries/apicompat/ApiCompatBaseline.NetCoreAppLatestStable.xml +++ b/src/libraries/apicompat/ApiCompatBaseline.NetCoreAppLatestStable.xml @@ -85,6 +85,12 @@ net8.0/netstandard.dll net9.0/netstandard.dll + + CP0014 + M:System.ComponentModel.EnumConverter.#ctor(System.Type)$0:[T:System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute] + net8.0/netstandard.dll + net9.0/netstandard.dll + CP0014 P:System.ComponentModel.DesignerAttribute.DesignerBaseTypeName:[T:System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute] @@ -109,6 +115,12 @@ net8.0/netstandard.dll net9.0/netstandard.dll + + CP0014 + P:System.ComponentModel.EnumConverter.EnumType:[T:System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute] + net8.0/netstandard.dll + net9.0/netstandard.dll + CP0014 M:System.ComponentModel.DesignerAttribute.#ctor(System.String,System.String)$0:[T:System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute] @@ -301,6 +313,12 @@ net8.0/System.ComponentModel.TypeConverter.dll net9.0/System.ComponentModel.TypeConverter.dll + + CP0014 + 
M:System.ComponentModel.EnumConverter.#ctor(System.Type)$0:[T:System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute] + net8.0/System.ComponentModel.TypeConverter.dll + net9.0/System.ComponentModel.TypeConverter.dll + CP0014 P:System.ComponentModel.DesignerAttribute.DesignerBaseTypeName:[T:System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute] @@ -325,6 +343,12 @@ net8.0/System.ComponentModel.TypeConverter.dll net9.0/System.ComponentModel.TypeConverter.dll + + CP0014 + P:System.ComponentModel.EnumConverter.EnumType:[T:System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute] + net8.0/System.ComponentModel.TypeConverter.dll + net9.0/System.ComponentModel.TypeConverter.dll + CP0014 M:System.ComponentModel.DesignerAttribute.#ctor(System.String,System.String)$0:[T:System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute] @@ -409,6 +433,12 @@ net8.0/System.dll net9.0/System.dll + + CP0014 + M:System.ComponentModel.EnumConverter.#ctor(System.Type)$0:[T:System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute] + net8.0/System.dll + net9.0/System.dll + CP0014 P:System.ComponentModel.DesignerAttribute.DesignerBaseTypeName:[T:System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute] @@ -433,4 +463,10 @@ net8.0/System.dll net9.0/System.dll + + CP0014 + P:System.ComponentModel.EnumConverter.EnumType:[T:System.Diagnostics.CodeAnalysis.DynamicallyAccessedMembersAttribute] + net8.0/System.dll + net9.0/System.dll + \ No newline at end of file From e1f53785bfc203cd70aac486f49e269e9033f639 Mon Sep 17 00:00:00 2001 From: Pavel Savara Date: Fri, 5 Apr 2024 19:55:12 +0200 Subject: [PATCH 119/132] [browser] eslint more autofix (#100681) --- src/mono/browser/runtime/.eslintrc.cjs | 4 +++- src/mono/browser/runtime/hybrid-globalization/calendar.ts | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/mono/browser/runtime/.eslintrc.cjs b/src/mono/browser/runtime/.eslintrc.cjs index 
0cae796f4016aa..0885027b96303c 100644 --- a/src/mono/browser/runtime/.eslintrc.cjs +++ b/src/mono/browser/runtime/.eslintrc.cjs @@ -53,12 +53,14 @@ module.exports = { ], "brace-style": ["error"], "eol-last": ["error"], - "space-before-blocks": ["error"], + "space-before-blocks": ["error", { "functions": "always", "keywords": "always", "classes": "always" }], "semi-spacing": ["error", { "before": false, "after": true }], + "keyword-spacing": ["error", { "before": true, "after": true, "overrides": { "this": { "before": false } } }], "no-trailing-spaces": ["error"], "object-curly-spacing": ["error", "always"], "array-bracket-spacing": ["error"], "space-infix-ops": ["error"], + "func-call-spacing": ["error", "never"], "space-before-function-paren": ["error", "always"], } }; diff --git a/src/mono/browser/runtime/hybrid-globalization/calendar.ts b/src/mono/browser/runtime/hybrid-globalization/calendar.ts index 4356f3391a16ad..d3944f514ce854 100644 --- a/src/mono/browser/runtime/hybrid-globalization/calendar.ts +++ b/src/mono/browser/runtime/hybrid-globalization/calendar.ts @@ -231,7 +231,7 @@ function getDayNames (locale: string | undefined) : { long: string[], abbreviate const dayNames = []; const dayNamesAbb = []; const dayNamesSS = []; - for(let i = 0; i < 7; i++) { + for (let i = 0; i < 7; i++) { dayNames[i] = weekDay.toLocaleDateString(locale, { weekday: "long" }); dayNamesAbb[i] = weekDay.toLocaleDateString(locale, { weekday: "short" }); dayNamesSS[i] = weekDay.toLocaleDateString(locale, { weekday: "narrow" }); @@ -251,7 +251,7 @@ function getMonthNames (locale: string | undefined) : { long: string[], abbrevia const monthsGen: string[] = []; const monthsAbbGen: string[] = []; let isChineeseStyle, isShortFormBroken; - for(let i = firstMonthShift; i < 12 + firstMonthShift; i++) { + for (let i = firstMonthShift; i < 12 + firstMonthShift; i++) { const monthCnt = i % 12; date.setMonth(monthCnt); From 41b10918904ac1a81616a854c230dea479fb060c Mon Sep 17 00:00:00 2001 
From: Filip Navara Date: Fri, 5 Apr 2024 20:46:18 +0200 Subject: [PATCH 120/132] Replace FEATURE_EH_FUNCLETS in JIT with runtime switch (#99191) * Replace FEATURE_EH_FUNCLETS/FEATURE_EH_CALLFINALLY_THUNKS in JIT with runtime switch * Cache Native AOT ABI check to see if TP improves --------- Co-authored-by: Bruce Forstall --- docs/design/coreclr/botr/clr-abi.md | 6 +- src/coreclr/clrdefinitions.cmake | 4 - src/coreclr/crosscomponents.cmake | 7 - src/coreclr/inc/clrnt.h | 2 - src/coreclr/inc/gcinfo.h | 2 +- src/coreclr/jit/CMakeLists.txt | 3 - src/coreclr/jit/block.cpp | 13 +- src/coreclr/jit/block.h | 2 +- src/coreclr/jit/codegen.h | 19 +- src/coreclr/jit/codegencommon.cpp | 195 +++++----- src/coreclr/jit/codegenlinear.cpp | 40 +-- src/coreclr/jit/codegenxarch.cpp | 284 +++++++-------- src/coreclr/jit/compiler.cpp | 29 +- src/coreclr/jit/compiler.h | 120 +++---- src/coreclr/jit/compiler.hpp | 128 +++---- src/coreclr/jit/compphases.h | 2 - src/coreclr/jit/emit.cpp | 79 ++--- src/coreclr/jit/emit.h | 14 - src/coreclr/jit/emitxarch.cpp | 2 - src/coreclr/jit/fgbasic.cpp | 335 +++++++++--------- src/coreclr/jit/fgdiagnostic.cpp | 14 +- src/coreclr/jit/fgehopt.cpp | 294 +++++++-------- src/coreclr/jit/fgopt.cpp | 18 +- src/coreclr/jit/fgstmt.cpp | 4 +- src/coreclr/jit/flowgraph.cpp | 40 +-- src/coreclr/jit/gcencode.cpp | 26 +- src/coreclr/jit/gcinfo.cpp | 12 +- src/coreclr/jit/gentree.cpp | 22 +- src/coreclr/jit/gtlist.h | 4 +- src/coreclr/jit/gtstructs.h | 2 +- src/coreclr/jit/importer.cpp | 61 ++-- src/coreclr/jit/jiteh.cpp | 262 +++++++------- src/coreclr/jit/jiteh.h | 8 +- src/coreclr/jit/jitgcinfo.h | 4 - src/coreclr/jit/lclvars.cpp | 47 ++- src/coreclr/jit/liveness.cpp | 148 ++++---- src/coreclr/jit/lsraxarch.cpp | 2 +- src/coreclr/jit/optimizer.cpp | 4 +- src/coreclr/jit/scopeinfo.cpp | 65 ++-- src/coreclr/jit/targetamd64.h | 1 - src/coreclr/jit/targetarm.h | 1 - src/coreclr/jit/targetarm64.h | 1 - src/coreclr/jit/targetloongarch64.h | 2 - 
src/coreclr/jit/targetriscv64.h | 1 - src/coreclr/jit/targetx86.h | 9 +- src/coreclr/jit/unwind.cpp | 29 +- src/coreclr/jit/unwindarmarch.cpp | 2 - src/coreclr/jit/unwindx86.cpp | 38 +- src/coreclr/jit/valuenum.cpp | 2 +- .../Common/JitInterface/JitConfigProvider.cs | 6 - 50 files changed, 1104 insertions(+), 1311 deletions(-) diff --git a/docs/design/coreclr/botr/clr-abi.md b/docs/design/coreclr/botr/clr-abi.md index b1680651465775..417f6fdec53460 100644 --- a/docs/design/coreclr/botr/clr-abi.md +++ b/docs/design/coreclr/botr/clr-abi.md @@ -177,11 +177,13 @@ This section describes the conventions the JIT needs to follow when generating c ## Funclets -For all platforms except Windows/x86, all managed EH handlers (finally, fault, filter, filter-handler, and catch) are extracted into their own 'funclets'. To the OS they are treated just like first class functions (separate PDATA and XDATA (`RUNTIME_FUNCTION` entry), etc.). The CLR currently treats them just like part of the parent function in many ways. The main function and all funclets must be allocated in a single code allocation (see hot cold splitting). They 'share' GC info. Only the main function prolog can be hot patched. +For all platforms except Windows/x86 on CoreCLR, all managed EH handlers (finally, fault, filter, filter-handler, and catch) are extracted into their own 'funclets'. To the OS they are treated just like first class functions (separate PDATA and XDATA (`RUNTIME_FUNCTION` entry), etc.). The CLR currently treats them just like part of the parent function in many ways. The main function and all funclets must be allocated in a single code allocation (see hot cold splitting). They 'share' GC info. Only the main function prolog can be hot patched. The only way to enter a handler funclet is via a call. In the case of an exception, the call is from the VM's EH subsystem as part of exception dispatch/unwind. In the non-exceptional case, this is called local unwind or a non-local exit. 
In C# this is accomplished by simply falling-through/out of a try body or an explicit goto. In IL this is always accomplished via a LEAVE opcode, within a try body, targeting an IL offset outside the try body. In such cases the call is from the JITed code of the parent function. -For Windows/x86, all handlers are generated within the method body, typically in lexical order. A nested try/catch is generated completely within the EH region in which it is nested. These handlers are essentially "in-line funclets", but they do not look like normal functions: they do not have a normal prolog or epilog, although they do have special entry/exit and register conventions. Also, nested handlers are not un-nested as for funclets: the code for a nested handler is generated within the handler in which it is nested. +For Windows/x86 on CoreCLR, all handlers are generated within the method body, typically in lexical order. A nested try/catch is generated completely within the EH region in which it is nested. These handlers are essentially "in-line funclets", but they do not look like normal functions: they do not have a normal prolog or epilog, although they do have special entry/exit and register conventions. Also, nested handlers are not un-nested as for funclets: the code for a nested handler is generated within the handler in which it is nested. + +For Windows/x86 on NativeAOT and Linux/x86, funclets are used just like on other platforms. 
## Cloned finallys diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake index 0040f9575de27f..4201c06692eeb5 100644 --- a/src/coreclr/clrdefinitions.cmake +++ b/src/coreclr/clrdefinitions.cmake @@ -288,8 +288,4 @@ function(set_target_definitions_to_custom_os_and_arch) if (TARGETDETAILS_ARCH STREQUAL "armel") target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE ARM_SOFTFP) endif() - - if (NOT (TARGETDETAILS_ARCH STREQUAL "x86") OR (TARGETDETAILS_OS MATCHES "^unix") OR (TARGETDETAILS_OS MATCHES "win_aot")) - target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE FEATURE_EH_FUNCLETS) - endif (NOT (TARGETDETAILS_ARCH STREQUAL "x86") OR (TARGETDETAILS_OS MATCHES "^unix") OR (TARGETDETAILS_OS MATCHES "win_aot")) endfunction() diff --git a/src/coreclr/crosscomponents.cmake b/src/coreclr/crosscomponents.cmake index b06b7060704892..11e923805a6ea0 100644 --- a/src/coreclr/crosscomponents.cmake +++ b/src/coreclr/crosscomponents.cmake @@ -25,13 +25,6 @@ if (CLR_CMAKE_HOST_OS STREQUAL CLR_CMAKE_TARGET_OS OR CLR_CMAKE_TARGET_IOS OR CL DESTINATIONS . COMPONENT crosscomponents ) - if (CLR_CMAKE_TARGET_ARCH_I386) - install_clr (TARGETS - clrjit_win_aot_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} - DESTINATIONS . 
- COMPONENT crosscomponents - ) - endif() endif() endif() endif() diff --git a/src/coreclr/inc/clrnt.h b/src/coreclr/inc/clrnt.h index fb7d8102545625..cacc865b715f02 100644 --- a/src/coreclr/inc/clrnt.h +++ b/src/coreclr/inc/clrnt.h @@ -201,7 +201,6 @@ typedef struct _DISPATCHER_CONTEXT { #define RUNTIME_FUNCTION__BeginAddress(prf) (prf)->BeginAddress #define RUNTIME_FUNCTION__SetBeginAddress(prf,addr) ((prf)->BeginAddress = (addr)) -#ifdef FEATURE_EH_FUNCLETS #include "win64unwind.h" #include "daccess.h" @@ -235,7 +234,6 @@ RtlVirtualUnwind ( __inout_opt PT_KNONVOLATILE_CONTEXT_POINTERS ContextPointers ); #endif // HOST_X86 -#endif // FEATURE_EH_FUNCLETS #endif // TARGET_X86 diff --git a/src/coreclr/inc/gcinfo.h b/src/coreclr/inc/gcinfo.h index f334b099f2578e..16bff25525a97d 100644 --- a/src/coreclr/inc/gcinfo.h +++ b/src/coreclr/inc/gcinfo.h @@ -25,7 +25,7 @@ const unsigned OFFSET_MASK = 0x3; // mask to access the low 2 bits // const unsigned byref_OFFSET_FLAG = 0x1; // the offset is an interior ptr const unsigned pinned_OFFSET_FLAG = 0x2; // the offset is a pinned ptr -#if defined(TARGET_X86) && !defined(FEATURE_EH_FUNCLETS) +#if defined(TARGET_X86) // JIT32_ENCODER has additional restriction on x86 without funclets: // - for untracked locals the flags allowed are "pinned" and "byref" // - for tracked locals the flags allowed are "this" and "byref" diff --git a/src/coreclr/jit/CMakeLists.txt b/src/coreclr/jit/CMakeLists.txt index 2f0b3659aa5ad3..766a0a0150e873 100644 --- a/src/coreclr/jit/CMakeLists.txt +++ b/src/coreclr/jit/CMakeLists.txt @@ -23,8 +23,6 @@ function(create_standalone_jit) if(TARGETDETAILS_OS STREQUAL "unix_osx" OR TARGETDETAILS_OS STREQUAL "unix_anyos") set(JIT_ARCH_LINK_LIBRARIES gcinfo_unix_${TARGETDETAILS_ARCH}) - elseif(TARGETDETAILS_OS STREQUAL "win_aot") - set(JIT_ARCH_LINK_LIBRARIES gcinfo_win_${TARGETDETAILS_ARCH}) else() set(JIT_ARCH_LINK_LIBRARIES gcinfo_${TARGETDETAILS_OS}_${TARGETDETAILS_ARCH}) endif() @@ -658,7 +656,6 @@ else() 
create_standalone_jit(TARGET clrjit_universal_arm_${ARCH_HOST_NAME} OS universal ARCH arm DESTINATIONS .) target_compile_definitions(clrjit_universal_arm_${ARCH_HOST_NAME} PRIVATE ARM_SOFTFP CONFIGURABLE_ARM_ABI) create_standalone_jit(TARGET clrjit_win_x86_${ARCH_HOST_NAME} OS win ARCH x86 DESTINATIONS .) - create_standalone_jit(TARGET clrjit_win_aot_x86_${ARCH_HOST_NAME} OS win_aot ARCH x86 DESTINATIONS .) endif (CLR_CMAKE_TARGET_ARCH_RISCV64) if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 6cde9e0e93d8b1..60dbce6aaf00a0 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -1812,9 +1812,7 @@ bool BasicBlock::hasEHBoundaryIn() const bool returnVal = (bbCatchTyp != BBCT_NONE); if (!returnVal) { -#if FEATURE_EH_FUNCLETS assert(!HasFlag(BBF_FUNCLET_BEG)); -#endif // FEATURE_EH_FUNCLETS } return returnVal; } @@ -1833,16 +1831,7 @@ bool BasicBlock::hasEHBoundaryIn() const // bool BasicBlock::hasEHBoundaryOut() const { - bool returnVal = KindIs(BBJ_EHFILTERRET, BBJ_EHFINALLYRET, BBJ_EHFAULTRET); - -#if FEATURE_EH_FUNCLETS - if (bbKind == BBJ_EHCATCHRET) - { - returnVal = true; - } -#endif // FEATURE_EH_FUNCLETS - - return returnVal; + return KindIs(BBJ_EHFILTERRET, BBJ_EHFINALLYRET, BBJ_EHFAULTRET, BBJ_EHCATCHRET); } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 16321157664a50..500b5274b6f41c 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -66,7 +66,7 @@ enum BBKinds : BYTE BBJ_EHFINALLYRET,// block ends with 'endfinally' (for finally) BBJ_EHFAULTRET, // block ends with 'endfinally' (IL alias for 'endfault') (for fault) BBJ_EHFILTERRET, // block ends with 'endfilter' - BBJ_EHCATCHRET, // block ends with a leave out of a catch (only #if defined(FEATURE_EH_FUNCLETS)) + BBJ_EHCATCHRET, // block ends with a leave out of a catch BBJ_THROW, // block ends 
with 'throw' BBJ_RETURN, // block ends with 'ret' BBJ_ALWAYS, // block always jumps to the target diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h index ae97be76fbe5a2..410bc440874e32 100644 --- a/src/coreclr/jit/codegen.h +++ b/src/coreclr/jit/codegen.h @@ -559,8 +559,6 @@ class CodeGen final : public CodeGenInterface void genFnProlog(); void genFnEpilog(BasicBlock* block); -#if defined(FEATURE_EH_FUNCLETS) - void genReserveFuncletProlog(BasicBlock* block); void genReserveFuncletEpilog(BasicBlock* block); void genFuncletProlog(BasicBlock* block); @@ -643,16 +641,6 @@ class CodeGen final : public CodeGenInterface void genUpdateCurrentFunclet(BasicBlock* block); -#else // !FEATURE_EH_FUNCLETS - - // This is a no-op when there are no funclets! - void genUpdateCurrentFunclet(BasicBlock* block) - { - return; - } - -#endif // !FEATURE_EH_FUNCLETS - void genGeneratePrologsAndEpilogs(); #if defined(DEBUG) @@ -747,9 +735,7 @@ class CodeGen final : public CodeGenInterface void siOpenScopesForNonTrackedVars(const BasicBlock* block, unsigned int lastBlockILEndOffset); protected: -#if defined(FEATURE_EH_FUNCLETS) bool siInFuncletRegion; // Have we seen the start of the funclet region? 
-#endif // FEATURE_EH_FUNCLETS IL_OFFSET siLastEndOffs; // IL offset of the (exclusive) end of the last block processed @@ -1294,11 +1280,10 @@ class CodeGen final : public CodeGenInterface void genCodeForBfiz(GenTreeOp* tree); #endif // TARGET_ARM64 -#if defined(FEATURE_EH_FUNCLETS) void genEHCatchRet(BasicBlock* block); -#else // !FEATURE_EH_FUNCLETS +#if defined(FEATURE_EH_WINDOWS_X86) void genEHFinallyOrFilterRet(BasicBlock* block); -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 void genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode); void genMultiRegStoreToLocal(GenTreeLclVar* lclNode); diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 2cee578a47bdc3..021a5d9dc5798b 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -391,25 +391,22 @@ void CodeGen::genMarkLabelsForCodegen() case BBJ_CALLFINALLY: // The finally target itself will get marked by walking the EH table, below, and marking // all handler begins. - -#if FEATURE_EH_CALLFINALLY_THUNKS - { - // For callfinally thunks, we need to mark the block following the callfinally/callfinallyret pair, - // as that's needed for identifying the range of the "duplicate finally" region in EH data. - BasicBlock* bbToLabel = block->Next(); - if (block->isBBCallFinallyPair()) - { - bbToLabel = bbToLabel->Next(); // skip the BBJ_CALLFINALLYRET - } - if (bbToLabel != nullptr) + if (compiler->UsesCallFinallyThunks()) { - JITDUMP(" " FMT_BB " : callfinally thunk region end\n", bbToLabel->bbNum); - bbToLabel->SetFlags(BBF_HAS_LABEL); + // For callfinally thunks, we need to mark the block following the callfinally/callfinallyret pair, + // as that's needed for identifying the range of the "duplicate finally" region in EH data. 
+ BasicBlock* bbToLabel = block->Next(); + if (block->isBBCallFinallyPair()) + { + bbToLabel = bbToLabel->Next(); // skip the BBJ_CALLFINALLYRET + } + if (bbToLabel != nullptr) + { + JITDUMP(" " FMT_BB " : callfinally thunk region end\n", bbToLabel->bbNum); + bbToLabel->SetFlags(BBF_HAS_LABEL); + } } - } -#endif // FEATURE_EH_CALLFINALLY_THUNKS - - break; + break; case BBJ_CALLFINALLYRET: JITDUMP(" " FMT_BB " : finally continuation\n", block->GetFinallyContinuation()->bbNum); @@ -1463,10 +1460,11 @@ void CodeGen::genExitCode(BasicBlock* block) void CodeGen::genJumpToThrowHlpBlk(emitJumpKind jumpKind, SpecialCodeKind codeKind, BasicBlock* failBlk) { bool useThrowHlpBlk = compiler->fgUseThrowHelperBlocks(); -#if defined(UNIX_X86_ABI) && defined(FEATURE_EH_FUNCLETS) +#if defined(UNIX_X86_ABI) + // TODO: Is this really UNIX_X86_ABI specific? Should we guard with compiler->UsesFunclets() instead? // Inline exception-throwing code in funclet to make it possible to unwind funclet frames. useThrowHlpBlk = useThrowHlpBlk && (compiler->funCurrentFunc()->funKind == FUNC_ROOT); -#endif // UNIX_X86_ABI && FEATURE_EH_FUNCLETS +#endif // UNIX_X86_ABI if (useThrowHlpBlk) { @@ -1586,8 +1584,6 @@ void CodeGen::genCheckOverflow(GenTree* tree) } #endif -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * * Update the current funclet as needed by calling genUpdateCurrentFunclet(). 
@@ -1598,6 +1594,11 @@ void CodeGen::genCheckOverflow(GenTree* tree) void CodeGen::genUpdateCurrentFunclet(BasicBlock* block) { + if (!compiler->UsesFunclets()) + { + return; + } + if (block->HasFlag(BBF_FUNCLET_BEG)) { compiler->funSetCurrentFunc(compiler->funGetFuncIdx(block)); @@ -1614,7 +1615,7 @@ void CodeGen::genUpdateCurrentFunclet(BasicBlock* block) } else { - assert(compiler->compCurrFuncIdx <= compiler->compFuncInfoCount); + assert(compiler->funCurrentFuncIdx() <= compiler->compFuncInfoCount); if (compiler->funCurrentFunc()->funKind == FUNC_FILTER) { assert(compiler->ehGetDsc(compiler->funCurrentFunc()->funEHIndex)->InFilterRegionBBRange(block)); @@ -1631,8 +1632,6 @@ void CodeGen::genUpdateCurrentFunclet(BasicBlock* block) } } -#endif // FEATURE_EH_FUNCLETS - //---------------------------------------------------------------------- // genGenerateCode: Generate code for the function. // @@ -2193,14 +2192,13 @@ void CodeGen::genReportEH() unsigned EHCount = compiler->compHndBBtabCount; -#if defined(FEATURE_EH_FUNCLETS) // Count duplicated clauses. This uses the same logic as below, where we actually generate them for reporting to the // VM. unsigned duplicateClauseCount = 0; unsigned enclosingTryIndex; // Duplicate clauses are not used by NativeAOT ABI - if (!isNativeAOT) + if (compiler->UsesFunclets() && !isNativeAOT) { for (XTnum = 0; XTnum < compiler->compHndBBtabCount; XTnum++) { @@ -2215,11 +2213,10 @@ void CodeGen::genReportEH() EHCount += duplicateClauseCount; } -#if FEATURE_EH_CALLFINALLY_THUNKS unsigned clonedFinallyCount = 0; // Duplicate clauses are not used by NativeAOT ABI - if (!isNativeAOT) + if (compiler->UsesFunclets() && compiler->UsesCallFinallyThunks() && !isNativeAOT) { // We don't keep track of how many cloned finally there are. So, go through and count. 
// We do a quick pass first through the EH table to see if there are any try/finally @@ -2247,27 +2244,33 @@ void CodeGen::genReportEH() EHCount += clonedFinallyCount; } } -#endif // FEATURE_EH_CALLFINALLY_THUNKS - -#endif // FEATURE_EH_FUNCLETS #ifdef DEBUG if (compiler->opts.dspEHTable) { -#if defined(FEATURE_EH_FUNCLETS) -#if FEATURE_EH_CALLFINALLY_THUNKS - printf("%d EH table entries, %d duplicate clauses, %d cloned finallys, %d total EH entries reported to VM\n", - compiler->compHndBBtabCount, duplicateClauseCount, clonedFinallyCount, EHCount); - assert(compiler->compHndBBtabCount + duplicateClauseCount + clonedFinallyCount == EHCount); -#else // !FEATURE_EH_CALLFINALLY_THUNKS - printf("%d EH table entries, %d duplicate clauses, %d total EH entries reported to VM\n", - compiler->compHndBBtabCount, duplicateClauseCount, EHCount); - assert(compiler->compHndBBtabCount + duplicateClauseCount == EHCount); -#endif // !FEATURE_EH_CALLFINALLY_THUNKS -#else // !FEATURE_EH_FUNCLETS - printf("%d EH table entries, %d total EH entries reported to VM\n", compiler->compHndBBtabCount, EHCount); - assert(compiler->compHndBBtabCount == EHCount); -#endif // !FEATURE_EH_FUNCLETS + if (compiler->UsesFunclets()) + { + if (compiler->UsesCallFinallyThunks()) + { + printf("%d EH table entries, %d duplicate clauses, %d cloned finallys, %d total EH entries reported to " + "VM\n", + compiler->compHndBBtabCount, duplicateClauseCount, clonedFinallyCount, EHCount); + assert(compiler->compHndBBtabCount + duplicateClauseCount + clonedFinallyCount == EHCount); + } + else + { + printf("%d EH table entries, %d duplicate clauses, %d total EH entries reported to VM\n", + compiler->compHndBBtabCount, duplicateClauseCount, EHCount); + assert(compiler->compHndBBtabCount + duplicateClauseCount == EHCount); + } + } +#if defined(FEATURE_EH_WINDOWS_X86) + else + { + printf("%d EH table entries, %d total EH entries reported to VM\n", compiler->compHndBBtabCount, EHCount); + 
assert(compiler->compHndBBtabCount == EHCount); + } +#endif // FEATURE_EH_WINDOWS_X86 } #endif // DEBUG @@ -2335,7 +2338,6 @@ void CodeGen::genReportEH() ++XTnum; } -#if defined(FEATURE_EH_FUNCLETS) // Now output duplicated clauses. // // If a funclet has been created by moving a handler out of a try region that it was originally nested @@ -2558,7 +2560,6 @@ void CodeGen::genReportEH() assert(duplicateClauseCount == reportedDuplicateClauseCount); } // if (duplicateClauseCount > 0) -#if FEATURE_EH_CALLFINALLY_THUNKS if (clonedFinallyCount > 0) { unsigned reportedClonedFinallyCount = 0; @@ -2612,10 +2613,7 @@ void CodeGen::genReportEH() } // for each block assert(clonedFinallyCount == reportedClonedFinallyCount); - } // if (clonedFinallyCount > 0) -#endif // FEATURE_EH_CALLFINALLY_THUNKS - -#endif // FEATURE_EH_FUNCLETS + } // if (clonedFinallyCount > 0) assert(XTnum == EHCount); } @@ -4510,6 +4508,7 @@ void CodeGen::genCheckUseBlockInit() #else // !defined(TARGET_AMD64) genUseBlockInit = (genInitStkLclCnt > 8); + #endif #else @@ -5341,8 +5340,6 @@ void CodeGen::genReserveEpilog(BasicBlock* block) block->IsLast()); } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * * Reserve space for a funclet prolog. 
@@ -5350,6 +5347,7 @@ void CodeGen::genReserveEpilog(BasicBlock* block) void CodeGen::genReserveFuncletProlog(BasicBlock* block) { + assert(compiler->UsesFunclets()); assert(block != nullptr); /* Currently, no registers are live on entry to the prolog, except maybe @@ -5380,6 +5378,7 @@ void CodeGen::genReserveFuncletProlog(BasicBlock* block) void CodeGen::genReserveFuncletEpilog(BasicBlock* block) { + assert(compiler->UsesFunclets()); assert(block != nullptr); JITDUMP("Reserving funclet epilog IG for block " FMT_BB "\n", block->bbNum); @@ -5388,8 +5387,6 @@ void CodeGen::genReserveFuncletEpilog(BasicBlock* block) gcInfo.gcRegByrefSetCur, block->IsLast()); } -#endif // FEATURE_EH_FUNCLETS - /***************************************************************************** * Finalize the frame size and offset assignments. * @@ -5703,7 +5700,7 @@ void CodeGen::genFnProlog() } #endif // DEBUG -#if defined(FEATURE_EH_FUNCLETS) && defined(DEBUG) +#if defined(DEBUG) // We cannot force 0-initialization of the PSPSym // as it will overwrite the real value @@ -5713,7 +5710,7 @@ void CodeGen::genFnProlog() assert(!varDsc->lvMustInit); } -#endif // FEATURE_EH_FUNCLETS && DEBUG +#endif // DEBUG /*------------------------------------------------------------------------- * @@ -6243,33 +6240,35 @@ void CodeGen::genFnProlog() genZeroInitFrame(untrLclHi, untrLclLo, initReg, &initRegZeroed); -#if defined(FEATURE_EH_FUNCLETS) - - genSetPSPSym(initReg, &initRegZeroed); - -#else // !FEATURE_EH_FUNCLETS - - // when compInitMem is true the genZeroInitFrame will zero out the shadow SP slots - if (compiler->ehNeedsShadowSPslots() && !compiler->info.compInitMem) + if (compiler->UsesFunclets()) + { + genSetPSPSym(initReg, &initRegZeroed); + } + else { - // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) - unsigned filterEndOffsetSlotOffs = compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE; +#if defined(FEATURE_EH_WINDOWS_X86) + // when compInitMem is 
true the genZeroInitFrame will zero out the shadow SP slots + if (compiler->ehNeedsShadowSPslots() && !compiler->info.compInitMem) + { + // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) + unsigned filterEndOffsetSlotOffs = + compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE; - // Zero out the slot for nesting level 0 - unsigned firstSlotOffs = filterEndOffsetSlotOffs - TARGET_POINTER_SIZE; + // Zero out the slot for nesting level 0 + unsigned firstSlotOffs = filterEndOffsetSlotOffs - TARGET_POINTER_SIZE; - if (!initRegZeroed) - { - instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg); - initRegZeroed = true; - } + if (!initRegZeroed) + { + instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg); + initRegZeroed = true; + } - GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, initReg, compiler->lvaShadowSPslotsVar, - firstSlotOffs); + GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, initReg, compiler->lvaShadowSPslotsVar, + firstSlotOffs); + } +#endif // FEATURE_EH_WINDOWS_X86 } -#endif // !FEATURE_EH_FUNCLETS - genReportGenericContextArg(initReg, &initRegZeroed); #ifdef JIT32_GCENCODER @@ -6750,15 +6749,14 @@ void CodeGen::genGeneratePrologsAndEpilogs() // Generate all the prologs and epilogs. -#if defined(FEATURE_EH_FUNCLETS) - - // Capture the data we're going to use in the funclet prolog and epilog generation. This is - // information computed during codegen, or during function prolog generation, like - // frame offsets. It must run after main function prolog generation. - - genCaptureFuncletPrologEpilogInfo(); + if (compiler->UsesFunclets()) + { + // Capture the data we're going to use in the funclet prolog and epilog generation. This is + // information computed during codegen, or during function prolog generation, like + // frame offsets. It must run after main function prolog generation. 
-#endif // FEATURE_EH_FUNCLETS + genCaptureFuncletPrologEpilogInfo(); + } // Walk the list of prologs and epilogs and generate them. // We maintain a list of prolog and epilog basic blocks in @@ -7918,20 +7916,25 @@ void CodeGen::genReturn(GenTree* treeNode) #if defined(DEBUG) && defined(TARGET_XARCH) bool doStackPointerCheck = compiler->opts.compStackCheckOnRet; -#if defined(FEATURE_EH_FUNCLETS) - // Don't do stack pointer check at the return from a funclet; only for the main function. - if (compiler->funCurrentFunc()->funKind != FUNC_ROOT) + if (compiler->UsesFunclets()) { - doStackPointerCheck = false; + // Don't do stack pointer check at the return from a funclet; only for the main function. + if (compiler->funCurrentFunc()->funKind != FUNC_ROOT) + { + doStackPointerCheck = false; + } } -#else // !FEATURE_EH_FUNCLETS - // Don't generate stack checks for x86 finally/filter EH returns: these are not invoked - // with the same SP as the main function. See also CodeGen::genEHFinallyOrFilterRet(). - if (compiler->compCurBB->KindIs(BBJ_EHFINALLYRET, BBJ_EHFAULTRET, BBJ_EHFILTERRET)) + else { - doStackPointerCheck = false; +#if defined(FEATURE_EH_WINDOWS_X86) + // Don't generate stack checks for x86 finally/filter EH returns: these are not invoked + // with the same SP as the main function. See also CodeGen::genEHFinallyOrFilterRet(). 
+ if (compiler->compCurBB->KindIs(BBJ_EHFINALLYRET, BBJ_EHFAULTRET, BBJ_EHFILTERRET)) + { + doStackPointerCheck = false; + } +#endif // FEATURE_EH_WINDOWS_X86 } -#endif // !FEATURE_EH_FUNCLETS genStackPointerCheck(doStackPointerCheck, compiler->lvaReturnSpCheck); #endif // defined(DEBUG) && defined(TARGET_XARCH) diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index cdfd7b1666c460..351ca14942838b 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -376,12 +376,11 @@ void CodeGen::genCodeForBBlist() bool firstMapping = true; -#if defined(FEATURE_EH_FUNCLETS) if (block->HasFlag(BBF_FUNCLET_BEG)) { + assert(compiler->UsesFunclets()); genReserveFuncletProlog(block); } -#endif // FEATURE_EH_FUNCLETS // Clear compCurStmt and compCurLifeTree. compiler->compCurStmt = nullptr; @@ -737,32 +736,26 @@ void CodeGen::genCodeForBBlist() block = genCallFinally(block); break; -#if defined(FEATURE_EH_FUNCLETS) - case BBJ_EHCATCHRET: + assert(compiler->UsesFunclets()); genEHCatchRet(block); FALLTHROUGH; case BBJ_EHFINALLYRET: case BBJ_EHFAULTRET: case BBJ_EHFILTERRET: - genReserveFuncletEpilog(block); - break; - -#else // !FEATURE_EH_FUNCLETS - - case BBJ_EHCATCHRET: - noway_assert(!"Unexpected BBJ_EHCATCHRET"); // not used on x86 - break; - - case BBJ_EHFINALLYRET: - case BBJ_EHFAULTRET: - case BBJ_EHFILTERRET: - genEHFinallyOrFilterRet(block); + if (compiler->UsesFunclets()) + { + genReserveFuncletEpilog(block); + } +#if defined(FEATURE_EH_WINDOWS_X86) + else + { + genEHFinallyOrFilterRet(block); + } +#endif // FEATURE_EH_WINDOWS_X86 break; -#endif // !FEATURE_EH_FUNCLETS - case BBJ_SWITCH: break; @@ -838,9 +831,7 @@ void CodeGen::genCodeForBBlist() assert(ShouldAlignLoops()); assert(!block->isBBCallFinallyPairTail()); -#if FEATURE_EH_CALLFINALLY_THUNKS assert(!block->KindIs(BBJ_CALLFINALLY)); -#endif // FEATURE_EH_CALLFINALLY_THUNKS GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->KindIs(BBJ_ALWAYS) && 
!removedJmp)); } @@ -866,7 +857,7 @@ void CodeGen::genCodeForBBlist() #endif // DEBUG } //------------------ END-FOR each block of the method ------------------- -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // If this is a synchronized method on x86, and we generated all the code without // generating the "exit monitor" call, then we must have deleted the single return block // with that call because it was dead code. We still need to report the monitor range @@ -876,14 +867,15 @@ void CodeGen::genCodeForBBlist() // Do this before cleaning the GC refs below; we don't want to create an IG that clears // the `this` pointer for lvaKeepAliveAndReportThis. - if ((compiler->info.compFlags & CORINFO_FLG_SYNCH) && (compiler->syncEndEmitCookie == nullptr)) + if (!compiler->UsesFunclets() && (compiler->info.compFlags & CORINFO_FLG_SYNCH) && + (compiler->syncEndEmitCookie == nullptr)) { JITDUMP("Synchronized method with missing exit monitor call; adding final label\n"); compiler->syncEndEmitCookie = GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur); noway_assert(compiler->syncEndEmitCookie != nullptr); } -#endif // !FEATURE_EH_FUNCLETS +#endif // There could be variables alive at this point. For example see lvaKeepAliveAndReportThis. // This call is for cleaning the GC refs diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 6f68063d0b4fa6..3e5f1a4b38a691 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -210,127 +210,131 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) BasicBlock* const nextBlock = block->Next(); -#if defined(FEATURE_EH_FUNCLETS) - // Generate a call to the finally, like this: - // mov rcx,qword ptr [rbp + 20H] // Load rcx with PSPSym - // call finally-funclet - // jmp finally-return // Only for non-retless finally calls - // The jmp can be a NOP if we're going to the next block. 
- // If we're generating code for the main function (not a funclet), and there is no localloc, - // then RSP at this point is the same value as that stored in the PSPSym. So just copy RSP - // instead of loading the PSPSym in this case, or if PSPSym is not used (NativeAOT ABI). - - if ((compiler->lvaPSPSym == BAD_VAR_NUM) || - (!compiler->compLocallocUsed && (compiler->funCurrentFunc()->funKind == FUNC_ROOT))) + if (compiler->UsesFunclets()) { + // Generate a call to the finally, like this: + // mov rcx,qword ptr [rbp + 20H] // Load rcx with PSPSym + // call finally-funclet + // jmp finally-return // Only for non-retless finally calls + // The jmp can be a NOP if we're going to the next block. + // If we're generating code for the main function (not a funclet), and there is no localloc, + // then RSP at this point is the same value as that stored in the PSPSym. So just copy RSP + // instead of loading the PSPSym in this case, or if PSPSym is not used (NativeAOT ABI). + + if ((compiler->lvaPSPSym == BAD_VAR_NUM) || + (!compiler->compLocallocUsed && (compiler->funCurrentFunc()->funKind == FUNC_ROOT))) + { #ifndef UNIX_X86_ABI - inst_Mov(TYP_I_IMPL, REG_ARG_0, REG_SPBASE, /* canSkip */ false); + inst_Mov(TYP_I_IMPL, REG_ARG_0, REG_SPBASE, /* canSkip */ false); #endif // !UNIX_X86_ABI - } - else - { - GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0); - } - GetEmitter()->emitIns_J(INS_call, block->GetTarget()); - - if (block->HasFlag(BBF_RETLESS_CALL)) - { - // We have a retless call, and the last instruction generated was a call. - // If the next block is in a different EH region (or is the end of the code - // block), then we need to generate a breakpoint here (since it will never - // get executed) to get proper unwind behavior. 
+ } + else + { + GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0); + } + GetEmitter()->emitIns_J(INS_call, block->GetTarget()); - if ((nextBlock == nullptr) || !BasicBlock::sameEHRegion(block, nextBlock)) + if (block->HasFlag(BBF_RETLESS_CALL)) { - instGen(INS_BREAKPOINT); // This should never get executed + // We have a retless call, and the last instruction generated was a call. + // If the next block is in a different EH region (or is the end of the code + // block), then we need to generate a breakpoint here (since it will never + // get executed) to get proper unwind behavior. + + if ((nextBlock == nullptr) || !BasicBlock::sameEHRegion(block, nextBlock)) + { + instGen(INS_BREAKPOINT); // This should never get executed + } } - } - else - { + else + { // TODO-Linux-x86: Do we need to handle the GC information for this NOP or JMP specially, as is done for other // architectures? #ifndef JIT32_GCENCODER - // Because of the way the flowgraph is connected, the liveness info for this one instruction - // after the call is not (can not be) correct in cases where a variable has a last use in the - // handler. So turn off GC reporting for this single instruction. - GetEmitter()->emitDisableGC(); + // Because of the way the flowgraph is connected, the liveness info for this one instruction + // after the call is not (can not be) correct in cases where a variable has a last use in the + // handler. So turn off GC reporting for this single instruction. + GetEmitter()->emitDisableGC(); #endif // JIT32_GCENCODER - BasicBlock* const finallyContinuation = nextBlock->GetFinallyContinuation(); + BasicBlock* const finallyContinuation = nextBlock->GetFinallyContinuation(); - // Now go to where the finally funclet needs to return to. - if (nextBlock->NextIs(finallyContinuation) && !compiler->fgInDifferentRegions(nextBlock, finallyContinuation)) - { - // Fall-through. 
- // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly - // to the next instruction? This would depend on stack walking from within the finally - // handler working without this instruction being in this special EH region. - instGen(INS_nop); - } - else - { - inst_JMP(EJ_jmp, finallyContinuation); - } + // Now go to where the finally funclet needs to return to. + if (nextBlock->NextIs(finallyContinuation) && + !compiler->fgInDifferentRegions(nextBlock, finallyContinuation)) + { + // Fall-through. + // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly + // to the next instruction? This would depend on stack walking from within the finally + // handler working without this instruction being in this special EH region. + instGen(INS_nop); + } + else + { + inst_JMP(EJ_jmp, finallyContinuation); + } #ifndef JIT32_GCENCODER - GetEmitter()->emitEnableGC(); + GetEmitter()->emitEnableGC(); #endif // JIT32_GCENCODER + } } - -#else // !FEATURE_EH_FUNCLETS - - // If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot - // corresponding to the finally's nesting level. When invoked in response to an exception, the - // EE does this. - // - // We have a BBJ_CALLFINALLY possibly paired with a following BBJ_CALLFINALLYRET. - // - // We will emit : - // mov [ebp - (n + 1)], 0 - // mov [ebp - n ], 0xFC - // push &step - // jmp finallyBlock - // ... - // step: - // mov [ebp - n ], 0 - // jmp leaveTarget - // ... 
- // leaveTarget: - - noway_assert(isFramePointerUsed()); - - // Get the nesting level which contains the finally - unsigned finallyNesting = 0; - compiler->fgGetNestingLevel(block, &finallyNesting); - - // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) - unsigned filterEndOffsetSlotOffs; - filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE); - - unsigned curNestingSlotOffs; - curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE)); - - // Zero out the slot for the next nesting level - GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, - curNestingSlotOffs - TARGET_POINTER_SIZE, 0); - GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, LCL_FINALLY_MARK); - - // Now push the address where the finally funclet should return to directly. - if (!block->HasFlag(BBF_RETLESS_CALL)) - { - assert(block->isBBCallFinallyPair()); - GetEmitter()->emitIns_J(INS_push_hide, nextBlock->GetFinallyContinuation()); - } +#if defined(FEATURE_EH_WINDOWS_X86) else { - // EE expects a DWORD, so we provide 0 - inst_IV(INS_push_hide, 0); - } - - // Jump to the finally BB - inst_JMP(EJ_jmp, block->GetTarget()); + // If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot + // corresponding to the finally's nesting level. When invoked in response to an exception, the + // EE does this. + // + // We have a BBJ_CALLFINALLY possibly paired with a following BBJ_CALLFINALLYRET. + // + // We will emit : + // mov [ebp - (n + 1)], 0 + // mov [ebp - n ], 0xFC + // push &step + // jmp finallyBlock + // ... + // step: + // mov [ebp - n ], 0 + // jmp leaveTarget + // ... 
+ // leaveTarget: + + noway_assert(isFramePointerUsed()); + + // Get the nesting level which contains the finally + unsigned finallyNesting = 0; + compiler->fgGetNestingLevel(block, &finallyNesting); + + // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) + unsigned filterEndOffsetSlotOffs; + filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE); + + unsigned curNestingSlotOffs; + curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE)); + + // Zero out the slot for the next nesting level + GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, + curNestingSlotOffs - TARGET_POINTER_SIZE, 0); + GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, + LCL_FINALLY_MARK); + + // Now push the address where the finally funclet should return to directly. + if (!block->HasFlag(BBF_RETLESS_CALL)) + { + assert(block->isBBCallFinallyPair()); + GetEmitter()->emitIns_J(INS_push_hide, nextBlock->GetFinallyContinuation()); + } + else + { + // EE expects a DWORD, so we provide 0 + inst_IV(INS_push_hide, 0); + } -#endif // !FEATURE_EH_FUNCLETS + // Jump to the finally BB + inst_JMP(EJ_jmp, block->GetTarget()); + } +#endif // FEATURE_EH_WINDOWS_X86 // The BBJ_CALLFINALLYRET is used because the BBJ_CALLFINALLY can't point to the // jump target using bbTargetEdge - that is already used to point @@ -344,7 +348,6 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) return block; } -#if defined(FEATURE_EH_FUNCLETS) void CodeGen::genEHCatchRet(BasicBlock* block) { // Set RAX to the address the VM should return to after the catch. 
@@ -354,10 +357,11 @@ void CodeGen::genEHCatchRet(BasicBlock* block) GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->GetTarget(), REG_INTRET); } -#else // !FEATURE_EH_FUNCLETS +#if defined(FEATURE_EH_WINDOWS_X86) void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block) { + assert(!compiler->UsesFunclets()); // The last statement of the block must be a GT_RETFILT, which has already been generated. assert(block->lastNode() != nullptr); assert(block->lastNode()->OperGet() == GT_RETFILT); @@ -383,7 +387,7 @@ void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block) } } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 // Move an immediate value into an integer register @@ -2144,7 +2148,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) genConsumeReg(treeNode); break; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: // Have to clear the ShadowSP of the nesting level which encloses the finally. Generates: @@ -2167,7 +2171,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, (unsigned)curNestingSlotOffs, 0); break; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_PINVOKE_PROLOG: noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & @@ -6120,37 +6124,42 @@ void CodeGen::genCall(GenTreeCall* call) compiler->lvaCallSpCheck, call->CallerPop() ? 0 : stackArgBytes, REG_ARG_0); #endif // defined(DEBUG) && defined(TARGET_X86) -#if !defined(FEATURE_EH_FUNCLETS) - //------------------------------------------------------------------------- - // Create a label for tracking of region protected by the monitor in synchronized methods. - // This needs to be here, rather than above where fPossibleSyncHelperCall is set, - // so the GC state vars have been updated before creating the label. 
- - if ((call->gtCallType == CT_HELPER) && (compiler->info.compFlags & CORINFO_FLG_SYNCH)) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!compiler->UsesFunclets()) { - CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(call->gtCallMethHnd); - noway_assert(helperNum != CORINFO_HELP_UNDEF); - switch (helperNum) + //------------------------------------------------------------------------- + // Create a label for tracking of region protected by the monitor in synchronized methods. + // This needs to be here, rather than above where fPossibleSyncHelperCall is set, + // so the GC state vars have been updated before creating the label. + + if ((call->gtCallType == CT_HELPER) && (compiler->info.compFlags & CORINFO_FLG_SYNCH)) { - case CORINFO_HELP_MON_ENTER: - case CORINFO_HELP_MON_ENTER_STATIC: - noway_assert(compiler->syncStartEmitCookie == nullptr); - compiler->syncStartEmitCookie = - GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur); - noway_assert(compiler->syncStartEmitCookie != nullptr); - break; - case CORINFO_HELP_MON_EXIT: - case CORINFO_HELP_MON_EXIT_STATIC: - noway_assert(compiler->syncEndEmitCookie == nullptr); - compiler->syncEndEmitCookie = - GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur); - noway_assert(compiler->syncEndEmitCookie != nullptr); - break; - default: - break; + CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(call->gtCallMethHnd); + noway_assert(helperNum != CORINFO_HELP_UNDEF); + switch (helperNum) + { + case CORINFO_HELP_MON_ENTER: + case CORINFO_HELP_MON_ENTER_STATIC: + noway_assert(compiler->syncStartEmitCookie == nullptr); + compiler->syncStartEmitCookie = + GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, + gcInfo.gcRegByrefSetCur); + noway_assert(compiler->syncStartEmitCookie != nullptr); + break; + case CORINFO_HELP_MON_EXIT: + case CORINFO_HELP_MON_EXIT_STATIC: + noway_assert(compiler->syncEndEmitCookie == 
nullptr); + compiler->syncEndEmitCookie = + GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, + gcInfo.gcRegByrefSetCur); + noway_assert(compiler->syncEndEmitCookie != nullptr); + break; + default: + break; + } } } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 unsigned stackAdjustBias = 0; @@ -8847,13 +8856,12 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, int s_cached; -#ifdef FEATURE_EH_FUNCLETS // We should do this before gcInfoBlockHdrSave since varPtrTableSize must be finalized before it if (compiler->ehAnyFunclets()) { + assert(compiler->UsesFunclets()); gcInfo.gcMarkFilterVarsPinned(); } -#endif #ifdef DEBUG size_t headerSize = @@ -10430,8 +10438,6 @@ void CodeGen::genFnEpilog(BasicBlock* block) } } -#if defined(FEATURE_EH_FUNCLETS) - #if defined(TARGET_AMD64) /***************************************************************************** @@ -10821,8 +10827,6 @@ void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed) #endif // TARGET* } -#endif // FEATURE_EH_FUNCLETS - //----------------------------------------------------------------------------- // genZeroInitFrameUsingBlockInit: architecture-specific helper for genZeroInitFrame in the case // `genUseBlockInit` is set. diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 697c76527afe55..1edc4294140189 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -1863,6 +1863,11 @@ void Compiler::compInit(ArenaAllocator* pAlloc, eeInfoInitialized = false; +#if defined(FEATURE_EH_WINDOWS_X86) + // Cache Native AOT ABI check. This must happen *after* eeInfoInitialized is initialized, above. 
+ eeIsNativeAotAbi = IsTargetAbi(CORINFO_NATIVEAOT_ABI); +#endif + compDoAggressiveInlining = false; if (compIsForInlining()) @@ -4901,13 +4906,12 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS, &Compiler::fgComputeBlockAndEdgeWeights); -#if defined(FEATURE_EH_FUNCLETS) - - // Create funclets from the EH handlers. - // - DoPhase(this, PHASE_CREATE_FUNCLETS, &Compiler::fgCreateFunclets); - -#endif // FEATURE_EH_FUNCLETS + if (UsesFunclets()) + { + // Create funclets from the EH handlers. + // + DoPhase(this, PHASE_CREATE_FUNCLETS, &Compiler::fgCreateFunclets); + } if (opts.OptimizationEnabled()) { @@ -5448,29 +5452,26 @@ bool Compiler::shouldAlignLoop(FlowGraphNaturalLoop* loop, BasicBlock* top) assert(!top->IsFirst()); -#if FEATURE_EH_CALLFINALLY_THUNKS - if (top->Prev()->KindIs(BBJ_CALLFINALLY)) + if (UsesCallFinallyThunks() && top->Prev()->KindIs(BBJ_CALLFINALLY)) { // It must be a retless BBJ_CALLFINALLY if we get here. assert(!top->Prev()->isBBCallFinallyPair()); // If the block before the loop start is a retless BBJ_CALLFINALLY - // with FEATURE_EH_CALLFINALLY_THUNKS, we can't add alignment + // with UsesCallFinallyThunks, we can't add alignment // because it will affect reported EH region range. For x86 (where - // !FEATURE_EH_CALLFINALLY_THUNKS), we can allow this. + // !UsesCallFinallyThunks), we can allow this. JITDUMP("Skipping alignment for " FMT_LP "; its top block follows a CALLFINALLY block\n", loop->GetIndex()); return false; } -#endif // FEATURE_EH_CALLFINALLY_THUNKS if (top->Prev()->isBBCallFinallyPairTail()) { // If the previous block is the BBJ_CALLFINALLYRET of a // BBJ_CALLFINALLY/BBJ_CALLFINALLYRET pair, then we can't add alignment // because we can't add instructions in that block. In the - // FEATURE_EH_CALLFINALLY_THUNKS case, it would affect the - // reported EH, as above. + // UsesCallFinallyThunks case, it would affect the reported EH, as above. 
JITDUMP("Skipping alignment for " FMT_LP "; its top block follows a CALLFINALLY/ALWAYS pair\n", loop->GetIndex()); return false; diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index f27618af986f09..f95d04c9418233 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -2722,7 +2722,7 @@ class Compiler // Exception handling functions // -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) bool ehNeedsShadowSPslots() { @@ -2735,7 +2735,7 @@ class Compiler // etc. unsigned ehMaxHndNestingCount; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 static bool jitIsBetween(unsigned value, unsigned start, unsigned end); static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end); @@ -2832,7 +2832,6 @@ class Compiler bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex); #endif // DEBUG -#if defined(FEATURE_EH_FUNCLETS) // Do we need a PSPSym in the main function? For codegen purposes, we only need one // if there is a filter that protects a region with a nested EH clause (such as a // try/catch nested in the 'try' body of a try/filter/filter-handler). 
See @@ -2853,23 +2852,6 @@ class Compiler unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks -#else // !FEATURE_EH_FUNCLETS - - bool ehAnyFunclets() - { - return false; - } - unsigned ehFuncletCount() - { - return 0; - } - - unsigned bbThrowIndex(BasicBlock* blk) - { - return blk->bbTryIndex; - } // Get the index to use as the cache key for sharing throw blocks -#endif // !FEATURE_EH_FUNCLETS - FlowEdge* BlockPredsWithEH(BasicBlock* blk); FlowEdge* BlockDominancePreds(BasicBlock* blk); @@ -2918,12 +2900,8 @@ class Compiler void fgRemoveEHTableEntry(unsigned XTnum); -#if defined(FEATURE_EH_FUNCLETS) - EHblkDsc* fgAddEHTableEntry(unsigned XTnum); -#endif // FEATURE_EH_FUNCLETS - void fgSortEHTable(); // Causes the EH table to obey some well-formedness conditions, by inserting @@ -3909,10 +3887,10 @@ class Compiler //------------------------------------------------------------------------- // All these frame offsets are inter-related and must be kept in sync -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // This is used for the callable handlers unsigned lvaShadowSPslotsVar; // Block-layout TYP_STRUCT variable for all the shadow SP slots -#endif // FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 int lvaCachedGenericContextArgOffs; int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as @@ -4259,9 +4237,7 @@ class Compiler unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX -#if defined(FEATURE_EH_FUNCLETS) unsigned lvaPSPSym; // variable representing the PSPSym -#endif InlineInfo* impInlineInfo; // Only present for inlinees InlineStrategy* m_inlineStrategy; @@ -4484,6 +4460,9 @@ class Compiler GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp); void impImportLeave(BasicBlock* block); +#if defined(FEATURE_EH_WINDOWS_X86) + void impImportLeaveEHRegions(BasicBlock* block); +#endif 
void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr); GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom); @@ -5032,9 +5011,7 @@ class Compiler BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section BasicBlock* fgEntryBB; // For OSR, the original method's entry point BasicBlock* fgOSREntryBB; // For OSR, the logical entry point (~ patchpoint) -#if defined(FEATURE_EH_FUNCLETS) BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets) -#endif BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been // created. BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks @@ -5236,9 +5213,7 @@ class Compiler // This is derived from the profile data // or is BB_UNITY_WEIGHT when we don't have profile data -#if defined(FEATURE_EH_FUNCLETS) bool fgFuncletsCreated; // true if the funclet creation phase has been run -#endif // FEATURE_EH_FUNCLETS bool fgGlobalMorph; // indicates if we are during the global morphing phase // since fgMorphTree can be called from several places @@ -5292,16 +5267,12 @@ class Compiler GenTree* fgGetCritSectOfStaticMethod(); -#if defined(FEATURE_EH_FUNCLETS) - void fgAddSyncMethodEnterExit(); GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter); void fgConvertSyncReturnToLeave(BasicBlock* block); -#endif // FEATURE_EH_FUNCLETS - void fgAddReversePInvokeEnterExit(); bool fgMoreThanOneReturnBlock(); @@ -6044,15 +6015,14 @@ class Compiler }; BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); -#if defined(FEATURE_EH_FUNCLETS) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); void fgCreateFuncletPrologBlocks(); PhaseStatus fgCreateFunclets(); -#else // !FEATURE_EH_FUNCLETS +#if 
defined(FEATURE_EH_WINDOWS_X86) bool fgRelocateEHRegions(); -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target); @@ -6079,9 +6049,7 @@ class Compiler bool fgReorderBlocks(bool useProfile); -#ifdef FEATURE_EH_FUNCLETS bool fgFuncletsAreCold(); -#endif // FEATURE_EH_FUNCLETS PhaseStatus fgDetermineFirstColdBlock(); @@ -8275,6 +8243,30 @@ class Compiler return eeGetEEInfo()->targetAbi == abi; } +#if defined(FEATURE_EH_WINDOWS_X86) + bool eeIsNativeAotAbi; + bool UsesFunclets() const + { + return eeIsNativeAotAbi; + } + + bool UsesCallFinallyThunks() const + { + // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. + return UsesFunclets(); + } +#else + bool UsesFunclets() const + { + return true; + } + + bool UsesCallFinallyThunks() const + { + return true; + } +#endif + bool generateCFIUnwindCodes() { #if defined(FEATURE_CFI_SUPPORT) @@ -8491,37 +8483,29 @@ class Compiler } // Things that MAY belong either in CodeGen or CodeGenContext - -#if defined(FEATURE_EH_FUNCLETS) FuncInfoDsc* compFuncInfos; unsigned short compCurrFuncIdx; unsigned short compFuncInfoCount; + FuncInfoDsc compFuncInfoRoot; unsigned short compFuncCount() { - assert(fgFuncletsCreated); - return compFuncInfoCount; - } - -#else // !FEATURE_EH_FUNCLETS - - // This is a no-op when there are no funclets! - void genUpdateCurrentFunclet(BasicBlock* block) - { - return; + if (UsesFunclets()) + { + assert(fgFuncletsCreated); + return compFuncInfoCount; + } + else + { + return 1; + } } - FuncInfoDsc compFuncInfoRoot; - - static const unsigned compCurrFuncIdx = 0; - - unsigned short compFuncCount() + unsigned short funCurrentFuncIdx() { - return 1; + return UsesFunclets() ? 
compCurrFuncIdx : 0; } -#endif // !FEATURE_EH_FUNCLETS - FuncInfoDsc* funCurrentFunc(); void funSetCurrentFunc(unsigned funcIdx); FuncInfoDsc* funGetFunc(unsigned funcIdx); @@ -8629,22 +8613,20 @@ class Compiler // private: -#if defined(FEATURE_EH_FUNCLETS) void unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc); -#endif // FEATURE_EH_FUNCLETS void unwindReserveFunc(FuncInfoDsc* func); void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode); -#if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) +#if defined(TARGET_AMD64) || defined(TARGET_X86) void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode); void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode); -#endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS) +#endif // TARGET_AMD64 || TARGET_X86 UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func); @@ -10516,14 +10498,14 @@ class Compiler unsigned compHndBBtabCount; // element count of used elements in EH data array unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) //------------------------------------------------------------------------- // Tracking of region covered by the monitor in synchronized methods void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 Phases mostRecentlyActivePhase; // the most recently active phase PhaseChecks activePhaseChecks; // the currently active phase checks @@ -11410,9 +11392,9 @@ class GenTreeVisitor case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if !defined(FEATURE_EH_FUNCLETS) +#if 
defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // !FEATURE_EH_WINDOWS_X86 case GT_PHI_ARG: case GT_JMPTABLE: case GT_PHYSREG: diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 62efd5282a16a1..6387d17b2e1ac3 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -789,8 +789,6 @@ inline bool BasicBlock::HasPotentialEHSuccs(Compiler* comp) return hndDesc->InFilterRegionBBRange(this); } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * Get the FuncInfoDsc for the funclet we are currently generating code for. * This is only valid during codegen. @@ -798,7 +796,14 @@ inline bool BasicBlock::HasPotentialEHSuccs(Compiler* comp) */ inline FuncInfoDsc* Compiler::funCurrentFunc() { - return funGetFunc(compCurrFuncIdx); + if (UsesFunclets()) + { + return funGetFunc(compCurrFuncIdx); + } + else + { + return &compFuncInfoRoot; + } } /***************************************************************************** @@ -808,10 +813,17 @@ inline FuncInfoDsc* Compiler::funCurrentFunc() */ inline void Compiler::funSetCurrentFunc(unsigned funcIdx) { - assert(fgFuncletsCreated); - assert(FitsIn(funcIdx)); - noway_assert(funcIdx < compFuncInfoCount); - compCurrFuncIdx = (unsigned short)funcIdx; + if (UsesFunclets()) + { + assert(fgFuncletsCreated); + assert(FitsIn(funcIdx)); + noway_assert(funcIdx < compFuncInfoCount); + compCurrFuncIdx = (unsigned short)funcIdx; + } + else + { + assert(funcIdx == 0); + } } /***************************************************************************** @@ -821,9 +833,17 @@ inline void Compiler::funSetCurrentFunc(unsigned funcIdx) */ inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) { - assert(fgFuncletsCreated); - assert(funcIdx < compFuncInfoCount); - return &compFuncInfos[funcIdx]; + if (UsesFunclets()) + { + assert(fgFuncletsCreated); + assert(funcIdx < compFuncInfoCount); + return 
&compFuncInfos[funcIdx]; + } + else + { + assert(funcIdx == 0); + return &compFuncInfoRoot; + } } /***************************************************************************** @@ -836,71 +856,33 @@ inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) */ inline unsigned Compiler::funGetFuncIdx(BasicBlock* block) { - assert(fgFuncletsCreated); - assert(block->HasFlag(BBF_FUNCLET_BEG)); - - EHblkDsc* eh = ehGetDsc(block->getHndIndex()); - unsigned int funcIdx = eh->ebdFuncIndex; - if (eh->ebdHndBeg != block) + if (UsesFunclets()) { - // If this is a filter EH clause, but we want the funclet - // for the filter (not the filter handler), it is the previous one - noway_assert(eh->HasFilter()); - noway_assert(eh->ebdFilter == block); - assert(funGetFunc(funcIdx)->funKind == FUNC_HANDLER); - assert(funGetFunc(funcIdx)->funEHIndex == funGetFunc(funcIdx - 1)->funEHIndex); - assert(funGetFunc(funcIdx - 1)->funKind == FUNC_FILTER); - funcIdx--; - } - - return funcIdx; -} - -#else // !FEATURE_EH_FUNCLETS + assert(fgFuncletsCreated); + assert(block->HasFlag(BBF_FUNCLET_BEG)); -/***************************************************************************** - * Get the FuncInfoDsc for the funclet we are currently generating code for. - * This is only valid during codegen. For non-funclet platforms, this is - * always the root function. - * - */ -inline FuncInfoDsc* Compiler::funCurrentFunc() -{ - return &compFuncInfoRoot; -} - -/***************************************************************************** - * Change which funclet we are currently generating code for. - * This is only valid after funclets are created. - * - */ -inline void Compiler::funSetCurrentFunc(unsigned funcIdx) -{ - assert(funcIdx == 0); -} - -/***************************************************************************** - * Get the FuncInfoDsc for the givven funclet. - * This is only valid after funclets are created. 
- * - */ -inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) -{ - assert(funcIdx == 0); - return &compFuncInfoRoot; -} + EHblkDsc* eh = ehGetDsc(block->getHndIndex()); + unsigned int funcIdx = eh->ebdFuncIndex; + if (eh->ebdHndBeg != block) + { + // If this is a filter EH clause, but we want the funclet + // for the filter (not the filter handler), it is the previous one + noway_assert(eh->HasFilter()); + noway_assert(eh->ebdFilter == block); + assert(funGetFunc(funcIdx)->funKind == FUNC_HANDLER); + assert(funGetFunc(funcIdx)->funEHIndex == funGetFunc(funcIdx - 1)->funEHIndex); + assert(funGetFunc(funcIdx - 1)->funKind == FUNC_FILTER); + funcIdx--; + } -/***************************************************************************** - * No funclets, so always 0. - * - */ -inline unsigned Compiler::funGetFuncIdx(BasicBlock* block) -{ - return 0; + return funcIdx; + } + else + { + return 0; + } } -#endif // !FEATURE_EH_FUNCLETS - //------------------------------------------------------------------------------ // genRegNumFromMask : Maps a single register mask to a register number. 
// @@ -4114,9 +4096,7 @@ bool Compiler::fgVarIsNeverZeroInitializedInProlog(unsigned varNum) result = result || (varNum == lvaOutgoingArgSpaceVar); #endif -#if defined(FEATURE_EH_FUNCLETS) result = result || (varNum == lvaPSPSym); -#endif return result; } @@ -4233,9 +4213,9 @@ void GenTree::VisitOperands(TVisitor visitor) case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_PHI_ARG: case GT_JMPTABLE: case GT_PHYSREG: diff --git a/src/coreclr/jit/compphases.h b/src/coreclr/jit/compphases.h index b222451d26e230..4bd236ad7f1962 100644 --- a/src/coreclr/jit/compphases.h +++ b/src/coreclr/jit/compphases.h @@ -55,9 +55,7 @@ CompPhaseNameMacro(PHASE_POST_MORPH, "Post-Morph", CompPhaseNameMacro(PHASE_MORPH_END, "Morph - Finish", false, -1, true) CompPhaseNameMacro(PHASE_GS_COOKIE, "GS Cookie", false, -1, false) CompPhaseNameMacro(PHASE_COMPUTE_EDGE_WEIGHTS, "Compute edge weights (1, false)",false, -1, false) -#if defined(FEATURE_EH_FUNCLETS) CompPhaseNameMacro(PHASE_CREATE_FUNCLETS, "Create EH funclets", false, -1, false) -#endif // FEATURE_EH_FUNCLETS CompPhaseNameMacro(PHASE_HEAD_TAIL_MERGE, "Head and tail merge", false, -1, false) CompPhaseNameMacro(PHASE_MERGE_THROWS, "Merge throw blocks", false, -1, false) CompPhaseNameMacro(PHASE_INVERT_LOOPS, "Invert loops", false, -1, false) diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp index 85bbda9f3cfbc6..5259b936646fbf 100644 --- a/src/coreclr/jit/emit.cpp +++ b/src/coreclr/jit/emit.cpp @@ -1608,11 +1608,8 @@ void* emitter::emitAllocAnyInstr(size_t sz, emitAttr opsz) // the prolog/epilog placeholder groups ARE generated in order, and are // re-used. But generating additional groups would not work. 
if (emitComp->compStressCompile(Compiler::STRESS_EMITTER, 1) && emitCurIGinsCnt && !emitIGisInProlog(emitCurIG) && - !emitIGisInEpilog(emitCurIG) && !emitCurIG->endsWithAlignInstr() -#if defined(FEATURE_EH_FUNCLETS) - && !emitIGisInFuncletProlog(emitCurIG) && !emitIGisInFuncletEpilog(emitCurIG) -#endif // FEATURE_EH_FUNCLETS - ) + !emitIGisInEpilog(emitCurIG) && !emitCurIG->endsWithAlignInstr() && !emitIGisInFuncletProlog(emitCurIG) && + !emitIGisInFuncletEpilog(emitCurIG)) { emitNxtIG(true); } @@ -2070,11 +2067,7 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, bool extend = false; - if (igType == IGPT_EPILOG -#if defined(FEATURE_EH_FUNCLETS) - || igType == IGPT_FUNCLET_EPILOG -#endif // FEATURE_EH_FUNCLETS - ) + if (igType == IGPT_EPILOG || igType == IGPT_FUNCLET_EPILOG) { #ifdef TARGET_AMD64 emitOutputPreEpilogNOP(); @@ -2108,7 +2101,7 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, * case, we need to make sure any re-used fields, such as igFuncIdx, are correct. */ - igPh->igFuncIdx = emitComp->compCurrFuncIdx; + igPh->igFuncIdx = emitComp->funCurrentFuncIdx(); /* Create a separate block of memory to store placeholder information. 
* We could use unions to put some of this into the insGroup itself, but we don't @@ -2144,7 +2137,6 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, { igPh->igFlags |= IGF_EPILOG; } -#if defined(FEATURE_EH_FUNCLETS) else if (igType == IGPT_FUNCLET_PROLOG) { igPh->igFlags |= IGF_FUNCLET_PROLOG; @@ -2153,7 +2145,6 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, { igPh->igFlags |= IGF_FUNCLET_EPILOG; } -#endif // FEATURE_EH_FUNCLETS /* Link it into the placeholder list */ @@ -2174,7 +2165,6 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, emitCurIGsize += MAX_PLACEHOLDER_IG_SIZE; emitCurCodeOffset += emitCurIGsize; -#if defined(FEATURE_EH_FUNCLETS) // Add the appropriate IP mapping debugging record for this placeholder // group. genExitCode() adds the mapping for main function epilogs. if (emitComp->opts.compDbgInfo) @@ -2188,7 +2178,6 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, codeGen->genIPmappingAdd(IPmappingDscKind::Epilog, DebugInfo(), true); } } -#endif // FEATURE_EH_FUNCLETS /* Start a new IG if more code follows */ @@ -2198,11 +2187,7 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, } else { - if (igType == IGPT_EPILOG -#if defined(FEATURE_EH_FUNCLETS) - || igType == IGPT_FUNCLET_EPILOG -#endif // FEATURE_EH_FUNCLETS - ) + if (igType == IGPT_EPILOG || igType == IGPT_FUNCLET_EPILOG) { // If this was an epilog, then assume this is the end of any currently in progress // no-GC region. 
If a block after the epilog needs to be no-GC, it needs to call @@ -2249,12 +2234,10 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, void emitter::emitGeneratePrologEpilog() { #ifdef DEBUG - unsigned prologCnt = 0; - unsigned epilogCnt = 0; -#if defined(FEATURE_EH_FUNCLETS) + unsigned prologCnt = 0; + unsigned epilogCnt = 0; unsigned funcletPrologCnt = 0; unsigned funcletEpilogCnt = 0; -#endif // FEATURE_EH_FUNCLETS #endif // DEBUG insGroup* igPh; @@ -2284,8 +2267,6 @@ void emitter::emitGeneratePrologEpilog() emitEndFnEpilog(); break; -#if defined(FEATURE_EH_FUNCLETS) - case IGPT_FUNCLET_PROLOG: INDEBUG(++funcletPrologCnt); emitBegFuncletProlog(igPh); @@ -2300,8 +2281,6 @@ void emitter::emitGeneratePrologEpilog() emitEndFuncletEpilog(); break; -#endif // FEATURE_EH_FUNCLETS - default: unreached(); } @@ -2311,17 +2290,16 @@ void emitter::emitGeneratePrologEpilog() if (emitComp->verbose) { printf("%d prologs, %d epilogs", prologCnt, epilogCnt); -#if defined(FEATURE_EH_FUNCLETS) - printf(", %d funclet prologs, %d funclet epilogs", funcletPrologCnt, funcletEpilogCnt); -#endif // FEATURE_EH_FUNCLETS + if (emitComp->UsesFunclets()) + { + printf(", %d funclet prologs, %d funclet epilogs", funcletPrologCnt, funcletEpilogCnt); + } printf("\n"); -// prolog/epilog code doesn't use this yet -// noway_assert(prologCnt == 1); -// noway_assert(epilogCnt == emitEpilogCnt); // Is this correct? -#if defined(FEATURE_EH_FUNCLETS) + // prolog/epilog code doesn't use this yet + // noway_assert(prologCnt == 1); + // noway_assert(epilogCnt == emitEpilogCnt); // Is this correct? assert(funcletPrologCnt == emitComp->ehFuncletCount()); -#endif // FEATURE_EH_FUNCLETS } #endif // DEBUG } @@ -2519,8 +2497,6 @@ void emitter::emitEndFnEpilog() #endif // JIT32_GCENCODER } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * * Begin generating a funclet prolog. 
@@ -2528,6 +2504,7 @@ void emitter::emitEndFnEpilog() void emitter::emitBegFuncletProlog(insGroup* igPh) { + assert(emitComp->UsesFunclets()); emitBegPrologEpilog(igPh); } @@ -2538,6 +2515,7 @@ void emitter::emitBegFuncletProlog(insGroup* igPh) void emitter::emitEndFuncletProlog() { + assert(emitComp->UsesFunclets()); emitEndPrologEpilog(); } @@ -2548,6 +2526,7 @@ void emitter::emitEndFuncletProlog() void emitter::emitBegFuncletEpilog(insGroup* igPh) { + assert(emitComp->UsesFunclets()); emitBegPrologEpilog(igPh); } @@ -2558,11 +2537,10 @@ void emitter::emitBegFuncletEpilog(insGroup* igPh) void emitter::emitEndFuncletEpilog() { + assert(emitComp->UsesFunclets()); emitEndPrologEpilog(); } -#endif // FEATURE_EH_FUNCLETS - #ifdef JIT32_GCENCODER // @@ -2999,16 +2977,12 @@ bool emitter::emitIsFuncEnd(emitLocation* emitLoc, emitLocation* emitLocNextFrag if (ig->igNext->igFlags & IGF_FUNCLET_PROLOG) return true; -#if defined(FEATURE_EH_FUNCLETS) - // Is the next IG a placeholder group for a funclet prolog? if ((ig->igNext->igFlags & IGF_PLACEHOLDER) && (ig->igNext->igPhData->igPhType == IGPT_FUNCLET_PROLOG)) { return true; } -#endif // FEATURE_EH_FUNCLETS - return false; } @@ -4042,14 +4016,12 @@ void emitter::emitDispIG(insGroup* ig, bool displayFunc, bool displayInstruction case IGPT_EPILOG: pszType = "epilog"; break; -#if defined(FEATURE_EH_FUNCLETS) case IGPT_FUNCLET_PROLOG: pszType = "funclet prolog"; break; case IGPT_FUNCLET_EPILOG: pszType = "funclet epilog"; break; -#endif // FEATURE_EH_FUNCLETS default: pszType = "UNKNOWN"; break; @@ -7112,16 +7084,15 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, // printf("Variable #%2u/%2u is at stack offset %d\n", num, indx, offs); -#ifdef JIT32_GCENCODER -#ifndef FEATURE_EH_FUNCLETS +#if defined(JIT32_GCENCODER) && defined(FEATURE_EH_WINDOWS_X86) // Remember the frame offset of the "this" argument for synchronized methods. 
- if (emitComp->lvaIsOriginalThisArg(num) && emitComp->lvaKeepAliveAndReportThis()) + if (!emitComp->UsesFunclets() && emitComp->lvaIsOriginalThisArg(num) && + emitComp->lvaKeepAliveAndReportThis()) { emitSyncThisObjOffs = offs; offs |= this_OFFSET_FLAG; } -#endif -#endif // JIT32_GCENCODER +#endif // JIT32_GCENCODER && FEATURE_EH_WINDOWS_X86 if (dsc->TypeGet() == TYP_BYREF) { @@ -8604,8 +8575,8 @@ void emitter::emitGCvarLiveSet(int offs, GCtype gcType, BYTE* addr, ssize_t disp /* the lower 2 bits encode props about the stk ptr */ -#if defined(JIT32_GCENCODER) && !defined(FEATURE_EH_FUNCLETS) - if (offs == emitSyncThisObjOffs) +#if defined(JIT32_GCENCODER) && defined(FEATURE_EH_WINDOWS_X86) + if (!emitComp->UsesFunclets() && offs == emitSyncThisObjOffs) { desc->vpdVarNum |= this_OFFSET_FLAG; } @@ -9572,7 +9543,7 @@ void emitter::emitInitIG(insGroup* ig) /* Set the current function index */ - ig->igFuncIdx = emitComp->compCurrFuncIdx; + ig->igFuncIdx = emitComp->funCurrentFuncIdx(); ig->igFlags = 0; diff --git a/src/coreclr/jit/emit.h b/src/coreclr/jit/emit.h index f3767fc6f1807f..40a729dd70fee2 100644 --- a/src/coreclr/jit/emit.h +++ b/src/coreclr/jit/emit.h @@ -234,10 +234,8 @@ enum insGroupPlaceholderType : unsigned char { IGPT_PROLOG, // currently unused IGPT_EPILOG, -#if defined(FEATURE_EH_FUNCLETS) IGPT_FUNCLET_PROLOG, IGPT_FUNCLET_EPILOG, -#endif // FEATURE_EH_FUNCLETS }; #if defined(_MSC_VER) && defined(TARGET_ARM) @@ -317,15 +315,11 @@ struct insGroup // Mask of IGF_* flags that should be propagated to new blocks when they are created. // This allows prologs and epilogs to be any number of IGs, but still be // automatically marked properly. 
-#if defined(FEATURE_EH_FUNCLETS) #ifdef DEBUG #define IGF_PROPAGATE_MASK (IGF_EPILOG | IGF_FUNCLET_PROLOG | IGF_FUNCLET_EPILOG) #else // DEBUG #define IGF_PROPAGATE_MASK (IGF_EPILOG | IGF_FUNCLET_PROLOG) #endif // DEBUG -#else // !FEATURE_EH_FUNCLETS -#define IGF_PROPAGATE_MASK (IGF_EPILOG) -#endif // !FEATURE_EH_FUNCLETS // Try to do better packing based on how large regMaskSmall is (8, 16, or 64 bits). @@ -544,8 +538,6 @@ class emitter return (ig != nullptr) && ((ig->igFlags & IGF_EPILOG) != 0); } -#if defined(FEATURE_EH_FUNCLETS) - bool emitIGisInFuncletProlog(const insGroup* ig) { return (ig != nullptr) && ((ig->igFlags & IGF_FUNCLET_PROLOG) != 0); @@ -556,8 +548,6 @@ class emitter return (ig != nullptr) && ((ig->igFlags & IGF_FUNCLET_EPILOG) != 0); } -#endif // FEATURE_EH_FUNCLETS - void emitRecomputeIGoffsets(); void emitDispCommentForHandle(size_t handle, size_t cookie, GenTreeFlags flags); @@ -2356,16 +2346,12 @@ class emitter void emitBegFnEpilog(insGroup* igPh); void emitEndFnEpilog(); -#if defined(FEATURE_EH_FUNCLETS) - void emitBegFuncletProlog(insGroup* igPh); void emitEndFuncletProlog(); void emitBegFuncletEpilog(insGroup* igPh); void emitEndFuncletEpilog(); -#endif // FEATURE_EH_FUNCLETS - /************************************************************************/ /* Methods to record a code position and later convert to offset */ /************************************************************************/ diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index b1cc6a78108b52..6bf148cf2d8883 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -17778,9 +17778,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) #if !FEATURE_FIXED_OUT_ARGS bool updateStackLevel = !emitIGisInProlog(ig) && !emitIGisInEpilog(ig); -#if defined(FEATURE_EH_FUNCLETS) updateStackLevel = updateStackLevel && !emitIGisInFuncletProlog(ig) && !emitIGisInFuncletEpilog(ig); -#endif // FEATURE_EH_FUNCLETS // 
Make sure we keep the current stack level up to date if (updateStackLevel) diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 48d8765857fd0b..37683b188c303b 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -39,10 +39,8 @@ void Compiler::fgInit() fgOSREntryBB = nullptr; fgEntryBBExtraRefs = 0; -#if defined(FEATURE_EH_FUNCLETS) fgFirstFuncletBB = nullptr; fgFuncletsCreated = false; -#endif // FEATURE_EH_FUNCLETS fgBBcount = 0; @@ -113,9 +111,9 @@ void Compiler::fgInit() fgUsedSharedTemps = nullptr; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) ehMaxHndNestingCount = 0; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 /* Init the fgBigOffsetMorphingTemps to be BAD_VAR_NUM. */ for (int i = 0; i < TYP_COUNT; i++) @@ -4039,9 +4037,9 @@ void Compiler::fgFindBasicBlocks() * try-finally blocks. */ -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) HBtab->ebdHandlerNestingLevel = 0; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 HBtab->ebdEnclosingTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; HBtab->ebdEnclosingHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; @@ -4051,12 +4049,12 @@ void Compiler::fgFindBasicBlocks() for (EHblkDsc* xtab = compHndBBtab; xtab < HBtab; xtab++) { -#if !defined(FEATURE_EH_FUNCLETS) - if (jitIsBetween(xtab->ebdHndBegOffs(), hndBegOff, hndEndOff)) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets() && jitIsBetween(xtab->ebdHndBegOffs(), hndBegOff, hndEndOff)) { xtab->ebdHandlerNestingLevel++; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 /* If we haven't recorded an enclosing try index for xtab then see * if this EH region should be recorded. 
We check if the @@ -4089,15 +4087,16 @@ void Compiler::fgFindBasicBlocks() } // end foreach handler table entry -#if !defined(FEATURE_EH_FUNCLETS) - - for (EHblkDsc* const HBtab : EHClauses(this)) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) { - if (ehMaxHndNestingCount <= HBtab->ebdHandlerNestingLevel) - ehMaxHndNestingCount = HBtab->ebdHandlerNestingLevel + 1; + for (EHblkDsc* const HBtab : EHClauses(this)) + { + if (ehMaxHndNestingCount <= HBtab->ebdHandlerNestingLevel) + ehMaxHndNestingCount = HBtab->ebdHandlerNestingLevel + 1; + } } - -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 { // always run these checks for a debug build @@ -4332,7 +4331,7 @@ void Compiler::fgCheckBasicBlockControlFlow() } break; - case BBJ_EHCATCHRET: // block ends with a leave out of a catch (only #if defined(FEATURE_EH_FUNCLETS)) + case BBJ_EHCATCHRET: // block ends with a leave out of a catch (only if UsesFunclets() == true) case BBJ_CALLFINALLY: // block always calls the target finally default: noway_assert(!"Unexpected bbKind"); // these blocks don't get created until importing @@ -5150,18 +5149,19 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) fgFirstColdBlock = bPrev->Next(); } -#if defined(FEATURE_EH_FUNCLETS) #ifdef DEBUG - // You can't unlink a range that includes the first funclet block. A range certainly - // can't cross the non-funclet/funclet region. And you can't unlink the first block - // of the first funclet with this, either. (If that's necessary, it could be allowed - // by updating fgFirstFuncletBB to bEnd->bbNext.) - for (BasicBlock* tempBB = bBeg; tempBB != bEnd->Next(); tempBB = tempBB->Next()) + if (UsesFunclets()) { - assert(tempBB != fgFirstFuncletBB); + // You can't unlink a range that includes the first funclet block. A range certainly + // can't cross the non-funclet/funclet region. And you can't unlink the first block + // of the first funclet with this, either. 
(If that's necessary, it could be allowed + // by updating fgFirstFuncletBB to bEnd->bbNext.) + for (BasicBlock* tempBB = bBeg; tempBB != bEnd->Next(); tempBB = tempBB->Next()) + { + assert(tempBB != fgFirstFuncletBB); + } } #endif // DEBUG -#endif // FEATURE_EH_FUNCLETS } //------------------------------------------------------------------------ @@ -5196,13 +5196,11 @@ BasicBlock* Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) fgUnreachableBlock(block); -#if defined(FEATURE_EH_FUNCLETS) // If block was the fgFirstFuncletBB then set fgFirstFuncletBB to block->bbNext if (block == fgFirstFuncletBB) { fgFirstFuncletBB = block->Next(); } -#endif // FEATURE_EH_FUNCLETS // If this is the first Cold basic block update fgFirstColdBlock if (block->IsFirstColdBlock(this)) @@ -5272,13 +5270,11 @@ BasicBlock* Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) fgFirstColdBlock = block->Next(); } -#if defined(FEATURE_EH_FUNCLETS) // Update fgFirstFuncletBB if necessary if (block == fgFirstFuncletBB) { fgFirstFuncletBB = block->Next(); } -#endif // FEATURE_EH_FUNCLETS // Update successor block start IL offset, if empty predecessor // covers the immediately preceding range. @@ -5687,10 +5683,8 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r BasicBlock* bLast = nullptr; BasicBlock* bPrev = nullptr; -#if defined(FEATURE_EH_FUNCLETS) // We don't support moving try regions... yet? 
- noway_assert(relocateType == FG_RELOCATE_HANDLER); -#endif // FEATURE_EH_FUNCLETS + noway_assert(!UsesFunclets() || relocateType == FG_RELOCATE_HANDLER); HBtab = ehGetDsc(regionIndex); @@ -5728,24 +5722,24 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r goto FAILURE; } -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // In the funclets case, we still need to set some information on the handler blocks - if (bLast->IsLast()) + if (!UsesFunclets() && bLast->IsLast()) { INDEBUG(reason = "region is already at the end of the method";) goto FAILURE; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 // Walk the block list for this purpose: // 1. Verify that all the blocks in the range are either all rarely run or not rarely run. // When creating funclets, we ignore the run rarely flag, as we need to be able to move any blocks // in the range. -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) bool isRare; isRare = bStart->isRunRarely(); -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 block = fgFirstBB; while (true) { @@ -5763,14 +5757,14 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r if (inTheRange) { -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // Unless all blocks are (not) run rarely we must return false. 
- if (isRare != block->isRunRarely()) + if (!UsesFunclets() && isRare != block->isRunRarely()) { INDEBUG(reason = "this region contains both rarely run and non-rarely run blocks";) goto FAILURE; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 validRange = true; } @@ -5798,11 +5792,10 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r fgDispHandlerTab(); } -#if !defined(FEATURE_EH_FUNCLETS) - +#if defined(FEATURE_EH_WINDOWS_X86) // This is really expensive, and quickly becomes O(n^n) with funclets // so only do it once after we've created them (see fgCreateFunclets) - if (expensiveDebugCheckLevel >= 2) + if (!UsesFunclets() && expensiveDebugCheckLevel >= 2) { fgDebugCheckBBlist(); } @@ -5810,16 +5803,15 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r #endif // DEBUG -#if defined(FEATURE_EH_FUNCLETS) - - bStart->SetFlags(BBF_FUNCLET_BEG); // Mark the start block of the funclet - - if (bMiddle != nullptr) + if (UsesFunclets()) { - bMiddle->SetFlags(BBF_FUNCLET_BEG); // Also mark the start block of a filter handler as a funclet - } + bStart->SetFlags(BBF_FUNCLET_BEG); // Mark the start block of the funclet -#endif // FEATURE_EH_FUNCLETS + if (bMiddle != nullptr) + { + bMiddle->SetFlags(BBF_FUNCLET_BEG); // Also mark the start block of a filter handler as a funclet + } + } BasicBlock* bNext; bNext = bLast->Next(); @@ -5830,60 +5822,134 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r BasicBlock* insertAfterBlk; insertAfterBlk = fgLastBB; -#if defined(FEATURE_EH_FUNCLETS) - - // There are several cases we need to consider when moving an EH range. - // If moving a range X, we must consider its relationship to every other EH - // range A in the table. 
Note that each entry in the table represents both - // a protected region and a handler region (possibly including a filter region - // that must live before and adjacent to the handler region), so we must - // consider try and handler regions independently. These are the cases: - // 1. A is completely contained within X (where "completely contained" means - // that the 'begin' and 'last' parts of A are strictly between the 'begin' - // and 'end' parts of X, and aren't equal to either, for example, they don't - // share 'last' blocks). In this case, when we move X, A moves with it, and - // the EH table doesn't need to change. - // 2. X is completely contained within A. In this case, X gets extracted from A, - // and the range of A shrinks, but because A is strictly within X, the EH - // table doesn't need to change. - // 3. A and X have exactly the same range. In this case, A is moving with X and - // the EH table doesn't need to change. - // 4. A and X share the 'last' block. There are two sub-cases: - // (a) A is a larger range than X (such that the beginning of A precedes the - // beginning of X): in this case, we are moving the tail of A. We set the - // 'last' block of A to the block preceding the beginning block of X. - // (b) A is a smaller range than X. Thus, we are moving the entirety of A along - // with X. In this case, nothing in the EH record for A needs to change. - // 5. A and X share the 'beginning' block (but aren't the same range, as in #3). - // This can never happen here, because we are only moving handler ranges (we don't - // move try ranges), and handler regions cannot start at the beginning of a try - // range or handler range and be a subset. - // - // Note that A and X must properly nest for the table to be well-formed. For example, - // the beginning of A can't be strictly within the range of X (that is, the beginning - // of A isn't shared with the beginning of X) and the end of A outside the range. 
+ if (UsesFunclets()) + { + // There are several cases we need to consider when moving an EH range. + // If moving a range X, we must consider its relationship to every other EH + // range A in the table. Note that each entry in the table represents both + // a protected region and a handler region (possibly including a filter region + // that must live before and adjacent to the handler region), so we must + // consider try and handler regions independently. These are the cases: + // 1. A is completely contained within X (where "completely contained" means + // that the 'begin' and 'last' parts of A are strictly between the 'begin' + // and 'end' parts of X, and aren't equal to either, for example, they don't + // share 'last' blocks). In this case, when we move X, A moves with it, and + // the EH table doesn't need to change. + // 2. X is completely contained within A. In this case, X gets extracted from A, + // and the range of A shrinks, but because A is strictly within X, the EH + // table doesn't need to change. + // 3. A and X have exactly the same range. In this case, A is moving with X and + // the EH table doesn't need to change. + // 4. A and X share the 'last' block. There are two sub-cases: + // (a) A is a larger range than X (such that the beginning of A precedes the + // beginning of X): in this case, we are moving the tail of A. We set the + // 'last' block of A to the block preceding the beginning block of X. + // (b) A is a smaller range than X. Thus, we are moving the entirety of A along + // with X. In this case, nothing in the EH record for A needs to change. + // 5. A and X share the 'beginning' block (but aren't the same range, as in #3). + // This can never happen here, because we are only moving handler ranges (we don't + // move try ranges), and handler regions cannot start at the beginning of a try + // range or handler range and be a subset. + // + // Note that A and X must properly nest for the table to be well-formed. 
For example, + // the beginning of A can't be strictly within the range of X (that is, the beginning + // of A isn't shared with the beginning of X) and the end of A outside the range. - for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) + for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) + { + if (XTnum != regionIndex) // we don't need to update our 'last' pointer + { + if (HBtab->ebdTryLast == bLast) + { + // If we moved a set of blocks that were at the end of + // a different try region then we may need to update ebdTryLast + for (block = HBtab->ebdTryBeg; block != nullptr; block = block->Next()) + { + if (block == bPrev) + { + // We were contained within it, so shrink its region by + // setting its 'last' + fgSetTryEnd(HBtab, bPrev); + break; + } + else if (HBtab->ebdTryLast->NextIs(block)) + { + // bPrev does not come after the TryBeg, thus we are larger, and + // it is moving with us. + break; + } + } + } + if (HBtab->ebdHndLast == bLast) + { + // If we moved a set of blocks that were at the end of + // a different handler region then we must update ebdHndLast + for (block = HBtab->ebdHndBeg; block != nullptr; block = block->Next()) + { + if (block == bPrev) + { + fgSetHndEnd(HBtab, bPrev); + break; + } + else if (HBtab->ebdHndLast->NextIs(block)) + { + // bPrev does not come after the HndBeg + break; + } + } + } + } + } // end exception table iteration + + // Insert the block(s) we are moving after fgLastBlock + fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); + + if (fgFirstFuncletBB == nullptr) // The funclet region isn't set yet + { + fgFirstFuncletBB = bStart; + } + else + { + assert(fgFirstFuncletBB != insertAfterBlk->Next()); // We insert at the end, not at the beginning, of the + // funclet region. 
+ } + +#ifdef DEBUG + if (verbose) + { + printf("Create funclets: moved region\n"); + fgDispHandlerTab(); + } + +// We have to wait to do this until we've created all the additional regions +// Because this relies on ebdEnclosingTryIndex and ebdEnclosingHndIndex +#endif // DEBUG + } + else { - if (XTnum != regionIndex) // we don't need to update our 'last' pointer +#if defined(FEATURE_EH_WINDOWS_X86) + for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { + if (XTnum == regionIndex) + { + // Don't update our handler's Last info + continue; + } + if (HBtab->ebdTryLast == bLast) { // If we moved a set of blocks that were at the end of // a different try region then we may need to update ebdTryLast - for (block = HBtab->ebdTryBeg; block != nullptr; block = block->Next()) + for (block = HBtab->ebdTryBeg; block != NULL; block = block->Next()) { if (block == bPrev) { - // We were contained within it, so shrink its region by - // setting its 'last' fgSetTryEnd(HBtab, bPrev); break; } else if (HBtab->ebdTryLast->NextIs(block)) { - // bPrev does not come after the TryBeg, thus we are larger, and - // it is moving with us. 
+ // bPrev does not come after the TryBeg break; } } @@ -5892,7 +5958,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r { // If we moved a set of blocks that were at the end of // a different handler region then we must update ebdHndLast - for (block = HBtab->ebdHndBeg; block != nullptr; block = block->Next()) + for (block = HBtab->ebdHndBeg; block != NULL; block = block->Next()) { if (block == bPrev) { @@ -5906,84 +5972,12 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r } } } - } - } // end exception table iteration + } // end exception table iteration - // Insert the block(s) we are moving after fgLastBlock - fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); - - if (fgFirstFuncletBB == nullptr) // The funclet region isn't set yet - { - fgFirstFuncletBB = bStart; + // We have decided to insert the block(s) after fgLastBlock + fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); +#endif // FEATURE_EH_WINDOWS_X86 } - else - { - assert(fgFirstFuncletBB != insertAfterBlk->Next()); // We insert at the end, not at the beginning, of the - // funclet region. 
- } - -#ifdef DEBUG - if (verbose) - { - printf("Create funclets: moved region\n"); - fgDispHandlerTab(); - } - -// We have to wait to do this until we've created all the additional regions -// Because this relies on ebdEnclosingTryIndex and ebdEnclosingHndIndex -#endif // DEBUG - -#else // !FEATURE_EH_FUNCLETS - - for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) - { - if (XTnum == regionIndex) - { - // Don't update our handler's Last info - continue; - } - - if (HBtab->ebdTryLast == bLast) - { - // If we moved a set of blocks that were at the end of - // a different try region then we may need to update ebdTryLast - for (block = HBtab->ebdTryBeg; block != NULL; block = block->Next()) - { - if (block == bPrev) - { - fgSetTryEnd(HBtab, bPrev); - break; - } - else if (HBtab->ebdTryLast->NextIs(block)) - { - // bPrev does not come after the TryBeg - break; - } - } - } - if (HBtab->ebdHndLast == bLast) - { - // If we moved a set of blocks that were at the end of - // a different handler region then we must update ebdHndLast - for (block = HBtab->ebdHndBeg; block != NULL; block = block->Next()) - { - if (block == bPrev) - { - fgSetHndEnd(HBtab, bPrev); - break; - } - else if (HBtab->ebdHndLast->NextIs(block)) - { - // bPrev does not come after the HndBeg - break; - } - } - } - } // end exception table iteration - - // We have decided to insert the block(s) after fgLastBlock - fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); -#endif // !FEATURE_EH_FUNCLETS goto DONE; @@ -6127,16 +6121,11 @@ void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk) fgInsertBBafter(insertBeforeBlk->Prev(), newBlk); } -#if defined(FEATURE_EH_FUNCLETS) - /* Update fgFirstFuncletBB if insertBeforeBlk is the first block of the funclet region. 
*/ - if (fgFirstFuncletBB == insertBeforeBlk) { fgFirstFuncletBB = newBlk; } - -#endif // FEATURE_EH_FUNCLETS } /***************************************************************************** @@ -6607,7 +6596,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, // Figure out the start and end block range to search for an insertion location. Pick the beginning and // ending blocks of the target EH region (the 'endBlk' is one past the last block of the EH region, to make - // loop iteration easier). Note that, after funclets have been created (for FEATURE_EH_FUNCLETS), + // loop iteration easier). Note that, after funclets have been created (for UsesFunclets() == true), // this linear block range will not include blocks of handlers for try/handler clauses nested within // this EH region, as those blocks have been extracted as funclets. That is ok, though, because we don't // want to insert a block in any nested EH region. diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index e2af55f45ac820..6311d189186bdd 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -2406,7 +2406,6 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, blockTargetFieldWidth, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); // } -#if defined(FEATURE_EH_FUNCLETS) if (inDefaultOrder && (block == fgFirstFuncletBB)) { printf("++++++%*s+++++++++++++++++++++++++++++++++++++%*s++++++++++++++++++++++++++%*s++++++++++" @@ -2415,7 +2414,6 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, ibcColWidth, "++++++++++++", // blockTargetFieldWidth, "++++++++++++++++++++++++++++++++++++++++++++++"); // } -#endif // FEATURE_EH_FUNCLETS fgTableDispBasicBlock(block, nextBlock, printEdgeLikelihoods, blockTargetFieldWidth, ibcColWidth); @@ -2887,8 +2885,6 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) } } -#if defined(FEATURE_EH_FUNCLETS) - if 
(!found && comp->fgFuncletsCreated) { // There is no easy way to search just the funclets that were pulled out of @@ -2907,8 +2903,6 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) } } -#endif // FEATURE_EH_FUNCLETS - assert(found && "BBJ_EHFINALLYRET predecessor of block that doesn't follow a BBJ_CALLFINALLY!"); return found; } @@ -2969,7 +2963,6 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef return; } -#if defined(FEATURE_EH_FUNCLETS) bool reachedFirstFunclet = false; if (fgFuncletsCreated) { @@ -2983,7 +2976,6 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef assert(fgFirstFuncletBB->HasFlag(BBF_FUNCLET_BEG)); } } -#endif // FEATURE_EH_FUNCLETS /* Check bbNum, bbRefs and bbPreds */ // First, pick a traversal stamp, and label all the blocks with it. @@ -3071,7 +3063,6 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef assert(block->bbPreds == nullptr); } -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // @@ -3096,7 +3087,6 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef assert(block->hasHndIndex() == true); } } -#endif // FEATURE_EH_FUNCLETS if (checkBBRefs) { @@ -3180,7 +3170,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // try { // try { // LEAVE L_OUTER; // this becomes a branch to a BBJ_CALLFINALLY in an outer try region - // // (in the FEATURE_EH_CALLFINALLY_THUNKS case) + // // (in the UsesCallFinallyThunks case) // } catch { // } // } finally { @@ -3191,7 +3181,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef if (ehDsc->ebdTryBeg == succBlock) { // The BBJ_CALLFINALLY is the first block of it's `try` region. Don't check the predecessor. 
- // Note that this case won't occur in the FEATURE_EH_CALLFINALLY_THUNKS case, since the + // Note that this case won't occur in the UsesCallFinallyThunks case, since the // BBJ_CALLFINALLY in that case won't exist in the `try` region of the `finallyIndex`. } else diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 0e1ce24c39ed87..47127fc0ad20fb 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -32,10 +32,8 @@ // PhaseStatus Compiler::fgRemoveEmptyFinally() { -#if defined(FEATURE_EH_FUNCLETS) // We need to do this transformation before funclets are created. assert(!fgFuncletsCreated); -#endif // FEATURE_EH_FUNCLETS // We need to update the bbPreds lists. assert(fgPredsComputed); @@ -271,10 +269,8 @@ PhaseStatus Compiler::fgRemoveEmptyTry() { JITDUMP("\n*************** In fgRemoveEmptyTry()\n"); -#if defined(FEATURE_EH_FUNCLETS) // We need to do this transformation before funclets are created. assert(!fgFuncletsCreated); -#endif // FEATURE_EH_FUNCLETS // We need to update the bbPreds lists. 
assert(fgPredsComputed); @@ -341,6 +337,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() BasicBlock* const lastTryBlock = HBtab->ebdTryLast; BasicBlock* const firstHandlerBlock = HBtab->ebdHndBeg; BasicBlock* const lastHandlerBlock = HBtab->ebdHndLast; + BasicBlock* callFinally; assert(firstTryBlock->getTryIndex() == XTnum); @@ -353,63 +350,64 @@ PhaseStatus Compiler::fgRemoveEmptyTry() continue; } -#if FEATURE_EH_CALLFINALLY_THUNKS - - // Look for blocks that are always jumps to a call finally - // pair that targets the finally - if (!firstTryBlock->KindIs(BBJ_ALWAYS)) + if (UsesCallFinallyThunks()) { - JITDUMP("EH#%u first try block " FMT_BB " not jump to a callfinally; skipping.\n", XTnum, - firstTryBlock->bbNum); - XTnum++; - continue; - } + // Look for blocks that are always jumps to a call finally + // pair that targets the finally + if (!firstTryBlock->KindIs(BBJ_ALWAYS)) + { + JITDUMP("EH#%u first try block " FMT_BB " not jump to a callfinally; skipping.\n", XTnum, + firstTryBlock->bbNum); + XTnum++; + continue; + } - BasicBlock* const callFinally = firstTryBlock->GetTarget(); + callFinally = firstTryBlock->GetTarget(); - // Look for call finally pair. Note this will also disqualify - // empty try removal in cases where the finally doesn't - // return. - if (!callFinally->isBBCallFinallyPair() || !callFinally->TargetIs(firstHandlerBlock)) - { - JITDUMP("EH#%u first try block " FMT_BB " always jumps but not to a callfinally; skipping.\n", XTnum, - firstTryBlock->bbNum); - XTnum++; - continue; - } + // Look for call finally pair. Note this will also disqualify + // empty try removal in cases where the finally doesn't + // return. + if (!callFinally->isBBCallFinallyPair() || !callFinally->TargetIs(firstHandlerBlock)) + { + JITDUMP("EH#%u first try block " FMT_BB " always jumps but not to a callfinally; skipping.\n", XTnum, + firstTryBlock->bbNum); + XTnum++; + continue; + } - // Try itself must be a single block. 
- if (firstTryBlock != lastTryBlock) - { - JITDUMP("EH#%u first try block " FMT_BB " not only block in try; skipping.\n", XTnum, - firstTryBlock->Next()->bbNum); - XTnum++; - continue; + // Try itself must be a single block. + if (firstTryBlock != lastTryBlock) + { + JITDUMP("EH#%u first try block " FMT_BB " not only block in try; skipping.\n", XTnum, + firstTryBlock->Next()->bbNum); + XTnum++; + continue; + } } - -#else - // Look for call finally pair within the try itself. Note this - // will also disqualify empty try removal in cases where the - // finally doesn't return. - if (!firstTryBlock->isBBCallFinallyPair() || !firstTryBlock->TargetIs(firstHandlerBlock)) + else { - JITDUMP("EH#%u first try block " FMT_BB " not a callfinally; skipping.\n", XTnum, firstTryBlock->bbNum); - XTnum++; - continue; - } + // Look for call finally pair within the try itself. Note this + // will also disqualify empty try removal in cases where the + // finally doesn't return. + if (!firstTryBlock->isBBCallFinallyPair() || !firstTryBlock->TargetIs(firstHandlerBlock)) + { + JITDUMP("EH#%u first try block " FMT_BB " not a callfinally; skipping.\n", XTnum, firstTryBlock->bbNum); + XTnum++; + continue; + } - BasicBlock* const callFinally = firstTryBlock; + callFinally = firstTryBlock; - // Try must be a callalways pair of blocks. - if (!firstTryBlock->NextIs(lastTryBlock)) - { - JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, firstTryBlock->Next()->bbNum); - XTnum++; - continue; + // Try must be a callalways pair of blocks. 
+ if (!firstTryBlock->NextIs(lastTryBlock)) + { + JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, + firstTryBlock->Next()->bbNum); + XTnum++; + continue; + } } -#endif // FEATURE_EH_CALLFINALLY_THUNKS - JITDUMP("EH#%u has empty try, removing the try region and promoting the finally.\n", XTnum); // There should be just one callfinally that invokes this @@ -527,21 +525,24 @@ PhaseStatus Compiler::fgRemoveEmptyTry() } } -#if !defined(FEATURE_EH_FUNCLETS) - // If we're in a non-funclet model, decrement the nesting - // level of any GT_END_LFIN we find in the handler region, - // since we're removing the enclosing handler. - for (Statement* const stmt : block->Statements()) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) { - GenTree* expr = stmt->GetRootNode(); - if (expr->gtOper == GT_END_LFIN) + // If we're in a non-funclet model, decrement the nesting + // level of any GT_END_LFIN we find in the handler region, + // since we're removing the enclosing handler. + for (Statement* const stmt : block->Statements()) { - const size_t nestLevel = expr->AsVal()->gtVal1; - assert(nestLevel > 0); - expr->AsVal()->gtVal1 = nestLevel - 1; + GenTree* expr = stmt->GetRootNode(); + if (expr->gtOper == GT_END_LFIN) + { + const size_t nestLevel = expr->AsVal()->gtVal1; + assert(nestLevel > 0); + expr->AsVal()->gtVal1 = nestLevel - 1; + } } } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 } // (6) Remove the try-finally EH region. This will compact the @@ -605,10 +606,8 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // PhaseStatus Compiler::fgCloneFinally() { -#if defined(FEATURE_EH_FUNCLETS) // We need to do this transformation before funclets are created. assert(!fgFuncletsCreated); -#endif // FEATURE_EH_FUNCLETS // We need to update the bbPreds lists. 
assert(fgPredsComputed); @@ -795,25 +794,29 @@ PhaseStatus Compiler::fgCloneFinally() for (BasicBlock* block = lastTryBlock; block != beforeTryBlock; block = block->Prev()) { -#if FEATURE_EH_CALLFINALLY_THUNKS - // Blocks that transfer control to callfinallies are usually - // BBJ_ALWAYS blocks, but the last block of a try may fall - // through to a callfinally, or could be the target of a BBJ_CALLFINALLYRET, - // indicating a chained callfinally. BasicBlock* jumpDest = nullptr; - if (block->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLYRET)) + if (UsesCallFinallyThunks()) { - jumpDest = block->GetTarget(); - } + // Blocks that transfer control to callfinallies are usually + // BBJ_ALWAYS blocks, but the last block of a try may fall + // through to a callfinally, or could be the target of a BBJ_CALLFINALLYRET, + // indicating a chained callfinally. + + if (block->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLYRET)) + { + jumpDest = block->GetTarget(); + } - if (jumpDest == nullptr) + if (jumpDest == nullptr) + { + continue; + } + } + else { - continue; + jumpDest = block; } -#else - BasicBlock* const jumpDest = block; -#endif // FEATURE_EH_CALLFINALLY_THUNKS // The jumpDest must be a callfinally that in turn invokes the // finally of interest. @@ -880,29 +883,32 @@ PhaseStatus Compiler::fgCloneFinally() isUpdate = true; } -#if FEATURE_EH_CALLFINALLY_THUNKS - // When there are callfinally thunks, we don't expect to see the - // callfinally within a handler region either. - assert(!jumpDest->hasHndIndex()); - - // Update the clone insertion point to just after the - // call always pair. - cloneInsertAfter = finallyReturnBlock; - - // We will consider moving the callfinally so we can fall - // through from the try into the clone. - tryToRelocateCallFinally = true; - - JITDUMP("%s path to clone: try block " FMT_BB " jumps to callfinally at " FMT_BB ";" - " the call returns to " FMT_BB " which jumps to " FMT_BB "\n", - isUpdate ? 
"Updating" : "Choosing", block->bbNum, jumpDest->bbNum, finallyReturnBlock->bbNum, - postTryFinallyBlock->bbNum); -#else - JITDUMP("%s path to clone: try block " FMT_BB " is a callfinally;" - " the call returns to " FMT_BB " which jumps to " FMT_BB "\n", - isUpdate ? "Updating" : "Choosing", block->bbNum, finallyReturnBlock->bbNum, - postTryFinallyBlock->bbNum); -#endif // FEATURE_EH_CALLFINALLY_THUNKS + if (UsesCallFinallyThunks()) + { + // When there are callfinally thunks, we don't expect to see the + // callfinally within a handler region either. + assert(!jumpDest->hasHndIndex()); + + // Update the clone insertion point to just after the + // call always pair. + cloneInsertAfter = finallyReturnBlock; + + // We will consider moving the callfinally so we can fall + // through from the try into the clone. + tryToRelocateCallFinally = true; + + JITDUMP("%s path to clone: try block " FMT_BB " jumps to callfinally at " FMT_BB ";" + " the call returns to " FMT_BB " which jumps to " FMT_BB "\n", + isUpdate ? "Updating" : "Choosing", block->bbNum, jumpDest->bbNum, finallyReturnBlock->bbNum, + postTryFinallyBlock->bbNum); + } + else + { + JITDUMP("%s path to clone: try block " FMT_BB " is a callfinally;" + " the call returns to " FMT_BB " which jumps to " FMT_BB "\n", + isUpdate ? "Updating" : "Choosing", block->bbNum, finallyReturnBlock->bbNum, + postTryFinallyBlock->bbNum); + } // For non-pgo just take the first one we find. // For pgo, keep searching in case we find one we like better. @@ -1335,19 +1341,15 @@ void Compiler::fgDebugCheckTryFinallyExits() continue; } -#if FEATURE_EH_CALLFINALLY_THUNKS - // When there are callfinally thunks, callfinallies // logically "belong" to a child region and the exit // path validity will be checked when looking at the // try blocks in that region. 
- if (block->KindIs(BBJ_CALLFINALLY)) + if (UsesCallFinallyThunks() && block->KindIs(BBJ_CALLFINALLY)) { continue; } -#endif // FEATURE_EH_CALLFINALLY_THUNKS - // Now we know block lies directly within the try of a // try-finally, and succBlock is in an enclosing // region (possibly the method region). So this path @@ -1365,19 +1367,16 @@ void Compiler::fgDebugCheckTryFinallyExits() // (e) via an always jump clonefinally exit bool isCallToFinally = false; -#if FEATURE_EH_CALLFINALLY_THUNKS - if (succBlock->KindIs(BBJ_CALLFINALLY)) + if (UsesCallFinallyThunks() && succBlock->KindIs(BBJ_CALLFINALLY)) { // case (a1) isCallToFinally = isFinally && succBlock->TargetIs(finallyBlock); } -#else // !FEATURE_EH_CALLFINALLY_THUNKS - if (block->KindIs(BBJ_CALLFINALLY)) + else if (!UsesCallFinallyThunks() && block->KindIs(BBJ_CALLFINALLY)) { // case (a2) isCallToFinally = isFinally && block->TargetIs(finallyBlock); } -#endif // !FEATURE_EH_CALLFINALLY_THUNKS bool isJumpToClonedFinally = false; @@ -1455,27 +1454,30 @@ void Compiler::fgDebugCheckTryFinallyExits() // void Compiler::fgCleanupContinuation(BasicBlock* continuation) { -#if !defined(FEATURE_EH_FUNCLETS) - // The continuation may be a finalStep block. - // It is now a normal block, so clear the special keep - // always flag. - continuation->RemoveFlags(BBF_KEEP_BBJ_ALWAYS); - - // Remove the GT_END_LFIN from the continuation, - // Note we only expect to see one such statement. - bool foundEndLFin = false; - for (Statement* const stmt : continuation->Statements()) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) { - GenTree* expr = stmt->GetRootNode(); - if (expr->gtOper == GT_END_LFIN) + // The continuation may be a finalStep block. + // It is now a normal block, so clear the special keep + // always flag. + continuation->RemoveFlags(BBF_KEEP_BBJ_ALWAYS); + + // Remove the GT_END_LFIN from the continuation, + // Note we only expect to see one such statement. 
+ bool foundEndLFin = false; + for (Statement* const stmt : continuation->Statements()) { - assert(!foundEndLFin); - fgRemoveStmt(continuation, stmt); - foundEndLFin = true; + GenTree* expr = stmt->GetRootNode(); + if (expr->gtOper == GT_END_LFIN) + { + assert(!foundEndLFin); + fgRemoveStmt(continuation, stmt); + foundEndLFin = true; + } } + assert(foundEndLFin); } - assert(foundEndLFin); -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 } //------------------------------------------------------------------------ @@ -1493,10 +1495,8 @@ void Compiler::fgCleanupContinuation(BasicBlock* continuation) // PhaseStatus Compiler::fgMergeFinallyChains() { -#if defined(FEATURE_EH_FUNCLETS) // We need to do this transformation before funclets are created. assert(!fgFuncletsCreated); -#endif // FEATURE_EH_FUNCLETS // We need to update the bbPreds lists. assert(fgPredsComputed); @@ -1521,22 +1521,26 @@ PhaseStatus Compiler::fgMergeFinallyChains() bool enableMergeFinallyChains = true; -#if !defined(FEATURE_EH_FUNCLETS) - // For non-funclet models (x86) the callfinallys may contain - // statements and the continuations contain GT_END_LFINs. So no - // merging is possible until the GT_END_LFIN blocks can be merged - // and merging is not safe unless the callfinally blocks are split. - JITDUMP("EH using non-funclet model; merging not yet implemented.\n"); - enableMergeFinallyChains = false; -#endif // !FEATURE_EH_FUNCLETS - -#if !FEATURE_EH_CALLFINALLY_THUNKS - // For non-thunk EH models (x86) the callfinallys may contain - // statements, and merging is not safe unless the callfinally - // blocks are split. - JITDUMP("EH using non-callfinally thunk model; merging not yet implemented.\n"); - enableMergeFinallyChains = false; -#endif +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) + { + // For non-funclet models (x86) the callfinallys may contain + // statements and the continuations contain GT_END_LFINs. 
So no + // merging is possible until the GT_END_LFIN blocks can be merged + // and merging is not safe unless the callfinally blocks are split. + JITDUMP("EH using non-funclet model; merging not yet implemented.\n"); + enableMergeFinallyChains = false; + } +#endif // FEATURE_EH_WINDOWS_X86 + + if (!UsesCallFinallyThunks()) + { + // For non-thunk EH models (x86) the callfinallys may contain + // statements, and merging is not safe unless the callfinally + // blocks are split. + JITDUMP("EH using non-callfinally thunk model; merging not yet implemented.\n"); + enableMergeFinallyChains = false; + } if (!enableMergeFinallyChains) { diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index f5ca834696ce5b..94e6bc5b3c057a 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -1630,7 +1630,6 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) break; } -#if defined(FEATURE_EH_FUNCLETS) /* Don't remove an empty block that is in a different EH region * from its successor block, if the block is the target of a * catch return. It is required that the return address of a @@ -1638,6 +1637,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) * abort exceptions to work. Insert a NOP in the empty block * to ensure we generate code for the block, if we keep it. 
*/ + if (UsesFunclets()) { BasicBlock* succBlock = block->GetTarget(); @@ -1693,7 +1693,6 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } } } -#endif // FEATURE_EH_FUNCLETS if (!ehCanDeleteEmptyBlock(block)) { @@ -3454,9 +3453,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) { noway_assert(opts.compDbgCode == false); -#if defined(FEATURE_EH_FUNCLETS) - assert(fgFuncletsCreated); -#endif // FEATURE_EH_FUNCLETS + assert(UsesFunclets() == fgFuncletsCreated); // We can't relocate anything if we only have one block if (fgFirstBB->IsLast()) @@ -3472,9 +3469,12 @@ bool Compiler::fgReorderBlocks(bool useProfile) // First let us expand the set of run rarely blocks newRarelyRun |= fgExpandRarelyRunBlocks(); -#if !defined(FEATURE_EH_FUNCLETS) - movedBlocks |= fgRelocateEHRegions(); -#endif // !FEATURE_EH_FUNCLETS +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) + { + movedBlocks |= fgRelocateEHRegions(); + } +#endif // FEATURE_EH_WINDOWS_X86 // // If we are using profile weights we can change some @@ -3993,13 +3993,11 @@ bool Compiler::fgReorderBlocks(bool useProfile) break; } -#if defined(FEATURE_EH_FUNCLETS) // Check if we've reached the funclets region, at the end of the function if (bEnd->NextIs(fgFirstFuncletBB)) { break; } -#endif // FEATURE_EH_FUNCLETS if (bNext == bDest) { diff --git a/src/coreclr/jit/fgstmt.cpp b/src/coreclr/jit/fgstmt.cpp index fead5b82e0b347..0c0d7384f275b6 100644 --- a/src/coreclr/jit/fgstmt.cpp +++ b/src/coreclr/jit/fgstmt.cpp @@ -538,9 +538,9 @@ inline bool OperIsControlFlow(genTreeOps oper) case GT_RETURN: case GT_RETFILT: -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 return true; default: diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 7cedd596b12e8b..6c4416c4ce32e3 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -1287,8 +1287,6 @@ GenTree* 
Compiler::fgGetCritSectOfStaticMethod() return tree; } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * * Add monitor enter/exit calls for synchronized methods, and a try/fault @@ -1351,6 +1349,8 @@ GenTree* Compiler::fgGetCritSectOfStaticMethod() void Compiler::fgAddSyncMethodEnterExit() { + assert(UsesFunclets()); + assert((info.compFlags & CORINFO_FLG_SYNCH) != 0); // We need to do this transformation before funclets are created. @@ -1663,8 +1663,6 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) #endif } -#endif // FEATURE_EH_FUNCLETS - //------------------------------------------------------------------------ // fgAddReversePInvokeEnterExit: Add enter/exit calls for reverse PInvoke methods // @@ -2350,17 +2348,15 @@ PhaseStatus Compiler::fgAddInternal() // Merge return points if required or beneficial MergedReturns merger(this); -#if defined(FEATURE_EH_FUNCLETS) // Add the synchronized method enter/exit calls and try/finally protection. Note // that this must happen before the one BBJ_RETURN block is created below, so the // BBJ_RETURN block gets placed at the top-level, not within an EH region. (Otherwise, // we'd have to be really careful when creating the synchronized method try/finally // not to include the BBJ_RETURN block.) - if ((info.compFlags & CORINFO_FLG_SYNCH) != 0) + if (UsesFunclets() && (info.compFlags & CORINFO_FLG_SYNCH) != 0) { fgAddSyncMethodEnterExit(); } -#endif // FEATURE_EH_FUNCLETS // // We will generate just one epilog (return block) @@ -2471,11 +2467,11 @@ PhaseStatus Compiler::fgAddInternal() madeChanges = true; } -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) /* Is this a 'synchronized' method? 
*/ - if (info.compFlags & CORINFO_FLG_SYNCH) + if (!UsesFunclets() && (info.compFlags & CORINFO_FLG_SYNCH)) { GenTree* tree = nullptr; @@ -2543,7 +2539,7 @@ PhaseStatus Compiler::fgAddInternal() madeChanges = true; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 if (opts.IsReversePInvoke()) { @@ -2729,15 +2725,11 @@ BasicBlock* Compiler::fgGetDomSpeculatively(const BasicBlock* block) // BasicBlock* Compiler::fgLastBBInMainFunction() { -#if defined(FEATURE_EH_FUNCLETS) - if (fgFirstFuncletBB != nullptr) { return fgFirstFuncletBB->Prev(); } -#endif // FEATURE_EH_FUNCLETS - assert(fgLastBB->IsLast()); return fgLastBB; } @@ -2749,21 +2741,15 @@ BasicBlock* Compiler::fgLastBBInMainFunction() // BasicBlock* Compiler::fgEndBBAfterMainFunction() { -#if defined(FEATURE_EH_FUNCLETS) - if (fgFirstFuncletBB != nullptr) { return fgFirstFuncletBB; } -#endif // FEATURE_EH_FUNCLETS - assert(fgLastBB->IsLast()); return nullptr; } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * Introduce a new head block of the handler for the prolog to be put in, ahead * of the current handler head 'block'. 
@@ -2779,6 +2765,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) } #endif + assert(UsesFunclets()); assert(block->hasHndIndex()); assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler @@ -2841,6 +2828,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) // void Compiler::fgCreateFuncletPrologBlocks() { + assert(UsesFunclets()); noway_assert(fgPredsComputed); assert(!fgFuncletsCreated); @@ -2905,6 +2893,7 @@ void Compiler::fgCreateFuncletPrologBlocks() // PhaseStatus Compiler::fgCreateFunclets() { + assert(UsesFunclets()); assert(!fgFuncletsCreated); fgCreateFuncletPrologBlocks(); @@ -2980,6 +2969,8 @@ PhaseStatus Compiler::fgCreateFunclets() // bool Compiler::fgFuncletsAreCold() { + assert(UsesFunclets()); + for (BasicBlock* block = fgFirstFuncletBB; block != nullptr; block = block->Next()) { if (!block->isRunRarely()) @@ -2991,8 +2982,6 @@ bool Compiler::fgFuncletsAreCold() return true; } -#endif // defined(FEATURE_EH_FUNCLETS) - //------------------------------------------------------------------------ // fgDetermineFirstColdBlock: figure out where we might split the block // list to put some blocks into the cold code section @@ -3062,14 +3051,12 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() } #endif // HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION -#ifdef FEATURE_EH_FUNCLETS // Make note of if we're in the funclet section, // so we can stop the search early. if (block == fgFirstFuncletBB) { inFuncletSection = true; } -#endif // FEATURE_EH_FUNCLETS // Do we have a candidate for the first cold block? if (firstColdBlock != nullptr) @@ -3083,7 +3070,6 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() firstColdBlock = nullptr; prevToFirstColdBlock = nullptr; -#ifdef FEATURE_EH_FUNCLETS // If we're already in the funclet section, try to split // at fgFirstFuncletBB, and stop the search. 
if (inFuncletSection) @@ -3096,13 +3082,10 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() break; } -#endif // FEATURE_EH_FUNCLETS } } else // (firstColdBlock == NULL) -- we don't have a candidate for first cold block { - -#ifdef FEATURE_EH_FUNCLETS // // If a function has exception handling and we haven't found the first cold block yet, // consider splitting at the first funclet; do not consider splitting between funclets, @@ -3118,7 +3101,6 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() break; } -#endif // FEATURE_EH_FUNCLETS // Is this a cold block? if (!blockMustBeInHotSection && block->isRunRarely()) diff --git a/src/coreclr/jit/gcencode.cpp b/src/coreclr/jit/gcencode.cpp index a093d8a20e5981..b7972f216d53ed 100644 --- a/src/coreclr/jit/gcencode.cpp +++ b/src/coreclr/jit/gcencode.cpp @@ -62,8 +62,6 @@ ReturnKind GCInfo::getReturnKind() } } -#if !defined(JIT32_GCENCODER) || defined(FEATURE_EH_FUNCLETS) - // gcMarkFilterVarsPinned - Walk all lifetimes and make it so that anything // live in a filter is marked as pinned (often by splitting the lifetime // so that *only* the filter region is pinned). This should only be @@ -86,6 +84,7 @@ ReturnKind GCInfo::getReturnKind() // void GCInfo::gcMarkFilterVarsPinned() { + assert(compiler->UsesFunclets()); assert(compiler->ehAnyFunclets()); for (EHblkDsc* const HBtab : EHClauses(compiler)) @@ -293,6 +292,8 @@ void GCInfo::gcMarkFilterVarsPinned() void GCInfo::gcInsertVarPtrDscSplit(varPtrDsc* desc, varPtrDsc* begin) { + assert(compiler->UsesFunclets()); + #ifndef JIT32_GCENCODER (void)begin; desc->vpdNext = gcVarPtrList; @@ -331,6 +332,8 @@ void GCInfo::gcDumpVarPtrDsc(varPtrDsc* desc) const GCtype gcType = (desc->vpdVarNum & byref_OFFSET_FLAG) ? GCT_BYREF : GCT_GCREF; const bool isPin = (desc->vpdVarNum & pinned_OFFSET_FLAG) != 0; + assert(compiler->UsesFunclets()); + printf("[%08X] %s%s var at [%s", dspPtr(desc), GCtypeStr(gcType), isPin ? "pinned-ptr" : "", compiler->isFramePointerUsed() ? 
STR_FPBASE : STR_SPBASE); @@ -348,8 +351,6 @@ void GCInfo::gcDumpVarPtrDsc(varPtrDsc* desc) #endif // DEBUG -#endif // !defined(JIT32_GCENCODER) || defined(FEATURE_EH_FUNCLETS) - #ifdef JIT32_GCENCODER #include "emit.h" @@ -1560,9 +1561,9 @@ size_t GCInfo::gcInfoBlockHdrSave( header->syncStartOffset = INVALID_SYNC_OFFSET; header->syncEndOffset = INVALID_SYNC_OFFSET; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // JIT is responsible for synchronization on funclet-based EH model that x86/Linux uses. - if (compiler->info.compFlags & CORINFO_FLG_SYNCH) + if (!compiler->UsesFunclets() && compiler->info.compFlags & CORINFO_FLG_SYNCH) { assert(compiler->syncStartEmitCookie != nullptr); header->syncStartOffset = compiler->GetEmitter()->emitCodeOffset(compiler->syncStartEmitCookie, 0); @@ -2315,8 +2316,8 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un if (header.varPtrTableSize != 0) { -#if !defined(FEATURE_EH_FUNCLETS) - if (keepThisAlive) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!compiler->UsesFunclets() && keepThisAlive) { // Encoding of untracked variables does not support reporting // "this". 
So report it as a tracked variable with a liveness @@ -2340,7 +2341,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un dest += (sz & mask); totalSize += sz; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 /* We'll use a delta encoding for the lifetime offsets */ @@ -3957,7 +3958,6 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSiz gcInfoEncoderWithLog->SetPrologSize(prologSize); } -#if defined(FEATURE_EH_FUNCLETS) if (compiler->lvaPSPSym != BAD_VAR_NUM) { #ifdef TARGET_AMD64 @@ -3976,8 +3976,6 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSiz } #endif // TARGET_AMD64 -#endif // FEATURE_EH_FUNCLETS - #ifdef TARGET_ARMARCH if (compiler->codeGen->GetHasTailCalls()) { @@ -4694,8 +4692,8 @@ void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode // unused by alignment C_ASSERT((OFFSET_MASK + 1) <= sizeof(int)); -#if defined(DEBUG) && defined(JIT32_GCENCODER) && !defined(FEATURE_EH_FUNCLETS) - if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS) +#if defined(DEBUG) && defined(JIT32_GCENCODER) && defined(FEATURE_EH_WINDOWS_X86) + if (!compiler->UsesFunclets() && mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS) { // Tracked variables can't be pinned, and the encoding takes // advantage of that by using the same bit for 'pinned' and 'this' diff --git a/src/coreclr/jit/gcinfo.cpp b/src/coreclr/jit/gcinfo.cpp index 8045cd873260ea..b7c1a2667bf91a 100644 --- a/src/coreclr/jit/gcinfo.cpp +++ b/src/coreclr/jit/gcinfo.cpp @@ -565,7 +565,7 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int* pUntrackedCount, UNALIGNED // // Arguments: // varNum - the variable number to check; -// pKeepThisAlive - if !FEATURE_EH_FUNCLETS and the argument != nullptr remember +// pKeepThisAlive - if !UsesFunclets() and the argument != nullptr remember // if `this` should be kept alive and considered tracked. 
// // Return value: @@ -614,16 +614,16 @@ bool GCInfo::gcIsUntrackedLocalOrNonEnregisteredArg(unsigned varNum, bool* pKeep } } -#if !defined(FEATURE_EH_FUNCLETS) - if (compiler->lvaIsOriginalThisArg(varNum) && compiler->lvaKeepAliveAndReportThis()) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!compiler->UsesFunclets() && compiler->lvaIsOriginalThisArg(varNum) && compiler->lvaKeepAliveAndReportThis()) { // "this" is in the untracked variable area, but encoding of untracked variables does not support reporting // "this". So report it as a tracked variable with a liveness extending over the entire method. // // TODO-x86-Cleanup: the semantic here is not clear, it would be useful to check different cases and // add a description where "this" is saved and how it is tracked in each of them: - // 1) when FEATURE_EH_FUNCLETS defined (x86 Linux); - // 2) when FEATURE_EH_FUNCLETS not defined, lvaKeepAliveAndReportThis == true, compJmpOpUsed == true; + // 1) when UsesFunclets() == true (x86 Linux); + // 2) when UsesFunclets() == false, lvaKeepAliveAndReportThis == true, compJmpOpUsed == true; // 3) when there is regPtrDsc for "this", but keepThisAlive == true; // etc. 
@@ -633,7 +633,7 @@ bool GCInfo::gcIsUntrackedLocalOrNonEnregisteredArg(unsigned varNum, bool* pKeep } return false; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 return true; } diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 71d0d0e2dfe904..5b4894c82a0327 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -6672,9 +6672,9 @@ bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse) case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_PHI_ARG: case GT_JMPTABLE: case GT_PHYSREG: @@ -9423,9 +9423,9 @@ GenTree* Compiler::gtCloneExpr(GenTree* tree) copy = new (this, oper) GenTree(oper, tree->gtType); goto DONE; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_JMP: copy = new (this, oper) GenTreeVal(oper, tree->gtType, tree->AsVal()->gtVal1); goto DONE; @@ -10240,9 +10240,9 @@ GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node) case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_PHI_ARG: case GT_JMPTABLE: case GT_PHYSREG: @@ -11775,24 +11775,22 @@ void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, cons ilName = "OutArgs"; } #endif // FEATURE_FIXED_OUT_ARGS -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) else if (lclNum == lvaShadowSPslotsVar) { ilName = "EHSlots"; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 #ifdef JIT32_GCENCODER else if (lclNum == lvaLocAllocSPvar) { ilName = "LocAllocSP"; } #endif // JIT32_GCENCODER -#if defined(FEATURE_EH_FUNCLETS) 
else if (lclNum == lvaPSPSym) { ilName = "PSPSym"; } -#endif // FEATURE_EH_FUNCLETS else { ilKind = "tmp"; @@ -12356,11 +12354,11 @@ void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack) } break; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: printf(" endNstLvl=%d", tree->AsVal()->gtVal1); break; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 // Vanilla leaves. No qualifying information available. So do nothing diff --git a/src/coreclr/jit/gtlist.h b/src/coreclr/jit/gtlist.h index 817b27a936a561..1d442f27673725 100644 --- a/src/coreclr/jit/gtlist.h +++ b/src/coreclr/jit/gtlist.h @@ -282,9 +282,9 @@ GTNODE(START_PREEMPTGC , GenTree ,0,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHI GTNODE(PROF_HOOK , GenTree ,0,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // Profiler Enter/Leave/TailCall hook. GTNODE(RETFILT , GenTreeOp ,0,1,GTK_UNOP|GTK_NOVALUE) // End filter with TYP_I_IMPL return value. -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) GTNODE(END_LFIN , GenTreeVal ,0,0,GTK_LEAF|GTK_NOVALUE) // End locally-invoked finally. 
-#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 //----------------------------------------------------------------------------- // Swift interop-specific nodes: diff --git a/src/coreclr/jit/gtstructs.h b/src/coreclr/jit/gtstructs.h index e6823478a3c9a5..8b82bc42b214e2 100644 --- a/src/coreclr/jit/gtstructs.h +++ b/src/coreclr/jit/gtstructs.h @@ -50,7 +50,7 @@ GTSTRUCT_0(UnOp , GT_OP) GTSTRUCT_0(Op , GT_OP) -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) GTSTRUCT_2(Val , GT_END_LFIN, GT_JMP) #else GTSTRUCT_1(Val , GT_JMP) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 0d1df79812f03a..7c08570aafbaa9 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -4309,9 +4309,9 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op) // After this function, the BBJ_LEAVE block has been converted to a different type. // -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) -void Compiler::impImportLeave(BasicBlock* block) +void Compiler::impImportLeaveEHRegions(BasicBlock* block) { #ifdef DEBUG if (verbose) @@ -4594,10 +4594,17 @@ void Compiler::impImportLeave(BasicBlock* block) #endif // DEBUG } -#else // FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 void Compiler::impImportLeave(BasicBlock* block) { +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) + { + return impImportLeaveEHRegions(block); + } +#endif + #ifdef DEBUG if (verbose) { @@ -4723,10 +4730,8 @@ void Compiler::impImportLeave(BasicBlock* block) BasicBlock* callBlock; - if (step == nullptr) + if (step == nullptr && UsesCallFinallyThunks()) { -#if FEATURE_EH_CALLFINALLY_THUNKS - // Put the call to the finally in the enclosing region. unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 
0 : HBtab->ebdEnclosingTryIndex + 1; @@ -4757,9 +4762,9 @@ void Compiler::impImportLeave(BasicBlock* block) XTnum, block->bbNum, callBlock->bbNum); } #endif - -#else // !FEATURE_EH_CALLFINALLY_THUNKS - + } + else if (step == nullptr) // && !UsesCallFinallyThunks() + { callBlock = block; // callBlock calls the finally handler @@ -4775,8 +4780,6 @@ void Compiler::impImportLeave(BasicBlock* block) XTnum, callBlock->bbNum); } #endif - -#endif // !FEATURE_EH_CALLFINALLY_THUNKS } else { @@ -4799,8 +4802,7 @@ void Compiler::impImportLeave(BasicBlock* block) assert(step->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLYRET, BBJ_EHCATCHRET)); assert((step == block) || !step->HasInitializedTarget()); -#if FEATURE_EH_CALLFINALLY_THUNKS - if (step->KindIs(BBJ_EHCATCHRET)) + if (UsesCallFinallyThunks() && step->KindIs(BBJ_EHCATCHRET)) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. @@ -4832,17 +4834,24 @@ void Compiler::impImportLeave(BasicBlock* block) step = step2; assert(stepType == ST_Catch); // Leave it as catch type for now. } -#endif // FEATURE_EH_CALLFINALLY_THUNKS -#if FEATURE_EH_CALLFINALLY_THUNKS - unsigned callFinallyTryIndex = - (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; - unsigned callFinallyHndIndex = - (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; -#else // !FEATURE_EH_CALLFINALLY_THUNKS - unsigned callFinallyTryIndex = XTnum + 1; - unsigned callFinallyHndIndex = 0; // don't care -#endif // !FEATURE_EH_CALLFINALLY_THUNKS + unsigned callFinallyTryIndex; + unsigned callFinallyHndIndex; + + if (UsesCallFinallyThunks()) + { + callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) + ? 0 + : HBtab->ebdEnclosingTryIndex + 1; + callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) + ? 
0 + : HBtab->ebdEnclosingHndIndex + 1; + } + else + { + callFinallyTryIndex = XTnum + 1; + callFinallyHndIndex = 0; // don't care + } assert(step->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLYRET, BBJ_EHCATCHRET)); assert((step == block) || !step->HasInitializedTarget()); @@ -5051,15 +5060,12 @@ void Compiler::impImportLeave(BasicBlock* block) #endif // DEBUG } -#endif // FEATURE_EH_FUNCLETS - /*****************************************************************************/ // This is called when reimporting a leave block. It resets the JumpKind, // JumpDest, and bbNext to the original values void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) { -#if defined(FEATURE_EH_FUNCLETS) // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1) // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0, // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we @@ -5082,7 +5088,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. 
- if (block->KindIs(BBJ_CALLFINALLY)) + if (UsesFunclets() && block->KindIs(BBJ_CALLFINALLY)) { BasicBlock* dupBlock = BasicBlock::New(this); dupBlock->CopyFlags(block); @@ -5112,7 +5118,6 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) } #endif } -#endif // FEATURE_EH_FUNCLETS fgInitBBLookup(); diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 329f1c602cf989..2270d5b1656368 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -243,9 +243,16 @@ void EHblkDsc::DispEntry(unsigned XTnum) { printf(" %2u ::", XTnum); -#if !defined(FEATURE_EH_FUNCLETS) - printf(" %2u ", XTnum, ebdHandlerNestingLevel); -#endif // !FEATURE_EH_FUNCLETS +#if defined(FEATURE_EH_WINDOWS_X86) + if (ebdHandlerNestingLevel == 0) + { + printf(" "); + } + else + { + printf(" %2u ", ebdHandlerNestingLevel); + } +#endif // FEATURE_EH_WINDOWS_X86 if (ebdEnclosingTryIndex == NO_ENCLOSING_INDEX) { @@ -613,17 +620,19 @@ bool Compiler::bbIsHandlerBeg(const BasicBlock* block) bool Compiler::ehHasCallableHandlers() { -#if defined(FEATURE_EH_FUNCLETS) - - // Any EH in the function? - - return compHndBBtabCount > 0; - -#else // !FEATURE_EH_FUNCLETS - - return ehNeedsShadowSPslots(); - -#endif // !FEATURE_EH_FUNCLETS + if (UsesFunclets()) + { + // Any EH in the function? 
+ return compHndBBtabCount > 0; + } + else + { +#if defined(FEATURE_EH_WINDOWS_X86) + return ehNeedsShadowSPslots(); +#else + return false; +#endif // FEATURE_EH_WINDOWS_X86 + } } /****************************************************************************************** @@ -897,12 +906,15 @@ unsigned Compiler::ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTr assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); -#if FEATURE_EH_CALLFINALLY_THUNKS - return ehGetDsc(finallyIndex)->ebdGetEnclosingRegionIndex(inTryRegion); -#else - *inTryRegion = true; - return finallyIndex; -#endif + if (UsesCallFinallyThunks()) + { + return ehGetDsc(finallyIndex)->ebdGetEnclosingRegionIndex(inTryRegion); + } + else + { + *inTryRegion = true; + return finallyIndex; + } } void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** startBlock, BasicBlock** lastBlock) @@ -912,35 +924,38 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** st assert(startBlock != nullptr); assert(lastBlock != nullptr); -#if FEATURE_EH_CALLFINALLY_THUNKS - bool inTryRegion; - unsigned callFinallyRegionIndex = ehGetCallFinallyRegionIndex(finallyIndex, &inTryRegion); - - if (callFinallyRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX) + if (UsesCallFinallyThunks()) { - *startBlock = fgFirstBB; - *lastBlock = fgLastBBInMainFunction(); - } - else - { - EHblkDsc* ehDsc = ehGetDsc(callFinallyRegionIndex); + bool inTryRegion; + unsigned callFinallyRegionIndex = ehGetCallFinallyRegionIndex(finallyIndex, &inTryRegion); - if (inTryRegion) + if (callFinallyRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX) { - *startBlock = ehDsc->ebdTryBeg; - *lastBlock = ehDsc->ebdTryLast; + *startBlock = fgFirstBB; + *lastBlock = fgLastBBInMainFunction(); } else { - *startBlock = ehDsc->ebdHndBeg; - *lastBlock = ehDsc->ebdHndLast; + EHblkDsc* ehDsc = ehGetDsc(callFinallyRegionIndex); + + if (inTryRegion) + { + *startBlock = 
ehDsc->ebdTryBeg; + *lastBlock = ehDsc->ebdTryLast; + } + else + { + *startBlock = ehDsc->ebdHndBeg; + *lastBlock = ehDsc->ebdHndLast; + } } } -#else // !FEATURE_EH_CALLFINALLY_THUNKS - EHblkDsc* ehDsc = ehGetDsc(finallyIndex); - *startBlock = ehDsc->ebdTryBeg; - *lastBlock = ehDsc->ebdTryLast; -#endif // !FEATURE_EH_CALLFINALLY_THUNKS + else + { + EHblkDsc* ehDsc = ehGetDsc(finallyIndex); + *startBlock = ehDsc->ebdTryBeg; + *lastBlock = ehDsc->ebdTryLast; + } } #ifdef DEBUG @@ -989,8 +1004,6 @@ bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsign #endif // DEBUG -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * * Are there (or will there be) any funclets in the function? @@ -998,7 +1011,14 @@ bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsign bool Compiler::ehAnyFunclets() { - return compHndBBtabCount > 0; // if there is any EH, there will be funclets + if (UsesFunclets()) + { + return compHndBBtabCount > 0; // if there is any EH, there will be funclets + } + else + { + return false; + } } /***************************************************************************** @@ -1010,17 +1030,24 @@ bool Compiler::ehAnyFunclets() unsigned Compiler::ehFuncletCount() { - unsigned funcletCnt = 0; - - for (EHblkDsc* const HBtab : EHClauses(this)) + if (UsesFunclets()) { - if (HBtab->HasFilter()) + unsigned funcletCnt = 0; + + for (EHblkDsc* const HBtab : EHClauses(this)) { + if (HBtab->HasFilter()) + { + ++funcletCnt; + } ++funcletCnt; } - ++funcletCnt; + return funcletCnt; + } + else + { + return 0; } - return funcletCnt; } /***************************************************************************** @@ -1037,36 +1064,41 @@ unsigned Compiler::ehFuncletCount() */ unsigned Compiler::bbThrowIndex(BasicBlock* blk) { - if (!blk->hasTryIndex() && !blk->hasHndIndex()) + if (UsesFunclets()) { - return -1; - } + if (!blk->hasTryIndex() && !blk->hasHndIndex()) + 
{ + return -1; + } - const unsigned tryIndex = blk->hasTryIndex() ? blk->getTryIndex() : USHRT_MAX; - const unsigned hndIndex = blk->hasHndIndex() ? blk->getHndIndex() : USHRT_MAX; - assert(tryIndex != hndIndex); - assert(tryIndex != USHRT_MAX || hndIndex != USHRT_MAX); + const unsigned tryIndex = blk->hasTryIndex() ? blk->getTryIndex() : USHRT_MAX; + const unsigned hndIndex = blk->hasHndIndex() ? blk->getHndIndex() : USHRT_MAX; + assert(tryIndex != hndIndex); + assert(tryIndex != USHRT_MAX || hndIndex != USHRT_MAX); - if (tryIndex < hndIndex) - { - // The most enclosing region is a try body, use it - assert(tryIndex <= 0x3FFFFFFF); - return tryIndex; - } + if (tryIndex < hndIndex) + { + // The most enclosing region is a try body, use it + assert(tryIndex <= 0x3FFFFFFF); + return tryIndex; + } + + // The most enclosing region is a handler which will be a funclet + // Now we have to figure out if blk is in the filter or handler + assert(hndIndex <= 0x3FFFFFFF); + if (ehGetDsc(hndIndex)->InFilterRegionBBRange(blk)) + { + return hndIndex | 0x40000000; + } - // The most enclosing region is a handler which will be a funclet - // Now we have to figure out if blk is in the filter or handler - assert(hndIndex <= 0x3FFFFFFF); - if (ehGetDsc(hndIndex)->InFilterRegionBBRange(blk)) + return hndIndex | 0x80000000; + } + else { - return hndIndex | 0x40000000; + return blk->bbTryIndex; } - - return hndIndex | 0x80000000; } -#endif // FEATURE_EH_FUNCLETS - /***************************************************************************** * Determine the emitter code cookie for a block, for unwind purposes. */ @@ -1352,27 +1384,26 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab) */ void Compiler::fgAllocEHTable() { -#if defined(FEATURE_EH_FUNCLETS) - - // We need to allocate space for EH clauses that will be used by funclets - // as well as one for each EH clause from the IL. Nested EH clauses pulled - // out as funclets create one EH clause for each enclosing region. 
Thus, - // the maximum number of clauses we will need might be very large. We allocate - // twice the number of EH clauses in the IL, which should be good in practice. - // In extreme cases, we might need to abandon this and reallocate. See - // fgAddEHTableEntry() for more details. + if (UsesFunclets()) + { + // We need to allocate space for EH clauses that will be used by funclets + // as well as one for each EH clause from the IL. Nested EH clauses pulled + // out as funclets create one EH clause for each enclosing region. Thus, + // the maximum number of clauses we will need might be very large. We allocate + // twice the number of EH clauses in the IL, which should be good in practice. + // In extreme cases, we might need to abandon this and reallocate. See + // fgAddEHTableEntry() for more details. #ifdef DEBUG - compHndBBtabAllocCount = info.compXcptnsCount; // force the resizing code to hit more frequently in DEBUG -#else // DEBUG - compHndBBtabAllocCount = info.compXcptnsCount * 2; -#endif // DEBUG - -#else // !FEATURE_EH_FUNCLETS - - compHndBBtabAllocCount = info.compXcptnsCount; - -#endif // !FEATURE_EH_FUNCLETS + compHndBBtabAllocCount = info.compXcptnsCount; // force the resizing code to hit more frequently in DEBUG +#else // DEBUG + compHndBBtabAllocCount = info.compXcptnsCount * 2; +#endif // DEBUG + } + else + { + compHndBBtabAllocCount = info.compXcptnsCount; + } compHndBBtab = new (this, CMK_BasicBlock) EHblkDsc[compHndBBtabAllocCount]; @@ -1492,8 +1523,6 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum) } } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * * Add a single exception table entry at index 'XTnum', [0 <= XTnum <= compHndBBtabCount]. 
@@ -1505,6 +1534,8 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum) */ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum) { + assert(UsesFunclets()); + if (XTnum != compHndBBtabCount) { // Update all enclosing links that will get invalidated by inserting an entry at 'XTnum' @@ -1600,8 +1631,6 @@ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum) return compHndBBtab + XTnum; } -#endif // FEATURE_EH_FUNCLETS - /***************************************************************************** * * Sort the EH table if necessary. @@ -2989,7 +3018,6 @@ void Compiler::fgVerifyHandlerTab() assert(!HBtab->ebdFilter->HasFlag(BBF_REMOVED)); } -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { assert(HBtab->ebdHndBeg->HasFlag(BBF_FUNCLET_BEG)); @@ -2999,7 +3027,6 @@ void Compiler::fgVerifyHandlerTab() assert(HBtab->ebdFilter->HasFlag(BBF_FUNCLET_BEG)); } } -#endif // FEATURE_EH_FUNCLETS } // I want to assert things about the relative ordering of blocks in the block list using @@ -3053,7 +3080,6 @@ void Compiler::fgVerifyHandlerTab() blockHndBegSet[i] = false; } -#if defined(FEATURE_EH_FUNCLETS) bool isLegalFirstFunclet = false; unsigned bbNumFirstFunclet = 0; @@ -3069,7 +3095,6 @@ void Compiler::fgVerifyHandlerTab() { assert(fgFirstFuncletBB == nullptr); } -#endif // FEATURE_EH_FUNCLETS for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { @@ -3118,7 +3143,6 @@ void Compiler::fgVerifyHandlerTab() assert((bbNumHndLast < bbNumTryBeg) || (bbNumTryLast < bbNumHndBeg)); } -#if defined(FEATURE_EH_FUNCLETS) // If funclets have been created, check the first funclet block. The first funclet block must be the // first block of a filter or handler. All filter/handler blocks must come after it. // Note that 'try' blocks might come either before or after it. 
If after, they will be nested within @@ -3167,7 +3191,6 @@ void Compiler::fgVerifyHandlerTab() } } } -#endif // FEATURE_EH_FUNCLETS // Check the 'try' region nesting, using ebdEnclosingTryIndex. // Only check one level of nesting, since we'll check the outer EH region (and its nesting) when we get to it @@ -3192,7 +3215,6 @@ void Compiler::fgVerifyHandlerTab() // this 'try' might be in a handler that is pulled out to the funclet region, while the outer 'try' // remains in the main function region. -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // If both the 'try' region and the outer 'try' region are in the main function area, then we can @@ -3225,7 +3247,6 @@ void Compiler::fgVerifyHandlerTab() assert((bbNumHndLast < bbNumOuterTryBeg) || (bbNumOuterTryLast < bbNumHndBeg)); } else -#endif // FEATURE_EH_FUNCLETS { if (multipleBegBlockNormalizationDone) { @@ -3273,7 +3294,6 @@ void Compiler::fgVerifyHandlerTab() // funclets have been created, it's harder to make any relationship asserts about the order of nested // handlers, which also have been made into funclets. -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { if (handlerBegIsTryBegNormalizationDone) @@ -3300,7 +3320,6 @@ void Compiler::fgVerifyHandlerTab() assert((bbNumHndLast < bbNumOuterHndBeg) || (bbNumOuterHndLast < bbNumHndBeg)); } else -#endif // FEATURE_EH_FUNCLETS { if (handlerBegIsTryBegNormalizationDone) { @@ -3360,9 +3379,7 @@ void Compiler::fgVerifyHandlerTab() } } -#if defined(FEATURE_EH_FUNCLETS) assert(!fgFuncletsCreated || isLegalFirstFunclet); -#endif // FEATURE_EH_FUNCLETS // Figure out what 'try' and handler index each basic block should have, // and check the blocks against that. 
This depends on the more nested EH @@ -3402,7 +3419,6 @@ void Compiler::fgVerifyHandlerTab() } } -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // Mark all the funclet 'try' indices correctly, since they do not exist in the linear 'try' region that @@ -3432,7 +3448,6 @@ void Compiler::fgVerifyHandlerTab() } } } -#endif // FEATURE_EH_FUNCLETS // Make sure that all blocks have the right index, including those blocks that should have zero (no EH region). for (BasicBlock* const block : Blocks()) @@ -3446,13 +3461,11 @@ void Compiler::fgVerifyHandlerTab() { assert(block->bbCatchTyp == BBCT_NONE); -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // Make sure blocks that aren't the first block of a funclet do not have the BBF_FUNCLET_BEG flag set. assert(!block->HasFlag(BBF_FUNCLET_BEG)); } -#endif // FEATURE_EH_FUNCLETS } // Check for legal block types @@ -3511,9 +3524,12 @@ void Compiler::fgDispHandlerTab() } printf("\nindex "); -#if !defined(FEATURE_EH_FUNCLETS) - printf("nest, "); -#endif // !FEATURE_EH_FUNCLETS +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) + { + printf("nest, "); + } +#endif // FEATURE_EH_WINDOWS_X86 printf("eTry, eHnd\n"); unsigned XTnum; @@ -3988,8 +4004,6 @@ void Compiler::verCheckNestingLevel(EHNodeDsc* root) } } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * Is this an intra-handler control flow edge? 
* @@ -4013,14 +4027,14 @@ void Compiler::verCheckNestingLevel(EHNodeDsc* root) bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) { // Some simple preconditions (as stated above) + assert(UsesFunclets()); assert(!fgFuncletsCreated); assert(fgGetPredForBlock(block, predBlock) != nullptr); assert(block->hasHndIndex()); EHblkDsc* xtab = ehGetDsc(block->getHndIndex()); -#if FEATURE_EH_CALLFINALLY_THUNKS - if (xtab->HasFinallyHandler()) + if (UsesCallFinallyThunks() && xtab->HasFinallyHandler()) { assert((xtab->ebdHndBeg == block) || // The normal case (xtab->ebdHndBeg->NextIs(block) && @@ -4048,7 +4062,6 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) return false; } } -#endif // FEATURE_EH_CALLFINALLY_THUNKS assert(predBlock->hasHndIndex() || predBlock->hasTryIndex()); @@ -4117,6 +4130,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) bool Compiler::fgAnyIntraHandlerPreds(BasicBlock* block) { + assert(UsesFunclets()); assert(block->hasHndIndex()); assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler @@ -4132,7 +4146,7 @@ bool Compiler::fgAnyIntraHandlerPreds(BasicBlock* block) return false; } -#else // !FEATURE_EH_FUNCLETS +#if defined(FEATURE_EH_WINDOWS_X86) /***************************************************************************** * @@ -4145,6 +4159,8 @@ bool Compiler::fgRelocateEHRegions() { bool result = false; // Our return value + assert(!UsesFunclets()); + #ifdef DEBUG if (verbose) printf("*************** In fgRelocateEHRegions()\n"); @@ -4249,7 +4265,7 @@ bool Compiler::fgRelocateEHRegions() return result; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 //------------------------------------------------------------------------ // fgExtendEHRegionBefore: Modify the EH table to account for a new block. 
@@ -4307,14 +4323,12 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) block->bbRefs--; bPrev->bbRefs++; -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { assert(block->HasFlag(BBF_FUNCLET_BEG)); bPrev->SetFlags(BBF_FUNCLET_BEG); block->RemoveFlags(BBF_FUNCLET_BEG); } -#endif // FEATURE_EH_FUNCLETS // If this is a handler for a filter, the last block of the filter will end with // a BBJ_EHFILTERRET block that jumps to the first block of its handler. @@ -4354,14 +4368,12 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) HBtab->ebdFilter = bPrev; bPrev->SetFlags(BBF_DONT_REMOVE); -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { assert(block->HasFlag(BBF_FUNCLET_BEG)); bPrev->SetFlags(BBF_FUNCLET_BEG); block->RemoveFlags(BBF_FUNCLET_BEG); } -#endif // FEATURE_EH_FUNCLETS bPrev->bbRefs++; } diff --git a/src/coreclr/jit/jiteh.h b/src/coreclr/jit/jiteh.h index 55b56ac9833c42..eb4c1bfbd5baf6 100644 --- a/src/coreclr/jit/jiteh.h +++ b/src/coreclr/jit/jiteh.h @@ -91,11 +91,11 @@ struct EHblkDsc EHHandlerType ebdHandlerType; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // How nested is the try/handler within other *handlers* - 0 for outermost clauses, 1 for nesting with a handler, // etc. unsigned short ebdHandlerNestingLevel; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 static const unsigned short NO_ENCLOSING_INDEX = USHRT_MAX; @@ -110,8 +110,6 @@ struct EHblkDsc // The index of the enclosing outer handler region, NO_ENCLOSING_INDEX if none. unsigned short ebdEnclosingHndIndex; -#if defined(FEATURE_EH_FUNCLETS) - // After funclets are created, this is the index of corresponding FuncInfoDsc // Special case for Filter/Filter-handler: // Like the IL the filter funclet immediately precedes the filter-handler funclet. @@ -119,8 +117,6 @@ struct EHblkDsc // funclet index, just subtract 1. 
unsigned short ebdFuncIndex; -#endif // FEATURE_EH_FUNCLETS - IL_OFFSET ebdTryBegOffset; // IL offsets of EH try/end regions as they are imported IL_OFFSET ebdTryEndOffset; IL_OFFSET ebdFilterBegOffset; // only set if HasFilter() diff --git a/src/coreclr/jit/jitgcinfo.h b/src/coreclr/jit/jitgcinfo.h index 2258903a0603eb..02fd49cead9cb3 100644 --- a/src/coreclr/jit/jitgcinfo.h +++ b/src/coreclr/jit/jitgcinfo.h @@ -365,8 +365,6 @@ class GCInfo #endif // JIT32_GCENCODER -#if !defined(JIT32_GCENCODER) || defined(FEATURE_EH_FUNCLETS) - // This method expands the tracked stack variables lifetimes so that any lifetimes within filters // are reported as pinned. void gcMarkFilterVarsPinned(); @@ -378,8 +376,6 @@ class GCInfo void gcDumpVarPtrDsc(varPtrDsc* desc); #endif // DEBUG -#endif // !defined(JIT32_GCENCODER) || defined(FEATURE_EH_FUNCLETS) - #if DUMP_GC_TABLES void gcFindPtrsInFrame(const void* infoBlock, const void* codeBlock, unsigned offs); diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 3e182e0820a1c5..18dd23498d850b 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -48,9 +48,9 @@ void Compiler::lvaInit() lvaTrackedFixed = false; // false: We can still add new tracked variables lvaDoneFrameLayout = NO_FRAME_LAYOUT; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) lvaShadowSPslotsVar = BAD_VAR_NUM; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 lvaInlinedPInvokeFrameVar = BAD_VAR_NUM; lvaReversePInvokeFrameVar = BAD_VAR_NUM; #if FEATURE_FIXED_OUT_ARGS @@ -79,9 +79,7 @@ void Compiler::lvaInit() lvaInlineeReturnSpillTemp = BAD_VAR_NUM; gsShadowVarInfo = nullptr; -#if defined(FEATURE_EH_FUNCLETS) - lvaPSPSym = BAD_VAR_NUM; -#endif + lvaPSPSym = BAD_VAR_NUM; #if FEATURE_SIMD lvaSIMDInitTempVarNum = BAD_VAR_NUM; #endif // FEATURE_SIMD @@ -4038,8 +4036,9 @@ void Compiler::lvaSortByRefCount() { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NoRegVars)); } 
-#if defined(JIT32_GCENCODER) && defined(FEATURE_EH_FUNCLETS) - if (lvaIsOriginalThisArg(lclNum) && (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0) +#if defined(JIT32_GCENCODER) + if (UsesFunclets() && lvaIsOriginalThisArg(lclNum) && + (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0) { // For x86/Linux, we need to track "this". // However we cannot have it in tracked variables, so we set "this" pointer always untracked @@ -4690,11 +4689,11 @@ PhaseStatus Compiler::lvaMarkLocalVars() unsigned const lvaCountOrig = lvaCount; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // Grab space for exception handling - if (ehNeedsShadowSPslots()) + if (!UsesFunclets() && ehNeedsShadowSPslots()) { // The first slot is reserved for ICodeManager::FixContext(ppEndRegion) // ie. the offset of the end-of-last-executed-filter @@ -4717,20 +4716,18 @@ PhaseStatus Compiler::lvaMarkLocalVars() lvaSetVarAddrExposed(lvaShadowSPslotsVar DEBUGARG(AddressExposedReason::EXTERNALLY_VISIBLE_IMPLICITLY)); } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 // PSPSym is not used by the NativeAOT ABI if (!IsTargetAbi(CORINFO_NATIVEAOT_ABI)) { -#if defined(FEATURE_EH_FUNCLETS) - if (ehNeedsPSPSym()) + if (UsesFunclets() && ehNeedsPSPSym()) { lvaPSPSym = lvaGrabTempWithImplicitUse(false DEBUGARG("PSPSym")); LclVarDsc* lclPSPSym = lvaGetDesc(lvaPSPSym); lclPSPSym->lvType = TYP_I_IMPL; lvaSetVarDoNotEnregister(lvaPSPSym DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } -#endif // FEATURE_EH_FUNCLETS } #ifdef JIT32_GCENCODER @@ -5596,7 +5593,7 @@ void Compiler::lvaFixVirtualFrameOffsets() { LclVarDsc* varDsc; -#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64) +#if defined(TARGET_AMD64) if (lvaPSPSym != BAD_VAR_NUM) { // We need to fix the offset of the PSPSym so there is no padding between it and the outgoing argument space. 
@@ -6705,7 +6702,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() } } -#if defined(FEATURE_EH_FUNCLETS) && (defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)) +#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) if (lvaPSPSym != BAD_VAR_NUM) { // On ARM/ARM64, if we need a PSPSym we allocate it early since funclets @@ -6714,7 +6711,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } -#endif // FEATURE_EH_FUNCLETS && (TARGET_ARMARCH || TARGET_LOONGARCH64 || TARGET_RISCV64) +#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 || TARGET_RISCV64 if (mustDoubleAlign) { @@ -6809,9 +6806,9 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() } #endif -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) /* If we need space for slots for shadow SP, reserve it now */ - if (ehNeedsShadowSPslots()) + if (!UsesFunclets() && ehNeedsShadowSPslots()) { noway_assert(codeGen->isFramePointerUsed()); // else offsets of locals of frameless methods will be incorrect if (!lvaReportParamTypeArg()) @@ -6828,7 +6825,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() } stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaShadowSPslotsVar, lvaLclSize(lvaShadowSPslotsVar), stkOffs); } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 if (compGSReorderStackLayout) { @@ -7029,12 +7026,10 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() // These need to be located as the very first variables (highest memory address) // and so they have already been assigned an offset - if ( -#if defined(FEATURE_EH_FUNCLETS) - lclNum == lvaPSPSym || -#else + if (lclNum == lvaPSPSym || +#if defined(FEATURE_EH_WINDOWS_X86) lclNum == lvaShadowSPslotsVar || -#endif // FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 #ifdef 
JIT32_GCENCODER lclNum == lvaLocAllocSPvar || #endif // JIT32_GCENCODER @@ -7271,7 +7266,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() } } -#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64) +#if defined(TARGET_AMD64) if (lvaPSPSym != BAD_VAR_NUM) { // On AMD64, if we need a PSPSym, allocate it last, immediately above the outgoing argument @@ -7280,7 +7275,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } -#endif // FEATURE_EH_FUNCLETS && defined(TARGET_AMD64) +#endif // TARGET_AMD64 #ifdef TARGET_ARM64 if (!codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() && isFramePointerUsed()) // Note that currently we always have diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 05c65d2de3450c..31a5e52ba0cd7d 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -662,8 +662,6 @@ void Compiler::fgDispDebugScopes() * Mark variables live across their entire scope. */ -#if defined(FEATURE_EH_FUNCLETS) - void Compiler::fgExtendDbgScopes() { compResetScopeLists(); @@ -672,121 +670,107 @@ void Compiler::fgExtendDbgScopes() if (verbose) { printf("\nMarking vars alive over their entire scope :\n\n"); - } - - if (verbose) - { compDispScopeLists(); } #endif // DEBUG VARSET_TP inScope(VarSetOps::MakeEmpty(this)); - // Mark all tracked LocalVars live over their scope - walk the blocks - // keeping track of the current life, and assign it to the blocks. - - for (BasicBlock* const block : Blocks()) + if (UsesFunclets()) { - // If we get to a funclet, reset the scope lists and start again, since the block - // offsets will be out of order compared to the previous block. + // Mark all tracked LocalVars live over their scope - walk the blocks + // keeping track of the current life, and assign it to the blocks. 
- if (block->HasFlag(BBF_FUNCLET_BEG)) + for (BasicBlock* const block : Blocks()) { - compResetScopeLists(); - VarSetOps::ClearD(this, inScope); - } - - // Process all scopes up to the current offset + // If we get to a funclet, reset the scope lists and start again, since the block + // offsets will be out of order compared to the previous block. - if (block->bbCodeOffs != BAD_IL_OFFSET) - { - compProcessScopesUntil(block->bbCodeOffs, &inScope, &Compiler::fgBeginScopeLife, &Compiler::fgEndScopeLife); - } - - // Assign the current set of variables that are in scope to the block variables tracking this. + if (block->HasFlag(BBF_FUNCLET_BEG)) + { + compResetScopeLists(); + VarSetOps::ClearD(this, inScope); + } - fgMarkInScope(block, inScope); - } + // Process all scopes up to the current offset -#ifdef DEBUG - if (verbose) - { - fgDispDebugScopes(); - } -#endif // DEBUG -} + if (block->bbCodeOffs != BAD_IL_OFFSET) + { + compProcessScopesUntil(block->bbCodeOffs, &inScope, &Compiler::fgBeginScopeLife, + &Compiler::fgEndScopeLife); + } -#else // !FEATURE_EH_FUNCLETS + // Assign the current set of variables that are in scope to the block variables tracking this. -void Compiler::fgExtendDbgScopes() -{ - compResetScopeLists(); + fgMarkInScope(block, inScope); + } #ifdef DEBUG - if (verbose) - { - printf("\nMarking vars alive over their entire scope :\n\n"); - compDispScopeLists(); - } + if (verbose) + { + fgDispDebugScopes(); + } #endif // DEBUG + } +#if defined(FEATURE_EH_WINDOWS_X86) + else + { + compProcessScopesUntil(0, &inScope, &Compiler::fgBeginScopeLife, &Compiler::fgEndScopeLife); - VARSET_TP inScope(VarSetOps::MakeEmpty(this)); - compProcessScopesUntil(0, &inScope, &Compiler::fgBeginScopeLife, &Compiler::fgEndScopeLife); - - IL_OFFSET lastEndOffs = 0; - - // Mark all tracked LocalVars live over their scope - walk the blocks - // keeping track of the current life, and assign it to the blocks. 
+ IL_OFFSET lastEndOffs = 0; - for (BasicBlock* const block : Blocks()) - { - // Find scopes becoming alive. If there is a gap in the instr - // sequence, we need to process any scopes on those missing offsets. + // Mark all tracked LocalVars live over their scope - walk the blocks + // keeping track of the current life, and assign it to the blocks. - if (block->bbCodeOffs != BAD_IL_OFFSET) + for (BasicBlock* const block : Blocks()) { - if (lastEndOffs != block->bbCodeOffs) - { - noway_assert(lastEndOffs < block->bbCodeOffs); + // Find scopes becoming alive. If there is a gap in the instr + // sequence, we need to process any scopes on those missing offsets. - compProcessScopesUntil(block->bbCodeOffs, &inScope, &Compiler::fgBeginScopeLife, - &Compiler::fgEndScopeLife); - } - else + if (block->bbCodeOffs != BAD_IL_OFFSET) { - while (VarScopeDsc* varScope = compGetNextEnterScope(block->bbCodeOffs)) + if (lastEndOffs != block->bbCodeOffs) + { + noway_assert(lastEndOffs < block->bbCodeOffs); + + compProcessScopesUntil(block->bbCodeOffs, &inScope, &Compiler::fgBeginScopeLife, + &Compiler::fgEndScopeLife); + } + else { - fgBeginScopeLife(&inScope, varScope); + while (VarScopeDsc* varScope = compGetNextEnterScope(block->bbCodeOffs)) + { + fgBeginScopeLife(&inScope, varScope); + } } } - } - // Assign the current set of variables that are in scope to the block variables tracking this. + // Assign the current set of variables that are in scope to the block variables tracking this. - fgMarkInScope(block, inScope); + fgMarkInScope(block, inScope); - // Find scopes going dead. + // Find scopes going dead. 
- if (block->bbCodeOffsEnd != BAD_IL_OFFSET) - { - VarScopeDsc* varScope; - while ((varScope = compGetNextExitScope(block->bbCodeOffsEnd)) != nullptr) + if (block->bbCodeOffsEnd != BAD_IL_OFFSET) { - fgEndScopeLife(&inScope, varScope); - } + VarScopeDsc* varScope; + while ((varScope = compGetNextExitScope(block->bbCodeOffsEnd)) != nullptr) + { + fgEndScopeLife(&inScope, varScope); + } - lastEndOffs = block->bbCodeOffsEnd; + lastEndOffs = block->bbCodeOffsEnd; + } } - } - /* Everything should be out of scope by the end of the method. But if the - last BB got removed, then inScope may not be empty. */ + /* Everything should be out of scope by the end of the method. But if the + last BB got removed, then inScope may not be empty. */ - noway_assert(VarSetOps::IsEmpty(this, inScope) || lastEndOffs < info.compILCodeSize); + noway_assert(VarSetOps::IsEmpty(this, inScope) || lastEndOffs < info.compILCodeSize); + } +#endif // FEATURE_EH_WINDOWS_X86 } -#endif // !FEATURE_EH_FUNCLETS - /***************************************************************************** * * For debuggable code, we allow redundant assignments to vars @@ -1945,9 +1929,9 @@ void Compiler::fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALAR case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_SWITCH_TABLE: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp index f380daeab59ac2..7fe119ccfd165c 100644 --- a/src/coreclr/jit/lsraxarch.cpp +++ b/src/coreclr/jit/lsraxarch.cpp @@ -590,7 +590,7 @@ int LinearScan::BuildNode(GenTree* tree) BuildDef(tree, RBM_EXCEPTION_OBJECT); break; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: srcCount = 0; assert(dstCount == 0); diff --git a/src/coreclr/jit/optimizer.cpp 
b/src/coreclr/jit/optimizer.cpp index 7daf7104271fdc..e32fad95efa93c 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -3184,8 +3184,7 @@ bool Compiler::optCanonicalizeExit(FlowGraphNaturalLoop* loop, BasicBlock* exit) JITDUMP("Canonicalize exit " FMT_BB " for " FMT_LP " to have only loop predecessors\n", exit->bbNum, loop->GetIndex()); -#if FEATURE_EH_CALLFINALLY_THUNKS - if (exit->KindIs(BBJ_CALLFINALLY)) + if (UsesCallFinallyThunks() && exit->KindIs(BBJ_CALLFINALLY)) { // Branches to a BBJ_CALLFINALLY _must_ come from inside its associated // try region, and when we have callfinally thunks the BBJ_CALLFINALLY @@ -3206,7 +3205,6 @@ bool Compiler::optCanonicalizeExit(FlowGraphNaturalLoop* loop, BasicBlock* exit) } } else -#endif // FEATURE_EH_CALLFINALLY_THUNKS { newExit = fgNewBBbefore(BBJ_ALWAYS, exit, false); fgSetEHRegionForNewPreheaderOrExit(newExit); diff --git a/src/coreclr/jit/scopeinfo.cpp b/src/coreclr/jit/scopeinfo.cpp index ddb766e94a0de9..1dd0330a859135 100644 --- a/src/coreclr/jit/scopeinfo.cpp +++ b/src/coreclr/jit/scopeinfo.cpp @@ -1449,12 +1449,10 @@ void CodeGen::siInit() assert(compiler->opts.compScopeInfo); -#if defined(FEATURE_EH_FUNCLETS) if (compiler->info.compVarScopesCount > 0) { siInFuncletRegion = false; } -#endif // FEATURE_EH_FUNCLETS siLastEndOffs = 0; @@ -1482,7 +1480,6 @@ void CodeGen::siBeginBlock(BasicBlock* block) return; } -#if defined(FEATURE_EH_FUNCLETS) if (siInFuncletRegion) { return; @@ -1498,7 +1495,6 @@ void CodeGen::siBeginBlock(BasicBlock* block) return; } -#endif // FEATURE_EH_FUNCLETS #ifdef DEBUG if (verbose) @@ -1557,45 +1553,44 @@ void CodeGen::siOpenScopesForNonTrackedVars(const BasicBlock* block, unsigned in // Check if there are any scopes on the current block's start boundary. 
VarScopeDsc* varScope = nullptr; -#if defined(FEATURE_EH_FUNCLETS) - - // If we find a spot where the code offset isn't what we expect, because - // there is a gap, it might be because we've moved the funclets out of - // line. Catch up with the enter and exit scopes of the current block. - // Ignore the enter/exit scope changes of the missing scopes, which for - // funclets must be matched. - if (lastBlockILEndOffset != beginOffs) + if (compiler->UsesFunclets()) { - assert(beginOffs > 0); - assert(lastBlockILEndOffset < beginOffs); + // If we find a spot where the code offset isn't what we expect, because + // there is a gap, it might be because we've moved the funclets out of + // line. Catch up with the enter and exit scopes of the current block. + // Ignore the enter/exit scope changes of the missing scopes, which for + // funclets must be matched. + if (lastBlockILEndOffset != beginOffs) + { + assert(beginOffs > 0); + assert(lastBlockILEndOffset < beginOffs); - JITDUMP("Scope info: found offset hole. lastOffs=%u, currOffs=%u\n", lastBlockILEndOffset, beginOffs); + JITDUMP("Scope info: found offset hole. 
lastOffs=%u, currOffs=%u\n", lastBlockILEndOffset, beginOffs); - // Skip enter scopes - while ((varScope = compiler->compGetNextEnterScope(beginOffs - 1, true)) != nullptr) - { - /* do nothing */ - JITDUMP("Scope info: skipping enter scope, LVnum=%u\n", varScope->vsdLVnum); - } + // Skip enter scopes + while ((varScope = compiler->compGetNextEnterScope(beginOffs - 1, true)) != nullptr) + { + /* do nothing */ + JITDUMP("Scope info: skipping enter scope, LVnum=%u\n", varScope->vsdLVnum); + } - // Skip exit scopes - while ((varScope = compiler->compGetNextExitScope(beginOffs - 1, true)) != nullptr) - { - /* do nothing */ - JITDUMP("Scope info: skipping exit scope, LVnum=%u\n", varScope->vsdLVnum); + // Skip exit scopes + while ((varScope = compiler->compGetNextExitScope(beginOffs - 1, true)) != nullptr) + { + /* do nothing */ + JITDUMP("Scope info: skipping exit scope, LVnum=%u\n", varScope->vsdLVnum); + } } } - -#else // !FEATURE_EH_FUNCLETS - - if (lastBlockILEndOffset != beginOffs) + else { - assert(lastBlockILEndOffset < beginOffs); - return; + if (lastBlockILEndOffset != beginOffs) + { + assert(lastBlockILEndOffset < beginOffs); + return; + } } -#endif // !FEATURE_EH_FUNCLETS - while ((varScope = compiler->compGetNextEnterScope(beginOffs)) != nullptr) { LclVarDsc* lclVarDsc = compiler->lvaGetDesc(varScope->vsdVarNum); @@ -1632,12 +1627,10 @@ void CodeGen::siEndBlock(BasicBlock* block) { assert(compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0)); -#if defined(FEATURE_EH_FUNCLETS) if (siInFuncletRegion) { return; } -#endif // FEATURE_EH_FUNCLETS unsigned endOffs = block->bbCodeOffsEnd; diff --git a/src/coreclr/jit/targetamd64.h b/src/coreclr/jit/targetamd64.h index ba2109b9cb8b28..7e72da9cf2ccdc 100644 --- a/src/coreclr/jit/targetamd64.h +++ b/src/coreclr/jit/targetamd64.h @@ -68,7 +68,6 @@ #define EMIT_TRACK_STACK_DEPTH 1 #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define 
FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. - #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #ifdef UNIX_AMD64_ABI #define ETW_EBP_FRAMED 1 // if 1 we cannot use EBP as a scratch register and must create EBP based frames for most methods #else // !UNIX_AMD64_ABI diff --git a/src/coreclr/jit/targetarm.h b/src/coreclr/jit/targetarm.h index ac9d72cab31f6c..a03c307094ad2c 100644 --- a/src/coreclr/jit/targetarm.h +++ b/src/coreclr/jit/targetarm.h @@ -40,7 +40,6 @@ // need to track stack depth, but this is currently necessary to get GC information reported at call sites. #define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. - #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods #define CSE_CONSTS 1 // Enable if we want to CSE constants diff --git a/src/coreclr/jit/targetarm64.h b/src/coreclr/jit/targetarm64.h index 2af309e4b365a8..cccbfdc6bae6b8 100644 --- a/src/coreclr/jit/targetarm64.h +++ b/src/coreclr/jit/targetarm64.h @@ -42,7 +42,6 @@ // need to track stack depth, but this is currently necessary to get GC information reported at call sites. #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. 
- #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods #define CSE_CONSTS 1 // Enable if we want to CSE constants diff --git a/src/coreclr/jit/targetloongarch64.h b/src/coreclr/jit/targetloongarch64.h index 736fd1406c304c..b045c43df7dfa9 100644 --- a/src/coreclr/jit/targetloongarch64.h +++ b/src/coreclr/jit/targetloongarch64.h @@ -47,8 +47,6 @@ // need to track stack depth, but this is currently necessary to get GC information reported at call sites. #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. - #define FEATURE_EH_FUNCLETS 1 - #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods #define CSE_CONSTS 1 // Enable if we want to CSE constants diff --git a/src/coreclr/jit/targetriscv64.h b/src/coreclr/jit/targetriscv64.h index 5ac82fa9a00974..33c1b0d4919096 100644 --- a/src/coreclr/jit/targetriscv64.h +++ b/src/coreclr/jit/targetriscv64.h @@ -42,7 +42,6 @@ // need to track stack depth, but this is currently necessary to get GC information reported at call sites. #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. 
- #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods #define CSE_CONSTS 1 // Enable if we want to CSE constants diff --git a/src/coreclr/jit/targetx86.h b/src/coreclr/jit/targetx86.h index 3a861c3d7ef35a..dfeb96ae9e977c 100644 --- a/src/coreclr/jit/targetx86.h +++ b/src/coreclr/jit/targetx86.h @@ -53,13 +53,8 @@ // target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, // filter-handler, fault) and directly execute 'finally' clauses. - -#ifdef FEATURE_EH_FUNCLETS - #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, - // protected by "cloned finally" clauses. -#else - #define FEATURE_EH_CALLFINALLY_THUNKS 0 // Generate call-to-finally code in "thunks" in the enclosing EH region, - // protected by "cloned finally" clauses. +#if !defined(UNIX_X86_ABI) + #define FEATURE_EH_WINDOWS_X86 1 // Enable support for SEH regions #endif #define ETW_EBP_FRAMED 1 // if 1 we cannot use EBP as a scratch register and must create EBP based // frames for most methods diff --git a/src/coreclr/jit/unwind.cpp b/src/coreclr/jit/unwind.cpp index e1ff9bc464a163..a51a52ab21d640 100644 --- a/src/coreclr/jit/unwind.cpp +++ b/src/coreclr/jit/unwind.cpp @@ -15,8 +15,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#if defined(FEATURE_EH_FUNCLETS) - //------------------------------------------------------------------------ // Compiler::unwindGetFuncLocations: Get the start/end emitter locations for this // function or funclet. 
If 'getHotSectionData' is true, get the start/end locations @@ -53,6 +51,8 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc) { + assert(UsesFunclets()); + if (func->funKind == FUNC_ROOT) { // Since all funclets are pulled out of line, the main code size is everything @@ -134,8 +134,6 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, } } -#endif // FEATURE_EH_FUNCLETS - #if defined(FEATURE_CFI_SUPPORT) void Compiler::createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR cfiOpcode, short dwarfReg, INT offset) @@ -184,21 +182,22 @@ void Compiler::unwindBegPrologCFI() { assert(compGeneratingProlog); -#if defined(FEATURE_EH_FUNCLETS) - FuncInfoDsc* func = funCurrentFunc(); + if (UsesFunclets()) + { + FuncInfoDsc* func = funCurrentFunc(); - // There is only one prolog for a function/funclet, and it comes first. So now is - // a good time to initialize all the unwind data structures. + // There is only one prolog for a function/funclet, and it comes first. So now is + // a good time to initialize all the unwind data structures. 
- unwindGetFuncLocations(func, true, &func->startLoc, &func->endLoc); + unwindGetFuncLocations(func, true, &func->startLoc, &func->endLoc); - if (fgFirstColdBlock != nullptr) - { - unwindGetFuncLocations(func, false, &func->coldStartLoc, &func->coldEndLoc); - } + if (fgFirstColdBlock != nullptr) + { + unwindGetFuncLocations(func, false, &func->coldStartLoc, &func->coldEndLoc); + } - func->cfiCodes = new (getAllocator(CMK_UnwindInfo)) CFICodeVector(getAllocator()); -#endif // FEATURE_EH_FUNCLETS + func->cfiCodes = new (getAllocator(CMK_UnwindInfo)) CFICodeVector(getAllocator()); + } } void Compiler::unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat) diff --git a/src/coreclr/jit/unwindarmarch.cpp b/src/coreclr/jit/unwindarmarch.cpp index b292d74968f6ac..51af7f24889d1b 100644 --- a/src/coreclr/jit/unwindarmarch.cpp +++ b/src/coreclr/jit/unwindarmarch.cpp @@ -571,7 +571,6 @@ void Compiler::unwindReserveFunc(FuncInfoDsc* func) } #endif // DEBUG -#ifdef FEATURE_EH_FUNCLETS // If hot/cold splitting occurred at fgFirstFuncletBB, then the main body is not split. 
const bool splitAtFirstFunclet = (funcHasColdSection && (fgFirstColdBlock == fgFirstFuncletBB)); @@ -579,7 +578,6 @@ void Compiler::unwindReserveFunc(FuncInfoDsc* func) { funcHasColdSection = false; } -#endif // FEATURE_EH_FUNCLETS #if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) diff --git a/src/coreclr/jit/unwindx86.cpp b/src/coreclr/jit/unwindx86.cpp index 32d077429af6a1..40e720d40c33ac 100644 --- a/src/coreclr/jit/unwindx86.cpp +++ b/src/coreclr/jit/unwindx86.cpp @@ -70,16 +70,17 @@ void Compiler::unwindSaveReg(regNumber reg, unsigned offset) // void Compiler::unwindReserve() { -#if defined(FEATURE_EH_FUNCLETS) - assert(!compGeneratingProlog); - assert(!compGeneratingEpilog); - - assert(compFuncInfoCount > 0); - for (unsigned funcIdx = 0; funcIdx < compFuncInfoCount; funcIdx++) + if (UsesFunclets()) { - unwindReserveFunc(funGetFunc(funcIdx)); + assert(!compGeneratingProlog); + assert(!compGeneratingEpilog); + + assert(compFuncInfoCount > 0); + for (unsigned funcIdx = 0; funcIdx < compFuncInfoCount; funcIdx++) + { + unwindReserveFunc(funGetFunc(funcIdx)); + } } -#endif } //------------------------------------------------------------------------ @@ -91,19 +92,19 @@ void Compiler::unwindReserve() // void Compiler::unwindEmit(void* pHotCode, void* pColdCode) { -#if defined(FEATURE_EH_FUNCLETS) - assert(!compGeneratingProlog); - assert(!compGeneratingEpilog); - - assert(compFuncInfoCount > 0); - for (unsigned funcIdx = 0; funcIdx < compFuncInfoCount; funcIdx++) + if (UsesFunclets()) { - unwindEmitFunc(funGetFunc(funcIdx), pHotCode, pColdCode); + assert(!compGeneratingProlog); + assert(!compGeneratingEpilog); + + assert(compFuncInfoCount > 0); + for (unsigned funcIdx = 0; funcIdx < compFuncInfoCount; funcIdx++) + { + unwindEmitFunc(funGetFunc(funcIdx), pHotCode, pColdCode); + } } -#endif // FEATURE_EH_FUNCLETS } -#if defined(FEATURE_EH_FUNCLETS) //------------------------------------------------------------------------ // Compiler::unwindReserveFunc: 
Reserve the unwind information from the VM for a // given main function or funclet. @@ -113,6 +114,7 @@ void Compiler::unwindEmit(void* pHotCode, void* pColdCode) // void Compiler::unwindReserveFunc(FuncInfoDsc* func) { + assert(UsesFunclets()); unwindReserveFuncHelper(func, true); if (fgFirstColdBlock != nullptr) @@ -280,5 +282,3 @@ void Compiler::unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pCo eeAllocUnwindInfo((BYTE*)pHotCode, (BYTE*)pColdCode, startOffset, endOffset, sizeof(UNWIND_INFO), (BYTE*)&unwindInfo, (CorJitFuncKind)func->funKind); } - -#endif // FEATURE_EH_FUNCLETS diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 889e0227e992d9..755c19d1386a88 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -11338,7 +11338,7 @@ void Compiler::fgValueNumberTree(GenTree* tree) case GT_NOP: case GT_JMP: // Control flow case GT_LABEL: // Control flow -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: // Control flow #endif tree->gtVNPair = vnStore->VNPForVoid(); diff --git a/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs b/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs index 1d9ef515d4e49e..010b23ed6f5811 100644 --- a/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs +++ b/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs @@ -146,12 +146,6 @@ private static string GetTargetSpec(TargetDetails target) { targetOSComponent = "universal"; } -#if !READYTORUN - else if (target.OperatingSystem == TargetOS.Windows && target.Architecture == TargetArchitecture.X86) - { - targetOSComponent = "win_aot"; - } -#endif else { targetOSComponent = target.OperatingSystem == TargetOS.Windows ? 
"win" : "unix"; From d9dcc58d95b782124f7eb9d85b35d98085d78218 Mon Sep 17 00:00:00 2001 From: SingleAccretion <62474226+SingleAccretion@users.noreply.github.com> Date: Fri, 5 Apr 2024 21:57:14 +0300 Subject: [PATCH 121/132] Remove more `ASG` terminlogy from the codebase (#86760) * Excise 'assignment' terminology from the codebase * Standardize on the 'value' terminology for store operands But only in the frontend; backend has lots of "data"s, and it did not seem purposeful renaming them. --- src/coreclr/jit/compiler.h | 52 +++---- src/coreclr/jit/compiler.hpp | 5 +- src/coreclr/jit/copyprop.cpp | 4 +- src/coreclr/jit/earlyprop.cpp | 15 +- src/coreclr/jit/fginline.cpp | 4 +- src/coreclr/jit/fgopt.cpp | 8 +- src/coreclr/jit/fgstmt.cpp | 6 +- src/coreclr/jit/gcinfo.cpp | 4 +- src/coreclr/jit/gentree.cpp | 153 +++++++++++--------- src/coreclr/jit/gentree.h | 21 ++- src/coreclr/jit/gschecks.cpp | 6 +- src/coreclr/jit/hwintrinsicarm64.cpp | 10 +- src/coreclr/jit/ifconversion.cpp | 14 +- src/coreclr/jit/importer.cpp | 91 ++++++------ src/coreclr/jit/importercalls.cpp | 32 ++-- src/coreclr/jit/importervectorization.cpp | 8 +- src/coreclr/jit/indirectcalltransformer.cpp | 2 +- src/coreclr/jit/jitconfigvalues.h | 2 +- src/coreclr/jit/lclmorph.cpp | 12 +- src/coreclr/jit/lclvars.cpp | 4 +- src/coreclr/jit/liveness.cpp | 18 +-- src/coreclr/jit/lower.cpp | 13 +- src/coreclr/jit/lower.h | 8 +- src/coreclr/jit/lsra.cpp | 5 +- src/coreclr/jit/morph.cpp | 114 +++++++-------- src/coreclr/jit/morphblock.cpp | 44 +++--- src/coreclr/jit/objectalloc.cpp | 2 +- src/coreclr/jit/optcse.cpp | 12 +- src/coreclr/jit/optimizer.cpp | 16 +- src/coreclr/jit/rangecheck.cpp | 4 +- src/coreclr/jit/rangecheck.h | 6 +- src/coreclr/jit/redundantbranchopts.cpp | 30 ++-- src/coreclr/jit/scopeinfo.cpp | 2 +- src/coreclr/jit/simd.cpp | 6 +- src/coreclr/jit/targetarm.h | 4 +- src/coreclr/jit/targetarm64.h | 4 +- src/coreclr/jit/targetloongarch64.h | 4 +- src/coreclr/jit/valuenum.cpp | 57 ++++---- 38 files 
changed, 398 insertions(+), 404 deletions(-) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index f95d04c9418233..8a5a1632199eb5 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -3020,14 +3020,15 @@ class Compiler GenTree* gtNewConWithPattern(var_types type, uint8_t pattern); - GenTreeLclVar* gtNewStoreLclVarNode(unsigned lclNum, GenTree* data); + GenTreeLclVar* gtNewStoreLclVarNode(unsigned lclNum, GenTree* value); GenTreeLclFld* gtNewStoreLclFldNode( - unsigned lclNum, var_types type, ClassLayout* layout, unsigned offset, GenTree* data); + unsigned lclNum, var_types type, ClassLayout* layout, unsigned offset, GenTree* value); - GenTreeLclFld* gtNewStoreLclFldNode(unsigned lclNum, var_types type, unsigned offset, GenTree* data) + GenTreeLclFld* gtNewStoreLclFldNode(unsigned lclNum, var_types type, unsigned offset, GenTree* value) { - return gtNewStoreLclFldNode(lclNum, type, (type == TYP_STRUCT) ? data->GetLayout(this) : nullptr, offset, data); + return gtNewStoreLclFldNode( + lclNum, type, (type == TYP_STRUCT) ? 
value->GetLayout(this) : nullptr, offset, value); } GenTree* gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg); @@ -3382,7 +3383,7 @@ class Compiler GenTreeMDArr* gtNewMDArrLowerBound(GenTree* arrayOp, unsigned dim, unsigned rank, BasicBlock* block); - void gtInitializeStoreNode(GenTree* store, GenTree* data); + void gtInitializeStoreNode(GenTree* store, GenTree* value); void gtInitializeIndirNode(GenTreeIndir* indir, GenTreeFlags indirFlags); @@ -3391,10 +3392,10 @@ class Compiler GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr, GenTreeFlags indirFlags = GTF_EMPTY); GenTreeBlk* gtNewStoreBlkNode( - ClassLayout* layout, GenTree* addr, GenTree* data, GenTreeFlags indirFlags = GTF_EMPTY); + ClassLayout* layout, GenTree* addr, GenTree* value, GenTreeFlags indirFlags = GTF_EMPTY); GenTreeStoreInd* gtNewStoreIndNode( - var_types type, GenTree* addr, GenTree* data, GenTreeFlags indirFlags = GTF_EMPTY); + var_types type, GenTree* addr, GenTree* value, GenTreeFlags indirFlags = GTF_EMPTY); GenTree* gtNewLoadValueNode( var_types type, ClassLayout* layout, GenTree* addr, GenTreeFlags indirFlags = GTF_EMPTY); @@ -3410,16 +3411,17 @@ class Compiler } GenTree* gtNewStoreValueNode( - var_types type, ClassLayout* layout, GenTree* addr, GenTree* data, GenTreeFlags indirFlags = GTF_EMPTY); + var_types type, ClassLayout* layout, GenTree* addr, GenTree* value, GenTreeFlags indirFlags = GTF_EMPTY); - GenTree* gtNewStoreValueNode(ClassLayout* layout, GenTree* addr, GenTree* data, GenTreeFlags indirFlags = GTF_EMPTY) + GenTree* gtNewStoreValueNode( + ClassLayout* layout, GenTree* addr, GenTree* value, GenTreeFlags indirFlags = GTF_EMPTY) { - return gtNewStoreValueNode(layout->GetType(), layout, addr, data, indirFlags); + return gtNewStoreValueNode(layout->GetType(), layout, addr, value, indirFlags); } - GenTree* gtNewStoreValueNode(var_types type, GenTree* addr, GenTree* data, GenTreeFlags indirFlags = GTF_EMPTY) + GenTree* gtNewStoreValueNode(var_types type, GenTree* 
addr, GenTree* value, GenTreeFlags indirFlags = GTF_EMPTY) { - return gtNewStoreValueNode(type, nullptr, addr, data, indirFlags); + return gtNewStoreValueNode(type, nullptr, addr, value, indirFlags); } GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock); @@ -3442,7 +3444,7 @@ class Compiler CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp, - GenTree* assg); + GenTree* value); GenTree* gtNewNothingNode(); @@ -4625,12 +4627,12 @@ class Compiler void impAppendStmt(Statement* stmt); void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore); Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true); - void impStoreTemp(unsigned lclNum, - GenTree* val, - unsigned curLevel, - Statement** pAfterStmt = nullptr, - const DebugInfo& di = DebugInfo(), - BasicBlock* block = nullptr); + void impStoreToTemp(unsigned lclNum, + GenTree* val, + unsigned curLevel, + Statement** pAfterStmt = nullptr, + const DebugInfo& di = DebugInfo(), + BasicBlock* block = nullptr); Statement* impExtractLastStmt(); GenTree* impCloneExpr(GenTree* tree, GenTree** clone, @@ -5637,8 +5639,6 @@ class Compiler void fgUpdateConstTreeValueNumber(GenTree* tree); // Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree. - // (With some exceptions: the VN of the lhs of an assignment is assigned as part of the - // assignment.) void fgValueNumberTree(GenTree* tree); void fgValueNumberStore(GenTree* tree); @@ -6332,7 +6332,7 @@ class Compiler // Create a new temporary variable to hold the result of *ppTree, // and transform the graph accordingly. GenTree* fgInsertCommaFormTemp(GenTree** ppTree); - TempInfo fgMakeTemp(GenTree* rhs); + TempInfo fgMakeTemp(GenTree* value); GenTree* fgMakeMultiUse(GenTree** ppTree); // Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node. 
@@ -6429,7 +6429,7 @@ class Compiler bool fgMorphCombineSIMDFieldStores(BasicBlock* block, Statement* stmt); void impMarkContiguousSIMDFieldStores(Statement* stmt); - // fgPreviousCandidateSIMDFieldStoreStmt is only used for tracking previous simd field assignment + // fgPreviousCandidateSIMDFieldStoreStmt is only used for tracking previous simd field store // in function: Compiler::impMarkContiguousSIMDFieldStores. Statement* fgPreviousCandidateSIMDFieldStoreStmt; @@ -6557,7 +6557,7 @@ class Compiler //----------------------- Liveness analysis ------------------------------- - VARSET_TP fgCurUseSet; // vars used by block (before an assignment) + VARSET_TP fgCurUseSet; // vars used by block (before a def) VARSET_TP fgCurDefSet; // vars assigned by block (before a use) MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory. @@ -7551,7 +7551,7 @@ class Compiler } op1; struct AssertionDscOp2 { - optOp2Kind kind; // a const or copy assignment + optOp2Kind kind; // a const or copy assertion private: uint16_t m_encodedIconFlags; // encoded icon gtFlags, don't use directly public: @@ -7770,7 +7770,7 @@ class Compiler AssertionIndex* optComplementaryAssertionMap; JitExpandArray* optAssertionDep; // table that holds dependent assertions (assertions // using the value of a local var) for each local var - AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments + AssertionDsc* optAssertionTabPrivate; // table that holds info about assertions AssertionIndex optAssertionCount; // total number of assertions in the assertion table AssertionIndex optMaxAssertionCount; bool optCrossBlockLocalAssertionProp; diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 6387d17b2e1ac3..3c9b9ac9e5e284 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -3548,8 +3548,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX 
/***************************************************************************** * - * The following resets the value assignment table - * used only during local assertion prop + * The following resets the assertions table used only during local assertion prop */ inline void Compiler::optAssertionReset(AssertionIndex limit) @@ -3602,7 +3601,7 @@ inline void Compiler::optAssertionReset(AssertionIndex limit) /***************************************************************************** * - * The following removes the i-th entry in the value assignment table + * The following removes the i-th entry in the assertions table * used only during local assertion prop */ diff --git a/src/coreclr/jit/copyprop.cpp b/src/coreclr/jit/copyprop.cpp index 142c745fc7c317..bb645b26bb4902 100644 --- a/src/coreclr/jit/copyprop.cpp +++ b/src/coreclr/jit/copyprop.cpp @@ -291,8 +291,8 @@ void Compiler::optCopyPropPushDef(GenTree* defNode, GenTreeLclVarCommon* lclNode { unsigned lclNum = lclNode->GetLclNum(); - // Shadowed parameters are special: they will (at most) have one use, that is one on the RHS of an - // assignment to their shadow, and we must not substitute them anywhere. So we'll not push any defs. + // Shadowed parameters are special: they will (at most) have one use, as values in a store + // to their shadow, and we must not substitute them anywhere. So we'll not push any defs. if ((gsShadowVarInfo != nullptr) && lvaGetDesc(lclNum)->lvIsParam && (gsShadowVarInfo[lclNum].shadowCopy != BAD_VAR_NUM)) { diff --git a/src/coreclr/jit/earlyprop.cpp b/src/coreclr/jit/earlyprop.cpp index a63c34babce9fc..ef03524a1810c0 100644 --- a/src/coreclr/jit/earlyprop.cpp +++ b/src/coreclr/jit/earlyprop.cpp @@ -371,21 +371,22 @@ GenTree* Compiler::optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropK { assert(ssaDefStore->OperIsLocalStore()); - GenTree* data = ssaDefStore->Data(); + GenTree* defValue = ssaDefStore->Data(); - // Recursively track the Rhs for "entire" stores. 
- if (ssaDefStore->OperIs(GT_STORE_LCL_VAR) && (ssaDefStore->GetLclNum() == lclNum) && data->OperIs(GT_LCL_VAR)) + // Recursively track the value for "entire" stores. + if (ssaDefStore->OperIs(GT_STORE_LCL_VAR) && (ssaDefStore->GetLclNum() == lclNum) && + defValue->OperIs(GT_LCL_VAR)) { - unsigned dataLclNum = data->AsLclVarCommon()->GetLclNum(); - unsigned dataSsaNum = data->AsLclVarCommon()->GetSsaNum(); + unsigned defValueLclNum = defValue->AsLclVar()->GetLclNum(); + unsigned defValueSsaNum = defValue->AsLclVar()->GetSsaNum(); - value = optPropGetValueRec(dataLclNum, dataSsaNum, valueKind, walkDepth + 1); + value = optPropGetValueRec(defValueLclNum, defValueSsaNum, valueKind, walkDepth + 1); } else { if (valueKind == optPropKind::OPK_ARRAYLEN) { - value = getArrayLengthFromAllocation(data DEBUGARG(ssaVarDsc->GetBlock())); + value = getArrayLengthFromAllocation(defValue DEBUGARG(ssaVarDsc->GetBlock())); if (value != nullptr) { if (!value->IsCnsIntOrI()) diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index 7450df323aea0c..0f0fb3c7c484ea 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -433,7 +433,7 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorData(); - // We need to force all assignments from multi-reg nodes into the "lcl = node()" form. + // We need to force all stores from multi-reg nodes into the "lcl = node()" form. if (inlinee->IsMultiRegNode()) { // Special case: we already have a local, the only thing to do is mark it appropriately. Except @@ -607,7 +607,7 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorOperIs(GT_LCL_VAR) && (value->AsLclVar()->GetLclNum() == lclNum)) { - JITDUMP("... removing self-assignment\n"); + JITDUMP("... 
removing self-store\n"); DISPTREE(tree); tree->gtBashToNOP(); m_madeChanges = true; diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 94e6bc5b3c057a..33ee13bf9b6895 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -2092,7 +2092,7 @@ bool Compiler::fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNu } // Tail duplication tends to pay off when the last statement - // is an assignment of a constant, arraylength, or a relop. + // is a local store of a constant, arraylength, or a relop. // This is because these statements produce information about values // that would otherwise be lost at the upcoming merge point. // @@ -2108,8 +2108,8 @@ bool Compiler::fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNu GenTree* const tree = stmt->GetRootNode(); if (tree->OperIsLocalStore() && !tree->OperIsBlkOp() && (tree->AsLclVarCommon()->GetLclNum() == lclNum)) { - GenTree* const data = tree->Data(); - if (data->OperIsArrLength() || data->OperIsConst() || data->OperIsCompare()) + GenTree* const value = tree->Data(); + if (value->OperIsArrLength() || value->OperIsConst() || value->OperIsCompare()) { return true; } @@ -2158,7 +2158,7 @@ bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock* target, unsigne // ultimately feeds a simple conditional branch. // // These blocks are small, and when duplicated onto the tail of blocks that end in - // assignments, there is a high probability of the branch completely going away. + // local stores, there is a high probability of the branch completely going away. // // This is by no means the only kind of tail that it is beneficial to duplicate, // just the only one we recognize for now. 
diff --git a/src/coreclr/jit/fgstmt.cpp b/src/coreclr/jit/fgstmt.cpp index 0c0d7384f275b6..2189955a6f9785 100644 --- a/src/coreclr/jit/fgstmt.cpp +++ b/src/coreclr/jit/fgstmt.cpp @@ -49,9 +49,9 @@ bool Compiler::fgBlockContainsStatementBounded(BasicBlock* block, // stmt - the statement to be inserted. // // Notes: -// We always insert phi statements at the beginning. -// In other cases, if there are any phi assignments and/or an assignment of -// the GT_CATCH_ARG, we insert after those. +// We always insert phi statements at the beginning. In other cases, if +// there are any phi stores and/or a store of the GT_CATCH_ARG, we insert +// after those. // void Compiler::fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt) { diff --git a/src/coreclr/jit/gcinfo.cpp b/src/coreclr/jit/gcinfo.cpp index b7c1a2667bf91a..ff534a0afcbf21 100644 --- a/src/coreclr/jit/gcinfo.cpp +++ b/src/coreclr/jit/gcinfo.cpp @@ -241,8 +241,8 @@ GCInfo::WriteBarrierForm GCInfo::gcIsWriteBarrierCandidate(GenTreeStoreInd* stor } // Ignore any assignments of NULL or nongc object - GenTree* const data = store->Data()->gtSkipReloadOrCopy(); - if (data->IsIntegralConst(0) || data->IsIconHandle(GTF_ICON_OBJ_HDL)) + GenTree* const value = store->Data()->gtSkipReloadOrCopy(); + if (value->IsIntegralConst(0) || value->IsIconHandle(GTF_ICON_OBJ_HDL)) { return WBF_NoBarrier; } diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 5b4894c82a0327..7e90d26a3d6820 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -4600,7 +4600,7 @@ bool Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode) else { // No side effects in op2 - we can swap iff op1 has no way of modifying op2, - // i.e. through byref assignments or calls or op2 is a constant. + // i.e. through indirect stores or calls or op2 is a constant. 
if (firstNode->gtFlags & strictEffects & GTF_PERSISTENT_SIDE_EFFECTS) { @@ -6317,10 +6317,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) assert(use.GetNode()->GetCostEx() == 0); assert(use.GetNode()->GetCostSz() == 0); } - // Give it a level of 2, just to be sure that it's greater than the LHS of - // the parent assignment and the PHI gets evaluated first in linear order. - // See also SsaBuilder::InsertPhi and SsaBuilder::AddPhiArg. - level = 2; + level = 1; costEx = 0; costSz = 0; break; @@ -6986,7 +6983,7 @@ bool GenTree::OperRequiresAsgFlag() const return true; // If the call has return buffer argument, it produced a definition and hence - // should be marked with assignment. + // should be marked with GTF_ASG. case GT_CALL: return AsCall()->IsOptimizingRetBufAsLocal(); @@ -8231,28 +8228,51 @@ GenTree* Compiler::gtNewConWithPattern(var_types type, uint8_t pattern) } } -GenTreeLclVar* Compiler::gtNewStoreLclVarNode(unsigned lclNum, GenTree* data) +//------------------------------------------------------------------------ +// gtNewStoreLclVarNode: Create a local store node. +// +// Arguments: +// lclNum - Number of the local being stored to +// value - Value to store +// +// Return Value: +// The created STORE_LCL_VAR node. +// +GenTreeLclVar* Compiler::gtNewStoreLclVarNode(unsigned lclNum, GenTree* value) { LclVarDsc* varDsc = lvaGetDesc(lclNum); var_types type = varDsc->lvNormalizeOnLoad() ? varDsc->TypeGet() : genActualType(varDsc); - GenTreeLclVar* store = new (this, GT_STORE_LCL_VAR) GenTreeLclVar(type, lclNum, data); + GenTreeLclVar* store = new (this, GT_STORE_LCL_VAR) GenTreeLclVar(type, lclNum, value); store->gtFlags |= (GTF_VAR_DEF | GTF_ASG); if (varDsc->IsAddressExposed()) { store->gtFlags |= GTF_GLOB_REF; } - gtInitializeStoreNode(store, data); + gtInitializeStoreNode(store, value); return store; } +//------------------------------------------------------------------------ +// gtNewStoreLclFldNode: Create a local field store node. 
+// +// Arguments: +// lclNum - Number of the local being stored to +// type - Type of the store +// layout - Struct layout of the store +// offset - Offset of the store +// value - Value to store +// +// Return Value: +// The created STORE_LCL_FLD node. +// GenTreeLclFld* Compiler::gtNewStoreLclFldNode( - unsigned lclNum, var_types type, ClassLayout* layout, unsigned offset, GenTree* data) + unsigned lclNum, var_types type, ClassLayout* layout, unsigned offset, GenTree* value) { assert((type == TYP_STRUCT) == (layout != nullptr)); - GenTreeLclFld* store = new (this, GT_STORE_LCL_FLD) GenTreeLclFld(type, lclNum, offset, data, layout); + GenTreeLclFld* store = new (this, GT_STORE_LCL_FLD) GenTreeLclFld(type, lclNum, offset, value, layout); store->gtFlags |= (GTF_VAR_DEF | GTF_ASG); if (store->IsPartialLclFld(this)) { @@ -8263,7 +8283,7 @@ GenTreeLclFld* Compiler::gtNewStoreLclFldNode( store->gtFlags |= GTF_GLOB_REF; } - gtInitializeStoreNode(store, data); + gtInitializeStoreNode(store, value); return store; } @@ -8366,7 +8386,7 @@ GenTreeLclVar* Compiler::gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL { assert(type != TYP_VOID); // We need to ensure that all struct values are normalized. - // It might be nice to assert this in general, but we have assignments of int to long. + // It might be nice to assert this in general, but we have stores of int to long. if (varTypeIsStruct(type)) { // Make an exception for implicit by-ref parameters during global morph, since @@ -8412,7 +8432,7 @@ GenTreeLclVar* Compiler::gtNewLclVarNode(unsigned lclNum, var_types type) GenTreeLclVar* Compiler::gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs)) { // We need to ensure that all struct values are normalized. - // It might be nice to assert this in general, but we have assignments of int to long. + // It might be nice to assert this in general, but we have stores of int to long. 
if (varTypeIsStruct(type)) { // Make an exception for implicit by-ref parameters during global morph, since @@ -8523,28 +8543,28 @@ GenTreeFieldAddr* Compiler::gtNewFieldAddrNode(var_types type, CORINFO_FIELD_HAN // store - The store node // data - The value to store // -void Compiler::gtInitializeStoreNode(GenTree* store, GenTree* data) +void Compiler::gtInitializeStoreNode(GenTree* store, GenTree* value) { // TODO-ASG: add asserts that the types match here. - assert(store->Data() == data); + assert(store->Data() == value); #if defined(FEATURE_SIMD) #ifndef TARGET_X86 if (varTypeIsSIMD(store)) { // TODO-ASG: delete this zero-diff quirk. - if (!data->IsCall() || !data->AsCall()->ShouldHaveRetBufArg()) + if (!value->IsCall() || !value->AsCall()->ShouldHaveRetBufArg()) { - // We want to track SIMD assignments as being intrinsics since they - // are functionally SIMD `mov` instructions and are more efficient - // when we don't promote, particularly when it occurs due to inlining. + // We want to track SIMD stores as being intrinsics since they are + // functionally SIMD `mov` instructions and are more efficient when + // we don't promote, particularly when it occurs due to inlining. SetOpLclRelatedToSIMDIntrinsic(store); - SetOpLclRelatedToSIMDIntrinsic(data); + SetOpLclRelatedToSIMDIntrinsic(value); } } #else // TARGET_X86 // TODO-Cleanup: merge into the all-arch. - if (varTypeIsSIMD(data) && data->OperIs(GT_HWINTRINSIC, GT_CNS_VEC)) + if (varTypeIsSIMD(value) && value->OperIs(GT_HWINTRINSIC, GT_CNS_VEC)) { SetOpLclRelatedToSIMDIntrinsic(store); } @@ -8669,26 +8689,26 @@ GenTree* Compiler::gtNewLoadValueNode(var_types type, ClassLayout* layout, GenTr } //------------------------------------------------------------------------------ -// gtNewStoreBlkNode : Create an indirect struct store node. +// gtNewStoreBlkNode: Create an indirect struct store node. 
// // Arguments: // layout - The struct layout // addr - Destination address -// data - Value to store +// value - Value to store // indirFlags - Indirection flags // // Return Value: // The created GT_STORE_BLK node. // -GenTreeBlk* Compiler::gtNewStoreBlkNode(ClassLayout* layout, GenTree* addr, GenTree* data, GenTreeFlags indirFlags) +GenTreeBlk* Compiler::gtNewStoreBlkNode(ClassLayout* layout, GenTree* addr, GenTree* value, GenTreeFlags indirFlags) { assert((indirFlags & GTF_IND_INVARIANT) == 0); - assert(data->IsInitVal() || ClassLayout::AreCompatible(layout, data->GetLayout(this))); + assert(value->IsInitVal() || ClassLayout::AreCompatible(layout, value->GetLayout(this))); - GenTreeBlk* store = new (this, GT_STORE_BLK) GenTreeBlk(GT_STORE_BLK, TYP_STRUCT, addr, data, layout); + GenTreeBlk* store = new (this, GT_STORE_BLK) GenTreeBlk(GT_STORE_BLK, TYP_STRUCT, addr, value, layout); store->gtFlags |= GTF_ASG; gtInitializeIndirNode(store, indirFlags); - gtInitializeStoreNode(store, data); + gtInitializeStoreNode(store, value); return store; } @@ -8699,20 +8719,20 @@ GenTreeBlk* Compiler::gtNewStoreBlkNode(ClassLayout* layout, GenTree* addr, GenT // Arguments: // type - Type of the store // addr - Destination address -// data - Value to store +// value - Value to store // indirFlags - Indirection flags // // Return Value: // The created GT_STOREIND node. 
// -GenTreeStoreInd* Compiler::gtNewStoreIndNode(var_types type, GenTree* addr, GenTree* data, GenTreeFlags indirFlags) +GenTreeStoreInd* Compiler::gtNewStoreIndNode(var_types type, GenTree* addr, GenTree* value, GenTreeFlags indirFlags) { assert(((indirFlags & GTF_IND_INVARIANT) == 0) && (type != TYP_STRUCT)); - GenTreeStoreInd* store = new (this, GT_STOREIND) GenTreeStoreInd(type, addr, data); + GenTreeStoreInd* store = new (this, GT_STOREIND) GenTreeStoreInd(type, addr, value); store->gtFlags |= GTF_ASG; gtInitializeIndirNode(store, indirFlags); - gtInitializeStoreNode(store, data); + gtInitializeStoreNode(store, value); return store; } @@ -8724,7 +8744,7 @@ GenTreeStoreInd* Compiler::gtNewStoreIndNode(var_types type, GenTree* addr, GenT // type - Type to store // layout - Struct layout for the store // addr - Destination address -// data - Value to store +// value - Value to store // indirFlags - Indirection flags // // Return Value: @@ -8732,7 +8752,7 @@ GenTreeStoreInd* Compiler::gtNewStoreIndNode(var_types type, GenTree* addr, GenT // a compatible local. 
// GenTree* Compiler::gtNewStoreValueNode( - var_types type, ClassLayout* layout, GenTree* addr, GenTree* data, GenTreeFlags indirFlags) + var_types type, ClassLayout* layout, GenTree* addr, GenTree* value, GenTreeFlags indirFlags) { assert((type != TYP_STRUCT) || (layout != nullptr)); @@ -8743,18 +8763,18 @@ GenTree* Compiler::gtNewStoreValueNode( if ((varDsc->TypeGet() == type) && ((type != TYP_STRUCT) || ClassLayout::AreCompatible(layout, varDsc->GetLayout()))) { - return gtNewStoreLclVarNode(lclNum, data); + return gtNewStoreLclVarNode(lclNum, value); } } GenTree* store; if (type == TYP_STRUCT) { - store = gtNewStoreBlkNode(layout, addr, data, indirFlags); + store = gtNewStoreBlkNode(layout, addr, value, indirFlags); } else { - store = gtNewStoreIndNode(type, addr, data, indirFlags); + store = gtNewStoreIndNode(type, addr, value, indirFlags); } return store; @@ -8804,10 +8824,9 @@ GenTree* Compiler::gtNewAtomicNode(genTreeOps oper, var_types type, GenTree* add // value for the initblk. // // Notes: -// The initBlk MSIL instruction takes a byte value, which must be -// extended to the size of the assignment when an initBlk is transformed -// to an assignment of a primitive type. -// This performs the appropriate extension. +// The initBlk MSIL instruction takes a byte value, which must be extended +// to the size of the store when an initBlk is transformed to a store of +// a primitive type. This performs the appropriate extension. 
// void GenTreeIntCon::FixupInitBlkValue(var_types type) { @@ -14856,7 +14875,7 @@ GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions const bool isUnsafeValueClass = false; lvaSetStruct(boxTempLcl, boxClass, isUnsafeValueClass); - // Remove the newobj and assignment to box temp + // Remove the newobj and store to box temp JITDUMP("Bashing NEWOBJ [%06u] to NOP\n", dspTreeID(boxLclDef)); boxLclDef->gtBashToNOP(); @@ -14913,7 +14932,7 @@ GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions // Otherwise, proceed with the optimization. // - // Change the assignment expression to a NOP. + // Change the store expression to a NOP. JITDUMP("\nBashing NEWOBJ [%06u] to NOP\n", dspTreeID(boxLclDef)); boxLclDef->gtBashToNOP(); @@ -16351,25 +16370,23 @@ GenTree* Compiler::gtFoldIndirConst(GenTreeIndir* indir) } //------------------------------------------------------------------------ -// gtNewTempStore: Create an assignment of the given value to a temp. +// gtNewTempStore: Create a store of the given value to a temp. // // Arguments: // tmp - local number for a compiler temp -// val - value to assign to the temp +// val - value to store to the temp // curLevel - stack level to spill at (importer-only) // pAfterStmt - statement to insert any additional statements after // di - debug info for new statements // block - block to insert any additional statements in // // Return Value: -// Normally a new assignment node. +// Normally a new store node. // However may return a nop node if val is simply a reference to the temp. // // Notes: -// Self-assignments may be represented via NOPs. -// +// Self-stores may be represented via NOPs. // May update the type of the temp, if it was previously unknown. -// // May set compFloatingPointUsed. 
// GenTree* Compiler::gtNewTempStore( @@ -16449,7 +16466,7 @@ GenTree* Compiler::gtNewTempStore( noway_assert(!"Incompatible types for gtNewTempStore"); } - // Floating Point assignments can be created during inlining + // Floating Point stores can be created during inlining // see "Zero init inlinee locals:" in fgInlinePrependStatements // thus we may need to set compFloatingPointUsed to true here. // @@ -16473,8 +16490,8 @@ GenTree* Compiler::gtNewTempStore( /***************************************************************************** * - * Create a helper call to access a COM field (iff 'assg' is non-zero this is - * an assignment and 'assg' is the new value). + * Create a helper call to access a COM field (iff 'value' is non-zero this is + * a store and 'value' is the new value). */ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr, @@ -16482,7 +16499,7 @@ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp, - GenTree* assg) + GenTree* value) { assert(pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER || pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_ADDR_HELPER || @@ -16499,24 +16516,24 @@ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr, { if (access & CORINFO_ACCESS_SET) { - assert(assg != nullptr); + assert(value != nullptr); // helper needs pointer to struct, not struct itself if (pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT) { // TODO-Bug?: verify if flags matter here GenTreeFlags indirFlags = GTF_EMPTY; - assg = impGetNodeAddr(assg, CHECK_SPILL_ALL, &indirFlags); + value = impGetNodeAddr(value, CHECK_SPILL_ALL, &indirFlags); } - else if (lclTyp == TYP_DOUBLE && assg->TypeGet() == TYP_FLOAT) + else if (lclTyp == TYP_DOUBLE && value->TypeGet() == TYP_FLOAT) { - assg = gtNewCastNode(TYP_DOUBLE, assg, false, TYP_DOUBLE); + value = gtNewCastNode(TYP_DOUBLE, value, false, TYP_DOUBLE); } - else if (lclTyp == TYP_FLOAT && assg->TypeGet() == TYP_DOUBLE) + else 
if (lclTyp == TYP_FLOAT && value->TypeGet() == TYP_DOUBLE) { - assg = gtNewCastNode(TYP_FLOAT, assg, false, TYP_FLOAT); + value = gtNewCastNode(TYP_FLOAT, value, false, TYP_FLOAT); } - args[nArgs++] = assg; + args[nArgs++] = value; helperType = TYP_VOID; } else if (access & CORINFO_ACCESS_GET) @@ -16600,8 +16617,8 @@ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr, if ((access & CORINFO_ACCESS_SET) != 0) { - result = (lclTyp == TYP_STRUCT) ? gtNewStoreBlkNode(layout, result, assg)->AsIndir() - : gtNewStoreIndNode(lclTyp, result, assg); + result = (lclTyp == TYP_STRUCT) ? gtNewStoreBlkNode(layout, result, value)->AsIndir() + : gtNewStoreIndNode(lclTyp, result, value); if (varTypeIsStruct(lclTyp)) { result = impStoreStruct(result, CHECK_SPILL_ALL); @@ -16622,9 +16639,6 @@ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr, * Return true if the given node (excluding children trees) contains side effects. * Note that it does not recurse, and children need to be handled separately. * It may return false even if the node has GTF_SIDE_EFFECT (because of its children). - * - * Similar to OperMayThrow() (but handles GT_CALLs specially), but considers - * assignments too. 
*/ bool Compiler::gtNodeHasSideEffects(GenTree* tree, GenTreeFlags flags) @@ -17909,7 +17923,7 @@ GenTreeLclVar* GenTree::IsImplicitByrefParameterValuePostMorph(Compiler* compile } //------------------------------------------------------------------------ -// IsLclVarUpdateTree: Determine whether this is an assignment tree of the +// IsLclVarUpdateTree: Determine whether this is a local store tree of the // form Vn = Vn 'oper' 'otherTree' where Vn is a lclVar // // Arguments: @@ -26757,9 +26771,8 @@ bool GenTreeHWIntrinsic::OperIsEmbRoundingEnabled() const // bool GenTreeHWIntrinsic::OperRequiresAsgFlag() const { - // A MemoryStore operation is an assignment and barriers, while they - // don't technically do an assignment are modeled the same as - // GT_MEMORYBARRIER which tracks itself as requiring the GTF_ASG flag + // Barriers, while they don't technically do an assignment are modeled the same + // as GT_MEMORYBARRIER which tracks itself as requiring the GTF_ASG flag. return OperIsMemoryStoreOrBarrier(); } @@ -26941,7 +26954,7 @@ void GenTreeHWIntrinsic::Initialize(NamedIntrinsic intrinsicId) case NI_SSE2_MemoryFence: case NI_X86Serialize_Serialize: { - // Mark as an assignment and global reference, much as is done for GT_MEMORYBARRIER + // Mark as a store and global reference, much as is done for GT_MEMORYBARRIER gtFlags |= (GTF_ASG | GTF_GLOB_REF); break; } diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index a0e8eb7242e6cf..d5dbad500c16d2 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -363,8 +363,8 @@ enum GenTreeFlags : unsigned int // expression node for one of these flags. //--------------------------------------------------------------------- - GTF_ASG = 0x00000001, // sub-expression contains an assignment - GTF_CALL = 0x00000002, // sub-expression contains a func. call + GTF_ASG = 0x00000001, // sub-expression contains a store + GTF_CALL = 0x00000002, // sub-expression contains a func. 
call GTF_EXCEPT = 0x00000004, // sub-expression might throw an exception GTF_GLOB_REF = 0x00000008, // sub-expression uses global variable(s) GTF_ORDER_SIDEEFF = 0x00000010, // sub-expression has a re-ordering side effect @@ -593,14 +593,13 @@ inline GenTreeFlags& operator ^=(GenTreeFlags& a, GenTreeFlags b) } // Can any side-effects be observed externally, say by a caller method? -// For assignments, only assignments to global memory can be observed -// externally, whereas simple assignments to local variables can not. +// For stores, only stores to global memory can be observed externally, +// whereas simple stores to local variables can not. // // Be careful when using this inside a "try" protected region as the -// order of assignments to local variables would need to be preserved -// wrt side effects if the variables are alive on entry to the -// "catch/finally" region. In such cases, even assignments to locals -// will have to be restricted. +// order of stores to local variables would need to be preserved wrt +// side effects if the variables are alive on entry to the handler +// region. In such cases, even stores to locals will have to be restricted. #define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \ (((flags) & (GTF_CALL | GTF_EXCEPT)) || (((flags) & (GTF_ASG | GTF_GLOB_REF)) == (GTF_ASG | GTF_GLOB_REF))) @@ -1980,8 +1979,6 @@ struct GenTree GenTreeLclVarCommon* IsImplicitByrefParameterValuePreMorph(Compiler* compiler); GenTreeLclVar* IsImplicitByrefParameterValuePostMorph(Compiler* compiler, GenTree** addr); - // Determine whether this is an assignment tree of the form X = X (op) Y, - // where Y is an arbitrary tree, and X is a lclVar. unsigned IsLclVarUpdateTree(GenTree** otherTree, genTreeOps* updateOper); // Determine whether this tree is a basic block profile count update. 
@@ -3928,8 +3925,8 @@ struct GenTreeBox : public GenTreeUnOp { return gtOp1; } - // This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value - // type + // This is the statement that contains the definition tree when the node is an inlined GT_BOX + // on a value type Statement* gtDefStmtWhenInlinedBoxValue; // And this is the statement that copies from the value being boxed to the box payload Statement* gtCopyStmtWhenInlinedBoxValue; diff --git a/src/coreclr/jit/gschecks.cpp b/src/coreclr/jit/gschecks.cpp index 7b448b8ca3d24e..0b4deec87d2285 100644 --- a/src/coreclr/jit/gschecks.cpp +++ b/src/coreclr/jit/gschecks.cpp @@ -94,7 +94,7 @@ void Compiler::gsCopyShadowParams() // Find groups of variables assigned to each other, and also // tracks variables which are dereferenced and marks them as ptrs. - // Look for assignments to *p, and ptrs passed to functions + // Look for stores to *p, and ptrs passed to functions // if (gsFindVulnerableParams()) { @@ -117,7 +117,7 @@ struct MarkPtrsInfo { Compiler* comp; unsigned lvStoreDef; // Which local variable is the tree being assigned to? - bool isStoreSrc; // Is this the source value for an assignment? + bool isStoreSrc; // Is this the source value for a local store? bool isUnderIndir; // Is this a pointer value tree that is being dereferenced? bool skipNextNode; // Skip a single node during the tree-walk @@ -528,7 +528,7 @@ void Compiler::gsParamsToShadows() if (compJmpOpUsed) { // There could be more than one basic block ending with a "Jmp" type tail call. - // We would have to insert assignments in all such blocks, just before GT_JMP stmnt. + // We would have to insert stores in all such blocks, just before GT_JMP stmnt. 
for (BasicBlock* const block : Blocks()) { if (!block->KindIs(BBJ_RETURN)) diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp index 385dfe4bc82bf7..8e3288f75d7090 100644 --- a/src/coreclr/jit/hwintrinsicarm64.cpp +++ b/src/coreclr/jit/hwintrinsicarm64.cpp @@ -1836,7 +1836,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { unsigned tmp = lvaGrabTemp(true DEBUGARG("StoreVectorNx2 temp tree")); - impStoreTemp(tmp, op2, CHECK_SPILL_NONE); + impStoreToTemp(tmp, op2, CHECK_SPILL_NONE); op2 = gtNewLclvNode(tmp, argType); } op2 = gtConvertTableOpToFieldList(op2, fieldCount); @@ -1890,7 +1890,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { unsigned tmp = lvaGrabTemp(true DEBUGARG("StoreSelectedScalarN")); - impStoreTemp(tmp, op2, CHECK_SPILL_NONE); + impStoreToTemp(tmp, op2, CHECK_SPILL_NONE); op2 = gtNewLclvNode(tmp, argType); } op2 = gtConvertTableOpToFieldList(op2, fieldCount); @@ -2106,7 +2106,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { unsigned tmp = lvaGrabTemp(true DEBUGARG("LoadAndInsertScalar temp tree")); - impStoreTemp(tmp, op1, CHECK_SPILL_NONE); + impStoreToTemp(tmp, op1, CHECK_SPILL_NONE); op1 = gtNewLclvNode(tmp, argType); } @@ -2139,7 +2139,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { unsigned tmp = lvaGrabTemp(true DEBUGARG("VectorTableLookup temp tree")); - impStoreTemp(tmp, op1, CHECK_SPILL_NONE); + impStoreToTemp(tmp, op1, CHECK_SPILL_NONE); op1 = gtNewLclvNode(tmp, argType); } @@ -2179,7 +2179,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { unsigned tmp = lvaGrabTemp(true DEBUGARG("VectorTableLookupExtension temp tree")); - impStoreTemp(tmp, op2, CHECK_SPILL_NONE); + impStoreToTemp(tmp, op2, CHECK_SPILL_NONE); op2 = gtNewLclvNode(tmp, argType); } diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index 1e6a573aa7b0de..6d7ead881a91c5 100644 --- 
a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -33,7 +33,7 @@ class OptIfConversionDsc BasicBlock* m_startBlock; // First block in the If Conversion. BasicBlock* m_finalBlock = nullptr; // Block where the flows merge. In a return case, this can be nullptr. - // The node, statement and block of an assignment. + // The node, statement and block of an operation. struct IfConvertOperation { BasicBlock* block = nullptr; @@ -208,15 +208,15 @@ void OptIfConversionDsc::IfConvertFindFlow() // IfConvertCheckStmts // // From the given block to the final block, check all the statements and nodes are -// valid for an If conversion. Chain of blocks must contain only a single assignment -// and no other operations. +// valid for an If conversion. Chain of blocks must contain only a single local +// store and no other operations. // // Arguments: -// fromBlock -- Block inside the if statement to start from (Either Then or Else path). -// foundOperation -- Returns the found operation. +// fromBlock - Block inside the if statement to start from (Either Then or Else path). +// foundOperation - Returns the found operation. // // Returns: -// If everything is valid, then set foundOperation to the assignment and return true. +// If everything is valid, then set foundOperation to the store and return true. // Otherwise return false. // bool OptIfConversionDsc::IfConvertCheckStmts(BasicBlock* fromBlock, IfConvertOperation* foundOperation) @@ -774,7 +774,7 @@ PhaseStatus Compiler::optIfConversion() bool madeChanges = false; - // This phase does not respect SSA: assignments are deleted/moved. + // This phase does not respect SSA: local stores are deleted/moved. 
assert(!fgSsaValid); optReachableBitVecTraits = nullptr; diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 7c08570aafbaa9..12a612fde1a66a 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -464,7 +464,7 @@ void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsu if (expr->OperIsLocalStore()) { - // For assignments, limit the checking to what the value could modify/interfere with. + // For stores, limit the checking to what the value could modify/interfere with. GenTree* value = expr->AsLclVarCommon()->Data(); flags = value->gtFlags & GTF_GLOB_EFFECT; @@ -660,16 +660,16 @@ Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const Debug /***************************************************************************** * - * Append an assignment of the given value to a temp to the current tree list. + * Append a store of the given value to a temp to the current tree list. * curLevel is the stack level for which the spill to the temp is being done. */ -void Compiler::impStoreTemp(unsigned lclNum, - GenTree* val, - unsigned curLevel, - Statement** pAfterStmt, /* = NULL */ - const DebugInfo& di, /* = DebugInfo() */ - BasicBlock* block /* = NULL */ +void Compiler::impStoreToTemp(unsigned lclNum, + GenTree* val, + unsigned curLevel, + Statement** pAfterStmt, /* = NULL */ + const DebugInfo& di, /* = DebugInfo() */ + BasicBlock* block /* = NULL */ ) { GenTree* store = gtNewTempStore(lclNum, val, curLevel, pAfterStmt, di, block); @@ -1083,7 +1083,7 @@ GenTree* Compiler::impGetNodeAddr(GenTree* val, unsigned curLevel, GenTreeFlags* } unsigned lclNum = lvaGrabTemp(true DEBUGARG("location for address-of(RValue)")); - impStoreTemp(lclNum, val, curLevel); + impStoreToTemp(lclNum, val, curLevel); // The 'return value' is now address of the temp itself. 
return gtNewLclVarAddrNode(lclNum, TYP_BYREF); @@ -1166,7 +1166,7 @@ GenTree* Compiler::impNormStructVal(GenTree* structVal, unsigned curLevel) case GT_RET_EXPR: { unsigned lclNum = lvaGrabTemp(true DEBUGARG("spilled call-like call argument")); - impStoreTemp(lclNum, structVal, curLevel); + impStoreToTemp(lclNum, structVal, curLevel); // The structVal is now the temp itself structVal = gtNewLclvNode(lclNum, structType); @@ -1625,7 +1625,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken // Spilling it to a temp improves CQ (mainly in Tier0) unsigned callLclNum = lvaGrabTemp(true DEBUGARG("spilling helperCall")); - impStoreTemp(callLclNum, helperCall, CHECK_SPILL_NONE); + impStoreToTemp(callLclNum, helperCall, CHECK_SPILL_NONE); return gtNewLclvNode(callLclNum, helperCall->TypeGet()); } @@ -1744,7 +1744,7 @@ bool Compiler::impSpillStackEntry(unsigned level, } /* Assign the spilled entry to the temp */ - impStoreTemp(tnum, tree, level); + impStoreToTemp(tnum, tree, level); if (isNewTemp) { @@ -1779,7 +1779,7 @@ bool Compiler::impSpillStackEntry(unsigned level, } } - // The tree type may be modified by impStoreTemp, so use the type of the lclVar. + // The tree type may be modified by impStoreToTemp, so use the type of the lclVar. var_types type = genActualType(lvaTable[tnum].TypeGet()); GenTree* temp = gtNewLclvNode(tnum, type); verCurrentState.esStack[level].val = temp; @@ -1822,7 +1822,7 @@ void Compiler::impSpillStackEnsure(bool spillLeaves) /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those - * trees to temps and append the assignments to the statement list. + * trees to temps and append the stores to the statement list. * On return the stack is guaranteed to be empty. 
*/ @@ -1936,9 +1936,8 @@ void Compiler::impSpillLclRefs(unsigned lclNum, unsigned chkLevel) GenTree* tree = verCurrentState.esStack[level].val; /* If the tree may throw an exception, and the block has a handler, - then we need to spill assignments to the local if the local is - live on entry to the handler. - Just spill 'em all without considering the liveness */ + then we need to spill stores to the local if the local is on entry + to the handler. Just spill 'em all without considering the liveness */ bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT)); @@ -2087,12 +2086,12 @@ GenTree* Compiler::impCloneExpr(GenTree* tree, unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); - // impStoreTemp() may change tree->gtType to TYP_VOID for calls which + // impStoreToTemp() may change tree->gtType to TYP_VOID for calls which // return a struct type. It also may modify the struct type to a more // specialized type (e.g. a SIMD type). So we will get the type from - // the lclVar AFTER calling impStoreTemp(). + // the lclVar AFTER calling impStoreToTemp(). - impStoreTemp(temp, tree, curLevel, pAfterStmt, impCurStmtDI); + impStoreToTemp(temp, tree, curLevel, pAfterStmt, impCurStmtDI); var_types type = genActualType(lvaTable[temp].TypeGet()); *pClone = gtNewLclvNode(temp, type); @@ -3256,7 +3255,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) Statement* allocBoxStmt = impAppendTree(allocBoxStore, CHECK_SPILL_NONE, impCurStmtDI); // If the exprToBox is a call that returns its value via a ret buf arg, - // move the assignment statement(s) before the call (which must be a top level tree). + // move the store statement(s) before the call (which must be a top level tree). 
// // We do this because impStoreStructPtr (invoked below) will // back-substitute into a call when it sees a GT_RET_EXPR and the call @@ -3275,7 +3274,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) // that has this call as the root node. // // Because gtNewTempStore (above) may have added statements that - // feed into the actual assignment we need to move this set of added + // feed into the actual store we need to move this set of added // statements as a group. // // Note boxed allocations are side-effect free (no com or finalizer) so @@ -3310,7 +3309,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) insertBeforeStmt = insertBeforeStmt->GetPrevStmt(); } - // Found the call. Move the statements comprising the assignment. + // Found the call. Move the statements comprising the store. // JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(), allocBoxStmt->GetID(), insertBeforeStmt->GetID()); @@ -3373,7 +3372,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) // Spill eval stack to flush out any pending side effects. impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox")); - // Set up this copy as a second assignment. + // Set up this copy as a second store. 
Statement* copyStmt = impAppendTree(op1, CHECK_SPILL_NONE, impCurStmtDI); op1 = gtNewLclvNode(impBoxTemp, TYP_REF); @@ -3651,7 +3650,7 @@ GenTree* Compiler::impImportStaticReadOnlyField(CORINFO_FIELD_HANDLE field, CORI unsigned structTempNum = lvaGrabTemp(true DEBUGARG("folding static readonly field empty struct")); lvaSetStruct(structTempNum, fieldClsHnd, false); - impStoreTemp(structTempNum, gtNewIconNode(0), CHECK_SPILL_NONE); + impStoreToTemp(structTempNum, gtNewIconNode(0), CHECK_SPILL_NONE); return gtNewLclVarNode(structTempNum); } @@ -4233,7 +4232,7 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op) unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer")); // No need to spill anything as we're about to return. - impStoreTemp(tmpNum, op, CHECK_SPILL_NONE); + impStoreToTemp(tmpNum, op, CHECK_SPILL_NONE); op = gtNewLclvNode(tmpNum, info.compRetType); JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n"); @@ -5569,7 +5568,7 @@ GenTree* Compiler::impCastClassOrIsInstToTree( // Make QMark node a top level node by spilling it. const unsigned result = lvaGrabTemp(true DEBUGARG("spilling qmarkNull")); - impStoreTemp(result, qmarkNull, CHECK_SPILL_NONE); + impStoreToTemp(result, qmarkNull, CHECK_SPILL_NONE); // See also gtGetHelperCallClassHandle where we make the same // determination for the helper call variants. 
@@ -6378,7 +6377,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) VAR_ST_VALID: - /* if it is a struct assignment, make certain we don't overflow the buffer */ + /* if it is a struct store, make certain we don't overflow the buffer */ assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd)); if (lvaTable[lclNum].lvNormalizeOnLoad()) @@ -6440,7 +6439,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } } - /* Filter out simple assignments to itself */ + /* Filter out simple stores to itself */ if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum()) { @@ -6856,7 +6855,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } } - // Else call a helper function to do the assignment + // Else call a helper function to do the store impPopStack(3); // The CLI Spec allows an array to be indexed by either an int32 or a native int. @@ -7969,7 +7968,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) else { const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill")); - impStoreTemp(tmpNum, op1, CHECK_SPILL_ALL); + impStoreToTemp(tmpNum, op1, CHECK_SPILL_ALL); var_types type = genActualType(lvaTable[tmpNum].TypeGet()); assert(lvaTable[tmpNum].lvSingleDef == 0); @@ -8455,7 +8454,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) GenTree* newObjInit = gtNewZeroConNode((lclDsc->TypeGet() == TYP_STRUCT) ? TYP_INT : lclDsc->TypeGet()); - impStoreTemp(lclNum, newObjInit, CHECK_SPILL_NONE); + impStoreToTemp(lclNum, newObjInit, CHECK_SPILL_NONE); } else { @@ -8497,16 +8496,16 @@ void Compiler::impImportBlockCode(BasicBlock* block) block->SetFlags(BBF_HAS_NEWOBJ); optMethodFlags |= OMF_HAS_NEWOBJ; - // Append the assignment to the temp/local. Dont need to spill - // at all as we are just calling an EE-Jit helper which can only - // cause an (async) OutOfMemoryException. + // Append the store to the temp/local. 
Dont need to spill at all as + // we are just calling an EE-Jit helper which can only cause + // an (async) OutOfMemoryException. // We assign the newly allocated object (by a GT_ALLOCOBJ node) // to a temp. Note that the pattern "temp = allocObj" is required // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes // without exhaustive walk over all expressions. - impStoreTemp(lclNum, op1, CHECK_SPILL_NONE); + impStoreToTemp(lclNum, op1, CHECK_SPILL_NONE); assert(lvaTable[lclNum].lvSingleDef == 0); lvaTable[lclNum].lvSingleDef = 1; @@ -9171,7 +9170,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) assert(!"Unexpected fieldAccessor"); } - /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full + /* V4.0 allows stores of i4 constant values to i8 type vars when IL verifier is bypassed (full trust apps). The reason this works is that JIT stores an i4 constant in GenTree union during importation and reads from the union as if it were a long during code generation. Though this can potentially read garbage, one can get lucky to have this working correctly. @@ -9416,7 +9415,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // Explicitly zero out the local if we're inlining a method with InitLocals into a // method without InitLocals. 
- impStoreTemp(stackallocAsLocal, gtNewIconNode(0), CHECK_SPILL_ALL); + impStoreToTemp(stackallocAsLocal, gtNewIconNode(0), CHECK_SPILL_ALL); } if (!this->opts.compDbgEnC) @@ -10533,7 +10532,7 @@ GenTree* Compiler::impStoreMultiRegValueToVar(GenTree* op, unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return")); lvaSetStruct(tmpNum, hClass, false); - impStoreTemp(tmpNum, op, CHECK_SPILL_ALL); + impStoreToTemp(tmpNum, op, CHECK_SPILL_ALL); LclVarDsc* varDsc = lvaGetDesc(tmpNum); @@ -10732,7 +10731,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) } } - impStoreTemp(lvaInlineeReturnSpillTemp, op2, CHECK_SPILL_ALL); + impStoreToTemp(lvaInlineeReturnSpillTemp, op2, CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); @@ -10776,7 +10775,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); - impStoreTemp(lvaInlineeReturnSpillTemp, op2, CHECK_SPILL_ALL); + impStoreToTemp(lvaInlineeReturnSpillTemp, op2, CHECK_SPILL_ALL); } if (compMethodReturnsMultiRegRetType()) @@ -11354,7 +11353,7 @@ void Compiler::impImportBlock(BasicBlock* block) if (gtHasRef(relOp->AsOp()->gtOp1, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1")); - impStoreTemp(temp, relOp->AsOp()->gtOp1, level); + impStoreToTemp(temp, relOp->AsOp()->gtOp1, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type); } @@ -11362,7 +11361,7 @@ void Compiler::impImportBlock(BasicBlock* block) if (gtHasRef(relOp->AsOp()->gtOp2, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2")); - impStoreTemp(temp, relOp->AsOp()->gtOp2, level); + impStoreToTemp(temp, relOp->AsOp()->gtOp2, level); type = 
genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type); } @@ -11372,7 +11371,7 @@ void Compiler::impImportBlock(BasicBlock* block) assert(addTree->OperIs(GT_SWITCH) && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet())); unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH")); - impStoreTemp(temp, addTree->AsOp()->gtOp1, level); + impStoreToTemp(temp, addTree->AsOp()->gtOp1, level); addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet())); } } @@ -13517,7 +13516,7 @@ bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array) // We should only call this when optimizing. assert(opts.OptimizationEnabled()); - // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j] + // Check for store to same array, ie. arrLcl[i] = arrLcl[j] if (value->OperIs(GT_IND) && value->AsIndir()->Addr()->OperIs(GT_INDEX_ADDR) && array->OperIs(GT_LCL_VAR)) { GenTree* valueArray = value->AsIndir()->Addr()->AsIndexAddr()->Arr(); @@ -13533,7 +13532,7 @@ bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array) } } - // Check for assignment of NULL. + // Check for store of NULL. 
if (value->OperIs(GT_CNS_INT)) { assert(value->gtType == TYP_REF); diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index 0974d654ffb309..66769f037e2826 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -323,7 +323,7 @@ var_types Compiler::impImportCall(OPCODE opcode, return TYP_UNDEF; } - impStoreTemp(lclNum, stubAddr, CHECK_SPILL_NONE); + impStoreToTemp(lclNum, stubAddr, CHECK_SPILL_NONE); stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node @@ -417,7 +417,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer")); - impStoreTemp(lclNum, fptr, CHECK_SPILL_ALL); + impStoreToTemp(lclNum, fptr, CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call->AsCall()->gtCallAddr = fptr; @@ -488,7 +488,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); - impStoreTemp(lclNum, fptr, CHECK_SPILL_ALL); + impStoreToTemp(lclNum, fptr, CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, di); @@ -1418,8 +1418,8 @@ var_types Compiler::impImportCall(OPCODE opcode, } // TODO-Bug: CHECK_SPILL_NONE here looks wrong. - impStoreTemp(calliSlot, call, CHECK_SPILL_NONE); - // impStoreTemp can change src arg list and return type for call that returns struct. + impStoreToTemp(calliSlot, call, CHECK_SPILL_NONE); + // impStoreToTemp can change src arg list and return type for call that returns struct. 
var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); } @@ -1463,7 +1463,7 @@ var_types Compiler::impImportCall(OPCODE opcode, { // QMARK has to be a root node unsigned tmp = lvaGrabTemp(true DEBUGARG("Grabbing temp for Qmark")); - impStoreTemp(tmp, call, CHECK_SPILL_ALL); + impStoreToTemp(tmp, call, CHECK_SPILL_ALL); call = gtNewLclvNode(tmp, call->TypeGet()); } } @@ -1755,7 +1755,7 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN // This is allowed by the managed ABI and impStoreStruct will // never introduce copies due to this. unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Retbuf for unmanaged call")); - impStoreTemp(tmpNum, call, CHECK_SPILL_ALL); + impStoreToTemp(tmpNum, call, CHECK_SPILL_ALL); return gtNewLclvNode(tmpNum, lvaGetDesc(tmpNum)->TypeGet()); } @@ -2658,8 +2658,8 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) // // At this point we are ready to commit to implementing the InitializeArray - // intrinsic using a struct assignment. Pop the arguments from the stack and - // return the struct assignment node. + // intrinsic using a struct store. Pop the arguments from the stack and + // return the store node. // impPopStack(); @@ -3421,7 +3421,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); - impStoreTemp(rawHandleSlot, rawHandle, CHECK_SPILL_NONE); + impStoreToTemp(rawHandleSlot, rawHandle, CHECK_SPILL_NONE); GenTree* lclVarAddr = gtNewLclVarAddrNode(rawHandleSlot); var_types resultType = JITtype2varType(sig->retType); @@ -4668,7 +4668,7 @@ GenTree* Compiler::impSRCSUnsafeIntrinsic(NamedIntrinsic intrinsic, // In order to change the class handle of the object we need to spill it to a temp // and update class info for that temp. 
unsigned localNum = lvaGrabTemp(true DEBUGARG("updating class info")); - impStoreTemp(localNum, op, CHECK_SPILL_ALL); + impStoreToTemp(localNum, op, CHECK_SPILL_ALL); // NOTE: we still can't say for sure that it is the exact type of the argument lvaSetClass(localNum, inst, /*isExact*/ false); @@ -4814,7 +4814,7 @@ GenTree* Compiler::impSRCSUnsafeIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsIntegral(valType) && (genTypeSize(valType) < fromSize)) { unsigned lclNum = lvaGrabTemp(true DEBUGARG("bitcast small type extension")); - impStoreTemp(lclNum, op1, CHECK_SPILL_ALL); + impStoreToTemp(lclNum, op1, CHECK_SPILL_ALL); addr = gtNewLclVarAddrNode(lclNum, TYP_I_IMPL); } else @@ -5310,7 +5310,7 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic, result = gtNewQmarkNode(baseType, cond, colon); unsigned tmp = lvaGrabTemp(true DEBUGARG("Grabbing temp for LeadingZeroCount Qmark")); - impStoreTemp(tmp, result, CHECK_SPILL_NONE); + impStoreToTemp(tmp, result, CHECK_SPILL_NONE); result = gtNewLclvNode(tmp, baseType); } #elif defined(TARGET_ARM64) @@ -5641,7 +5641,7 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic, result = gtNewQmarkNode(baseType, cond, colon); unsigned tmp = lvaGrabTemp(true DEBUGARG("Grabbing temp for TrailingZeroCount Qmark")); - impStoreTemp(tmp, result, CHECK_SPILL_NONE); + impStoreToTemp(tmp, result, CHECK_SPILL_NONE); result = gtNewLclvNode(tmp, baseType); } #elif defined(TARGET_ARM64) @@ -6150,7 +6150,7 @@ class SpillRetExprHelper assert(retExpr->OperGet() == GT_RET_EXPR); const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr")); JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp); - comp->impStoreTemp(tmp, retExpr, Compiler::CHECK_SPILL_NONE); + comp->impStoreToTemp(tmp, retExpr, Compiler::CHECK_SPILL_NONE); *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet()); assert(comp->lvaTable[tmp].lvSingleDef == 0); @@ -10453,7 +10453,7 @@ 
GenTree* Compiler::impArrayAccessIntrinsic( if (intrinsicName == NI_Array_Set) { - // Assignment of a struct is more work, and there are more gets than sets. + // Stores of structs require more work, and there are more gets than sets. // TODO-CQ: support SET (`a[i,j,k] = s`) for struct element arrays. if (varTypeIsStruct(elemType)) { diff --git a/src/coreclr/jit/importervectorization.cpp b/src/coreclr/jit/importervectorization.cpp index 26ae9225cbd7ed..dddc14dec3b9b2 100644 --- a/src/coreclr/jit/importervectorization.cpp +++ b/src/coreclr/jit/importervectorization.cpp @@ -724,12 +724,12 @@ GenTree* Compiler::impUtf16StringComparison(StringComparisonKind kind, CORINFO_S strLenOffset + sizeof(int), cmpMode); if (unrolled != nullptr) { - impStoreTemp(varStrTmp, varStr, CHECK_SPILL_NONE); + impStoreToTemp(varStrTmp, varStr, CHECK_SPILL_NONE); if (unrolled->OperIs(GT_QMARK)) { // QMARK nodes cannot reside on the evaluation stack unsigned rootTmp = lvaGrabTemp(true DEBUGARG("spilling unroll qmark")); - impStoreTemp(rootTmp, unrolled, CHECK_SPILL_NONE); + impStoreToTemp(rootTmp, unrolled, CHECK_SPILL_NONE); unrolled = gtNewLclvNode(rootTmp, TYP_INT); } @@ -885,14 +885,14 @@ GenTree* Compiler::impUtf16SpanComparison(StringComparisonKind kind, CORINFO_SIG { if (!spanObj->OperIs(GT_LCL_VAR)) { - impStoreTemp(spanLclNum, spanObj, CHECK_SPILL_NONE); + impStoreToTemp(spanLclNum, spanObj, CHECK_SPILL_NONE); } if (unrolled->OperIs(GT_QMARK)) { // QMARK can't be a root node, spill it to a temp unsigned rootTmp = lvaGrabTemp(true DEBUGARG("spilling unroll qmark")); - impStoreTemp(rootTmp, unrolled, CHECK_SPILL_NONE); + impStoreToTemp(rootTmp, unrolled, CHECK_SPILL_NONE); unrolled = gtNewLclvNode(rootTmp, TYP_INT); } diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index a85ba05596b519..fad60de969d4c3 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -736,7 +736,7 @@ 
class IndirectCallTransformer // SpillArgToTempBeforeGuard: spill an argument into a temp in the guard/check block. // // Parameters - // arg - The arg to create a temp and assignment for. + // arg - The arg to create a temp and local store for. // void SpillArgToTempBeforeGuard(CallArg* arg) { diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h index 28d75fc93c3104..067d685cf7fa2c 100644 --- a/src/coreclr/jit/jitconfigvalues.h +++ b/src/coreclr/jit/jitconfigvalues.h @@ -71,7 +71,7 @@ CONFIG_INTEGER(JitHideAlignBehindJmp, 1) // If set, try to hide align instruction (if any) behind an unconditional jump instruction (if any) // that is present before the loop start. -CONFIG_INTEGER(JitOptimizeStructHiddenBuffer, W("JitOptimizeStructHiddenBuffer"), 1) // Track assignments to locals done +CONFIG_INTEGER(JitOptimizeStructHiddenBuffer, W("JitOptimizeStructHiddenBuffer"), 1) // Track stores to locals done // through return buffers. CONFIG_INTEGER(JitUnrollLoopMaxIterationCount, diff --git a/src/coreclr/jit/lclmorph.cpp b/src/coreclr/jit/lclmorph.cpp index 6b4c6cc693f9b2..6a4534fc3f63a4 100644 --- a/src/coreclr/jit/lclmorph.cpp +++ b/src/coreclr/jit/lclmorph.cpp @@ -1047,9 +1047,9 @@ class LocalAddressVisitor final : public GenTreeVisitor } if (isDef) { - GenTree* data = indir->Data(); + GenTree* value = indir->Data(); indir->ChangeOper(GT_STORE_LCL_VAR); - indir->AsLclVar()->Data() = data; + indir->AsLclVar()->Data() = value; } else { @@ -1062,9 +1062,9 @@ class LocalAddressVisitor final : public GenTreeVisitor case IndirTransform::LclFld: if (isDef) { - GenTree* data = indir->Data(); + GenTree* value = indir->Data(); indir->ChangeOper(GT_STORE_LCL_FLD); - indir->AsLclFld()->Data() = data; + indir->AsLclFld()->Data() = value; } else { @@ -1259,9 +1259,9 @@ class LocalAddressVisitor final : public GenTreeVisitor { if (node->OperIs(GT_STOREIND, GT_STORE_BLK)) { - GenTree* data = node->Data(); + GenTree* value = node->Data(); 
node->ChangeOper(GT_STORE_LCL_VAR); - node->AsLclVar()->Data() = data; + node->AsLclVar()->Data() = value; node->gtFlags |= GTF_VAR_DEF; } else diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 18dd23498d850b..cfed684f7dfb06 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -2614,7 +2614,7 @@ bool Compiler::StructPromotionHelper::ShouldPromoteStructVar(unsigned lclNum) // // TODO: Ideally we would want to consider the impact of whether the struct is // passed as a parameter or assigned the return value of a call. Because once promoted, - // struct copying is done by field by field assignment instead of a more efficient + // struct copying is done by field by field store instead of a more efficient // rep.stos or xmm reg based copy. if (structPromotionInfo.fieldCnt > 3 && !varDsc->lvFieldAccessed) { @@ -3570,7 +3570,7 @@ void Compiler::lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE // // Notes: // -// This method models the type update rule for an assignment. +// This method models the type update rule for a store. // // Updates currently should only happen for single-def user args or // locals, when we are processing the expression actually being diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 31a5e52ba0cd7d..2f3f23826f8568 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -773,7 +773,7 @@ void Compiler::fgExtendDbgScopes() /***************************************************************************** * - * For debuggable code, we allow redundant assignments to vars + * For debuggable code, we allow redundant stores to vars * by marking them live over their entire scope. 
*/ @@ -1879,12 +1879,12 @@ void Compiler::fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALAR if (isDeadStore && fgTryRemoveDeadStoreLIR(node, lclVarNode, block)) { - GenTree* data = lclVarNode->Data(); - data->SetUnusedValue(); + GenTree* value = lclVarNode->Data(); + value->SetUnusedValue(); - if (data->isIndir()) + if (value->isIndir()) { - Lowering::TransformUnusedIndirection(data->AsIndir(), this, block); + Lowering::TransformUnusedIndirection(value->AsIndir(), this, block); } } break; @@ -2024,7 +2024,7 @@ bool Compiler::fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange) { // We are only interested in avoiding the removal of nodes with direct side effects // (as opposed to side effects of their children). - // This default case should never include calls or assignments. + // This default case should never include calls or stores. assert(!node->OperRequiresAsgFlag() && !node->OperIs(GT_CALL)); if (!node->gtSetFlags() && !node->OperMayThrow(this)) { @@ -2125,11 +2125,11 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, *pStoreRemoved = true; GenTreeLclVarCommon* store = tree->AsLclVarCommon(); - GenTree* data = store->Data(); + GenTree* value = store->Data(); // Check for side effects. 
GenTree* sideEffList = nullptr; - if ((data->gtFlags & GTF_SIDE_EFFECT) != 0) + if ((value->gtFlags & GTF_SIDE_EFFECT) != 0) { #ifdef DEBUG if (verbose) @@ -2140,7 +2140,7 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, } #endif // DEBUG - gtExtractSideEffList(data, &sideEffList); + gtExtractSideEffList(value, &sideEffList); } // Test for interior statement diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index be0dde669e34be..0427cf666a3bcb 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -384,7 +384,7 @@ bool Lowering::IsSafeToMarkRegOptional(GenTree* parentNode, GenTree* childNode) LclVarDsc* dsc = comp->lvaGetDesc(childNode->AsLclVarCommon()); if (!dsc->IsAddressExposed()) { - // Safe by IR invariants (no assignments occur between parent and node). + // Safe by IR invariants (no stores occur between parent and node). return true; } @@ -4800,7 +4800,6 @@ void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore) addr->gtFlags |= lclStore->gtFlags & (GTF_VAR_DEF | GTF_VAR_USEASG); - // Create the assignment node. lclStore->ChangeOper(GT_STORE_BLK); GenTreeBlk* objStore = lclStore->AsBlk(); objStore->gtFlags = GTF_ASG | GTF_IND_NONFAULTING | GTF_IND_TGT_NOT_HEAP; @@ -9514,14 +9513,14 @@ void Lowering::TryRetypingFloatingPointStoreToIntegerStore(GenTree* store) return; } - GenTree* data = store->Data(); - assert(store->TypeGet() == data->TypeGet()); + GenTree* value = store->Data(); + assert(store->TypeGet() == value->TypeGet()); // Optimize *x = DCON to *x = ICON which can be slightly faster and/or smaller. // - if (data->IsCnsFltOrDbl()) + if (value->IsCnsFltOrDbl()) { - double dblCns = data->AsDblCon()->DconValue(); + double dblCns = value->AsDblCon()->DconValue(); ssize_t intCns = 0; var_types type = TYP_UNKNOWN; // XARCH: we can always contain the immediates. 
@@ -9556,7 +9555,7 @@ void Lowering::TryRetypingFloatingPointStoreToIntegerStore(GenTree* store) if (type != TYP_UNKNOWN) { - data->BashToConst(intCns, type); + value->BashToConst(intCns, type); assert(!store->OperIsLocalStore() || comp->lvaGetDesc(store->AsLclVarCommon())->lvDoNotEnregister); if (store->OperIs(GT_STORE_LCL_VAR)) diff --git a/src/coreclr/jit/lower.h b/src/coreclr/jit/lower.h index 5d4cc1ac080a19..0538c661e16260 100644 --- a/src/coreclr/jit/lower.h +++ b/src/coreclr/jit/lower.h @@ -242,16 +242,16 @@ class Lowering final : public Phase GenTree* oldUseNode = use.Def(); if ((oldUseNode->gtOper != GT_LCL_VAR) || (tempNum != BAD_VAR_NUM)) { - GenTree* assign; - use.ReplaceWithLclVar(comp, tempNum, &assign); + GenTree* store; + use.ReplaceWithLclVar(comp, tempNum, &store); GenTree* newUseNode = use.Def(); ContainCheckRange(oldUseNode->gtNext, newUseNode); - // We need to lower the LclVar and assignment since there may be certain + // We need to lower the LclVar and store since there may be certain // types or scenarios, such as TYP_SIMD12, that need special handling - LowerNode(assign); + LowerNode(store); LowerNode(newUseNode); return newUseNode->AsLclVar(); diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index ebedf7849004df..831f6c7ac194cf 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -3995,11 +3995,8 @@ void LinearScan::spillGCRefs(RefPosition* killRefPosition) bool needsKill = varTypeIsGC(assignedInterval->registerType); if (!needsKill) { - // The importer will assign a GC type to the rhs of an assignment if the lhs type is a GC type, - // even if the rhs is not. See the CEE_STLOC* case in impImportBlockCode(). As a result, - // we can have a 'GT_LCL_VAR' node with a GC type, when the lclVar itself is an integer type. + // We can have a 'GT_LCL_VAR' node with a GC type, when the lclVar itself is an integer type. // The emitter will mark this register as holding a GC type. Therefore, we must spill this value. 
- // This was exposed on Arm32 with EH write-thru. if ((assignedInterval->recentRefPosition != nullptr) && (assignedInterval->recentRefPosition->treeNode != nullptr)) { diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index eb22f9d8f9ce5a..5f53d088f8a2ed 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -581,7 +581,7 @@ GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) // we fix this by copying the GC pointer to a non-gc pointer temp. noway_assert(!varTypeIsGC(dstType) && "How can we have a cast to a GCRef here?"); - // We generate an assignment to an int and then do the cast from an int. With this we avoid + // We generate a store to an int and then do the cast from an int. With this we avoid // the gc problem and we allow casts to bytes, longs, etc... unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC")); oper->gtType = TYP_I_IMPL; @@ -936,9 +936,9 @@ void CallArgs::ArgsComplete(Compiler* comp, GenTreeCall* call) } #endif // FEATURE_ARG_SPLIT - // If the argument tree contains an assignment (GTF_ASG) then the argument and + // If the argument tree contains a store (GTF_ASG) then the argument and // and every earlier argument (except constants) must be evaluated into temps - // since there may be other arguments that follow and they may use the value being assigned. + // since there may be other arguments that follow and they may use the value being defined. 
// // EXAMPLE: ArgTab is "a, a=5, a" // -> when we see the second arg "a=5" @@ -1030,7 +1030,7 @@ void CallArgs::ArgsComplete(Compiler* comp, GenTreeCall* call) // with a GLOB_EFFECT must eval to temp (this is because everything with SIDE_EFFECT // has to be kept in the right order since we will move the call to the first position) - // For calls we don't have to be quite as conservative as we are with an assignment + // For calls we don't have to be quite as conservative as we are with stores // since the call won't be modifying any non-address taken LclVars. if (treatLikeCall) @@ -1157,7 +1157,7 @@ void CallArgs::ArgsComplete(Compiler* comp, GenTreeCall* call) { if ((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) { - // Spill multireg struct arguments that have Assignments or Calls embedded in them. + // Spill multireg struct arguments that have stores or calls embedded in them. SetNeedsTemp(&arg); } else if (!argx->OperIsLocalRead() && !argx->OperIsLoad()) @@ -1654,7 +1654,7 @@ GenTree* CallArgs::MakeTmpArgNode(Compiler* comp, CallArg* arg) // // 3. Early: , Late: nullptr // Arguments that are passed on stack and that do not need an explicit -// assignment in the early node list do not require any late node. +// temp store in the early node list do not require any late node. // void CallArgs::EvalArgsToTemps(Compiler* comp, GenTreeCall* call) { @@ -1699,8 +1699,8 @@ void CallArgs::EvalArgsToTemps(Compiler* comp, GenTreeCall* call) } else { - // Create a temp assignment for the argument - // Put the temp in the gtCallLateArgs list + // Create a temp store for the argument + // Put the temp in the late arg list #ifdef DEBUG if (comp->verbose) @@ -1861,20 +1861,20 @@ void CallArgs::SetNeedsTemp(CallArg* arg) } //------------------------------------------------------------------------------ -// fgMakeTemp: Make a temp variable with a right-hand side expression as the assignment. +// fgMakeTemp: Make a temp variable and store 'value' into it. 
// // Arguments: -// rhs - The right-hand side expression. +// value - The expression to store to a temp. // // Return Value: -// 'TempInfo' data that contains the GT_STORE_LCL_VAR and GT_LCL_VAR nodes for store -// and variable load respectively. +// 'TempInfo' data that contains the GT_STORE_LCL_VAR and GT_LCL_VAR nodes for +// store and variable load respectively. // -TempInfo Compiler::fgMakeTemp(GenTree* rhs) +TempInfo Compiler::fgMakeTemp(GenTree* value) { unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgMakeTemp is creating a new local variable")); - GenTree* store = gtNewTempStore(lclNum, rhs); - GenTree* load = gtNewLclvNode(lclNum, genActualType(rhs)); + GenTree* store = gtNewTempStore(lclNum, value); + GenTree* load = gtNewLclvNode(lclNum, genActualType(value)); TempInfo tempInfo{}; tempInfo.store = store; @@ -3097,9 +3097,9 @@ unsigned CallArgs::CountUserArgs() // arguments, e.g. into registers or onto the stack. // // The "non-late arguments", are doing the in-order evaluation of the -// arguments that might have side-effects, such as embedded assignments, -// calls or possible throws. In these cases, it and earlier arguments must -// be evaluated to temps. +// arguments that might have side-effects, such as embedded stores, calls +// or possible throws. In these cases, it and earlier arguments must be +// evaluated to temps. // // On targets with a fixed outgoing argument area (FEATURE_FIXED_OUT_ARGS), // if we have any nested calls, we need to defer the copying of the argument @@ -4213,8 +4213,8 @@ void Compiler::fgMoveOpsLeft(GenTree* tree) // // We expand the GT_INDEX_ADDR node into a larger tree that evaluates the array // base and index. The simplest expansion is a GT_COMMA with a GT_BOUNDS_CHECK. -// For complex array or index expressions one or more GT_COMMA assignments -// are inserted so that we only evaluate the array or index expressions once. 
+// For complex array or index expressions one or more GT_COMMA stores are inserted +// so that we only evaluate the array or index expressions once. // // The fully expanded tree is then morphed. This causes gtFoldExpr to // perform local constant prop and reorder the constants in the tree and @@ -4298,7 +4298,7 @@ GenTree* Compiler::fgMorphIndexAddr(GenTreeIndexAddr* indexAddr) GenTree* arrRef2 = nullptr; // The second copy will be used in array address expression GenTree* index2 = nullptr; - // If the arrRef or index expressions involves an assignment, a call, or reads from global memory, + // If the arrRef or index expressions involves a store, a call, or reads from global memory, // then we *must* allocate a temporary in which to "localize" those values, to ensure that the // same values are used in the bounds check and the actual dereference. // Also we allocate the temporary when the expression is sufficiently complex/expensive. We special @@ -4460,7 +4460,7 @@ GenTree* Compiler::fgMorphIndexAddr(GenTreeIndexAddr* indexAddr) GenTree* tree = addr; - // Prepend the bounds check and the assignment trees that were created (if any). + // Prepend the bounds check and the store trees that were created (if any). if (boundsCheck != nullptr) { // This is changing a value dependency (INDEX_ADDR node) into a flow @@ -4606,9 +4606,9 @@ GenTree* Compiler::fgMorphExpandStackArgForVarArgs(GenTreeLclVarCommon* lclNode) GenTree* argNode; if (lclNode->OperIsLocalStore()) { - GenTree* data = lclNode->Data(); - argNode = lclNode->TypeIs(TYP_STRUCT) ? gtNewStoreBlkNode(lclNode->GetLayout(this), argAddr, data) - : gtNewStoreIndNode(lclNode->TypeGet(), argAddr, data)->AsIndir(); + GenTree* value = lclNode->Data(); + argNode = lclNode->TypeIs(TYP_STRUCT) ? 
gtNewStoreBlkNode(lclNode->GetLayout(this), argAddr, value) + : gtNewStoreIndNode(lclNode->TypeGet(), argAddr, value)->AsIndir(); } else if (lclNode->OperIsLocalRead()) { @@ -4778,8 +4778,8 @@ GenTree* Compiler::fgMorphExpandLocal(GenTreeLclVarCommon* lclNode) return expandedTree; } - // Small-typed arguments and aliased locals are normalized on load. Other small-typed locals are - // normalized on store. If it is an assignment to one of the latter, insert the cast on source. + // Small-typed arguments and aliased locals are normalized on load. Other small-typed + // locals are normalized on store. If it is the latter case, insert the cast on source. if (fgGlobalMorph && lclNode->OperIs(GT_STORE_LCL_VAR) && genActualTypeIsInt(lclNode)) { LclVarDsc* varDsc = lvaGetDesc(lclNode); @@ -7355,9 +7355,9 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa // block won't be in the loop (it's assumed to have no predecessors), we need to update the special local here. if (!info.compIsStatic && (lvaArg0Var != info.compThisArg)) { - GenTree* arg0Assignment = gtNewStoreLclVarNode(lvaArg0Var, gtNewLclVarNode(info.compThisArg)); - Statement* arg0AssignmentStmt = gtNewStmt(arg0Assignment, callDI); - fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0AssignmentStmt); + GenTree* arg0Store = gtNewStoreLclVarNode(lvaArg0Var, gtNewLclVarNode(info.compThisArg)); + Statement* arg0StoreStmt = gtNewStmt(arg0Store, callDI); + fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0StoreStmt); } // If compInitMem is set, we may need to zero-initialize some locals. Normally it's done in the prolog @@ -7458,17 +7458,17 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa // fgAssignRecursiveCallArgToCallerParam : Assign argument to a recursive call to the corresponding caller parameter. 
// // Arguments: -// arg - argument to assign -// late - whether to use early or late arg -// lclParamNum - the lcl num of the parameter -// block --- basic block the call is in -// callILOffset - IL offset of the call -// tmpAssignmentInsertionPoint - tree before which temp assignment should be inserted (if necessary) -// paramAssignmentInsertionPoint - tree before which parameter assignment should be inserted +// arg - argument to assign +// callArg - the corresponding call argument +// lclParamNum - the lcl num of the parameter +// block - basic block the call is in +// callILOffset - IL offset of the call +// tmpAssignmentInsertionPoint - tree before which temp assignment should be inserted (if necessary) +// paramAssignmentInsertionPoint - tree before which parameter assignment should be inserted // // Return Value: // parameter assignment statement if one was inserted; nullptr otherwise. - +// Statement* Compiler::fgAssignRecursiveCallArgToCallerParam(GenTree* arg, CallArg* callArg, unsigned lclParamNum, @@ -7517,9 +7517,7 @@ Statement* Compiler::fgAssignRecursiveCallArgToCallerParam(GenTree* arg, { if (argInTemp == nullptr) { - // The argument is not assigned to a temp. We need to create a new temp and insert an assignment. - // TODO: we can avoid a temp assignment if we can prove that the argument tree - // doesn't involve any caller parameters. + // The argument is not assigned to a temp. We need to create a new temp and insert a store. 
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp")); lvaTable[tmpNum].lvType = arg->gtType; GenTree* tempSrc = arg; @@ -7591,7 +7589,7 @@ GenTree* Compiler::fgMorphCall(GenTreeCall* call) compCurBB->SetFlags(BBF_HAS_CALL); // This block has a call - JITDUMP("\nInserting assignment of a multi-reg call result to a temp:\n"); + JITDUMP("\nInserting store of a multi-reg call result to a temp:\n"); DISPSTMT(storeStmt); INDEBUG(result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); @@ -7935,7 +7933,7 @@ GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call) result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewLclvNode(varNum2, TYP_I_IMPL)); // [var2] + var2 - // Now stitch together the two assignment and the calculation of result into a single tree + // Now stitch together the two stores and the calculation of result into a single tree GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, storeVar2, result); result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, storeVar1, commaTree); } @@ -8859,8 +8857,8 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA if (op1 != nullptr) { // If we are entering the "then" part of a Qmark-Colon we must - // save the state of the current copy assignment table - // so that we can restore this state when entering the "else" part + // save the state of the current assertions table so that we can + // restore this state when entering the "else" part if (isQmarkColon) { noway_assert(optLocalAssertionProp); @@ -8901,8 +8899,8 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1, mac); // If we are exiting the "then" part of a Qmark-Colon we must - // save the state of the current copy assignment table - // so that we can merge this state with the "else" part exit + // save the state of the current assertions table so that we + // can merge this state with the "else" part exit if (isQmarkColon) { 
noway_assert(optLocalAssertionProp); @@ -8917,7 +8915,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA if (op2 != nullptr) { // If we are entering the "else" part of a Qmark-Colon we must - // reset the state of the current copy assignment table + // reset the state of the current assertions table if (isQmarkColon) { noway_assert(optLocalAssertionProp); @@ -8927,8 +8925,8 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA tree->AsOp()->gtOp2 = op2 = fgMorphTree(op2); // If we are exiting the "else" part of a Qmark-Colon we must - // merge the state of the current copy assignment table with - // that of the exit of the "then" part. + // merge the state of the current assertions table with that + // of the exit of the "then" part. // if (isQmarkColon) { @@ -9772,11 +9770,11 @@ GenTree* Compiler::fgMorphFinalizeIndir(GenTreeIndir* indir) addr->ChangeType(indir->TypeGet()); if (indir->OperIs(GT_STOREIND)) { - GenTree* data = indir->Data(); + GenTree* value = indir->Data(); addr->SetOper(GT_STORE_LCL_FLD); - addr->AsLclFld()->Data() = data; + addr->AsLclFld()->Data() = value; addr->gtFlags |= (GTF_ASG | GTF_VAR_DEF); - addr->AddAllEffectsFlags(data); + addr->AddAllEffectsFlags(value); } else { @@ -9902,7 +9900,7 @@ GenTree* Compiler::fgOptimizeCast(GenTreeCast* cast) } //------------------------------------------------------------------------ -// fgOptimizeCastOnAssignment: Optimizes the supplied store tree with a GT_CAST node. +// fgOptimizeCastOnStore: Optimizes the supplied store tree with a GT_CAST node. // // Arguments: // tree - the store to optimize @@ -12284,9 +12282,9 @@ GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree) if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) || ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0)) { - // We can't do anything if the tree has assignments, calls, or volatile - // reads. 
Note that we allow GTF_EXCEPT side effect since any exceptions - // thrown by the original tree will be thrown by the transformed tree as well. + // We can't do anything if the tree has stores, calls, or volatile reads. Note that we allow + // GTF_EXCEPT side effect since any exceptions thrown by the original tree will be thrown by + // the transformed tree as well. return nullptr; } @@ -12812,7 +12810,7 @@ void Compiler::fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* ((curAssertion->op2.kind == O2K_LCLVAR_COPY) && (curAssertion->op2.lcl.lclNum == lclNum))); if (verbose) { - printf("\nThe assignment "); + printf("\nThe store "); printTreeID(tree); printf(" using V%02u removes: ", curAssertion->op1.lcl.lclNum); optPrintAssertion(curAssertion, index); diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp index 84c30e64621a2f..0f49e62c647f87 100644 --- a/src/coreclr/jit/morphblock.cpp +++ b/src/coreclr/jit/morphblock.cpp @@ -63,7 +63,7 @@ class MorphInitBlockHelper }; //------------------------------------------------------------------------ -// MorphInitBlock: Morph a block initialization assignment tree. +// MorphInitBlock: Morph a block initialization store tree. // // Arguments: // comp - a compiler instance; @@ -318,13 +318,13 @@ void MorphInitBlockHelper::MorphStructCases() //------------------------------------------------------------------------ // InitFieldByField: Attempts to promote a local block init tree to a tree -// of promoted field initialization assignments. +// of promoted field initialization stores. // // If successful, will set "m_transformationDecision" to "FieldByField" and // "m_result" to the final tree. 
// // Notes: -// This transforms a single block initialization assignment like: +// This transforms a single block initialization store like: // // * STORE_BLK struct<12> (init) // +--* LCL_ADDR byref V02 loc0 @@ -481,7 +481,7 @@ void MorphInitBlockHelper::TryPrimitiveInit() //------------------------------------------------------------------------ // EliminateCommas: Prepare for block morphing by removing commas from the -// source operand of the assignment. +// source operand of the store. // // Parameters: // commaPool - [out] Pool of GT_COMMA nodes linked by their gtNext nodes that @@ -620,7 +620,7 @@ class MorphCopyBlockHelper : public MorphInitBlockHelper }; //------------------------------------------------------------------------ -// MorphCopyBlock: Morph a block copy assignment tree. +// MorphCopyBlock: Morph a block copy tree. // // Arguments: // comp - a compiler instance; @@ -676,7 +676,7 @@ void MorphCopyBlockHelper::PrepareSrc() } // TrySpecialCases: check special cases that require special transformations. -// The current special cases include assignments with calls in RHS. +// The current special cases include stores with calls as values. // void MorphCopyBlockHelper::TrySpecialCases() { @@ -707,7 +707,7 @@ void MorphCopyBlockHelper::TrySpecialCases() // void MorphCopyBlockHelper::MorphStructCases() { - JITDUMP("block assignment to morph:\n"); + JITDUMP("block store to morph:\n"); DISPTREE(m_store); if (m_dstVarDsc != nullptr) @@ -775,7 +775,7 @@ void MorphCopyBlockHelper::MorphStructCases() requiresCopyBlock = true; } - // Can we use field by field assignment for the dest? + // Can we use field by field copy for the dest? if (m_dstDoFldStore && m_dstVarDsc->lvAnySignificantPadding) { JITDUMP(" dest has significant padding"); @@ -783,7 +783,7 @@ void MorphCopyBlockHelper::MorphStructCases() requiresCopyBlock = true; } - // Can we use field by field assignment for the src? + // Can we use field by field copy for the src? 
if (m_srcDoFldStore && m_srcVarDsc->lvAnySignificantPadding) { JITDUMP(" src has significant padding"); @@ -805,8 +805,8 @@ void MorphCopyBlockHelper::MorphStructCases() } #endif // TARGET_ARM - // Don't use field by field assignment if the src is a call, lowering will handle - // it without spilling the call result into memory to access the individual fields. + // Don't use field by field store if the src is a call, lowering will handle it + // without spilling the call result into memory to access the individual fields. // For HWI/SIMD/CNS_VEC, we don't expect promoted destinations - we purposefully // mark SIMDs used in such copies as "used in a SIMD intrinsic", to prevent their // promotion. @@ -944,12 +944,12 @@ void MorphCopyBlockHelper::MorphStructCases() // If we require a copy block the set both of the field assign bools to false if (requiresCopyBlock) { - // If a copy block is required then we won't do field by field assignments + // If a copy block is required then we won't do field by field stores m_dstDoFldStore = false; m_srcDoFldStore = false; } - JITDUMP(requiresCopyBlock ? " this requires a CopyBlock.\n" : " using field by field assignments.\n"); + JITDUMP(requiresCopyBlock ? " this requires a CopyBlock.\n" : " using field by field stores.\n"); if (requiresCopyBlock) { @@ -996,7 +996,7 @@ void MorphCopyBlockHelper::MorphStructCases() } //------------------------------------------------------------------------ -// TryPrimitiveCopy: Attempt to replace a block assignment with a scalar assignment. +// TryPrimitiveCopy: Attempt to replace a block store with a scalar store. // // If successful, will set "m_transformationDecision" to "OneStoreBlock". // @@ -1074,7 +1074,7 @@ void MorphCopyBlockHelper::TryPrimitiveCopy() } //------------------------------------------------------------------------ -// CopyFieldByField: transform the copy block to a field by field assignment. +// CopyFieldByField: transform the copy block to a field by field store. 
// // Notes: // We do it for promoted lclVars which fields can be enregistered. @@ -1108,7 +1108,7 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() if (m_dstDoFldStore && m_srcDoFldStore) { - // To do fieldwise assignments for both sides. + // To do fieldwise stores for both sides. // The structs do not have to be the same exact types but have to have same field types // at the same offsets. assert(m_dstLclNum != BAD_VAR_NUM && m_srcLclNum != BAD_VAR_NUM); @@ -1468,9 +1468,9 @@ bool MorphCopyBlockHelper::CanReuseAddressForDecomposedStore(GenTree* addrNode) // // Return Value: // We can return the original block copy unmodified (least desirable, but always correct) -// We can return a single assignment, when TryPrimitiveCopy transforms it (most desirable). +// We can return a single store, when TryPrimitiveCopy transforms it (most desirable). // If we have performed struct promotion of the Source() or the Dest() then we will try to -// perform a field by field assignment for each of the promoted struct fields. +// perform a field by field store for each of the promoted struct fields. // // Assumptions: // The child nodes for tree have already been Morphed. @@ -1478,10 +1478,10 @@ bool MorphCopyBlockHelper::CanReuseAddressForDecomposedStore(GenTree* addrNode) // Notes: // If we leave it as a block copy we will call lvaSetVarDoNotEnregister() on Source() or Dest() // if they cannot be enregistered. -// When performing a field by field assignment we can have one of Source() or Dest treated as a blob of bytes +// When performing a field by field store we can have one of Source() or Dest treated as a blob of bytes // and in such cases we will call lvaSetVarDoNotEnregister() on the one treated as a blob of bytes. // If the Source() or Dest() is a struct that has a "CustomLayout" and "ContainsHoles" then we -// can not use a field by field assignment and must leave the original block copy unmodified. 
+// can not use a field by field store and must leave the original block copy unmodified. // GenTree* Compiler::fgMorphCopyBlock(GenTree* tree) { @@ -1489,14 +1489,14 @@ GenTree* Compiler::fgMorphCopyBlock(GenTree* tree) } //------------------------------------------------------------------------ -// fgMorphInitBlock: Morph a block initialization assignment tree. +// fgMorphInitBlock: Morph a block initialization store tree. // // Arguments: // tree - A store tree that performs block initialization. // // Return Value: // If the destination is a promoted struct local variable then we will try to -// perform a field by field assignment for each of the promoted struct fields. +// perform a field by field store for each of the promoted struct fields. // This is not always possible (e.g. if the struct is address exposed). // // Otherwise the original store tree is returned unmodified, note that the diff --git a/src/coreclr/jit/objectalloc.cpp b/src/coreclr/jit/objectalloc.cpp index 0af5f4ba7a9929..0c995997d81388 100644 --- a/src/coreclr/jit/objectalloc.cpp +++ b/src/coreclr/jit/objectalloc.cpp @@ -318,7 +318,7 @@ void ObjectAllocator::ComputeStackObjectPointers(BitVecTraits* bitVecTraits) if (DoesLclVarPointToStack(rhsLclNum)) { - // The only assignment to lclNum local is definitely-stack-pointing + // The only store to lclNum local is the definitely-stack-pointing // rhsLclNum local so lclNum local is also definitely-stack-pointing. 
MarkLclVarAsDefinitelyStackPointing(lclNum); } diff --git a/src/coreclr/jit/optcse.cpp b/src/coreclr/jit/optcse.cpp index acaed299aad42a..41b15792e24ae1 100644 --- a/src/coreclr/jit/optcse.cpp +++ b/src/coreclr/jit/optcse.cpp @@ -1798,7 +1798,7 @@ bool CSE_HeuristicCommon::CanConsiderTree(GenTree* tree, bool isReturn) } // Don't allow non-SIMD struct CSEs under a return; we don't fully - // re-morph these if we introduce a CSE assignment, and so may create + // re-morph these if we introduce a CSE store, and so may create // IR that lower is not yet prepared to handle. // if (isReturn && varTypeIsStruct(tree->gtType) && !varTypeIsSIMD(tree->gtType)) @@ -4465,7 +4465,7 @@ bool CSE_HeuristicCommon::IsCompatibleType(var_types cseLclVarTyp, var_types exp // Arguments: // successfulCandidate - cse candidate to perform // -// It will replace all of the CSE defs with assignments to a new "cse0" LclVar +// It will replace all of the CSE defs with writes to a new "cse0" LclVar // and will replace all of the CSE uses with reads of the "cse0" LclVar // // It will also put cse0 into SSA if there is just one def. @@ -4528,8 +4528,8 @@ void CSE_HeuristicCommon::PerformCSE(CSE_Candidate* successfulCandidate) m_pCompiler->optCSEcount++; m_pCompiler->Metrics.CseCount++; - // Walk all references to this CSE, adding an assignment - // to the CSE temp to all defs and changing all refs to + // Walk all references to this CSE, adding an store to + // the CSE temp to all defs and changing all refs to // a simple use of the CSE temp. // // Later we will unmark any nested CSE's for the CSE uses. @@ -4891,7 +4891,7 @@ void CSE_HeuristicCommon::PerformCSE(CSE_Candidate* successfulCandidate) if (!store->OperIs(GT_STORE_LCL_VAR)) { // This can only be the case for a struct in which the 'val' was a COMMA, so - // the assignment is sunk below it. + // the store is sunk below it. 
store = store->gtEffectiveVal(); noway_assert(origStore->OperIs(GT_COMMA) && (origStore == val)); } @@ -4958,7 +4958,7 @@ void CSE_HeuristicCommon::PerformCSE(CSE_Candidate* successfulCandidate) /* Create a comma node for the CSE assignment */ cse = m_pCompiler->gtNewOperNode(GT_COMMA, expTyp, origStore, cseUse); cse->gtVNPair = cseUse->gtVNPair; // The comma's value is the same as 'val' - // as the assignment to the CSE LclVar + // as the store to the CSE LclVar // cannot add any new exceptions } diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index e32fad95efa93c..2767686ecfd7cb 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -6022,21 +6022,21 @@ typedef JitHashTable, unsigned> Lc // Notes: // This phase iterates over basic blocks starting with the first basic block until there is no unique // basic block successor or until it detects a loop. It keeps track of local nodes it encounters. -// When it gets to an assignment to a local variable or a local field, it checks whether the assignment +// When it gets to a store to a local variable or a local field, it checks whether the store // is the first reference to the local (or to the parent of the local field), and, if so, // it may do one of two optimizations: // 1. If the following conditions are true: // the local is untracked, -// the rhs of the assignment is 0, +// the value to store is 0, // the local is guaranteed to be fully initialized in the prolog, // then the explicit zero initialization is removed. // 2. 
If the following conditions are true: -// the assignment is to a local (and not a field), -// the local is not lvLiveInOutOfHndlr or no exceptions can be thrown between the prolog and the assignment, -// either the local has no gc pointers or there are no gc-safe points between the prolog and the assignment, +// the store is to a local (and not a field), +// the local is not lvLiveInOutOfHndlr or no exceptions can be thrown between the prolog and the store, +// either the local has no gc pointers or there are no gc-safe points between the prolog and the store, // then the local is marked with lvHasExplicitInit which tells the codegen not to insert zero initialization // for this local in the prolog. - +// void Compiler::optRemoveRedundantZeroInits() { #ifdef DEBUG @@ -6173,7 +6173,7 @@ void Compiler::optRemoveRedundantZeroInits() break; } - // The local hasn't been referenced before this assignment. + // The local hasn't been referenced before this store. bool removedExplicitZeroInit = false; bool isEntire = !tree->IsPartialLclFld(this); @@ -6195,7 +6195,7 @@ void Compiler::optRemoveRedundantZeroInits() { // We are guaranteed to have a zero initialization in the prolog or a // dominating explicit zero initialization and the local hasn't been redefined - // between the prolog and this explicit zero initialization so the assignment + // between the prolog and this explicit zero initialization so the store // can be safely removed. if (tree == stmt->GetRootNode()) { diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp index eae6d627935837..2328dba7d63688 100644 --- a/src/coreclr/jit/rangecheck.cpp +++ b/src/coreclr/jit/rangecheck.cpp @@ -486,7 +486,7 @@ bool RangeCheck::IsMonotonicallyIncreasing(GenTree* expr, bool rejectNegativeCon return true; } } - // If the rhs expr is local, then try to find the def of the local. + // If the expr is local, then try to find the def of the local. 
else if (expr->IsLocal()) { LclSsaVarDsc* ssaDef = GetSsaDefStore(expr->AsLclVarCommon()); @@ -521,7 +521,7 @@ bool RangeCheck::IsMonotonicallyIncreasing(GenTree* expr, bool rejectNegativeCon return false; } -// Given a lclvar use, try to find the lclvar's defining assignment and its containing block. +// Given a lclvar use, try to find the lclvar's defining store and its containing block. LclSsaVarDsc* RangeCheck::GetSsaDefStore(GenTreeLclVarCommon* lclUse) { unsigned ssaNum = lclUse->GetSsaNum(); diff --git a/src/coreclr/jit/rangecheck.h b/src/coreclr/jit/rangecheck.h index cd4193f1e2fb73..9d7b064387174a 100644 --- a/src/coreclr/jit/rangecheck.h +++ b/src/coreclr/jit/rangecheck.h @@ -691,7 +691,7 @@ class RangeCheck // Does the binary operation between the operands overflow? Check recursively. bool DoesBinOpOverflow(BasicBlock* block, GenTreeOp* binop); - // Does the phi operands involve an assignment that could overflow? + // Do the phi operands involve a definition that could overflow? bool DoesPhiOverflow(BasicBlock* block, GenTree* expr); // Find the def of the "expr" local and recurse on the arguments if any of them involve a @@ -710,9 +710,7 @@ class RangeCheck // Is the binary operation increasing the value. bool IsBinOpMonotonicallyIncreasing(GenTreeOp* binop); - // Given an "expr" trace its rhs and their definitions to check if all the assignments - // are monotonically increasing. - // + // Given an expression trace its value to check if it is monotonically increasing. bool IsMonotonicallyIncreasing(GenTree* tree, bool rejectNegativeConst); // We allocate a budget to avoid walking long UD chains. 
When traversing each link in the UD diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index e7569e86c2ed33..d4a678875848bf 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -1028,8 +1028,8 @@ bool Compiler::optJumpThreadCheck(BasicBlock* const block, BasicBlock* const dom // Since flow is going to bypass block, make sure there // is nothing in block that can cause a side effect. // - // For non-PHI RBO, we neglect PHI assignments. This can leave SSA - // in an incorrect state but so far it has not yet caused problems. + // For non-PHI RBO, we neglect PHI stores. This can leave SSA in + // an incorrect state but so far it has not yet caused problems. // // For PHI-based RBO we need to be more cautious and insist that // any PHI is locally consumed, so that if we bypass the block we @@ -1894,12 +1894,12 @@ bool Compiler::optRedundantRelop(BasicBlock* const block) break; } - GenTree* const prevTreeData = prevTree->AsLclVar()->Data(); + GenTree* const prevTreeValue = prevTree->AsLclVar()->Data(); // If prevTree has side effects, bail, unless it is in the immediately preceding statement. // We'll handle exceptional side effects with VNs below. // - if (((prevTree->gtFlags & (GTF_CALL | GTF_ORDER_SIDEEFF)) != 0) || ((prevTreeData->gtFlags & GTF_ASG) != 0)) + if (((prevTree->gtFlags & (GTF_CALL | GTF_ORDER_SIDEEFF)) != 0) || ((prevTreeValue->gtFlags & GTF_ASG) != 0)) { if (prevStmt->GetNextStmt() != stmt) { @@ -1913,13 +1913,13 @@ bool Compiler::optRedundantRelop(BasicBlock* const block) // If we are seeing PHIs we have run out of interesting stmts. // - if (prevTreeData->OperIs(GT_PHI)) + if (prevTreeValue->OperIs(GT_PHI)) { JITDUMP(" -- prev tree is a phi\n"); break; } - // Figure out what local is assigned here. + // Figure out what local is defined here. 
// const unsigned prevTreeLclNum = prevTree->AsLclVarCommon()->GetLclNum(); LclVarDsc* const prevTreeLclDsc = lvaGetDesc(prevTreeLclNum); @@ -1946,7 +1946,7 @@ bool Compiler::optRedundantRelop(BasicBlock* const block) // If the normal liberal VN of RHS is the normal liberal VN of the current tree, or is "related", // consider forward sub. // - const ValueNum domCmpVN = vnStore->VNNormalValue(prevTreeData->GetVN(VNK_Liberal)); + const ValueNum domCmpVN = vnStore->VNNormalValue(prevTreeValue->GetVN(VNK_Liberal)); bool matched = false; ValueNumStore::VN_RELATION_KIND vnRelationMatch = ValueNumStore::VN_RELATION_KIND::VRK_Same; @@ -1969,11 +1969,11 @@ bool Compiler::optRedundantRelop(BasicBlock* const block) JITDUMP(" -- prev tree has relop with %s liberal VN\n", ValueNumStore::VNRelationString(vnRelationMatch)); - // If the jump tree VN has exceptions, verify that the RHS tree has a superset. + // If the jump tree VN has exceptions, verify that the value tree has a superset. // if (treeExcVN != vnStore->VNForEmptyExcSet()) { - const ValueNum prevTreeExcVN = vnStore->VNExceptionSet(prevTreeData->GetVN(VNK_Liberal)); + const ValueNum prevTreeExcVN = vnStore->VNExceptionSet(prevTreeValue->GetVN(VNK_Liberal)); if (!vnStore->VNExcIsSubset(prevTreeExcVN, treeExcVN)) { @@ -1982,14 +1982,14 @@ bool Compiler::optRedundantRelop(BasicBlock* const block) } } - // See if we can safely move a copy of prevTreeRHS later, to replace tree. + // See if we can safely move a copy of prevTreeValue later, to replace tree. // We can, if none of its lcls are killed. 
// bool interferes = false; for (unsigned int i = 0; i < definedLocalsCount; i++) { - if (gtTreeHasLocalRead(prevTreeData, definedLocals[i])) + if (gtTreeHasLocalRead(prevTreeValue, definedLocals[i])) { JITDUMP(" -- prev tree ref to V%02u interferes\n", definedLocals[i]); interferes = true; @@ -2002,7 +2002,7 @@ bool Compiler::optRedundantRelop(BasicBlock* const block) break; } - if (gtMayHaveStoreInterference(prevTreeData, tree)) + if (gtMayHaveStoreInterference(prevTreeValue, tree)) { JITDUMP(" -- prev tree has an embedded store that interferes with [%06u]\n", dspTreeID(tree)); break; @@ -2010,7 +2010,7 @@ bool Compiler::optRedundantRelop(BasicBlock* const block) // Heuristic: only forward sub a relop // - if (!prevTreeData->OperIsCompare()) + if (!prevTreeValue->OperIsCompare()) { JITDUMP(" -- prev tree is not relop\n"); continue; @@ -2026,7 +2026,7 @@ bool Compiler::optRedundantRelop(BasicBlock* const block) continue; } - if ((prevTreeData->gtFlags & GTF_GLOB_REF) != 0) + if ((prevTreeValue->gtFlags & GTF_GLOB_REF) != 0) { bool hasExtraUses = false; @@ -2053,7 +2053,7 @@ bool Compiler::optRedundantRelop(BasicBlock* const block) } JITDUMP(" -- prev tree is viable candidate for relop fwd sub!\n"); - candidateTree = prevTreeData; + candidateTree = prevTreeValue; candidateStmt = prevStmt; candidateVnRelation = vnRelationMatch; } diff --git a/src/coreclr/jit/scopeinfo.cpp b/src/coreclr/jit/scopeinfo.cpp index 1dd0330a859135..d2d9a65d01be3a 100644 --- a/src/coreclr/jit/scopeinfo.cpp +++ b/src/coreclr/jit/scopeinfo.cpp @@ -41,7 +41,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX * This should only be needed if some basic block are deleted/out of order, * etc. * Also, - * o At every assignment to a variable, siCheckVarScope() adds an open scope + * o At every store to a variable, siCheckVarScope() adds an open scope * for the variable being assigned to. 
* o UpdateLifeVar() calls siUpdate() which closes scopes for variables which * are not live anymore. diff --git a/src/coreclr/jit/simd.cpp b/src/coreclr/jit/simd.cpp index b6879dafc498d9..1157bf9e5bfc94 100644 --- a/src/coreclr/jit/simd.cpp +++ b/src/coreclr/jit/simd.cpp @@ -738,9 +738,9 @@ GenTree* Compiler::CreateAddressNodeForSimdHWIntrinsicCreate(GenTree* tree, var_ } //------------------------------------------------------------------------------- -// impMarkContiguousSIMDFieldStores: Try to identify if there are contiguous -// assignments from SIMD field to memory. If there are, then mark the related -// lclvar so that it won't be promoted. +// impMarkContiguousSIMDFieldStores: Try to identify if there are contiguous stores +// from SIMD field to memory. If there are, then mark the related lclvar so that it +// won't be promoted. // // Arguments: // stmt - GenTree*. Input statement node. diff --git a/src/coreclr/jit/targetarm.h b/src/coreclr/jit/targetarm.h index a03c307094ad2c..0f56ebe1ce989a 100644 --- a/src/coreclr/jit/targetarm.h +++ b/src/coreclr/jit/targetarm.h @@ -137,8 +137,8 @@ // ARM write barrier ABI (see vm\arm\asmhelpers.asm, vm\arm\asmhelpers.S): // CORINFO_HELP_ASSIGN_REF (JIT_WriteBarrier), CORINFO_HELP_CHECKED_ASSIGN_REF (JIT_CheckedWriteBarrier): // On entry: - // r0: the destination address (LHS of the assignment) - // r1: the object reference (RHS of the assignment) + // r0: the destination address of the store + // r1: the object reference to be stored // On exit: // r0: trashed // r3: trashed diff --git a/src/coreclr/jit/targetarm64.h b/src/coreclr/jit/targetarm64.h index cccbfdc6bae6b8..6d33d378bcd96e 100644 --- a/src/coreclr/jit/targetarm64.h +++ b/src/coreclr/jit/targetarm64.h @@ -153,8 +153,8 @@ // ARM64 write barrier ABI (see vm\arm64\asmhelpers.asm, vm\arm64\asmhelpers.S): // CORINFO_HELP_ASSIGN_REF (JIT_WriteBarrier), CORINFO_HELP_CHECKED_ASSIGN_REF (JIT_CheckedWriteBarrier): // On entry: - // x14: the destination address (LHS of 
the assignment) - // x15: the object reference (RHS of the assignment) + // x14: the destination address of the store + // x15: the object reference to be stored // On exit: // x12: trashed // x14: incremented by 8 diff --git a/src/coreclr/jit/targetloongarch64.h b/src/coreclr/jit/targetloongarch64.h index b045c43df7dfa9..d27bffa3aa698f 100644 --- a/src/coreclr/jit/targetloongarch64.h +++ b/src/coreclr/jit/targetloongarch64.h @@ -130,8 +130,8 @@ // LOONGARCH64 write barrier ABI (see vm/loongarch64/asmhelpers.S): // CORINFO_HELP_ASSIGN_REF (JIT_WriteBarrier), CORINFO_HELP_CHECKED_ASSIGN_REF (JIT_CheckedWriteBarrier): // On entry: - // t6: the destination address (LHS of the assignment) - // t7: the object reference (RHS of the assignment) + // t6: the destination address of the store + // t7: the object reference to be stored // On exit: // t0: trashed // t1: trashed diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 755c19d1386a88..7a6136577ddba3 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -10750,21 +10750,21 @@ void Compiler::fgValueNumberStore(GenTree* store) { assert(store->OperIsStore()); - GenTree* data = store->Data(); + GenTree* value = store->Data(); // Only normal values are to be stored in SSA defs, VN maps, etc. - ValueNumPair dataExcSet; - ValueNumPair dataVNPair; - vnStore->VNPUnpackExc(data->gtVNPair, &dataVNPair, &dataExcSet); - assert(dataVNPair.BothDefined()); + ValueNumPair valueExcSet; + ValueNumPair valueVNPair; + vnStore->VNPUnpackExc(value->gtVNPair, &valueVNPair, &valueExcSet); + assert(valueVNPair.BothDefined()); - // Is the type being stored different from the type computed by "data"? - if (data->TypeGet() != store->TypeGet()) + // Is the type being stored different from the type computed by "value"? 
+ if (value->TypeGet() != store->TypeGet()) { if (store->OperIsInitBlkOp()) { ValueNum initObjVN; - if (data->IsIntegralConst(0)) + if (value->IsIntegralConst(0)) { initObjVN = vnStore->VNForZeroObj(store->GetLayout(this)); } @@ -10773,31 +10773,29 @@ void Compiler::fgValueNumberStore(GenTree* store) initObjVN = vnStore->VNForExpr(compCurBB, TYP_STRUCT); } - dataVNPair.SetBoth(initObjVN); + valueVNPair.SetBoth(initObjVN); } - else if (data->TypeGet() == TYP_REF) + else if (value->TypeGet() == TYP_REF) { - // If we have an unsafe IL assignment of a TYP_REF to a non-ref (typically a TYP_BYREF) + // If we have an unsafe IL store of a TYP_REF to a non-ref (typically a TYP_BYREF) // then don't propagate this ValueNumber to the lhs, instead create a new unique VN. - dataVNPair.SetBoth(vnStore->VNForExpr(compCurBB, store->TypeGet())); + valueVNPair.SetBoth(vnStore->VNForExpr(compCurBB, store->TypeGet())); } else { - // This means that there is an implicit cast on the rhs value - // We will add a cast function to reflect the possible narrowing of the rhs value - dataVNPair = vnStore->VNPairForCast(dataVNPair, store->TypeGet(), data->TypeGet()); + // This means that there is an implicit cast on the value. + // We will add a cast function to reflect its possible narrowing. + valueVNPair = vnStore->VNPairForCast(valueVNPair, store->TypeGet(), value->TypeGet()); } } - // Now, record the new VN for an assignment (performing the indicated "state update"). - // It's safe to use gtEffectiveVal here, because the non-last elements of a comma list on the - // LHS will come before the assignment in evaluation order. + // Now, record the new VN for the store (performing the indicated "state update"). 
switch (store->OperGet()) { case GT_STORE_LCL_VAR: { GenTreeLclVarCommon* lcl = store->AsLclVarCommon(); - fgValueNumberLocalStore(store, lcl, 0, lvaLclExactSize(lcl->GetLclNum()), dataVNPair, + fgValueNumberLocalStore(store, lcl, 0, lvaLclExactSize(lcl->GetLclNum()), valueVNPair, /* normalize */ false); } break; @@ -10805,7 +10803,7 @@ void Compiler::fgValueNumberStore(GenTree* store) case GT_STORE_LCL_FLD: { GenTreeLclFld* lclFld = store->AsLclFld(); - fgValueNumberLocalStore(store, lclFld, lclFld->GetLclOffs(), lclFld->GetSize(), dataVNPair); + fgValueNumberLocalStore(store, lclFld, lclFld->GetLclOffs(), lclFld->GetSize(), valueVNPair); } break; @@ -10835,16 +10833,16 @@ void Compiler::fgValueNumberStore(GenTree* store) fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); offset = vnStore->ConstantValue(funcApp.m_args[2]); - fgValueNumberFieldStore(store, baseAddr, fldSeq, offset, storeSize, dataVNPair.GetLiberal()); + fgValueNumberFieldStore(store, baseAddr, fldSeq, offset, storeSize, valueVNPair.GetLiberal()); } else if (addrIsVNFunc && (funcApp.m_func == VNF_PtrToArrElem)) { - fgValueNumberArrayElemStore(store, &funcApp, storeSize, dataVNPair.GetLiberal()); + fgValueNumberArrayElemStore(store, &funcApp, storeSize, valueVNPair.GetLiberal()); } else if (addr->IsFieldAddr(this, &baseAddr, &fldSeq, &offset)) { assert(fldSeq != nullptr); - fgValueNumberFieldStore(store, baseAddr, fldSeq, offset, storeSize, dataVNPair.GetLiberal()); + fgValueNumberFieldStore(store, baseAddr, fldSeq, offset, storeSize, valueVNPair.GetLiberal()); } else { @@ -10855,8 +10853,8 @@ void Compiler::fgValueNumberStore(GenTree* store) // at byref loads if the current ByrefExposed VN happens to be // VNF_ByrefExposedStore with the same pointer VN, we could propagate the // VN from the RHS to the VN for the load. This would e.g. allow tracking - // values through assignments to out params. For now, just model this - // as an opaque GcHeap/ByrefExposed mutation. 
+ // values through stores to out params. For now, just model this as an + // opaque GcHeap/ByrefExposed mutation. fgMutateGcHeap(store DEBUGARG("assign-of-IND")); } } @@ -10867,7 +10865,7 @@ void Compiler::fgValueNumberStore(GenTree* store) } // Stores produce no values, and as such are given the "Void" VN. - ValueNumPair storeExcSet = dataExcSet; + ValueNumPair storeExcSet = valueExcSet; if (store->OperIsIndir()) { storeExcSet = vnStore->VNPUnionExcSet(store->AsIndir()->Addr()->gtVNPair, storeExcSet); @@ -11241,12 +11239,7 @@ void Compiler::fgValueNumberTree(GenTree* tree) if (GenTree::OperIsConst(oper)) { - // If this is a struct assignment, with a constant rhs, (i,.e. an initBlk), - // it is not useful to value number the constant. - if (tree->TypeGet() != TYP_STRUCT) - { - fgValueNumberTreeConst(tree); - } + fgValueNumberTreeConst(tree); } else if (GenTree::OperIsLeaf(oper)) { From 55535ed4c3aa224481b553784bef58c4efc14781 Mon Sep 17 00:00:00 2001 From: Larry Ewing Date: Fri, 5 Apr 2024 14:04:30 -0500 Subject: [PATCH 122/132] Change change export name mangling to match expectations (#100652) --- src/tasks/WasmAppBuilder/PInvokeTableGenerator.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tasks/WasmAppBuilder/PInvokeTableGenerator.cs b/src/tasks/WasmAppBuilder/PInvokeTableGenerator.cs index 402325b1275ff3..4a29b47666e94d 100644 --- a/src/tasks/WasmAppBuilder/PInvokeTableGenerator.cs +++ b/src/tasks/WasmAppBuilder/PInvokeTableGenerator.cs @@ -179,7 +179,7 @@ private string CEntryPoint(PInvoke pinvoke) if (pinvoke.WasmLinkage) { // We mangle the name to avoid collisions with symbols in other modules - return _fixupSymbolName($"{pinvoke.Module}_{pinvoke.EntryPoint}"); + return _fixupSymbolName($"{pinvoke.Module}#{pinvoke.EntryPoint}"); } return _fixupSymbolName(pinvoke.EntryPoint); } From ed92ec40a465151e47c6d6c41b0b9bc95c29f1da Mon Sep 17 00:00:00 2001 From: "dotnet-maestro[bot]" <42748379+dotnet-maestro[bot]@users.noreply.github.com> 
Date: Fri, 5 Apr 2024 22:45:32 +0200 Subject: [PATCH 123/132] [main] Update dependencies from dotnet/hotreload-utils, dotnet/icu, dotnet/runtime, dotnet/runtime-assets, dotnet/xharness (#100280) * Update dependencies from https://github.com/dotnet/icu build Microsoft.NETCore.Runtime.ICU.Transport From Version 9.0.0-preview.4.24172.4 -> To Version 9.0.0-preview.4.24175.1 * Update dependencies from https://github.com/dotnet/xharness build Microsoft.DotNet.XHarness.CLI , Microsoft.DotNet.XHarness.TestRunners.Common , Microsoft.DotNet.XHarness.TestRunners.Xunit From Version 9.0.0-prerelease.24168.2 -> To Version 9.0.0-prerelease.24175.1 * Update dependencies from https://github.com/dotnet/runtime-assets build Microsoft.DotNet.CilStrip.Sources , System.ComponentModel.TypeConverter.TestData , System.Data.Common.TestData , System.Drawing.Common.TestData , System.Formats.Tar.TestData , System.IO.Compression.TestData , System.IO.Packaging.TestData , System.Net.TestData , System.Private.Runtime.UnicodeData , System.Runtime.Numerics.TestData , System.Runtime.TimeZoneData , System.Security.Cryptography.X509Certificates.TestData , System.Text.RegularExpressions.TestData , System.Windows.Extensions.TestData From Version 9.0.0-beta.24170.12 -> To Version 9.0.0-beta.24175.1 * Update dependencies from https://github.com/dotnet/hotreload-utils build Microsoft.DotNet.HotReload.Utils.Generator.BuildTool From Version 9.0.0-alpha.0.24168.2 -> To Version 9.0.0-alpha.0.24175.1 * Update dependencies from https://github.com/dotnet/icu build 20240325.1 Microsoft.NETCore.Runtime.ICU.Transport From Version 9.0.0-preview.4.24172.4 -> To Version 9.0.0-preview.4.24175.1 * Update dependencies from https://github.com/dotnet/xharness build 20240325.1 Microsoft.DotNet.XHarness.CLI , Microsoft.DotNet.XHarness.TestRunners.Common , Microsoft.DotNet.XHarness.TestRunners.Xunit From Version 9.0.0-prerelease.24168.2 -> To Version 9.0.0-prerelease.24175.1 * Update dependencies from 
https://github.com/dotnet/runtime-assets build 20240327.1 Microsoft.DotNet.CilStrip.Sources , System.ComponentModel.TypeConverter.TestData , System.Data.Common.TestData , System.Drawing.Common.TestData , System.Formats.Tar.TestData , System.IO.Compression.TestData , System.IO.Packaging.TestData , System.Net.TestData , System.Private.Runtime.UnicodeData , System.Runtime.Numerics.TestData , System.Runtime.TimeZoneData , System.Security.Cryptography.X509Certificates.TestData , System.Text.RegularExpressions.TestData , System.Windows.Extensions.TestData From Version 9.0.0-beta.24170.12 -> To Version 9.0.0-beta.24177.1 * Update dependencies from https://github.com/dotnet/hotreload-utils build 20240325.1 Microsoft.DotNet.HotReload.Utils.Generator.BuildTool From Version 9.0.0-alpha.0.24168.2 -> To Version 9.0.0-alpha.0.24175.1 * Update dependencies from https://github.com/dotnet/icu build 20240325.1 Microsoft.NETCore.Runtime.ICU.Transport From Version 9.0.0-preview.4.24172.4 -> To Version 9.0.0-preview.4.24175.1 * Update dependencies from https://github.com/dotnet/xharness build 20240325.1 Microsoft.DotNet.XHarness.CLI , Microsoft.DotNet.XHarness.TestRunners.Common , Microsoft.DotNet.XHarness.TestRunners.Xunit From Version 9.0.0-prerelease.24168.2 -> To Version 9.0.0-prerelease.24175.1 * Update dependencies from https://github.com/dotnet/runtime-assets build 20240327.1 Microsoft.DotNet.CilStrip.Sources , System.ComponentModel.TypeConverter.TestData , System.Data.Common.TestData , System.Drawing.Common.TestData , System.Formats.Tar.TestData , System.IO.Compression.TestData , System.IO.Packaging.TestData , System.Net.TestData , System.Private.Runtime.UnicodeData , System.Runtime.Numerics.TestData , System.Runtime.TimeZoneData , System.Security.Cryptography.X509Certificates.TestData , System.Text.RegularExpressions.TestData , System.Windows.Extensions.TestData From Version 9.0.0-beta.24170.12 -> To Version 9.0.0-beta.24177.1 * Update dependencies from 
https://github.com/dotnet/hotreload-utils build 20240328.1 Microsoft.DotNet.HotReload.Utils.Generator.BuildTool From Version 9.0.0-alpha.0.24168.2 -> To Version 9.0.0-alpha.0.24178.1 * Update dependencies from https://github.com/dotnet/icu build 20240328.1 Microsoft.NETCore.Runtime.ICU.Transport From Version 9.0.0-preview.4.24172.4 -> To Version 9.0.0-preview.4.24178.1 * Update dependencies from https://github.com/dotnet/xharness build 20240328.1 Microsoft.DotNet.XHarness.CLI , Microsoft.DotNet.XHarness.TestRunners.Common , Microsoft.DotNet.XHarness.TestRunners.Xunit From Version 9.0.0-prerelease.24168.2 -> To Version 9.0.0-prerelease.24178.1 * Update dependencies from https://github.com/dotnet/runtime-assets build 20240328.1 Microsoft.DotNet.CilStrip.Sources , System.ComponentModel.TypeConverter.TestData , System.Data.Common.TestData , System.Drawing.Common.TestData , System.Formats.Tar.TestData , System.IO.Compression.TestData , System.IO.Packaging.TestData , System.Net.TestData , System.Private.Runtime.UnicodeData , System.Runtime.Numerics.TestData , System.Runtime.TimeZoneData , System.Security.Cryptography.X509Certificates.TestData , System.Text.RegularExpressions.TestData , System.Windows.Extensions.TestData From Version 9.0.0-beta.24170.12 -> To Version 9.0.0-beta.24178.1 * Update dependencies from https://github.com/dotnet/hotreload-utils build 20240328.1 Microsoft.DotNet.HotReload.Utils.Generator.BuildTool From Version 9.0.0-alpha.0.24168.2 -> To Version 9.0.0-alpha.0.24178.1 * Update dependencies from https://github.com/dotnet/icu build 20240329.1 Microsoft.NETCore.Runtime.ICU.Transport From Version 9.0.0-preview.4.24172.4 -> To Version 9.0.0-preview.4.24179.1 * Update dependencies from https://github.com/dotnet/xharness build 20240328.1 Microsoft.DotNet.XHarness.CLI , Microsoft.DotNet.XHarness.TestRunners.Common , Microsoft.DotNet.XHarness.TestRunners.Xunit From Version 9.0.0-prerelease.24168.2 -> To Version 9.0.0-prerelease.24178.1 * Update dependencies 
from https://github.com/dotnet/runtime-assets build 20240328.1 Microsoft.DotNet.CilStrip.Sources , System.ComponentModel.TypeConverter.TestData , System.Data.Common.TestData , System.Drawing.Common.TestData , System.Formats.Tar.TestData , System.IO.Compression.TestData , System.IO.Packaging.TestData , System.Net.TestData , System.Private.Runtime.UnicodeData , System.Runtime.Numerics.TestData , System.Runtime.TimeZoneData , System.Security.Cryptography.X509Certificates.TestData , System.Text.RegularExpressions.TestData , System.Windows.Extensions.TestData From Version 9.0.0-beta.24170.12 -> To Version 9.0.0-beta.24178.1 * Update dependencies from https://github.com/dotnet/hotreload-utils build 20240328.1 Microsoft.DotNet.HotReload.Utils.Generator.BuildTool From Version 9.0.0-alpha.0.24168.2 -> To Version 9.0.0-alpha.0.24178.1 * Update dependencies from https://github.com/dotnet/icu build 20240329.1 Microsoft.NETCore.Runtime.ICU.Transport From Version 9.0.0-preview.4.24172.4 -> To Version 9.0.0-preview.4.24179.1 * Update dependencies from https://github.com/dotnet/xharness build 20240328.1 Microsoft.DotNet.XHarness.CLI , Microsoft.DotNet.XHarness.TestRunners.Common , Microsoft.DotNet.XHarness.TestRunners.Xunit From Version 9.0.0-prerelease.24168.2 -> To Version 9.0.0-prerelease.24178.1 * Update dependencies from https://github.com/dotnet/runtime-assets build 20240328.1 Microsoft.DotNet.CilStrip.Sources , System.ComponentModel.TypeConverter.TestData , System.Data.Common.TestData , System.Drawing.Common.TestData , System.Formats.Tar.TestData , System.IO.Compression.TestData , System.IO.Packaging.TestData , System.Net.TestData , System.Private.Runtime.UnicodeData , System.Runtime.Numerics.TestData , System.Runtime.TimeZoneData , System.Security.Cryptography.X509Certificates.TestData , System.Text.RegularExpressions.TestData , System.Windows.Extensions.TestData From Version 9.0.0-beta.24170.12 -> To Version 9.0.0-beta.24178.1 * Update dependencies from 
https://github.com/dotnet/hotreload-utils build 20240328.1 Microsoft.DotNet.HotReload.Utils.Generator.BuildTool From Version 9.0.0-alpha.0.24168.2 -> To Version 9.0.0-alpha.0.24178.1 * Update dependencies from https://github.com/dotnet/icu build 20240329.1 Microsoft.NETCore.Runtime.ICU.Transport From Version 9.0.0-preview.4.24172.4 -> To Version 9.0.0-preview.4.24179.1 * Update dependencies from https://github.com/dotnet/xharness build 20240328.1 Microsoft.DotNet.XHarness.CLI , Microsoft.DotNet.XHarness.TestRunners.Common , Microsoft.DotNet.XHarness.TestRunners.Xunit From Version 9.0.0-prerelease.24168.2 -> To Version 9.0.0-prerelease.24178.1 * Update dependencies from https://github.com/dotnet/runtime-assets build 20240328.1 Microsoft.DotNet.CilStrip.Sources , System.ComponentModel.TypeConverter.TestData , System.Data.Common.TestData , System.Drawing.Common.TestData , System.Formats.Tar.TestData , System.IO.Compression.TestData , System.IO.Packaging.TestData , System.Net.TestData , System.Private.Runtime.UnicodeData , System.Runtime.Numerics.TestData , System.Runtime.TimeZoneData , System.Security.Cryptography.X509Certificates.TestData , System.Text.RegularExpressions.TestData , System.Windows.Extensions.TestData From Version 9.0.0-beta.24170.12 -> To Version 9.0.0-beta.24178.1 * Update dependencies from https://github.com/dotnet/hotreload-utils build 20240328.1 Microsoft.DotNet.HotReload.Utils.Generator.BuildTool From Version 9.0.0-alpha.0.24168.2 -> To Version 9.0.0-alpha.0.24178.1 * Update dependencies from https://github.com/dotnet/runtime build 20240401.1 Microsoft.DotNet.ILCompiler , Microsoft.NET.Sdk.IL , Microsoft.NETCore.App.Runtime.win-x64 , Microsoft.NETCore.ILAsm , runtime.native.System.IO.Ports , System.Reflection.Metadata , System.Reflection.MetadataLoadContext , System.Text.Json , Microsoft.SourceBuild.Intermediate.runtime.linux-x64 From Version 9.0.0-preview.4.24175.1 -> To Version 9.0.0-preview.4.24201.1 * Update dependencies from 
https://github.com/dotnet/icu build 20240401.1 Microsoft.NETCore.Runtime.ICU.Transport From Version 9.0.0-preview.4.24172.4 -> To Version 9.0.0-preview.4.24201.1 * Update dependencies from https://github.com/dotnet/xharness build 20240328.1 Microsoft.DotNet.XHarness.CLI , Microsoft.DotNet.XHarness.TestRunners.Common , Microsoft.DotNet.XHarness.TestRunners.Xunit From Version 9.0.0-prerelease.24168.2 -> To Version 9.0.0-prerelease.24178.1 * Update dependencies from https://github.com/dotnet/runtime-assets build 20240328.1 Microsoft.DotNet.CilStrip.Sources , System.ComponentModel.TypeConverter.TestData , System.Data.Common.TestData , System.Drawing.Common.TestData , System.Formats.Tar.TestData , System.IO.Compression.TestData , System.IO.Packaging.TestData , System.Net.TestData , System.Private.Runtime.UnicodeData , System.Runtime.Numerics.TestData , System.Runtime.TimeZoneData , System.Security.Cryptography.X509Certificates.TestData , System.Text.RegularExpressions.TestData , System.Windows.Extensions.TestData From Version 9.0.0-beta.24170.12 -> To Version 9.0.0-beta.24178.1 * Update dependencies from https://github.com/dotnet/hotreload-utils build 20240401.1 Microsoft.DotNet.HotReload.Utils.Generator.BuildTool From Version 9.0.0-alpha.0.24168.2 -> To Version 9.0.0-alpha.0.24201.1 * Update dependencies from https://github.com/dotnet/icu build 20240401.1 Microsoft.NETCore.Runtime.ICU.Transport From Version 9.0.0-preview.4.24172.4 -> To Version 9.0.0-preview.4.24201.1 * Update dependencies from https://github.com/dotnet/xharness build 20240403.1 Microsoft.DotNet.XHarness.CLI , Microsoft.DotNet.XHarness.TestRunners.Common , Microsoft.DotNet.XHarness.TestRunners.Xunit From Version 9.0.0-prerelease.24168.2 -> To Version 9.0.0-prerelease.24203.1 * Update dependencies from https://github.com/dotnet/runtime-assets build 20240403.1 Microsoft.DotNet.CilStrip.Sources , System.ComponentModel.TypeConverter.TestData , System.Data.Common.TestData , System.Drawing.Common.TestData , 
System.Formats.Tar.TestData , System.IO.Compression.TestData , System.IO.Packaging.TestData , System.Net.TestData , System.Private.Runtime.UnicodeData , System.Runtime.Numerics.TestData , System.Runtime.TimeZoneData , System.Security.Cryptography.X509Certificates.TestData , System.Text.RegularExpressions.TestData , System.Windows.Extensions.TestData From Version 9.0.0-beta.24170.12 -> To Version 9.0.0-beta.24203.1 * Update dependencies from https://github.com/dotnet/hotreload-utils build 20240401.1 Microsoft.DotNet.HotReload.Utils.Generator.BuildTool From Version 9.0.0-alpha.0.24168.2 -> To Version 9.0.0-alpha.0.24201.1 * Update dependencies from https://github.com/dotnet/icu build 20240401.1 Microsoft.NETCore.Runtime.ICU.Transport From Version 9.0.0-preview.4.24172.4 -> To Version 9.0.0-preview.4.24201.1 * Update dependencies from https://github.com/dotnet/xharness build 20240403.1 Microsoft.DotNet.XHarness.CLI , Microsoft.DotNet.XHarness.TestRunners.Common , Microsoft.DotNet.XHarness.TestRunners.Xunit From Version 9.0.0-prerelease.24168.2 -> To Version 9.0.0-prerelease.24203.1 * Update dependencies from https://github.com/dotnet/runtime-assets build 20240403.1 Microsoft.DotNet.CilStrip.Sources , System.ComponentModel.TypeConverter.TestData , System.Data.Common.TestData , System.Drawing.Common.TestData , System.Formats.Tar.TestData , System.IO.Compression.TestData , System.IO.Packaging.TestData , System.Net.TestData , System.Private.Runtime.UnicodeData , System.Runtime.Numerics.TestData , System.Runtime.TimeZoneData , System.Security.Cryptography.X509Certificates.TestData , System.Text.RegularExpressions.TestData , System.Windows.Extensions.TestData From Version 9.0.0-beta.24170.12 -> To Version 9.0.0-beta.24203.1 * Update dependencies from https://github.com/dotnet/hotreload-utils build 20240401.1 Microsoft.DotNet.HotReload.Utils.Generator.BuildTool From Version 9.0.0-alpha.0.24168.2 -> To Version 9.0.0-alpha.0.24201.1 * Update dependencies from 
https://github.com/dotnet/icu build 20240401.1 Microsoft.NETCore.Runtime.ICU.Transport From Version 9.0.0-preview.4.24172.4 -> To Version 9.0.0-preview.4.24201.1 * Update dependencies from https://github.com/dotnet/xharness build 20240403.1 Microsoft.DotNet.XHarness.CLI , Microsoft.DotNet.XHarness.TestRunners.Common , Microsoft.DotNet.XHarness.TestRunners.Xunit From Version 9.0.0-prerelease.24168.2 -> To Version 9.0.0-prerelease.24203.1 * Update dependencies from https://github.com/dotnet/runtime-assets build 20240403.1 Microsoft.DotNet.CilStrip.Sources , System.ComponentModel.TypeConverter.TestData , System.Data.Common.TestData , System.Drawing.Common.TestData , System.Formats.Tar.TestData , System.IO.Compression.TestData , System.IO.Packaging.TestData , System.Net.TestData , System.Private.Runtime.UnicodeData , System.Runtime.Numerics.TestData , System.Runtime.TimeZoneData , System.Security.Cryptography.X509Certificates.TestData , System.Text.RegularExpressions.TestData , System.Windows.Extensions.TestData From Version 9.0.0-beta.24170.12 -> To Version 9.0.0-beta.24203.1 * Update dependencies from https://github.com/dotnet/hotreload-utils build 20240401.1 Microsoft.DotNet.HotReload.Utils.Generator.BuildTool From Version 9.0.0-alpha.0.24168.2 -> To Version 9.0.0-alpha.0.24201.1 --------- Co-authored-by: dotnet-maestro[bot] Co-authored-by: Larry Ewing --- .config/dotnet-tools.json | 2 +- eng/Version.Details.xml | 112 +++++++++++++++++++------------------- eng/Versions.props | 52 +++++++++--------- global.json | 2 +- 4 files changed, 84 insertions(+), 84 deletions(-) diff --git a/.config/dotnet-tools.json b/.config/dotnet-tools.json index ae173ae516dc5b..9abee6864b8ee6 100644 --- a/.config/dotnet-tools.json +++ b/.config/dotnet-tools.json @@ -15,7 +15,7 @@ ] }, "microsoft.dotnet.xharness.cli": { - "version": "9.0.0-prerelease.24168.2", + "version": "9.0.0-prerelease.24203.1", "commands": [ "xharness" ] diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml 
index dfb1ea462c5c89..287097a1977214 100644 --- a/eng/Version.Details.xml +++ b/eng/Version.Details.xml @@ -1,8 +1,8 @@ - + https://github.com/dotnet/icu - 1f01904b5b68612031509a7e9c1ffa1f9bd1d75e + 1441a3fcbfa87c94b98a27605b06db7dd862f3e4 https://github.com/dotnet/msquic @@ -174,57 +174,57 @@ https://github.com/dotnet/arcade 532f956a119bce77ca279994054d08dbc24418f7 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/runtime-assets - 
f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 https://github.com/dotnet/llvm-project @@ -282,55 +282,55 @@ https://github.com/dotnet/llvm-project c1305278000772701230efa9353cc136e10a8717 - + https://github.com/dotnet/runtime - 330b70cfacc7751ab5ac546ed7138cb8a09c3097 + ec4437be46d8b90bc9fa6740c556bd860d9fe5ab - + https://github.com/dotnet/runtime - 330b70cfacc7751ab5ac546ed7138cb8a09c3097 + ec4437be46d8b90bc9fa6740c556bd860d9fe5ab - + https://github.com/dotnet/runtime - 330b70cfacc7751ab5ac546ed7138cb8a09c3097 + ec4437be46d8b90bc9fa6740c556bd860d9fe5ab - + https://github.com/dotnet/runtime - 330b70cfacc7751ab5ac546ed7138cb8a09c3097 + ec4437be46d8b90bc9fa6740c556bd860d9fe5ab - + https://github.com/dotnet/runtime - 330b70cfacc7751ab5ac546ed7138cb8a09c3097 + ec4437be46d8b90bc9fa6740c556bd860d9fe5ab - + https://github.com/dotnet/runtime - 330b70cfacc7751ab5ac546ed7138cb8a09c3097 + ec4437be46d8b90bc9fa6740c556bd860d9fe5ab - + https://github.com/dotnet/runtime - 330b70cfacc7751ab5ac546ed7138cb8a09c3097 + ec4437be46d8b90bc9fa6740c556bd860d9fe5ab - + https://github.com/dotnet/runtime - 330b70cfacc7751ab5ac546ed7138cb8a09c3097 + ec4437be46d8b90bc9fa6740c556bd860d9fe5ab - + https://github.com/dotnet/runtime - 330b70cfacc7751ab5ac546ed7138cb8a09c3097 + ec4437be46d8b90bc9fa6740c556bd860d9fe5ab - + https://github.com/dotnet/xharness - 006ea312a94e8b7f5b7ae47a6470f733ddd1738a + 28af9496b0e260f7e66ec549b39f1410ee9743d1 - + https://github.com/dotnet/xharness - 006ea312a94e8b7f5b7ae47a6470f733ddd1738a + 28af9496b0e260f7e66ec549b39f1410ee9743d1 - + https://github.com/dotnet/xharness - 006ea312a94e8b7f5b7ae47a6470f733ddd1738a + 28af9496b0e260f7e66ec549b39f1410ee9743d1 https://github.com/dotnet/arcade @@ -352,13 +352,13 @@ https://dev.azure.com/dnceng/internal/_git/dotnet-optimization 78a5b978e1965c1335edb4b9a22bc4d6ff5a77a6 - + https://github.com/dotnet/hotreload-utils - 0571297e323b5315e16ab9df888a9503367e74cc + 
668ee30182fea845064853c46be5f54ac6efd110 - + https://github.com/dotnet/runtime-assets - f282faa0ddd1b3672a3cba54518943fb1d0b4e36 + ad97a45c2567fa7c3a067079f166c3f3c9fecd60 https://github.com/dotnet/roslyn diff --git a/eng/Versions.props b/eng/Versions.props index 8b862d54f60efa..58b93403204069 100644 --- a/eng/Versions.props +++ b/eng/Versions.props @@ -104,10 +104,10 @@ 6.0.0-preview.1.102 - 9.0.0-preview.4.24175.1 + 9.0.0-preview.4.24201.1 6.0.0 - 9.0.0-preview.4.24175.1 + 9.0.0-preview.4.24201.1 6.0.0 1.1.1 @@ -119,38 +119,38 @@ 8.0.0 5.0.0 4.5.5 - 9.0.0-preview.4.24175.1 - 9.0.0-preview.4.24175.1 + 9.0.0-preview.4.24201.1 + 9.0.0-preview.4.24201.1 6.0.0 5.0.0 5.0.0 5.0.0 7.0.0 - 9.0.0-preview.4.24175.1 + 9.0.0-preview.4.24201.1 6.0.0 7.0.0 4.5.4 4.5.0 - 9.0.0-preview.4.24175.1 + 9.0.0-preview.4.24201.1 8.0.0 8.0.0 8.0.0 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 - 9.0.0-beta.24170.12 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 + 9.0.0-beta.24203.1 1.0.0-prerelease.24106.4 1.0.0-prerelease.24106.4 @@ -178,10 +178,10 @@ 1.4.0 17.4.0-preview-20220707-01 - 9.0.0-prerelease.24168.2 - 9.0.0-prerelease.24168.2 - 9.0.0-prerelease.24168.2 - 9.0.0-alpha.0.24168.2 + 9.0.0-prerelease.24203.1 + 9.0.0-prerelease.24203.1 + 9.0.0-prerelease.24203.1 + 9.0.0-alpha.0.24201.1 3.12.0 4.5.0 6.0.0 @@ -209,9 +209,9 @@ 0.11.4-alpha.24168.1 - 9.0.0-preview.4.24175.1 + 9.0.0-preview.4.24201.1 - 9.0.0-preview.4.24172.4 + 9.0.0-preview.4.24201.1 2.3.5 9.0.0-alpha.1.24167.3 diff --git a/global.json b/global.json index 
3f9d14a6e2e518..bf1936167a4a42 100644 --- a/global.json +++ b/global.json @@ -13,6 +13,6 @@ "Microsoft.DotNet.SharedFramework.Sdk": "9.0.0-beta.24203.1", "Microsoft.Build.NoTargets": "3.7.0", "Microsoft.Build.Traversal": "3.4.0", - "Microsoft.NET.Sdk.IL": "9.0.0-preview.4.24175.1" + "Microsoft.NET.Sdk.IL": "9.0.0-preview.4.24201.1" } } From 1f698e7d6b87a7b9257b41b331cfb1e31de4e3f5 Mon Sep 17 00:00:00 2001 From: "dotnet-maestro[bot]" <42748379+dotnet-maestro[bot]@users.noreply.github.com> Date: Fri, 5 Apr 2024 22:47:25 +0200 Subject: [PATCH 124/132] Update dependencies from https://github.com/dotnet/emsdk build 20240404.9 (#100671) Microsoft.SourceBuild.Intermediate.emsdk , Microsoft.NET.Runtime.Emscripten.3.1.34.Python.win-x64 , Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport From Version 9.0.0-preview.4.24201.2 -> To Version 9.0.0-preview.4.24204.9 Dependency coherency updates runtime.linux-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.win-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.win-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.osx-arm64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.osx-x64.Microsoft.NETCore.Runtime.JIT.Tools,runtime.linux-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-musl-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.linux-musl-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.win-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.win-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.osx-arm64.Microsoft.NETC
ore.Runtime.Mono.LLVM.Sdk,runtime.osx-arm64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools,runtime.osx-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Sdk,runtime.osx-x64.Microsoft.NETCore.Runtime.Mono.LLVM.Tools From Version 16.0.5-alpha.1.24179.1 -> To Version 16.0.5-alpha.1.24203.4 (parent: Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport Co-authored-by: dotnet-maestro[bot] Co-authored-by: Larry Ewing --- eng/Version.Details.xml | 100 ++++++++++++++++++++-------------------- eng/Versions.props | 48 +++++++++---------- 2 files changed, 74 insertions(+), 74 deletions(-) diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml index 287097a1977214..ef3c9906e6fbd2 100644 --- a/eng/Version.Details.xml +++ b/eng/Version.Details.xml @@ -12,41 +12,41 @@ https://github.com/dotnet/wcf 7f504aabb1988e9a093c1e74d8040bd52feb2f01 - + https://github.com/dotnet/emsdk - bd79d3dd7ed2db36b3c3d4fa807c21a06d2024ec + 9ad7c262f14dc5e40a64030ade7788b36e74adf0 - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c 
https://github.com/dotnet/command-line-api @@ -68,14 +68,14 @@ 9c8ea966df62f764523b51772763e74e71040a92 - + https://github.com/dotnet/emsdk - bd79d3dd7ed2db36b3c3d4fa807c21a06d2024ec + 9ad7c262f14dc5e40a64030ade7788b36e74adf0 - + https://github.com/dotnet/emsdk - bd79d3dd7ed2db36b3c3d4fa807c21a06d2024ec + 9ad7c262f14dc5e40a64030ade7788b36e74adf0 @@ -226,61 +226,61 @@ https://github.com/dotnet/runtime-assets ad97a45c2567fa7c3a067079f166c3f3c9fecd60 - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c 
- + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c - + https://github.com/dotnet/llvm-project - c1305278000772701230efa9353cc136e10a8717 + 8b4f10702e13ea221a33e91c2ef46c4b7910b56c https://github.com/dotnet/runtime diff --git a/eng/Versions.props b/eng/Versions.props index 58b93403204069..4ef6855a95544a 100644 --- a/eng/Versions.props +++ b/eng/Versions.props @@ -216,39 +216,39 @@ 2.3.5 9.0.0-alpha.1.24167.3 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 - 9.0.0-preview.4.24201.2 + 9.0.0-preview.4.24204.9 $(MicrosoftNETWorkloadEmscriptenCurrentManifest90100TransportVersion) - 9.0.0-preview.4.24201.2 + 9.0.0-preview.4.24204.9 1.1.87-gba258badda 1.0.0-v3.14.0.5722 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 - 16.0.5-alpha.1.24179.1 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 + 16.0.5-alpha.1.24203.4 3.1.7 1.0.406601 From 3515c7a8ebaf303729be959e54601393187e10c8 Mon Sep 17 00:00:00 2001 From: Bruce Forstall Date: Fri, 5 Apr 2024 13:49:57 -0700 Subject: [PATCH 125/132] Add build option to build 
Mac .dSYM debug symbol bundles (#100617) This is a small workaround to allow developers working on Mac the ability to generate .dSYM bundles as part of inner-loop development, instead of the unsupported .dwarf files that are generated by default. A full solution to use .dSYM bundles everywhere on Mac, including packaging and symbol indexing, is tracked by https://github.com/dotnet/runtime/issues/92911. To build .dSYM bundles instead of .dwarf files, invoke build.sh as follows: ```bash ./build.sh --subset clr --cmakeargs "-DCLR_CMAKE_APPLE_DSYM=TRUE" ``` --- docs/workflow/building/coreclr/macos-instructions.md | 10 ++++++++++ eng/native/functions.cmake | 10 ++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/docs/workflow/building/coreclr/macos-instructions.md b/docs/workflow/building/coreclr/macos-instructions.md index 8deaf4578bccef..7ac0d0c6e0f859 100644 --- a/docs/workflow/building/coreclr/macos-instructions.md +++ b/docs/workflow/building/coreclr/macos-instructions.md @@ -33,6 +33,16 @@ It is possible to get a macOS ARM64 build using an Intel x64 Mac and vice versa, The Core_Root provides one of the main ways to test your build. Full instructions on how to build it in the [CoreCLR testing doc](/docs/workflow/testing/coreclr/testing.md), and we also have a detailed guide on how to use it for your own testing in [its own dedicated doc](/docs/workflow/testing/using-corerun-and-coreroot.md). +## Debugging information + +The build process puts native component symbol and debugging information into `.dwarf` files, one for each built binary. This is not the native format used by macOS, and debuggers like LLDB can't automatically find them. The native format used by macOS is `.dSYM` bundles. 
To build `.dSYM` bundles and get a better inner-loop developer experience on macOS (e.g., have the LLDB debugger automatically find program symbols and display source code lines, etc.), build as follows: + +```bash +./build.sh --subset clr --cmakeargs "-DCLR_CMAKE_APPLE_DSYM=TRUE" +``` + +(Note: converting the entire build process to build and package `.dSYM` bundles on macOS by default is tracked by [this](https://github.com/dotnet/runtime/issues/92911) issue.) + ## Native Sanitizers CoreCLR can be built with native sanitizers like AddressSanitizer to help catch memory safety issues. To build the project with native sanitizers, add the `-fsanitize address` argument to the build script like the following: diff --git a/eng/native/functions.cmake b/eng/native/functions.cmake index d4a7e1bee92edb..6629e926afacf6 100644 --- a/eng/native/functions.cmake +++ b/eng/native/functions.cmake @@ -378,7 +378,11 @@ endfunction() function (get_symbol_file_name targetName outputSymbolFilename) if (CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_TARGET_APPLE) - set(strip_destination_file $.dwarf) + if (CLR_CMAKE_APPLE_DSYM) + set(strip_destination_file $.dSYM) + else () + set(strip_destination_file $.dwarf) + endif () else () set(strip_destination_file $.dbg) endif () @@ -425,7 +429,9 @@ function(strip_symbols targetName outputFilename) OUTPUT_VARIABLE DSYMUTIL_HELP_OUTPUT ) - set(DSYMUTIL_OPTS "--flat") + if (NOT CLR_CMAKE_APPLE_DSYM) + set(DSYMUTIL_OPTS "--flat") + endif () if ("${DSYMUTIL_HELP_OUTPUT}" MATCHES "--minimize") list(APPEND DSYMUTIL_OPTS "--minimize") endif () From 92afc71cbed0a33376bcdde21083b578d88225e7 Mon Sep 17 00:00:00 2001 From: Lakshan Fernando Date: Fri, 5 Apr 2024 14:00:01 -0700 Subject: [PATCH 126/132] Disable default and ambient attribute at runtime with a feature switch (#100416) * Disable default and ambient attribute at runtime with a feature switch * throwing at the getter at runtime --- .../src/Resources/Strings.resx | 3 ++ 
.../ComponentModel/AmbientValueAttribute.cs | 46 ++++++++++++++----- .../src/Resources/Strings.resx | 5 +- .../ComponentModel/DefaultValueAttribute.cs | 23 +++++++++- 4 files changed, 63 insertions(+), 14 deletions(-) diff --git a/src/libraries/System.ComponentModel.TypeConverter/src/Resources/Strings.resx b/src/libraries/System.ComponentModel.TypeConverter/src/Resources/Strings.resx index ea75a801456861..b8a10ffbcd5f1b 100644 --- a/src/libraries/System.ComponentModel.TypeConverter/src/Resources/Strings.resx +++ b/src/libraries/System.ComponentModel.TypeConverter/src/Resources/Strings.resx @@ -106,6 +106,9 @@ The specified type is not a nullable type. + + Runtime instantiation of this attribute is not allowed. + (Text) diff --git a/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/AmbientValueAttribute.cs b/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/AmbientValueAttribute.cs index 79b4d31603b988..53a43204668305 100644 --- a/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/AmbientValueAttribute.cs +++ b/src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/AmbientValueAttribute.cs @@ -1,6 +1,8 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +using System.ComponentModel.Design; +using System.Diagnostics; using System.Diagnostics.CodeAnalysis; namespace System.ComponentModel @@ -12,6 +14,11 @@ namespace System.ComponentModel [AttributeUsage(AttributeTargets.All)] public sealed class AmbientValueAttribute : Attribute { + /// + /// This is the default value. + /// + private object? _value; + /// /// Initializes a new instance of the class, converting the /// specified value to the specified type, and using the U.S. 
English culture as the @@ -22,9 +29,15 @@ public AmbientValueAttribute([DynamicallyAccessedMembers(DynamicallyAccessedMemb { // The try/catch here is because attributes should never throw exceptions. We would fail to // load an otherwise normal class. + + if (!IDesignerHost.IsSupported) + { + return; + } + try { - Value = TypeDescriptor.GetConverter(type).ConvertFromInvariantString(value); + _value = TypeDescriptor.GetConverter(type).ConvertFromInvariantString(value); } catch { @@ -37,7 +50,7 @@ public AmbientValueAttribute([DynamicallyAccessedMembers(DynamicallyAccessedMemb /// public AmbientValueAttribute(char value) { - Value = value; + _value = value; } /// @@ -46,7 +59,7 @@ public AmbientValueAttribute(char value) /// public AmbientValueAttribute(byte value) { - Value = value; + _value = value; } /// @@ -55,7 +68,7 @@ public AmbientValueAttribute(byte value) /// public AmbientValueAttribute(short value) { - Value = value; + _value = value; } /// @@ -64,7 +77,7 @@ public AmbientValueAttribute(short value) /// public AmbientValueAttribute(int value) { - Value = value; + _value = value; } /// @@ -73,7 +86,7 @@ public AmbientValueAttribute(int value) /// public AmbientValueAttribute(long value) { - Value = value; + _value = value; } /// @@ -82,7 +95,7 @@ public AmbientValueAttribute(long value) /// public AmbientValueAttribute(float value) { - Value = value; + _value = value; } /// @@ -91,7 +104,7 @@ public AmbientValueAttribute(float value) /// public AmbientValueAttribute(double value) { - Value = value; + _value = value; } /// @@ -100,7 +113,7 @@ public AmbientValueAttribute(double value) /// public AmbientValueAttribute(bool value) { - Value = value; + _value = value; } /// @@ -108,7 +121,7 @@ public AmbientValueAttribute(bool value) /// public AmbientValueAttribute(string? value) { - Value = value; + _value = value; } /// @@ -117,13 +130,22 @@ public AmbientValueAttribute(string? value) /// public AmbientValueAttribute(object? 
value) { - Value = value; + _value = value; } /// /// Gets the ambient value of the property this attribute is bound to. /// - public object? Value { get; } + public object? Value { + get + { + if (!IDesignerHost.IsSupported) + { + throw new ArgumentException(SR.RuntimeInstanceNotAllowed); + } + return _value; + } + } public override bool Equals([NotNullWhen(true)] object? obj) { diff --git a/src/libraries/System.Private.CoreLib/src/Resources/Strings.resx b/src/libraries/System.Private.CoreLib/src/Resources/Strings.resx index bbc4b27c51ddeb..2bc7699526df53 100644 --- a/src/libraries/System.Private.CoreLib/src/Resources/Strings.resx +++ b/src/libraries/System.Private.CoreLib/src/Resources/Strings.resx @@ -3359,6 +3359,9 @@ Non-static method requires a target. + + Runtime instantiation of this attribute is not allowed. + An object that does not derive from System.Exception has been wrapped in a RuntimeWrappedException. @@ -4313,4 +4316,4 @@ Blocking wait is not supported on the JS interop threads. - \ No newline at end of file + diff --git a/src/libraries/System.Private.CoreLib/src/System/ComponentModel/DefaultValueAttribute.cs b/src/libraries/System.Private.CoreLib/src/System/ComponentModel/DefaultValueAttribute.cs index 9a9ad7ef521329..322a6ae242b051 100644 --- a/src/libraries/System.Private.CoreLib/src/System/ComponentModel/DefaultValueAttribute.cs +++ b/src/libraries/System.Private.CoreLib/src/System/ComponentModel/DefaultValueAttribute.cs @@ -1,6 +1,7 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Reflection; @@ -22,6 +23,9 @@ public class DefaultValueAttribute : Attribute // Delegate ad hoc created 'TypeDescriptor.ConvertFromInvariantString' reflection object cache private static object? 
s_convertFromInvariantString; + [FeatureSwitchDefinition("System.ComponentModel.DefaultValueAttribute.IsSupported")] + internal static bool IsSupported => AppContext.TryGetSwitch("System.ComponentModel.DefaultValueAttribute.IsSupported", out bool isSupported) ? isSupported : true; + /// /// Initializes a new instance of the /// class, converting the specified value to the specified type, and using the U.S. English @@ -35,6 +39,12 @@ public DefaultValueAttribute( // The null check and try/catch here are because attributes should never throw exceptions. // We would fail to load an otherwise normal class. + if (!IsSupported) + { + Debug.Assert(!IsSupported, "Runtime instantiation of this attribute is not allowed."); + return; + } + if (type == null) { return; @@ -229,7 +239,18 @@ public DefaultValueAttribute(ulong value) /// /// Gets the default value of the property this attribute is bound to. /// - public virtual object? Value => _value; + public virtual object? Value + { + get + { + if (!IsSupported) + { + throw new ArgumentException(SR.RuntimeInstanceNotAllowed); + } + return _value; + } + } + public override bool Equals([NotNullWhen(true)] object? obj) { From 3e988779476b4f5daaea450a1893a61b5bd4e944 Mon Sep 17 00:00:00 2001 From: Jan Vorlicek Date: Fri, 5 Apr 2024 23:18:43 +0200 Subject: [PATCH 127/132] Fix VS user unhandled exception notification (#100673) With the new EH enabled, VS is not breaking on user unhandled exceptions stemming from reflection invoked code. These are exceptions that are still handled, but not in the user code. The most frequent case when the VS issue occurs is in unit tests execution, where a unit test assert throws an exception that's caught by the xunit. With the old EH, VS breaks on such exception and pops out a dialog reporting it as user unhandled. With the new EH, it doesn't happen and the test execution completes with the failure reported into a console instead. 
The reason is that VS is expecting to get a "catch handler found" notification when EH locates the catch handler for the exception so that it can decide whether the catch is in user code or not. We cannot provide it when there are two separate passes of EH - one inside of the reflected code and one in the caller of the reflected code. This change fixes it by turning off the path of reflection invocation that uses the RuntimeMethodHandle::InvokeMethod when a debugger is attached. In that case, it always uses the dynamically generated managed code path. --- .../src/System/Reflection/MethodInvokerCommon.cs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/libraries/System.Private.CoreLib/src/System/Reflection/MethodInvokerCommon.cs b/src/libraries/System.Private.CoreLib/src/System/Reflection/MethodInvokerCommon.cs index 9e0232c1f4fb95..d54640f8449d79 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Reflection/MethodInvokerCommon.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Reflection/MethodInvokerCommon.cs @@ -113,9 +113,10 @@ ref InvokeFunc_ObjSpanArgs? // If ByRefs are used, we can't use this strategy. strategy |= InvokerStrategy.StrategyDetermined_ObjSpanArgs; } - else if ((strategy & InvokerStrategy.HasBeenInvoked_ObjSpanArgs) == 0) + else if (((strategy & InvokerStrategy.HasBeenInvoked_ObjSpanArgs) == 0) && !Debugger.IsAttached) { - // The first time, ignoring race conditions, use the slow path. + // The first time, ignoring race conditions, use the slow path, except for the case when running under a debugger. + // This is a workaround for the debugger issues with understanding exceptions propagation over the slow path. strategy |= InvokerStrategy.HasBeenInvoked_ObjSpanArgs; } else @@ -141,9 +142,10 @@ internal static void DetermineStrategy_Obj4Args( // If ByRefs are used, we can't use this strategy. 
strategy |= InvokerStrategy.StrategyDetermined_Obj4Args; } - else if ((strategy & InvokerStrategy.HasBeenInvoked_Obj4Args) == 0) + else if (((strategy & InvokerStrategy.HasBeenInvoked_Obj4Args) == 0) && !Debugger.IsAttached) { - // The first time, ignoring race conditions, use the slow path. + // The first time, ignoring race conditions, use the slow path, except for the case when running under a debugger. + // This is a workaround for the debugger issues with understanding exceptions propagation over the slow path. strategy |= InvokerStrategy.HasBeenInvoked_Obj4Args; } else @@ -163,9 +165,10 @@ internal static void DetermineStrategy_RefArgs( MethodBase method, bool backwardsCompat) { - if ((strategy & InvokerStrategy.HasBeenInvoked_RefArgs) == 0) + if (((strategy & InvokerStrategy.HasBeenInvoked_RefArgs) == 0) && !Debugger.IsAttached) { - // The first time, ignoring race conditions, use the slow path. + // The first time, ignoring race conditions, use the slow path, except for the case when running under a debugger. + // This is a workaround for the debugger issues with understanding exceptions propagation over the slow path. 
strategy |= InvokerStrategy.HasBeenInvoked_RefArgs; } else From fc9ab41cbeaa2443faec7aea85b5ff794fbb0707 Mon Sep 17 00:00:00 2001 From: Badre BSAILA <54767641+pedrobsaila@users.noreply.github.com> Date: Fri, 5 Apr 2024 23:27:44 +0200 Subject: [PATCH 128/132] BlobBuilder.LinkSuffix/LinkPrefix need to Free zero length chunks (#100039) * BlobBuilder.LinkSuffix/LinkPrefix need to Free zero length chunks * free child blob list * fix remarks 1 * add unit tests --- .../System/Reflection/Metadata/BlobBuilder.cs | 13 +++++++--- .../tests/Metadata/BlobTests.cs | 26 +++++++++++++++++++ 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/src/libraries/System.Reflection.Metadata/src/System/Reflection/Metadata/BlobBuilder.cs b/src/libraries/System.Reflection.Metadata/src/System/Reflection/Metadata/BlobBuilder.cs index 093f60cf7197c8..97967bba080068 100644 --- a/src/libraries/System.Reflection.Metadata/src/System/Reflection/Metadata/BlobBuilder.cs +++ b/src/libraries/System.Reflection.Metadata/src/System/Reflection/Metadata/BlobBuilder.cs @@ -47,7 +47,7 @@ public partial class BlobBuilder private uint _length; private const uint IsFrozenMask = 0x80000000; - private bool IsHead => (_length & IsFrozenMask) == 0; + internal bool IsHead => (_length & IsFrozenMask) == 0; private int Length => (int)(_length & ~IsFrozenMask); private uint FrozenLength => _length | IsFrozenMask; private Span Span => _buffer.AsSpan(0, Length); @@ -97,8 +97,7 @@ public void Clear() { if (chunk != this) { - chunk.ClearChunk(); - chunk.FreeChunk(); + chunk.ClearAndFreeChunk(); } } @@ -396,6 +395,7 @@ public void LinkPrefix(BlobBuilder prefix) // avoid chaining empty chunks: if (prefix.Count == 0) { + prefix.ClearAndFreeChunk(); return; } @@ -456,6 +456,7 @@ public void LinkSuffix(BlobBuilder suffix) // avoid chaining empty chunks: if (suffix.Count == 0) { + suffix.ClearAndFreeChunk(); return; } @@ -1177,5 +1178,11 @@ private static string Display(byte[] bytes, int length) 
BitConverter.ToString(bytes, 0, length) : BitConverter.ToString(bytes, 0, MaxDisplaySize / 2) + "-...-" + BitConverter.ToString(bytes, length - MaxDisplaySize / 2, MaxDisplaySize / 2); } + + private void ClearAndFreeChunk() + { + ClearChunk(); + FreeChunk(); + } } } diff --git a/src/libraries/System.Reflection.Metadata/tests/Metadata/BlobTests.cs b/src/libraries/System.Reflection.Metadata/tests/Metadata/BlobTests.cs index 6c05f36d046c9f..a04d2b6cf19ef5 100644 --- a/src/libraries/System.Reflection.Metadata/tests/Metadata/BlobTests.cs +++ b/src/libraries/System.Reflection.Metadata/tests/Metadata/BlobTests.cs @@ -1090,5 +1090,31 @@ public void PrematureEndOfStream() AssertEx.Equal(sourceArray, builder.ToArray()); } + + [Fact] + public void LinkEmptySuffixAndPrefixShouldFreeThem() + { + var b1 = PooledBlobBuilder.GetInstance(); + var b2 = PooledBlobBuilder.GetInstance(); + var b3 = PooledBlobBuilder.GetInstance(); + var b4 = PooledBlobBuilder.GetInstance(); + var b5 = PooledBlobBuilder.GetInstance(); + + b1.WriteBytes(1, 1); + b2.WriteBytes(1, 1); + b3.WriteBytes(1, 1); + + b1.LinkSuffix(b2); + Assert.False(b2.IsHead); + + b1.LinkPrefix(b3); + Assert.False(b3.IsHead); + + b1.LinkSuffix(b4); + Assert.True(b4.IsHead); + + b1.LinkPrefix(b5); + Assert.True(b4.IsHead); + } } } From 4ce3525fa9eab5cb3b2845dcb9dd43a70ca66299 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 5 Apr 2024 15:11:42 -0700 Subject: [PATCH 129/132] Use Arcade log publishing instead of our own publishing (#100566) --- eng/pipelines/common/global-build-job.yml | 22 +++++++------------ .../templates/runtimes/build-test-job.yml | 16 +++++--------- .../templates/runtimes/run-test-job.yml | 10 +-------- .../common/templates/runtimes/xplat-job.yml | 7 ++++++ .../coreclr/templates/run-performance-job.yml | 12 +++------- .../coreclr/templates/run-scenarios-job.yml | 10 +-------- .../mono/templates/generate-offsets.yml | 12 +--------- .../mono/templates/workloads-build.yml | 12 +--------- 8 files 
changed, 27 insertions(+), 74 deletions(-) diff --git a/eng/pipelines/common/global-build-job.yml b/eng/pipelines/common/global-build-job.yml index 402e9271dfafe0..6fe9c7bdf809ad 100644 --- a/eng/pipelines/common/global-build-job.yml +++ b/eng/pipelines/common/global-build-job.yml @@ -60,6 +60,14 @@ jobs: enableRichCodeNavigation: ${{ parameters.enableRichCodeNavigation }} richCodeNavigationLanguage: ${{ parameters.richCodeNavigationLanguage }} + artifacts: + publish: + logs: + ${{ if notin(parameters.osGroup, 'browser', 'wasi') }}: + name: Logs_Build_Attempt$(System.JobAttempt)_${{ parameters.osGroup }}_${{ parameters.osSubGroup }}_${{ parameters.archType }}_${{ parameters.buildConfig }}_${{ parameters.nameSuffix }} + ${{ if in(parameters.osGroup, 'browser', 'wasi') }}: + name: Logs_Build_Attempt$(System.JobAttempt)_${{ parameters.osGroup }}_${{ parameters.archType }}_${{ parameters.hostedOs }}_${{ parameters.buildConfig }}_${{ parameters.nameSuffix }} + # Component governance does not work on musl machines ${{ if eq(parameters.osSubGroup, '_musl') }}: disableComponentGovernance: true @@ -283,17 +291,3 @@ jobs: - powershell: ./eng/collect_vsinfo.ps1 -ArchiveRunName postbuild_log displayName: Collect vslogs on exit condition: always() - - - template: /eng/pipelines/common/templates/publish-build-artifacts.yml - parameters: - isOfficialBuild: ${{ parameters.isOfficialBuild }} - displayName: Publish Logs - inputs: - PathtoPublish: '$(Build.SourcesDirectory)/artifacts/log/' - PublishLocation: Container - ${{ if notin(parameters.osGroup, 'browser', 'wasi') }}: - ArtifactName: Logs_Build_Attempt$(System.JobAttempt)_${{ parameters.osGroup }}_${{ parameters.osSubGroup }}_${{ parameters.archType }}_${{ parameters.buildConfig }}_${{ parameters.nameSuffix }} - ${{ if in(parameters.osGroup, 'browser', 'wasi') }}: - ArtifactName: Logs_Build_Attempt$(System.JobAttempt)_${{ parameters.osGroup }}_${{ parameters.archType }}_${{ parameters.hostedOs }}_${{ parameters.buildConfig 
}}_${{ parameters.nameSuffix }} - continueOnError: true - condition: always() diff --git a/eng/pipelines/common/templates/runtimes/build-test-job.yml b/eng/pipelines/common/templates/runtimes/build-test-job.yml index 7c8e34edd7b379..c86e4e04f11163 100644 --- a/eng/pipelines/common/templates/runtimes/build-test-job.yml +++ b/eng/pipelines/common/templates/runtimes/build-test-job.yml @@ -56,6 +56,11 @@ jobs: ${{ if notIn(parameters.testGroup, 'innerloop', 'clrinterpreter') }}: timeoutInMinutes: 160 + artifacts: + publish: + logs: + name: '${{ parameters.runtimeFlavor }}_Common_Runtime_TestBuildLogs_Attempt$(System.JobAttempt)_AnyOS_AnyCPU_$(buildConfig)_${{ parameters.testGroup }}' + variables: - ${{ each variable in parameters.variables }}: - ${{ variable }} @@ -137,14 +142,3 @@ jobs: archiveExtension: '.tar.gz' artifactName: $(microsoftNetSdkIlArtifactName) displayName: 'Microsoft.NET.Sdk.IL package' - - # Publish Logs - - template: /eng/pipelines/common/templates/publish-pipeline-artifacts.yml - parameters: - displayName: Publish Logs - isOfficialBuild: ${{ parameters.isOfficialBuild }} - inputs: - targetPath: $(Build.SourcesDirectory)/artifacts/log - ArtifactName: '${{ parameters.runtimeFlavor }}_Common_Runtime_TestBuildLogs_Attempt$(System.JobAttempt)_AnyOS_AnyCPU_$(buildConfig)_${{ parameters.testGroup }}' - continueOnError: true - condition: always() diff --git a/eng/pipelines/common/templates/runtimes/run-test-job.yml b/eng/pipelines/common/templates/runtimes/run-test-job.yml index 8dc0e368dfc45e..d6404617a3e1ad 100644 --- a/eng/pipelines/common/templates/runtimes/run-test-job.yml +++ b/eng/pipelines/common/templates/runtimes/run-test-job.yml @@ -47,6 +47,7 @@ jobs: runtimeVariant: ${{ parameters.runtimeVariant }} pool: ${{ parameters.pool }} condition: and(succeeded(), ${{ parameters.condition }}) + logsName: '${{ parameters.runtimeFlavor }}_${{ parameters.runtimeVariant 
}}_$(LogNamePrefix)_Attempt$(System.JobAttempt)_$(osGroup)$(osSubgroup)_$(archType)_$(buildConfig)_${{ parameters.testGroup }}' # Test jobs should continue on error for internal builds ${{ if eq(variables['System.TeamProject'], 'internal') }}: @@ -561,15 +562,6 @@ jobs: scenarios: - clrinterpreter - # Publish Logs - - task: PublishPipelineArtifact@1 - displayName: Publish Logs - inputs: - targetPath: $(Build.SourcesDirectory)/artifacts/log - artifactName: '${{ parameters.runtimeFlavor }}_${{ parameters.runtimeVariant }}_$(LogNamePrefix)_Attempt$(System.JobAttempt)_$(osGroup)$(osSubgroup)_$(archType)_$(buildConfig)_${{ parameters.testGroup }}' - continueOnError: true - condition: always() - ######################################################################################################## # # Finalize SuperPMI collection: (1) merge all MCH files generated by all Helix jobs, (2) upload MCH file to Azure Storage, (3) upload log files diff --git a/eng/pipelines/common/templates/runtimes/xplat-job.yml b/eng/pipelines/common/templates/runtimes/xplat-job.yml index 45152e25b3bd01..e22f8f968c4790 100644 --- a/eng/pipelines/common/templates/runtimes/xplat-job.yml +++ b/eng/pipelines/common/templates/runtimes/xplat-job.yml @@ -9,6 +9,7 @@ parameters: crossBuild: false strategy: '' pool: '' + logsName: '' # arcade-specific parameters condition: '' @@ -73,6 +74,12 @@ jobs: # storage. Only relevant for build jobs. 
enablePublishBuildAssets: ${{ parameters.gatherAssetManifests }} + artifacts: + publish: + ${{ if ne(parameters.logsName, '') }}: + logs: + name: '${{ parameters.logsName }}' + variables: - name: buildConfig value: ${{ parameters.buildConfig }} diff --git a/eng/pipelines/coreclr/templates/run-performance-job.yml b/eng/pipelines/coreclr/templates/run-performance-job.yml index 9f33585fd7591d..12d630a5b3f39b 100644 --- a/eng/pipelines/coreclr/templates/run-performance-job.yml +++ b/eng/pipelines/coreclr/templates/run-performance-job.yml @@ -51,6 +51,8 @@ jobs: timeoutInMinutes: ${{ parameters.timeoutInMinutes }} + logsName: 'Performance_Run_$(osGroup)$(osSubgroup)_$(archType)_$(buildConfig)_${{ parameters.runtimeType }}_${{ parameters.codeGenType }}_${{ parameters.runKind }}_${{ parameters.logicalMachine }}_${{ parameters.javascriptEngine }}_${{ parameters.pgoRunType }}_${{ parameters.physicalPromotionRunType }}_${{ parameters.r2rRunType }}_${{ parameters.experimentName }}' + variables: - ${{ each variable in parameters.variables }}: - ${{insert}}: ${{ variable }} @@ -153,7 +155,6 @@ jobs: - HelixPreCommand: 'export MONO_ENV_OPTIONS="--interpreter";$(ExtraMSBuildLogsLinux)' - Interpreter: ' --monointerpreter' - workspace: clean: all pool: @@ -191,11 +192,4 @@ jobs: WorkItemDirectory: '$(WorkItemDirectory)' # WorkItemDirectory can not be empty, so we send it some docs to keep it happy CorrelationPayloadDirectory: '$(PayloadDirectory)' # it gets checked out to a folder with shorter path than WorkItemDirectory so we can avoid file name too long exceptions ProjectFile: ${{ parameters.projectFile }} - osGroup: ${{ parameters.osGroup }} - - task: PublishPipelineArtifact@1 - displayName: Publish Logs - inputs: - targetPath: $(Build.SourcesDirectory)/artifacts/log - artifactName: 'Performance_Run_$(osGroup)$(osSubgroup)_$(archType)_$(buildConfig)_${{ parameters.runtimeType }}_${{ parameters.codeGenType }}_${{ parameters.runKind }}_${{ parameters.logicalMachine }}_${{ 
parameters.javascriptEngine }}_${{ parameters.pgoRunType }}_${{ parameters.physicalPromotionRunType }}_${{ parameters.r2rRunType }}_${{ parameters.experimentName }}' - continueOnError: true - condition: always() + osGroup: ${{ parameters.osGroup }} \ No newline at end of file diff --git a/eng/pipelines/coreclr/templates/run-scenarios-job.yml b/eng/pipelines/coreclr/templates/run-scenarios-job.yml index ffe4f5c16482e7..78db1ae8cbd091 100644 --- a/eng/pipelines/coreclr/templates/run-scenarios-job.yml +++ b/eng/pipelines/coreclr/templates/run-scenarios-job.yml @@ -34,6 +34,7 @@ jobs: enableTelemetry: ${{ parameters.enableTelemetry }} enablePublishBuildArtifacts: true continueOnError: ${{ parameters.continueOnError }} + logsName: 'Performance_Run_$(osGroup)$(osSubgroup)_$(archType)_$(buildConfig)_${{ parameters.runtimeType }}_${{ parameters.codeGenType }}_${{ parameters.runKind }}_$(iOSLlvmBuild)_$(iOSStripSymbols)_$(hybridGlobalization)' ${{ if ne(parameters.displayName, '') }}: displayName: '${{ parameters.displayName }}' @@ -213,12 +214,3 @@ jobs: CorrelationPayloadDirectory: '$(PayloadDirectory)' # contains performance repo and built product ProjectFile: ${{ parameters.projectFile }} osGroup: ${{ parameters.osGroup }} - - # publish logs - - task: PublishPipelineArtifact@1 - displayName: Publish Logs - inputs: - targetPath: $(Build.SourcesDirectory)/artifacts/log - artifactName: 'Performance_Run_$(osGroup)$(osSubgroup)_$(archType)_$(buildConfig)_${{ parameters.runtimeType }}_${{ parameters.codeGenType }}_${{ parameters.runKind }}_$(iOSLlvmBuild)_$(iOSStripSymbols)_$(hybridGlobalization)' - continueOnError: true - condition: always() diff --git a/eng/pipelines/mono/templates/generate-offsets.yml b/eng/pipelines/mono/templates/generate-offsets.yml index dcadd25519cd20..8d8d781dd3262e 100644 --- a/eng/pipelines/mono/templates/generate-offsets.yml +++ b/eng/pipelines/mono/templates/generate-offsets.yml @@ -24,6 +24,7 @@ jobs: pool: ${{ parameters.pool }} condition: ${{ 
parameters.condition }} dependOnEvaluatePaths: ${{ parameters.dependOnEvaluatePaths }} + logsName: 'BuildLogs_Attempt$(System.JobAttempt)_Mono_Offsets_$(osGroup)$(osSubGroup)' # Compute job name from template parameters name: ${{ format('mono_{0}{1}_offsets', parameters.osGroup, parameters.osSubGroup) }} @@ -85,14 +86,3 @@ jobs: inputs: targetPath: '$(Build.SourcesDirectory)/artifacts/obj/mono/offsetfiles' artifactName: 'Mono_Offsets_$(osGroup)$(osSubGroup)' - - # Publish Logs - - template: /eng/pipelines/common/templates/publish-pipeline-artifacts.yml - parameters: - displayName: Publish Logs - isOfficialBuild: ${{ parameters.isOfficialBuild }} - inputs: - targetPath: $(Build.SourcesDirectory)/artifacts/log - artifactName: 'BuildLogs_Attempt$(System.JobAttempt)_Mono_Offsets_$(osGroup)$(osSubGroup)' - continueOnError: true - condition: always() diff --git a/eng/pipelines/mono/templates/workloads-build.yml b/eng/pipelines/mono/templates/workloads-build.yml index 38fd5e9f5d4f00..72e4c3adc8fb89 100644 --- a/eng/pipelines/mono/templates/workloads-build.yml +++ b/eng/pipelines/mono/templates/workloads-build.yml @@ -28,6 +28,7 @@ jobs: pool: ${{ parameters.pool }} runtimeVariant: ${{ parameters.runtimeVariant }} timeoutInMinutes: ${{ parameters.timeoutInMinutes }} + logsName: WorkloadLogs_Attempt$(System.JobAttempt) dependsOn: ${{ parameters.dependsOn }} @@ -100,17 +101,6 @@ jobs: parameters: name: workloads - # Publish Logs - - template: /eng/pipelines/common/templates/publish-pipeline-artifacts.yml - parameters: - displayName: Publish Logs - isOfficialBuild: ${{ parameters.isOfficialBuild }} - inputs: - targetPath: $(Build.SourcesDirectory)/artifacts/log - artifactName: 'WorkloadLogs_Attempt$(System.JobAttempt)' - continueOnError: true - condition: always() - # Delete wixpdb files before they are uploaded to artifacts - task: DeleteFiles@1 displayName: Delete wixpdb's From f6237bc82b2d8f30fbaa9b27e2589960d713f0c3 Mon Sep 17 00:00:00 2001 From: Aaron Robinson Date: Fri, 
5 Apr 2024 16:12:30 -0700 Subject: [PATCH 130/132] Special casing `System.Guid` for COM VARIANT marshalling (#100377) * Support System.Guid marshalling via VARIANT VARIANT marshalling in .NET 5+ requires a TLB for COM records (i.e., ValueType instances). This means that without a runtime provided TLB, users must define their own TLB for runtime types or define their own transfer types. We address this here by deferring to the NetFX mscorlib's TLB. Co-authored-by: Elinor Fung --- src/coreclr/vm/olevariant.cpp | 33 +++- src/coreclr/vm/stdinterfaces.cpp | 94 ++++++++++ src/coreclr/vm/stdinterfaces.h | 3 + .../ComInterop/DynamicVariantExtensions.cs | 2 - .../InteropServices/Marshalling/ComVariant.cs | 6 +- .../Marshal/GetNativeVariantForObjectTests.cs | 34 ++++ .../Marshal/GetObjectForNativeVariantTests.cs | 26 ++- src/tests/Interop/CMakeLists.txt | 1 + src/tests/Interop/COM/Dynamic/BasicTest.cs | 11 ++ .../COM/NETClients/MiscTypes/App.manifest | 18 ++ .../MiscTypes/NetClientMiscTypes.csproj | 18 ++ .../COM/NETClients/MiscTypes/Program.cs | 107 +++++++++++ .../Interop/COM/NETServer/MiscTypesTesting.cs | 54 ++++++ .../COM/NativeClients/MiscTypes.csproj | 6 + .../COM/NativeClients/MiscTypes/App.manifest | 17 ++ .../NativeClients/MiscTypes/CMakeLists.txt | 22 +++ .../MiscTypes/CoreShim.X.manifest | 16 ++ .../COM/NativeClients/MiscTypes/MiscTypes.cpp | 171 ++++++++++++++++++ .../Primitives/CoreShim.X.manifest | 4 + .../NativeServer/COMNativeServer.X.manifest | 5 + .../COM/NativeServer/MiscTypesTesting.h | 31 ++++ .../Interop/COM/NativeServer/Servers.cpp | 5 + src/tests/Interop/COM/NativeServer/Servers.h | 4 + .../COM/ServerContracts/Server.CoClasses.cs | 33 +++- .../COM/ServerContracts/Server.Contracts.cs | 11 ++ .../COM/ServerContracts/Server.Contracts.h | 12 ++ .../COM/ServerContracts/ServerGuids.cs | 1 + 27 files changed, 726 insertions(+), 19 deletions(-) create mode 100644 src/tests/Interop/COM/NETClients/MiscTypes/App.manifest create mode 100644 
src/tests/Interop/COM/NETClients/MiscTypes/NetClientMiscTypes.csproj create mode 100644 src/tests/Interop/COM/NETClients/MiscTypes/Program.cs create mode 100644 src/tests/Interop/COM/NETServer/MiscTypesTesting.cs create mode 100644 src/tests/Interop/COM/NativeClients/MiscTypes.csproj create mode 100644 src/tests/Interop/COM/NativeClients/MiscTypes/App.manifest create mode 100644 src/tests/Interop/COM/NativeClients/MiscTypes/CMakeLists.txt create mode 100644 src/tests/Interop/COM/NativeClients/MiscTypes/CoreShim.X.manifest create mode 100644 src/tests/Interop/COM/NativeClients/MiscTypes/MiscTypes.cpp create mode 100644 src/tests/Interop/COM/NativeServer/MiscTypesTesting.h diff --git a/src/coreclr/vm/olevariant.cpp b/src/coreclr/vm/olevariant.cpp index 888ebdd380dae8..40e039a1648ef6 100644 --- a/src/coreclr/vm/olevariant.cpp +++ b/src/coreclr/vm/olevariant.cpp @@ -2567,17 +2567,34 @@ void OleVariant::MarshalRecordVariantOleToCom(VARIANT *pOleVariant, if (!pRecInfo) COMPlusThrow(kArgumentException, IDS_EE_INVALID_OLE_VARIANT); + LPVOID pvRecord = V_RECORD(pOleVariant); + if (pvRecord == NULL) + { + pComVariant->SetObjRef(NULL); + return; + } + + MethodTable* pValueClass = NULL; + { + GCX_PREEMP(); + pValueClass = GetMethodTableForRecordInfo(pRecInfo); + } + + if (pValueClass == NULL) + { + // This value type should have been registered through + // a TLB. CoreCLR doesn't support dynamic type mapping. + COMPlusThrow(kArgumentException, IDS_EE_CANNOT_MAP_TO_MANAGED_VC); + } + _ASSERTE(pValueClass->IsBlittable()); + OBJECTREF BoxedValueClass = NULL; GCPROTECT_BEGIN(BoxedValueClass) { - LPVOID pvRecord = V_RECORD(pOleVariant); - if (pvRecord) - { - // This value type should have been registered through - // a TLB. CoreCLR doesn't support dynamic type mapping. 
- COMPlusThrow(kArgumentException, IDS_EE_CANNOT_MAP_TO_MANAGED_VC); - } - + // Now that we have a blittable value class, allocate an instance of the + // boxed value class and copy the contents of the record into it. + BoxedValueClass = AllocateObject(pValueClass); + memcpyNoGCRefs(BoxedValueClass->GetData(), (BYTE*)pvRecord, pValueClass->GetNativeSize()); pComVariant->SetObjRef(BoxedValueClass); } GCPROTECT_END(); diff --git a/src/coreclr/vm/stdinterfaces.cpp b/src/coreclr/vm/stdinterfaces.cpp index 08af895c3baec9..3131f33d2892b9 100644 --- a/src/coreclr/vm/stdinterfaces.cpp +++ b/src/coreclr/vm/stdinterfaces.cpp @@ -611,6 +611,43 @@ HRESULT GetITypeLibForAssembly(_In_ Assembly *pAssembly, _Outptr_ ITypeLib **ppT return S_OK; } // HRESULT GetITypeLibForAssembly() +// .NET Framework's mscorlib TLB GUID. +static const GUID s_MscorlibGuid = { 0xBED7F4EA, 0x1A96, 0x11D2, { 0x8F, 0x08, 0x00, 0xA0, 0xC9, 0xA6, 0x18, 0x6D } }; + +// Hard-coded GUID for System.Guid. +static const GUID s_GuidForSystemGuid = { 0x9C5923E9, 0xDE52, 0x33EA, { 0x88, 0xDE, 0x7E, 0xBC, 0x86, 0x33, 0xB9, 0xCC } }; + +// There are types that are helpful to provide that facilitate porting from +// .NET Framework to .NET 8+. This function is used to acquire their ITypeInfo. +// This should be used narrowly. Types at a minimum should be blittable. +static bool TryDeferToMscorlib(MethodTable* pClass, ITypeInfo** ppTI) +{ + CONTRACTL + { + THROWS; + GC_TRIGGERS; + MODE_PREEMPTIVE; + PRECONDITION(pClass != NULL); + PRECONDITION(pClass->IsBlittable()); + PRECONDITION(ppTI != NULL); + } + CONTRACTL_END; + + // Marshalling of System.Guid is a common scenario that impacts many teams porting + // code to .NET 8+. Try to load the .NET Framework's TLB to support this scenario. 
+ if (pClass == CoreLibBinder::GetClass(CLASS__GUID)) + { + SafeComHolder pMscorlibTypeLib = NULL; + if (SUCCEEDED(::LoadRegTypeLib(s_MscorlibGuid, 2, 4, 0, &pMscorlibTypeLib))) + { + if (SUCCEEDED(pMscorlibTypeLib->GetTypeInfoOfGuid(s_GuidForSystemGuid, ppTI))) + return true; + } + } + + return false; +} + HRESULT GetITypeInfoForEEClass(MethodTable *pClass, ITypeInfo **ppTI, bool bClassInfo) { CONTRACTL @@ -625,6 +662,7 @@ HRESULT GetITypeInfoForEEClass(MethodTable *pClass, ITypeInfo **ppTI, bool bClas GUID clsid; GUID ciid; ComMethodTable *pComMT = NULL; + MethodTable* pOriginalClass = pClass; HRESULT hr = S_OK; SafeComHolder pITLB = NULL; SafeComHolder pTI = NULL; @@ -770,12 +808,68 @@ HRESULT GetITypeInfoForEEClass(MethodTable *pClass, ITypeInfo **ppTI, bool bClas { if (!FAILED(hr)) hr = E_FAIL; + + if (pOriginalClass->IsValueType() && pOriginalClass->IsBlittable()) + { + if (TryDeferToMscorlib(pOriginalClass, ppTI)) + hr = S_OK; + } } ReturnHR: return hr; } // HRESULT GetITypeInfoForEEClass() +// Only a narrow set of types are supported. +// See TryDeferToMscorlib() above. +MethodTable* GetMethodTableForRecordInfo(IRecordInfo* recInfo) +{ + CONTRACTL + { + THROWS; + GC_TRIGGERS; + MODE_PREEMPTIVE; + PRECONDITION(recInfo != NULL); + } + CONTRACTL_END; + + HRESULT hr; + + // Verify the associated TypeLib attribute + SafeComHolder typeInfo; + hr = recInfo->GetTypeInfo(&typeInfo); + if (FAILED(hr)) + return NULL; + + SafeComHolder typeLib; + UINT index; + hr = typeInfo->GetContainingTypeLib(&typeLib, &index); + if (FAILED(hr)) + return NULL; + + TLIBATTR* attrs; + hr = typeLib->GetLibAttr(&attrs); + if (FAILED(hr)) + return NULL; + + GUID libGuid = attrs->guid; + typeLib->ReleaseTLibAttr(attrs); + if (s_MscorlibGuid != libGuid) + return NULL; + + // Verify the Guid of the associated type + GUID typeGuid; + hr = recInfo->GetGuid(&typeGuid); + if (FAILED(hr)) + return NULL; + + // Check for supported types. 
+ if (s_GuidForSystemGuid == typeGuid) + return CoreLibBinder::GetClass(CLASS__GUID); + + return NULL; +} + // Returns a NON-ADDREF'd ITypeInfo. HRESULT GetITypeInfoForMT(ComMethodTable *pMT, ITypeInfo **ppTI) { diff --git a/src/coreclr/vm/stdinterfaces.h b/src/coreclr/vm/stdinterfaces.h index 8d6201439657b1..517ca810b33abb 100644 --- a/src/coreclr/vm/stdinterfaces.h +++ b/src/coreclr/vm/stdinterfaces.h @@ -183,4 +183,7 @@ IErrorInfo *GetSupportedErrorInfo(IUnknown *iface, REFIID riid); // Helpers to get the ITypeInfo* for a type. HRESULT GetITypeInfoForEEClass(MethodTable *pMT, ITypeInfo **ppTI, bool bClassInfo = false); +// Gets the MethodTable for the associated IRecordInfo. +MethodTable* GetMethodTableForRecordInfo(IRecordInfo* recInfo); + #endif diff --git a/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/DynamicVariantExtensions.cs b/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/DynamicVariantExtensions.cs index c8608ce2c9dfa4..5b8f45469ddcd4 100644 --- a/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/DynamicVariantExtensions.cs +++ b/src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/DynamicVariantExtensions.cs @@ -249,8 +249,6 @@ public static unsafe void SetAsByrefVariantIndirect(ref this ComVariant variant, variant.SetAsByrefVariant(ref value); return; case VarEnum.VT_RECORD: - // VT_RECORD's are weird in that regardless of is the VT_BYREF flag is set or not - // they have the same internal representation. 
variant = ComVariant.CreateRaw(value.VarType | VarEnum.VT_BYREF, value.GetRawDataRef()); break; case VarEnum.VT_DECIMAL: diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshalling/ComVariant.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshalling/ComVariant.cs index af1c58b9e97acd..3b9640e285ab7c 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshalling/ComVariant.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshalling/ComVariant.cs @@ -404,7 +404,11 @@ public static unsafe ComVariant CreateRaw(VarEnum vt, T rawValue) (VarEnum.VT_UNKNOWN or VarEnum.VT_DISPATCH or VarEnum.VT_LPSTR or VarEnum.VT_BSTR or VarEnum.VT_LPWSTR or VarEnum.VT_SAFEARRAY or VarEnum.VT_CLSID or VarEnum.VT_STREAM or VarEnum.VT_STREAMED_OBJECT or VarEnum.VT_STORAGE or VarEnum.VT_STORED_OBJECT or VarEnum.VT_CF or VT_VERSIONED_STREAM, _) when sizeof(T) == nint.Size => rawValue, (VarEnum.VT_CY or VarEnum.VT_FILETIME, 8) => rawValue, - (VarEnum.VT_RECORD, _) when sizeof(T) == sizeof(Record) => rawValue, + + // VT_RECORDs are weird in that regardless of whether the VT_BYREF flag is set or not + // they have the same internal representation. 
+ (VarEnum.VT_RECORD or VarEnum.VT_RECORD | VarEnum.VT_BYREF, _) when sizeof(T) == sizeof(Record) => rawValue, + _ when vt.HasFlag(VarEnum.VT_BYREF) && sizeof(T) == nint.Size => rawValue, _ when vt.HasFlag(VarEnum.VT_VECTOR) && sizeof(T) == sizeof(Vector) => rawValue, _ when vt.HasFlag(VarEnum.VT_ARRAY) && sizeof(T) == nint.Size => rawValue, diff --git a/src/libraries/System.Runtime.InteropServices/tests/System.Runtime.InteropServices.UnitTests/System/Runtime/InteropServices/Marshal/GetNativeVariantForObjectTests.cs b/src/libraries/System.Runtime.InteropServices/tests/System.Runtime.InteropServices.UnitTests/System/Runtime/InteropServices/Marshal/GetNativeVariantForObjectTests.cs index c25c59a205c961..108a842a3c8b39 100644 --- a/src/libraries/System.Runtime.InteropServices/tests/System.Runtime.InteropServices.UnitTests/System/Runtime/InteropServices/Marshal/GetNativeVariantForObjectTests.cs +++ b/src/libraries/System.Runtime.InteropServices/tests/System.Runtime.InteropServices.UnitTests/System/Runtime/InteropServices/Marshal/GetNativeVariantForObjectTests.cs @@ -167,6 +167,40 @@ public void GetNativeVariantForObject_String_Success(string obj) } } + [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsBuiltInComEnabled))] + public unsafe void GetNativeVariantForObject_Guid_Success() + { + var guid = new Guid("0DD3E51B-3162-4D13-B906-030F402C5BA2"); + var v = new Variant(); + IntPtr pNative = Marshal.AllocHGlobal(Marshal.SizeOf(v)); + try + { + if (PlatformDetection.IsWindowsNanoServer) + { + Assert.Throws(() => Marshal.GetNativeVariantForObject(guid, pNative)); + } + else + { + Marshal.GetNativeVariantForObject(guid, pNative); + + Variant result = Marshal.PtrToStructure(pNative); + Assert.Equal(VarEnum.VT_RECORD, (VarEnum)result.vt); + Assert.NotEqual(nint.Zero, result.pRecInfo); // We should have an IRecordInfo instance. 
+ + var expectedBytes = new ReadOnlySpan(guid.ToByteArray()); + var actualBytes = new ReadOnlySpan((void*)result.bstrVal, expectedBytes.Length); + Assert.Equal(expectedBytes, actualBytes); + + object o = Marshal.GetObjectForNativeVariant(pNative); + Assert.Equal(guid, o); + } + } + finally + { + Marshal.FreeHGlobal(pNative); + } + } + [ConditionalTheory(typeof(PlatformDetection), nameof(PlatformDetection.IsBuiltInComEnabled))] [InlineData(3.14)] public unsafe void GetNativeVariantForObject_Double_Success(double obj) diff --git a/src/libraries/System.Runtime.InteropServices/tests/System.Runtime.InteropServices.UnitTests/System/Runtime/InteropServices/Marshal/GetObjectForNativeVariantTests.cs b/src/libraries/System.Runtime.InteropServices/tests/System.Runtime.InteropServices.UnitTests/System/Runtime/InteropServices/Marshal/GetObjectForNativeVariantTests.cs index c0b68f5d899ece..f4e01dc3d87e48 100644 --- a/src/libraries/System.Runtime.InteropServices/tests/System.Runtime.InteropServices.UnitTests/System/Runtime/InteropServices/Marshal/GetObjectForNativeVariantTests.cs +++ b/src/libraries/System.Runtime.InteropServices/tests/System.Runtime.InteropServices.UnitTests/System/Runtime/InteropServices/Marshal/GetObjectForNativeVariantTests.cs @@ -246,14 +246,38 @@ public void GetObjectForNativeVariant_InvalidDate_ThrowsArgumentException(double } [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsBuiltInComEnabled))] - public void GetObjectForNativeVariant_NoDataForRecord_ThrowsArgumentException() + public void GetObjectForNativeVariant_NoRecordInfo_ThrowsArgumentException() { Variant variant = CreateVariant(VT_RECORD, new UnionTypes { _record = new Record { _recordInfo = IntPtr.Zero } }); AssertExtensions.Throws(null, () => GetObjectForNativeVariant(variant)); } + [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsBuiltInComEnabled))] + public void GetObjectForNativeVariant_NoRecordData_ReturnsNull() + { + var recordInfo = new 
RecordInfo(); + IntPtr pRecordInfo = Marshal.GetComInterfaceForObject(recordInfo); + try + { + Variant variant = CreateVariant(VT_RECORD, new UnionTypes + { + _record = new Record + { + _record = IntPtr.Zero, + _recordInfo = pRecordInfo + } + }); + Assert.Null(GetObjectForNativeVariant(variant)); + } + finally + { + Marshal.Release(pRecordInfo); + } + } + public static IEnumerable GetObjectForNativeVariant_NoSuchGuid_TestData() { + yield return new object[] { typeof(object).GUID }; yield return new object[] { typeof(string).GUID }; yield return new object[] { Guid.Empty }; } diff --git a/src/tests/Interop/CMakeLists.txt b/src/tests/Interop/CMakeLists.txt index 070b4e562eb433..fa3217993a8a70 100644 --- a/src/tests/Interop/CMakeLists.txt +++ b/src/tests/Interop/CMakeLists.txt @@ -81,6 +81,7 @@ if(CLR_CMAKE_TARGET_WIN32) add_subdirectory(COM/NativeClients/DefaultInterfaces) add_subdirectory(COM/NativeClients/Dispatch) add_subdirectory(COM/NativeClients/Events) + add_subdirectory(COM/NativeClients/MiscTypes) add_subdirectory(COM/ComWrappers/MockReferenceTrackerRuntime) add_subdirectory(COM/ComWrappers/WeakReference) diff --git a/src/tests/Interop/COM/Dynamic/BasicTest.cs b/src/tests/Interop/COM/Dynamic/BasicTest.cs index 4ec5d6fbbdcccd..0d1125bdfc1293 100644 --- a/src/tests/Interop/COM/Dynamic/BasicTest.cs +++ b/src/tests/Interop/COM/Dynamic/BasicTest.cs @@ -43,6 +43,7 @@ public void Run() String(); Date(); + SpecialCasedValueTypes(); ComObject(); Null(); @@ -385,6 +386,16 @@ private void Date() Variant(val, expected); } + private void SpecialCasedValueTypes() + { + { + var val = Guid.NewGuid(); + var expected = val; + // Pass as variant + Variant(val, expected); + } + } + private void ComObject() { Type t = Type.GetTypeFromCLSID(Guid.Parse(ServerGuids.BasicTest)); diff --git a/src/tests/Interop/COM/NETClients/MiscTypes/App.manifest b/src/tests/Interop/COM/NETClients/MiscTypes/App.manifest new file mode 100644 index 00000000000000..93dcb090e865cf --- /dev/null +++ 
b/src/tests/Interop/COM/NETClients/MiscTypes/App.manifest @@ -0,0 +1,18 @@ + + + + + + + + + + + + diff --git a/src/tests/Interop/COM/NETClients/MiscTypes/NetClientMiscTypes.csproj b/src/tests/Interop/COM/NETClients/MiscTypes/NetClientMiscTypes.csproj new file mode 100644 index 00000000000000..bd343f7dc8f9a2 --- /dev/null +++ b/src/tests/Interop/COM/NETClients/MiscTypes/NetClientMiscTypes.csproj @@ -0,0 +1,18 @@ + + + + true + App.manifest + true + + + + + + + + + + + + diff --git a/src/tests/Interop/COM/NETClients/MiscTypes/Program.cs b/src/tests/Interop/COM/NETClients/MiscTypes/Program.cs new file mode 100644 index 00000000000000..de4945b5af13bc --- /dev/null +++ b/src/tests/Interop/COM/NETClients/MiscTypes/Program.cs @@ -0,0 +1,107 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using Xunit; +namespace NetClient +{ + using System; + using System.Runtime.InteropServices; + + using TestLibrary; + using Xunit; + using Server.Contract; + using Server.Contract.Servers; + + struct Struct {} + + public unsafe class Program + { + [Fact] + public static int TestEntryPoint() + { + // RegFree COM is not supported on Windows Nano + if (TestLibrary.Utilities.IsWindowsNanoServer) + { + return 100; + } + + try + { + ValidationTests(); + ValidateNegativeTests(); + } + catch (Exception e) + { + Console.WriteLine($"Test object interop failure: {e}"); + return 101; + } + + return 100; + } + + private static void ValidationTests() + { + Console.WriteLine($"Running {nameof(ValidationTests)} ..."); + + var miscTypeTesting = (Server.Contract.Servers.MiscTypesTesting)new Server.Contract.Servers.MiscTypesTestingClass(); + + Console.WriteLine("-- Primitives <=> VARIANT..."); + { + object expected = null; + Assert.Equal(expected, miscTypeTesting.Marshal_Variant(expected)); + } + { + var expected = DBNull.Value; + Assert.Equal(expected, miscTypeTesting.Marshal_Variant(expected)); + } + { + var 
expected = (sbyte)0x0f; + Assert.Equal(expected, miscTypeTesting.Marshal_Variant(expected)); + } + { + var expected = (short)0x07ff; + Assert.Equal(expected, miscTypeTesting.Marshal_Variant(expected)); + } + { + var expected = (int)0x07ffffff; + Assert.Equal(expected, miscTypeTesting.Marshal_Variant(expected)); + } + { + var expected = (long)0x07ffffffffffffff; + Assert.Equal(expected, miscTypeTesting.Marshal_Variant(expected)); + } + { + var expected = true; + Assert.Equal(expected, miscTypeTesting.Marshal_Variant(expected)); + } + { + var expected = false; + Assert.Equal(expected, miscTypeTesting.Marshal_Variant(expected)); + } + + Console.WriteLine("-- BSTR <=> VARIANT..."); + { + var expected = "The quick Fox jumped over the lazy Dog."; + Assert.Equal(expected, miscTypeTesting.Marshal_Variant(expected)); + } + + Console.WriteLine("-- System.Guid <=> VARIANT..."); + { + var expected = new Guid("{8EFAD956-B33D-46CB-90F4-45F55BA68A96}"); + Assert.Equal(expected, miscTypeTesting.Marshal_Variant(expected)); + } + } + + private static void ValidateNegativeTests() + { + Console.WriteLine($"Running {nameof(ValidateNegativeTests)} ..."); + + var miscTypeTesting = (Server.Contract.Servers.MiscTypesTesting)new Server.Contract.Servers.MiscTypesTestingClass(); + + Console.WriteLine("-- User defined ValueType <=> VARIANT..."); + { + Assert.Throws(() => miscTypeTesting.Marshal_Variant(new Struct())); + } + } + } +} diff --git a/src/tests/Interop/COM/NETServer/MiscTypesTesting.cs b/src/tests/Interop/COM/NETServer/MiscTypesTesting.cs new file mode 100644 index 00000000000000..2a31507d29ab22 --- /dev/null +++ b/src/tests/Interop/COM/NETServer/MiscTypesTesting.cs @@ -0,0 +1,54 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +using System; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +[ComVisible(true)] +[Guid(Server.Contract.Guids.MiscTypesTesting)] +public class MiscTypesTesting : Server.Contract.IMiscTypesTesting +{ + object Server.Contract.IMiscTypesTesting.Marshal_Variant(object obj) + { + if (obj is null) + { + return null; + } + + if (obj is DBNull) + { + return DBNull.Value; + } + + if (obj.GetType().IsValueType) + { + return CallMemberwiseClone(obj); + } + + if (obj is string) + { + return obj; + } + + Environment.FailFast($"Arguments must be ValueTypes or strings: {obj.GetType()}"); + return null; + + // object.MemberwiseClone() will bitwise copy for ValueTypes. + // This is sufficient for the VARIANT marshalling scenario being + // tested here. + [UnsafeAccessor(UnsafeAccessorKind.Method, Name = "MemberwiseClone")] + static extern object CallMemberwiseClone(object obj); + } + + object Server.Contract.IMiscTypesTesting.Marshal_Instance_Variant(string init) + { + if (Guid.TryParse(init, out Guid result)) + { + return result; + } + + Environment.FailFast($"Unknown init value: {init}"); + return null; + } +} \ No newline at end of file diff --git a/src/tests/Interop/COM/NativeClients/MiscTypes.csproj b/src/tests/Interop/COM/NativeClients/MiscTypes.csproj new file mode 100644 index 00000000000000..83409dcfceb267 --- /dev/null +++ b/src/tests/Interop/COM/NativeClients/MiscTypes.csproj @@ -0,0 +1,6 @@ + + + + + + diff --git a/src/tests/Interop/COM/NativeClients/MiscTypes/App.manifest b/src/tests/Interop/COM/NativeClients/MiscTypes/App.manifest new file mode 100644 index 00000000000000..20ffce48d342f7 --- /dev/null +++ b/src/tests/Interop/COM/NativeClients/MiscTypes/App.manifest @@ -0,0 +1,17 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/src/tests/Interop/COM/NativeClients/MiscTypes/CMakeLists.txt b/src/tests/Interop/COM/NativeClients/MiscTypes/CMakeLists.txt new file mode 100644 index 00000000000000..3dcba4671143e3 --- 
/dev/null +++ b/src/tests/Interop/COM/NativeClients/MiscTypes/CMakeLists.txt @@ -0,0 +1,22 @@ +project (COMClientMiscTypes) +include_directories( ${INC_PLATFORM_DIR} ) +include_directories( "../../ServerContracts" ) +include_directories( "../../NativeServer" ) +include_directories("../") +set(SOURCES + MiscTypes.cpp + App.manifest) + +# add the executable +add_executable (COMClientMiscTypes ${SOURCES}) +target_link_libraries(COMClientMiscTypes PRIVATE ${LINK_LIBRARIES_ADDITIONAL}) + +# Copy CoreShim manifest to project output +file(GENERATE OUTPUT $/CoreShim.X.manifest INPUT ${CMAKE_CURRENT_SOURCE_DIR}/CoreShim.X.manifest) + +# add the install targets +install (TARGETS COMClientMiscTypes DESTINATION bin) +# If there's a dynamic ASAN runtime, then copy it to project output. +if (NOT "${ASAN_RUNTIME}" STREQUAL "") + file(COPY "${ASAN_RUNTIME}" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}") +endif() diff --git a/src/tests/Interop/COM/NativeClients/MiscTypes/CoreShim.X.manifest b/src/tests/Interop/COM/NativeClients/MiscTypes/CoreShim.X.manifest new file mode 100644 index 00000000000000..a3c8593ee06761 --- /dev/null +++ b/src/tests/Interop/COM/NativeClients/MiscTypes/CoreShim.X.manifest @@ -0,0 +1,16 @@ + + + + + + + + + + + diff --git a/src/tests/Interop/COM/NativeClients/MiscTypes/MiscTypes.cpp b/src/tests/Interop/COM/NativeClients/MiscTypes/MiscTypes.cpp new file mode 100644 index 00000000000000..6fb435be6513c2 --- /dev/null +++ b/src/tests/Interop/COM/NativeClients/MiscTypes/MiscTypes.cpp @@ -0,0 +1,171 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +#include +#include +#include +#include + +// COM headers +#include +#include + +#define COM_CLIENT +#include + +#define THROW_IF_FAILED(exp) { hr = exp; if (FAILED(hr)) { ::printf("FAILURE: 0x%08x = %s\n", hr, #exp); throw hr; } } +#define THROW_FAIL_IF_FALSE(exp) { if (!(exp)) { ::printf("FALSE: %s\n", #exp); throw E_FAIL; } } + +template +struct ComInit +{ + const HRESULT Result; + + ComInit() + : Result{ ::CoInitializeEx(nullptr, TM) } + { } + + ~ComInit() + { + if (SUCCEEDED(Result)) + ::CoUninitialize(); + } +}; + +using ComMTA = ComInit; +void ValidationTests(); + +int __cdecl main() +{ + if (is_windows_nano() == S_OK) + { + ::puts("RegFree COM is not supported on Windows Nano. Auto-passing this test.\n"); + return 100; + } + ComMTA init; + if (FAILED(init.Result)) + return -1; + + try + { + CoreShimComActivation csact{ W("NETServer"), W("MiscTypesTesting") }; + ValidationTests(); + } + catch (HRESULT hr) + { + ::printf("Test Failure: 0x%08x\n", hr); + return 101; + } + + return 100; +} + +struct VariantMarshalTest +{ + VARIANT Input; + VARIANT Result; + VariantMarshalTest() + { + ::VariantInit(&Input); + ::VariantInit(&Result); + } + ~VariantMarshalTest() + { + ::VariantClear(&Input); + ::VariantClear(&Result); + } +}; + +void ValidationTests() +{ + ::printf(__FUNCTION__ "() through CoCreateInstance...\n"); + + HRESULT hr; + + IMiscTypesTesting *miscTypesTesting; + THROW_IF_FAILED(::CoCreateInstance(CLSID_MiscTypesTesting, nullptr, CLSCTX_INPROC, IID_IMiscTypesTesting, (void**)&miscTypesTesting)); + + ::printf("-- Primitives <=> VARIANT...\n"); + { + VariantMarshalTest args{}; + V_VT(&args.Input) = VT_EMPTY; + THROW_IF_FAILED(miscTypesTesting->Marshal_Variant(args.Input, &args.Result)); + THROW_FAIL_IF_FALSE(V_VT(&args.Input) == V_VT(&args.Result)); + } + { + VariantMarshalTest args{}; + V_VT(&args.Input) = VT_NULL; + THROW_IF_FAILED(miscTypesTesting->Marshal_Variant(args.Input, &args.Result)); + THROW_FAIL_IF_FALSE(V_VT(&args.Input) == 
V_VT(&args.Result)); + } + { + VariantMarshalTest args{}; + V_VT(&args.Input) = VT_I1; + V_I1(&args.Input) = 0x0f; + THROW_IF_FAILED(miscTypesTesting->Marshal_Variant(args.Input, &args.Result)); + THROW_FAIL_IF_FALSE(V_I1(&args.Input) == V_I1(&args.Result)); + } + { + VariantMarshalTest args{}; + V_VT(&args.Input) = VT_I2; + V_I2(&args.Input) = 0x07ff; + THROW_IF_FAILED(miscTypesTesting->Marshal_Variant(args.Input, &args.Result)); + THROW_FAIL_IF_FALSE(V_I2(&args.Input) == V_I2(&args.Result)); + } + { + VariantMarshalTest args{}; + V_VT(&args.Input) = VT_I4; + V_I4(&args.Input) = 0x07ffffff; + THROW_IF_FAILED(miscTypesTesting->Marshal_Variant(args.Input, &args.Result)); + THROW_FAIL_IF_FALSE(V_I4(&args.Input) == V_I4(&args.Result)); + } + { + VariantMarshalTest args{}; + V_VT(&args.Input) = VT_I8; + V_I8(&args.Input) = 0x07ffffffffffffff; + THROW_IF_FAILED(miscTypesTesting->Marshal_Variant(args.Input, &args.Result)); + THROW_FAIL_IF_FALSE(V_I8(&args.Input) == V_I8(&args.Result)); + } + { + VariantMarshalTest args{}; + V_VT(&args.Input) = VT_BOOL; + V_BOOL(&args.Input) = VARIANT_TRUE; + THROW_IF_FAILED(miscTypesTesting->Marshal_Variant(args.Input, &args.Result)); + THROW_FAIL_IF_FALSE(V_BOOL(&args.Input) == V_BOOL(&args.Result)); + } + { + VariantMarshalTest args{}; + V_VT(&args.Input) = VT_BOOL; + V_BOOL(&args.Input) = VARIANT_FALSE; + THROW_IF_FAILED(miscTypesTesting->Marshal_Variant(args.Input, &args.Result)); + THROW_FAIL_IF_FALSE(V_BOOL(&args.Input) == V_BOOL(&args.Result)); + } + + ::printf("-- BSTR <=> VARIANT...\n"); + { + VariantMarshalTest args{}; + V_VT(&args.Input) = VT_BSTR; + V_BSTR(&args.Input) = ::SysAllocString(W("The quick Fox jumped over the lazy Dog.")); + THROW_IF_FAILED(miscTypesTesting->Marshal_Variant(args.Input, &args.Result)); + THROW_FAIL_IF_FALSE(CompareStringOrdinal(V_BSTR(&args.Input), -1, V_BSTR(&args.Result), -1, FALSE) == CSTR_EQUAL); + } + + ::printf("-- System.Guid <=> VARIANT...\n"); + { + /* 8EFAD956-B33D-46CB-90F4-45F55BA68A96 
*/ + const GUID expected = { 0x8EFAD956, 0xB33D, 0x46CB, { 0x90, 0xF4, 0x45, 0xF5, 0x5B, 0xA6, 0x8A, 0x96} }; + + // Get a System.Guid into native + VariantMarshalTest guidVar; + THROW_IF_FAILED(miscTypesTesting->Marshal_Instance_Variant(W("{8EFAD956-B33D-46CB-90F4-45F55BA68A96}"), &guidVar.Result)); + THROW_FAIL_IF_FALSE(V_VT(&guidVar.Result) == VT_RECORD); + THROW_FAIL_IF_FALSE(memcmp(V_RECORD(&guidVar.Result), &expected, sizeof(expected)) == 0); + + // Use the Guid as input. + VariantMarshalTest args{}; + THROW_IF_FAILED(::VariantCopy(&args.Input, &guidVar.Result)); + THROW_IF_FAILED(miscTypesTesting->Marshal_Variant(args.Input, &args.Result)); + THROW_FAIL_IF_FALSE(V_VT(&args.Input) == V_VT(&args.Result)); + THROW_FAIL_IF_FALSE(memcmp(V_RECORD(&args.Input), V_RECORD(&args.Result), sizeof(expected)) == 0); + } +} diff --git a/src/tests/Interop/COM/NativeClients/Primitives/CoreShim.X.manifest b/src/tests/Interop/COM/NativeClients/Primitives/CoreShim.X.manifest index 099f3a36e169ba..8b8e6ad135a2e2 100644 --- a/src/tests/Interop/COM/NativeClients/Primitives/CoreShim.X.manifest +++ b/src/tests/Interop/COM/NativeClients/Primitives/CoreShim.X.manifest @@ -19,6 +19,10 @@ + + + + + +#include "Servers.h" + +class MiscTypesTesting : public UnknownImpl, public IMiscTypesTesting +{ +public: // IMiscTypesTesting + DEF_FUNC(Marshal_Variant)(_In_ VARIANT obj, _Out_ VARIANT* result) + { + return ::VariantCopy(result, &obj); + } + + DEF_FUNC(Marshal_Instance_Variant)(_In_ LPCWSTR init, _Out_ VARIANT* result) + { + return E_NOTIMPL; + } + +public: // IUnknown + STDMETHOD(QueryInterface)( + /* [in] */ REFIID riid, + /* [iid_is][out] */ _COM_Outptr_ void __RPC_FAR *__RPC_FAR *ppvObject) + { + return DoQueryInterface(riid, ppvObject, static_cast(this)); + } + + DEFINE_REF_COUNTING(); +}; \ No newline at end of file diff --git a/src/tests/Interop/COM/NativeServer/Servers.cpp b/src/tests/Interop/COM/NativeServer/Servers.cpp index f2becfe4d0941d..05f26be8d4741d 100644 --- 
a/src/tests/Interop/COM/NativeServer/Servers.cpp +++ b/src/tests/Interop/COM/NativeServer/Servers.cpp @@ -162,6 +162,7 @@ STDAPI DllRegisterServer(void) RETURN_IF_FAILED(RegisterClsid(__uuidof(NumericTesting), L"Both")); RETURN_IF_FAILED(RegisterClsid(__uuidof(ArrayTesting), L"Both")); RETURN_IF_FAILED(RegisterClsid(__uuidof(StringTesting), L"Both")); + RETURN_IF_FAILED(RegisterClsid(__uuidof(MiscTypesTesting), L"Both")); RETURN_IF_FAILED(RegisterClsid(__uuidof(ErrorMarshalTesting), L"Both")); RETURN_IF_FAILED(RegisterClsid(__uuidof(DispatchTesting), L"Both")); RETURN_IF_FAILED(RegisterClsid(__uuidof(EventTesting), L"Both")); @@ -180,6 +181,7 @@ STDAPI DllUnregisterServer(void) RETURN_IF_FAILED(RemoveClsid(__uuidof(NumericTesting))); RETURN_IF_FAILED(RemoveClsid(__uuidof(ArrayTesting))); RETURN_IF_FAILED(RemoveClsid(__uuidof(StringTesting))); + RETURN_IF_FAILED(RemoveClsid(__uuidof(MiscTypesTesting))); RETURN_IF_FAILED(RemoveClsid(__uuidof(ErrorMarshalTesting))); RETURN_IF_FAILED(RemoveClsid(__uuidof(DispatchTesting))); RETURN_IF_FAILED(RemoveClsid(__uuidof(EventTesting))); @@ -202,6 +204,9 @@ STDAPI DllGetClassObject(_In_ REFCLSID rclsid, _In_ REFIID riid, _Out_ LPVOID FA if (rclsid == __uuidof(StringTesting)) return ClassFactoryBasic::Create(riid, ppv); + if (rclsid == __uuidof(MiscTypesTesting)) + return ClassFactoryBasic::Create(riid, ppv); + if (rclsid == __uuidof(ErrorMarshalTesting)) return ClassFactoryBasic::Create(riid, ppv); diff --git a/src/tests/Interop/COM/NativeServer/Servers.h b/src/tests/Interop/COM/NativeServer/Servers.h index 7c9ec0300bc67d..c87288d2535b1a 100644 --- a/src/tests/Interop/COM/NativeServer/Servers.h +++ b/src/tests/Interop/COM/NativeServer/Servers.h @@ -12,6 +12,7 @@ class DECLSPEC_UUID("53169A33-E85D-4E3C-B668-24E438D0929B") NumericTesting; class DECLSPEC_UUID("B99ABE6A-DFF6-440F-BFB6-55179B8FE18E") ArrayTesting; class DECLSPEC_UUID("C73C83E8-51A2-47F8-9B5C-4284458E47A6") StringTesting; +class 
DECLSPEC_UUID("CCFF894B-A27C-45E0-9B30-6C88D722E843") MiscTypesTesting; class DECLSPEC_UUID("71CF5C45-106C-4B32-B418-43A463C6041F") ErrorMarshalTesting; class DECLSPEC_UUID("0F8ACD0C-ECE0-4F2A-BD1B-6BFCA93A0726") DispatchTesting; class DECLSPEC_UUID("4DBD9B61-E372-499F-84DE-EFC70AA8A009") EventTesting; @@ -25,6 +26,7 @@ class DECLSPEC_UUID("4F54231D-9E11-4C0B-8E0B-2EBD8B0E5811") TrackMyLifetimeTesti #define CLSID_NumericTesting __uuidof(NumericTesting) #define CLSID_ArrayTesting __uuidof(ArrayTesting) #define CLSID_StringTesting __uuidof(StringTesting) +#define CLSID_MiscTypesTesting __uuidof(MiscTypesTesting) #define CLSID_ErrorMarshalTesting __uuidof(ErrorMarshalTesting) #define CLSID_DispatchTesting __uuidof(DispatchTesting) #define CLSID_EventTesting __uuidof(EventTesting) @@ -38,6 +40,7 @@ class DECLSPEC_UUID("4F54231D-9E11-4C0B-8E0B-2EBD8B0E5811") TrackMyLifetimeTesti #define IID_INumericTesting __uuidof(INumericTesting) #define IID_IArrayTesting __uuidof(IArrayTesting) #define IID_IStringTesting __uuidof(IStringTesting) +#define IID_IMiscTypesTesting __uuidof(IMiscTypesTesting) #define IID_IErrorMarshalTesting __uuidof(IErrorMarshalTesting) #define IID_IDispatchTesting __uuidof(IDispatchTesting) #define IID_TestingEvents __uuidof(TestingEvents) @@ -82,6 +85,7 @@ struct CoreShimComActivation #include "NumericTesting.h" #include "ArrayTesting.h" #include "StringTesting.h" + #include "MiscTypesTesting.h" #include "ErrorMarshalTesting.h" #include "DispatchTesting.h" #include "EventTesting.h" diff --git a/src/tests/Interop/COM/ServerContracts/Server.CoClasses.cs b/src/tests/Interop/COM/ServerContracts/Server.CoClasses.cs index 0b6f988f1a7a98..2479e6cd6f083a 100644 --- a/src/tests/Interop/COM/ServerContracts/Server.CoClasses.cs +++ b/src/tests/Interop/COM/ServerContracts/Server.CoClasses.cs @@ -10,7 +10,7 @@ namespace Server.Contract.Servers using System.Runtime.InteropServices; /// - /// Managed definition of CoClass + /// Managed definition of CoClass /// 
[ComImport] [CoClass(typeof(NumericTestingClass))] @@ -29,7 +29,7 @@ internal class NumericTestingClass } /// - /// Managed definition of CoClass + /// Managed definition of CoClass /// [ComImport] [CoClass(typeof(ArrayTestingClass))] @@ -48,7 +48,7 @@ internal class ArrayTestingClass } /// - /// Managed definition of CoClass + /// Managed definition of CoClass /// [ComImport] [CoClass(typeof(StringTestingClass))] @@ -67,7 +67,26 @@ internal class StringTestingClass } /// - /// Managed definition of CoClass + /// Managed definition of CoClass + /// + [ComImport] + [CoClass(typeof(MiscTypesTestingClass))] + [Guid("7FBB8677-BDD0-4E5A-B38B-CA92A4555466")] + internal interface MiscTypesTesting : Server.Contract.IMiscTypesTesting + { + } + + /// + /// Managed activation for CoClass + /// + [ComImport] + [Guid(Server.Contract.Guids.MiscTypesTesting)] + internal class MiscTypesTestingClass + { + } + + /// + /// Managed definition of CoClass /// [ComImport] [CoClass(typeof(ErrorMarshalTestingClass))] @@ -86,7 +105,7 @@ internal class ErrorMarshalTestingClass } /// - /// Managed definition of CoClass + /// Managed definition of CoClass /// [ComImport] [CoClass(typeof(DispatchTestingClass))] @@ -105,7 +124,7 @@ internal class DispatchTestingClass } /// - /// Managed definition of CoClass + /// Managed definition of CoClass /// [ComImport] [CoClass(typeof(AggregationTestingClass))] @@ -124,7 +143,7 @@ internal class AggregationTestingClass } /// - /// Managed definition of CoClass + /// Managed definition of CoClass /// [ComImport] [CoClass(typeof(ColorTestingClass))] diff --git a/src/tests/Interop/COM/ServerContracts/Server.Contracts.cs b/src/tests/Interop/COM/ServerContracts/Server.Contracts.cs index 0bac21e66ee17e..dd0f71634e2bdc 100644 --- a/src/tests/Interop/COM/ServerContracts/Server.Contracts.cs +++ b/src/tests/Interop/COM/ServerContracts/Server.Contracts.cs @@ -184,6 +184,17 @@ string Add_BStr( void Pass_Through_LCID(out int lcid); } + [ComVisible(true)] + 
[Guid("7FBB8677-BDD0-4E5A-B38B-CA92A4555466")] + [InterfaceType(ComInterfaceType.InterfaceIsIUnknown)] + public interface IMiscTypesTesting + { + object Marshal_Variant(object obj); + + // Test API for marshalling an arbitrary type via VARIANT + object Marshal_Instance_Variant([MarshalAs(UnmanagedType.LPWStr)] string init); + } + public struct HResult { public int hr; diff --git a/src/tests/Interop/COM/ServerContracts/Server.Contracts.h b/src/tests/Interop/COM/ServerContracts/Server.Contracts.h index 1eb0528aae4b78..d2c26884589efa 100644 --- a/src/tests/Interop/COM/ServerContracts/Server.Contracts.h +++ b/src/tests/Interop/COM/ServerContracts/Server.Contracts.h @@ -366,6 +366,18 @@ IStringTesting : IUnknown /*[out]*/ LCID* outLcid) = 0; }; +struct __declspec(uuid("7FBB8677-BDD0-4E5A-B38B-CA92A4555466")) +IMiscTypesTesting : IUnknown +{ + virtual HRESULT STDMETHODCALLTYPE Marshal_Variant ( + /*[in]*/ VARIANT obj, + /*[out,retval]*/ VARIANT* result) = 0; + + virtual HRESULT STDMETHODCALLTYPE Marshal_Instance_Variant ( + /*[in]*/ LPCWSTR init, + /*[out,retval]*/ VARIANT* result) = 0; +}; + struct __declspec(uuid("592386a5-6837-444d-9de3-250815d18556")) IErrorMarshalTesting : IUnknown { diff --git a/src/tests/Interop/COM/ServerContracts/ServerGuids.cs b/src/tests/Interop/COM/ServerContracts/ServerGuids.cs index 5336cde54106e0..8b0c65a3ce1532 100644 --- a/src/tests/Interop/COM/ServerContracts/ServerGuids.cs +++ b/src/tests/Interop/COM/ServerContracts/ServerGuids.cs @@ -11,6 +11,7 @@ internal sealed class Guids public const string NumericTesting = "53169A33-E85D-4E3C-B668-24E438D0929B"; public const string ArrayTesting = "B99ABE6A-DFF6-440F-BFB6-55179B8FE18E"; public const string StringTesting = "C73C83E8-51A2-47F8-9B5C-4284458E47A6"; + public const string MiscTypesTesting = "CCFF894B-A27C-45E0-9B30-6C88D722E843"; public const string ErrorMarshalTesting = "71CF5C45-106C-4B32-B418-43A463C6041F"; public const string DispatchTesting = "0F8ACD0C-ECE0-4F2A-BD1B-6BFCA93A0726"; 
public const string EventTesting = "4DBD9B61-E372-499F-84DE-EFC70AA8A009"; From 2496619490be55f223b7476b78202d69bc407f78 Mon Sep 17 00:00:00 2001 From: "dotnet-maestro[bot]" <42748379+dotnet-maestro[bot]@users.noreply.github.com> Date: Fri, 5 Apr 2024 21:29:36 -0400 Subject: [PATCH 131/132] Update dependencies from https://github.com/dotnet/arcade build 20240405.4 (#100710) Microsoft.SourceBuild.Intermediate.arcade , Microsoft.DotNet.Arcade.Sdk , Microsoft.DotNet.Build.Tasks.Archives , Microsoft.DotNet.Build.Tasks.Feed , Microsoft.DotNet.Build.Tasks.Installers , Microsoft.DotNet.Build.Tasks.Packaging , Microsoft.DotNet.Build.Tasks.TargetFramework , Microsoft.DotNet.Build.Tasks.Templating , Microsoft.DotNet.Build.Tasks.Workloads , Microsoft.DotNet.CodeAnalysis , Microsoft.DotNet.GenAPI , Microsoft.DotNet.GenFacades , Microsoft.DotNet.Helix.Sdk , Microsoft.DotNet.PackageTesting , Microsoft.DotNet.RemoteExecutor , Microsoft.DotNet.SharedFramework.Sdk , Microsoft.DotNet.VersionTools.Tasks , Microsoft.DotNet.XliffTasks , Microsoft.DotNet.XUnitAssert , Microsoft.DotNet.XUnitConsoleRunner , Microsoft.DotNet.XUnitExtensions From Version 9.0.0-beta.24203.1 -> To Version 9.0.0-beta.24205.4 Co-authored-by: dotnet-maestro[bot] --- eng/Version.Details.xml | 84 +++++++++---------- eng/Versions.props | 32 +++---- .../templates-official/job/onelocbuild.yml | 2 +- .../templates-official/job/source-build.yml | 2 +- .../post-build/post-build.yml | 6 +- .../variables/pool-providers.yml | 2 +- global.json | 6 +- 7 files changed, 67 insertions(+), 67 deletions(-) diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml index ef3c9906e6fbd2..b5717f05a5f586 100644 --- a/eng/Version.Details.xml +++ b/eng/Version.Details.xml @@ -92,87 +92,87 @@ - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 
541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 
541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b https://github.com/dotnet/runtime-assets @@ -332,9 +332,9 @@ https://github.com/dotnet/xharness 28af9496b0e260f7e66ec549b39f1410ee9743d1 - + https://github.com/dotnet/arcade - 532f956a119bce77ca279994054d08dbc24418f7 + 541820fbd313f9bb82b756b66d258fe316d5e48b https://dev.azure.com/dnceng/internal/_git/dotnet-optimization diff --git a/eng/Versions.props b/eng/Versions.props index 4ef6855a95544a..cd1960205b4f78 100644 --- a/eng/Versions.props +++ b/eng/Versions.props @@ -83,22 +83,22 @@ 9.0.100-preview.4.24175.4 - 9.0.0-beta.24203.1 - 9.0.0-beta.24203.1 - 9.0.0-beta.24203.1 - 9.0.0-beta.24203.1 - 2.6.7-beta.24203.1 - 9.0.0-beta.24203.1 - 2.6.7-beta.24203.1 - 9.0.0-beta.24203.1 - 9.0.0-beta.24203.1 - 9.0.0-beta.24203.1 - 9.0.0-beta.24203.1 - 9.0.0-beta.24203.1 - 9.0.0-beta.24203.1 - 9.0.0-beta.24203.1 - 9.0.0-beta.24203.1 - 9.0.0-beta.24203.1 + 9.0.0-beta.24205.4 + 9.0.0-beta.24205.4 + 9.0.0-beta.24205.4 + 9.0.0-beta.24205.4 + 2.6.7-beta.24205.4 + 9.0.0-beta.24205.4 + 2.6.7-beta.24205.4 + 9.0.0-beta.24205.4 + 9.0.0-beta.24205.4 + 9.0.0-beta.24205.4 + 9.0.0-beta.24205.4 + 9.0.0-beta.24205.4 + 9.0.0-beta.24205.4 + 9.0.0-beta.24205.4 + 9.0.0-beta.24205.4 + 9.0.0-beta.24205.4 1.4.0 diff --git a/eng/common/templates-official/job/onelocbuild.yml b/eng/common/templates-official/job/onelocbuild.yml index 52b4d05d3f8dd6..ba9ba49303292a 100644 --- a/eng/common/templates-official/job/onelocbuild.yml +++ b/eng/common/templates-official/job/onelocbuild.yml @@ -56,7 +56,7 @@ jobs: # If it's not devdiv, it's dnceng ${{ if ne(variables['System.TeamProject'], 'DevDiv') }}: name: $(DncEngInternalBuildPool) - image: 1es-windows-2022 + image: 1es-windows-2022-pt os: windows steps: diff --git 
a/eng/common/templates-official/job/source-build.yml b/eng/common/templates-official/job/source-build.yml index 50d4b98e201a31..50f04e642a3543 100644 --- a/eng/common/templates-official/job/source-build.yml +++ b/eng/common/templates-official/job/source-build.yml @@ -52,7 +52,7 @@ jobs: ${{ if eq(variables['System.TeamProject'], 'internal') }}: name: $[replace(replace(eq(contains(coalesce(variables['System.PullRequest.TargetBranch'], variables['Build.SourceBranch'], 'refs/heads/main'), 'release'), 'true'), True, 'NetCore1ESPool-Svc-Internal'), False, 'NetCore1ESPool-Internal')] - image: 1es-mariner-2 + image: 1es-mariner-2-pt os: linux ${{ if ne(parameters.platform.pool, '') }}: diff --git a/eng/common/templates-official/post-build/post-build.yml b/eng/common/templates-official/post-build/post-build.yml index da1f40958b450d..d286e956bdfa40 100644 --- a/eng/common/templates-official/post-build/post-build.yml +++ b/eng/common/templates-official/post-build/post-build.yml @@ -110,7 +110,7 @@ stages: # If it's not devdiv, it's dnceng ${{ else }}: name: $(DncEngInternalBuildPool) - image: 1es-windows-2022 + image: 1es-windows-2022-pt os: windows steps: @@ -150,7 +150,7 @@ stages: # If it's not devdiv, it's dnceng ${{ else }}: name: $(DncEngInternalBuildPool) - image: 1es-windows-2022 + image: 1es-windows-2022-pt os: windows steps: - template: setup-maestro-vars.yml @@ -208,7 +208,7 @@ stages: # If it's not devdiv, it's dnceng ${{ else }}: name: $(DncEngInternalBuildPool) - image: 1es-windows-2022 + image: 1es-windows-2022-pt os: windows steps: - template: setup-maestro-vars.yml diff --git a/eng/common/templates-official/variables/pool-providers.yml b/eng/common/templates-official/variables/pool-providers.yml index 1f308b24efc43d..beab7d1bfba062 100644 --- a/eng/common/templates-official/variables/pool-providers.yml +++ b/eng/common/templates-official/variables/pool-providers.yml @@ -23,7 +23,7 @@ # # pool: # name: $(DncEngInternalBuildPool) -# image: 1es-windows-2022 +# 
image: 1es-windows-2022-pt variables: # Coalesce the target and source branches so we know when a PR targets a release branch diff --git a/global.json b/global.json index bf1936167a4a42..b3d764e6c1528d 100644 --- a/global.json +++ b/global.json @@ -8,9 +8,9 @@ "dotnet": "9.0.100-preview.1.24101.2" }, "msbuild-sdks": { - "Microsoft.DotNet.Arcade.Sdk": "9.0.0-beta.24203.1", - "Microsoft.DotNet.Helix.Sdk": "9.0.0-beta.24203.1", - "Microsoft.DotNet.SharedFramework.Sdk": "9.0.0-beta.24203.1", + "Microsoft.DotNet.Arcade.Sdk": "9.0.0-beta.24205.4", + "Microsoft.DotNet.Helix.Sdk": "9.0.0-beta.24205.4", + "Microsoft.DotNet.SharedFramework.Sdk": "9.0.0-beta.24205.4", "Microsoft.Build.NoTargets": "3.7.0", "Microsoft.Build.Traversal": "3.4.0", "Microsoft.NET.Sdk.IL": "9.0.0-preview.4.24201.1" From 995409f2036aaa584aee7f950a55f84d03392ef4 Mon Sep 17 00:00:00 2001 From: Elinor Fung Date: Fri, 5 Apr 2024 20:03:34 -0700 Subject: [PATCH 132/132] Disable compile-native projects for linux-bionic (#100712) After https://github.com/dotnet/runtime/pull/100623, the official build is broken. Our infrastructure for building native runtime component libraries using NativeAOT is failing for linux-bionic. Disable building on linux-bionic for now to unblock the build. --- src/native/managed/compile-native.proj | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/native/managed/compile-native.proj b/src/native/managed/compile-native.proj index 453d84ab4dc072..b9815ae30e488f 100644 --- a/src/native/managed/compile-native.proj +++ b/src/native/managed/compile-native.proj @@ -19,6 +19,8 @@ false + + false false