diff --git a/docs/design/coreclr/botr/clr-abi.md b/docs/design/coreclr/botr/clr-abi.md index b168065146577..417f6fdec5346 100644 --- a/docs/design/coreclr/botr/clr-abi.md +++ b/docs/design/coreclr/botr/clr-abi.md @@ -177,11 +177,13 @@ This section describes the conventions the JIT needs to follow when generating c ## Funclets -For all platforms except Windows/x86, all managed EH handlers (finally, fault, filter, filter-handler, and catch) are extracted into their own 'funclets'. To the OS they are treated just like first class functions (separate PDATA and XDATA (`RUNTIME_FUNCTION` entry), etc.). The CLR currently treats them just like part of the parent function in many ways. The main function and all funclets must be allocated in a single code allocation (see hot cold splitting). They 'share' GC info. Only the main function prolog can be hot patched. +For all platforms except Windows/x86 on CoreCLR, all managed EH handlers (finally, fault, filter, filter-handler, and catch) are extracted into their own 'funclets'. To the OS they are treated just like first class functions (separate PDATA and XDATA (`RUNTIME_FUNCTION` entry), etc.). The CLR currently treats them just like part of the parent function in many ways. The main function and all funclets must be allocated in a single code allocation (see hot cold splitting). They 'share' GC info. Only the main function prolog can be hot patched. The only way to enter a handler funclet is via a call. In the case of an exception, the call is from the VM's EH subsystem as part of exception dispatch/unwind. In the non-exceptional case, this is called local unwind or a non-local exit. In C# this is accomplished by simply falling-through/out of a try body or an explicit goto. In IL this is always accomplished via a LEAVE opcode, within a try body, targeting an IL offset outside the try body. In such cases the call is from the JITed code of the parent function. 
-For Windows/x86, all handlers are generated within the method body, typically in lexical order. A nested try/catch is generated completely within the EH region in which it is nested. These handlers are essentially "in-line funclets", but they do not look like normal functions: they do not have a normal prolog or epilog, although they do have special entry/exit and register conventions. Also, nested handlers are not un-nested as for funclets: the code for a nested handler is generated within the handler in which it is nested. +For Windows/x86 on CoreCLR, all handlers are generated within the method body, typically in lexical order. A nested try/catch is generated completely within the EH region in which it is nested. These handlers are essentially "in-line funclets", but they do not look like normal functions: they do not have a normal prolog or epilog, although they do have special entry/exit and register conventions. Also, nested handlers are not un-nested as for funclets: the code for a nested handler is generated within the handler in which it is nested. + +For Windows/x86 on NativeAOT and Linux/x86, funclets are used just like on other platforms. 
## Cloned finallys diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake index 0040f9575de27..4201c06692eeb 100644 --- a/src/coreclr/clrdefinitions.cmake +++ b/src/coreclr/clrdefinitions.cmake @@ -288,8 +288,4 @@ function(set_target_definitions_to_custom_os_and_arch) if (TARGETDETAILS_ARCH STREQUAL "armel") target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE ARM_SOFTFP) endif() - - if (NOT (TARGETDETAILS_ARCH STREQUAL "x86") OR (TARGETDETAILS_OS MATCHES "^unix") OR (TARGETDETAILS_OS MATCHES "win_aot")) - target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE FEATURE_EH_FUNCLETS) - endif (NOT (TARGETDETAILS_ARCH STREQUAL "x86") OR (TARGETDETAILS_OS MATCHES "^unix") OR (TARGETDETAILS_OS MATCHES "win_aot")) endfunction() diff --git a/src/coreclr/crosscomponents.cmake b/src/coreclr/crosscomponents.cmake index b06b706070489..11e923805a6ea 100644 --- a/src/coreclr/crosscomponents.cmake +++ b/src/coreclr/crosscomponents.cmake @@ -25,13 +25,6 @@ if (CLR_CMAKE_HOST_OS STREQUAL CLR_CMAKE_TARGET_OS OR CLR_CMAKE_TARGET_IOS OR CL DESTINATIONS . COMPONENT crosscomponents ) - if (CLR_CMAKE_TARGET_ARCH_I386) - install_clr (TARGETS - clrjit_win_aot_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} - DESTINATIONS . 
- COMPONENT crosscomponents - ) - endif() endif() endif() endif() diff --git a/src/coreclr/inc/clrnt.h b/src/coreclr/inc/clrnt.h index fb7d810254562..cacc865b715f0 100644 --- a/src/coreclr/inc/clrnt.h +++ b/src/coreclr/inc/clrnt.h @@ -201,7 +201,6 @@ typedef struct _DISPATCHER_CONTEXT { #define RUNTIME_FUNCTION__BeginAddress(prf) (prf)->BeginAddress #define RUNTIME_FUNCTION__SetBeginAddress(prf,addr) ((prf)->BeginAddress = (addr)) -#ifdef FEATURE_EH_FUNCLETS #include "win64unwind.h" #include "daccess.h" @@ -235,7 +234,6 @@ RtlVirtualUnwind ( __inout_opt PT_KNONVOLATILE_CONTEXT_POINTERS ContextPointers ); #endif // HOST_X86 -#endif // FEATURE_EH_FUNCLETS #endif // TARGET_X86 diff --git a/src/coreclr/inc/gcinfo.h b/src/coreclr/inc/gcinfo.h index f334b099f2578..16bff25525a97 100644 --- a/src/coreclr/inc/gcinfo.h +++ b/src/coreclr/inc/gcinfo.h @@ -25,7 +25,7 @@ const unsigned OFFSET_MASK = 0x3; // mask to access the low 2 bits // const unsigned byref_OFFSET_FLAG = 0x1; // the offset is an interior ptr const unsigned pinned_OFFSET_FLAG = 0x2; // the offset is a pinned ptr -#if defined(TARGET_X86) && !defined(FEATURE_EH_FUNCLETS) +#if defined(TARGET_X86) // JIT32_ENCODER has additional restriction on x86 without funclets: // - for untracked locals the flags allowed are "pinned" and "byref" // - for tracked locals the flags allowed are "this" and "byref" diff --git a/src/coreclr/jit/CMakeLists.txt b/src/coreclr/jit/CMakeLists.txt index 2f0b3659aa5ad..766a0a0150e87 100644 --- a/src/coreclr/jit/CMakeLists.txt +++ b/src/coreclr/jit/CMakeLists.txt @@ -23,8 +23,6 @@ function(create_standalone_jit) if(TARGETDETAILS_OS STREQUAL "unix_osx" OR TARGETDETAILS_OS STREQUAL "unix_anyos") set(JIT_ARCH_LINK_LIBRARIES gcinfo_unix_${TARGETDETAILS_ARCH}) - elseif(TARGETDETAILS_OS STREQUAL "win_aot") - set(JIT_ARCH_LINK_LIBRARIES gcinfo_win_${TARGETDETAILS_ARCH}) else() set(JIT_ARCH_LINK_LIBRARIES gcinfo_${TARGETDETAILS_OS}_${TARGETDETAILS_ARCH}) endif() @@ -658,7 +656,6 @@ else() 
create_standalone_jit(TARGET clrjit_universal_arm_${ARCH_HOST_NAME} OS universal ARCH arm DESTINATIONS .) target_compile_definitions(clrjit_universal_arm_${ARCH_HOST_NAME} PRIVATE ARM_SOFTFP CONFIGURABLE_ARM_ABI) create_standalone_jit(TARGET clrjit_win_x86_${ARCH_HOST_NAME} OS win ARCH x86 DESTINATIONS .) - create_standalone_jit(TARGET clrjit_win_aot_x86_${ARCH_HOST_NAME} OS win_aot ARCH x86 DESTINATIONS .) endif (CLR_CMAKE_TARGET_ARCH_RISCV64) if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 6cde9e0e93d8b..60dbce6aaf00a 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -1812,9 +1812,7 @@ bool BasicBlock::hasEHBoundaryIn() const bool returnVal = (bbCatchTyp != BBCT_NONE); if (!returnVal) { -#if FEATURE_EH_FUNCLETS assert(!HasFlag(BBF_FUNCLET_BEG)); -#endif // FEATURE_EH_FUNCLETS } return returnVal; } @@ -1833,16 +1831,7 @@ bool BasicBlock::hasEHBoundaryIn() const // bool BasicBlock::hasEHBoundaryOut() const { - bool returnVal = KindIs(BBJ_EHFILTERRET, BBJ_EHFINALLYRET, BBJ_EHFAULTRET); - -#if FEATURE_EH_FUNCLETS - if (bbKind == BBJ_EHCATCHRET) - { - returnVal = true; - } -#endif // FEATURE_EH_FUNCLETS - - return returnVal; + return KindIs(BBJ_EHFILTERRET, BBJ_EHFINALLYRET, BBJ_EHFAULTRET, BBJ_EHCATCHRET); } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 16321157664a5..500b5274b6f41 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -66,7 +66,7 @@ enum BBKinds : BYTE BBJ_EHFINALLYRET,// block ends with 'endfinally' (for finally) BBJ_EHFAULTRET, // block ends with 'endfinally' (IL alias for 'endfault') (for fault) BBJ_EHFILTERRET, // block ends with 'endfilter' - BBJ_EHCATCHRET, // block ends with a leave out of a catch (only #if defined(FEATURE_EH_FUNCLETS)) + BBJ_EHCATCHRET, // block ends with a leave out of a catch BBJ_THROW, // block ends with 
'throw' BBJ_RETURN, // block ends with 'ret' BBJ_ALWAYS, // block always jumps to the target diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h index c5e65b081583d..16893901d0a4e 100644 --- a/src/coreclr/jit/codegen.h +++ b/src/coreclr/jit/codegen.h @@ -556,8 +556,6 @@ class CodeGen final : public CodeGenInterface void genFnProlog(); void genFnEpilog(BasicBlock* block); -#if defined(FEATURE_EH_FUNCLETS) - void genReserveFuncletProlog(BasicBlock* block); void genReserveFuncletEpilog(BasicBlock* block); void genFuncletProlog(BasicBlock* block); @@ -640,16 +638,6 @@ class CodeGen final : public CodeGenInterface void genUpdateCurrentFunclet(BasicBlock* block); -#else // !FEATURE_EH_FUNCLETS - - // This is a no-op when there are no funclets! - void genUpdateCurrentFunclet(BasicBlock* block) - { - return; - } - -#endif // !FEATURE_EH_FUNCLETS - void genGeneratePrologsAndEpilogs(); #if defined(DEBUG) @@ -744,9 +732,7 @@ class CodeGen final : public CodeGenInterface void siOpenScopesForNonTrackedVars(const BasicBlock* block, unsigned int lastBlockILEndOffset); protected: -#if defined(FEATURE_EH_FUNCLETS) bool siInFuncletRegion; // Have we seen the start of the funclet region? 
-#endif // FEATURE_EH_FUNCLETS IL_OFFSET siLastEndOffs; // IL offset of the (exclusive) end of the last block processed @@ -1291,11 +1277,10 @@ class CodeGen final : public CodeGenInterface void genCodeForBfiz(GenTreeOp* tree); #endif // TARGET_ARM64 -#if defined(FEATURE_EH_FUNCLETS) void genEHCatchRet(BasicBlock* block); -#else // !FEATURE_EH_FUNCLETS +#if defined(FEATURE_EH_WINDOWS_X86) void genEHFinallyOrFilterRet(BasicBlock* block); -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 void genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode); void genMultiRegStoreToLocal(GenTreeLclVar* lclNode); diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index eed9a96a98172..cbf99e6de3539 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -391,25 +391,22 @@ void CodeGen::genMarkLabelsForCodegen() case BBJ_CALLFINALLY: // The finally target itself will get marked by walking the EH table, below, and marking // all handler begins. - -#if FEATURE_EH_CALLFINALLY_THUNKS - { - // For callfinally thunks, we need to mark the block following the callfinally/callfinallyret pair, - // as that's needed for identifying the range of the "duplicate finally" region in EH data. - BasicBlock* bbToLabel = block->Next(); - if (block->isBBCallFinallyPair()) - { - bbToLabel = bbToLabel->Next(); // skip the BBJ_CALLFINALLYRET - } - if (bbToLabel != nullptr) + if (compiler->UsesCallFinallyThunks()) { - JITDUMP(" " FMT_BB " : callfinally thunk region end\n", bbToLabel->bbNum); - bbToLabel->SetFlags(BBF_HAS_LABEL); + // For callfinally thunks, we need to mark the block following the callfinally/callfinallyret pair, + // as that's needed for identifying the range of the "duplicate finally" region in EH data. 
+ BasicBlock* bbToLabel = block->Next(); + if (block->isBBCallFinallyPair()) + { + bbToLabel = bbToLabel->Next(); // skip the BBJ_CALLFINALLYRET + } + if (bbToLabel != nullptr) + { + JITDUMP(" " FMT_BB " : callfinally thunk region end\n", bbToLabel->bbNum); + bbToLabel->SetFlags(BBF_HAS_LABEL); + } } - } -#endif // FEATURE_EH_CALLFINALLY_THUNKS - - break; + break; case BBJ_CALLFINALLYRET: JITDUMP(" " FMT_BB " : finally continuation\n", block->GetFinallyContinuation()->bbNum); @@ -1463,10 +1460,11 @@ void CodeGen::genExitCode(BasicBlock* block) void CodeGen::genJumpToThrowHlpBlk(emitJumpKind jumpKind, SpecialCodeKind codeKind, BasicBlock* failBlk) { bool useThrowHlpBlk = compiler->fgUseThrowHelperBlocks(); -#if defined(UNIX_X86_ABI) && defined(FEATURE_EH_FUNCLETS) +#if defined(UNIX_X86_ABI) + // TODO: Is this really UNIX_X86_ABI specific? Should we guard with compiler->UsesFunclets() instead? // Inline exception-throwing code in funclet to make it possible to unwind funclet frames. useThrowHlpBlk = useThrowHlpBlk && (compiler->funCurrentFunc()->funKind == FUNC_ROOT); -#endif // UNIX_X86_ABI && FEATURE_EH_FUNCLETS +#endif // UNIX_X86_ABI if (useThrowHlpBlk) { @@ -1586,8 +1584,6 @@ void CodeGen::genCheckOverflow(GenTree* tree) } #endif -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * * Update the current funclet as needed by calling genUpdateCurrentFunclet(). 
@@ -1598,6 +1594,11 @@ void CodeGen::genCheckOverflow(GenTree* tree) void CodeGen::genUpdateCurrentFunclet(BasicBlock* block) { + if (!compiler->UsesFunclets()) + { + return; + } + if (block->HasFlag(BBF_FUNCLET_BEG)) { compiler->funSetCurrentFunc(compiler->funGetFuncIdx(block)); @@ -1614,7 +1615,7 @@ void CodeGen::genUpdateCurrentFunclet(BasicBlock* block) } else { - assert(compiler->compCurrFuncIdx <= compiler->compFuncInfoCount); + assert(compiler->funCurrentFuncIdx() <= compiler->compFuncInfoCount); if (compiler->funCurrentFunc()->funKind == FUNC_FILTER) { assert(compiler->ehGetDsc(compiler->funCurrentFunc()->funEHIndex)->InFilterRegionBBRange(block)); @@ -1631,8 +1632,6 @@ void CodeGen::genUpdateCurrentFunclet(BasicBlock* block) } } -#endif // FEATURE_EH_FUNCLETS - //---------------------------------------------------------------------- // genGenerateCode: Generate code for the function. // @@ -2193,14 +2192,13 @@ void CodeGen::genReportEH() unsigned EHCount = compiler->compHndBBtabCount; -#if defined(FEATURE_EH_FUNCLETS) // Count duplicated clauses. This uses the same logic as below, where we actually generate them for reporting to the // VM. unsigned duplicateClauseCount = 0; unsigned enclosingTryIndex; // Duplicate clauses are not used by NativeAOT ABI - if (!isNativeAOT) + if (compiler->UsesFunclets() && !isNativeAOT) { for (XTnum = 0; XTnum < compiler->compHndBBtabCount; XTnum++) { @@ -2215,11 +2213,10 @@ void CodeGen::genReportEH() EHCount += duplicateClauseCount; } -#if FEATURE_EH_CALLFINALLY_THUNKS unsigned clonedFinallyCount = 0; // Duplicate clauses are not used by NativeAOT ABI - if (!isNativeAOT) + if (compiler->UsesFunclets() && compiler->UsesCallFinallyThunks() && !isNativeAOT) { // We don't keep track of how many cloned finally there are. So, go through and count. 
// We do a quick pass first through the EH table to see if there are any try/finally @@ -2247,27 +2244,33 @@ void CodeGen::genReportEH() EHCount += clonedFinallyCount; } } -#endif // FEATURE_EH_CALLFINALLY_THUNKS - -#endif // FEATURE_EH_FUNCLETS #ifdef DEBUG if (compiler->opts.dspEHTable) { -#if defined(FEATURE_EH_FUNCLETS) -#if FEATURE_EH_CALLFINALLY_THUNKS - printf("%d EH table entries, %d duplicate clauses, %d cloned finallys, %d total EH entries reported to VM\n", - compiler->compHndBBtabCount, duplicateClauseCount, clonedFinallyCount, EHCount); - assert(compiler->compHndBBtabCount + duplicateClauseCount + clonedFinallyCount == EHCount); -#else // !FEATURE_EH_CALLFINALLY_THUNKS - printf("%d EH table entries, %d duplicate clauses, %d total EH entries reported to VM\n", - compiler->compHndBBtabCount, duplicateClauseCount, EHCount); - assert(compiler->compHndBBtabCount + duplicateClauseCount == EHCount); -#endif // !FEATURE_EH_CALLFINALLY_THUNKS -#else // !FEATURE_EH_FUNCLETS - printf("%d EH table entries, %d total EH entries reported to VM\n", compiler->compHndBBtabCount, EHCount); - assert(compiler->compHndBBtabCount == EHCount); -#endif // !FEATURE_EH_FUNCLETS + if (compiler->UsesFunclets()) + { + if (compiler->UsesCallFinallyThunks()) + { + printf("%d EH table entries, %d duplicate clauses, %d cloned finallys, %d total EH entries reported to " + "VM\n", + compiler->compHndBBtabCount, duplicateClauseCount, clonedFinallyCount, EHCount); + assert(compiler->compHndBBtabCount + duplicateClauseCount + clonedFinallyCount == EHCount); + } + else + { + printf("%d EH table entries, %d duplicate clauses, %d total EH entries reported to VM\n", + compiler->compHndBBtabCount, duplicateClauseCount, EHCount); + assert(compiler->compHndBBtabCount + duplicateClauseCount == EHCount); + } + } +#if defined(FEATURE_EH_WINDOWS_X86) + else + { + printf("%d EH table entries, %d total EH entries reported to VM\n", compiler->compHndBBtabCount, EHCount); + 
assert(compiler->compHndBBtabCount == EHCount); + } +#endif // FEATURE_EH_WINDOWS_X86 } #endif // DEBUG @@ -2335,7 +2338,6 @@ void CodeGen::genReportEH() ++XTnum; } -#if defined(FEATURE_EH_FUNCLETS) // Now output duplicated clauses. // // If a funclet has been created by moving a handler out of a try region that it was originally nested @@ -2558,7 +2560,6 @@ void CodeGen::genReportEH() assert(duplicateClauseCount == reportedDuplicateClauseCount); } // if (duplicateClauseCount > 0) -#if FEATURE_EH_CALLFINALLY_THUNKS if (clonedFinallyCount > 0) { unsigned reportedClonedFinallyCount = 0; @@ -2612,10 +2613,7 @@ void CodeGen::genReportEH() } // for each block assert(clonedFinallyCount == reportedClonedFinallyCount); - } // if (clonedFinallyCount > 0) -#endif // FEATURE_EH_CALLFINALLY_THUNKS - -#endif // FEATURE_EH_FUNCLETS + } // if (clonedFinallyCount > 0) assert(XTnum == EHCount); } @@ -4510,6 +4508,7 @@ void CodeGen::genCheckUseBlockInit() #else // !defined(TARGET_AMD64) genUseBlockInit = (genInitStkLclCnt > 8); + #endif #else @@ -5237,8 +5236,6 @@ void CodeGen::genReserveEpilog(BasicBlock* block) block->IsLast()); } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * * Reserve space for a funclet prolog. 
@@ -5246,6 +5243,7 @@ void CodeGen::genReserveEpilog(BasicBlock* block) void CodeGen::genReserveFuncletProlog(BasicBlock* block) { + assert(compiler->UsesFunclets()); assert(block != nullptr); /* Currently, no registers are live on entry to the prolog, except maybe @@ -5276,6 +5274,7 @@ void CodeGen::genReserveFuncletProlog(BasicBlock* block) void CodeGen::genReserveFuncletEpilog(BasicBlock* block) { + assert(compiler->UsesFunclets()); assert(block != nullptr); JITDUMP("Reserving funclet epilog IG for block " FMT_BB "\n", block->bbNum); @@ -5284,8 +5283,6 @@ void CodeGen::genReserveFuncletEpilog(BasicBlock* block) gcInfo.gcRegByrefSetCur, block->IsLast()); } -#endif // FEATURE_EH_FUNCLETS - /***************************************************************************** * Finalize the frame size and offset assignments. * @@ -5599,7 +5596,7 @@ void CodeGen::genFnProlog() } #endif // DEBUG -#if defined(FEATURE_EH_FUNCLETS) && defined(DEBUG) +#if defined(DEBUG) // We cannot force 0-initialization of the PSPSym // as it will overwrite the real value @@ -5609,7 +5606,7 @@ void CodeGen::genFnProlog() assert(!varDsc->lvMustInit); } -#endif // FEATURE_EH_FUNCLETS && DEBUG +#endif // DEBUG /*------------------------------------------------------------------------- * @@ -6151,33 +6148,35 @@ void CodeGen::genFnProlog() genZeroInitFrame(untrLclHi, untrLclLo, initReg, &initRegZeroed); -#if defined(FEATURE_EH_FUNCLETS) - - genSetPSPSym(initReg, &initRegZeroed); - -#else // !FEATURE_EH_FUNCLETS - - // when compInitMem is true the genZeroInitFrame will zero out the shadow SP slots - if (compiler->ehNeedsShadowSPslots() && !compiler->info.compInitMem) + if (compiler->UsesFunclets()) + { + genSetPSPSym(initReg, &initRegZeroed); + } + else { - // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) - unsigned filterEndOffsetSlotOffs = compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE; +#if defined(FEATURE_EH_WINDOWS_X86) + // when compInitMem is 
true the genZeroInitFrame will zero out the shadow SP slots + if (compiler->ehNeedsShadowSPslots() && !compiler->info.compInitMem) + { + // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) + unsigned filterEndOffsetSlotOffs = + compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE; - // Zero out the slot for nesting level 0 - unsigned firstSlotOffs = filterEndOffsetSlotOffs - TARGET_POINTER_SIZE; + // Zero out the slot for nesting level 0 + unsigned firstSlotOffs = filterEndOffsetSlotOffs - TARGET_POINTER_SIZE; - if (!initRegZeroed) - { - instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg); - initRegZeroed = true; - } + if (!initRegZeroed) + { + instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg); + initRegZeroed = true; + } - GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, initReg, compiler->lvaShadowSPslotsVar, - firstSlotOffs); + GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, initReg, compiler->lvaShadowSPslotsVar, + firstSlotOffs); + } +#endif // FEATURE_EH_WINDOWS_X86 } -#endif // !FEATURE_EH_FUNCLETS - genReportGenericContextArg(initReg, &initRegZeroed); #ifdef JIT32_GCENCODER @@ -6639,15 +6638,14 @@ void CodeGen::genGeneratePrologsAndEpilogs() // Generate all the prologs and epilogs. -#if defined(FEATURE_EH_FUNCLETS) - - // Capture the data we're going to use in the funclet prolog and epilog generation. This is - // information computed during codegen, or during function prolog generation, like - // frame offsets. It must run after main function prolog generation. - - genCaptureFuncletPrologEpilogInfo(); + if (compiler->UsesFunclets()) + { + // Capture the data we're going to use in the funclet prolog and epilog generation. This is + // information computed during codegen, or during function prolog generation, like + // frame offsets. It must run after main function prolog generation. 
-#endif // FEATURE_EH_FUNCLETS + genCaptureFuncletPrologEpilogInfo(); + } // Walk the list of prologs and epilogs and generate them. // We maintain a list of prolog and epilog basic blocks in @@ -7807,20 +7805,25 @@ void CodeGen::genReturn(GenTree* treeNode) #if defined(DEBUG) && defined(TARGET_XARCH) bool doStackPointerCheck = compiler->opts.compStackCheckOnRet; -#if defined(FEATURE_EH_FUNCLETS) - // Don't do stack pointer check at the return from a funclet; only for the main function. - if (compiler->funCurrentFunc()->funKind != FUNC_ROOT) + if (compiler->UsesFunclets()) { - doStackPointerCheck = false; + // Don't do stack pointer check at the return from a funclet; only for the main function. + if (compiler->funCurrentFunc()->funKind != FUNC_ROOT) + { + doStackPointerCheck = false; + } } -#else // !FEATURE_EH_FUNCLETS - // Don't generate stack checks for x86 finally/filter EH returns: these are not invoked - // with the same SP as the main function. See also CodeGen::genEHFinallyOrFilterRet(). - if (compiler->compCurBB->KindIs(BBJ_EHFINALLYRET, BBJ_EHFAULTRET, BBJ_EHFILTERRET)) + else { - doStackPointerCheck = false; +#if defined(FEATURE_EH_WINDOWS_X86) + // Don't generate stack checks for x86 finally/filter EH returns: these are not invoked + // with the same SP as the main function. See also CodeGen::genEHFinallyOrFilterRet(). 
+ if (compiler->compCurBB->KindIs(BBJ_EHFINALLYRET, BBJ_EHFAULTRET, BBJ_EHFILTERRET)) + { + doStackPointerCheck = false; + } +#endif // FEATURE_EH_WINDOWS_X86 } -#endif // !FEATURE_EH_FUNCLETS genStackPointerCheck(doStackPointerCheck, compiler->lvaReturnSpCheck); #endif // defined(DEBUG) && defined(TARGET_XARCH) diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index 038f9fea696bb..39c0ac7092010 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -376,12 +376,11 @@ void CodeGen::genCodeForBBlist() bool firstMapping = true; -#if defined(FEATURE_EH_FUNCLETS) if (block->HasFlag(BBF_FUNCLET_BEG)) { + assert(compiler->UsesFunclets()); genReserveFuncletProlog(block); } -#endif // FEATURE_EH_FUNCLETS // Clear compCurStmt and compCurLifeTree. compiler->compCurStmt = nullptr; @@ -727,32 +726,26 @@ void CodeGen::genCodeForBBlist() block = genCallFinally(block); break; -#if defined(FEATURE_EH_FUNCLETS) - case BBJ_EHCATCHRET: + assert(compiler->UsesFunclets()); genEHCatchRet(block); FALLTHROUGH; case BBJ_EHFINALLYRET: case BBJ_EHFAULTRET: case BBJ_EHFILTERRET: - genReserveFuncletEpilog(block); - break; - -#else // !FEATURE_EH_FUNCLETS - - case BBJ_EHCATCHRET: - noway_assert(!"Unexpected BBJ_EHCATCHRET"); // not used on x86 - break; - - case BBJ_EHFINALLYRET: - case BBJ_EHFAULTRET: - case BBJ_EHFILTERRET: - genEHFinallyOrFilterRet(block); + if (compiler->UsesFunclets()) + { + genReserveFuncletEpilog(block); + } +#if defined(FEATURE_EH_WINDOWS_X86) + else + { + genEHFinallyOrFilterRet(block); + } +#endif // FEATURE_EH_WINDOWS_X86 break; -#endif // !FEATURE_EH_FUNCLETS - case BBJ_SWITCH: break; @@ -828,9 +821,7 @@ void CodeGen::genCodeForBBlist() assert(ShouldAlignLoops()); assert(!block->isBBCallFinallyPairTail()); -#if FEATURE_EH_CALLFINALLY_THUNKS assert(!block->KindIs(BBJ_CALLFINALLY)); -#endif // FEATURE_EH_CALLFINALLY_THUNKS GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->KindIs(BBJ_ALWAYS) && 
!removedJmp)); } @@ -856,7 +847,7 @@ void CodeGen::genCodeForBBlist() #endif // DEBUG } //------------------ END-FOR each block of the method ------------------- -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // If this is a synchronized method on x86, and we generated all the code without // generating the "exit monitor" call, then we must have deleted the single return block // with that call because it was dead code. We still need to report the monitor range @@ -866,14 +857,15 @@ void CodeGen::genCodeForBBlist() // Do this before cleaning the GC refs below; we don't want to create an IG that clears // the `this` pointer for lvaKeepAliveAndReportThis. - if ((compiler->info.compFlags & CORINFO_FLG_SYNCH) && (compiler->syncEndEmitCookie == nullptr)) + if (!compiler->UsesFunclets() && (compiler->info.compFlags & CORINFO_FLG_SYNCH) && + (compiler->syncEndEmitCookie == nullptr)) { JITDUMP("Synchronized method with missing exit monitor call; adding final label\n"); compiler->syncEndEmitCookie = GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur); noway_assert(compiler->syncEndEmitCookie != nullptr); } -#endif // !FEATURE_EH_FUNCLETS +#endif // There could be variables alive at this point. For example see lvaKeepAliveAndReportThis. // This call is for cleaning the GC refs diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index f25e5bb046d29..552e4cf9d3f28 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -210,127 +210,131 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) BasicBlock* const nextBlock = block->Next(); -#if defined(FEATURE_EH_FUNCLETS) - // Generate a call to the finally, like this: - // mov rcx,qword ptr [rbp + 20H] // Load rcx with PSPSym - // call finally-funclet - // jmp finally-return // Only for non-retless finally calls - // The jmp can be a NOP if we're going to the next block. 
- // If we're generating code for the main function (not a funclet), and there is no localloc, - // then RSP at this point is the same value as that stored in the PSPSym. So just copy RSP - // instead of loading the PSPSym in this case, or if PSPSym is not used (NativeAOT ABI). - - if ((compiler->lvaPSPSym == BAD_VAR_NUM) || - (!compiler->compLocallocUsed && (compiler->funCurrentFunc()->funKind == FUNC_ROOT))) + if (compiler->UsesFunclets()) { + // Generate a call to the finally, like this: + // mov rcx,qword ptr [rbp + 20H] // Load rcx with PSPSym + // call finally-funclet + // jmp finally-return // Only for non-retless finally calls + // The jmp can be a NOP if we're going to the next block. + // If we're generating code for the main function (not a funclet), and there is no localloc, + // then RSP at this point is the same value as that stored in the PSPSym. So just copy RSP + // instead of loading the PSPSym in this case, or if PSPSym is not used (NativeAOT ABI). + + if ((compiler->lvaPSPSym == BAD_VAR_NUM) || + (!compiler->compLocallocUsed && (compiler->funCurrentFunc()->funKind == FUNC_ROOT))) + { #ifndef UNIX_X86_ABI - inst_Mov(TYP_I_IMPL, REG_ARG_0, REG_SPBASE, /* canSkip */ false); + inst_Mov(TYP_I_IMPL, REG_ARG_0, REG_SPBASE, /* canSkip */ false); #endif // !UNIX_X86_ABI - } - else - { - GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0); - } - GetEmitter()->emitIns_J(INS_call, block->GetTarget()); - - if (block->HasFlag(BBF_RETLESS_CALL)) - { - // We have a retless call, and the last instruction generated was a call. - // If the next block is in a different EH region (or is the end of the code - // block), then we need to generate a breakpoint here (since it will never - // get executed) to get proper unwind behavior. 
+ } + else + { + GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0); + } + GetEmitter()->emitIns_J(INS_call, block->GetTarget()); - if ((nextBlock == nullptr) || !BasicBlock::sameEHRegion(block, nextBlock)) + if (block->HasFlag(BBF_RETLESS_CALL)) { - instGen(INS_BREAKPOINT); // This should never get executed + // We have a retless call, and the last instruction generated was a call. + // If the next block is in a different EH region (or is the end of the code + // block), then we need to generate a breakpoint here (since it will never + // get executed) to get proper unwind behavior. + + if ((nextBlock == nullptr) || !BasicBlock::sameEHRegion(block, nextBlock)) + { + instGen(INS_BREAKPOINT); // This should never get executed + } } - } - else - { + else + { // TODO-Linux-x86: Do we need to handle the GC information for this NOP or JMP specially, as is done for other // architectures? #ifndef JIT32_GCENCODER - // Because of the way the flowgraph is connected, the liveness info for this one instruction - // after the call is not (can not be) correct in cases where a variable has a last use in the - // handler. So turn off GC reporting for this single instruction. - GetEmitter()->emitDisableGC(); + // Because of the way the flowgraph is connected, the liveness info for this one instruction + // after the call is not (can not be) correct in cases where a variable has a last use in the + // handler. So turn off GC reporting for this single instruction. + GetEmitter()->emitDisableGC(); #endif // JIT32_GCENCODER - BasicBlock* const finallyContinuation = nextBlock->GetFinallyContinuation(); + BasicBlock* const finallyContinuation = nextBlock->GetFinallyContinuation(); - // Now go to where the finally funclet needs to return to. - if (nextBlock->NextIs(finallyContinuation) && !compiler->fgInDifferentRegions(nextBlock, finallyContinuation)) - { - // Fall-through. 
- // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly - // to the next instruction? This would depend on stack walking from within the finally - // handler working without this instruction being in this special EH region. - instGen(INS_nop); - } - else - { - inst_JMP(EJ_jmp, finallyContinuation); - } + // Now go to where the finally funclet needs to return to. + if (nextBlock->NextIs(finallyContinuation) && + !compiler->fgInDifferentRegions(nextBlock, finallyContinuation)) + { + // Fall-through. + // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly + // to the next instruction? This would depend on stack walking from within the finally + // handler working without this instruction being in this special EH region. + instGen(INS_nop); + } + else + { + inst_JMP(EJ_jmp, finallyContinuation); + } #ifndef JIT32_GCENCODER - GetEmitter()->emitEnableGC(); + GetEmitter()->emitEnableGC(); #endif // JIT32_GCENCODER + } } - -#else // !FEATURE_EH_FUNCLETS - - // If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot - // corresponding to the finally's nesting level. When invoked in response to an exception, the - // EE does this. - // - // We have a BBJ_CALLFINALLY possibly paired with a following BBJ_CALLFINALLYRET. - // - // We will emit : - // mov [ebp - (n + 1)], 0 - // mov [ebp - n ], 0xFC - // push &step - // jmp finallyBlock - // ... - // step: - // mov [ebp - n ], 0 - // jmp leaveTarget - // ... 
- // leaveTarget: - - noway_assert(isFramePointerUsed()); - - // Get the nesting level which contains the finally - unsigned finallyNesting = 0; - compiler->fgGetNestingLevel(block, &finallyNesting); - - // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) - unsigned filterEndOffsetSlotOffs; - filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE); - - unsigned curNestingSlotOffs; - curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE)); - - // Zero out the slot for the next nesting level - GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, - curNestingSlotOffs - TARGET_POINTER_SIZE, 0); - GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, LCL_FINALLY_MARK); - - // Now push the address where the finally funclet should return to directly. - if (!block->HasFlag(BBF_RETLESS_CALL)) - { - assert(block->isBBCallFinallyPair()); - GetEmitter()->emitIns_J(INS_push_hide, nextBlock->GetFinallyContinuation()); - } +#if defined(FEATURE_EH_WINDOWS_X86) else { - // EE expects a DWORD, so we provide 0 - inst_IV(INS_push_hide, 0); - } - - // Jump to the finally BB - inst_JMP(EJ_jmp, block->GetTarget()); + // If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot + // corresponding to the finally's nesting level. When invoked in response to an exception, the + // EE does this. + // + // We have a BBJ_CALLFINALLY possibly paired with a following BBJ_CALLFINALLYRET. + // + // We will emit : + // mov [ebp - (n + 1)], 0 + // mov [ebp - n ], 0xFC + // push &step + // jmp finallyBlock + // ... + // step: + // mov [ebp - n ], 0 + // jmp leaveTarget + // ... 
+ // leaveTarget: + + noway_assert(isFramePointerUsed()); + + // Get the nesting level which contains the finally + unsigned finallyNesting = 0; + compiler->fgGetNestingLevel(block, &finallyNesting); + + // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) + unsigned filterEndOffsetSlotOffs; + filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE); + + unsigned curNestingSlotOffs; + curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE)); + + // Zero out the slot for the next nesting level + GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, + curNestingSlotOffs - TARGET_POINTER_SIZE, 0); + GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, + LCL_FINALLY_MARK); + + // Now push the address where the finally funclet should return to directly. + if (!block->HasFlag(BBF_RETLESS_CALL)) + { + assert(block->isBBCallFinallyPair()); + GetEmitter()->emitIns_J(INS_push_hide, nextBlock->GetFinallyContinuation()); + } + else + { + // EE expects a DWORD, so we provide 0 + inst_IV(INS_push_hide, 0); + } -#endif // !FEATURE_EH_FUNCLETS + // Jump to the finally BB + inst_JMP(EJ_jmp, block->GetTarget()); + } +#endif // FEATURE_EH_WINDOWS_X86 // The BBJ_CALLFINALLYRET is used because the BBJ_CALLFINALLY can't point to the // jump target using bbTargetEdge - that is already used to point @@ -344,7 +348,6 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) return block; } -#if defined(FEATURE_EH_FUNCLETS) void CodeGen::genEHCatchRet(BasicBlock* block) { // Set RAX to the address the VM should return to after the catch. 
@@ -354,10 +357,11 @@ void CodeGen::genEHCatchRet(BasicBlock* block) GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->GetTarget(), REG_INTRET); } -#else // !FEATURE_EH_FUNCLETS +#if defined(FEATURE_EH_WINDOWS_X86) void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block) { + assert(!compiler->UsesFunclets()); // The last statement of the block must be a GT_RETFILT, which has already been generated. assert(block->lastNode() != nullptr); assert(block->lastNode()->OperGet() == GT_RETFILT); @@ -383,7 +387,7 @@ void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block) } } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 // Move an immediate value into an integer register @@ -2144,7 +2148,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) genConsumeReg(treeNode); break; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: // Have to clear the ShadowSP of the nesting level which encloses the finally. Generates: @@ -2167,7 +2171,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, (unsigned)curNestingSlotOffs, 0); break; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_PINVOKE_PROLOG: noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & @@ -6120,37 +6124,42 @@ void CodeGen::genCall(GenTreeCall* call) compiler->lvaCallSpCheck, call->CallerPop() ? 0 : stackArgBytes, REG_ARG_0); #endif // defined(DEBUG) && defined(TARGET_X86) -#if !defined(FEATURE_EH_FUNCLETS) - //------------------------------------------------------------------------- - // Create a label for tracking of region protected by the monitor in synchronized methods. - // This needs to be here, rather than above where fPossibleSyncHelperCall is set, - // so the GC state vars have been updated before creating the label. 
- - if ((call->gtCallType == CT_HELPER) && (compiler->info.compFlags & CORINFO_FLG_SYNCH)) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!compiler->UsesFunclets()) { - CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(call->gtCallMethHnd); - noway_assert(helperNum != CORINFO_HELP_UNDEF); - switch (helperNum) + //------------------------------------------------------------------------- + // Create a label for tracking of region protected by the monitor in synchronized methods. + // This needs to be here, rather than above where fPossibleSyncHelperCall is set, + // so the GC state vars have been updated before creating the label. + + if ((call->gtCallType == CT_HELPER) && (compiler->info.compFlags & CORINFO_FLG_SYNCH)) { - case CORINFO_HELP_MON_ENTER: - case CORINFO_HELP_MON_ENTER_STATIC: - noway_assert(compiler->syncStartEmitCookie == nullptr); - compiler->syncStartEmitCookie = - GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur); - noway_assert(compiler->syncStartEmitCookie != nullptr); - break; - case CORINFO_HELP_MON_EXIT: - case CORINFO_HELP_MON_EXIT_STATIC: - noway_assert(compiler->syncEndEmitCookie == nullptr); - compiler->syncEndEmitCookie = - GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur); - noway_assert(compiler->syncEndEmitCookie != nullptr); - break; - default: - break; + CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(call->gtCallMethHnd); + noway_assert(helperNum != CORINFO_HELP_UNDEF); + switch (helperNum) + { + case CORINFO_HELP_MON_ENTER: + case CORINFO_HELP_MON_ENTER_STATIC: + noway_assert(compiler->syncStartEmitCookie == nullptr); + compiler->syncStartEmitCookie = + GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, + gcInfo.gcRegByrefSetCur); + noway_assert(compiler->syncStartEmitCookie != nullptr); + break; + case CORINFO_HELP_MON_EXIT: + case CORINFO_HELP_MON_EXIT_STATIC: + noway_assert(compiler->syncEndEmitCookie == 
nullptr); + compiler->syncEndEmitCookie = + GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, + gcInfo.gcRegByrefSetCur); + noway_assert(compiler->syncEndEmitCookie != nullptr); + break; + default: + break; + } } } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 unsigned stackAdjustBias = 0; @@ -8844,13 +8853,12 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, int s_cached; -#ifdef FEATURE_EH_FUNCLETS // We should do this before gcInfoBlockHdrSave since varPtrTableSize must be finalized before it if (compiler->ehAnyFunclets()) { + assert(compiler->UsesFunclets()); gcInfo.gcMarkFilterVarsPinned(); } -#endif #ifdef DEBUG size_t headerSize = @@ -10427,8 +10435,6 @@ void CodeGen::genFnEpilog(BasicBlock* block) } } -#if defined(FEATURE_EH_FUNCLETS) - #if defined(TARGET_AMD64) /***************************************************************************** @@ -10818,8 +10824,6 @@ void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed) #endif // TARGET* } -#endif // FEATURE_EH_FUNCLETS - //----------------------------------------------------------------------------- // genZeroInitFrameUsingBlockInit: architecture-specific helper for genZeroInitFrame in the case // `genUseBlockInit` is set. diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 697c76527afe5..1edc429414018 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -1863,6 +1863,11 @@ void Compiler::compInit(ArenaAllocator* pAlloc, eeInfoInitialized = false; +#if defined(FEATURE_EH_WINDOWS_X86) + // Cache Native AOT ABI check. This must happen *after* eeInfoInitialized is initialized, above. 
+ eeIsNativeAotAbi = IsTargetAbi(CORINFO_NATIVEAOT_ABI); +#endif + compDoAggressiveInlining = false; if (compIsForInlining()) @@ -4901,13 +4906,12 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS, &Compiler::fgComputeBlockAndEdgeWeights); -#if defined(FEATURE_EH_FUNCLETS) - - // Create funclets from the EH handlers. - // - DoPhase(this, PHASE_CREATE_FUNCLETS, &Compiler::fgCreateFunclets); - -#endif // FEATURE_EH_FUNCLETS + if (UsesFunclets()) + { + // Create funclets from the EH handlers. + // + DoPhase(this, PHASE_CREATE_FUNCLETS, &Compiler::fgCreateFunclets); + } if (opts.OptimizationEnabled()) { @@ -5448,29 +5452,26 @@ bool Compiler::shouldAlignLoop(FlowGraphNaturalLoop* loop, BasicBlock* top) assert(!top->IsFirst()); -#if FEATURE_EH_CALLFINALLY_THUNKS - if (top->Prev()->KindIs(BBJ_CALLFINALLY)) + if (UsesCallFinallyThunks() && top->Prev()->KindIs(BBJ_CALLFINALLY)) { // It must be a retless BBJ_CALLFINALLY if we get here. assert(!top->Prev()->isBBCallFinallyPair()); // If the block before the loop start is a retless BBJ_CALLFINALLY - // with FEATURE_EH_CALLFINALLY_THUNKS, we can't add alignment + // with UsesCallFinallyThunks, we can't add alignment // because it will affect reported EH region range. For x86 (where - // !FEATURE_EH_CALLFINALLY_THUNKS), we can allow this. + // !UsesCallFinallyThunks), we can allow this. JITDUMP("Skipping alignment for " FMT_LP "; its top block follows a CALLFINALLY block\n", loop->GetIndex()); return false; } -#endif // FEATURE_EH_CALLFINALLY_THUNKS if (top->Prev()->isBBCallFinallyPairTail()) { // If the previous block is the BBJ_CALLFINALLYRET of a // BBJ_CALLFINALLY/BBJ_CALLFINALLYRET pair, then we can't add alignment // because we can't add instructions in that block. In the - // FEATURE_EH_CALLFINALLY_THUNKS case, it would affect the - // reported EH, as above. + // UsesCallFinallyThunks case, it would affect the reported EH, as above. 
JITDUMP("Skipping alignment for " FMT_LP "; its top block follows a CALLFINALLY/ALWAYS pair\n", loop->GetIndex()); return false; diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 7e6b2c57c89dc..d7dbe341b490d 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -2722,7 +2722,7 @@ class Compiler // Exception handling functions // -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) bool ehNeedsShadowSPslots() { @@ -2735,7 +2735,7 @@ class Compiler // etc. unsigned ehMaxHndNestingCount; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 static bool jitIsBetween(unsigned value, unsigned start, unsigned end); static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end); @@ -2832,7 +2832,6 @@ class Compiler bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex); #endif // DEBUG -#if defined(FEATURE_EH_FUNCLETS) // Do we need a PSPSym in the main function? For codegen purposes, we only need one // if there is a filter that protects a region with a nested EH clause (such as a // try/catch nested in the 'try' body of a try/filter/filter-handler). 
See @@ -2853,23 +2852,6 @@ class Compiler unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks -#else // !FEATURE_EH_FUNCLETS - - bool ehAnyFunclets() - { - return false; - } - unsigned ehFuncletCount() - { - return 0; - } - - unsigned bbThrowIndex(BasicBlock* blk) - { - return blk->bbTryIndex; - } // Get the index to use as the cache key for sharing throw blocks -#endif // !FEATURE_EH_FUNCLETS - FlowEdge* BlockPredsWithEH(BasicBlock* blk); FlowEdge* BlockDominancePreds(BasicBlock* blk); @@ -2918,12 +2900,8 @@ class Compiler void fgRemoveEHTableEntry(unsigned XTnum); -#if defined(FEATURE_EH_FUNCLETS) - EHblkDsc* fgAddEHTableEntry(unsigned XTnum); -#endif // FEATURE_EH_FUNCLETS - void fgSortEHTable(); // Causes the EH table to obey some well-formedness conditions, by inserting @@ -3901,10 +3879,10 @@ class Compiler //------------------------------------------------------------------------- // All these frame offsets are inter-related and must be kept in sync -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // This is used for the callable handlers unsigned lvaShadowSPslotsVar; // Block-layout TYP_STRUCT variable for all the shadow SP slots -#endif // FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 int lvaCachedGenericContextArgOffs; int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as @@ -4249,9 +4227,7 @@ class Compiler unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX -#if defined(FEATURE_EH_FUNCLETS) unsigned lvaPSPSym; // variable representing the PSPSym -#endif InlineInfo* impInlineInfo; // Only present for inlinees InlineStrategy* m_inlineStrategy; @@ -4474,6 +4450,9 @@ class Compiler GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp); void impImportLeave(BasicBlock* block); +#if defined(FEATURE_EH_WINDOWS_X86) + void impImportLeaveEHRegions(BasicBlock* block); +#endif 
void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr); GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom); @@ -5022,9 +5001,7 @@ class Compiler BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section BasicBlock* fgEntryBB; // For OSR, the original method's entry point BasicBlock* fgOSREntryBB; // For OSR, the logical entry point (~ patchpoint) -#if defined(FEATURE_EH_FUNCLETS) BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets) -#endif BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been // created. BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks @@ -5226,9 +5203,7 @@ class Compiler // This is derived from the profile data // or is BB_UNITY_WEIGHT when we don't have profile data -#if defined(FEATURE_EH_FUNCLETS) bool fgFuncletsCreated; // true if the funclet creation phase has been run -#endif // FEATURE_EH_FUNCLETS bool fgGlobalMorph; // indicates if we are during the global morphing phase // since fgMorphTree can be called from several places @@ -5282,16 +5257,12 @@ class Compiler GenTree* fgGetCritSectOfStaticMethod(); -#if defined(FEATURE_EH_FUNCLETS) - void fgAddSyncMethodEnterExit(); GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter); void fgConvertSyncReturnToLeave(BasicBlock* block); -#endif // FEATURE_EH_FUNCLETS - void fgAddReversePInvokeEnterExit(); bool fgMoreThanOneReturnBlock(); @@ -6034,15 +6005,14 @@ class Compiler }; BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); -#if defined(FEATURE_EH_FUNCLETS) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); void fgCreateFuncletPrologBlocks(); PhaseStatus fgCreateFunclets(); -#else // !FEATURE_EH_FUNCLETS +#if 
defined(FEATURE_EH_WINDOWS_X86) bool fgRelocateEHRegions(); -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target); @@ -6069,9 +6039,7 @@ class Compiler bool fgReorderBlocks(bool useProfile); -#ifdef FEATURE_EH_FUNCLETS bool fgFuncletsAreCold(); -#endif // FEATURE_EH_FUNCLETS PhaseStatus fgDetermineFirstColdBlock(); @@ -8265,6 +8233,30 @@ class Compiler return eeGetEEInfo()->targetAbi == abi; } +#if defined(FEATURE_EH_WINDOWS_X86) + bool eeIsNativeAotAbi; + bool UsesFunclets() const + { + return eeIsNativeAotAbi; + } + + bool UsesCallFinallyThunks() const + { + // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. + return UsesFunclets(); + } +#else + bool UsesFunclets() const + { + return true; + } + + bool UsesCallFinallyThunks() const + { + return true; + } +#endif + bool generateCFIUnwindCodes() { #if defined(FEATURE_CFI_SUPPORT) @@ -8481,37 +8473,29 @@ class Compiler } // Things that MAY belong either in CodeGen or CodeGenContext - -#if defined(FEATURE_EH_FUNCLETS) FuncInfoDsc* compFuncInfos; unsigned short compCurrFuncIdx; unsigned short compFuncInfoCount; + FuncInfoDsc compFuncInfoRoot; unsigned short compFuncCount() { - assert(fgFuncletsCreated); - return compFuncInfoCount; - } - -#else // !FEATURE_EH_FUNCLETS - - // This is a no-op when there are no funclets! - void genUpdateCurrentFunclet(BasicBlock* block) - { - return; + if (UsesFunclets()) + { + assert(fgFuncletsCreated); + return compFuncInfoCount; + } + else + { + return 1; + } } - FuncInfoDsc compFuncInfoRoot; - - static const unsigned compCurrFuncIdx = 0; - - unsigned short compFuncCount() + unsigned short funCurrentFuncIdx() { - return 1; + return UsesFunclets() ? 
compCurrFuncIdx : 0; } -#endif // !FEATURE_EH_FUNCLETS - FuncInfoDsc* funCurrentFunc(); void funSetCurrentFunc(unsigned funcIdx); FuncInfoDsc* funGetFunc(unsigned funcIdx); @@ -8619,22 +8603,20 @@ class Compiler // private: -#if defined(FEATURE_EH_FUNCLETS) void unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc); -#endif // FEATURE_EH_FUNCLETS void unwindReserveFunc(FuncInfoDsc* func); void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode); -#if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) +#if defined(TARGET_AMD64) || defined(TARGET_X86) void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode); void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode); -#endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS) +#endif // TARGET_AMD64 || TARGET_X86 UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func); @@ -10506,14 +10488,14 @@ class Compiler unsigned compHndBBtabCount; // element count of used elements in EH data array unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) //------------------------------------------------------------------------- // Tracking of region covered by the monitor in synchronized methods void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 Phases mostRecentlyActivePhase; // the most recently active phase PhaseChecks activePhaseChecks; // the currently active phase checks @@ -11400,9 +11382,9 @@ class GenTreeVisitor case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if !defined(FEATURE_EH_FUNCLETS) +#if 
defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_PHI_ARG: case GT_JMPTABLE: case GT_PHYSREG: diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 62efd5282a16a..6387d17b2e1ac 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -789,8 +789,6 @@ inline bool BasicBlock::HasPotentialEHSuccs(Compiler* comp) return hndDesc->InFilterRegionBBRange(this); } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * Get the FuncInfoDsc for the funclet we are currently generating code for. * This is only valid during codegen. @@ -798,7 +796,14 @@ inline bool BasicBlock::HasPotentialEHSuccs(Compiler* comp) */ inline FuncInfoDsc* Compiler::funCurrentFunc() { - return funGetFunc(compCurrFuncIdx); + if (UsesFunclets()) + { + return funGetFunc(compCurrFuncIdx); + } + else + { + return &compFuncInfoRoot; + } } /***************************************************************************** @@ -808,10 +813,17 @@ inline FuncInfoDsc* Compiler::funCurrentFunc() */ inline void Compiler::funSetCurrentFunc(unsigned funcIdx) { - assert(fgFuncletsCreated); - assert(FitsIn(funcIdx)); - noway_assert(funcIdx < compFuncInfoCount); - compCurrFuncIdx = (unsigned short)funcIdx; + if (UsesFunclets()) + { + assert(fgFuncletsCreated); + assert(FitsIn(funcIdx)); + noway_assert(funcIdx < compFuncInfoCount); + compCurrFuncIdx = (unsigned short)funcIdx; + } + else + { + assert(funcIdx == 0); + } } /***************************************************************************** @@ -821,9 +833,17 @@ inline void Compiler::funSetCurrentFunc(unsigned funcIdx) */ inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) { - assert(fgFuncletsCreated); - assert(funcIdx < compFuncInfoCount); - return &compFuncInfos[funcIdx]; + if (UsesFunclets()) + { + assert(fgFuncletsCreated); + assert(funcIdx < compFuncInfoCount); + return 
&compFuncInfos[funcIdx]; + } + else + { + assert(funcIdx == 0); + return &compFuncInfoRoot; + } } /***************************************************************************** @@ -836,71 +856,33 @@ inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) */ inline unsigned Compiler::funGetFuncIdx(BasicBlock* block) { - assert(fgFuncletsCreated); - assert(block->HasFlag(BBF_FUNCLET_BEG)); - - EHblkDsc* eh = ehGetDsc(block->getHndIndex()); - unsigned int funcIdx = eh->ebdFuncIndex; - if (eh->ebdHndBeg != block) + if (UsesFunclets()) { - // If this is a filter EH clause, but we want the funclet - // for the filter (not the filter handler), it is the previous one - noway_assert(eh->HasFilter()); - noway_assert(eh->ebdFilter == block); - assert(funGetFunc(funcIdx)->funKind == FUNC_HANDLER); - assert(funGetFunc(funcIdx)->funEHIndex == funGetFunc(funcIdx - 1)->funEHIndex); - assert(funGetFunc(funcIdx - 1)->funKind == FUNC_FILTER); - funcIdx--; - } - - return funcIdx; -} - -#else // !FEATURE_EH_FUNCLETS + assert(fgFuncletsCreated); + assert(block->HasFlag(BBF_FUNCLET_BEG)); -/***************************************************************************** - * Get the FuncInfoDsc for the funclet we are currently generating code for. - * This is only valid during codegen. For non-funclet platforms, this is - * always the root function. - * - */ -inline FuncInfoDsc* Compiler::funCurrentFunc() -{ - return &compFuncInfoRoot; -} - -/***************************************************************************** - * Change which funclet we are currently generating code for. - * This is only valid after funclets are created. - * - */ -inline void Compiler::funSetCurrentFunc(unsigned funcIdx) -{ - assert(funcIdx == 0); -} - -/***************************************************************************** - * Get the FuncInfoDsc for the givven funclet. - * This is only valid after funclets are created. 
- * - */ -inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) -{ - assert(funcIdx == 0); - return &compFuncInfoRoot; -} + EHblkDsc* eh = ehGetDsc(block->getHndIndex()); + unsigned int funcIdx = eh->ebdFuncIndex; + if (eh->ebdHndBeg != block) + { + // If this is a filter EH clause, but we want the funclet + // for the filter (not the filter handler), it is the previous one + noway_assert(eh->HasFilter()); + noway_assert(eh->ebdFilter == block); + assert(funGetFunc(funcIdx)->funKind == FUNC_HANDLER); + assert(funGetFunc(funcIdx)->funEHIndex == funGetFunc(funcIdx - 1)->funEHIndex); + assert(funGetFunc(funcIdx - 1)->funKind == FUNC_FILTER); + funcIdx--; + } -/***************************************************************************** - * No funclets, so always 0. - * - */ -inline unsigned Compiler::funGetFuncIdx(BasicBlock* block) -{ - return 0; + return funcIdx; + } + else + { + return 0; + } } -#endif // !FEATURE_EH_FUNCLETS - //------------------------------------------------------------------------------ // genRegNumFromMask : Maps a single register mask to a register number. 
// @@ -4114,9 +4096,7 @@ bool Compiler::fgVarIsNeverZeroInitializedInProlog(unsigned varNum) result = result || (varNum == lvaOutgoingArgSpaceVar); #endif -#if defined(FEATURE_EH_FUNCLETS) result = result || (varNum == lvaPSPSym); -#endif return result; } @@ -4233,9 +4213,9 @@ void GenTree::VisitOperands(TVisitor visitor) case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_PHI_ARG: case GT_JMPTABLE: case GT_PHYSREG: diff --git a/src/coreclr/jit/compphases.h b/src/coreclr/jit/compphases.h index b222451d26e23..4bd236ad7f196 100644 --- a/src/coreclr/jit/compphases.h +++ b/src/coreclr/jit/compphases.h @@ -55,9 +55,7 @@ CompPhaseNameMacro(PHASE_POST_MORPH, "Post-Morph", CompPhaseNameMacro(PHASE_MORPH_END, "Morph - Finish", false, -1, true) CompPhaseNameMacro(PHASE_GS_COOKIE, "GS Cookie", false, -1, false) CompPhaseNameMacro(PHASE_COMPUTE_EDGE_WEIGHTS, "Compute edge weights (1, false)",false, -1, false) -#if defined(FEATURE_EH_FUNCLETS) CompPhaseNameMacro(PHASE_CREATE_FUNCLETS, "Create EH funclets", false, -1, false) -#endif // FEATURE_EH_FUNCLETS CompPhaseNameMacro(PHASE_HEAD_TAIL_MERGE, "Head and tail merge", false, -1, false) CompPhaseNameMacro(PHASE_MERGE_THROWS, "Merge throw blocks", false, -1, false) CompPhaseNameMacro(PHASE_INVERT_LOOPS, "Invert loops", false, -1, false) diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp index 85bbda9f3cfbc..5259b936646fb 100644 --- a/src/coreclr/jit/emit.cpp +++ b/src/coreclr/jit/emit.cpp @@ -1608,11 +1608,8 @@ void* emitter::emitAllocAnyInstr(size_t sz, emitAttr opsz) // the prolog/epilog placeholder groups ARE generated in order, and are // re-used. But generating additional groups would not work. 
if (emitComp->compStressCompile(Compiler::STRESS_EMITTER, 1) && emitCurIGinsCnt && !emitIGisInProlog(emitCurIG) && - !emitIGisInEpilog(emitCurIG) && !emitCurIG->endsWithAlignInstr() -#if defined(FEATURE_EH_FUNCLETS) - && !emitIGisInFuncletProlog(emitCurIG) && !emitIGisInFuncletEpilog(emitCurIG) -#endif // FEATURE_EH_FUNCLETS - ) + !emitIGisInEpilog(emitCurIG) && !emitCurIG->endsWithAlignInstr() && !emitIGisInFuncletProlog(emitCurIG) && + !emitIGisInFuncletEpilog(emitCurIG)) { emitNxtIG(true); } @@ -2070,11 +2067,7 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, bool extend = false; - if (igType == IGPT_EPILOG -#if defined(FEATURE_EH_FUNCLETS) - || igType == IGPT_FUNCLET_EPILOG -#endif // FEATURE_EH_FUNCLETS - ) + if (igType == IGPT_EPILOG || igType == IGPT_FUNCLET_EPILOG) { #ifdef TARGET_AMD64 emitOutputPreEpilogNOP(); @@ -2108,7 +2101,7 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, * case, we need to make sure any re-used fields, such as igFuncIdx, are correct. */ - igPh->igFuncIdx = emitComp->compCurrFuncIdx; + igPh->igFuncIdx = emitComp->funCurrentFuncIdx(); /* Create a separate block of memory to store placeholder information. 
* We could use unions to put some of this into the insGroup itself, but we don't @@ -2144,7 +2137,6 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, { igPh->igFlags |= IGF_EPILOG; } -#if defined(FEATURE_EH_FUNCLETS) else if (igType == IGPT_FUNCLET_PROLOG) { igPh->igFlags |= IGF_FUNCLET_PROLOG; @@ -2153,7 +2145,6 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, { igPh->igFlags |= IGF_FUNCLET_EPILOG; } -#endif // FEATURE_EH_FUNCLETS /* Link it into the placeholder list */ @@ -2174,7 +2165,6 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, emitCurIGsize += MAX_PLACEHOLDER_IG_SIZE; emitCurCodeOffset += emitCurIGsize; -#if defined(FEATURE_EH_FUNCLETS) // Add the appropriate IP mapping debugging record for this placeholder // group. genExitCode() adds the mapping for main function epilogs. if (emitComp->opts.compDbgInfo) @@ -2188,7 +2178,6 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, codeGen->genIPmappingAdd(IPmappingDscKind::Epilog, DebugInfo(), true); } } -#endif // FEATURE_EH_FUNCLETS /* Start a new IG if more code follows */ @@ -2198,11 +2187,7 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, } else { - if (igType == IGPT_EPILOG -#if defined(FEATURE_EH_FUNCLETS) - || igType == IGPT_FUNCLET_EPILOG -#endif // FEATURE_EH_FUNCLETS - ) + if (igType == IGPT_EPILOG || igType == IGPT_FUNCLET_EPILOG) { // If this was an epilog, then assume this is the end of any currently in progress // no-GC region. 
If a block after the epilog needs to be no-GC, it needs to call @@ -2249,12 +2234,10 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, void emitter::emitGeneratePrologEpilog() { #ifdef DEBUG - unsigned prologCnt = 0; - unsigned epilogCnt = 0; -#if defined(FEATURE_EH_FUNCLETS) + unsigned prologCnt = 0; + unsigned epilogCnt = 0; unsigned funcletPrologCnt = 0; unsigned funcletEpilogCnt = 0; -#endif // FEATURE_EH_FUNCLETS #endif // DEBUG insGroup* igPh; @@ -2284,8 +2267,6 @@ void emitter::emitGeneratePrologEpilog() emitEndFnEpilog(); break; -#if defined(FEATURE_EH_FUNCLETS) - case IGPT_FUNCLET_PROLOG: INDEBUG(++funcletPrologCnt); emitBegFuncletProlog(igPh); @@ -2300,8 +2281,6 @@ void emitter::emitGeneratePrologEpilog() emitEndFuncletEpilog(); break; -#endif // FEATURE_EH_FUNCLETS - default: unreached(); } @@ -2311,17 +2290,16 @@ void emitter::emitGeneratePrologEpilog() if (emitComp->verbose) { printf("%d prologs, %d epilogs", prologCnt, epilogCnt); -#if defined(FEATURE_EH_FUNCLETS) - printf(", %d funclet prologs, %d funclet epilogs", funcletPrologCnt, funcletEpilogCnt); -#endif // FEATURE_EH_FUNCLETS + if (emitComp->UsesFunclets()) + { + printf(", %d funclet prologs, %d funclet epilogs", funcletPrologCnt, funcletEpilogCnt); + } printf("\n"); -// prolog/epilog code doesn't use this yet -// noway_assert(prologCnt == 1); -// noway_assert(epilogCnt == emitEpilogCnt); // Is this correct? -#if defined(FEATURE_EH_FUNCLETS) + // prolog/epilog code doesn't use this yet + // noway_assert(prologCnt == 1); + // noway_assert(epilogCnt == emitEpilogCnt); // Is this correct? assert(funcletPrologCnt == emitComp->ehFuncletCount()); -#endif // FEATURE_EH_FUNCLETS } #endif // DEBUG } @@ -2519,8 +2497,6 @@ void emitter::emitEndFnEpilog() #endif // JIT32_GCENCODER } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * * Begin generating a funclet prolog. 
@@ -2528,6 +2504,7 @@ void emitter::emitEndFnEpilog() void emitter::emitBegFuncletProlog(insGroup* igPh) { + assert(emitComp->UsesFunclets()); emitBegPrologEpilog(igPh); } @@ -2538,6 +2515,7 @@ void emitter::emitBegFuncletProlog(insGroup* igPh) void emitter::emitEndFuncletProlog() { + assert(emitComp->UsesFunclets()); emitEndPrologEpilog(); } @@ -2548,6 +2526,7 @@ void emitter::emitEndFuncletProlog() void emitter::emitBegFuncletEpilog(insGroup* igPh) { + assert(emitComp->UsesFunclets()); emitBegPrologEpilog(igPh); } @@ -2558,11 +2537,10 @@ void emitter::emitBegFuncletEpilog(insGroup* igPh) void emitter::emitEndFuncletEpilog() { + assert(emitComp->UsesFunclets()); emitEndPrologEpilog(); } -#endif // FEATURE_EH_FUNCLETS - #ifdef JIT32_GCENCODER // @@ -2999,16 +2977,12 @@ bool emitter::emitIsFuncEnd(emitLocation* emitLoc, emitLocation* emitLocNextFrag if (ig->igNext->igFlags & IGF_FUNCLET_PROLOG) return true; -#if defined(FEATURE_EH_FUNCLETS) - // Is the next IG a placeholder group for a funclet prolog? if ((ig->igNext->igFlags & IGF_PLACEHOLDER) && (ig->igNext->igPhData->igPhType == IGPT_FUNCLET_PROLOG)) { return true; } -#endif // FEATURE_EH_FUNCLETS - return false; } @@ -4042,14 +4016,12 @@ void emitter::emitDispIG(insGroup* ig, bool displayFunc, bool displayInstruction case IGPT_EPILOG: pszType = "epilog"; break; -#if defined(FEATURE_EH_FUNCLETS) case IGPT_FUNCLET_PROLOG: pszType = "funclet prolog"; break; case IGPT_FUNCLET_EPILOG: pszType = "funclet epilog"; break; -#endif // FEATURE_EH_FUNCLETS default: pszType = "UNKNOWN"; break; @@ -7112,16 +7084,15 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, // printf("Variable #%2u/%2u is at stack offset %d\n", num, indx, offs); -#ifdef JIT32_GCENCODER -#ifndef FEATURE_EH_FUNCLETS +#if defined(JIT32_GCENCODER) && defined(FEATURE_EH_WINDOWS_X86) // Remember the frame offset of the "this" argument for synchronized methods. 
- if (emitComp->lvaIsOriginalThisArg(num) && emitComp->lvaKeepAliveAndReportThis()) + if (!emitComp->UsesFunclets() && emitComp->lvaIsOriginalThisArg(num) && + emitComp->lvaKeepAliveAndReportThis()) { emitSyncThisObjOffs = offs; offs |= this_OFFSET_FLAG; } -#endif -#endif // JIT32_GCENCODER +#endif // JIT32_GCENCODER && FEATURE_EH_WINDOWS_X86 if (dsc->TypeGet() == TYP_BYREF) { @@ -8604,8 +8575,8 @@ void emitter::emitGCvarLiveSet(int offs, GCtype gcType, BYTE* addr, ssize_t disp /* the lower 2 bits encode props about the stk ptr */ -#if defined(JIT32_GCENCODER) && !defined(FEATURE_EH_FUNCLETS) - if (offs == emitSyncThisObjOffs) +#if defined(JIT32_GCENCODER) && defined(FEATURE_EH_WINDOWS_X86) + if (!emitComp->UsesFunclets() && offs == emitSyncThisObjOffs) { desc->vpdVarNum |= this_OFFSET_FLAG; } @@ -9572,7 +9543,7 @@ void emitter::emitInitIG(insGroup* ig) /* Set the current function index */ - ig->igFuncIdx = emitComp->compCurrFuncIdx; + ig->igFuncIdx = emitComp->funCurrentFuncIdx(); ig->igFlags = 0; diff --git a/src/coreclr/jit/emit.h b/src/coreclr/jit/emit.h index 4e37226e2b581..03a75f2cb22d4 100644 --- a/src/coreclr/jit/emit.h +++ b/src/coreclr/jit/emit.h @@ -234,10 +234,8 @@ enum insGroupPlaceholderType : unsigned char { IGPT_PROLOG, // currently unused IGPT_EPILOG, -#if defined(FEATURE_EH_FUNCLETS) IGPT_FUNCLET_PROLOG, IGPT_FUNCLET_EPILOG, -#endif // FEATURE_EH_FUNCLETS }; #if defined(_MSC_VER) && defined(TARGET_ARM) @@ -317,15 +315,11 @@ struct insGroup // Mask of IGF_* flags that should be propagated to new blocks when they are created. // This allows prologs and epilogs to be any number of IGs, but still be // automatically marked properly. 
-#if defined(FEATURE_EH_FUNCLETS) #ifdef DEBUG #define IGF_PROPAGATE_MASK (IGF_EPILOG | IGF_FUNCLET_PROLOG | IGF_FUNCLET_EPILOG) #else // DEBUG #define IGF_PROPAGATE_MASK (IGF_EPILOG | IGF_FUNCLET_PROLOG) #endif // DEBUG -#else // !FEATURE_EH_FUNCLETS -#define IGF_PROPAGATE_MASK (IGF_EPILOG) -#endif // !FEATURE_EH_FUNCLETS // Try to do better packing based on how large regMaskSmall is (8, 16, or 64 bits). @@ -544,8 +538,6 @@ class emitter return (ig != nullptr) && ((ig->igFlags & IGF_EPILOG) != 0); } -#if defined(FEATURE_EH_FUNCLETS) - bool emitIGisInFuncletProlog(const insGroup* ig) { return (ig != nullptr) && ((ig->igFlags & IGF_FUNCLET_PROLOG) != 0); @@ -556,8 +548,6 @@ class emitter return (ig != nullptr) && ((ig->igFlags & IGF_FUNCLET_EPILOG) != 0); } -#endif // FEATURE_EH_FUNCLETS - void emitRecomputeIGoffsets(); void emitDispCommentForHandle(size_t handle, size_t cookie, GenTreeFlags flags); @@ -2356,16 +2346,12 @@ class emitter void emitBegFnEpilog(insGroup* igPh); void emitEndFnEpilog(); -#if defined(FEATURE_EH_FUNCLETS) - void emitBegFuncletProlog(insGroup* igPh); void emitEndFuncletProlog(); void emitBegFuncletEpilog(insGroup* igPh); void emitEndFuncletEpilog(); -#endif // FEATURE_EH_FUNCLETS - /************************************************************************/ /* Methods to record a code position and later convert to offset */ /************************************************************************/ diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index 848ec0f479edd..9e76148c9bdc0 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -17780,9 +17780,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) #if !FEATURE_FIXED_OUT_ARGS bool updateStackLevel = !emitIGisInProlog(ig) && !emitIGisInEpilog(ig); -#if defined(FEATURE_EH_FUNCLETS) updateStackLevel = updateStackLevel && !emitIGisInFuncletProlog(ig) && !emitIGisInFuncletEpilog(ig); -#endif // FEATURE_EH_FUNCLETS // 
Make sure we keep the current stack level up to date if (updateStackLevel) diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 48d8765857fd0..37683b188c303 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -39,10 +39,8 @@ void Compiler::fgInit() fgOSREntryBB = nullptr; fgEntryBBExtraRefs = 0; -#if defined(FEATURE_EH_FUNCLETS) fgFirstFuncletBB = nullptr; fgFuncletsCreated = false; -#endif // FEATURE_EH_FUNCLETS fgBBcount = 0; @@ -113,9 +111,9 @@ void Compiler::fgInit() fgUsedSharedTemps = nullptr; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) ehMaxHndNestingCount = 0; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 /* Init the fgBigOffsetMorphingTemps to be BAD_VAR_NUM. */ for (int i = 0; i < TYP_COUNT; i++) @@ -4039,9 +4037,9 @@ void Compiler::fgFindBasicBlocks() * try-finally blocks. */ -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) HBtab->ebdHandlerNestingLevel = 0; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 HBtab->ebdEnclosingTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; HBtab->ebdEnclosingHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; @@ -4051,12 +4049,12 @@ void Compiler::fgFindBasicBlocks() for (EHblkDsc* xtab = compHndBBtab; xtab < HBtab; xtab++) { -#if !defined(FEATURE_EH_FUNCLETS) - if (jitIsBetween(xtab->ebdHndBegOffs(), hndBegOff, hndEndOff)) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets() && jitIsBetween(xtab->ebdHndBegOffs(), hndBegOff, hndEndOff)) { xtab->ebdHandlerNestingLevel++; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 /* If we haven't recorded an enclosing try index for xtab then see * if this EH region should be recorded. 
We check if the @@ -4089,15 +4087,16 @@ void Compiler::fgFindBasicBlocks() } // end foreach handler table entry -#if !defined(FEATURE_EH_FUNCLETS) - - for (EHblkDsc* const HBtab : EHClauses(this)) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) { - if (ehMaxHndNestingCount <= HBtab->ebdHandlerNestingLevel) - ehMaxHndNestingCount = HBtab->ebdHandlerNestingLevel + 1; + for (EHblkDsc* const HBtab : EHClauses(this)) + { + if (ehMaxHndNestingCount <= HBtab->ebdHandlerNestingLevel) + ehMaxHndNestingCount = HBtab->ebdHandlerNestingLevel + 1; + } } - -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 { // always run these checks for a debug build @@ -4332,7 +4331,7 @@ void Compiler::fgCheckBasicBlockControlFlow() } break; - case BBJ_EHCATCHRET: // block ends with a leave out of a catch (only #if defined(FEATURE_EH_FUNCLETS)) + case BBJ_EHCATCHRET: // block ends with a leave out of a catch (only if UsesFunclets() == true) case BBJ_CALLFINALLY: // block always calls the target finally default: noway_assert(!"Unexpected bbKind"); // these blocks don't get created until importing @@ -5150,18 +5149,19 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) fgFirstColdBlock = bPrev->Next(); } -#if defined(FEATURE_EH_FUNCLETS) #ifdef DEBUG - // You can't unlink a range that includes the first funclet block. A range certainly - // can't cross the non-funclet/funclet region. And you can't unlink the first block - // of the first funclet with this, either. (If that's necessary, it could be allowed - // by updating fgFirstFuncletBB to bEnd->bbNext.) - for (BasicBlock* tempBB = bBeg; tempBB != bEnd->Next(); tempBB = tempBB->Next()) + if (UsesFunclets()) { - assert(tempBB != fgFirstFuncletBB); + // You can't unlink a range that includes the first funclet block. A range certainly + // can't cross the non-funclet/funclet region. And you can't unlink the first block + // of the first funclet with this, either. 
(If that's necessary, it could be allowed + // by updating fgFirstFuncletBB to bEnd->bbNext.) + for (BasicBlock* tempBB = bBeg; tempBB != bEnd->Next(); tempBB = tempBB->Next()) + { + assert(tempBB != fgFirstFuncletBB); + } } #endif // DEBUG -#endif // FEATURE_EH_FUNCLETS } //------------------------------------------------------------------------ @@ -5196,13 +5196,11 @@ BasicBlock* Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) fgUnreachableBlock(block); -#if defined(FEATURE_EH_FUNCLETS) // If block was the fgFirstFuncletBB then set fgFirstFuncletBB to block->bbNext if (block == fgFirstFuncletBB) { fgFirstFuncletBB = block->Next(); } -#endif // FEATURE_EH_FUNCLETS // If this is the first Cold basic block update fgFirstColdBlock if (block->IsFirstColdBlock(this)) @@ -5272,13 +5270,11 @@ BasicBlock* Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) fgFirstColdBlock = block->Next(); } -#if defined(FEATURE_EH_FUNCLETS) // Update fgFirstFuncletBB if necessary if (block == fgFirstFuncletBB) { fgFirstFuncletBB = block->Next(); } -#endif // FEATURE_EH_FUNCLETS // Update successor block start IL offset, if empty predecessor // covers the immediately preceding range. @@ -5687,10 +5683,8 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r BasicBlock* bLast = nullptr; BasicBlock* bPrev = nullptr; -#if defined(FEATURE_EH_FUNCLETS) // We don't support moving try regions... yet? 
- noway_assert(relocateType == FG_RELOCATE_HANDLER); -#endif // FEATURE_EH_FUNCLETS + noway_assert(!UsesFunclets() || relocateType == FG_RELOCATE_HANDLER); HBtab = ehGetDsc(regionIndex); @@ -5728,24 +5722,24 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r goto FAILURE; } -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // In the funclets case, we still need to set some information on the handler blocks - if (bLast->IsLast()) + if (!UsesFunclets() && bLast->IsLast()) { INDEBUG(reason = "region is already at the end of the method";) goto FAILURE; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 // Walk the block list for this purpose: // 1. Verify that all the blocks in the range are either all rarely run or not rarely run. // When creating funclets, we ignore the run rarely flag, as we need to be able to move any blocks // in the range. -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) bool isRare; isRare = bStart->isRunRarely(); -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 block = fgFirstBB; while (true) { @@ -5763,14 +5757,14 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r if (inTheRange) { -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // Unless all blocks are (not) run rarely we must return false. 
- if (isRare != block->isRunRarely()) + if (!UsesFunclets() && isRare != block->isRunRarely()) { INDEBUG(reason = "this region contains both rarely run and non-rarely run blocks";) goto FAILURE; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 validRange = true; } @@ -5798,11 +5792,10 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r fgDispHandlerTab(); } -#if !defined(FEATURE_EH_FUNCLETS) - +#if defined(FEATURE_EH_WINDOWS_X86) // This is really expensive, and quickly becomes O(n^n) with funclets // so only do it once after we've created them (see fgCreateFunclets) - if (expensiveDebugCheckLevel >= 2) + if (!UsesFunclets() && expensiveDebugCheckLevel >= 2) { fgDebugCheckBBlist(); } @@ -5810,16 +5803,15 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r #endif // DEBUG -#if defined(FEATURE_EH_FUNCLETS) - - bStart->SetFlags(BBF_FUNCLET_BEG); // Mark the start block of the funclet - - if (bMiddle != nullptr) + if (UsesFunclets()) { - bMiddle->SetFlags(BBF_FUNCLET_BEG); // Also mark the start block of a filter handler as a funclet - } + bStart->SetFlags(BBF_FUNCLET_BEG); // Mark the start block of the funclet -#endif // FEATURE_EH_FUNCLETS + if (bMiddle != nullptr) + { + bMiddle->SetFlags(BBF_FUNCLET_BEG); // Also mark the start block of a filter handler as a funclet + } + } BasicBlock* bNext; bNext = bLast->Next(); @@ -5830,60 +5822,134 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r BasicBlock* insertAfterBlk; insertAfterBlk = fgLastBB; -#if defined(FEATURE_EH_FUNCLETS) - - // There are several cases we need to consider when moving an EH range. - // If moving a range X, we must consider its relationship to every other EH - // range A in the table. 
Note that each entry in the table represents both - // a protected region and a handler region (possibly including a filter region - // that must live before and adjacent to the handler region), so we must - // consider try and handler regions independently. These are the cases: - // 1. A is completely contained within X (where "completely contained" means - // that the 'begin' and 'last' parts of A are strictly between the 'begin' - // and 'end' parts of X, and aren't equal to either, for example, they don't - // share 'last' blocks). In this case, when we move X, A moves with it, and - // the EH table doesn't need to change. - // 2. X is completely contained within A. In this case, X gets extracted from A, - // and the range of A shrinks, but because A is strictly within X, the EH - // table doesn't need to change. - // 3. A and X have exactly the same range. In this case, A is moving with X and - // the EH table doesn't need to change. - // 4. A and X share the 'last' block. There are two sub-cases: - // (a) A is a larger range than X (such that the beginning of A precedes the - // beginning of X): in this case, we are moving the tail of A. We set the - // 'last' block of A to the block preceding the beginning block of X. - // (b) A is a smaller range than X. Thus, we are moving the entirety of A along - // with X. In this case, nothing in the EH record for A needs to change. - // 5. A and X share the 'beginning' block (but aren't the same range, as in #3). - // This can never happen here, because we are only moving handler ranges (we don't - // move try ranges), and handler regions cannot start at the beginning of a try - // range or handler range and be a subset. - // - // Note that A and X must properly nest for the table to be well-formed. For example, - // the beginning of A can't be strictly within the range of X (that is, the beginning - // of A isn't shared with the beginning of X) and the end of A outside the range. 
+ if (UsesFunclets()) + { + // There are several cases we need to consider when moving an EH range. + // If moving a range X, we must consider its relationship to every other EH + // range A in the table. Note that each entry in the table represents both + // a protected region and a handler region (possibly including a filter region + // that must live before and adjacent to the handler region), so we must + // consider try and handler regions independently. These are the cases: + // 1. A is completely contained within X (where "completely contained" means + // that the 'begin' and 'last' parts of A are strictly between the 'begin' + // and 'end' parts of X, and aren't equal to either, for example, they don't + // share 'last' blocks). In this case, when we move X, A moves with it, and + // the EH table doesn't need to change. + // 2. X is completely contained within A. In this case, X gets extracted from A, + // and the range of A shrinks, but because A is strictly within X, the EH + // table doesn't need to change. + // 3. A and X have exactly the same range. In this case, A is moving with X and + // the EH table doesn't need to change. + // 4. A and X share the 'last' block. There are two sub-cases: + // (a) A is a larger range than X (such that the beginning of A precedes the + // beginning of X): in this case, we are moving the tail of A. We set the + // 'last' block of A to the block preceding the beginning block of X. + // (b) A is a smaller range than X. Thus, we are moving the entirety of A along + // with X. In this case, nothing in the EH record for A needs to change. + // 5. A and X share the 'beginning' block (but aren't the same range, as in #3). + // This can never happen here, because we are only moving handler ranges (we don't + // move try ranges), and handler regions cannot start at the beginning of a try + // range or handler range and be a subset. + // + // Note that A and X must properly nest for the table to be well-formed. 
For example, + // the beginning of A can't be strictly within the range of X (that is, the beginning + // of A isn't shared with the beginning of X) and the end of A outside the range. - for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) + for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) + { + if (XTnum != regionIndex) // we don't need to update our 'last' pointer + { + if (HBtab->ebdTryLast == bLast) + { + // If we moved a set of blocks that were at the end of + // a different try region then we may need to update ebdTryLast + for (block = HBtab->ebdTryBeg; block != nullptr; block = block->Next()) + { + if (block == bPrev) + { + // We were contained within it, so shrink its region by + // setting its 'last' + fgSetTryEnd(HBtab, bPrev); + break; + } + else if (HBtab->ebdTryLast->NextIs(block)) + { + // bPrev does not come after the TryBeg, thus we are larger, and + // it is moving with us. + break; + } + } + } + if (HBtab->ebdHndLast == bLast) + { + // If we moved a set of blocks that were at the end of + // a different handler region then we must update ebdHndLast + for (block = HBtab->ebdHndBeg; block != nullptr; block = block->Next()) + { + if (block == bPrev) + { + fgSetHndEnd(HBtab, bPrev); + break; + } + else if (HBtab->ebdHndLast->NextIs(block)) + { + // bPrev does not come after the HndBeg + break; + } + } + } + } + } // end exception table iteration + + // Insert the block(s) we are moving after fgLastBlock + fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); + + if (fgFirstFuncletBB == nullptr) // The funclet region isn't set yet + { + fgFirstFuncletBB = bStart; + } + else + { + assert(fgFirstFuncletBB != insertAfterBlk->Next()); // We insert at the end, not at the beginning, of the + // funclet region. 
+ } + +#ifdef DEBUG + if (verbose) + { + printf("Create funclets: moved region\n"); + fgDispHandlerTab(); + } + +// We have to wait to do this until we've created all the additional regions +// Because this relies on ebdEnclosingTryIndex and ebdEnclosingHndIndex +#endif // DEBUG + } + else { - if (XTnum != regionIndex) // we don't need to update our 'last' pointer +#if defined(FEATURE_EH_WINDOWS_X86) + for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { + if (XTnum == regionIndex) + { + // Don't update our handler's Last info + continue; + } + if (HBtab->ebdTryLast == bLast) { // If we moved a set of blocks that were at the end of // a different try region then we may need to update ebdTryLast - for (block = HBtab->ebdTryBeg; block != nullptr; block = block->Next()) + for (block = HBtab->ebdTryBeg; block != NULL; block = block->Next()) { if (block == bPrev) { - // We were contained within it, so shrink its region by - // setting its 'last' fgSetTryEnd(HBtab, bPrev); break; } else if (HBtab->ebdTryLast->NextIs(block)) { - // bPrev does not come after the TryBeg, thus we are larger, and - // it is moving with us. 
+ // bPrev does not come after the TryBeg break; } } @@ -5892,7 +5958,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r { // If we moved a set of blocks that were at the end of // a different handler region then we must update ebdHndLast - for (block = HBtab->ebdHndBeg; block != nullptr; block = block->Next()) + for (block = HBtab->ebdHndBeg; block != NULL; block = block->Next()) { if (block == bPrev) { @@ -5906,84 +5972,12 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r } } } - } - } // end exception table iteration + } // end exception table iteration - // Insert the block(s) we are moving after fgLastBlock - fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); - - if (fgFirstFuncletBB == nullptr) // The funclet region isn't set yet - { - fgFirstFuncletBB = bStart; + // We have decided to insert the block(s) after fgLastBlock + fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); +#endif // FEATURE_EH_WINDOWS_X86 } - else - { - assert(fgFirstFuncletBB != insertAfterBlk->Next()); // We insert at the end, not at the beginning, of the - // funclet region. 
- } - -#ifdef DEBUG - if (verbose) - { - printf("Create funclets: moved region\n"); - fgDispHandlerTab(); - } - -// We have to wait to do this until we've created all the additional regions -// Because this relies on ebdEnclosingTryIndex and ebdEnclosingHndIndex -#endif // DEBUG - -#else // !FEATURE_EH_FUNCLETS - - for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) - { - if (XTnum == regionIndex) - { - // Don't update our handler's Last info - continue; - } - - if (HBtab->ebdTryLast == bLast) - { - // If we moved a set of blocks that were at the end of - // a different try region then we may need to update ebdTryLast - for (block = HBtab->ebdTryBeg; block != NULL; block = block->Next()) - { - if (block == bPrev) - { - fgSetTryEnd(HBtab, bPrev); - break; - } - else if (HBtab->ebdTryLast->NextIs(block)) - { - // bPrev does not come after the TryBeg - break; - } - } - } - if (HBtab->ebdHndLast == bLast) - { - // If we moved a set of blocks that were at the end of - // a different handler region then we must update ebdHndLast - for (block = HBtab->ebdHndBeg; block != NULL; block = block->Next()) - { - if (block == bPrev) - { - fgSetHndEnd(HBtab, bPrev); - break; - } - else if (HBtab->ebdHndLast->NextIs(block)) - { - // bPrev does not come after the HndBeg - break; - } - } - } - } // end exception table iteration - - // We have decided to insert the block(s) after fgLastBlock - fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); -#endif // !FEATURE_EH_FUNCLETS goto DONE; @@ -6127,16 +6121,11 @@ void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk) fgInsertBBafter(insertBeforeBlk->Prev(), newBlk); } -#if defined(FEATURE_EH_FUNCLETS) - /* Update fgFirstFuncletBB if insertBeforeBlk is the first block of the funclet region. 
*/ - if (fgFirstFuncletBB == insertBeforeBlk) { fgFirstFuncletBB = newBlk; } - -#endif // FEATURE_EH_FUNCLETS } /***************************************************************************** @@ -6607,7 +6596,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, // Figure out the start and end block range to search for an insertion location. Pick the beginning and // ending blocks of the target EH region (the 'endBlk' is one past the last block of the EH region, to make - // loop iteration easier). Note that, after funclets have been created (for FEATURE_EH_FUNCLETS), + // loop iteration easier). Note that, after funclets have been created (for UsesFunclets() == true), // this linear block range will not include blocks of handlers for try/handler clauses nested within // this EH region, as those blocks have been extracted as funclets. That is ok, though, because we don't // want to insert a block in any nested EH region. diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index e2af55f45ac82..6311d189186bd 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -2406,7 +2406,6 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, blockTargetFieldWidth, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); // } -#if defined(FEATURE_EH_FUNCLETS) if (inDefaultOrder && (block == fgFirstFuncletBB)) { printf("++++++%*s+++++++++++++++++++++++++++++++++++++%*s++++++++++++++++++++++++++%*s++++++++++" @@ -2415,7 +2414,6 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, ibcColWidth, "++++++++++++", // blockTargetFieldWidth, "++++++++++++++++++++++++++++++++++++++++++++++"); // } -#endif // FEATURE_EH_FUNCLETS fgTableDispBasicBlock(block, nextBlock, printEdgeLikelihoods, blockTargetFieldWidth, ibcColWidth); @@ -2887,8 +2885,6 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) } } -#if defined(FEATURE_EH_FUNCLETS) - if 
(!found && comp->fgFuncletsCreated) { // There is no easy way to search just the funclets that were pulled out of @@ -2907,8 +2903,6 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) } } -#endif // FEATURE_EH_FUNCLETS - assert(found && "BBJ_EHFINALLYRET predecessor of block that doesn't follow a BBJ_CALLFINALLY!"); return found; } @@ -2969,7 +2963,6 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef return; } -#if defined(FEATURE_EH_FUNCLETS) bool reachedFirstFunclet = false; if (fgFuncletsCreated) { @@ -2983,7 +2976,6 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef assert(fgFirstFuncletBB->HasFlag(BBF_FUNCLET_BEG)); } } -#endif // FEATURE_EH_FUNCLETS /* Check bbNum, bbRefs and bbPreds */ // First, pick a traversal stamp, and label all the blocks with it. @@ -3071,7 +3063,6 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef assert(block->bbPreds == nullptr); } -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // @@ -3096,7 +3087,6 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef assert(block->hasHndIndex() == true); } } -#endif // FEATURE_EH_FUNCLETS if (checkBBRefs) { @@ -3180,7 +3170,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // try { // try { // LEAVE L_OUTER; // this becomes a branch to a BBJ_CALLFINALLY in an outer try region - // // (in the FEATURE_EH_CALLFINALLY_THUNKS case) + // // (in the UsesCallFinallyThunks case) // } catch { // } // } finally { @@ -3191,7 +3181,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef if (ehDsc->ebdTryBeg == succBlock) { // The BBJ_CALLFINALLY is the first block of it's `try` region. Don't check the predecessor. 
- // Note that this case won't occur in the FEATURE_EH_CALLFINALLY_THUNKS case, since the + // Note that this case won't occur in the UsesCallFinallyThunks case, since the // BBJ_CALLFINALLY in that case won't exist in the `try` region of the `finallyIndex`. } else diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 0e1ce24c39ed8..47127fc0ad20f 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -32,10 +32,8 @@ // PhaseStatus Compiler::fgRemoveEmptyFinally() { -#if defined(FEATURE_EH_FUNCLETS) // We need to do this transformation before funclets are created. assert(!fgFuncletsCreated); -#endif // FEATURE_EH_FUNCLETS // We need to update the bbPreds lists. assert(fgPredsComputed); @@ -271,10 +269,8 @@ PhaseStatus Compiler::fgRemoveEmptyTry() { JITDUMP("\n*************** In fgRemoveEmptyTry()\n"); -#if defined(FEATURE_EH_FUNCLETS) // We need to do this transformation before funclets are created. assert(!fgFuncletsCreated); -#endif // FEATURE_EH_FUNCLETS // We need to update the bbPreds lists. 
assert(fgPredsComputed); @@ -341,6 +337,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() BasicBlock* const lastTryBlock = HBtab->ebdTryLast; BasicBlock* const firstHandlerBlock = HBtab->ebdHndBeg; BasicBlock* const lastHandlerBlock = HBtab->ebdHndLast; + BasicBlock* callFinally; assert(firstTryBlock->getTryIndex() == XTnum); @@ -353,63 +350,64 @@ PhaseStatus Compiler::fgRemoveEmptyTry() continue; } -#if FEATURE_EH_CALLFINALLY_THUNKS - - // Look for blocks that are always jumps to a call finally - // pair that targets the finally - if (!firstTryBlock->KindIs(BBJ_ALWAYS)) + if (UsesCallFinallyThunks()) { - JITDUMP("EH#%u first try block " FMT_BB " not jump to a callfinally; skipping.\n", XTnum, - firstTryBlock->bbNum); - XTnum++; - continue; - } + // Look for blocks that are always jumps to a call finally + // pair that targets the finally + if (!firstTryBlock->KindIs(BBJ_ALWAYS)) + { + JITDUMP("EH#%u first try block " FMT_BB " not jump to a callfinally; skipping.\n", XTnum, + firstTryBlock->bbNum); + XTnum++; + continue; + } - BasicBlock* const callFinally = firstTryBlock->GetTarget(); + callFinally = firstTryBlock->GetTarget(); - // Look for call finally pair. Note this will also disqualify - // empty try removal in cases where the finally doesn't - // return. - if (!callFinally->isBBCallFinallyPair() || !callFinally->TargetIs(firstHandlerBlock)) - { - JITDUMP("EH#%u first try block " FMT_BB " always jumps but not to a callfinally; skipping.\n", XTnum, - firstTryBlock->bbNum); - XTnum++; - continue; - } + // Look for call finally pair. Note this will also disqualify + // empty try removal in cases where the finally doesn't + // return. + if (!callFinally->isBBCallFinallyPair() || !callFinally->TargetIs(firstHandlerBlock)) + { + JITDUMP("EH#%u first try block " FMT_BB " always jumps but not to a callfinally; skipping.\n", XTnum, + firstTryBlock->bbNum); + XTnum++; + continue; + } - // Try itself must be a single block. 
- if (firstTryBlock != lastTryBlock) - { - JITDUMP("EH#%u first try block " FMT_BB " not only block in try; skipping.\n", XTnum, - firstTryBlock->Next()->bbNum); - XTnum++; - continue; + // Try itself must be a single block. + if (firstTryBlock != lastTryBlock) + { + JITDUMP("EH#%u first try block " FMT_BB " not only block in try; skipping.\n", XTnum, + firstTryBlock->Next()->bbNum); + XTnum++; + continue; + } } - -#else - // Look for call finally pair within the try itself. Note this - // will also disqualify empty try removal in cases where the - // finally doesn't return. - if (!firstTryBlock->isBBCallFinallyPair() || !firstTryBlock->TargetIs(firstHandlerBlock)) + else { - JITDUMP("EH#%u first try block " FMT_BB " not a callfinally; skipping.\n", XTnum, firstTryBlock->bbNum); - XTnum++; - continue; - } + // Look for call finally pair within the try itself. Note this + // will also disqualify empty try removal in cases where the + // finally doesn't return. + if (!firstTryBlock->isBBCallFinallyPair() || !firstTryBlock->TargetIs(firstHandlerBlock)) + { + JITDUMP("EH#%u first try block " FMT_BB " not a callfinally; skipping.\n", XTnum, firstTryBlock->bbNum); + XTnum++; + continue; + } - BasicBlock* const callFinally = firstTryBlock; + callFinally = firstTryBlock; - // Try must be a callalways pair of blocks. - if (!firstTryBlock->NextIs(lastTryBlock)) - { - JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, firstTryBlock->Next()->bbNum); - XTnum++; - continue; + // Try must be a callalways pair of blocks. 
+ if (!firstTryBlock->NextIs(lastTryBlock)) + { + JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n", XTnum, + firstTryBlock->Next()->bbNum); + XTnum++; + continue; + } } -#endif // FEATURE_EH_CALLFINALLY_THUNKS - JITDUMP("EH#%u has empty try, removing the try region and promoting the finally.\n", XTnum); // There should be just one callfinally that invokes this @@ -527,21 +525,24 @@ PhaseStatus Compiler::fgRemoveEmptyTry() } } -#if !defined(FEATURE_EH_FUNCLETS) - // If we're in a non-funclet model, decrement the nesting - // level of any GT_END_LFIN we find in the handler region, - // since we're removing the enclosing handler. - for (Statement* const stmt : block->Statements()) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) { - GenTree* expr = stmt->GetRootNode(); - if (expr->gtOper == GT_END_LFIN) + // If we're in a non-funclet model, decrement the nesting + // level of any GT_END_LFIN we find in the handler region, + // since we're removing the enclosing handler. + for (Statement* const stmt : block->Statements()) { - const size_t nestLevel = expr->AsVal()->gtVal1; - assert(nestLevel > 0); - expr->AsVal()->gtVal1 = nestLevel - 1; + GenTree* expr = stmt->GetRootNode(); + if (expr->gtOper == GT_END_LFIN) + { + const size_t nestLevel = expr->AsVal()->gtVal1; + assert(nestLevel > 0); + expr->AsVal()->gtVal1 = nestLevel - 1; + } } } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 } // (6) Remove the try-finally EH region. This will compact the @@ -605,10 +606,8 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // PhaseStatus Compiler::fgCloneFinally() { -#if defined(FEATURE_EH_FUNCLETS) // We need to do this transformation before funclets are created. assert(!fgFuncletsCreated); -#endif // FEATURE_EH_FUNCLETS // We need to update the bbPreds lists. 
assert(fgPredsComputed); @@ -795,25 +794,29 @@ PhaseStatus Compiler::fgCloneFinally() for (BasicBlock* block = lastTryBlock; block != beforeTryBlock; block = block->Prev()) { -#if FEATURE_EH_CALLFINALLY_THUNKS - // Blocks that transfer control to callfinallies are usually - // BBJ_ALWAYS blocks, but the last block of a try may fall - // through to a callfinally, or could be the target of a BBJ_CALLFINALLYRET, - // indicating a chained callfinally. BasicBlock* jumpDest = nullptr; - if (block->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLYRET)) + if (UsesCallFinallyThunks()) { - jumpDest = block->GetTarget(); - } + // Blocks that transfer control to callfinallies are usually + // BBJ_ALWAYS blocks, but the last block of a try may fall + // through to a callfinally, or could be the target of a BBJ_CALLFINALLYRET, + // indicating a chained callfinally. + + if (block->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLYRET)) + { + jumpDest = block->GetTarget(); + } - if (jumpDest == nullptr) + if (jumpDest == nullptr) + { + continue; + } + } + else { - continue; + jumpDest = block; } -#else - BasicBlock* const jumpDest = block; -#endif // FEATURE_EH_CALLFINALLY_THUNKS // The jumpDest must be a callfinally that in turn invokes the // finally of interest. @@ -880,29 +883,32 @@ PhaseStatus Compiler::fgCloneFinally() isUpdate = true; } -#if FEATURE_EH_CALLFINALLY_THUNKS - // When there are callfinally thunks, we don't expect to see the - // callfinally within a handler region either. - assert(!jumpDest->hasHndIndex()); - - // Update the clone insertion point to just after the - // call always pair. - cloneInsertAfter = finallyReturnBlock; - - // We will consider moving the callfinally so we can fall - // through from the try into the clone. - tryToRelocateCallFinally = true; - - JITDUMP("%s path to clone: try block " FMT_BB " jumps to callfinally at " FMT_BB ";" - " the call returns to " FMT_BB " which jumps to " FMT_BB "\n", - isUpdate ? 
"Updating" : "Choosing", block->bbNum, jumpDest->bbNum, finallyReturnBlock->bbNum, - postTryFinallyBlock->bbNum); -#else - JITDUMP("%s path to clone: try block " FMT_BB " is a callfinally;" - " the call returns to " FMT_BB " which jumps to " FMT_BB "\n", - isUpdate ? "Updating" : "Choosing", block->bbNum, finallyReturnBlock->bbNum, - postTryFinallyBlock->bbNum); -#endif // FEATURE_EH_CALLFINALLY_THUNKS + if (UsesCallFinallyThunks()) + { + // When there are callfinally thunks, we don't expect to see the + // callfinally within a handler region either. + assert(!jumpDest->hasHndIndex()); + + // Update the clone insertion point to just after the + // call always pair. + cloneInsertAfter = finallyReturnBlock; + + // We will consider moving the callfinally so we can fall + // through from the try into the clone. + tryToRelocateCallFinally = true; + + JITDUMP("%s path to clone: try block " FMT_BB " jumps to callfinally at " FMT_BB ";" + " the call returns to " FMT_BB " which jumps to " FMT_BB "\n", + isUpdate ? "Updating" : "Choosing", block->bbNum, jumpDest->bbNum, finallyReturnBlock->bbNum, + postTryFinallyBlock->bbNum); + } + else + { + JITDUMP("%s path to clone: try block " FMT_BB " is a callfinally;" + " the call returns to " FMT_BB " which jumps to " FMT_BB "\n", + isUpdate ? "Updating" : "Choosing", block->bbNum, finallyReturnBlock->bbNum, + postTryFinallyBlock->bbNum); + } // For non-pgo just take the first one we find. // For pgo, keep searching in case we find one we like better. @@ -1335,19 +1341,15 @@ void Compiler::fgDebugCheckTryFinallyExits() continue; } -#if FEATURE_EH_CALLFINALLY_THUNKS - // When there are callfinally thunks, callfinallies // logically "belong" to a child region and the exit // path validity will be checked when looking at the // try blocks in that region. 
- if (block->KindIs(BBJ_CALLFINALLY)) + if (UsesCallFinallyThunks() && block->KindIs(BBJ_CALLFINALLY)) { continue; } -#endif // FEATURE_EH_CALLFINALLY_THUNKS - // Now we know block lies directly within the try of a // try-finally, and succBlock is in an enclosing // region (possibly the method region). So this path @@ -1365,19 +1367,16 @@ void Compiler::fgDebugCheckTryFinallyExits() // (e) via an always jump clonefinally exit bool isCallToFinally = false; -#if FEATURE_EH_CALLFINALLY_THUNKS - if (succBlock->KindIs(BBJ_CALLFINALLY)) + if (UsesCallFinallyThunks() && succBlock->KindIs(BBJ_CALLFINALLY)) { // case (a1) isCallToFinally = isFinally && succBlock->TargetIs(finallyBlock); } -#else // !FEATURE_EH_CALLFINALLY_THUNKS - if (block->KindIs(BBJ_CALLFINALLY)) + else if (!UsesCallFinallyThunks() && block->KindIs(BBJ_CALLFINALLY)) { // case (a2) isCallToFinally = isFinally && block->TargetIs(finallyBlock); } -#endif // !FEATURE_EH_CALLFINALLY_THUNKS bool isJumpToClonedFinally = false; @@ -1455,27 +1454,30 @@ void Compiler::fgDebugCheckTryFinallyExits() // void Compiler::fgCleanupContinuation(BasicBlock* continuation) { -#if !defined(FEATURE_EH_FUNCLETS) - // The continuation may be a finalStep block. - // It is now a normal block, so clear the special keep - // always flag. - continuation->RemoveFlags(BBF_KEEP_BBJ_ALWAYS); - - // Remove the GT_END_LFIN from the continuation, - // Note we only expect to see one such statement. - bool foundEndLFin = false; - for (Statement* const stmt : continuation->Statements()) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) { - GenTree* expr = stmt->GetRootNode(); - if (expr->gtOper == GT_END_LFIN) + // The continuation may be a finalStep block. + // It is now a normal block, so clear the special keep + // always flag. + continuation->RemoveFlags(BBF_KEEP_BBJ_ALWAYS); + + // Remove the GT_END_LFIN from the continuation, + // Note we only expect to see one such statement. 
+ bool foundEndLFin = false; + for (Statement* const stmt : continuation->Statements()) { - assert(!foundEndLFin); - fgRemoveStmt(continuation, stmt); - foundEndLFin = true; + GenTree* expr = stmt->GetRootNode(); + if (expr->gtOper == GT_END_LFIN) + { + assert(!foundEndLFin); + fgRemoveStmt(continuation, stmt); + foundEndLFin = true; + } } + assert(foundEndLFin); } - assert(foundEndLFin); -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 } //------------------------------------------------------------------------ @@ -1493,10 +1495,8 @@ void Compiler::fgCleanupContinuation(BasicBlock* continuation) // PhaseStatus Compiler::fgMergeFinallyChains() { -#if defined(FEATURE_EH_FUNCLETS) // We need to do this transformation before funclets are created. assert(!fgFuncletsCreated); -#endif // FEATURE_EH_FUNCLETS // We need to update the bbPreds lists. assert(fgPredsComputed); @@ -1521,22 +1521,26 @@ PhaseStatus Compiler::fgMergeFinallyChains() bool enableMergeFinallyChains = true; -#if !defined(FEATURE_EH_FUNCLETS) - // For non-funclet models (x86) the callfinallys may contain - // statements and the continuations contain GT_END_LFINs. So no - // merging is possible until the GT_END_LFIN blocks can be merged - // and merging is not safe unless the callfinally blocks are split. - JITDUMP("EH using non-funclet model; merging not yet implemented.\n"); - enableMergeFinallyChains = false; -#endif // !FEATURE_EH_FUNCLETS - -#if !FEATURE_EH_CALLFINALLY_THUNKS - // For non-thunk EH models (x86) the callfinallys may contain - // statements, and merging is not safe unless the callfinally - // blocks are split. - JITDUMP("EH using non-callfinally thunk model; merging not yet implemented.\n"); - enableMergeFinallyChains = false; -#endif +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) + { + // For non-funclet models (x86) the callfinallys may contain + // statements and the continuations contain GT_END_LFINs. 
So no + // merging is possible until the GT_END_LFIN blocks can be merged + // and merging is not safe unless the callfinally blocks are split. + JITDUMP("EH using non-funclet model; merging not yet implemented.\n"); + enableMergeFinallyChains = false; + } +#endif // FEATURE_EH_WINDOWS_X86 + + if (!UsesCallFinallyThunks()) + { + // For non-thunk EH models (x86) the callfinallys may contain + // statements, and merging is not safe unless the callfinally + // blocks are split. + JITDUMP("EH using non-callfinally thunk model; merging not yet implemented.\n"); + enableMergeFinallyChains = false; + } if (!enableMergeFinallyChains) { diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index f5ca834696ce5..94e6bc5b3c057 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -1630,7 +1630,6 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) break; } -#if defined(FEATURE_EH_FUNCLETS) /* Don't remove an empty block that is in a different EH region * from its successor block, if the block is the target of a * catch return. It is required that the return address of a @@ -1638,6 +1637,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) * abort exceptions to work. Insert a NOP in the empty block * to ensure we generate code for the block, if we keep it. 
*/ + if (UsesFunclets()) { BasicBlock* succBlock = block->GetTarget(); @@ -1693,7 +1693,6 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } } } -#endif // FEATURE_EH_FUNCLETS if (!ehCanDeleteEmptyBlock(block)) { @@ -3454,9 +3453,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) { noway_assert(opts.compDbgCode == false); -#if defined(FEATURE_EH_FUNCLETS) - assert(fgFuncletsCreated); -#endif // FEATURE_EH_FUNCLETS + assert(UsesFunclets() == fgFuncletsCreated); // We can't relocate anything if we only have one block if (fgFirstBB->IsLast()) @@ -3472,9 +3469,12 @@ bool Compiler::fgReorderBlocks(bool useProfile) // First let us expand the set of run rarely blocks newRarelyRun |= fgExpandRarelyRunBlocks(); -#if !defined(FEATURE_EH_FUNCLETS) - movedBlocks |= fgRelocateEHRegions(); -#endif // !FEATURE_EH_FUNCLETS +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) + { + movedBlocks |= fgRelocateEHRegions(); + } +#endif // FEATURE_EH_WINDOWS_X86 // // If we are using profile weights we can change some @@ -3993,13 +3993,11 @@ bool Compiler::fgReorderBlocks(bool useProfile) break; } -#if defined(FEATURE_EH_FUNCLETS) // Check if we've reached the funclets region, at the end of the function if (bEnd->NextIs(fgFirstFuncletBB)) { break; } -#endif // FEATURE_EH_FUNCLETS if (bNext == bDest) { diff --git a/src/coreclr/jit/fgstmt.cpp b/src/coreclr/jit/fgstmt.cpp index fead5b82e0b34..0c0d7384f275b 100644 --- a/src/coreclr/jit/fgstmt.cpp +++ b/src/coreclr/jit/fgstmt.cpp @@ -538,9 +538,9 @@ inline bool OperIsControlFlow(genTreeOps oper) case GT_RETURN: case GT_RETFILT: -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 return true; default: diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 92b84e31aa72b..91b7e878ad45b 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -1287,8 +1287,6 @@ GenTree* 
Compiler::fgGetCritSectOfStaticMethod() return tree; } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * * Add monitor enter/exit calls for synchronized methods, and a try/fault @@ -1351,6 +1349,8 @@ GenTree* Compiler::fgGetCritSectOfStaticMethod() void Compiler::fgAddSyncMethodEnterExit() { + assert(UsesFunclets()); + assert((info.compFlags & CORINFO_FLG_SYNCH) != 0); // We need to do this transformation before funclets are created. @@ -1663,8 +1663,6 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) #endif } -#endif // FEATURE_EH_FUNCLETS - //------------------------------------------------------------------------ // fgAddReversePInvokeEnterExit: Add enter/exit calls for reverse PInvoke methods // @@ -2349,17 +2347,15 @@ PhaseStatus Compiler::fgAddInternal() // Merge return points if required or beneficial MergedReturns merger(this); -#if defined(FEATURE_EH_FUNCLETS) // Add the synchronized method enter/exit calls and try/finally protection. Note // that this must happen before the one BBJ_RETURN block is created below, so the // BBJ_RETURN block gets placed at the top-level, not within an EH region. (Otherwise, // we'd have to be really careful when creating the synchronized method try/finally // not to include the BBJ_RETURN block.) - if ((info.compFlags & CORINFO_FLG_SYNCH) != 0) + if (UsesFunclets() && (info.compFlags & CORINFO_FLG_SYNCH) != 0) { fgAddSyncMethodEnterExit(); } -#endif // FEATURE_EH_FUNCLETS // // We will generate just one epilog (return block) @@ -2470,11 +2466,11 @@ PhaseStatus Compiler::fgAddInternal() madeChanges = true; } -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) /* Is this a 'synchronized' method? 
*/ - if (info.compFlags & CORINFO_FLG_SYNCH) + if (!UsesFunclets() && (info.compFlags & CORINFO_FLG_SYNCH)) { GenTree* tree = nullptr; @@ -2542,7 +2538,7 @@ PhaseStatus Compiler::fgAddInternal() madeChanges = true; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 if (opts.IsReversePInvoke()) { @@ -2728,15 +2724,11 @@ BasicBlock* Compiler::fgGetDomSpeculatively(const BasicBlock* block) // BasicBlock* Compiler::fgLastBBInMainFunction() { -#if defined(FEATURE_EH_FUNCLETS) - if (fgFirstFuncletBB != nullptr) { return fgFirstFuncletBB->Prev(); } -#endif // FEATURE_EH_FUNCLETS - assert(fgLastBB->IsLast()); return fgLastBB; } @@ -2748,21 +2740,15 @@ BasicBlock* Compiler::fgLastBBInMainFunction() // BasicBlock* Compiler::fgEndBBAfterMainFunction() { -#if defined(FEATURE_EH_FUNCLETS) - if (fgFirstFuncletBB != nullptr) { return fgFirstFuncletBB; } -#endif // FEATURE_EH_FUNCLETS - assert(fgLastBB->IsLast()); return nullptr; } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * Introduce a new head block of the handler for the prolog to be put in, ahead * of the current handler head 'block'. 
@@ -2778,6 +2764,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) } #endif + assert(UsesFunclets()); assert(block->hasHndIndex()); assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler @@ -2840,6 +2827,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) // void Compiler::fgCreateFuncletPrologBlocks() { + assert(UsesFunclets()); noway_assert(fgPredsComputed); assert(!fgFuncletsCreated); @@ -2904,6 +2892,7 @@ void Compiler::fgCreateFuncletPrologBlocks() // PhaseStatus Compiler::fgCreateFunclets() { + assert(UsesFunclets()); assert(!fgFuncletsCreated); fgCreateFuncletPrologBlocks(); @@ -2979,6 +2968,8 @@ PhaseStatus Compiler::fgCreateFunclets() // bool Compiler::fgFuncletsAreCold() { + assert(UsesFunclets()); + for (BasicBlock* block = fgFirstFuncletBB; block != nullptr; block = block->Next()) { if (!block->isRunRarely()) @@ -2990,8 +2981,6 @@ bool Compiler::fgFuncletsAreCold() return true; } -#endif // defined(FEATURE_EH_FUNCLETS) - //------------------------------------------------------------------------ // fgDetermineFirstColdBlock: figure out where we might split the block // list to put some blocks into the cold code section @@ -3061,14 +3050,12 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() } #endif // HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION -#ifdef FEATURE_EH_FUNCLETS // Make note of if we're in the funclet section, // so we can stop the search early. if (block == fgFirstFuncletBB) { inFuncletSection = true; } -#endif // FEATURE_EH_FUNCLETS // Do we have a candidate for the first cold block? if (firstColdBlock != nullptr) @@ -3082,7 +3069,6 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() firstColdBlock = nullptr; prevToFirstColdBlock = nullptr; -#ifdef FEATURE_EH_FUNCLETS // If we're already in the funclet section, try to split // at fgFirstFuncletBB, and stop the search. 
if (inFuncletSection) @@ -3095,13 +3081,10 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() break; } -#endif // FEATURE_EH_FUNCLETS } } else // (firstColdBlock == NULL) -- we don't have a candidate for first cold block { - -#ifdef FEATURE_EH_FUNCLETS // // If a function has exception handling and we haven't found the first cold block yet, // consider splitting at the first funclet; do not consider splitting between funclets, @@ -3117,7 +3100,6 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() break; } -#endif // FEATURE_EH_FUNCLETS // Is this a cold block? if (!blockMustBeInHotSection && block->isRunRarely()) diff --git a/src/coreclr/jit/gcencode.cpp b/src/coreclr/jit/gcencode.cpp index a093d8a20e598..b7972f216d53e 100644 --- a/src/coreclr/jit/gcencode.cpp +++ b/src/coreclr/jit/gcencode.cpp @@ -62,8 +62,6 @@ ReturnKind GCInfo::getReturnKind() } } -#if !defined(JIT32_GCENCODER) || defined(FEATURE_EH_FUNCLETS) - // gcMarkFilterVarsPinned - Walk all lifetimes and make it so that anything // live in a filter is marked as pinned (often by splitting the lifetime // so that *only* the filter region is pinned). This should only be @@ -86,6 +84,7 @@ ReturnKind GCInfo::getReturnKind() // void GCInfo::gcMarkFilterVarsPinned() { + assert(compiler->UsesFunclets()); assert(compiler->ehAnyFunclets()); for (EHblkDsc* const HBtab : EHClauses(compiler)) @@ -293,6 +292,8 @@ void GCInfo::gcMarkFilterVarsPinned() void GCInfo::gcInsertVarPtrDscSplit(varPtrDsc* desc, varPtrDsc* begin) { + assert(compiler->UsesFunclets()); + #ifndef JIT32_GCENCODER (void)begin; desc->vpdNext = gcVarPtrList; @@ -331,6 +332,8 @@ void GCInfo::gcDumpVarPtrDsc(varPtrDsc* desc) const GCtype gcType = (desc->vpdVarNum & byref_OFFSET_FLAG) ? GCT_BYREF : GCT_GCREF; const bool isPin = (desc->vpdVarNum & pinned_OFFSET_FLAG) != 0; + assert(compiler->UsesFunclets()); + printf("[%08X] %s%s var at [%s", dspPtr(desc), GCtypeStr(gcType), isPin ? "pinned-ptr" : "", compiler->isFramePointerUsed() ? 
STR_FPBASE : STR_SPBASE); @@ -348,8 +351,6 @@ void GCInfo::gcDumpVarPtrDsc(varPtrDsc* desc) #endif // DEBUG -#endif // !defined(JIT32_GCENCODER) || defined(FEATURE_EH_FUNCLETS) - #ifdef JIT32_GCENCODER #include "emit.h" @@ -1560,9 +1561,9 @@ size_t GCInfo::gcInfoBlockHdrSave( header->syncStartOffset = INVALID_SYNC_OFFSET; header->syncEndOffset = INVALID_SYNC_OFFSET; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // JIT is responsible for synchronization on funclet-based EH model that x86/Linux uses. - if (compiler->info.compFlags & CORINFO_FLG_SYNCH) + if (!compiler->UsesFunclets() && compiler->info.compFlags & CORINFO_FLG_SYNCH) { assert(compiler->syncStartEmitCookie != nullptr); header->syncStartOffset = compiler->GetEmitter()->emitCodeOffset(compiler->syncStartEmitCookie, 0); @@ -2315,8 +2316,8 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un if (header.varPtrTableSize != 0) { -#if !defined(FEATURE_EH_FUNCLETS) - if (keepThisAlive) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!compiler->UsesFunclets() && keepThisAlive) { // Encoding of untracked variables does not support reporting // "this". 
So report it as a tracked variable with a liveness @@ -2340,7 +2341,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un dest += (sz & mask); totalSize += sz; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 /* We'll use a delta encoding for the lifetime offsets */ @@ -3957,7 +3958,6 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSiz gcInfoEncoderWithLog->SetPrologSize(prologSize); } -#if defined(FEATURE_EH_FUNCLETS) if (compiler->lvaPSPSym != BAD_VAR_NUM) { #ifdef TARGET_AMD64 @@ -3976,8 +3976,6 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSiz } #endif // TARGET_AMD64 -#endif // FEATURE_EH_FUNCLETS - #ifdef TARGET_ARMARCH if (compiler->codeGen->GetHasTailCalls()) { @@ -4694,8 +4692,8 @@ void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode // unused by alignment C_ASSERT((OFFSET_MASK + 1) <= sizeof(int)); -#if defined(DEBUG) && defined(JIT32_GCENCODER) && !defined(FEATURE_EH_FUNCLETS) - if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS) +#if defined(DEBUG) && defined(JIT32_GCENCODER) && defined(FEATURE_EH_WINDOWS_X86) + if (!compiler->UsesFunclets() && mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS) { // Tracked variables can't be pinned, and the encoding takes // advantage of that by using the same bit for 'pinned' and 'this' diff --git a/src/coreclr/jit/gcinfo.cpp b/src/coreclr/jit/gcinfo.cpp index 8045cd873260e..b7c1a2667bf91 100644 --- a/src/coreclr/jit/gcinfo.cpp +++ b/src/coreclr/jit/gcinfo.cpp @@ -565,7 +565,7 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int* pUntrackedCount, UNALIGNED // // Arguments: // varNum - the variable number to check; -// pKeepThisAlive - if !FEATURE_EH_FUNCLETS and the argument != nullptr remember +// pKeepThisAlive - if !UsesFunclets() and the argument != nullptr remember // if `this` should be kept alive and considered tracked. 
// // Return value: @@ -614,16 +614,16 @@ bool GCInfo::gcIsUntrackedLocalOrNonEnregisteredArg(unsigned varNum, bool* pKeep } } -#if !defined(FEATURE_EH_FUNCLETS) - if (compiler->lvaIsOriginalThisArg(varNum) && compiler->lvaKeepAliveAndReportThis()) +#if defined(FEATURE_EH_WINDOWS_X86) + if (!compiler->UsesFunclets() && compiler->lvaIsOriginalThisArg(varNum) && compiler->lvaKeepAliveAndReportThis()) { // "this" is in the untracked variable area, but encoding of untracked variables does not support reporting // "this". So report it as a tracked variable with a liveness extending over the entire method. // // TODO-x86-Cleanup: the semantic here is not clear, it would be useful to check different cases and // add a description where "this" is saved and how it is tracked in each of them: - // 1) when FEATURE_EH_FUNCLETS defined (x86 Linux); - // 2) when FEATURE_EH_FUNCLETS not defined, lvaKeepAliveAndReportThis == true, compJmpOpUsed == true; + // 1) when UsesFunclets() == true (x86 Linux); + // 2) when UsesFunclets() == false, lvaKeepAliveAndReportThis == true, compJmpOpUsed == true; // 3) when there is regPtrDsc for "this", but keepThisAlive == true; // etc. 
@@ -633,7 +633,7 @@ bool GCInfo::gcIsUntrackedLocalOrNonEnregisteredArg(unsigned varNum, bool* pKeep } return false; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 return true; } diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 9cf06c7bb1fcb..2712e11a8f706 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -6672,9 +6672,9 @@ bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse) case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_PHI_ARG: case GT_JMPTABLE: case GT_PHYSREG: @@ -9423,9 +9423,9 @@ GenTree* Compiler::gtCloneExpr(GenTree* tree) copy = new (this, oper) GenTree(oper, tree->gtType); goto DONE; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_JMP: copy = new (this, oper) GenTreeVal(oper, tree->gtType, tree->AsVal()->gtVal1); goto DONE; @@ -10240,9 +10240,9 @@ GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node) case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_PHI_ARG: case GT_JMPTABLE: case GT_PHYSREG: @@ -11775,24 +11775,22 @@ void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, cons ilName = "OutArgs"; } #endif // FEATURE_FIXED_OUT_ARGS -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) else if (lclNum == lvaShadowSPslotsVar) { ilName = "EHSlots"; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 #ifdef JIT32_GCENCODER else if (lclNum == lvaLocAllocSPvar) { ilName = "LocAllocSP"; } #endif // JIT32_GCENCODER -#if defined(FEATURE_EH_FUNCLETS) 
else if (lclNum == lvaPSPSym) { ilName = "PSPSym"; } -#endif // FEATURE_EH_FUNCLETS else { ilKind = "tmp"; @@ -12356,11 +12354,11 @@ void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack) } break; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: printf(" endNstLvl=%d", tree->AsVal()->gtVal1); break; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 // Vanilla leaves. No qualifying information available. So do nothing diff --git a/src/coreclr/jit/gtlist.h b/src/coreclr/jit/gtlist.h index 817b27a936a56..1d442f2767372 100644 --- a/src/coreclr/jit/gtlist.h +++ b/src/coreclr/jit/gtlist.h @@ -282,9 +282,9 @@ GTNODE(START_PREEMPTGC , GenTree ,0,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHI GTNODE(PROF_HOOK , GenTree ,0,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // Profiler Enter/Leave/TailCall hook. GTNODE(RETFILT , GenTreeOp ,0,1,GTK_UNOP|GTK_NOVALUE) // End filter with TYP_I_IMPL return value. -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) GTNODE(END_LFIN , GenTreeVal ,0,0,GTK_LEAF|GTK_NOVALUE) // End locally-invoked finally. 
-#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 //----------------------------------------------------------------------------- // Swift interop-specific nodes: diff --git a/src/coreclr/jit/gtstructs.h b/src/coreclr/jit/gtstructs.h index e6823478a3c9a..8b82bc42b214e 100644 --- a/src/coreclr/jit/gtstructs.h +++ b/src/coreclr/jit/gtstructs.h @@ -50,7 +50,7 @@ GTSTRUCT_0(UnOp , GT_OP) GTSTRUCT_0(Op , GT_OP) -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) GTSTRUCT_2(Val , GT_END_LFIN, GT_JMP) #else GTSTRUCT_1(Val , GT_JMP) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 0d1df79812f03..7c08570aafbaa 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -4309,9 +4309,9 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op) // After this function, the BBJ_LEAVE block has been converted to a different type. // -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) -void Compiler::impImportLeave(BasicBlock* block) +void Compiler::impImportLeaveEHRegions(BasicBlock* block) { #ifdef DEBUG if (verbose) @@ -4594,10 +4594,17 @@ void Compiler::impImportLeave(BasicBlock* block) #endif // DEBUG } -#else // FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 void Compiler::impImportLeave(BasicBlock* block) { +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) + { + return impImportLeaveEHRegions(block); + } +#endif + #ifdef DEBUG if (verbose) { @@ -4723,10 +4730,8 @@ void Compiler::impImportLeave(BasicBlock* block) BasicBlock* callBlock; - if (step == nullptr) + if (step == nullptr && UsesCallFinallyThunks()) { -#if FEATURE_EH_CALLFINALLY_THUNKS - // Put the call to the finally in the enclosing region. unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 
0 : HBtab->ebdEnclosingTryIndex + 1; @@ -4757,9 +4762,9 @@ void Compiler::impImportLeave(BasicBlock* block) XTnum, block->bbNum, callBlock->bbNum); } #endif - -#else // !FEATURE_EH_CALLFINALLY_THUNKS - + } + else if (step == nullptr) // && !UsesCallFinallyThunks() + { callBlock = block; // callBlock calls the finally handler @@ -4775,8 +4780,6 @@ void Compiler::impImportLeave(BasicBlock* block) XTnum, callBlock->bbNum); } #endif - -#endif // !FEATURE_EH_CALLFINALLY_THUNKS } else { @@ -4799,8 +4802,7 @@ void Compiler::impImportLeave(BasicBlock* block) assert(step->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLYRET, BBJ_EHCATCHRET)); assert((step == block) || !step->HasInitializedTarget()); -#if FEATURE_EH_CALLFINALLY_THUNKS - if (step->KindIs(BBJ_EHCATCHRET)) + if (UsesCallFinallyThunks() && step->KindIs(BBJ_EHCATCHRET)) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. @@ -4832,17 +4834,24 @@ void Compiler::impImportLeave(BasicBlock* block) step = step2; assert(stepType == ST_Catch); // Leave it as catch type for now. } -#endif // FEATURE_EH_CALLFINALLY_THUNKS -#if FEATURE_EH_CALLFINALLY_THUNKS - unsigned callFinallyTryIndex = - (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; - unsigned callFinallyHndIndex = - (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; -#else // !FEATURE_EH_CALLFINALLY_THUNKS - unsigned callFinallyTryIndex = XTnum + 1; - unsigned callFinallyHndIndex = 0; // don't care -#endif // !FEATURE_EH_CALLFINALLY_THUNKS + unsigned callFinallyTryIndex; + unsigned callFinallyHndIndex; + + if (UsesCallFinallyThunks()) + { + callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) + ? 0 + : HBtab->ebdEnclosingTryIndex + 1; + callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) + ? 
0 + : HBtab->ebdEnclosingHndIndex + 1; + } + else + { + callFinallyTryIndex = XTnum + 1; + callFinallyHndIndex = 0; // don't care + } assert(step->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLYRET, BBJ_EHCATCHRET)); assert((step == block) || !step->HasInitializedTarget()); @@ -5051,15 +5060,12 @@ void Compiler::impImportLeave(BasicBlock* block) #endif // DEBUG } -#endif // FEATURE_EH_FUNCLETS - /*****************************************************************************/ // This is called when reimporting a leave block. It resets the JumpKind, // JumpDest, and bbNext to the original values void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) { -#if defined(FEATURE_EH_FUNCLETS) // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1) // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0, // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we @@ -5082,7 +5088,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. 
- if (block->KindIs(BBJ_CALLFINALLY)) + if (UsesFunclets() && block->KindIs(BBJ_CALLFINALLY)) { BasicBlock* dupBlock = BasicBlock::New(this); dupBlock->CopyFlags(block); @@ -5112,7 +5118,6 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) } #endif } -#endif // FEATURE_EH_FUNCLETS fgInitBBLookup(); diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 329f1c602cf98..2270d5b165636 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -243,9 +243,16 @@ void EHblkDsc::DispEntry(unsigned XTnum) { printf(" %2u ::", XTnum); -#if !defined(FEATURE_EH_FUNCLETS) - printf(" %2u ", XTnum, ebdHandlerNestingLevel); -#endif // !FEATURE_EH_FUNCLETS +#if defined(FEATURE_EH_WINDOWS_X86) + if (ebdHandlerNestingLevel == 0) + { + printf(" "); + } + else + { + printf(" %2u ", ebdHandlerNestingLevel); + } +#endif // FEATURE_EH_WINDOWS_X86 if (ebdEnclosingTryIndex == NO_ENCLOSING_INDEX) { @@ -613,17 +620,19 @@ bool Compiler::bbIsHandlerBeg(const BasicBlock* block) bool Compiler::ehHasCallableHandlers() { -#if defined(FEATURE_EH_FUNCLETS) - - // Any EH in the function? - - return compHndBBtabCount > 0; - -#else // !FEATURE_EH_FUNCLETS - - return ehNeedsShadowSPslots(); - -#endif // !FEATURE_EH_FUNCLETS + if (UsesFunclets()) + { + // Any EH in the function? 
+ return compHndBBtabCount > 0; + } + else + { +#if defined(FEATURE_EH_WINDOWS_X86) + return ehNeedsShadowSPslots(); +#else + return false; +#endif // FEATURE_EH_WINDOWS_X86 + } } /****************************************************************************************** @@ -897,12 +906,15 @@ unsigned Compiler::ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTr assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); -#if FEATURE_EH_CALLFINALLY_THUNKS - return ehGetDsc(finallyIndex)->ebdGetEnclosingRegionIndex(inTryRegion); -#else - *inTryRegion = true; - return finallyIndex; -#endif + if (UsesCallFinallyThunks()) + { + return ehGetDsc(finallyIndex)->ebdGetEnclosingRegionIndex(inTryRegion); + } + else + { + *inTryRegion = true; + return finallyIndex; + } } void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** startBlock, BasicBlock** lastBlock) @@ -912,35 +924,38 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** st assert(startBlock != nullptr); assert(lastBlock != nullptr); -#if FEATURE_EH_CALLFINALLY_THUNKS - bool inTryRegion; - unsigned callFinallyRegionIndex = ehGetCallFinallyRegionIndex(finallyIndex, &inTryRegion); - - if (callFinallyRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX) + if (UsesCallFinallyThunks()) { - *startBlock = fgFirstBB; - *lastBlock = fgLastBBInMainFunction(); - } - else - { - EHblkDsc* ehDsc = ehGetDsc(callFinallyRegionIndex); + bool inTryRegion; + unsigned callFinallyRegionIndex = ehGetCallFinallyRegionIndex(finallyIndex, &inTryRegion); - if (inTryRegion) + if (callFinallyRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX) { - *startBlock = ehDsc->ebdTryBeg; - *lastBlock = ehDsc->ebdTryLast; + *startBlock = fgFirstBB; + *lastBlock = fgLastBBInMainFunction(); } else { - *startBlock = ehDsc->ebdHndBeg; - *lastBlock = ehDsc->ebdHndLast; + EHblkDsc* ehDsc = ehGetDsc(callFinallyRegionIndex); + + if (inTryRegion) + { + *startBlock = 
ehDsc->ebdTryBeg; + *lastBlock = ehDsc->ebdTryLast; + } + else + { + *startBlock = ehDsc->ebdHndBeg; + *lastBlock = ehDsc->ebdHndLast; + } } } -#else // !FEATURE_EH_CALLFINALLY_THUNKS - EHblkDsc* ehDsc = ehGetDsc(finallyIndex); - *startBlock = ehDsc->ebdTryBeg; - *lastBlock = ehDsc->ebdTryLast; -#endif // !FEATURE_EH_CALLFINALLY_THUNKS + else + { + EHblkDsc* ehDsc = ehGetDsc(finallyIndex); + *startBlock = ehDsc->ebdTryBeg; + *lastBlock = ehDsc->ebdTryLast; + } } #ifdef DEBUG @@ -989,8 +1004,6 @@ bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsign #endif // DEBUG -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * * Are there (or will there be) any funclets in the function? @@ -998,7 +1011,14 @@ bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsign bool Compiler::ehAnyFunclets() { - return compHndBBtabCount > 0; // if there is any EH, there will be funclets + if (UsesFunclets()) + { + return compHndBBtabCount > 0; // if there is any EH, there will be funclets + } + else + { + return false; + } } /***************************************************************************** @@ -1010,17 +1030,24 @@ bool Compiler::ehAnyFunclets() unsigned Compiler::ehFuncletCount() { - unsigned funcletCnt = 0; - - for (EHblkDsc* const HBtab : EHClauses(this)) + if (UsesFunclets()) { - if (HBtab->HasFilter()) + unsigned funcletCnt = 0; + + for (EHblkDsc* const HBtab : EHClauses(this)) { + if (HBtab->HasFilter()) + { + ++funcletCnt; + } ++funcletCnt; } - ++funcletCnt; + return funcletCnt; + } + else + { + return 0; } - return funcletCnt; } /***************************************************************************** @@ -1037,36 +1064,41 @@ unsigned Compiler::ehFuncletCount() */ unsigned Compiler::bbThrowIndex(BasicBlock* blk) { - if (!blk->hasTryIndex() && !blk->hasHndIndex()) + if (UsesFunclets()) { - return -1; - } + if (!blk->hasTryIndex() && !blk->hasHndIndex()) + 
{ + return -1; + } - const unsigned tryIndex = blk->hasTryIndex() ? blk->getTryIndex() : USHRT_MAX; - const unsigned hndIndex = blk->hasHndIndex() ? blk->getHndIndex() : USHRT_MAX; - assert(tryIndex != hndIndex); - assert(tryIndex != USHRT_MAX || hndIndex != USHRT_MAX); + const unsigned tryIndex = blk->hasTryIndex() ? blk->getTryIndex() : USHRT_MAX; + const unsigned hndIndex = blk->hasHndIndex() ? blk->getHndIndex() : USHRT_MAX; + assert(tryIndex != hndIndex); + assert(tryIndex != USHRT_MAX || hndIndex != USHRT_MAX); - if (tryIndex < hndIndex) - { - // The most enclosing region is a try body, use it - assert(tryIndex <= 0x3FFFFFFF); - return tryIndex; - } + if (tryIndex < hndIndex) + { + // The most enclosing region is a try body, use it + assert(tryIndex <= 0x3FFFFFFF); + return tryIndex; + } + + // The most enclosing region is a handler which will be a funclet + // Now we have to figure out if blk is in the filter or handler + assert(hndIndex <= 0x3FFFFFFF); + if (ehGetDsc(hndIndex)->InFilterRegionBBRange(blk)) + { + return hndIndex | 0x40000000; + } - // The most enclosing region is a handler which will be a funclet - // Now we have to figure out if blk is in the filter or handler - assert(hndIndex <= 0x3FFFFFFF); - if (ehGetDsc(hndIndex)->InFilterRegionBBRange(blk)) + return hndIndex | 0x80000000; + } + else { - return hndIndex | 0x40000000; + return blk->bbTryIndex; } - - return hndIndex | 0x80000000; } -#endif // FEATURE_EH_FUNCLETS - /***************************************************************************** * Determine the emitter code cookie for a block, for unwind purposes. */ @@ -1352,27 +1384,26 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab) */ void Compiler::fgAllocEHTable() { -#if defined(FEATURE_EH_FUNCLETS) - - // We need to allocate space for EH clauses that will be used by funclets - // as well as one for each EH clause from the IL. Nested EH clauses pulled - // out as funclets create one EH clause for each enclosing region. 
Thus, - // the maximum number of clauses we will need might be very large. We allocate - // twice the number of EH clauses in the IL, which should be good in practice. - // In extreme cases, we might need to abandon this and reallocate. See - // fgAddEHTableEntry() for more details. + if (UsesFunclets()) + { + // We need to allocate space for EH clauses that will be used by funclets + // as well as one for each EH clause from the IL. Nested EH clauses pulled + // out as funclets create one EH clause for each enclosing region. Thus, + // the maximum number of clauses we will need might be very large. We allocate + // twice the number of EH clauses in the IL, which should be good in practice. + // In extreme cases, we might need to abandon this and reallocate. See + // fgAddEHTableEntry() for more details. #ifdef DEBUG - compHndBBtabAllocCount = info.compXcptnsCount; // force the resizing code to hit more frequently in DEBUG -#else // DEBUG - compHndBBtabAllocCount = info.compXcptnsCount * 2; -#endif // DEBUG - -#else // !FEATURE_EH_FUNCLETS - - compHndBBtabAllocCount = info.compXcptnsCount; - -#endif // !FEATURE_EH_FUNCLETS + compHndBBtabAllocCount = info.compXcptnsCount; // force the resizing code to hit more frequently in DEBUG +#else // DEBUG + compHndBBtabAllocCount = info.compXcptnsCount * 2; +#endif // DEBUG + } + else + { + compHndBBtabAllocCount = info.compXcptnsCount; + } compHndBBtab = new (this, CMK_BasicBlock) EHblkDsc[compHndBBtabAllocCount]; @@ -1492,8 +1523,6 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum) } } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * * Add a single exception table entry at index 'XTnum', [0 <= XTnum <= compHndBBtabCount]. 
@@ -1505,6 +1534,8 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum) */ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum) { + assert(UsesFunclets()); + if (XTnum != compHndBBtabCount) { // Update all enclosing links that will get invalidated by inserting an entry at 'XTnum' @@ -1600,8 +1631,6 @@ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum) return compHndBBtab + XTnum; } -#endif // FEATURE_EH_FUNCLETS - /***************************************************************************** * * Sort the EH table if necessary. @@ -2989,7 +3018,6 @@ void Compiler::fgVerifyHandlerTab() assert(!HBtab->ebdFilter->HasFlag(BBF_REMOVED)); } -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { assert(HBtab->ebdHndBeg->HasFlag(BBF_FUNCLET_BEG)); @@ -2999,7 +3027,6 @@ void Compiler::fgVerifyHandlerTab() assert(HBtab->ebdFilter->HasFlag(BBF_FUNCLET_BEG)); } } -#endif // FEATURE_EH_FUNCLETS } // I want to assert things about the relative ordering of blocks in the block list using @@ -3053,7 +3080,6 @@ void Compiler::fgVerifyHandlerTab() blockHndBegSet[i] = false; } -#if defined(FEATURE_EH_FUNCLETS) bool isLegalFirstFunclet = false; unsigned bbNumFirstFunclet = 0; @@ -3069,7 +3095,6 @@ void Compiler::fgVerifyHandlerTab() { assert(fgFirstFuncletBB == nullptr); } -#endif // FEATURE_EH_FUNCLETS for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { @@ -3118,7 +3143,6 @@ void Compiler::fgVerifyHandlerTab() assert((bbNumHndLast < bbNumTryBeg) || (bbNumTryLast < bbNumHndBeg)); } -#if defined(FEATURE_EH_FUNCLETS) // If funclets have been created, check the first funclet block. The first funclet block must be the // first block of a filter or handler. All filter/handler blocks must come after it. // Note that 'try' blocks might come either before or after it. 
If after, they will be nested within @@ -3167,7 +3191,6 @@ void Compiler::fgVerifyHandlerTab() } } } -#endif // FEATURE_EH_FUNCLETS // Check the 'try' region nesting, using ebdEnclosingTryIndex. // Only check one level of nesting, since we'll check the outer EH region (and its nesting) when we get to it @@ -3192,7 +3215,6 @@ void Compiler::fgVerifyHandlerTab() // this 'try' might be in a handler that is pulled out to the funclet region, while the outer 'try' // remains in the main function region. -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // If both the 'try' region and the outer 'try' region are in the main function area, then we can @@ -3225,7 +3247,6 @@ void Compiler::fgVerifyHandlerTab() assert((bbNumHndLast < bbNumOuterTryBeg) || (bbNumOuterTryLast < bbNumHndBeg)); } else -#endif // FEATURE_EH_FUNCLETS { if (multipleBegBlockNormalizationDone) { @@ -3273,7 +3294,6 @@ void Compiler::fgVerifyHandlerTab() // funclets have been created, it's harder to make any relationship asserts about the order of nested // handlers, which also have been made into funclets. -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { if (handlerBegIsTryBegNormalizationDone) @@ -3300,7 +3320,6 @@ void Compiler::fgVerifyHandlerTab() assert((bbNumHndLast < bbNumOuterHndBeg) || (bbNumOuterHndLast < bbNumHndBeg)); } else -#endif // FEATURE_EH_FUNCLETS { if (handlerBegIsTryBegNormalizationDone) { @@ -3360,9 +3379,7 @@ void Compiler::fgVerifyHandlerTab() } } -#if defined(FEATURE_EH_FUNCLETS) assert(!fgFuncletsCreated || isLegalFirstFunclet); -#endif // FEATURE_EH_FUNCLETS // Figure out what 'try' and handler index each basic block should have, // and check the blocks against that. 
This depends on the more nested EH @@ -3402,7 +3419,6 @@ void Compiler::fgVerifyHandlerTab() } } -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // Mark all the funclet 'try' indices correctly, since they do not exist in the linear 'try' region that @@ -3432,7 +3448,6 @@ void Compiler::fgVerifyHandlerTab() } } } -#endif // FEATURE_EH_FUNCLETS // Make sure that all blocks have the right index, including those blocks that should have zero (no EH region). for (BasicBlock* const block : Blocks()) @@ -3446,13 +3461,11 @@ void Compiler::fgVerifyHandlerTab() { assert(block->bbCatchTyp == BBCT_NONE); -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // Make sure blocks that aren't the first block of a funclet do not have the BBF_FUNCLET_BEG flag set. assert(!block->HasFlag(BBF_FUNCLET_BEG)); } -#endif // FEATURE_EH_FUNCLETS } // Check for legal block types @@ -3511,9 +3524,12 @@ void Compiler::fgDispHandlerTab() } printf("\nindex "); -#if !defined(FEATURE_EH_FUNCLETS) - printf("nest, "); -#endif // !FEATURE_EH_FUNCLETS +#if defined(FEATURE_EH_WINDOWS_X86) + if (!UsesFunclets()) + { + printf("nest, "); + } +#endif // FEATURE_EH_WINDOWS_X86 printf("eTry, eHnd\n"); unsigned XTnum; @@ -3988,8 +4004,6 @@ void Compiler::verCheckNestingLevel(EHNodeDsc* root) } } -#if defined(FEATURE_EH_FUNCLETS) - /***************************************************************************** * Is this an intra-handler control flow edge? 
* @@ -4013,14 +4027,14 @@ void Compiler::verCheckNestingLevel(EHNodeDsc* root) bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) { // Some simple preconditions (as stated above) + assert(UsesFunclets()); assert(!fgFuncletsCreated); assert(fgGetPredForBlock(block, predBlock) != nullptr); assert(block->hasHndIndex()); EHblkDsc* xtab = ehGetDsc(block->getHndIndex()); -#if FEATURE_EH_CALLFINALLY_THUNKS - if (xtab->HasFinallyHandler()) + if (UsesCallFinallyThunks() && xtab->HasFinallyHandler()) { assert((xtab->ebdHndBeg == block) || // The normal case (xtab->ebdHndBeg->NextIs(block) && @@ -4048,7 +4062,6 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) return false; } } -#endif // FEATURE_EH_CALLFINALLY_THUNKS assert(predBlock->hasHndIndex() || predBlock->hasTryIndex()); @@ -4117,6 +4130,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) bool Compiler::fgAnyIntraHandlerPreds(BasicBlock* block) { + assert(UsesFunclets()); assert(block->hasHndIndex()); assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler @@ -4132,7 +4146,7 @@ bool Compiler::fgAnyIntraHandlerPreds(BasicBlock* block) return false; } -#else // !FEATURE_EH_FUNCLETS +#if defined(FEATURE_EH_WINDOWS_X86) /***************************************************************************** * @@ -4145,6 +4159,8 @@ bool Compiler::fgRelocateEHRegions() { bool result = false; // Our return value + assert(!UsesFunclets()); + #ifdef DEBUG if (verbose) printf("*************** In fgRelocateEHRegions()\n"); @@ -4249,7 +4265,7 @@ bool Compiler::fgRelocateEHRegions() return result; } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 //------------------------------------------------------------------------ // fgExtendEHRegionBefore: Modify the EH table to account for a new block. 
@@ -4307,14 +4323,12 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) block->bbRefs--; bPrev->bbRefs++; -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { assert(block->HasFlag(BBF_FUNCLET_BEG)); bPrev->SetFlags(BBF_FUNCLET_BEG); block->RemoveFlags(BBF_FUNCLET_BEG); } -#endif // FEATURE_EH_FUNCLETS // If this is a handler for a filter, the last block of the filter will end with // a BBJ_EHFILTERRET block that jumps to the first block of its handler. @@ -4354,14 +4368,12 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) HBtab->ebdFilter = bPrev; bPrev->SetFlags(BBF_DONT_REMOVE); -#if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { assert(block->HasFlag(BBF_FUNCLET_BEG)); bPrev->SetFlags(BBF_FUNCLET_BEG); block->RemoveFlags(BBF_FUNCLET_BEG); } -#endif // FEATURE_EH_FUNCLETS bPrev->bbRefs++; } diff --git a/src/coreclr/jit/jiteh.h b/src/coreclr/jit/jiteh.h index 55b56ac9833c4..eb4c1bfbd5baf 100644 --- a/src/coreclr/jit/jiteh.h +++ b/src/coreclr/jit/jiteh.h @@ -91,11 +91,11 @@ struct EHblkDsc EHHandlerType ebdHandlerType; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // How nested is the try/handler within other *handlers* - 0 for outermost clauses, 1 for nesting with a handler, // etc. unsigned short ebdHandlerNestingLevel; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 static const unsigned short NO_ENCLOSING_INDEX = USHRT_MAX; @@ -110,8 +110,6 @@ struct EHblkDsc // The index of the enclosing outer handler region, NO_ENCLOSING_INDEX if none. unsigned short ebdEnclosingHndIndex; -#if defined(FEATURE_EH_FUNCLETS) - // After funclets are created, this is the index of corresponding FuncInfoDsc // Special case for Filter/Filter-handler: // Like the IL the filter funclet immediately precedes the filter-handler funclet. @@ -119,8 +117,6 @@ struct EHblkDsc // funclet index, just subtract 1. 
unsigned short ebdFuncIndex; -#endif // FEATURE_EH_FUNCLETS - IL_OFFSET ebdTryBegOffset; // IL offsets of EH try/end regions as they are imported IL_OFFSET ebdTryEndOffset; IL_OFFSET ebdFilterBegOffset; // only set if HasFilter() diff --git a/src/coreclr/jit/jitgcinfo.h b/src/coreclr/jit/jitgcinfo.h index 2258903a0603e..02fd49cead9cb 100644 --- a/src/coreclr/jit/jitgcinfo.h +++ b/src/coreclr/jit/jitgcinfo.h @@ -365,8 +365,6 @@ class GCInfo #endif // JIT32_GCENCODER -#if !defined(JIT32_GCENCODER) || defined(FEATURE_EH_FUNCLETS) - // This method expands the tracked stack variables lifetimes so that any lifetimes within filters // are reported as pinned. void gcMarkFilterVarsPinned(); @@ -378,8 +376,6 @@ class GCInfo void gcDumpVarPtrDsc(varPtrDsc* desc); #endif // DEBUG -#endif // !defined(JIT32_GCENCODER) || defined(FEATURE_EH_FUNCLETS) - #if DUMP_GC_TABLES void gcFindPtrsInFrame(const void* infoBlock, const void* codeBlock, unsigned offs); diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 042c411b306d0..b2d14a3fdf748 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -48,9 +48,9 @@ void Compiler::lvaInit() lvaTrackedFixed = false; // false: We can still add new tracked variables lvaDoneFrameLayout = NO_FRAME_LAYOUT; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) lvaShadowSPslotsVar = BAD_VAR_NUM; -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 lvaInlinedPInvokeFrameVar = BAD_VAR_NUM; lvaReversePInvokeFrameVar = BAD_VAR_NUM; #if FEATURE_FIXED_OUT_ARGS @@ -79,9 +79,7 @@ void Compiler::lvaInit() lvaInlineeReturnSpillTemp = BAD_VAR_NUM; gsShadowVarInfo = nullptr; -#if defined(FEATURE_EH_FUNCLETS) - lvaPSPSym = BAD_VAR_NUM; -#endif + lvaPSPSym = BAD_VAR_NUM; #if FEATURE_SIMD lvaSIMDInitTempVarNum = BAD_VAR_NUM; #endif // FEATURE_SIMD @@ -3924,8 +3922,9 @@ void Compiler::lvaSortByRefCount() { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NoRegVars)); } -#if 
defined(JIT32_GCENCODER) && defined(FEATURE_EH_FUNCLETS) - if (lvaIsOriginalThisArg(lclNum) && (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0) +#if defined(JIT32_GCENCODER) + if (UsesFunclets() && lvaIsOriginalThisArg(lclNum) && + (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0) { // For x86/Linux, we need to track "this". // However we cannot have it in tracked variables, so we set "this" pointer always untracked @@ -4576,11 +4575,11 @@ PhaseStatus Compiler::lvaMarkLocalVars() unsigned const lvaCountOrig = lvaCount; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) // Grab space for exception handling - if (ehNeedsShadowSPslots()) + if (!UsesFunclets() && ehNeedsShadowSPslots()) { // The first slot is reserved for ICodeManager::FixContext(ppEndRegion) // ie. the offset of the end-of-last-executed-filter @@ -4603,20 +4602,18 @@ PhaseStatus Compiler::lvaMarkLocalVars() lvaSetVarAddrExposed(lvaShadowSPslotsVar DEBUGARG(AddressExposedReason::EXTERNALLY_VISIBLE_IMPLICITLY)); } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 // PSPSym is not used by the NativeAOT ABI if (!IsTargetAbi(CORINFO_NATIVEAOT_ABI)) { -#if defined(FEATURE_EH_FUNCLETS) - if (ehNeedsPSPSym()) + if (UsesFunclets() && ehNeedsPSPSym()) { lvaPSPSym = lvaGrabTempWithImplicitUse(false DEBUGARG("PSPSym")); LclVarDsc* lclPSPSym = lvaGetDesc(lvaPSPSym); lclPSPSym->lvType = TYP_I_IMPL; lvaSetVarDoNotEnregister(lvaPSPSym DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } -#endif // FEATURE_EH_FUNCLETS } #ifdef JIT32_GCENCODER @@ -5482,7 +5479,7 @@ void Compiler::lvaFixVirtualFrameOffsets() { LclVarDsc* varDsc; -#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64) +#if defined(TARGET_AMD64) if (lvaPSPSym != BAD_VAR_NUM) { // We need to fix the offset of the PSPSym so there is no padding between it and the outgoing argument space. 
@@ -6580,7 +6577,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() } } -#if defined(FEATURE_EH_FUNCLETS) && (defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)) +#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) if (lvaPSPSym != BAD_VAR_NUM) { // On ARM/ARM64, if we need a PSPSym we allocate it early since funclets @@ -6589,7 +6586,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } -#endif // FEATURE_EH_FUNCLETS && (TARGET_ARMARCH || TARGET_LOONGARCH64 || TARGET_RISCV64) +#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 || TARGET_RISCV64 if (mustDoubleAlign) { @@ -6684,9 +6681,9 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() } #endif -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) /* If we need space for slots for shadow SP, reserve it now */ - if (ehNeedsShadowSPslots()) + if (!UsesFunclets() && ehNeedsShadowSPslots()) { noway_assert(codeGen->isFramePointerUsed()); // else offsets of locals of frameless methods will be incorrect if (!lvaReportParamTypeArg()) @@ -6703,7 +6700,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() } stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaShadowSPslotsVar, lvaLclSize(lvaShadowSPslotsVar), stkOffs); } -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 if (compGSReorderStackLayout) { @@ -6904,12 +6901,10 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() // These need to be located as the very first variables (highest memory address) // and so they have already been assigned an offset - if ( -#if defined(FEATURE_EH_FUNCLETS) - lclNum == lvaPSPSym || -#else + if (lclNum == lvaPSPSym || +#if defined(FEATURE_EH_WINDOWS_X86) lclNum == lvaShadowSPslotsVar || -#endif // FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 #ifdef 
JIT32_GCENCODER lclNum == lvaLocAllocSPvar || #endif // JIT32_GCENCODER @@ -7170,7 +7165,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() } } -#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64) +#if defined(TARGET_AMD64) if (lvaPSPSym != BAD_VAR_NUM) { // On AMD64, if we need a PSPSym, allocate it last, immediately above the outgoing argument @@ -7179,7 +7174,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } -#endif // FEATURE_EH_FUNCLETS && defined(TARGET_AMD64) +#endif // TARGET_AMD64 #ifdef TARGET_ARM64 if (!codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() && isFramePointerUsed()) // Note that currently we always have diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 05c65d2de3450..31a5e52ba0cd7 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -662,8 +662,6 @@ void Compiler::fgDispDebugScopes() * Mark variables live across their entire scope. */ -#if defined(FEATURE_EH_FUNCLETS) - void Compiler::fgExtendDbgScopes() { compResetScopeLists(); @@ -672,121 +670,107 @@ void Compiler::fgExtendDbgScopes() if (verbose) { printf("\nMarking vars alive over their entire scope :\n\n"); - } - - if (verbose) - { compDispScopeLists(); } #endif // DEBUG VARSET_TP inScope(VarSetOps::MakeEmpty(this)); - // Mark all tracked LocalVars live over their scope - walk the blocks - // keeping track of the current life, and assign it to the blocks. - - for (BasicBlock* const block : Blocks()) + if (UsesFunclets()) { - // If we get to a funclet, reset the scope lists and start again, since the block - // offsets will be out of order compared to the previous block. + // Mark all tracked LocalVars live over their scope - walk the blocks + // keeping track of the current life, and assign it to the blocks. 
- if (block->HasFlag(BBF_FUNCLET_BEG)) + for (BasicBlock* const block : Blocks()) { - compResetScopeLists(); - VarSetOps::ClearD(this, inScope); - } - - // Process all scopes up to the current offset + // If we get to a funclet, reset the scope lists and start again, since the block + // offsets will be out of order compared to the previous block. - if (block->bbCodeOffs != BAD_IL_OFFSET) - { - compProcessScopesUntil(block->bbCodeOffs, &inScope, &Compiler::fgBeginScopeLife, &Compiler::fgEndScopeLife); - } - - // Assign the current set of variables that are in scope to the block variables tracking this. + if (block->HasFlag(BBF_FUNCLET_BEG)) + { + compResetScopeLists(); + VarSetOps::ClearD(this, inScope); + } - fgMarkInScope(block, inScope); - } + // Process all scopes up to the current offset -#ifdef DEBUG - if (verbose) - { - fgDispDebugScopes(); - } -#endif // DEBUG -} + if (block->bbCodeOffs != BAD_IL_OFFSET) + { + compProcessScopesUntil(block->bbCodeOffs, &inScope, &Compiler::fgBeginScopeLife, + &Compiler::fgEndScopeLife); + } -#else // !FEATURE_EH_FUNCLETS + // Assign the current set of variables that are in scope to the block variables tracking this. -void Compiler::fgExtendDbgScopes() -{ - compResetScopeLists(); + fgMarkInScope(block, inScope); + } #ifdef DEBUG - if (verbose) - { - printf("\nMarking vars alive over their entire scope :\n\n"); - compDispScopeLists(); - } + if (verbose) + { + fgDispDebugScopes(); + } #endif // DEBUG + } +#if defined(FEATURE_EH_WINDOWS_X86) + else + { + compProcessScopesUntil(0, &inScope, &Compiler::fgBeginScopeLife, &Compiler::fgEndScopeLife); - VARSET_TP inScope(VarSetOps::MakeEmpty(this)); - compProcessScopesUntil(0, &inScope, &Compiler::fgBeginScopeLife, &Compiler::fgEndScopeLife); - - IL_OFFSET lastEndOffs = 0; - - // Mark all tracked LocalVars live over their scope - walk the blocks - // keeping track of the current life, and assign it to the blocks. 
+ IL_OFFSET lastEndOffs = 0; - for (BasicBlock* const block : Blocks()) - { - // Find scopes becoming alive. If there is a gap in the instr - // sequence, we need to process any scopes on those missing offsets. + // Mark all tracked LocalVars live over their scope - walk the blocks + // keeping track of the current life, and assign it to the blocks. - if (block->bbCodeOffs != BAD_IL_OFFSET) + for (BasicBlock* const block : Blocks()) { - if (lastEndOffs != block->bbCodeOffs) - { - noway_assert(lastEndOffs < block->bbCodeOffs); + // Find scopes becoming alive. If there is a gap in the instr + // sequence, we need to process any scopes on those missing offsets. - compProcessScopesUntil(block->bbCodeOffs, &inScope, &Compiler::fgBeginScopeLife, - &Compiler::fgEndScopeLife); - } - else + if (block->bbCodeOffs != BAD_IL_OFFSET) { - while (VarScopeDsc* varScope = compGetNextEnterScope(block->bbCodeOffs)) + if (lastEndOffs != block->bbCodeOffs) + { + noway_assert(lastEndOffs < block->bbCodeOffs); + + compProcessScopesUntil(block->bbCodeOffs, &inScope, &Compiler::fgBeginScopeLife, + &Compiler::fgEndScopeLife); + } + else { - fgBeginScopeLife(&inScope, varScope); + while (VarScopeDsc* varScope = compGetNextEnterScope(block->bbCodeOffs)) + { + fgBeginScopeLife(&inScope, varScope); + } } } - } - // Assign the current set of variables that are in scope to the block variables tracking this. + // Assign the current set of variables that are in scope to the block variables tracking this. - fgMarkInScope(block, inScope); + fgMarkInScope(block, inScope); - // Find scopes going dead. + // Find scopes going dead. 
- if (block->bbCodeOffsEnd != BAD_IL_OFFSET) - { - VarScopeDsc* varScope; - while ((varScope = compGetNextExitScope(block->bbCodeOffsEnd)) != nullptr) + if (block->bbCodeOffsEnd != BAD_IL_OFFSET) { - fgEndScopeLife(&inScope, varScope); - } + VarScopeDsc* varScope; + while ((varScope = compGetNextExitScope(block->bbCodeOffsEnd)) != nullptr) + { + fgEndScopeLife(&inScope, varScope); + } - lastEndOffs = block->bbCodeOffsEnd; + lastEndOffs = block->bbCodeOffsEnd; + } } - } - /* Everything should be out of scope by the end of the method. But if the - last BB got removed, then inScope may not be empty. */ + /* Everything should be out of scope by the end of the method. But if the + last BB got removed, then inScope may not be empty. */ - noway_assert(VarSetOps::IsEmpty(this, inScope) || lastEndOffs < info.compILCodeSize); + noway_assert(VarSetOps::IsEmpty(this, inScope) || lastEndOffs < info.compILCodeSize); + } +#endif // FEATURE_EH_WINDOWS_X86 } -#endif // !FEATURE_EH_FUNCLETS - /***************************************************************************** * * For debuggable code, we allow redundant assignments to vars @@ -1945,9 +1929,9 @@ void Compiler::fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALAR case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: -#endif // !FEATURE_EH_FUNCLETS +#endif // FEATURE_EH_WINDOWS_X86 case GT_SWITCH_TABLE: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp index f380daeab59ac..7fe119ccfd165 100644 --- a/src/coreclr/jit/lsraxarch.cpp +++ b/src/coreclr/jit/lsraxarch.cpp @@ -590,7 +590,7 @@ int LinearScan::BuildNode(GenTree* tree) BuildDef(tree, RBM_EXCEPTION_OBJECT); break; -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: srcCount = 0; assert(dstCount == 0); diff --git a/src/coreclr/jit/optimizer.cpp 
b/src/coreclr/jit/optimizer.cpp index 7daf7104271fd..e32fad95efa93 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -3184,8 +3184,7 @@ bool Compiler::optCanonicalizeExit(FlowGraphNaturalLoop* loop, BasicBlock* exit) JITDUMP("Canonicalize exit " FMT_BB " for " FMT_LP " to have only loop predecessors\n", exit->bbNum, loop->GetIndex()); -#if FEATURE_EH_CALLFINALLY_THUNKS - if (exit->KindIs(BBJ_CALLFINALLY)) + if (UsesCallFinallyThunks() && exit->KindIs(BBJ_CALLFINALLY)) { // Branches to a BBJ_CALLFINALLY _must_ come from inside its associated // try region, and when we have callfinally thunks the BBJ_CALLFINALLY @@ -3206,7 +3205,6 @@ bool Compiler::optCanonicalizeExit(FlowGraphNaturalLoop* loop, BasicBlock* exit) } } else -#endif // FEATURE_EH_CALLFINALLY_THUNKS { newExit = fgNewBBbefore(BBJ_ALWAYS, exit, false); fgSetEHRegionForNewPreheaderOrExit(newExit); diff --git a/src/coreclr/jit/scopeinfo.cpp b/src/coreclr/jit/scopeinfo.cpp index ddb766e94a0de..1dd0330a85913 100644 --- a/src/coreclr/jit/scopeinfo.cpp +++ b/src/coreclr/jit/scopeinfo.cpp @@ -1449,12 +1449,10 @@ void CodeGen::siInit() assert(compiler->opts.compScopeInfo); -#if defined(FEATURE_EH_FUNCLETS) if (compiler->info.compVarScopesCount > 0) { siInFuncletRegion = false; } -#endif // FEATURE_EH_FUNCLETS siLastEndOffs = 0; @@ -1482,7 +1480,6 @@ void CodeGen::siBeginBlock(BasicBlock* block) return; } -#if defined(FEATURE_EH_FUNCLETS) if (siInFuncletRegion) { return; @@ -1498,7 +1495,6 @@ void CodeGen::siBeginBlock(BasicBlock* block) return; } -#endif // FEATURE_EH_FUNCLETS #ifdef DEBUG if (verbose) @@ -1557,45 +1553,44 @@ void CodeGen::siOpenScopesForNonTrackedVars(const BasicBlock* block, unsigned in // Check if there are any scopes on the current block's start boundary. 
VarScopeDsc* varScope = nullptr; -#if defined(FEATURE_EH_FUNCLETS) - - // If we find a spot where the code offset isn't what we expect, because - // there is a gap, it might be because we've moved the funclets out of - // line. Catch up with the enter and exit scopes of the current block. - // Ignore the enter/exit scope changes of the missing scopes, which for - // funclets must be matched. - if (lastBlockILEndOffset != beginOffs) + if (compiler->UsesFunclets()) { - assert(beginOffs > 0); - assert(lastBlockILEndOffset < beginOffs); + // If we find a spot where the code offset isn't what we expect, because + // there is a gap, it might be because we've moved the funclets out of + // line. Catch up with the enter and exit scopes of the current block. + // Ignore the enter/exit scope changes of the missing scopes, which for + // funclets must be matched. + if (lastBlockILEndOffset != beginOffs) + { + assert(beginOffs > 0); + assert(lastBlockILEndOffset < beginOffs); - JITDUMP("Scope info: found offset hole. lastOffs=%u, currOffs=%u\n", lastBlockILEndOffset, beginOffs); + JITDUMP("Scope info: found offset hole. 
lastOffs=%u, currOffs=%u\n", lastBlockILEndOffset, beginOffs); - // Skip enter scopes - while ((varScope = compiler->compGetNextEnterScope(beginOffs - 1, true)) != nullptr) - { - /* do nothing */ - JITDUMP("Scope info: skipping enter scope, LVnum=%u\n", varScope->vsdLVnum); - } + // Skip enter scopes + while ((varScope = compiler->compGetNextEnterScope(beginOffs - 1, true)) != nullptr) + { + /* do nothing */ + JITDUMP("Scope info: skipping enter scope, LVnum=%u\n", varScope->vsdLVnum); + } - // Skip exit scopes - while ((varScope = compiler->compGetNextExitScope(beginOffs - 1, true)) != nullptr) - { - /* do nothing */ - JITDUMP("Scope info: skipping exit scope, LVnum=%u\n", varScope->vsdLVnum); + // Skip exit scopes + while ((varScope = compiler->compGetNextExitScope(beginOffs - 1, true)) != nullptr) + { + /* do nothing */ + JITDUMP("Scope info: skipping exit scope, LVnum=%u\n", varScope->vsdLVnum); + } } } - -#else // !FEATURE_EH_FUNCLETS - - if (lastBlockILEndOffset != beginOffs) + else { - assert(lastBlockILEndOffset < beginOffs); - return; + if (lastBlockILEndOffset != beginOffs) + { + assert(lastBlockILEndOffset < beginOffs); + return; + } } -#endif // !FEATURE_EH_FUNCLETS - while ((varScope = compiler->compGetNextEnterScope(beginOffs)) != nullptr) { LclVarDsc* lclVarDsc = compiler->lvaGetDesc(varScope->vsdVarNum); @@ -1632,12 +1627,10 @@ void CodeGen::siEndBlock(BasicBlock* block) { assert(compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0)); -#if defined(FEATURE_EH_FUNCLETS) if (siInFuncletRegion) { return; } -#endif // FEATURE_EH_FUNCLETS unsigned endOffs = block->bbCodeOffsEnd; diff --git a/src/coreclr/jit/targetamd64.h b/src/coreclr/jit/targetamd64.h index 7d1a2c8f08039..70f1e92812bcf 100644 --- a/src/coreclr/jit/targetamd64.h +++ b/src/coreclr/jit/targetamd64.h @@ -68,7 +68,6 @@ #define EMIT_TRACK_STACK_DEPTH 1 #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define 
FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. - #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #ifdef UNIX_AMD64_ABI #define ETW_EBP_FRAMED 1 // if 1 we cannot use EBP as a scratch register and must create EBP based frames for most methods #else // !UNIX_AMD64_ABI diff --git a/src/coreclr/jit/targetarm.h b/src/coreclr/jit/targetarm.h index ac9d72cab31f6..a03c307094ad2 100644 --- a/src/coreclr/jit/targetarm.h +++ b/src/coreclr/jit/targetarm.h @@ -40,7 +40,6 @@ // need to track stack depth, but this is currently necessary to get GC information reported at call sites. #define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. - #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods #define CSE_CONSTS 1 // Enable if we want to CSE constants diff --git a/src/coreclr/jit/targetarm64.h b/src/coreclr/jit/targetarm64.h index 2af309e4b365a..cccbfdc6bae6b 100644 --- a/src/coreclr/jit/targetarm64.h +++ b/src/coreclr/jit/targetarm64.h @@ -42,7 +42,6 @@ // need to track stack depth, but this is currently necessary to get GC information reported at call sites. #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. 
- #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods #define CSE_CONSTS 1 // Enable if we want to CSE constants diff --git a/src/coreclr/jit/targetloongarch64.h b/src/coreclr/jit/targetloongarch64.h index 736fd1406c304..b045c43df7dfa 100644 --- a/src/coreclr/jit/targetloongarch64.h +++ b/src/coreclr/jit/targetloongarch64.h @@ -47,8 +47,6 @@ // need to track stack depth, but this is currently necessary to get GC information reported at call sites. #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. - #define FEATURE_EH_FUNCLETS 1 - #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods #define CSE_CONSTS 1 // Enable if we want to CSE constants diff --git a/src/coreclr/jit/targetriscv64.h b/src/coreclr/jit/targetriscv64.h index 5ac82fa9a0097..33c1b0d491909 100644 --- a/src/coreclr/jit/targetriscv64.h +++ b/src/coreclr/jit/targetriscv64.h @@ -42,7 +42,6 @@ // need to track stack depth, but this is currently necessary to get GC information reported at call sites. #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. 
- #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods #define CSE_CONSTS 1 // Enable if we want to CSE constants diff --git a/src/coreclr/jit/targetx86.h b/src/coreclr/jit/targetx86.h index 3a861c3d7ef35..dfeb96ae9e977 100644 --- a/src/coreclr/jit/targetx86.h +++ b/src/coreclr/jit/targetx86.h @@ -53,13 +53,8 @@ // target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, // filter-handler, fault) and directly execute 'finally' clauses. - -#ifdef FEATURE_EH_FUNCLETS - #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, - // protected by "cloned finally" clauses. -#else - #define FEATURE_EH_CALLFINALLY_THUNKS 0 // Generate call-to-finally code in "thunks" in the enclosing EH region, - // protected by "cloned finally" clauses. +#if !defined(UNIX_X86_ABI) + #define FEATURE_EH_WINDOWS_X86 1 // Enable support for SEH regions #endif #define ETW_EBP_FRAMED 1 // if 1 we cannot use EBP as a scratch register and must create EBP based // frames for most methods diff --git a/src/coreclr/jit/unwind.cpp b/src/coreclr/jit/unwind.cpp index e1ff9bc464a16..a51a52ab21d64 100644 --- a/src/coreclr/jit/unwind.cpp +++ b/src/coreclr/jit/unwind.cpp @@ -15,8 +15,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#if defined(FEATURE_EH_FUNCLETS) - //------------------------------------------------------------------------ // Compiler::unwindGetFuncLocations: Get the start/end emitter locations for this // function or funclet. 
If 'getHotSectionData' is true, get the start/end locations @@ -53,6 +51,8 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc) { + assert(UsesFunclets()); + if (func->funKind == FUNC_ROOT) { // Since all funclets are pulled out of line, the main code size is everything @@ -134,8 +134,6 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, } } -#endif // FEATURE_EH_FUNCLETS - #if defined(FEATURE_CFI_SUPPORT) void Compiler::createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR cfiOpcode, short dwarfReg, INT offset) @@ -184,21 +182,22 @@ void Compiler::unwindBegPrologCFI() { assert(compGeneratingProlog); -#if defined(FEATURE_EH_FUNCLETS) - FuncInfoDsc* func = funCurrentFunc(); + if (UsesFunclets()) + { + FuncInfoDsc* func = funCurrentFunc(); - // There is only one prolog for a function/funclet, and it comes first. So now is - // a good time to initialize all the unwind data structures. + // There is only one prolog for a function/funclet, and it comes first. So now is + // a good time to initialize all the unwind data structures. 
- unwindGetFuncLocations(func, true, &func->startLoc, &func->endLoc); + unwindGetFuncLocations(func, true, &func->startLoc, &func->endLoc); - if (fgFirstColdBlock != nullptr) - { - unwindGetFuncLocations(func, false, &func->coldStartLoc, &func->coldEndLoc); - } + if (fgFirstColdBlock != nullptr) + { + unwindGetFuncLocations(func, false, &func->coldStartLoc, &func->coldEndLoc); + } - func->cfiCodes = new (getAllocator(CMK_UnwindInfo)) CFICodeVector(getAllocator()); -#endif // FEATURE_EH_FUNCLETS + func->cfiCodes = new (getAllocator(CMK_UnwindInfo)) CFICodeVector(getAllocator()); + } } void Compiler::unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat) diff --git a/src/coreclr/jit/unwindarmarch.cpp b/src/coreclr/jit/unwindarmarch.cpp index b292d74968f6a..51af7f24889d1 100644 --- a/src/coreclr/jit/unwindarmarch.cpp +++ b/src/coreclr/jit/unwindarmarch.cpp @@ -571,7 +571,6 @@ void Compiler::unwindReserveFunc(FuncInfoDsc* func) } #endif // DEBUG -#ifdef FEATURE_EH_FUNCLETS // If hot/cold splitting occurred at fgFirstFuncletBB, then the main body is not split. 
const bool splitAtFirstFunclet = (funcHasColdSection && (fgFirstColdBlock == fgFirstFuncletBB)); @@ -579,7 +578,6 @@ void Compiler::unwindReserveFunc(FuncInfoDsc* func) { funcHasColdSection = false; } -#endif // FEATURE_EH_FUNCLETS #if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) diff --git a/src/coreclr/jit/unwindx86.cpp b/src/coreclr/jit/unwindx86.cpp index 32d077429af6a..40e720d40c33a 100644 --- a/src/coreclr/jit/unwindx86.cpp +++ b/src/coreclr/jit/unwindx86.cpp @@ -70,16 +70,17 @@ void Compiler::unwindSaveReg(regNumber reg, unsigned offset) // void Compiler::unwindReserve() { -#if defined(FEATURE_EH_FUNCLETS) - assert(!compGeneratingProlog); - assert(!compGeneratingEpilog); - - assert(compFuncInfoCount > 0); - for (unsigned funcIdx = 0; funcIdx < compFuncInfoCount; funcIdx++) + if (UsesFunclets()) { - unwindReserveFunc(funGetFunc(funcIdx)); + assert(!compGeneratingProlog); + assert(!compGeneratingEpilog); + + assert(compFuncInfoCount > 0); + for (unsigned funcIdx = 0; funcIdx < compFuncInfoCount; funcIdx++) + { + unwindReserveFunc(funGetFunc(funcIdx)); + } } -#endif } //------------------------------------------------------------------------ @@ -91,19 +92,19 @@ void Compiler::unwindReserve() // void Compiler::unwindEmit(void* pHotCode, void* pColdCode) { -#if defined(FEATURE_EH_FUNCLETS) - assert(!compGeneratingProlog); - assert(!compGeneratingEpilog); - - assert(compFuncInfoCount > 0); - for (unsigned funcIdx = 0; funcIdx < compFuncInfoCount; funcIdx++) + if (UsesFunclets()) { - unwindEmitFunc(funGetFunc(funcIdx), pHotCode, pColdCode); + assert(!compGeneratingProlog); + assert(!compGeneratingEpilog); + + assert(compFuncInfoCount > 0); + for (unsigned funcIdx = 0; funcIdx < compFuncInfoCount; funcIdx++) + { + unwindEmitFunc(funGetFunc(funcIdx), pHotCode, pColdCode); + } } -#endif // FEATURE_EH_FUNCLETS } -#if defined(FEATURE_EH_FUNCLETS) //------------------------------------------------------------------------ // Compiler::unwindReserveFunc: 
Reserve the unwind information from the VM for a // given main function or funclet. @@ -113,6 +114,7 @@ void Compiler::unwindEmit(void* pHotCode, void* pColdCode) // void Compiler::unwindReserveFunc(FuncInfoDsc* func) { + assert(UsesFunclets()); unwindReserveFuncHelper(func, true); if (fgFirstColdBlock != nullptr) @@ -280,5 +282,3 @@ void Compiler::unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pCo eeAllocUnwindInfo((BYTE*)pHotCode, (BYTE*)pColdCode, startOffset, endOffset, sizeof(UNWIND_INFO), (BYTE*)&unwindInfo, (CorJitFuncKind)func->funKind); } - -#endif // FEATURE_EH_FUNCLETS diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 889e0227e992d..755c19d1386a8 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -11338,7 +11338,7 @@ void Compiler::fgValueNumberTree(GenTree* tree) case GT_NOP: case GT_JMP: // Control flow case GT_LABEL: // Control flow -#if !defined(FEATURE_EH_FUNCLETS) +#if defined(FEATURE_EH_WINDOWS_X86) case GT_END_LFIN: // Control flow #endif tree->gtVNPair = vnStore->VNPForVoid(); diff --git a/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs b/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs index 1d9ef515d4e49..010b23ed6f581 100644 --- a/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs +++ b/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs @@ -146,12 +146,6 @@ private static string GetTargetSpec(TargetDetails target) { targetOSComponent = "universal"; } -#if !READYTORUN - else if (target.OperatingSystem == TargetOS.Windows && target.Architecture == TargetArchitecture.X86) - { - targetOSComponent = "win_aot"; - } -#endif else { targetOSComponent = target.OperatingSystem == TargetOS.Windows ? "win" : "unix";