diff --git a/eng/pipelines/common/templates/runtimes/run-test-job.yml b/eng/pipelines/common/templates/runtimes/run-test-job.yml
index 3da5b9c1c24c7..3e24aee06bf01 100644
--- a/eng/pipelines/common/templates/runtimes/run-test-job.yml
+++ b/eng/pipelines/common/templates/runtimes/run-test-job.yml
@@ -456,6 +456,7 @@ jobs:
scenarios:
- jitosr
- jitosr_stress
+ - jitosr_uncommon
- jitguardeddevirtualization
- jitehwritethru
- jitobjectstackallocation
diff --git a/src/coreclr/src/inc/corinfo.h b/src/coreclr/src/inc/corinfo.h
index 635da5887ade4..b7b4da6d1716e 100644
--- a/src/coreclr/src/inc/corinfo.h
+++ b/src/coreclr/src/inc/corinfo.h
@@ -217,11 +217,11 @@ TODO: Talk about initializing strutures before use
#endif
#endif
-SELECTANY const GUID JITEEVersionIdentifier = { /* 6ae798bf-44bd-4e8a-b8fc-dbe1d1f4029e */
- 0x6ae798bf,
- 0x44bd,
- 0x4e8a,
- {0xb8, 0xfc, 0xdb, 0xe1, 0xd1, 0xf4, 0x02, 0x9e}
+SELECTANY const GUID JITEEVersionIdentifier = { /* fd36d9ca-54c6-4949-ba01-ae257a9f2da3 */
+ 0xfd36d9ca,
+ 0x54c6,
+ 0x4949,
+ {0xba, 0x01, 0xae, 0x25, 0x7a, 0x9f, 0x2d, 0xa3}
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -634,6 +634,7 @@ enum CorInfoHelpFunc
CORINFO_HELP_STACK_PROBE, // Probes each page of the allocated stack frame
CORINFO_HELP_PATCHPOINT, // Notify runtime that code has reached a patchpoint
+ CORINFO_HELP_UNCOMMON_PATCHPOINT, // Notify runtime that code has reached a part of the method that wasn't originally jitted.
CORINFO_HELP_COUNT,
};
diff --git a/src/coreclr/src/inc/jithelpers.h b/src/coreclr/src/inc/jithelpers.h
index 9add356aa9051..2ac61c6cf8345 100644
--- a/src/coreclr/src/inc/jithelpers.h
+++ b/src/coreclr/src/inc/jithelpers.h
@@ -354,6 +354,7 @@
#endif
JITHELPER(CORINFO_HELP_PATCHPOINT, JIT_Patchpoint, CORINFO_HELP_SIG_REG_ONLY)
+ JITHELPER(CORINFO_HELP_UNCOMMON_PATCHPOINT, JIT_UncommonPatchpoint, CORINFO_HELP_SIG_REG_ONLY)
#undef JITHELPER
#undef DYNAMICJITHELPER
diff --git a/src/coreclr/src/jit/assertionprop.cpp b/src/coreclr/src/jit/assertionprop.cpp
index 916921f36cd3f..2f18f897205a6 100644
--- a/src/coreclr/src/jit/assertionprop.cpp
+++ b/src/coreclr/src/jit/assertionprop.cpp
@@ -4545,6 +4545,7 @@ class AssertionPropFlowCallback
ASSERT_TP* mJumpDestGen;
BitVecTraits* apTraits;
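+ // Compiler instance; used by MergeHandler to identify OSR try entry blocks.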
+ Compiler* m_compiler;
public:
AssertionPropFlowCallback(Compiler* pCompiler, ASSERT_TP* jumpDestOut, ASSERT_TP* jumpDestGen)
@@ -4553,6 +4554,7 @@ class AssertionPropFlowCallback
, mJumpDestOut(jumpDestOut)
, mJumpDestGen(jumpDestGen)
, apTraits(pCompiler->apTraits)
+ , m_compiler(pCompiler)
{
}
@@ -4590,7 +4592,50 @@ class AssertionPropFlowCallback
// It means we can propagate only assertions that are valid for the whole try region.
void MergeHandler(BasicBlock* block, BasicBlock* firstTryBlock, BasicBlock* lastTryBlock)
{
- BitVecOps::IntersectionD(apTraits, block->bbAssertionIn, firstTryBlock->bbAssertionIn);
+ JITDUMP("AssertionPropCallback::MergeHandler : " FMT_BB " in -> %s, try-beg " FMT_BB " try-end " FMT_BB "\n\n",
+ block->bbNum, BitVecOps::ToString(apTraits, block->bbAssertionIn), firstTryBlock->bbNum,
+ lastTryBlock->bbNum);
+
+ // For OSR the first try block may not be the try entry block for
+ // control flow purposes. Find the actual entry blocks.
+ //
+ if (m_compiler->opts.IsOSR())
+ {
+ JITDUMP("OSR case -- scanning try range for possible non-first try entry BB\n");
+ assert(firstTryBlock->hasTryIndex());
+
+ const unsigned tryIndex = firstTryBlock->getTryIndex();
+ bool foundEntry = false;
+
+ // Scan try extent looking for a block with a predecessor that is not in the try.
+ for (BasicBlock* tryBlock = firstTryBlock; tryBlock != lastTryBlock->bbNext; tryBlock = tryBlock->bbNext)
+ {
+ assert(m_compiler->bbInTryRegions(tryIndex, tryBlock));
+
+ for (flowList* pred = tryBlock->bbPreds; pred; pred = pred->flNext)
+ {
+ BasicBlock* predBlock = pred->flBlock;
+
+ if (!m_compiler->bbInTryRegions(tryIndex, predBlock))
+ {
+ JITDUMP("Pred " FMT_BB " of " FMT_BB " is outside try, using latter as entry\n",
+ predBlock->bbNum, tryBlock->bbNum);
+ BitVecOps::IntersectionD(apTraits, block->bbAssertionIn, tryBlock->bbAssertionIn);
+ foundEntry = true;
+ }
+ }
+ }
+
+ // We should have found at least one entry. We might see more than one.
+ assert(foundEntry);
+ }
+ else
+ {
+ // For the normal case the first block of the try is the try entry block.
+ //
+ BitVecOps::IntersectionD(apTraits, block->bbAssertionIn, firstTryBlock->bbAssertionIn);
+ }
+
BitVecOps::IntersectionD(apTraits, block->bbAssertionIn, lastTryBlock->bbAssertionOut);
}
diff --git a/src/coreclr/src/jit/block.cpp b/src/coreclr/src/jit/block.cpp
index 3bd45dd1ca85b..ffcde2c04e94d 100644
--- a/src/coreclr/src/jit/block.cpp
+++ b/src/coreclr/src/jit/block.cpp
@@ -352,6 +352,10 @@ void BasicBlock::dspFlags()
{
printf("ppoint ");
}
+ if (bbFlags & BBF_UNCOMMON_PATCHPOINT)
+ {
+ printf("u-ppoint ");
+ }
if (bbFlags & BBF_RETLESS_CALL)
{
printf("retless ");
diff --git a/src/coreclr/src/jit/block.h b/src/coreclr/src/jit/block.h
index 5e49bb5c3f366..55b009dda5ffc 100644
--- a/src/coreclr/src/jit/block.h
+++ b/src/coreclr/src/jit/block.h
@@ -446,6 +446,7 @@ struct BasicBlock : private LIR::Range
#define BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY 0x800000000 // Block is dominated by exceptional entry.
#define BBF_BACKWARD_JUMP_TARGET 0x1000000000 // Block is a target of a backward jump
#define BBF_PATCHPOINT 0x2000000000 // Block is a patchpoint
+#define BBF_UNCOMMON_PATCHPOINT 0x4000000000 // Block is an uncommon patchpoint
// clang-format on
diff --git a/src/coreclr/src/jit/compiler.cpp b/src/coreclr/src/jit/compiler.cpp
index 61e4c14f27ea7..767a26f40085b 100644
--- a/src/coreclr/src/jit/compiler.cpp
+++ b/src/coreclr/src/jit/compiler.cpp
@@ -4906,7 +4906,7 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
//
void Compiler::generatePatchpointInfo()
{
- if (!doesMethodHavePatchpoints())
+ if (!doesMethodHavePatchpoints() && !doesMethodHaveUncommonPatchpoints())
{
// Nothing to report
return;
diff --git a/src/coreclr/src/jit/compiler.h b/src/coreclr/src/jit/compiler.h
index 4ec9095c25ee4..576fe54b177c0 100644
--- a/src/coreclr/src/jit/compiler.h
+++ b/src/coreclr/src/jit/compiler.h
@@ -6370,16 +6370,17 @@ class Compiler
}
};
-#define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array
-#define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type.
-#define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores.
-#define OMF_HAS_VTABLEREF 0x00000008 // Method contains method table reference.
-#define OMF_HAS_NULLCHECK 0x00000010 // Method contains null check.
-#define OMF_HAS_FATPOINTER 0x00000020 // Method contains call, that needs fat pointer transformation.
-#define OMF_HAS_OBJSTACKALLOC 0x00000040 // Method contains an object allocated on the stack.
-#define OMF_HAS_GUARDEDDEVIRT 0x00000080 // Method contains guarded devirtualization candidate
-#define OMF_HAS_EXPRUNTIMELOOKUP 0x00000100 // Method contains a runtime lookup to an expandable dictionary.
-#define OMF_HAS_PATCHPOINT 0x00000200 // Method contains patchpoints
+#define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array
+#define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type.
+#define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores.
+#define OMF_HAS_VTABLEREF 0x00000008 // Method contains method table reference.
+#define OMF_HAS_NULLCHECK 0x00000010 // Method contains null check.
+#define OMF_HAS_FATPOINTER 0x00000020 // Method contains call, that needs fat pointer transformation.
+#define OMF_HAS_OBJSTACKALLOC 0x00000040 // Method contains an object allocated on the stack.
+#define OMF_HAS_GUARDEDDEVIRT 0x00000080 // Method contains guarded devirtualization candidate
+#define OMF_HAS_EXPRUNTIMELOOKUP 0x00000100 // Method contains a runtime lookup to an expandable dictionary.
+#define OMF_HAS_PATCHPOINT 0x00000200 // Method contains patchpoints
+#define OMF_HAS_UNCOMMON_PATCHPOINT 0x00000400 // Method contains uncommon patchpoints
bool doesMethodHaveFatPointer()
{
@@ -6446,6 +6447,16 @@ class Compiler
optMethodFlags |= OMF_HAS_PATCHPOINT;
}
+ bool doesMethodHaveUncommonPatchpoints()
+ {
+ return (optMethodFlags & OMF_HAS_UNCOMMON_PATCHPOINT) != 0;
+ }
+
+ void setMethodHasUncommonPatchpoint()
+ {
+ optMethodFlags |= OMF_HAS_UNCOMMON_PATCHPOINT;
+ }
+
unsigned optMethodFlags;
bool doesMethodHaveNoReturnCalls()
diff --git a/src/coreclr/src/jit/flowgraph.cpp b/src/coreclr/src/jit/flowgraph.cpp
index af6f834c7f1e6..daecee16c22f2 100644
--- a/src/coreclr/src/jit/flowgraph.cpp
+++ b/src/coreclr/src/jit/flowgraph.cpp
@@ -9974,6 +9974,16 @@ void Compiler::fgRemoveEmptyBlocks()
newTryEntry->clearHndIndex();
fgInsertBBafter(tryEntryPrev, newTryEntry);
+ // Generally this (unreachable) new entry can appear to "fall through"
+ // to the next block, but in cases where there's a nested try with an
+ // out-of-order handler, the next block may be a handler. So even though
+ // this new try entry block is unreachable, we need to give it a
+ // plausible flow target. The simplest option is to mark it as a throw.
+ if (bbIsHandlerBeg(newTryEntry->bbNext))
+ {
+ newTryEntry->bbJumpKind = BBJ_THROW;
+ }
+
JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to new " FMT_BB "\n",
XTnum + delCnt, oldTryEntry->bbNum, newTryEntry->bbNum);
}
diff --git a/src/coreclr/src/jit/importer.cpp b/src/coreclr/src/jit/importer.cpp
index a4e287614326d..95a3a57a5d941 100644
--- a/src/coreclr/src/jit/importer.cpp
+++ b/src/coreclr/src/jit/importer.cpp
@@ -10578,6 +10578,24 @@ void Compiler::impImportBlockCode(BasicBlock* block)
assert((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == 0);
}
+ // Mark stack-empty rare blocks to be considered for deferred compilation.
+ //
+ // Ideally these are conditionally executed blocks -- if the method is going
+ // to unconditionally throw there's not as much to be gained by deferring jitting.
+ // For now, we just screen out the entry bb.
+ //
+ // In general we might want to track all the IL stack-empty points so we can
+ // propagate rareness back through flow and place the uncommon patchpoints "earlier"
+ // so there are fewer overall.
+ //
+ if ((JitConfig.TC_UncommonPatchpoint() > 0) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) &&
+ (block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) &&
+ ((block->bbFlags & BBF_PATCHPOINT) == 0))
+ {
+ block->bbFlags |= BBF_UNCOMMON_PATCHPOINT;
+ setMethodHasUncommonPatchpoint();
+ }
+
#endif // FEATURE_ON_STACK_REPLACEMENT
/* Walk the opcodes that comprise the basic block */
diff --git a/src/coreclr/src/jit/jitconfigvalues.h b/src/coreclr/src/jit/jitconfigvalues.h
index 65b92b55ba133..297c3f90513ca 100644
--- a/src/coreclr/src/jit/jitconfigvalues.h
+++ b/src/coreclr/src/jit/jitconfigvalues.h
@@ -398,6 +398,8 @@ CONFIG_INTEGER(JitGuardedDevirtualizationGuessBestClass, W("JitGuardedDevirtuali
CONFIG_INTEGER(TC_OnStackReplacement, W("TC_OnStackReplacement"), 0)
// Initial patchpoint counter value used by jitted code
CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, W("TC_OnStackReplacement_InitialCounter"), 1000)
+// Enable uncommon patchpoints in Tier0 methods (aka partial compilation -- disabled by default)
+CONFIG_INTEGER(TC_UncommonPatchpoint, W("TC_UncommonPatchpoint"), 0)
#if defined(DEBUG)
// JitFunctionFile: Name of a file that contains a list of functions. If the currently compiled function is in the
diff --git a/src/coreclr/src/jit/patchpoint.cpp b/src/coreclr/src/jit/patchpoint.cpp
index 7116e780f4cd9..eee386cd768d6 100644
--- a/src/coreclr/src/jit/patchpoint.cpp
+++ b/src/coreclr/src/jit/patchpoint.cpp
@@ -13,10 +13,13 @@
// Insert patchpoint checks into Tier0 methods, based on locations identified
// during importation (see impImportBlockCode).
//
-// Policy decisions implemented here:
+// There are now two different types of patchpoints:
+// * loop based: enables OSR transitions in loops
+// * uncommon: allows partial compilation of the original method
//
+// Loop patchpoint policy decisions implemented here:
// * One counter per stack frame, regardless of the number of patchpoints.
-// * Shared counter value initialized to zero in prolog.
+// * Shared counter value initialized to a constant in the prolog.
// * Patchpoint trees fully expanded into jit IR. Deferring expansion could
// lead to more compact code and lessen size overhead for Tier0.
//
@@ -29,15 +32,13 @@
//
class PatchpointTransformer
{
- unsigned ppCounterLclNum;
const int HIGH_PROBABILITY = 99;
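+ // Shared loop patchpoint counter temp. Allocated lazily by TransformBlock,
+ // since a method with only uncommon patchpoints needs no counter.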
+ unsigned ppCounterLclNum;
Compiler* compiler;
public:
- PatchpointTransformer(Compiler* compiler) : compiler(compiler)
+ PatchpointTransformer(Compiler* compiler) : ppCounterLclNum(BAD_VAR_NUM), compiler(compiler)
{
- ppCounterLclNum = compiler->lvaGrabTemp(true DEBUGARG("patchpoint counter"));
- compiler->lvaTable[ppCounterLclNum].lvType = TYP_INT;
}
//------------------------------------------------------------------------
@@ -53,11 +54,9 @@ class PatchpointTransformer
compiler->fgEnsureFirstBBisScratch();
}
- BasicBlock* block = compiler->fgFirstBB;
- TransformEntry(block);
-
int count = 0;
- for (block = block->bbNext; block != nullptr; block = block->bbNext)
+
+ for (BasicBlock* block = compiler->fgFirstBB->bbNext; block != nullptr; block = block->bbNext)
{
if (block->bbFlags & BBF_PATCHPOINT)
{
@@ -72,11 +71,24 @@ class PatchpointTransformer
continue;
}
- JITDUMP("Patchpoint: instrumenting " FMT_BB "\n", block->bbNum);
+ JITDUMP("Patchpoint: loop patchpoint in " FMT_BB "\n", block->bbNum);
assert(block != compiler->fgFirstBB);
TransformBlock(block);
count++;
}
+ else if (block->bbFlags & BBF_UNCOMMON_PATCHPOINT)
+ {
+ if (compiler->ehGetBlockHndDsc(block) != nullptr)
+ {
+ JITDUMP("Patchpoint: skipping uncommon patchpoint for " FMT_BB " as it is in a handler\n",
+ block->bbNum);
+ continue;
+ }
+
+ JITDUMP("Patchpoint: uncommon patchpoint in " FMT_BB "\n", block->bbNum);
+ TransformUncommon(block);
+ count++;
+ }
}
return count;
@@ -119,6 +131,16 @@ class PatchpointTransformer
//
void TransformBlock(BasicBlock* block)
{
+ // If we haven't allocated the counter temp yet, set it up
+ if (ppCounterLclNum == BAD_VAR_NUM)
+ {
+ ppCounterLclNum = compiler->lvaGrabTemp(true DEBUGARG("patchpoint counter"));
+ compiler->lvaTable[ppCounterLclNum].lvType = TYP_INT;
+
+ // and initialize in the entry block
+ TransformEntry(compiler->fgFirstBB);
+ }
+
// Capture the IL offset
IL_OFFSET ilOffset = block->bbCodeOffs;
assert(ilOffset != BAD_IL_OFFSET);
@@ -185,6 +207,47 @@ class PatchpointTransformer
compiler->fgNewStmtNearEnd(block, ppCounterAsg);
}
+
+ //------------------------------------------------------------------------
+ // TransformUncommon: delete all the statements in the block and insert
+ // a call to the uncommon patchpoint helper
+ //
+ // S0; S1; S2; ... SN;
+ //
+ // ==>
+ //
+ // ~~{ S0; ... SN; }~~ (deleted)
+ // call JIT_UNCOMMON_PATCHPOINT(ilOffset)
+ //
+ // Note S0 -- SN are not forever lost -- they will appear in the OSR version
+ // of the method created when the patchpoint is hit. Also note the patchpoint
+ // helper call will not return control to this method.
+ //
+ void TransformUncommon(BasicBlock* block)
+ {
+ // Capture the IL offset
+ IL_OFFSET ilOffset = block->bbCodeOffs;
+ assert(ilOffset != BAD_IL_OFFSET);
+
+ // Remove all statements from the block.
+ for (Statement* stmt : block->Statements())
+ {
+ compiler->fgRemoveStmt(block, stmt);
+ }
+
+ // Update flow
+ block->bbJumpKind = BBJ_THROW;
+ block->bbJumpDest = nullptr;
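+ // The uncommon patchpoint helper never returns, so the block needs no successor.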
+
+ // Add helper call
+ //
+ // call UncommonPPHelper(ilOffset)
+ GenTree* ilOffsetNode = compiler->gtNewIconNode(ilOffset, TYP_INT);
+ GenTreeCall::Use* helperArgs = compiler->gtNewCallArgs(ilOffsetNode);
+ GenTreeCall* helperCall = compiler->gtNewHelperCallNode(CORINFO_HELP_UNCOMMON_PATCHPOINT, TYP_VOID, helperArgs);
+
+ compiler->fgNewStmtAtEnd(block, helperCall);
+ }
};
//------------------------------------------------------------------------
@@ -200,7 +263,7 @@ class PatchpointTransformer
//
PhaseStatus Compiler::fgTransformPatchpoints()
{
- if (!doesMethodHavePatchpoints())
+ if (!doesMethodHavePatchpoints() && !doesMethodHaveUncommonPatchpoints())
{
JITDUMP("\n -- no patchpoints to transform\n");
return PhaseStatus::MODIFIED_NOTHING;
diff --git a/src/coreclr/src/tools/crossgen2/jitinterface/jitwrapper.cpp b/src/coreclr/src/tools/crossgen2/jitinterface/jitwrapper.cpp
index 5f388248f8c11..c029b25894aa2 100644
--- a/src/coreclr/src/tools/crossgen2/jitinterface/jitwrapper.cpp
+++ b/src/coreclr/src/tools/crossgen2/jitinterface/jitwrapper.cpp
@@ -27,11 +27,11 @@ class CORJIT_FLAGS
uint64_t corJitFlags;
};
-static const GUID JITEEVersionIdentifier = { /* 6ae798bf-44bd-4e8a-b8fc-dbe1d1f4029e */
- 0x6ae798bf,
- 0x44bd,
- 0x4e8a,
- {0xb8, 0xfc, 0xdb, 0xe1, 0xd1, 0xf4, 0x02, 0x9e}
+static const GUID JITEEVersionIdentifier = { /* fd36d9ca-54c6-4949-ba01-ae257a9f2da3 */
+ 0xfd36d9ca,
+ 0x54c6,
+ 0x4949,
+ {0xba, 0x01, 0xae, 0x25, 0x7a, 0x9f, 0x2d, 0xa3}
};
class Jit
diff --git a/src/coreclr/src/vm/jithelpers.cpp b/src/coreclr/src/vm/jithelpers.cpp
index 63d61ae474042..7e916d86c8aad 100644
--- a/src/coreclr/src/vm/jithelpers.cpp
+++ b/src/coreclr/src/vm/jithelpers.cpp
@@ -5249,8 +5249,8 @@ void JIT_Patchpoint(int* counter, int ilOffset)
if (osrMethodCode == NULL)
{
// Unexpected, but not fatal
- STRESS_LOG4(LF_TIEREDCOMPILATION, LL_WARNING, "Jit_Patchpoint: patchpoint (0x%p) OSR method creation failed,"
- " marking patchpoint invalid for Method=0x%pM il offset %d\n", ip, hitCount, pMD, ilOffset);
+ STRESS_LOG3(LF_TIEREDCOMPILATION, LL_WARNING, "Jit_Patchpoint: patchpoint (0x%p) OSR method creation failed,"
+ " marking patchpoint invalid for Method=0x%pM il offset %d\n", ip, pMD, ilOffset);
InterlockedOr(&ppInfo->m_flags, (LONG)PerPatchpointInfo::patchpoint_invalid);
return;
@@ -5290,8 +5290,9 @@ void JIT_Patchpoint(int* counter, int ilOffset)
if ((UINT_PTR)ip != GetIP(&frameContext))
{
// Should be fatal
- STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "Jit_Patchpoint: patchpoint (0x%p) TRANSITION"
+ STRESS_LOG2(LF_TIEREDCOMPILATION, LL_FATALERROR, "Jit_Patchpoint: patchpoint (0x%p) TRANSITION"
" unexpected context IP 0x%p\n", ip, GetIP(&frameContext));
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
}
// Now unwind back to the original method caller frame.
@@ -5330,6 +5331,169 @@ void JIT_Patchpoint(int* counter, int ilOffset)
RtlRestoreContext(&frameContext, NULL);
}
+// Jit helper invoked at an uncommon patchpoint.
+//
+// Similar to Jit_Patchpoint, but invoked when execution
+// reaches a point in a method where the continuation
+// was never jitted (e.g., an exceptional path).
+//
+// Unlike regular patchpoints, uncommon patchpoints
+// must always transition.
+//
+void JIT_UncommonPatchpoint(int ilOffset)
+{
+ // This method will not return normally
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ // Patchpoint identity is the helper return address
+ PCODE ip = (PCODE)_ReturnAddress();
+
+ // Fetch or setup patchpoint info for this patchpoint.
+ EECodeInfo codeInfo(ip);
+ MethodDesc* pMD = codeInfo.GetMethodDesc();
+ LoaderAllocator* allocator = pMD->GetLoaderAllocator();
+ OnStackReplacementManager* manager = allocator->GetOnStackReplacementManager();
+ PerPatchpointInfo * ppInfo = manager->GetPerPatchpointInfo(ip);
+
+#if _DEBUG
+ const int ppId = ppInfo->m_patchpointId;
+#endif
+
+ // See if we have an OSR method for this patchpoint.
+ bool isNewMethod = false;
+ DWORD backoffs = 0;
+ while (ppInfo->m_osrMethodCode == NULL)
+ {
+ // Invalid patchpoints are fatal for uncommon patchpoints
+ //
+ if ((ppInfo->m_flags & PerPatchpointInfo::patchpoint_invalid) == PerPatchpointInfo::patchpoint_invalid)
+ {
+ LOG((LF_TIEREDCOMPILATION, LL_FATALERROR, "Jit_UncommonPatchpoint: invalid patchpoint [%d] (0x%p) in Method=0x%pM (%s::%s) at offset %d\n",
+ ppId, ip, pMD, pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, ilOffset));
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+
+ // Make sure no other thread is trying to create the OSR method.
+ //
+ LONG oldFlags = ppInfo->m_flags;
+ if ((oldFlags & PerPatchpointInfo::patchpoint_triggered) == PerPatchpointInfo::patchpoint_triggered)
+ {
+ LOG((LF_TIEREDCOMPILATION, LL_INFO1000, "Jit_UncommonPatchpoint: AWAITING OSR method for patchpoint [%d] (0x%p)\n", ppId, ip));
+ __SwitchToThread(0, backoffs++);
+ continue;
+ }
+
+ // Make sure we win the race to create the OSR method
+ //
+ LONG newFlags = ppInfo->m_flags | PerPatchpointInfo::patchpoint_triggered;
+ BOOL triggerTransition = InterlockedCompareExchange(&ppInfo->m_flags, newFlags, oldFlags) == oldFlags;
+
+ if (!triggerTransition)
+ {
+ LOG((LF_TIEREDCOMPILATION, LL_INFO1000, "Jit_UncommonPatchpoint: (lost race) AWAITING OSR method for patchpoint [%d] (0x%p)\n", ppId, ip));
+ __SwitchToThread(0, backoffs++);
+ continue;
+ }
+
+ // Invoke the helper to build the OSR method
+ //
+ // TODO: we may not want to optimize this part of the method if it's truly uncommon
+ // and can't possibly rejoin into the main flow.
+ //
+ // (but consider: for a throw path in a method with try/catch, the OSR method will contain more than just the throw)
+ //
+ LOG((LF_TIEREDCOMPILATION, LL_INFO10, "Jit_UncommonPatchpoint: patchpoint [%d] (0x%p) TRIGGER\n", ppId, ip));
+ PCODE newMethodCode = HCCALL3(JIT_Patchpoint_Framed, pMD, codeInfo, ilOffset);
+
+ // If that failed, mark the patchpoint as invalid.
+ // This is fatal for uncommon patchpoints
+ //
+ if (newMethodCode == NULL)
+ {
+ STRESS_LOG3(LF_TIEREDCOMPILATION, LL_WARNING, "Jit_UncommonPatchpoint: patchpoint (0x%p) OSR method creation failed,"
+ " marking patchpoint invalid for Method=0x%pM il offset %d\n", ip, pMD, ilOffset);
+ InterlockedOr(&ppInfo->m_flags, (LONG)PerPatchpointInfo::patchpoint_invalid);
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ break;
+ }
+
+ // We've successfully created the OSR method; make it available.
+ _ASSERTE(ppInfo->m_osrMethodCode == NULL);
+ ppInfo->m_osrMethodCode = newMethodCode;
+ isNewMethod = true;
+ }
+
+ // If we get here, we have code to transition to...
+ PCODE osrMethodCode = ppInfo->m_osrMethodCode;
+ _ASSERTE(osrMethodCode != NULL);
+
+ Thread *pThread = GetThread();
+
+#ifdef FEATURE_HIJACK
+ // We can't crawl the stack of a thread that currently has a hijack pending
+ // (since the hijack routine won't be recognized by any code manager). So we
+ // (since the hijack routine won't be recognized by any code manager). So we
+ // undo any hijack; the EE will re-attempt it later.
+#endif
+
+ // Find context for the original method
+ CONTEXT frameContext;
+ frameContext.ContextFlags = CONTEXT_FULL;
+ RtlCaptureContext(&frameContext);
+
+ // Walk back to the original method frame
+ pThread->VirtualUnwindToFirstManagedCallFrame(&frameContext);
+
+ // Remember original method FP and SP because new method will inherit them.
+ UINT_PTR currentSP = GetSP(&frameContext);
+ UINT_PTR currentFP = GetFP(&frameContext);
+
+ // We expect to be back at the right IP
+ if ((UINT_PTR)ip != GetIP(&frameContext))
+ {
+ // This is fatal
+ STRESS_LOG2(LF_TIEREDCOMPILATION, LL_FATALERROR, "Jit_UncommonPatchpoint: patchpoint (0x%p) TRANSITION"
+ " unexpected context IP 0x%p\n", ip, GetIP(&frameContext));
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+
+ // Now unwind back to the original method caller frame.
+ EECodeInfo callerCodeInfo(GetIP(&frameContext));
+ frameContext.ContextFlags = CONTEXT_FULL;
+ ULONG_PTR establisherFrame = 0;
+ PVOID handlerData = NULL;
+ RtlVirtualUnwind(UNW_FLAG_NHANDLER, callerCodeInfo.GetModuleBase(), GetIP(&frameContext), callerCodeInfo.GetFunctionEntry(),
+ &frameContext, &handlerData, &establisherFrame, NULL);
+
+ // Now, set FP and SP back to the values they had just before this helper was called,
+ // since the new method must have access to the original method frame.
+ //
+ // TODO: if we access the patchpointInfo here, we can read out the FP-SP delta from there and
+ // use that to adjust the stack, likely saving some stack space.
+
+#if defined(TARGET_AMD64)
+ // If calls push the return address, we need to simulate that here, so the OSR
+ // method sees the "expected" SP misalignment on entry.
+ _ASSERTE(currentSP % 16 == 0);
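+ // Subtract the 8-byte return address slot a call would have pushed.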
+ currentSP -= 8;
+#endif
+
+ SetSP(&frameContext, currentSP);
+ frameContext.Rbp = currentFP;
+
+ // Note we can get here w/o triggering, if there is an existing OSR method and
+ // we hit the patchpoint.
+ const int transitionLogLevel = isNewMethod ? LL_INFO10 : LL_INFO1000;
+ LOG((LF_TIEREDCOMPILATION, transitionLogLevel, "Jit_UncommonPatchpoint: patchpoint [%d] (0x%p) TRANSITION to ip 0x%p\n", ppId, ip, osrMethodCode));
+
+ // Install new entry point as IP
+ SetIP(&frameContext, osrMethodCode);
+
+ // Transition!
+ RtlRestoreContext(&frameContext, NULL);
+}
+
#else
void JIT_Patchpoint(int* counter, int ilOffset)
@@ -5341,6 +5505,15 @@ void JIT_Patchpoint(int* counter, int ilOffset)
UNREACHABLE();
}
+void JIT_UncommonPatchpoint(int ilOffset)
+{
+ // Stub version if OSR feature is disabled
+ //
+ // Should not be called.
+
+ UNREACHABLE();
+}
+
#endif // FEATURE_ON_STACK_REPLACEMENT
//========================================================================
diff --git a/src/coreclr/tests/testenvironment.proj b/src/coreclr/tests/testenvironment.proj
index caed6ae7e1ee8..063f1ff61fd04 100644
--- a/src/coreclr/tests/testenvironment.proj
+++ b/src/coreclr/tests/testenvironment.proj
@@ -44,6 +44,7 @@
COMPlus_TC_OnStackReplacement;
COMPlus_TC_QuickJitForLoops;
COMPlus_TC_OnStackReplacement_InitialCounter;
+ COMPlus_TC_UncommonPatchpoint;
COMPlus_OSR_HitLimit;
COMPlus_JitEnableGuardedDevirtualization;
COMPlus_EnableEHWriteThru;
@@ -135,6 +136,7 @@
+