From b4d51a5c336daa723f0809316542cad2c00b7e57 Mon Sep 17 00:00:00 2001 From: sergey ignatov Date: Mon, 15 Aug 2016 16:41:50 +0300 Subject: [PATCH] Implement jitted code dropping feature --- src/debug/daccess/fntableaccess.h | 3 + src/inc/clrconfigvalues.h | 7 + src/inc/loaderheap.h | 6 +- src/jit/codegencommon.cpp | 6 +- src/utilcode/loaderheap.cpp | 5 +- src/vm/CMakeLists.txt | 4 + src/vm/codeman.cpp | 153 ++++++++- src/vm/codeman.h | 60 +++- src/vm/dynamicmethod.cpp | 4 + src/vm/dynamicmethod.h | 4 + src/vm/hosting.cpp | 15 +- src/vm/loaderallocator.cpp | 1 + src/vm/loaderallocator.hpp | 1 + src/vm/method.cpp | 10 + src/vm/method.hpp | 13 + src/vm/prestub.cpp | 514 +++++++++++++++++++++++++++++- 16 files changed, 786 insertions(+), 20 deletions(-) diff --git a/src/debug/daccess/fntableaccess.h b/src/debug/daccess/fntableaccess.h index b5ea5452f65e..c3a1ea17ed92 100644 --- a/src/debug/daccess/fntableaccess.h +++ b/src/debug/daccess/fntableaccess.h @@ -53,6 +53,9 @@ typedef struct _FakeHpRealCodeHdr LPVOID phdrJitGCInfo; // changed from BYTE* #if defined (FEATURE_GDBJIT) LPVOID pCalledMethods; +#endif +#if defined(FEATURE_JIT_DROPPING) + LPVOID phdrHeapList; #endif LPVOID hdrMDesc; // changed from MethodDesc* DWORD nUnwindInfos; diff --git a/src/inc/clrconfigvalues.h b/src/inc/clrconfigvalues.h index 6160f2061aaf..f5d9781fe902 100644 --- a/src/inc/clrconfigvalues.h +++ b/src/inc/clrconfigvalues.h @@ -132,6 +132,13 @@ RETAIL_CONFIG_DWORD_INFO(EXTERNAL_FinalizeOnShutdown, W("FinalizeOnShutdown"), D // ARM // RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_ARMEnabled, W("ARMEnabled"), (DWORD)0, "Set it to 1 to enable ARM") +RETAIL_CONFIG_DWORD_INFO(INTERNAL_JitDropEnabled, W("JitDropEnabled"), (DWORD)0, "Set to 1 to enable jitted code dropping") +RETAIL_CONFIG_DWORD_INFO(INTERNAL_JitDropMemThreshold, W("JitDropMemThreshold"), (DWORD)0, "Drop jitted code when total code heap usage is larger than this (in bytes)") +RETAIL_CONFIG_DWORD_INFO(INTERNAL_JitDropMethodSizeThreshold, W("JitDropMethodSizeThreshold"), (DWORD)0, "Drop jitted code for methods whose native code size is larger than this (in bytes)") +RETAIL_CONFIG_DWORD_INFO(INTERNAL_JitDropMaxLevel, W("JitDropMaxLevel"), (DWORD)0, "When nonzero, drop jitted code for as many methods as possible") +RETAIL_CONFIG_DWORD_INFO(INTERNAL_JitDropPrintStat, W("JitDropPrintStat"), (DWORD)0, "Print statistics about jitted code dropping") +RETAIL_CONFIG_DWORD_INFO(INTERNAL_JitDropMinVal, W("JitDropMinVal"), (DWORD)0, "Drop jitted code only once the internal drop counter has reached this value (for debugging purposes only)") +RETAIL_CONFIG_DWORD_INFO(INTERNAL_JitDropMaxVal, W("JitDropMaxVal"), (DWORD)0xffffffff, "Drop jitted code only while the internal drop counter does not exceed this value (for debugging purposes only)") // // Assembly Loader diff --git a/src/inc/loaderheap.h b/src/inc/loaderheap.h index 7d4c48f5e86f..7088ddafa9d8 100644 --- a/src/inc/loaderheap.h +++ b/src/inc/loaderheap.h @@ -418,7 +418,7 @@ class UnlockedLoaderHeap #endif protected: - void *UnlockedAllocMemForCode_NoThrow(size_t dwHeaderSize, size_t dwCodeSize, DWORD dwCodeAlignment); + void *UnlockedAllocMemForCode_NoThrow(size_t dwHeaderSize, size_t dwCodeSize, DWORD dwCodeAlignment, bool updateAllocPtr = true); void UnlockedSetReservedRegion(BYTE* dwReservedRegionAddress, SIZE_T dwReservedRegionSize, BOOL fReleaseMemory); }; @@ -835,10 +835,10 @@ class ExplicitControlLoaderHeap : public UnlockedLoaderHeap public: - void *AllocMemForCode_NoThrow(size_t dwHeaderSize, size_t dwCodeSize, DWORD dwCodeAlignment) + void *AllocMemForCode_NoThrow(size_t
dwHeaderSize, size_t dwCodeSize, DWORD dwCodeAlignment, bool updateAllocPtr = true) { WRAPPER_NO_CONTRACT; - return UnlockedAllocMemForCode_NoThrow(dwHeaderSize, dwCodeSize, dwCodeAlignment); + return UnlockedAllocMemForCode_NoThrow(dwHeaderSize, dwCodeSize, dwCodeAlignment, updateAllocPtr); } void SetReservedRegion(BYTE* dwReservedRegionAddress, SIZE_T dwReservedRegionSize, BOOL fReleaseMemory) diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp index a28ce2cfb512..02e7899a0302 100644 --- a/src/jit/codegencommon.cpp +++ b/src/jit/codegencommon.cpp @@ -3213,7 +3213,11 @@ void CodeGen::genGenerateCode(void** codePtr, ULONG* nativeSizeOfCode) } #endif // EMIT_TRACK_STACK_DEPTH - *nativeSizeOfCode = codeSize; + if (nativeSizeOfCode != nullptr) + { + *nativeSizeOfCode = codeSize; + } + compiler->info.compNativeCodeSize = (UNATIVE_OFFSET)codeSize; // printf("%6u bytes of code generated for %s.%s\n", codeSize, compiler->info.compFullName); diff --git a/src/utilcode/loaderheap.cpp b/src/utilcode/loaderheap.cpp index a005ac8af8eb..5462806ffe23 100644 --- a/src/utilcode/loaderheap.cpp +++ b/src/utilcode/loaderheap.cpp @@ -1720,7 +1720,7 @@ void *UnlockedLoaderHeap::UnlockedAllocAlignedMem(size_t dwRequestedSize, -void *UnlockedLoaderHeap::UnlockedAllocMemForCode_NoThrow(size_t dwHeaderSize, size_t dwCodeSize, DWORD dwCodeAlignment) +void *UnlockedLoaderHeap::UnlockedAllocMemForCode_NoThrow(size_t dwHeaderSize, size_t dwCodeSize, DWORD dwCodeAlignment, bool updateAllocPtr) { CONTRACT(void*) { @@ -1758,7 +1758,8 @@ void *UnlockedLoaderHeap::UnlockedAllocMemForCode_NoThrow(size_t dwHeaderSize, s BYTE *pResult = (BYTE *)ALIGN_UP(m_pAllocPtr + dwHeaderSize, dwCodeAlignment); EtwAllocRequest(this, pResult, (pResult + dwCodeSize) - m_pAllocPtr); - m_pAllocPtr = pResult + dwCodeSize; + if (updateAllocPtr) + m_pAllocPtr = pResult + dwCodeSize; RETURN pResult; } diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt index 21f1659c6fb1..2856bfcb5312 100644 --- a/src/vm/CMakeLists.txt +++ b/src/vm/CMakeLists.txt @@ -32,6 +32,10 @@ if(FEATURE_GDBJIT) add_definitions(-DFEATURE_GDBJIT) endif(FEATURE_GDBJIT) +if(FEATURE_JIT_DROPPING) + add_definitions(-DFEATURE_JIT_DROPPING) +endif(FEATURE_JIT_DROPPING) + set(VM_SOURCES_DAC_AND_WKS_COMMON appdomain.cpp array.cpp diff --git a/src/vm/codeman.cpp b/src/vm/codeman.cpp index ad4c5196b1f2..7ece32994363 100644 --- a/src/vm/codeman.cpp +++ b/src/vm/codeman.cpp @@ -2155,7 +2155,11 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap RETURN pHp; } +#if defined(FEATURE_JIT_DROPPING) +void * LoaderCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, bool updateAllocPtr) +#else void * LoaderCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) +#endif { CONTRACTL { NOTHROW; @@ -2164,7 +2168,11 @@ void * LoaderCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD if (m_cbMinNextPad > (SSIZE_T)header) header = m_cbMinNextPad; +#if defined(FEATURE_JIT_DROPPING) + void * p = m_LoaderHeap.AllocMemForCode_NoThrow(header, size, alignment, updateAllocPtr); +#else void * p = m_LoaderHeap.AllocMemForCode_NoThrow(header, size, alignment); +#endif if (p == NULL) return NULL; @@ -2189,6 +2197,9 @@ void CodeHeapRequestInfo::Init() m_pAllocator = m_pMD->GetLoaderAllocatorForCode(); m_isDynamicDomain = (m_pMD != NULL) ? m_pMD->IsLCGMethod() : false; m_isCollectible = m_pAllocator->IsCollectible() ? 
true : false; +#if defined(FEATURE_JIT_DROPPING) + m_isJitDroppedDomain = false; +#endif } #ifdef WIN64EXCEPTIONS @@ -2254,19 +2265,36 @@ HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapLi } CONTRACT_END; size_t initialRequestSize = pInfo->getRequestSize(); - size_t minReserveSize = VIRTUAL_ALLOC_RESERVE_GRANULARITY; // ( 64 KB) +#if defined(FEATURE_JIT_DROPPING) + size_t minReserveSize = 0; + if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropEnabled) != 0) + minReserveSize = OS_PAGE_SIZE; // ( 4 KB) + else + minReserveSize = VIRTUAL_ALLOC_RESERVE_GRANULARITY; // ( 64 KB) +#else + size_t minReserveSize = VIRTUAL_ALLOC_RESERVE_GRANULARITY; // ( 64 KB) +#endif #ifdef _WIN64 if (pInfo->m_hiAddr == 0) { +#if defined(FEATURE_JIT_DROPPING) + if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropEnabled) == 0 + && pADHeapList->m_CodeHeapList.Count() > CODE_HEAP_SIZE_INCREASE_THRESHOLD) +#else if (pADHeapList->m_CodeHeapList.Count() > CODE_HEAP_SIZE_INCREASE_THRESHOLD) +#endif { minReserveSize *= 4; // Increase the code heap size to 256 KB for workloads with a lot of code. } // For non-DynamicDomains that don't have a loAddr/hiAddr range // we bump up the reserve size for the 64-bit platforms +#if defined(FEATURE_JIT_DROPPING) + if (!pInfo->IsJitDroppedDomain() && !pInfo->IsDynamicDomain()) +#else if (!pInfo->IsDynamicDomain()) +#endif { minReserveSize *= 4; // CodeHeaps are larger on AMD64 (256 KB to 1024 KB) } @@ -2282,7 +2310,15 @@ HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapLi size_t reserveSize = requestAndHeadersSize; if (reserveSize < minReserveSize) reserveSize = minReserveSize; + +#if defined(FEATURE_JIT_DROPPING) + if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropEnabled) != 0) + reserveSize = ALIGN_UP(reserveSize, OS_PAGE_SIZE); + else + reserveSize = ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY); +#else reserveSize = ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY); +#endif pInfo->setReserveSize(reserveSize); @@ -2377,6 +2413,9 @@ void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo, bool bForJumpStubs = (pInfo->m_loAddr != 0) || (pInfo->m_hiAddr != 0); bool bUseCachedDynamicCodeHeap = pInfo->IsDynamicDomain(); +#if defined(FEATURE_JIT_DROPPING) + bool bUseCachedJitDroppedCodeHeap = !bForJumpStubs && pInfo->m_pMD && pInfo->IsJitDroppedDomain(); +#endif HeapList * pCodeHeap; @@ -2388,6 +2427,13 @@ void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo, pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap; pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = NULL; } +#if defined(FEATURE_JIT_DROPPING) + else if(bUseCachedJitDroppedCodeHeap) + { + pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedJitDroppedCodeHeap; + pInfo->m_pAllocator->m_pLastUsedJitDroppedCodeHeap = NULL; + } +#endif else { pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedCodeHeap; @@ -2401,8 +2447,12 @@ void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo, pCodeHeap = NULL; } +#if defined(FEATURE_JIT_DROPPING) + if (bUseCachedJitDroppedCodeHeap || pCodeHeap == NULL) +#else // If we don't have a cached code heap or can't use it, get a code heap if (pCodeHeap == NULL) +#endif { pCodeHeap = GetCodeHeap(pInfo); if (pCodeHeap == NULL) @@ -2410,7 +2460,11 @@ void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo, } #ifdef _WIN64 +#if defined(FEATURE_JIT_DROPPING) + if (!bForJumpStubs && !bUseCachedJitDroppedCodeHeap) +#else if (!bForJumpStubs) +#endif { // // Keep a small 
reserve at the end of the codeheap for jump stubs. It should reduce @@ -2443,7 +2497,19 @@ void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo, } #endif +#if defined(FEATURE_JIT_DROPPING) + mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, bUseCachedJitDroppedCodeHeap ? false : true); +#else mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align); +#endif + +#if defined(FEATURE_JIT_DROPPING) + if (mem != NULL && bUseCachedJitDroppedCodeHeap) { + pCodeHeap->SetHeapFull(); + pCodeHeap->SetHeapFullForJumpStubs(); + } +#endif + if (mem != NULL) break; @@ -2458,28 +2524,53 @@ void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo, { // Let us create a new heap. +#if defined(FEATURE_JIT_DROPPING) + DomainCodeHeapList *pList = GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator, FALSE, (pInfo->IsJitDroppedDomain() ? TRUE : FALSE)); +#else DomainCodeHeapList *pList = GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator); +#endif + if (pList == NULL) { // not found so need to create the first one pList = CreateCodeHeapList(pInfo); +#if defined(FEATURE_JIT_DROPPING) + _ASSERTE(pList == GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator, FALSE, (pInfo->IsJitDroppedDomain() ? TRUE : FALSE))); +#else _ASSERTE(pList == GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator)); +#endif } _ASSERTE(pList); pCodeHeap = NewCodeHeap(pInfo, pList); _ASSERTE(pCodeHeap); +#if defined(FEATURE_JIT_DROPPING) + mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, bUseCachedJitDroppedCodeHeap ? false : true); +#else mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align); +#endif if (mem == NULL) ThrowOutOfMemory(); _ASSERTE(mem); +#if defined(FEATURE_JIT_DROPPING) + if (bUseCachedJitDroppedCodeHeap) { + pCodeHeap->SetHeapFull(); + pCodeHeap->SetHeapFullForJumpStubs(); + } +#endif } if (bUseCachedDynamicCodeHeap) { pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = pCodeHeap; } +#if defined(FEATURE_JIT_DROPPING) + else if (bUseCachedJitDroppedCodeHeap) + { + pInfo->m_pAllocator->m_pLastUsedJitDroppedCodeHeap = pCodeHeap; + } +#endif else { pInfo->m_pAllocator->m_pLastUsedCodeHeap = pCodeHeap; @@ -2562,6 +2653,14 @@ CodeHeader* EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, CorJitAll CodeHeader * pCodeHdr = NULL; CodeHeapRequestInfo requestInfo(pMD); +#if defined(FEATURE_JIT_DROPPING) + if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropEnabled) != 0 && + CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropMethodSizeThreshold) < blockSize && + !pMD->IsNotForDropping()) + { + requestInfo.SetJitDroppedDomain(); + } +#endif // Scope the lock { @@ -2607,18 +2706,30 @@ CodeHeader* EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, CorJitAll pCodeHdr->SetEHInfo(NULL); pCodeHdr->SetGCInfo(NULL); pCodeHdr->SetMethodDesc(pMD); +#if defined(FEATURE_JIT_DROPPING) + pCodeHdr->SetHeapList(pCodeHeap); +#endif + #ifdef WIN64EXCEPTIONS pCodeHdr->SetNumberOfUnwindInfos(nUnwindInfos); *pModuleBase = (TADDR)pCodeHeap; #endif NibbleMapSet(pCodeHeap, pCode, TRUE); +#if defined(FEATURE_JIT_DROPPING) + if (requestInfo.IsJitDroppedDomain()) + pCodeHeap->cBlocks = 1; +#endif } RETURN(pCodeHdr); } +#if defined(FEATURE_JIT_DROPPING) +EEJitManager::DomainCodeHeapList *EEJitManager::GetCodeHeapList(MethodDesc *pMD, LoaderAllocator *pAllocator, BOOL fDynamicOnly, BOOL fJitDropped) +#else EEJitManager::DomainCodeHeapList *EEJitManager::GetCodeHeapList(MethodDesc *pMD, LoaderAllocator *pAllocator, BOOL fDynamicOnly) +#endif { CONTRACTL 
{ NOTHROW; @@ -2637,6 +2748,13 @@ EEJitManager::DomainCodeHeapList *EEJitManager::GetCodeHeapList(MethodDesc *pMD, ppList = m_DynamicDomainCodeHeaps.Table(); count = m_DynamicDomainCodeHeaps.Count(); } +#if defined(FEATURE_JIT_DROPPING) + else if (fJitDropped) + { + ppList = m_JitDroppedDomainCodeHeaps.Table(); + count = m_JitDroppedDomainCodeHeaps.Count(); + } +#endif else { ppList = m_DomainCodeHeaps.Table(); @@ -2673,7 +2791,11 @@ HeapList* EEJitManager::GetCodeHeap(CodeHeapRequestInfo *pInfo) // loop through the m_DomainCodeHeaps to find the AppDomain // if not found, then create it +#if defined(FEATURE_JIT_DROPPING) + DomainCodeHeapList *pList = GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator, FALSE, (pInfo->IsJitDroppedDomain() ? TRUE : FALSE)); +#else DomainCodeHeapList *pList = GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator); +#endif if (pList) { // Set pResult to the largest non-full HeapList @@ -2794,7 +2916,7 @@ bool EEJitManager::CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCodeHea } } - return retVal; + return retVal; } EEJitManager::DomainCodeHeapList * EEJitManager::CreateCodeHeapList(CodeHeapRequestInfo *pInfo) @@ -2811,6 +2933,10 @@ EEJitManager::DomainCodeHeapList * EEJitManager::CreateCodeHeapList(CodeHeapRequ DomainCodeHeapList **ppList = NULL; if (pInfo->IsDynamicDomain()) ppList = m_DynamicDomainCodeHeaps.AppendThrowing(); +#if defined(FEATURE_JIT_DROPPING) + else if (pInfo->IsJitDroppedDomain()) + ppList = m_JitDroppedDomainCodeHeaps.AppendThrowing(); +#endif else ppList = m_DomainCodeHeaps.AppendThrowing(); *ppList = pNewList; @@ -3281,6 +3407,29 @@ void EEJitManager::Unload(LoaderAllocator *pAllocator) break; } } +#if defined(FEATURE_JIT_DROPPING) + ppList = m_JitDroppedDomainCodeHeaps.Table(); + count = m_JitDroppedDomainCodeHeaps.Count(); + + for (int i=0; i < count; i++) { + if (ppList[i]->m_pAllocator== pAllocator) { + DomainCodeHeapList *pList = ppList[i]; + m_JitDroppedDomainCodeHeaps.DeleteByIndex(i); + + // pHeapList is allocated in pHeap, so only need to delete the LoaderHeap itself + count = pList->m_CodeHeapList.Count(); + for (i=0; i < count; i++) { + HeapList *pHeapList = pList->m_CodeHeapList[i]; + DeleteCodeHeap(pHeapList); + } + + // this is ok to do delete as anyone accessing the DomainCodeHeapList structure holds the critical section.
+ delete pList; + + break; + } + } +#endif ppList = m_DynamicDomainCodeHeaps.Table(); count = m_DynamicDomainCodeHeaps.Count(); for (int i=0; i < count; i++) { diff --git a/src/vm/codeman.h b/src/vm/codeman.h index 5fbddea87546..ffb0ad75280d 100644 --- a/src/vm/codeman.h +++ b/src/vm/codeman.h @@ -145,7 +145,11 @@ typedef struct _hpCodeHdr PTR_BYTE phdrJitGCInfo; #if defined(FEATURE_GDBJIT) - VOID* pCalledMethods; + VOID* pCalledMethods; #endif + +#if defined(FEATURE_JIT_DROPPING) + struct _HeapList* phdrHeapList; #endif PTR_MethodDesc phdrMDesc; @@ -181,7 +185,7 @@ typedef struct _hpCodeHdr return phdrMDesc; } #if defined(FEATURE_GDBJIT) - VOID* GetCalledMethods() + VOID* GetCalledMethods() { SUPPORTS_DAC; return pCalledMethods; @@ -197,13 +201,20 @@ typedef struct _hpCodeHdr SUPPORTS_DAC; return (StubCodeBlockKind)dac_cast<TADDR>(phdrMDesc); } +#if defined(FEATURE_JIT_DROPPING) + struct _HeapList* GetHeapList() + { + SUPPORTS_DAC; + return phdrHeapList; + } +#endif + BOOL IsStubCodeBlock() { SUPPORTS_DAC; // Note that it is important for this comparison to be unsigned return dac_cast<TADDR>(phdrMDesc) <= (TADDR)STUB_CODE_BLOCK_LAST; } - void SetDebugInfo(PTR_BYTE pDI) { phdrDebugInfo = pDI; @@ -230,6 +241,13 @@ typedef struct _hpCodeHdr { phdrMDesc = (PTR_MethodDesc)kind; } +#if defined(FEATURE_JIT_DROPPING) + void SetHeapList(struct _HeapList* pHL) + { + phdrHeapList = pHL; + } +#endif + #endif // !USE_INDIRECT_CODEHEADER // if we're using the indirect codeheaders then all enumeration is done by the code header @@ -286,6 +304,13 @@ typedef struct _hpCodeHdr SUPPORTS_DAC; return (StubCodeBlockKind)dac_cast<TADDR>(pRealCodeHeader); } +#if defined(FEATURE_JIT_DROPPING) + struct _HeapList* GetHeapList() + { + SUPPORTS_DAC; + return pRealCodeHeader->phdrHeapList; + } +#endif BOOL IsStubCodeBlock() { SUPPORTS_DAC; @@ -324,6 +349,12 @@ typedef struct _hpCodeHdr { pRealCodeHeader = (PTR_RealCodeHeader)kind; } +#if defined(FEATURE_JIT_DROPPING) + void SetHeapList(struct _HeapList* pHL) + { + pRealCodeHeader->phdrHeapList = pHL; + } +#endif #if defined(WIN64EXCEPTIONS) UINT GetNumberOfUnwindInfos() @@ -368,9 +399,16 @@ struct CodeHeapRequestInfo size_t m_requestSize; // minimum size that must be made available size_t m_reserveSize; // Amount that VirtualAlloc will reserved bool m_isDynamicDomain; +#if defined(FEATURE_JIT_DROPPING) + bool m_isJitDroppedDomain; +#endif bool m_isCollectible; bool IsDynamicDomain() { return m_isDynamicDomain; } +#if defined(FEATURE_JIT_DROPPING) + bool IsJitDroppedDomain() { return m_isJitDroppedDomain; } + void SetJitDroppedDomain() { m_isJitDroppedDomain = true; } +#endif bool IsCollectible() { return m_isCollectible; } size_t getRequestSize() { return m_requestSize; } @@ -433,7 +471,11 @@ class CodeHeap // Alloc the specified numbers of bytes for code. Returns NULL if the request does not fit // Space for header is reserved immediately before. It is not included in size.
+#if defined(FEATURE_JIT_DROPPING) + virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, bool updateAllocPtr = true) = 0; +#else virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) = 0; +#endif #ifdef DACCESS_COMPILE virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) = 0; @@ -527,7 +569,11 @@ class LoaderCodeHeap : CodeHeap WRAPPER_NO_CONTRACT; } +#if defined(FEATURE_JIT_DROPPING) + virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, bool updateAllocPtr) DAC_EMPTY_RET(NULL); +#else virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) DAC_EMPTY_RET(NULL); +#endif #ifdef DACCESS_COMPILE virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) @@ -1097,7 +1143,11 @@ private : size_t header, size_t blockSize, unsigned align, HeapList ** ppCodeHeap /* Writeback, Can be null */ ); +#if defined(FEATURE_JIT_DROPPING) + DomainCodeHeapList *GetCodeHeapList(MethodDesc *pMD, LoaderAllocator *pAllocator, BOOL fDynamicOnly = FALSE, BOOL fJitDropped = FALSE); +#else DomainCodeHeapList *GetCodeHeapList(MethodDesc *pMD, LoaderAllocator *pAllocator, BOOL fDynamicOnly = FALSE); +#endif DomainCodeHeapList *CreateCodeHeapList(CodeHeapRequestInfo *pInfo); LoaderHeap* GetJitMetaHeap(MethodDesc *pMD); #endif // !CROSSGEN_COMPILE @@ -1172,8 +1222,12 @@ private : // must hold critical section to access this structure. CUnorderedArray<DomainCodeHeapList *, 5> m_DomainCodeHeaps; CUnorderedArray<DomainCodeHeapList *, 5> m_DynamicDomainCodeHeaps; +#if defined(FEATURE_JIT_DROPPING) + CUnorderedArray<DomainCodeHeapList *, 5> m_JitDroppedDomainCodeHeaps; +#endif #endif + #ifdef _TARGET_AMD64_ private: // diff --git a/src/vm/dynamicmethod.cpp b/src/vm/dynamicmethod.cpp index 3eec1252477d..3e0bdc60af0d 100644 --- a/src/vm/dynamicmethod.cpp +++ b/src/vm/dynamicmethod.cpp @@ -657,7 +657,11 @@ void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert) m_pFreeList, m_pFreeList->size)); } +#if defined(FEATURE_JIT_DROPPING) +void* HostCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, bool updateAllocPtr) +#else void* HostCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) +#endif { CONTRACTL { diff --git a/src/vm/dynamicmethod.h b/src/vm/dynamicmethod.h index a96200ba4fec..a101e36ff779 100644 --- a/src/vm/dynamicmethod.h +++ b/src/vm/dynamicmethod.h @@ -285,7 +285,11 @@ class HostCodeHeap : CodeHeap public: // Space for header is reserved immediately before. It is not included in size. +#if defined(FEATURE_JIT_DROPPING) + virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, bool updateAllocPtr = true) DAC_EMPTY_RET(NULL); +#else virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) DAC_EMPTY_RET(NULL); +#endif virtual ~HostCodeHeap() DAC_EMPTY(); diff --git a/src/vm/hosting.cpp b/src/vm/hosting.cpp index 620b9d6800e2..44a6ee58e04e 100644 --- a/src/vm/hosting.cpp +++ b/src/vm/hosting.cpp @@ -475,12 +475,15 @@ BOOL EEHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem) #ifdef _DEBUG GlobalAllocStore::RemoveAlloc (lpMem); - // Check the heap handle to detect heap contamination - lpMem = (BYTE*)lpMem - OS_HEAP_ALIGN; - HANDLE storedHeapHandle = *((HANDLE*)lpMem); - if(storedHeapHandle != hHeap) - _ASSERTE(!"Heap contamination detected!
HeapFree was called on a heap other than the one that memory was allocated from.\n" - "Possible cause: you used new (executable) to allocate the memory, but didn't use DeleteExecutable() to free it."); + if (lpMem != NULL) + { + // Check the heap handle to detect heap contamination + lpMem = (BYTE*)lpMem - OS_HEAP_ALIGN; + HANDLE storedHeapHandle = *((HANDLE*)lpMem); + if(storedHeapHandle != hHeap) + _ASSERTE(!"Heap contamination detected! HeapFree was called on a heap other than the one that memory was allocated from.\n" + "Possible cause: you used new (executable) to allocate the memory, but didn't use DeleteExecutable() to free it."); + } #endif // DON'T REMOVE THIS SEEMINGLY USELESS CAST // diff --git a/src/vm/loaderallocator.cpp b/src/vm/loaderallocator.cpp index 70c8cabb7930..9fcdb664e36e 100644 --- a/src/vm/loaderallocator.cpp +++ b/src/vm/loaderallocator.cpp @@ -66,6 +66,7 @@ LoaderAllocator::LoaderAllocator() m_pLastUsedCodeHeap = NULL; m_pLastUsedDynamicCodeHeap = NULL; m_pJumpStubCache = NULL; + m_pLastUsedJitDroppedCodeHeap = NULL; m_nLoaderAllocator = InterlockedIncrement64((LONGLONG *)&LoaderAllocator::cLoaderAllocatorsCreated); } diff --git a/src/vm/loaderallocator.hpp b/src/vm/loaderallocator.hpp index 72fa59857d04..6c6b6e31750b 100644 --- a/src/vm/loaderallocator.hpp +++ b/src/vm/loaderallocator.hpp @@ -115,6 +115,7 @@ class LoaderAllocator void * m_pLastUsedCodeHeap; void * m_pLastUsedDynamicCodeHeap; void * m_pJumpStubCache; + void * m_pLastUsedJitDroppedCodeHeap; // LoaderAllocator GC Structures PTR_LoaderAllocator m_pLoaderAllocatorDestroyNext; // Used in LoaderAllocator GC process (during sweeping) diff --git a/src/vm/method.cpp b/src/vm/method.cpp index bfd9c73e327b..bf8c976a0683 100644 --- a/src/vm/method.cpp +++ b/src/vm/method.cpp @@ -2300,11 +2300,21 @@ PCODE MethodDesc::TryGetMultiCallableAddrOfCode(CORINFO_ACCESS_FLAGS accessFlags } else { +#if defined(FEATURE_JIT_DROPPING) + if (IsPointingToNativeCode() + && ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropEnabled) == 0) || !HasTemporaryEntryPoint())) +#else if (IsPointingToNativeCode()) +#endif return GetNativeCode(); } +#if defined(FEATURE_JIT_DROPPING) + if (HasStableEntryPoint() + && ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropEnabled) == 0) || !HasTemporaryEntryPoint())) +#else if (HasStableEntryPoint()) +#endif return GetStableEntryPoint(); // Force the creation of the precode if we would eventually got one anyway diff --git a/src/vm/method.hpp b/src/vm/method.hpp index bbcb012d54c4..9de1af247bc0 100644 --- a/src/vm/method.hpp +++ b/src/vm/method.hpp @@ -1388,6 +1388,11 @@ class MethodDesc // - ngened code if IsPreImplemented() PCODE GetNativeCode(); +#if defined(FEATURE_JIT_DROPPING) + bool IsNotForDropping(); + void DropNativeCode(); +#endif + //================================================================ // FindOrCreateAssociatedMethodDesc // @@ -1629,9 +1634,17 @@ class MethodDesc // PCODE DoBackpatch(MethodTable * pMT, MethodTable * pDispatchingMT, BOOL fFullBackPatch); +#if defined(FEATURE_JIT_DROPPING) + PCODE DoPrestub(MethodTable *pDispatchingMT, BOOL fDoJitDropping = FALSE); +#else PCODE DoPrestub(MethodTable *pDispatchingMT); +#endif +#if defined(FEATURE_JIT_DROPPING) + PCODE MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS flags, BOOL fDoJitDropping = FALSE); +#else PCODE MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS flags); +#endif VOID GetMethodInfo(SString &namespaceOrClassName, SString &methodName, SString &methodSignature); VOID 
GetMethodInfoWithNewSig(SString &namespaceOrClassName, SString &methodName, SString &methodSignature); diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp index 1aec7973d062..727699b19ccc 100644 --- a/src/vm/prestub.cpp +++ b/src/vm/prestub.cpp @@ -48,7 +48,280 @@ #include "perfmap.h" #endif -#ifndef DACCESS_COMPILE +#if defined(FEATURE_JIT_DROPPING) +#include "nibblemapmacros.h" +#include "threadsuspend.h" +#endif + +#ifndef DACCESS_COMPILE + +#if defined(FEATURE_JIT_DROPPING) +static PtrHashMap* s_pCalledMethods = NULL; +static SimpleRWLock* s_pCalledMethodsLock = NULL; + +static PtrHashMap* s_pExecutedMethods = NULL; +static SimpleRWLock* s_pExecutedMethodsLock = NULL; + +static PtrHashMap* s_pNotForDroppingMethods = NULL; +static SimpleRWLock* s_pNotForDroppingMethodsLock = NULL; + +static ULONG s_totalNCSize = 0; +static SimpleRWLock* s_totalNCSizeLock = NULL; + +static ULONG s_jitDroppedBytes = 0; + +static bool s_printStack = false; + +static COUNT_T GetFullHash(MethodDesc* pMD) +{ + const char *moduleName = pMD->GetModule()->GetSimpleName(); + + COUNT_T hash = HashStringA(moduleName); // Start the hash with the Module name + +#if defined(_DEBUG) + hash = HashCOUNT_T(hash, HashStringA(pMD->m_pszDebugClassName)); // Hash in the class name + hash = HashCOUNT_T(hash, HashStringA(pMD->m_pszDebugMethodName)); // Hash in the method name + hash = HashCOUNT_T(hash, HashStringA(pMD->m_pszDebugMethodSignature)); // Hash in the method signature +#else + SString className, methodName, methodSig; + + pMD->GetMethodInfo(className, methodName, methodSig); + + hash = HashCOUNT_T(hash, className.Hash()); // Hash in the class name + hash = HashCOUNT_T(hash, methodName.Hash()); // Hash in the method name + hash = HashCOUNT_T(hash, methodSig.Hash()); // Hash in the method signature +#endif + + return hash; +} + +bool MethodDesc::IsNotForDropping() +{ + if (this == NULL) + return TRUE; + + if (IsLCGMethod() || IsFCall() || IsVtableMethod() || IsInterface() || IsVirtual()) + return TRUE; + + if (!IsIL() || IsUnboxingStub() || GetMethodTable()->Collectible()) + return TRUE; + + if (s_pNotForDroppingMethodsLock != NULL) + { + SimpleReadLockHolder srlh(s_pNotForDroppingMethodsLock); + if (s_pNotForDroppingMethods != NULL) + { + UPTR key = (UPTR)GetFullHash(this); + MethodDesc *pFound = (MethodDesc *)s_pNotForDroppingMethods->LookupValue(key, (LPVOID)this); + if (pFound != (MethodDesc *)INVALIDENTRY) + { + return TRUE; + } + } + } + return FALSE; +} + +static BOOL IsOwnerOfRWLock(LPVOID lock) +{ + // @TODO - SimpleRWLock does not have knowledge of which thread gets the writer + // lock, so no way to verify + return TRUE; +} + +static bool LookupOrCreateInNotForDroppingMethods(MethodDesc* pMD) +{ + CONTRACTL + { + MODE_COOPERATIVE; + GC_TRIGGERS; + THROWS; + } + CONTRACTL_END; + + if (pMD->IsNotForDropping()) + return TRUE; + + if (s_pNotForDroppingMethodsLock == NULL) + { + void *pLockSpace = SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(SimpleRWLock))); + SimpleRWLock *pLock = new (pLockSpace) SimpleRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT); + + if (FastInterlockCompareExchangePointer(&s_pNotForDroppingMethodsLock, pLock, NULL) != NULL) + SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->BackoutMem(pLockSpace, sizeof(SimpleRWLock)); + } + + UPTR key = (UPTR)GetFullHash(pMD); + + if (s_pNotForDroppingMethods == NULL) + { + SimpleWriteLockHolder
swlh(s_pNotForDroppingMethodsLock); + if (s_pNotForDroppingMethods == NULL) + { + PtrHashMap *pMap = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap(); + LockOwner lock = {s_pNotForDroppingMethodsLock, IsOwnerOfRWLock}; + pMap->Init(32, NULL, FALSE, &lock); + s_pNotForDroppingMethods = pMap; + } + } + else + { + SimpleReadLockHolder srlh(s_pNotForDroppingMethodsLock); + MethodDesc *pFound = (MethodDesc *)s_pNotForDroppingMethods->LookupValue(key, (LPVOID)pMD); + if (pFound != (MethodDesc *)INVALIDENTRY) + return TRUE; + } + + { + SimpleWriteLockHolder swlh(s_pNotForDroppingMethodsLock); + s_pNotForDroppingMethods->InsertValue(key, (LPVOID)pMD); + } + + return FALSE; +} + +static void LookupOrCreateInCalledMethods(MethodDesc* pMD, PCODE pCode) +{ + CONTRACTL + { + MODE_COOPERATIVE; + GC_TRIGGERS; + THROWS; + } + CONTRACTL_END; + + if (pMD->IsNotForDropping()) + return; + + PCODE prCode = pMD->GetPreImplementedCode(); + if (prCode != NULL) + return; + + // We lazily allocate the reader/writer lock we synchronize access to the hash with. + if (s_pCalledMethodsLock == NULL) + { + void *pLockSpace = SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(SimpleRWLock))); + SimpleRWLock *pLock = new (pLockSpace) SimpleRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT); + + if (FastInterlockCompareExchangePointer(&s_pCalledMethodsLock, pLock, NULL) != NULL) + // We lost the race, give up our copy. + SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->BackoutMem(pLockSpace, sizeof(SimpleRWLock)); + } + + // Now we have a lock we can use to synchronize the remainder of the init. + + UPTR key = (UPTR)GetFullHash(pMD); + + if (s_pCalledMethods == NULL) + { + SimpleWriteLockHolder swlh(s_pCalledMethodsLock); + if (s_pCalledMethods == NULL) + { + PtrHashMap *pMap = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap(); + LockOwner lock = {s_pCalledMethodsLock, IsOwnerOfRWLock}; + pMap->Init(32, NULL, FALSE, &lock); + s_pCalledMethods = pMap; + } + } + else + { + // Try getting an existing value first. 
+ SimpleReadLockHolder srlh(s_pCalledMethodsLock); + MethodDesc *pFound = (MethodDesc *)s_pCalledMethods->LookupValue(key, (LPVOID)pMD); + if (pFound != (MethodDesc *)INVALIDENTRY) + return; + } + + { + SimpleWriteLockHolder swlh(s_pCalledMethodsLock); + s_pCalledMethods->InsertValue(key, (LPVOID)pMD); + } +} + +static void DeleteFromCalledMethods(MethodDesc* pMD) +{ + CONTRACTL + { + MODE_COOPERATIVE; + GC_TRIGGERS; + THROWS; + } + CONTRACTL_END; + + if (pMD->IsNotForDropping()) + return; + PCODE pCode = pMD->GetPreImplementedCode(); + if (pCode != NULL) + return; + + _ASSERTE((s_pCalledMethodsLock == NULL && s_pCalledMethods == NULL) || + (s_pCalledMethodsLock != NULL && s_pCalledMethods != NULL)); + + if (s_pCalledMethodsLock == NULL || s_pCalledMethods == NULL) + return; + + UPTR key = (UPTR)GetFullHash(pMD); + + { + SimpleReadLockHolder srlh(s_pCalledMethodsLock); + MethodDesc *pFound = (MethodDesc *)s_pCalledMethods->LookupValue(key, (LPVOID)pMD); + if (pFound == (MethodDesc *)INVALIDENTRY) + return; + } + + { + SimpleWriteLockHolder swlh(s_pCalledMethodsLock); + s_pCalledMethods->DeleteValue(key, (LPVOID)pMD); + } + return; +} + +StackWalkAction CrawlFrameVisitor(CrawlFrame* pCf, Thread* pMdThread) +{ + CONTRACTL + { + THROWS; + GC_NOTRIGGER; + SO_TOLERANT; + MODE_ANY; + } + CONTRACTL_END; + + MethodDesc* pMD = pCf->GetFunction(); + + // Filter out methods we don't care about + if (pMD == nullptr || !pMD->IsIL() || pMD->IsUnboxingStub() || pMD->GetMethodTable()->Collectible()) + { + return SWA_CONTINUE; + } + + if (s_pExecutedMethods == NULL) + { + PtrHashMap *pMap = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap(); + pMap->Init(TRUE, NULL); + s_pExecutedMethods = pMap; + } + + UPTR key = (UPTR)GetFullHash(pMD); + MethodDesc *pFound = (MethodDesc *)s_pExecutedMethods->LookupValue(key, (LPVOID)pMD); + if (pFound == (MethodDesc *)INVALIDENTRY) + { + s_pExecutedMethods->InsertValue(key, (LPVOID)pMD); + } + + return SWA_CONTINUE; +} + +// Visitor for stack walk callback. 
+StackWalkAction StackWalkCallback(CrawlFrame* pCf, VOID* data) +{ + WRAPPER_NO_CONTRACT; + + // WalkInfo* info = (WalkInfo*) data; + return CrawlFrameVisitor(pCf, (Thread *)data); +} + +#endif // defined(FEATURE_JIT_DROPPING) EXTERN_C void STDCALL ThePreStub(); EXTERN_C void STDCALL ThePreStubPatch(); @@ -67,6 +340,7 @@ PCODE MethodDesc::DoBackpatch(MethodTable * pMT, MethodTable *pDispatchingMT, BO PRECONDITION(pMT == GetMethodTable()); } CONTRACTL_END; + #ifdef FEATURE_INTERPRETER PCODE pTarget = GetMethodEntryPoint(); #else @@ -239,6 +513,172 @@ void DACNotifyCompilationFinished(MethodDesc *methodDesc) #endif // +#if defined(FEATURE_JIT_DROPPING) +static ULONG s_DroppedMethodCounter = 0; +void MethodDesc::DropNativeCode() +{ + WRAPPER_NO_CONTRACT; + SUPPORTS_DAC; + + g_IBCLogger.LogMethodDescAccess(this); + + if (IsNotForDropping() || IsInstantiatingStub() || IsUnboxingStub()) + return; + + PCODE pCode; + if (HasNativeCodeSlot()) + { + pCode = PCODE(NativeCodeSlot::GetValueMaybeNullAtPtr(GetAddrOfNativeCodeSlot()) & ~FIXUP_LIST_MASK); + } + else + { + if (!HasStableEntryPoint() || HasPrecode()) + return; + pCode = GetStableEntryPoint(); + } + + _ASSERTE(pCode != NULL); + _ASSERTE(HasNativeCode()); + + ++s_DroppedMethodCounter; + + if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropMinVal) > s_DroppedMethodCounter) + { + return; + } + if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropMaxVal) < s_DroppedMethodCounter) + { + return; + } + + if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropMaxLevel) == 0) + { + if (LookupOrCreateInNotForDroppingMethods(this)) + return; + } + + MethodTable * pMT = GetMethodTable(); + _ASSERTE(pMT != NULL); + + CodeHeader* pCH = ((CodeHeader*)(pCode & ~1)) - 1; + _ASSERTE(pCH->GetMethodDesc() == this); + + HeapList* pHp = pCH->GetHeapList(); + + _ASSERTE(pHp != NULL && pHp->cBlocks == 1 && pHp->bFull && pHp->bFullForJumpStubs); + + ULONG droppedBytes = ((BYTE*)pHp->endAddress - (BYTE*)pCode); + + s_jitDroppedBytes += droppedBytes; + + pHp->endAddress = pHp->startAddress; + pHp->cBlocks = 0; + pHp->bFull = false; + pHp->bFullForJumpStubs = false; + + ZeroMemory((BYTE *)pCode, droppedBytes); + ZeroMemory((BYTE *)pCH, sizeof(CodeHeader)); + + _ASSERTE(pCode >= pHp->mapBase); + size_t delta = pCode - pHp->mapBase; + size_t pos = ADDR2POS(delta); + DWORD index = (DWORD) (pos >> LOG2_NIBBLES_PER_DWORD); + PTR_DWORD pMap = pHp->pHdrMap; + *(pMap+index) = 0; + + if (HasNativeCodeSlot()) + { + NativeCodeSlot::SetValueMaybeNullAtPtr(GetAddrOfNativeCodeSlot(), NULL); + } + else + { +#ifdef FEATURE_INTERPRETER + SetNativeCodeInterlocked(NULL, NULL, FALSE); +#else + SetNativeCodeInterlocked(NULL, NULL); +#endif + } + if (HasStableEntryPoint()) + { + SetStableEntryPointInterlocked(NULL); + InterlockedUpdateFlags2(enum_flag2_HasStableEntryPoint, FALSE); + *GetAddrOfSlot() = GetTemporaryEntryPoint(); + } + + _ASSERTE(!HasNativeCode()); + + if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropPrintStat) != 0) + { +#ifdef _DEBUG + printf("Dropped %lu %lu %lu @%s@ #%s# {%s}\n", + (unsigned long)s_DroppedMethodCounter, (unsigned long)s_jitDroppedBytes, (unsigned long)s_totalNCSize, m_pszDebugClassName, GetName(), m_pszDebugMethodSignature); +#else + // The m_pszDebug* fields only exist in _DEBUG builds, so print the plain name here. + printf("Dropped %lu %lu %lu #%s#\n", + (unsigned long)s_DroppedMethodCounter, (unsigned long)s_jitDroppedBytes, (unsigned long)s_totalNCSize, GetName()); +#endif + } +} + +static void CheckStacksAndDrop(bool fDoJitDropping) +{ + if (s_totalNCSizeLock == NULL) + { + void *pLockSpace = SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(SimpleRWLock))); + SimpleRWLock* pLock = new (pLockSpace) SimpleRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT); + + if
(FastInterlockCompareExchangePointer(&s_totalNCSizeLock, pLock, NULL) != NULL) + SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->BackoutMem(pLockSpace, sizeof(SimpleRWLock)); + } + + if ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropEnabled) != 0) && + (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropMemThreshold) != 0) && + fDoJitDropping) + { + SimpleReadLockHolder srlh(s_totalNCSizeLock); + + if ((s_totalNCSize - s_jitDroppedBytes) > CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropMemThreshold) && s_pCalledMethods != NULL) + { + EX_TRY + { + // Suspend the runtime. + ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER); + + // Walk all other threads. + Thread* pThread = nullptr; + while ((pThread = ThreadStore::GetThreadList(pThread)) != nullptr) + { + pThread->StackWalkFrames(StackWalkCallback, (VOID *)pThread, ALLOW_ASYNC_STACK_WALK); + } + + PtrHashMap::PtrIterator i = s_pCalledMethods->begin(); + while (!i.end()) + { + MethodDesc *pMD = (MethodDesc *) i.GetValue(); + UPTR key = (UPTR)GetFullHash(pMD); + // s_pExecutedMethods stays NULL when the stack walk saw no managed frames; + // in that case no called method is live, so all of them are droppable. + MethodDesc *pFound = (s_pExecutedMethods != NULL) ? (MethodDesc *)s_pExecutedMethods->LookupValue(key, (LPVOID)pMD) : (MethodDesc *)INVALIDENTRY; + ++i; + if (pFound == (MethodDesc *)INVALIDENTRY) + { + pMD->DropNativeCode(); + SimpleWriteLockHolder swlh(s_pCalledMethodsLock); + s_pCalledMethods->DeleteValue(key, (LPVOID)pMD); + } + } + if (s_pExecutedMethods != NULL) + { + for (PtrHashMap::PtrIterator i = s_pExecutedMethods->begin(); !i.end();) + { + MethodDesc *pMD = (MethodDesc *) i.GetValue(); + UPTR key = (UPTR)GetFullHash(pMD); + ++i; + s_pExecutedMethods->DeleteValue(key, (LPVOID)pMD); + } + // Note: s_pExecutedMethods is placement-new'ed on the loader heap, so it must not + // be freed with delete; it is emptied above and reused by the next drop pass. + } + + ThreadSuspend::RestartEE(FALSE, TRUE); + } + EX_CATCH + { + } + EX_END_CATCH(SwallowAllExceptions); + } + } +} +#endif // ******************************************************************** // README!! // @@ -252,7 +692,11 @@ void DACNotifyCompilationFinished(MethodDesc *methodDesc) // which prevents us from trying to JIT the same method more that once.
+#if defined(FEATURE_JIT_DROPPING) +PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS flags, BOOL fDoJitDropping) +#else PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS flags) +#endif { STANDARD_VM_CONTRACT; @@ -265,8 +709,15 @@ PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS fla GetMethodTable()->GetDebugClassName(), m_pszDebugMethodName)); +#if defined(FEATURE_JIT_DROPPING) + CheckStacksAndDrop(fDoJitDropping); +#endif + PCODE pCode = NULL; ULONG sizeOfCode = 0; +#if defined(FEATURE_JIT_DROPPING) + ULONG totalNCSize = 0; +#endif #ifdef FEATURE_INTERPRETER PCODE pPreviousInterpStub = NULL; BOOL fInterpreted = FALSE; @@ -467,6 +918,7 @@ PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS fla EX_TRY { + PTR_PCODE addrOfSlot = GetAddrOfSlot(); pCode = UnsafeJitFunction(this, ILHeader, flags, &sizeOfCode); } EX_CATCH @@ -521,7 +973,7 @@ PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS fla #endif // FEATURE_INTERPRETER #ifdef FEATURE_MULTICOREJIT - + // If called from multi-core JIT background thread, store code under lock, delay patching until code is queried from application threads if (fBackgroundThread) { @@ -569,6 +1021,31 @@ PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS fla pCode = GetNativeCode(); goto Done; } +#if defined(FEATURE_JIT_DROPPING) + else + { + CodeHeader* pCH = ((CodeHeader*)(pCode & ~1)) - 1; + _ASSERTE(pCH->GetMethodDesc() == this); + + if ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropEnabled) != 0) && + (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropMemThreshold) != 0) && + fDoJitDropping) + { + if (sizeOfCode > 0 && CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropMethodSizeThreshold) < sizeOfCode && + !IsNotForDropping()) + { + LookupOrCreateInCalledMethods(this, pCode); + } + if (sizeOfCode > 0) + { + SimpleWriteLockHolder swlh(s_totalNCSizeLock); + HeapList* pHp = pCH->GetHeapList(); + ULONG jittedBytes = ((BYTE*)pHp->endAddress - (BYTE*)pCode); + s_totalNCSize += jittedBytes; + } + } + } +#endif } #ifdef FEATURE_INTERPRETER @@ -1070,7 +1547,11 @@ extern "C" PCODE STDCALL PreStubWorker(TransitionBlock * pTransitionBlock, Metho } GCX_PREEMP_THREAD_EXISTS(CURRENT_THREAD); +#if defined(FEATURE_JIT_DROPPING) + pbRetVal = pMD->DoPrestub(pDispatchingMT, TRUE); +#else pbRetVal = pMD->DoPrestub(pDispatchingMT); +#endif UNINSTALL_UNWIND_AND_CONTINUE_HANDLER; UNINSTALL_MANAGED_EXCEPTION_DISPATCHER; @@ -1137,7 +1618,11 @@ static void TestSEHGuardPageRestore() // the case of methods that require stubs to be executed first (e.g., remoted methods // that require remoting stubs to be executed first), this stable entrypoint would be a // pointer to the stub, and not a pointer directly to the JITted code. 
+#if defined(FEATURE_JIT_DROPPING) +PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT, BOOL fDoJitDropping) +#else PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT) +#endif { CONTRACT(PCODE) { @@ -1279,7 +1764,15 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT) { LOG((LF_CLASSLOADER, LL_INFO10000, " In PreStubWorker, method already jitted, backpatching call point\n")); - +#if defined(FEATURE_JIT_DROPPING) + if ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropEnabled) != 0) && + (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropMemThreshold) != 0) && + !IsNotForDropping()) + { + DeleteFromCalledMethods(this); + LookupOrCreateInNotForDroppingMethods(this); + } +#endif RETURN DoBackpatch(pMT, pDispatchingMT, TRUE); } @@ -1470,7 +1963,11 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT) // Mark the code as hot in case the method ends up in the native image g_IBCLogger.LogMethodCodeAccess(this); +#if defined(FEATURE_JIT_DROPPING) + pCode = MakeJitWorker(pHeader, CORJIT_FLAGS(), fDoJitDropping); +#else pCode = MakeJitWorker(pHeader, CORJIT_FLAGS()); +#endif #ifdef FEATURE_INTERPRETER if ((pCode != NULL) && !HasStableEntryPoint()) @@ -2050,6 +2547,17 @@ EXTERN_C PCODE STDCALL ExternalMethodFixupWorker(TransitionBlock * pTransitionBl pCode = PatchNonVirtualExternalMethod(pMD, pCode, pImportSection, pIndirection); } } + +#if defined (FEATURE_JIT_DROPPING) + if ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropEnabled) != 0) && + (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitDropMemThreshold) != 0) ) + { + if (pMD != NULL) + { + DeleteFromCalledMethods(pMD); + } + } +#endif } // Force a GC on every jit if the stress level is high enough
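Reviewer note, not part of the patch: the dropping policy is spread across allocCode, MakeJitWorker, CheckStacksAndDrop and MethodDesc::DropNativeCode, so the following minimal, self-contained C++ sketch restates the decision flow in one place. It is an illustration under stated assumptions rather than the runtime code: the thresholds are plain integers here (the patch reads them through CLRConfig), a flat Method struct stands in for MethodDesc plus its PtrHashMap bookkeeping, and the byte accounting ignores code-heap granularity and the min/max debug counters.

// Simplified model of the drop policy in this patch. Illustrative only: a
// "method" is reduced to its native code size plus jitted / on-stack flags.
#include <cstdint>
#include <cstdio>
#include <vector>

struct JitDropConfig {
    bool     enabled;              // JitDropEnabled
    uint64_t memThreshold;         // JitDropMemThreshold (budget for live jitted bytes)
    uint64_t methodSizeThreshold;  // JitDropMethodSizeThreshold (per-method floor)
};

struct Method {
    uint64_t codeSize; // size of the jitted native code
    bool     jitted;   // still has native code
    bool     onStack;  // the stack walk found an activation of this method
};

// Gate applied when a method is jitted: only large-enough methods become drop
// candidates (the patch also filters out virtuals, FCalls, LCG methods, etc.).
bool IsDropCandidate(const JitDropConfig& cfg, const Method& m)
{
    return cfg.enabled && cfg.memThreshold != 0 && m.codeSize > cfg.methodSizeThreshold;
}

// One drop pass: when live jitted bytes exceed the budget, drop every
// candidate that no suspended thread is currently executing.
uint64_t DropPass(const JitDropConfig& cfg, std::vector<Method>& methods, uint64_t liveJittedBytes)
{
    if (!cfg.enabled || cfg.memThreshold == 0 || liveJittedBytes <= cfg.memThreshold)
        return 0;
    uint64_t droppedBytes = 0;
    for (Method& m : methods)
    {
        if (m.jitted && !m.onStack && IsDropCandidate(cfg, m))
        {
            m.jitted = false;           // the patch resets the entry point to the prestub,
            droppedBytes += m.codeSize; // zeroes the code and clears the nibble map entry
        }
    }
    return droppedBytes;
}

int main()
{
    JitDropConfig cfg = { true, 1 << 20, 256 }; // 1 MB budget, 256-byte method floor
    std::vector<Method> methods = {
        { 8192, true, false },  // big and idle: dropped
        { 128, true, false },   // below the size floor: kept
        { 65536, true, true },  // live on some stack: kept
    };
    uint64_t live = 8192 + 128 + 65536 + (1 << 20); // pretend we are over budget
    std::printf("dropped %llu bytes\n", (unsigned long long)DropPass(cfg, methods, live));
    return 0;
}

Assuming the usual CLRConfig convention that a knob registered as W("JitDropEnabled") is read from the COMPlus_JitDropEnabled environment variable, a trial run would set COMPlus_JitDropEnabled=1, a byte budget in COMPlus_JitDropMemThreshold, a per-method floor in COMPlus_JitDropMethodSizeThreshold, and optionally COMPlus_JitDropPrintStat=1 to see the per-drop statistics.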