diff --git a/src/coreclr/inc/utilcode.h b/src/coreclr/inc/utilcode.h
index 6f0f5a1296780..167b4ef9fd950 100644
--- a/src/coreclr/inc/utilcode.h
+++ b/src/coreclr/inc/utilcode.h
@@ -1389,7 +1389,8 @@ T *CUnorderedArrayWithAllocator<T, iGrowInc, ALLOCATOR>::Grow()  // exception if c
 
     // try to allocate memory for reallocation.
     pTemp = ALLOCATOR::AllocThrowing(this, m_iSize+iGrowInc);
-    memcpy (pTemp, m_pTable, m_iSize*sizeof(T));
+    if (m_iSize > 0)
+        memcpy (pTemp, m_pTable, m_iSize*sizeof(T));
     ALLOCATOR::Free(this, m_pTable);
     m_pTable = pTemp;
     m_iSize += iGrowInc;
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index 78ad80d23352c..6861c1343d895 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -1774,6 +1774,9 @@ void Compiler::compInit(ArenaAllocator* pAlloc,
 
 #ifdef DEBUG
     bRangeAllowStress = false;
+
+    // set this early so we can use it without relying on random memory values
+    verbose = compIsForInlining() ? impInlineInfo->InlinerCompiler->verbose : false;
 #endif
 
 #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS
@@ -5561,8 +5564,6 @@ int Compiler::compCompile(CORINFO_MODULE_HANDLE classPtr,
 #ifdef DEBUG
     Compiler* me  = this;
     forceFrameJIT = (void*)&me; // let us see the this pointer in fastchecked build
-    // set this early so we can use it without relying on random memory values
-    verbose = compIsForInlining() ? impInlineInfo->InlinerCompiler->verbose : false;
 #endif
 
 #if FUNC_INFO_LOGGING
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index 7c75196d5d7e3..104d1ae137263 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -9364,7 +9364,7 @@ GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node)
             return;
 
         case GT_CALL:
-            m_statePtr = &*m_node->AsCall()->gtArgs.Args().begin();
+            m_statePtr = m_node->AsCall()->gtArgs.Args().begin().GetArg();
             m_advance  = &GenTreeUseEdgeIterator::AdvanceCall;
             AdvanceCall();
             return;
@@ -9649,7 +9649,7 @@ void GenTreeUseEdgeIterator::AdvanceCall()
                     return;
                 }
             }
-            m_statePtr = &*call->gtArgs.LateArgs().begin();
+            m_statePtr = call->gtArgs.LateArgs().begin().GetArg();
             m_advance  = &GenTreeUseEdgeIterator::AdvanceCall;
             FALLTHROUGH;
 
diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp
index d13e36c63d16c..144d5f3ab1775 100644
--- a/src/coreclr/jit/lsra.cpp
+++ b/src/coreclr/jit/lsra.cpp
@@ -6681,15 +6681,19 @@ void LinearScan::resolveRegisters()
 
     // handle incoming arguments and special temps
    RefPositionIterator refPosIterator     = refPositions.begin();
-    RefPosition*        currentRefPosition = &refPosIterator;
+    RefPosition*        currentRefPosition = refPosIterator != refPositions.end() ? &refPosIterator : nullptr;
 
     if (enregisterLocalVars)
     {
         VarToRegMap entryVarToRegMap = inVarToRegMaps[compiler->fgFirstBB->bbNum];
-        for (; refPosIterator != refPositions.end() &&
-               (currentRefPosition->refType == RefTypeParamDef || currentRefPosition->refType == RefTypeZeroInit);
-             ++refPosIterator, currentRefPosition = &refPosIterator)
+        for (; refPosIterator != refPositions.end(); ++refPosIterator)
         {
+            currentRefPosition = &refPosIterator;
+            if (currentRefPosition->refType != RefTypeParamDef && currentRefPosition->refType != RefTypeZeroInit)
+            {
+                break;
+            }
+
             Interval* interval = currentRefPosition->getInterval();
             assert(interval != nullptr && interval->isLocalVar);
             resolveLocalRef(nullptr, nullptr, currentRefPosition);
@@ -6731,9 +6735,14 @@ void LinearScan::resolveRegisters()
         }
 
         // Handle the DummyDefs, updating the incoming var location.
-        for (; refPosIterator != refPositions.end() && currentRefPosition->refType == RefTypeDummyDef;
-             ++refPosIterator, currentRefPosition = &refPosIterator)
+        for (; refPosIterator != refPositions.end(); ++refPosIterator)
         {
+            currentRefPosition = &refPosIterator;
+            if (currentRefPosition->refType != RefTypeDummyDef)
+            {
+                break;
+            }
+
             assert(currentRefPosition->isIntervalRef());
             // Don't mark dummy defs as reload
             currentRefPosition->reload = false;
@@ -6756,13 +6765,16 @@ void LinearScan::resolveRegisters()
         assert(refPosIterator != refPositions.end());
         assert(currentRefPosition->refType == RefTypeBB);
         ++refPosIterator;
-        currentRefPosition = &refPosIterator;
 
         // Handle the RefPositions for the block
-        for (; refPosIterator != refPositions.end() && currentRefPosition->refType != RefTypeBB &&
-               currentRefPosition->refType != RefTypeDummyDef;
-             ++refPosIterator, currentRefPosition = &refPosIterator)
+        for (; refPosIterator != refPositions.end(); ++refPosIterator)
        {
+            currentRefPosition = &refPosIterator;
+            if (currentRefPosition->refType == RefTypeBB || currentRefPosition->refType == RefTypeDummyDef)
+            {
+                break;
+            }
+
             currentLocation = currentRefPosition->nodeLocation;
 
             // Ensure that the spill & copy info is valid.
@@ -9488,9 +9500,14 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
     if (mode != LSRA_DUMP_PRE)
     {
         printf("Incoming Parameters: ");
-        for (; refPosIterator != refPositions.end() && currentRefPosition->refType != RefTypeBB;
-             ++refPosIterator, currentRefPosition = &refPosIterator)
+        for (; refPosIterator != refPositions.end(); ++refPosIterator)
         {
+            currentRefPosition = &refPosIterator;
+            if (currentRefPosition->refType == RefTypeBB)
+            {
+                break;
+            }
+
             Interval* interval = currentRefPosition->getInterval();
             assert(interval != nullptr && interval->isLocalVar);
             printf(" V%02d", interval->varNum);
@@ -9530,11 +9547,15 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
     {
         bool printedBlockHeader = false;
         // We should find the boundary RefPositions in the order of exposed uses, dummy defs, and the blocks
-        for (; refPosIterator != refPositions.end() &&
-               (currentRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef ||
-                (currentRefPosition->refType == RefTypeBB && !printedBlockHeader));
-             ++refPosIterator, currentRefPosition = &refPosIterator)
+        for (; refPosIterator != refPositions.end(); ++refPosIterator)
         {
+            currentRefPosition = &refPosIterator;
+            if (currentRefPosition->refType != RefTypeExpUse && currentRefPosition->refType != RefTypeDummyDef &&
+                !(currentRefPosition->refType == RefTypeBB && !printedBlockHeader))
+            {
+                break;
+            }
+
             Interval* interval = nullptr;
             if (currentRefPosition->isIntervalRef())
             {
@@ -9613,13 +9634,17 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
         // and combining the fixed regs with their associated def or use
         bool killPrinted = false;
         RefPosition* lastFixedRegRefPos = nullptr;
-        for (; refPosIterator != refPositions.end() &&
-               (currentRefPosition->refType == RefTypeUse || currentRefPosition->refType == RefTypeFixedReg ||
-                currentRefPosition->refType == RefTypeKill || currentRefPosition->refType == RefTypeDef) &&
-               (currentRefPosition->nodeLocation == tree->gtSeqNum ||
-                currentRefPosition->nodeLocation == tree->gtSeqNum + 1);
-             ++refPosIterator, currentRefPosition = &refPosIterator)
+        for (; refPosIterator != refPositions.end(); ++refPosIterator)
         {
+            currentRefPosition = &refPosIterator;
+            if (!(currentRefPosition->refType == RefTypeUse || currentRefPosition->refType == RefTypeFixedReg ||
+                  currentRefPosition->refType == RefTypeKill || currentRefPosition->refType == RefTypeDef) ||
+                !(currentRefPosition->nodeLocation == tree->gtSeqNum ||
+                  currentRefPosition->nodeLocation == tree->gtSeqNum + 1))
+            {
+                break;
+            }
+
             Interval* interval = nullptr;
             if (currentRefPosition->isIntervalRef())
             {
diff --git a/src/coreclr/vm/crst.cpp b/src/coreclr/vm/crst.cpp
index 28136d8fdaad4..748222e1c94be 100644
--- a/src/coreclr/vm/crst.cpp
+++ b/src/coreclr/vm/crst.cpp
@@ -327,9 +327,7 @@ void CrstBase::Enter(INDEBUG(NoLevelCheckFlag noLevelCheckFlag/* = CRST_LEVEL_CH
 
     if (fToggle)
     {
-        pThread->DisablePreemptiveGC();
-    }
 }
 
@@ -419,14 +417,17 @@
     }
 
     // If a thread suspends another thread, it cannot acquire locks.
-    if ((pThread != NULL) &&
-        (pThread->Debug_GetUnsafeSuspendeeCount() != 0))
+    if ((pThread != NULL)
+        && (pThread->Debug_GetUnsafeSuspendeeCount() != 0))
     {
         CONSISTENCY_CHECK_MSGF(false, ("Suspender thread taking non-suspender lock:'%s'", m_tag));
     }
 
-    if (ThreadStore::s_pThreadStore->IsCrstForThreadStore(this))
+    if ((ThreadStore::s_pThreadStore != NULL)
+        && ThreadStore::s_pThreadStore->IsCrstForThreadStore(this))
+    {
         return;
+    }
 
     if (m_dwFlags & CRST_UNSAFE_COOPGC)
     {
@@ -492,8 +493,11 @@
         }
     }
 
-    if (ThreadStore::s_pThreadStore->IsCrstForThreadStore(this))
+    if ((ThreadStore::s_pThreadStore != NULL)
+        && ThreadStore::s_pThreadStore->IsCrstForThreadStore(this))
+    {
         return;
+    }
 
     if (m_dwFlags & (CRST_UNSAFE_ANYMODE | CRST_UNSAFE_COOPGC | CRST_GC_NOTRIGGER_WHEN_TAKEN))
     {
@@ -700,15 +704,13 @@ BOOL CrstBase::IsSafeToTake()
     // which case it must always be taken in this mode.
     // If there is no thread object, we ignore the check since this thread isn't
     // coordinated with the GC.
-    Thread * pThread;
-
-    pThread = GetThreadNULLOk();
+    Thread * pThread = GetThreadNULLOk();
 
     _ASSERTE(pThread == NULL ||
              (pThread->PreemptiveGCDisabled() == ((m_dwFlags & CRST_UNSAFE_COOPGC) != 0)) ||
             ((m_dwFlags & (CRST_UNSAFE_ANYMODE | CRST_GC_NOTRIGGER_WHEN_TAKEN)) != 0) ||
             (GCHeapUtilities::IsGCInProgress() && pThread == ThreadSuspend::GetSuspensionThread()));
-
+
     if (m_holderthreadid.IsCurrentThread())
     {
diff --git a/src/coreclr/vm/dllimport.cpp b/src/coreclr/vm/dllimport.cpp
index 98580e89a1c49..239480035443d 100644
--- a/src/coreclr/vm/dllimport.cpp
+++ b/src/coreclr/vm/dllimport.cpp
@@ -4119,7 +4119,8 @@ namespace
             //
             for (int i = 0; i < pParams->m_nParamTokens; ++i)
             {
-                memcpy(pBlobParams, paramInfos[i].pvNativeType, paramInfos[i].cbNativeType);
+                if (paramInfos[i].cbNativeType > 0)
+                    memcpy(pBlobParams, paramInfos[i].pvNativeType, paramInfos[i].cbNativeType);
                 pBlobParams += paramInfos[i].cbNativeType;
             }
 
diff --git a/src/coreclr/vm/methodtablebuilder.cpp b/src/coreclr/vm/methodtablebuilder.cpp
index c7c7e40aa0884..ecd1e9d22916c 100644
--- a/src/coreclr/vm/methodtablebuilder.cpp
+++ b/src/coreclr/vm/methodtablebuilder.cpp
@@ -396,7 +396,8 @@ MethodTableBuilder::ExpandApproxInterface(
         }
 
         bmtInterfaceEntry * pNewMap = (bmtInterfaceEntry *)new (GetStackingAllocator()) BYTE[safeSize.Value()];
-        memcpy(pNewMap, bmtInterface->pInterfaceMap, sizeof(bmtInterfaceEntry) * bmtInterface->dwInterfaceMapAllocated);
+        if (bmtInterface->dwInterfaceMapAllocated > 0)
+            memcpy(pNewMap, bmtInterface->pInterfaceMap, sizeof(bmtInterfaceEntry) * bmtInterface->dwInterfaceMapAllocated);
 
         bmtInterface->pInterfaceMap = pNewMap;
         bmtInterface->dwInterfaceMapAllocated = dwNewAllocated.Value();
@@ -11690,7 +11691,8 @@ void MethodTableBuilder::bmtMethodImplInfo::AddMethodImpl(
         // If we have to grow this array, we will not free the old array before we clean up the BuildMethodTable operation
         // because this is a stacking allocator. However, the old array will get freed when all the stack allocator is freed.
         Entry *rgEntriesNew = new (pStackingAllocator) Entry[newEntriesCount];
-        memcpy(rgEntriesNew, rgEntries, sizeof(Entry) * cMaxIndex);
+        if (cMaxIndex > 0)
+            memcpy(rgEntriesNew, rgEntries, sizeof(Entry) * cMaxIndex);
 
         // Start using newly allocated array.
         rgEntries = rgEntriesNew;