Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[release/8.0] Fix a deadlock in NonGC + Profiler API #91130

Merged
merged 21 commits into from
Aug 25, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions src/coreclr/gc/gc.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9955,6 +9955,20 @@ BOOL gc_heap::insert_ro_segment (heap_segment* seg)
return TRUE;
}

// Updates the allocated/committed watermarks of a read-only (frozen) segment
// under the GC spin lock, so the mutator's bump-pointer advance cannot race
// with the GC reading these fields.
//
// seg       - frozen segment previously inserted via insert_ro_segment
// allocated - new end of allocated objects within the segment
// committed - new end of committed memory within the segment
void gc_heap::update_ro_segment (heap_segment* seg, uint8_t* allocated, uint8_t* committed)
{
// Take the (static) GC lock; which gc_heap instance this is called on does not matter.
enter_spin_lock (&gc_heap::gc_lock);

assert (use_frozen_segments_p);
assert (heap_segment_read_only_p (seg));
// Invariant: allocated <= committed <= reserved.
assert (allocated <= committed);
assert (committed <= heap_segment_reserved (seg));
heap_segment_allocated (seg) = allocated;
heap_segment_committed (seg) = committed;

leave_spin_lock (&gc_heap::gc_lock);
}

// No one is calling this function right now. If this is getting called we need
// to take care of decommitting the mark array for it - we will need to remember
// which portion of the mark array was committed and only decommit that.
Expand Down
9 changes: 6 additions & 3 deletions src/coreclr/gc/gcee.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -510,9 +510,12 @@ bool GCHeap::IsInFrozenSegment(Object *object)
// Notifies the GC that a frozen segment's allocated/committed watermarks moved.
// The update is routed through gc_heap::update_ro_segment so the segment fields
// are written under the GC spin lock; writing heap_segment_allocated/committed
// directly here (as the pre-fix code did) would race with the GC reading them.
void GCHeap::UpdateFrozenSegment(segment_handle seg, uint8_t* allocated, uint8_t* committed)
{
#ifdef FEATURE_BASICFREEZE
#ifdef MULTIPLE_HEAPS
    // update_ro_segment takes the static gc_heap::gc_lock, so any heap
    // instance works — use heap 0.
    gc_heap* heap = gc_heap::g_heaps[0];
#else
    gc_heap* heap = pGenGCHeap;
#endif //MULTIPLE_HEAPS
    heap->update_ro_segment (reinterpret_cast<heap_segment*>(seg), allocated, committed);
#endif // FEATURE_BASICFREEZE
}

Expand Down
1 change: 1 addition & 0 deletions src/coreclr/gc/gcpriv.h
Original file line number Diff line number Diff line change
Expand Up @@ -2380,6 +2380,7 @@ class gc_heap
#ifdef FEATURE_BASICFREEZE
PER_HEAP_METHOD BOOL insert_ro_segment (heap_segment* seg);
PER_HEAP_METHOD void remove_ro_segment (heap_segment* seg);
PER_HEAP_METHOD void update_ro_segment (heap_segment* seg, uint8_t* allocated, uint8_t* committed);
#endif //FEATURE_BASICFREEZE
PER_HEAP_METHOD BOOL set_ro_segment_in_range (heap_segment* seg);
#ifndef USE_REGIONS
Expand Down
18 changes: 15 additions & 3 deletions src/coreclr/vm/appdomain.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -2451,12 +2451,24 @@ class SystemDomain : public BaseDomain
}
// Returns the process-wide FrozenObjectHeapManager, lazily creating it on
// first use. May throw (allocation in LazyInitFrozenObjectsHeap) and must be
// called in cooperative GC mode.
//
// VolatileLoad is used for both the check and the returned value so a thread
// that loses the init race still observes the fully published instance.
static FrozenObjectHeapManager* GetFrozenObjectHeapManager()
{
    CONTRACTL
    {
        THROWS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    if (VolatileLoad(&m_FrozenObjectHeapManager) == nullptr)
    {
        LazyInitFrozenObjectsHeap();
    }
    return VolatileLoad(&m_FrozenObjectHeapManager);
}
// Non-throwing accessor: returns the FrozenObjectHeapManager if it has already
// been created, or nullptr otherwise. Never triggers lazy initialization, so
// it is safe to call from constrained contexts (LIMITED_METHOD_CONTRACT).
static FrozenObjectHeapManager* GetFrozenObjectHeapManagerNoThrow()
{
LIMITED_METHOD_CONTRACT;

return VolatileLoad(&m_FrozenObjectHeapManager);
}
#endif // DACCESS_COMPILE

Expand Down
196 changes: 119 additions & 77 deletions src/coreclr/vm/frozenobjectheap.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,18 @@
#define FOH_COMMIT_SIZE (64 * 1024)

FrozenObjectHeapManager::FrozenObjectHeapManager():
m_Crst(CrstFrozenObjectHeap, CRST_UNSAFE_COOPGC),
m_Crst(CrstFrozenObjectHeap, CRST_UNSAFE_ANYMODE),
m_SegmentRegistrationCrst(CrstFrozenObjectHeap),
m_CurrentSegment(nullptr)
{
}

// Allocates an object of the give size (including header) on a frozen segment.
// May return nullptr if object is too large (larger than FOH_COMMIT_SIZE)
// in such cases caller is responsible to find a more appropriate heap to allocate it
Object* FrozenObjectHeapManager::TryAllocateObject(PTR_MethodTable type, size_t objectSize, bool publish)

Object* FrozenObjectHeapManager::TryAllocateObject(PTR_MethodTable type, size_t objectSize,
void(*initFunc)(Object*, void*), void* pParam)
{
CONTRACTL
{
Expand All @@ -33,64 +36,83 @@ Object* FrozenObjectHeapManager::TryAllocateObject(PTR_MethodTable type, size_t
#else // FEATURE_BASICFREEZE

Object* obj = nullptr;
{
CrstHolder ch(&m_Crst);

_ASSERT(type != nullptr);
_ASSERT(FOH_COMMIT_SIZE >= MIN_OBJECT_SIZE);

// Currently we don't support frozen objects with special alignment requirements
// TODO: We should also give up on arrays of doubles on 32-bit platforms.
// (we currently never allocate them on frozen segments)
#ifdef FEATURE_64BIT_ALIGNMENT
if (type->RequiresAlign8())
{
// Align8 objects are not supported yet
return nullptr;
}
#endif
FrozenObjectSegment* curSeg = nullptr;
uint8_t* curSegmentCurrent = nullptr;
size_t curSegSizeCommitted = 0;

// NOTE: objectSize is expected be the full size including header
_ASSERT(objectSize >= MIN_OBJECT_SIZE);

if (objectSize > FOH_COMMIT_SIZE)
{
GCX_PREEMP();
{
// The current design doesn't allow objects larger than FOH_COMMIT_SIZE and
// since FrozenObjectHeap is just an optimization, let's not fill it with huge objects.
return nullptr;
}

if (m_CurrentSegment == nullptr)
CrstHolder ch(&m_Crst);

_ASSERT(type != nullptr);
_ASSERT(FOH_COMMIT_SIZE >= MIN_OBJECT_SIZE);

// Currently we don't support frozen objects with special alignment requirements
// TODO: We should also give up on arrays of doubles on 32-bit platforms.
// (we currently never allocate them on frozen segments)
#ifdef FEATURE_64BIT_ALIGNMENT
if (type->RequiresAlign8())
{
// Align8 objects are not supported yet
return nullptr;
}
#endif

// NOTE: objectSize is expected be the full size including header
_ASSERT(objectSize >= MIN_OBJECT_SIZE);

if (objectSize > FOH_COMMIT_SIZE)
{
// The current design doesn't allow objects larger than FOH_COMMIT_SIZE and
// since FrozenObjectHeap is just an optimization, let's not fill it with huge objects.
return nullptr;
}

obj = m_CurrentSegment == nullptr ? nullptr : m_CurrentSegment->TryAllocateObject(type, objectSize);
// obj is nullptr if the current segment is full or hasn't been allocated yet
if (obj == nullptr)
{
size_t newSegmentSize = FOH_SEGMENT_DEFAULT_SIZE;
if (m_CurrentSegment != nullptr)
{
// Double the reserved size to reduce the number of frozen segments in apps with lots of frozen objects
// Use the same size in case if prevSegmentSize*2 operation overflows.
const size_t prevSegmentSize = m_CurrentSegment->m_Size;
newSegmentSize = max(prevSegmentSize, prevSegmentSize * 2);
}

m_CurrentSegment = new FrozenObjectSegment(newSegmentSize);
m_FrozenSegments.Append(m_CurrentSegment);

// Try again
obj = m_CurrentSegment->TryAllocateObject(type, objectSize);

// This time it's not expected to be null
_ASSERT(obj != nullptr);
}

if (initFunc != nullptr)
{
initFunc(obj, pParam);
}

curSeg = m_CurrentSegment;
curSegSizeCommitted = curSeg->m_SizeCommitted;
curSegmentCurrent = curSeg->m_pCurrent;
} // end of m_Crst lock

// Let GC know about the new segment or changes in it.
// We do it under a new lock because the main one (m_Crst) can be used by Profiler in a GC's thread
// and that might cause deadlocks since RegisterFrozenSegment may stuck on GC's lock.
{
// Create the first segment on first allocation
m_CurrentSegment = new FrozenObjectSegment(FOH_SEGMENT_DEFAULT_SIZE);
m_FrozenSegments.Append(m_CurrentSegment);
_ASSERT(m_CurrentSegment != nullptr);
CrstHolder regLock(&m_SegmentRegistrationCrst);
curSeg->RegisterOrUpdate(curSegmentCurrent, curSegSizeCommitted);
}

obj = m_CurrentSegment->TryAllocateObject(type, objectSize);

// The only case where it can be null is when the current segment is full and we need
// to create a new one
if (obj == nullptr)
{
// Double the reserved size to reduce the number of frozen segments in apps with lots of frozen objects
// Use the same size in case if prevSegmentSize*2 operation overflows.
size_t prevSegmentSize = m_CurrentSegment->GetSize();
m_CurrentSegment = new FrozenObjectSegment(max(prevSegmentSize, prevSegmentSize * 2));
m_FrozenSegments.Append(m_CurrentSegment);

// Try again
obj = m_CurrentSegment->TryAllocateObject(type, objectSize);
} // end of GCX_PREEMP

// This time it's not expected to be null
_ASSERT(obj != nullptr);
}
}
if (publish)
{
PublishFrozenObject(obj);
}
PublishFrozenObject(obj);

return obj;
#endif // !FEATURE_BASICFREEZE
Expand All @@ -101,10 +123,10 @@ Object* FrozenObjectHeapManager::TryAllocateObject(PTR_MethodTable type, size_t
FrozenObjectSegment::FrozenObjectSegment(size_t sizeHint) :
m_pStart(nullptr),
m_pCurrent(nullptr),
m_pCurrentRegistered(nullptr),
m_SizeCommitted(0),
m_Size(sizeHint),
m_SegmentHandle(nullptr)
COMMA_INDEBUG(m_ObjectsCount(0))
{
_ASSERT(m_Size > FOH_COMMIT_SIZE);
_ASSERT(m_Size % FOH_COMMIT_SIZE == 0);
Expand Down Expand Up @@ -135,34 +157,59 @@ FrozenObjectSegment::FrozenObjectSegment(size_t sizeHint) :
ThrowOutOfMemory();
}

m_pStart = static_cast<uint8_t*>(committedAlloc);
m_pCurrent = m_pStart + sizeof(ObjHeader);
m_SizeCommitted = FOH_COMMIT_SIZE;

// ClrVirtualAlloc is expected to be PageSize-aligned so we can expect
// DATA_ALIGNMENT alignment as well
_ASSERT(IS_ALIGNED(committedAlloc, DATA_ALIGNMENT));
}

segment_info si;
si.pvMem = committedAlloc;
si.ibFirstObject = sizeof(ObjHeader);
si.ibAllocated = si.ibFirstObject;
si.ibCommit = FOH_COMMIT_SIZE;
si.ibReserved = m_Size;

m_SegmentHandle = GCHeapUtilities::GetGCHeap()->RegisterFrozenSegment(&si);
if (m_SegmentHandle == nullptr)
void FrozenObjectSegment::RegisterOrUpdate(uint8_t* current, size_t sizeCommited)
{
CONTRACTL
{
ClrVirtualFree(alloc, 0, MEM_RELEASE);
ThrowOutOfMemory();
THROWS;
MODE_PREEMPTIVE;
}
CONTRACTL_END

m_pStart = static_cast<uint8_t*>(committedAlloc);
m_pCurrent = m_pStart + sizeof(ObjHeader);
m_SizeCommitted = si.ibCommit;
INDEBUG(m_ObjectsCount = 0);
return;
if (m_pCurrentRegistered == nullptr)
{
segment_info si;
si.pvMem = m_pStart;
si.ibFirstObject = sizeof(ObjHeader);
si.ibAllocated = (size_t)current;
si.ibCommit = sizeCommited;
si.ibReserved = m_Size;

// NOTE: RegisterFrozenSegment may take a GC lock inside.
m_SegmentHandle = GCHeapUtilities::GetGCHeap()->RegisterFrozenSegment(&si);
if (m_SegmentHandle == nullptr)
{
ThrowOutOfMemory();
}
m_pCurrentRegistered = current;
}
else
{
if (current > m_pCurrentRegistered)
{
GCHeapUtilities::GetGCHeap()->UpdateFrozenSegment(
m_SegmentHandle, current, m_pStart + sizeCommited);
m_pCurrentRegistered = current;
}
else
{
// Some other thread already advanced it.
}
}
}

Object* FrozenObjectSegment::TryAllocateObject(PTR_MethodTable type, size_t objectSize)
{
_ASSERT(m_pStart != nullptr && m_Size > 0 && m_SegmentHandle != nullptr); // Expected to be inited
_ASSERT((m_pStart != nullptr) && (m_Size > 0));
_ASSERT(IS_ALIGNED(m_pCurrent, DATA_ALIGNMENT));
_ASSERT(IS_ALIGNED(objectSize, DATA_ALIGNMENT));
_ASSERT(objectSize <= FOH_COMMIT_SIZE);
Expand Down Expand Up @@ -194,16 +241,11 @@ Object* FrozenObjectSegment::TryAllocateObject(PTR_MethodTable type, size_t obje
m_SizeCommitted += FOH_COMMIT_SIZE;
}

INDEBUG(m_ObjectsCount++);

Object* object = reinterpret_cast<Object*>(m_pCurrent);
object->SetMethodTable(type);

m_pCurrent += objectSize;

// Notify GC that we bumped the pointer and, probably, committed more memory in the reserved part
GCHeapUtilities::GetGCHeap()->UpdateFrozenSegment(m_SegmentHandle, m_pCurrent, m_pStart + m_SizeCommitted);

return object;
}

Expand Down
19 changes: 13 additions & 6 deletions src/coreclr/vm/frozenobjectheap.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,12 @@ class FrozenObjectHeapManager
{
public:
FrozenObjectHeapManager();
Object* TryAllocateObject(PTR_MethodTable type, size_t objectSize, bool publish = true);
Object* TryAllocateObject(PTR_MethodTable type, size_t objectSize,
void(*initFunc)(Object*,void*) = nullptr, void* pParam = nullptr);

private:
Crst m_Crst;
Crst m_SegmentRegistrationCrst;
SArray<FrozenObjectSegment*> m_FrozenSegments;
FrozenObjectSegment* m_CurrentSegment;

Expand All @@ -43,10 +45,7 @@ class FrozenObjectSegment
public:
FrozenObjectSegment(size_t sizeHint);
Object* TryAllocateObject(PTR_MethodTable type, size_t objectSize);
size_t GetSize() const
{
return m_Size;
}
void RegisterOrUpdate(uint8_t* current, size_t sizeCommited);

private:
Object* GetFirstObject() const;
Expand All @@ -55,12 +54,20 @@ class FrozenObjectSegment
// Start of the reserved memory, the first object starts at "m_pStart + sizeof(ObjHeader)" (its pMT)
uint8_t* m_pStart;

// NOTE: To handle potential race conditions, only m_[x]Registered fields should be accessed
// externally as they guarantee that GC is aware of the current state of the segment.

// Pointer to the end of the current segment, ready to be used as a pMT for a new object
// meaning that "m_pCurrent - sizeof(ObjHeader)" is the actual start of the new object (header).
//
// m_pCurrent <= m_SizeCommitted
uint8_t* m_pCurrent;

// Last known value of m_pCurrent that GC is aware of.
//
// m_pCurrentRegistered <= m_pCurrent
uint8_t* m_pCurrentRegistered;

// Memory committed in the current segment
//
// m_SizeCommitted <= m_pStart + FOH_SIZE_RESERVED
Expand All @@ -70,10 +77,10 @@ class FrozenObjectSegment
size_t m_Size;

segment_handle m_SegmentHandle;
INDEBUG(size_t m_ObjectsCount);

friend class ProfilerObjectEnum;
friend class ProfToEEInterfaceImpl;
friend class FrozenObjectHeapManager;
};

#endif // _FROZENOBJECTHEAP_H
Expand Down
Loading
Loading