runtime: add heap lock assertions
Some functions that previously required holding the heap lock _or_
stopping the world have been simplified to require just the heap lock.
This is conceptually simpler, and taking the heap lock while the world
is stopped is guaranteed not to contend. This was only done for
functions already called on the systemstack, to avoid adding too many
extra systemstack calls in the GC.

Updates #40677

Change-Id: I15aa1dadcdd1a81aac3d2a9ecad6e7d0377befdc
Reviewed-on: https://go-review.googlesource.com/c/go/+/250262
Run-TryBot: Michael Pratt <mpratt@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Trust: Michael Pratt <mpratt@google.com>
prattmic committed Oct 30, 2020
1 parent 6abbfc1 commit 9393b5b
Showing 8 changed files with 139 additions and 13 deletions.
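
To make the pattern from the commit message concrete before the per-file diffs, here is a minimal sketch, not part of the commit, of how a heap-lock requirement is now both documented and asserted, and how a caller on the system stack satisfies it. The doSomethingLocked/doSomething names are hypothetical; assertLockHeld, lock, unlock, and systemstack are used exactly as they appear in the hunks below.

// doSomethingLocked is a hypothetical helper following the commit's
// convention: the lock requirement is stated in the doc comment and is
// also checked at run time.
//
// h.lock must be held.
//
// Must run on the system stack because the caller holds the heap lock.
//
//go:systemstack
func (h *mheap) doSomethingLocked() {
	assertLockHeld(&h.lock) // throws (when lock checking is enabled) if h.lock is not held
	// ... work that mutates heap metadata ...
}

// doSomething takes the heap lock around the call, on the system stack,
// so the assertion above is satisfied.
func (h *mheap) doSomething() {
	systemstack(func() {
		lock(&h.lock)
		h.doSomethingLocked()
		unlock(&h.lock)
	})
}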
61 changes: 55 additions & 6 deletions src/runtime/export_test.go
@@ -743,7 +743,16 @@ func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
return (*pageCache)(c).alloc(npages)
}
func (c *PageCache) Flush(s *PageAlloc) {
(*pageCache)(c).flush((*pageAlloc)(s))
cp := (*pageCache)(c)
sp := (*pageAlloc)(s)

systemstack(func() {
// None of the tests need any higher-level locking, so we just
// take the lock internally.
lock(sp.mheapLock)
cp.flush(sp)
unlock(sp.mheapLock)
})
}

// Expose chunk index type.
@@ -754,20 +763,50 @@ type ChunkIdx chunkIdx
type PageAlloc pageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
return (*pageAlloc)(p).alloc(npages)
pp := (*pageAlloc)(p)

var addr, scav uintptr
systemstack(func() {
// None of the tests need any higher-level locking, so we just
// take the lock internally.
lock(pp.mheapLock)
addr, scav = pp.alloc(npages)
unlock(pp.mheapLock)
})
return addr, scav
}
func (p *PageAlloc) AllocToCache() PageCache {
return PageCache((*pageAlloc)(p).allocToCache())
pp := (*pageAlloc)(p)

var c PageCache
systemstack(func() {
// None of the tests need any higher-level locking, so we just
// take the lock internally.
lock(pp.mheapLock)
c = PageCache(pp.allocToCache())
unlock(pp.mheapLock)
})
return c
}
func (p *PageAlloc) Free(base, npages uintptr) {
(*pageAlloc)(p).free(base, npages)
pp := (*pageAlloc)(p)

systemstack(func() {
// None of the tests need any higher-level locking, so we just
// take the lock internally.
lock(pp.mheapLock)
pp.free(base, npages)
unlock(pp.mheapLock)
})
}
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) Scavenge(nbytes uintptr, mayUnlock bool) (r uintptr) {
pp := (*pageAlloc)(p)
systemstack(func() {
// None of the tests need any higher-level locking, so we just
// take the lock internally.
lock(pp.mheapLock)
r = pp.scavenge(nbytes, mayUnlock)
unlock(pp.mheapLock)
@@ -926,7 +965,11 @@ func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
addr := chunkBase(chunkIdx(i))

// Mark the chunk's existence in the pageAlloc.
p.grow(addr, pallocChunkBytes)
systemstack(func() {
lock(p.mheapLock)
p.grow(addr, pallocChunkBytes)
unlock(p.mheapLock)
})

// Initialize the bitmap and update pageAlloc metadata.
chunk := p.chunkOf(chunkIndex(addr))
@@ -957,13 +1000,19 @@ func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
}

// Update heap metadata for the allocRange calls above.
p.update(addr, pallocChunkPages, false, false)
systemstack(func() {
lock(p.mheapLock)
p.update(addr, pallocChunkPages, false, false)
unlock(p.mheapLock)
})
}

systemstack(func() {
lock(p.mheapLock)
p.scavengeStartGen()
unlock(p.mheapLock)
})

return (*PageAlloc)(p)
}

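For context, a sketch of how a test might drive the exported wrappers above. The test itself is not part of the commit, and BaseChunkIdx and the FreePageAlloc cleanup helper are assumed to be provided elsewhere in export_test.go.

package runtime_test

import (
	"testing"

	. "runtime"
)

// TestPageAllocRoundTrip is a hypothetical test: it builds a one-chunk page
// allocator, allocates and frees a page, then scavenges, relying on the
// wrappers above to take the heap lock on the system stack internally.
func TestPageAllocRoundTrip(t *testing.T) {
	// BaseChunkIdx is an assumed export identifying the first test chunk.
	p := NewPageAlloc(map[ChunkIdx][]BitRange{
		BaseChunkIdx: {}, // one fully free chunk
	}, nil)
	defer FreePageAlloc(p) // assumed cleanup helper

	addr, scav := p.Alloc(1)
	if addr == 0 {
		t.Fatalf("expected a page, got scavenged=%d", scav)
	}
	p.Free(addr, 1)

	if released := p.Scavenge(^uintptr(0), false); released == 0 {
		t.Logf("nothing left to scavenge")
	}
}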
2 changes: 2 additions & 0 deletions src/runtime/malloc.go
@@ -627,6 +627,8 @@ func mallocinit() {
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
assertLockHeld(&h.lock)

n = alignUp(n, heapArenaBytes)

// First, try the arena pre-reservation.
4 changes: 4 additions & 0 deletions src/runtime/mgc.go
@@ -821,6 +821,8 @@ func pollFractionalWorkerExit() bool {
//
// mheap_.lock must be held or the world must be stopped.
func gcSetTriggerRatio(triggerRatio float64) {
assertWorldStoppedOrLockHeld(&mheap_.lock)

// Compute the next GC goal, which is when the allocated heap
// has grown by GOGC/100 over the heap marked by the last
// cycle.
@@ -960,6 +962,8 @@ func gcSetTriggerRatio(triggerRatio float64) {
//
// mheap_.lock must be held or the world must be stopped.
func gcEffectiveGrowthRatio() float64 {
assertWorldStoppedOrLockHeld(&mheap_.lock)

egogc := float64(atomic.Load64(&memstats.next_gc)-memstats.heap_marked) / float64(memstats.heap_marked)
if egogc < 0 {
// Shouldn't happen, but just in case.
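gcSetTriggerRatio and gcEffectiveGrowthRatio may be called either with the world stopped or with mheap_.lock held. A hypothetical caller sketch showing the second option, using the same lock-on-the-system-stack pattern this commit applies to the test wrappers:

// readEffectiveGrowthRatio is not a real runtime function; it illustrates
// satisfying the "lock held or world stopped" contract by taking the heap
// lock on the system stack.
func readEffectiveGrowthRatio() float64 {
	var egogc float64
	systemstack(func() {
		lock(&mheap_.lock)
		egogc = gcEffectiveGrowthRatio()
		unlock(&mheap_.lock)
	})
	return egogc
}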
18 changes: 18 additions & 0 deletions src/runtime/mgcscavenge.go
@@ -397,6 +397,8 @@ func bgscavenge(c chan int) {
//
//go:systemstack
func (p *pageAlloc) scavenge(nbytes uintptr, mayUnlock bool) uintptr {
assertLockHeld(p.mheapLock)

var (
addrs addrRange
gen uint32
@@ -446,6 +448,8 @@ func printScavTrace(gen uint32, released uintptr, forced bool) {
//
//go:systemstack
func (p *pageAlloc) scavengeStartGen() {
assertLockHeld(p.mheapLock)

if debug.scavtrace > 0 {
printScavTrace(p.scav.gen, p.scav.released, false)
}
@@ -495,6 +499,8 @@ func (p *pageAlloc) scavengeStartGen() {
//
//go:systemstack
func (p *pageAlloc) scavengeReserve() (addrRange, uint32) {
assertLockHeld(p.mheapLock)

// Start by reserving the minimum.
r := p.scav.inUse.removeLast(p.scav.reservationBytes)

@@ -525,6 +531,8 @@ func (p *pageAlloc) scavengeReserve() (addrRange, uint32) {
//
//go:systemstack
func (p *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
assertLockHeld(p.mheapLock)

if r.size() == 0 || gen != p.scav.gen {
return
}
@@ -552,6 +560,8 @@ func (p *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
//
//go:systemstack
func (p *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (uintptr, addrRange) {
assertLockHeld(p.mheapLock)

// Defensively check if we've received an empty address range.
// If so, just return.
if work.size() == 0 {
@@ -610,6 +620,8 @@ func (p *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (uintptr, addrRange) {
// If we found something, scavenge it and return!
if npages != 0 {
work.limit = offAddr{p.scavengeRangeLocked(maxChunk, base, npages)}

assertLockHeld(p.mheapLock) // Must be locked on return.
return uintptr(npages) * pageSize, work
}
}
@@ -674,12 +686,16 @@ func (p *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (uintptr, addrRange) {
base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
if npages > 0 {
work.limit = offAddr{p.scavengeRangeLocked(candidateChunkIdx, base, npages)}

assertLockHeld(p.mheapLock) // Must be locked on return.
return uintptr(npages) * pageSize, work
}

// We were fooled, so let's continue from where we left off.
work.limit = offAddr{chunkBase(candidateChunkIdx)}
}

assertLockHeld(p.mheapLock) // Must be locked on return.
return 0, work
}

@@ -692,6 +708,8 @@ func (p *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (uintptr, addrRange) {
//
// p.mheapLock must be held.
func (p *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr {
assertLockHeld(p.mheapLock)

p.chunkOf(ci).scavenged.setRange(base, npages)

// Compute the full address for the start of the range.
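scavengeOne may drop and retake the heap lock when mayUnlock is set, which is why the new assertions are repeated at every return path ("Must be locked on return") rather than only at entry. A schematic of that pattern, with hypothetical names:

// processLocked mirrors the shape of scavengeOne and reclaimChunk: the heap
// lock may be released around slow work, so the locked-on-return invariant is
// asserted at each exit as well as at entry.
func (h *mheap) processLocked(mayUnlock bool) uintptr {
	assertLockHeld(&h.lock)

	if mayUnlock {
		unlock(&h.lock)
		// ... expensive work that must not hold the heap lock ...
		lock(&h.lock)
	}

	assertLockHeld(&h.lock) // Must be locked on return.
	return 0
}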
29 changes: 23 additions & 6 deletions src/runtime/mheap.go
@@ -483,10 +483,15 @@ func (s *mspan) layout() (size, n, total uintptr) {
// indirect call from the fixalloc initializer, the compiler can't see
// this.
//
// The heap lock must be held.
//
//go:nowritebarrierrec
func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
h := (*mheap)(vh)
s := (*mspan)(p)

assertLockHeld(&h.lock)

if len(h.allspans) >= cap(h.allspans) {
n := 64 * 1024 / sys.PtrSize
if n < cap(h.allspans)*3/2 {
@@ -721,7 +726,7 @@ func (h *mheap) init() {
//
// reclaim implements the page-reclaimer half of the sweeper.
//
// h must NOT be locked.
// h.lock must NOT be held.
func (h *mheap) reclaim(npage uintptr) {
// TODO(austin): Half of the time spent freeing spans is in
// locking/unlocking the heap (even with low contention). We
@@ -804,6 +809,8 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
// In particular, if a span were freed and merged concurrently
// with this probing heapArena.spans, it would be possible to
// observe arbitrary, stale span pointers.
assertLockHeld(&h.lock)

n0 := n
var nFreed uintptr
sg := h.sweepgen
@@ -858,6 +865,8 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
traceGCSweepSpan((n0 - nFreed) * pageSize)
lock(&h.lock)
}

assertLockHeld(&h.lock) // Must be locked on return.
return nFreed
}

Expand Down Expand Up @@ -1011,7 +1020,7 @@ func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) {
// tryAllocMSpan attempts to allocate an mspan object from
// the P-local cache, but may fail.
//
// h need not be locked.
// h.lock need not be held.
//
// This caller must ensure that its P won't change underneath
// it during this function. Currently to ensure that we enforce
@@ -1035,7 +1044,7 @@ func (h *mheap) tryAllocMSpan() *mspan {

// allocMSpanLocked allocates an mspan object.
//
// h must be locked.
// h.lock must be held.
//
// allocMSpanLocked must be called on the system stack because
// its caller holds the heap lock. See mheap for details.
@@ -1044,6 +1053,8 @@ func (h *mheap) tryAllocMSpan() *mspan {
//
//go:systemstack
func (h *mheap) allocMSpanLocked() *mspan {
assertLockHeld(&h.lock)

pp := getg().m.p.ptr()
if pp == nil {
// We don't have a p so just do the normal thing.
@@ -1065,7 +1076,7 @@ func (h *mheap) allocMSpanLocked() *mspan {

// freeMSpanLocked free an mspan object.
//
// h must be locked.
// h.lock must be held.
//
// freeMSpanLocked must be called on the system stack because
// its caller holds the heap lock. See mheap for details.
@@ -1074,6 +1085,8 @@ func (h *mheap) allocMSpanLocked() *mspan {
//
//go:systemstack
func (h *mheap) freeMSpanLocked(s *mspan) {
assertLockHeld(&h.lock)

pp := getg().m.p.ptr()
// First try to free the mspan directly to the cache.
if pp != nil && pp.mspancache.len < len(pp.mspancache.buf) {
@@ -1097,7 +1110,7 @@ func (h *mheap) freeMSpanLocked(s *mspan) {
//
// The returned span is fully initialized.
//
// h must not be locked.
// h.lock must not be held.
//
// allocSpan must be called on the system stack both because it acquires
// the heap lock and because it must block GC transitions.
@@ -1281,8 +1294,10 @@ HaveSpan:
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
//
// h must be locked.
// h.lock must be held.
func (h *mheap) grow(npage uintptr) bool {
assertLockHeld(&h.lock)

// We must grow the heap in whole palloc chunks.
ask := alignUp(npage, pallocChunkPages) * pageSize

@@ -1391,6 +1406,8 @@ func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
}

func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
assertLockHeld(&h.lock)

switch s.state.get() {
case mSpanManual:
if s.allocCount != 0 {