runtime: cancel mark and scavenge assists if the limiter is enabled

This change forces mark and scavenge assists to be cancelled early if
the limiter is enabled. Without it, a goroutine that enters an assist
while the limiter is disabled can get stuck in a very long assist if
the limiter is enabled partway through. This is especially bad for mark
assists, which, in dire situations, can end up "owing" the GC a
significant debt.

For #52890.

Change-Id: I4bfaa76b8de3e167d49d2ffd8bc2127b87ea566a
Reviewed-on: https://go-review.googlesource.com/c/go/+/408816
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
Auto-Submit: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Author:       Michael Anthony Knyszek
Date:         2022-05-25 22:51:21 +00:00
Committed-by: Gopher Robot
Commit:       1f0ef6bec7 (parent 0f57c88bce)

4 changed files with 13 additions and 6 deletions
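
Before the diffs, a minimal, runnable sketch of the pattern this change
applies (hypothetical names; not the runtime's actual internals): an
assist loop works toward its quota in chunks and polls a cancellation
predicate between chunks, so it can bail out as soon as the limiter
turns on.

package main

import "fmt"

// assist works toward quota in fixed-size chunks, but polls shouldStop
// between chunks so it can bail out early instead of finishing a long
// assist after the limiter has been enabled.
func assist(quota int64, shouldStop func() bool) int64 {
	var done int64
	for done < quota {
		if shouldStop != nil && shouldStop() {
			break // e.g. the GC CPU limiter turned on mid-assist
		}
		done += 10 // one chunk of assist work
	}
	return done
}

func main() {
	polls := 0
	// Pretend the limiter turns on after the third poll.
	limiting := func() bool { polls++; return polls > 3 }
	fmt.Println(assist(100, limiting)) // prints 30, not 100
}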

src/runtime/export_test.go
@@ -811,7 +811,7 @@ func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
 func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
 	pp := (*pageAlloc)(p)
 	systemstack(func() {
-		r = pp.scavenge(nbytes)
+		r = pp.scavenge(nbytes, nil)
 	})
 	return
 }

src/runtime/mgcmark.go
@@ -1150,8 +1150,10 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 {
 	// want to claim was done by this call.
 	workFlushed := -gcw.heapScanWork

+	// In addition to backing out because of a preemption, back out
+	// if the GC CPU limiter is enabled.
 	gp := getg().m.curg
-	for !gp.preempt && workFlushed+gcw.heapScanWork < scanWork {
+	for !gp.preempt && !gcCPULimiter.limiting() && workFlushed+gcw.heapScanWork < scanWork {
 		// See gcDrain comment.
 		if work.full == 0 {
 			gcw.balance()
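
The limiting() check added to the loop condition above runs on every
drain iteration, so it must be cheap. A sketch of a limiter with that
property (a toy stand-in, not the runtime's gcCPULimiter, which tracks
much more state): the hot-path query is a single atomic load.

package main

import (
	"fmt"
	"sync/atomic"
)

// cpuLimiter is a toy stand-in for a GC CPU limiter: bookkeeping code
// flips the flag when GC is using too much CPU, and hot loops poll it.
type cpuLimiter struct {
	enabled atomic.Bool
}

// limiting reports whether work loops should back out. It is a single
// atomic load, cheap enough for a per-iteration loop condition.
func (l *cpuLimiter) limiting() bool { return l.enabled.Load() }

func (l *cpuLimiter) setLimiting(on bool) { l.enabled.Store(on) }

func main() {
	var l cpuLimiter
	done := 0
	for !l.limiting() && done < 100 { // mirrors gcDrainN's loop shape
		done++
		if done == 40 {
			l.setLimiting(true) // limiter turns on mid-drain
		}
	}
	fmt.Println(done) // 40: the loop backed out early
}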

src/runtime/mgcscavenge.go
@@ -356,7 +356,7 @@ func (s *scavengerState) init() {
 	if s.scavenge == nil {
 		s.scavenge = func(n uintptr) (uintptr, int64) {
 			start := nanotime()
-			r := mheap_.pages.scavenge(n)
+			r := mheap_.pages.scavenge(n, nil)
 			end := nanotime()
 			if start >= end {
 				return r, 0
@@ -636,7 +636,7 @@ func bgscavenge(c chan int) {
 //
 // scavenge always tries to scavenge nbytes worth of memory, and will
 // only fail to do so if the heap is exhausted for now.
-func (p *pageAlloc) scavenge(nbytes uintptr) uintptr {
+func (p *pageAlloc) scavenge(nbytes uintptr, shouldStop func() bool) uintptr {
 	released := uintptr(0)
 	for released < nbytes {
 		ci, pageIdx := p.scav.index.find()
@@ -646,6 +646,9 @@ func (p *pageAlloc) scavenge(nbytes uintptr) uintptr {
 		systemstack(func() {
 			released += p.scavengeOne(ci, pageIdx, nbytes-released)
 		})
+		if shouldStop != nil && shouldStop() {
+			break
+		}
 	}
 	return released
 }
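
Note where the new check sits: after each scavengeOne call, not inside
it, so an in-flight chunk always runs to completion, and passing nil
preserves the old run-to-completion behavior. A toy model of the two
call patterns (hypothetical names; the real chunk work is elided):

package main

import "fmt"

// scavenge mimics the new signature: release memory in chunks and,
// after each chunk, ask shouldStop whether to bail out early.
func scavenge(nbytes uintptr, shouldStop func() bool) uintptr {
	const chunk = 64 << 10 // pretend each inner call releases 64 KiB
	var released uintptr
	for released < nbytes {
		released += chunk // stands in for scavengeOne
		if shouldStop != nil && shouldStop() {
			break // stop between chunks, never mid-chunk
		}
	}
	return released
}

func main() {
	// nil means run to completion, like the background scavenger's
	// and scavengeAll's call sites.
	fmt.Println(scavenge(256<<10, nil)) // 262144

	// A predicate stops the loop early, like the assist call site.
	calls := 0
	fmt.Println(scavenge(1<<20, func() bool { calls++; return calls >= 2 })) // 131072
}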

src/runtime/mheap.go
@@ -1305,7 +1305,9 @@ HaveSpan:
 		// Measure how long we spent scavenging and add that measurement to the assist
 		// time so we can track it for the GC CPU limiter.
 		start := nanotime()
-		h.pages.scavenge(bytesToScavenge)
+		h.pages.scavenge(bytesToScavenge, func() bool {
+			return gcCPULimiter.limiting()
+		})
 		now := nanotime()
 		h.pages.scav.assistTime.Add(now - start)
 		gcCPULimiter.addAssistTime(now - start)
@@ -1558,7 +1560,7 @@ func (h *mheap) scavengeAll() {
 	gp := getg()
 	gp.m.mallocing++
-	released := h.pages.scavenge(^uintptr(0))
+	released := h.pages.scavenge(^uintptr(0), nil)
 	gp.m.mallocing--
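
The first hunk above is also what feeds the limiter: the assist is
timed and the duration credited via addAssistTime, which is eventually
what makes limiting() flip on. A rough sketch of that feedback loop,
assuming a much simpler budget-based limiter than the real
gcCPULimiter (which weighs GC CPU time against total CPU time):

package main

import (
	"fmt"
	"time"
)

// toyLimiter accumulates GC assist time and starts limiting once
// assists exceed a fixed budget. The real limiter is far more careful:
// it compares GC CPU time against overall CPU time per update period.
type toyLimiter struct {
	assistTime time.Duration
	budget     time.Duration
}

func (l *toyLimiter) addAssistTime(d time.Duration) { l.assistTime += d }

func (l *toyLimiter) limiting() bool { return l.assistTime > l.budget }

func main() {
	l := &toyLimiter{budget: 100 * time.Millisecond}

	// Mirror the diff's shape: time the assist, then credit it.
	start := time.Now()
	time.Sleep(2 * time.Millisecond) // stands in for h.pages.scavenge(...)
	l.addAssistTime(time.Since(start))

	fmt.Println(l.limiting()) // false: still under budget
	l.addAssistTime(200 * time.Millisecond)
	fmt.Println(l.limiting()) // true: assists now over budget
}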