runtime: eliminate gcBlackenPromptly mode
Now that there is no mark 2 phase, gcBlackenPromptly is no longer used.

Updates #26903. This is a follow-up to eliminating mark 2.

Change-Id: Ib9c534f21b36b8416fcf3cab667f186167b827f8
Reviewed-on: https://go-review.googlesource.com/c/134319
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
This commit is contained in:
parent 9108ae7751
commit d398dbdfc3
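For context, a minimal, self-contained sketch of the pattern this change deletes. The names below (workCache, globalQueue, blackenPromptly) are illustrative stand-ins, not the runtime's real API: before mark 2 was removed, code that produced GC work into a per-P gcWork cache had to flush it eagerly whenever gcBlackenPromptly was set, so late in the mark phase no work could stay hidden from the global queue; after this change only mark termination forces a flush.

package main

// Illustrative stand-ins for the runtime's per-P gcWork cache and global
// work queue; none of these names are the real runtime API.
type workCache struct{ buf []uintptr }

var globalQueue []uintptr

// dispose publishes all locally cached work to the global queue,
// analogous to gcWork.dispose in the runtime.
func (c *workCache) dispose() {
	globalQueue = append(globalQueue, c.buf...)
	c.buf = c.buf[:0]
}

// produce caches one unit of work and decides whether to flush it.
func produce(c *workCache, obj uintptr, blackenPromptly, markTermination bool) {
	c.buf = append(c.buf, obj)
	if blackenPromptly { // the gate this commit removes everywhere
		c.dispose()
	}
	if markTermination { // the only forced flush that remains
		c.dispose()
	}
}

func main() {
	var c workCache
	produce(&c, 0x1000, false, false) // ordinary marking: work stays cached
	produce(&c, 0x2000, false, true)  // mark termination: cache is flushed
	println(len(globalQueue), len(c.buf)) // prints: 2 0
}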
@@ -253,21 +253,6 @@ var writeBarrier struct {
 // gcphase == _GCmark.
 var gcBlackenEnabled uint32
 
-// gcBlackenPromptly indicates that optimizations that may
-// hide work from the global work queue should be disabled.
-//
-// If gcBlackenPromptly is true, per-P gcWork caches should
-// be flushed immediately and new objects should be allocated black.
-//
-// There is a tension between allocating objects white and
-// allocating them black. If white and the objects die before being
-// marked they can be collected during this GC cycle. On the other
-// hand allocating them black will reduce _GCmarktermination latency
-// since more work is done in the mark phase. This tension is resolved
-// by allocating white until the mark phase is approaching its end and
-// then allocating black for the remainder of the mark phase.
-var gcBlackenPromptly bool
-
 const (
 	_GCoff = iota // GC not running; sweeping in background, write barrier disabled
 	_GCmark       // GC marking roots and workbufs: allocate black, write barrier ENABLED
@@ -1497,7 +1482,6 @@ func gcMarkTermination(nextTriggerRatio float64) {
 	// World is stopped.
 	// Start marktermination which includes enabling the write barrier.
 	atomic.Store(&gcBlackenEnabled, 0)
-	gcBlackenPromptly = false
 	setGCPhase(_GCmarktermination)
 
 	work.heap1 = memstats.heap_live
@@ -1828,16 +1812,6 @@ func gcBgMarkWorker(_p_ *p) {
 			casgstatus(gp, _Gwaiting, _Grunning)
 		})
 
-		// If we are nearing the end of mark, dispose
-		// of the cache promptly. We must do this
-		// before signaling that we're no longer
-		// working so that other workers can't observe
-		// no workers and no work while we have this
-		// cached, and before we compute done.
-		if gcBlackenPromptly {
-			_p_.gcw.dispose()
-		}
-
 		// Account for time.
 		duration := nanotime() - startTime
 		switch _p_.gcMarkWorkerMode {
@@ -556,11 +556,6 @@ func gcAssistAlloc1(gp *g, scanWork int64) {
 	// will be more cache friendly.
 	gcw := &getg().m.p.ptr().gcw
 	workDone := gcDrainN(gcw, scanWork)
-	// If we are near the end of the mark phase
-	// dispose of the gcw.
-	if gcBlackenPromptly {
-		gcw.dispose()
-	}
 
 	casgstatus(gp, _Gwaiting, _Grunning)
 
@@ -577,8 +572,7 @@ func gcAssistAlloc1(gp *g, scanWork int64) {
 	incnwait := atomic.Xadd(&work.nwait, +1)
 	if incnwait > work.nproc {
 		println("runtime: work.nwait=", incnwait,
-			"work.nproc=", work.nproc,
-			"gcBlackenPromptly=", gcBlackenPromptly)
+			"work.nproc=", work.nproc)
 		throw("work.nwait > work.nproc")
 	}
 
@@ -1155,7 +1149,7 @@ func shade(b uintptr) {
 	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
 		gcw := &getg().m.p.ptr().gcw
 		greyobject(obj, 0, 0, span, gcw, objIndex)
-		if gcphase == _GCmarktermination || gcBlackenPromptly {
+		if gcphase == _GCmarktermination {
 			// Ps aren't allowed to cache work during mark
 			// termination.
 			gcw.dispose()
@@ -1289,18 +1283,13 @@ func gcDumpObject(label string, obj, off uintptr) {
 //go:nowritebarrier
 //go:nosplit
 func gcmarknewobject(obj, size, scanSize uintptr) {
-	if useCheckmark && !gcBlackenPromptly { // The world should be stopped so this should not happen.
+	if useCheckmark { // The world should be stopped so this should not happen.
 		throw("gcmarknewobject called while doing checkmark")
 	}
 	markBitsForAddr(obj).setMarked()
 	gcw := &getg().m.p.ptr().gcw
 	gcw.bytesMarked += uint64(size)
 	gcw.scanWork += int64(scanSize)
-	if gcBlackenPromptly {
-		// There shouldn't be anything in the work queue, but
-		// we still need to flush stats.
-		gcw.dispose()
-	}
 }
 
 // gcMarkTinyAllocs greys all active tiny alloc blocks.
@@ -1315,9 +1304,6 @@ func gcMarkTinyAllocs() {
 		_, span, objIndex := findObject(c.tiny, 0, 0)
 		gcw := &p.gcw
 		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
-		if gcBlackenPromptly {
-			gcw.dispose()
-		}
 	}
 }
 
@@ -47,9 +47,6 @@ func init() {
 // (preemption must be disabled)
 // gcw := &getg().m.p.ptr().gcw
 // .. call gcw.put() to produce and gcw.get() to consume ..
-// if gcBlackenPromptly {
-//    gcw.dispose()
-// }
 //
 // It's important that any use of gcWork during the mark phase prevent
 // the garbage collector from transitioning to mark termination since
@@ -1438,9 +1438,6 @@ func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *p
 			// Mark the finalizer itself, since the
 			// special isn't part of the GC'd heap.
 			scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
-			if gcBlackenPromptly {
-				gcw.dispose()
-			}
 			releasem(mp)
 		}
 		return true
@@ -79,7 +79,7 @@ const (
 func (b *wbBuf) reset() {
 	start := uintptr(unsafe.Pointer(&b.buf[0]))
 	b.next = start
-	if gcBlackenPromptly || writeBarrier.cgo {
+	if writeBarrier.cgo {
 		// Effectively disable the buffer by forcing a flush
 		// on every barrier.
 		b.end = uintptr(unsafe.Pointer(&b.buf[wbBufEntryPointers]))
@@ -275,7 +275,7 @@ func wbBufFlush1(_p_ *p) {
 
 	// Enqueue the greyed objects.
 	gcw.putBatch(ptrs[:pos])
-	if gcphase == _GCmarktermination || gcBlackenPromptly {
+	if gcphase == _GCmarktermination {
 		// Ps aren't allowed to cache work during mark
 		// termination.
 		gcw.dispose()
@@ -981,9 +981,6 @@ func newstack() {
 			// system stack.
 			gcw := &gp.m.p.ptr().gcw
 			scanstack(gp, gcw)
-			if gcBlackenPromptly {
-				gcw.dispose()
-			}
 			gp.gcscandone = true
 		}
 		gp.preemptscan = false