mirror of https://github.com/golang/go.git
runtime: remove periodic scavenging
This change removes the periodic scavenger, which goes over every span
in the heap and scavenges it if it hasn't been used for 5 minutes. It
should no longer be necessary once we have background scavenging
(follow-up).

For #30333.

Change-Id: Ic3a1a4e85409dc25719ba4593a3b60273a4c71e0
Reviewed-on: https://go-review.googlesource.com/c/go/+/143157
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
parent d56199d68e
commit eaa1c87b00
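For context, here is the policy being removed, in miniature: each free span carried an unusedsince timestamp, and a periodic pass returned to the OS the pages of any span idle longer than a limit (5 minutes by default). The following is a minimal, self-contained Go sketch of that age-based policy; the span type, its fields, and the 8 KiB page size are illustrative simplifications, not the runtime's real declarations.

package main

import (
    "fmt"
    "time"
)

// span is a stand-in for runtime.mspan, reduced to what the old policy needed.
type span struct {
    npages      uintptr
    unusedsince time.Time // when the span was first seen unused
}

// scavengeOld models the removed behavior: release the pages of every
// free span that has been unused for longer than limit.
func scavengeOld(free []span, now time.Time, limit time.Duration) (released uintptr) {
    const pageSize = 8 << 10 // the runtime's page size is 8 KiB
    for _, s := range free {
        if now.Sub(s.unusedsince) > limit {
            released += s.npages * pageSize
        }
    }
    return released
}

func main() {
    now := time.Now()
    free := []span{
        {npages: 4, unusedsince: now.Add(-10 * time.Minute)}, // old enough: released
        {npages: 2, unusedsince: now.Add(-time.Minute)},      // too recent: kept
    }
    fmt.Println("released bytes:", scavengeOld(free, now, 5*time.Minute)) // 32768
}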
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -394,7 +394,6 @@ type mspan struct {
     divShift2   uint8    // for divide by elemsize - divMagic.shift2
     scavenged   bool     // whether this span has had its pages released to the OS
     elemsize    uintptr  // computed from sizeclass or from npages
-    unusedsince int64    // first time spotted by gc in mspanfree state
     limit       uintptr  // end of data in span
     speciallock mutex    // guards specials list
     specials    *special // linked list of special records sorted by offset.
@@ -1209,10 +1208,9 @@ HaveSpan:
         // Also, scavenge may cause coalescing, so prevent
         // coalescing with s by temporarily changing its state.
         s.state = mSpanManual
-        h.scavengeLocked(s.npages * pageSize)
+        h.scavengeLocked(s.npages*pageSize, true)
         s.state = mSpanFree
     }
-    s.unusedsince = 0

     h.setSpans(s.base(), npage, s)

@@ -1243,7 +1241,7 @@ func (h *mheap) grow(npage uintptr) bool {
     // is proportional to the number of sysUnused() calls rather than
     // the number of pages released, so we make fewer of those calls
     // with larger spans.
-    h.scavengeLocked(size)
+    h.scavengeLocked(size, true)

     // Create a fake "in use" span and free it, so that the
     // right coalescing happens.
@@ -1253,7 +1251,7 @@ func (h *mheap) grow(npage uintptr) bool {
     atomic.Store(&s.sweepgen, h.sweepgen)
     s.state = mSpanInUse
     h.pagesInUse += uint64(s.npages)
-    h.freeSpanLocked(s, false, true, 0)
+    h.freeSpanLocked(s, false, true)
     return true
 }

@@ -1283,7 +1281,7 @@ func (h *mheap) freeSpan(s *mspan, large bool) {
             // heap_scan changed.
             gcController.revise()
         }
-        h.freeSpanLocked(s, true, true, 0)
+        h.freeSpanLocked(s, true, true)
         unlock(&h.lock)
     })
 }
@@ -1304,12 +1302,12 @@ func (h *mheap) freeManual(s *mspan, stat *uint64) {
     lock(&h.lock)
     *stat -= uint64(s.npages << _PageShift)
     memstats.heap_sys += uint64(s.npages << _PageShift)
-    h.freeSpanLocked(s, false, true, 0)
+    h.freeSpanLocked(s, false, true)
     unlock(&h.lock)
 }

 // s must be on the busy list or unlinked.
-func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
+func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
     switch s.state {
     case mSpanManual:
         if s.allocCount != 0 {
@@ -1337,13 +1335,6 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
     }
     s.state = mSpanFree

-    // Stamp newly unused spans. The scavenger will use that
-    // info to potentially give back some pages to the OS.
-    s.unusedsince = unusedsince
-    if unusedsince == 0 {
-        s.unusedsince = nanotime()
-    }
-
     // Coalesce span with neighbors.
     h.coalesce(s)

@@ -1353,15 +1344,23 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {

 // scavengeLocked scavenges nbytes worth of spans in the free treap by
 // starting from the span with the highest base address and working down.
-// It then takes those spans and places them in scav. h must be locked.
-func (h *mheap) scavengeLocked(nbytes uintptr) {
+// It then takes those spans and places them in scav.
+//
+// useCredit determines whether a scavenging call should use the credit
+// system. In general, useCredit should be true except in special
+// circumstances.
+//
+// Returns the amount of memory scavenged in bytes. h must be locked.
+func (h *mheap) scavengeLocked(nbytes uintptr, useCredit bool) uintptr {
     // Use up scavenge credit if there's any available.
-    if nbytes > h.scavengeCredit {
-        nbytes -= h.scavengeCredit
-        h.scavengeCredit = 0
-    } else {
-        h.scavengeCredit -= nbytes
-        return
+    if useCredit {
+        if nbytes > h.scavengeCredit {
+            nbytes -= h.scavengeCredit
+            h.scavengeCredit = 0
+        } else {
+            h.scavengeCredit -= nbytes
+            return nbytes
+        }
     }
     released := uintptr(0)
     // Iterate over spans with huge pages first, then spans without.
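A note on the useCredit flag introduced above: spans are released whole, so a request for nbytes can release more than nbytes, and the surplus is banked in scavengeCredit so that later requests are paid out of it before any real work happens. Below is a minimal, self-contained sketch of that accounting; the scavenger type, the doRelease helper, and the 64 KiB span granularity are invented for illustration and are not the runtime's API.

package main

import "fmt"

// scavenger is an invented stand-in for the credit bookkeeping on mheap.
type scavenger struct {
    credit uintptr // bytes released beyond what past requests asked for
}

// doRelease stands in for releasing whole spans: it rounds the request
// up to a 64 KiB "span", so it always overshoots small requests.
func (sc *scavenger) doRelease(nbytes uintptr) uintptr {
    const spanBytes = 64 << 10
    return (nbytes + spanBytes - 1) / spanBytes * spanBytes
}

// scavenge mirrors the shape of scavengeLocked(nbytes, true): spend
// banked credit first, and bank any overshoot afterwards.
func (sc *scavenger) scavenge(nbytes uintptr) uintptr {
    if nbytes > sc.credit {
        nbytes -= sc.credit
        sc.credit = 0
    } else {
        sc.credit -= nbytes
        return nbytes // request fully covered by banked credit
    }
    released := sc.doRelease(nbytes)
    if released > nbytes {
        sc.credit += released - nbytes // bank the overshoot
    }
    return released
}

func main() {
    var sc scavenger
    fmt.Println(sc.scavenge(100))  // 65536: one whole span released
    fmt.Println(sc.scavenge(1000)) // 1000: served from banked credit
    fmt.Println(sc.credit)         // 64436: credit remaining
}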
@@ -1388,60 +1387,41 @@ func (h *mheap) scavengeLocked(nbytes uintptr) {
             h.free.insert(s)
         }
     }
-    // If we over-scavenged, turn that extra amount into credit.
-    if released > nbytes {
-        h.scavengeCredit += released - nbytes
-    }
-}
-
-// scavengeAll visits each node in the unscav treap and scavenges the
-// treapNode's span. It then removes the scavenged span from
-// unscav and adds it into scav before continuing. h must be locked.
-func (h *mheap) scavengeAllLocked(now, limit uint64) uintptr {
-    // Iterate over the unscavenged spans in the treap scavenging spans
-    // if unused for at least limit time.
-    released := uintptr(0)
-    for t := h.free.start(treapIterScav, 0); t.valid(); {
-        s := t.span()
-        n := t.next()
-        if (now - uint64(s.unusedsince)) > limit {
-            start, end := s.physPageBounds()
-            if start < end {
-                h.free.erase(t)
-                released += s.scavenge()
-                // See (*mheap).scavengeLocked.
-                h.coalesce(s)
-                h.free.insert(s)
-            }
+    if useCredit {
+        // If we over-scavenged, turn that extra amount into credit.
+        if released > nbytes {
+            h.scavengeCredit += released - nbytes
         }
-        t = n
     }
     return released
 }

-func (h *mheap) scavengeAll(k int32, now, limit uint64) {
+// scavengeAll visits each node in the free treap and scavenges the
+// treapNode's span. It then removes the scavenged span from
+// unscav and adds it into scav before continuing.
+func (h *mheap) scavengeAll() {
     // Disallow malloc or panic while holding the heap lock. We do
     // this here because this is an non-mallocgc entry-point to
     // the mheap API.
     gp := getg()
     gp.m.mallocing++
     lock(&h.lock)
-    released := h.scavengeAllLocked(now, limit)
+    released := h.scavengeLocked(^uintptr(0), false)
     unlock(&h.lock)
     gp.m.mallocing--

     if debug.gctrace > 0 {
         if released > 0 {
-            print("scvg", k, ": ", released>>20, " MB released\n")
+            print("forced scvg: ", released>>20, " MB released\n")
         }
-        print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
+        print("forced scvg: inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
     }
 }

 //go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
 func runtime_debug_freeOSMemory() {
     GC()
-    systemstack(func() { mheap_.scavengeAll(-1, ^uint64(0), 0) })
+    systemstack(func() { mheap_.scavengeAll() })
 }

 // Initialize a new span with the given start and npages.
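In the new scavengeAll above, the request passed to scavengeLocked is ^uintptr(0), the bitwise NOT of zero: every bit set, the largest value a uintptr can hold. No amount of releasing can satisfy such a request, so the entire treap is visited; passing useCredit=false keeps this forced full pass from consuming or creating credit. A one-line demonstration of the idiom:

package main

import "fmt"

func main() {
    // ^x flips every bit of x, so ^uintptr(0) is the maximum uintptr.
    fmt.Printf("%#x\n", ^uintptr(0)) // 0xffffffffffffffff on 64-bit platforms
}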
@@ -1456,7 +1436,6 @@ func (span *mspan) init(base uintptr, npages uintptr) {
     span.spanclass = 0
     span.elemsize = 0
     span.state = mSpanDead
-    span.unusedsince = 0
     span.scavenged = false
     span.speciallock.key = 0
     span.specials = nil
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -4282,19 +4282,6 @@ func sysmon() {
     checkdead()
     unlock(&sched.lock)

-    // If a heap span goes unused for 5 minutes after a garbage collection,
-    // we hand it back to the operating system.
-    scavengelimit := int64(5 * 60 * 1e9)
-
-    if debug.scavenge > 0 {
-        // Scavenge-a-lot for testing.
-        forcegcperiod = 10 * 1e6
-        scavengelimit = 20 * 1e6
-    }
-
-    lastscavenge := nanotime()
-    nscavenge := 0
-
     lasttrace := int64(0)
     idle := 0 // how many cycles in succession we had not wokeup somebody
     delay := uint32(0)
@@ -4316,9 +4303,6 @@ func sysmon() {
             // Make wake-up period small enough
             // for the sampling to be correct.
             maxsleep := forcegcperiod / 2
-            if scavengelimit < forcegcperiod {
-                maxsleep = scavengelimit / 2
-            }
             shouldRelax := true
             if osRelaxMinNS > 0 {
                 next := timeSleepUntil()
@@ -4381,12 +4365,6 @@ func sysmon() {
             injectglist(&list)
             unlock(&forcegc.lock)
         }
-        // scavenge heap once in a while
-        if lastscavenge+scavengelimit/2 < now {
-            mheap_.scavengeAll(int32(nscavenge), uint64(now), uint64(scavengelimit))
-            lastscavenge = now
-            nscavenge++
-        }
         if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
             lasttrace = now
             schedtrace(debug.scheddetail > 0)
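The deleted sysmon logic paced itself rather than scavenging on every wakeup: a pass ran only once half of scavengelimit had elapsed since the previous one, i.e. every 2.5 minutes under the default 5-minute limit. Here is a toy model of that cadence, with durations shrunk so the example finishes quickly; it only mimics the shape of the deleted loop.

package main

import (
    "fmt"
    "time"
)

func main() {
    // Stand-ins for the deleted sysmon state; the real limit was 5 minutes.
    scavengelimit := 200 * time.Millisecond
    lastscavenge := time.Now()
    nscavenge := 0

    for start := time.Now(); time.Since(start) < time.Second; {
        now := time.Now()
        if lastscavenge.Add(scavengelimit / 2).Before(now) {
            // The old code called mheap_.scavengeAll(...) here.
            nscavenge++
            lastscavenge = now
        }
        time.Sleep(10 * time.Millisecond) // sysmon sleeps between checks
    }
    fmt.Println("passes in 1s:", nscavenge) // roughly 10
}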