runtime: tricky replacements of _g_ in proc.go

This manually replaces uses of _g_ that cannot be trivially switched to
gp since there is another gp variable in scope.

Most of these functions only use the current g to reach the m, so this
helps with clarity by switching all accesses directly to an mp variable.

Change-Id: I96a4fc1c32470a7f3d12ddec9f147c2743210e71
Reviewed-on: https://go-review.googlesource.com/c/go/+/418577
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Michael Pratt <mpratt@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Author: Michael Pratt
Date:   2021-02-10 12:46:09 -05:00
Parent: 5e8d261918
Commit: e9d7f54a1a
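The description above boils down to a simple pattern: functions that fetched the current g only to reach its m now grab the m directly into an mp variable. The stand-alone sketch below illustrates that pattern with toy stand-ins for the runtime's g and m types (the types, fields, and getg here are simplified placeholders for illustration, not the real proc.go code):

package main

import "fmt"

// Toy stand-ins for the runtime's m and g types; illustrative only.
type m struct {
	locks int // placeholder for the real m.locks counter
	curg  *g  // the goroutine currently running on this M
}

type g struct {
	goid int64
	m    *m
}

// getg stands in for runtime.getg(), which returns the current goroutine.
var current = &g{goid: 1, m: &m{}}

func getg() *g { return current }

// Old style: fetch the current g, but only ever use it to reach the m.
func scheduleOld() {
	_g_ := getg()
	if _g_.m.locks != 0 {
		panic("schedule: holding locks")
	}
	fmt.Println("old: reached the m through _g_.m")
}

// New style (what this CL does): grab the m directly into mp.
func scheduleNew() {
	mp := getg().m
	if mp.locks != 0 {
		panic("schedule: holding locks")
	}
	fmt.Println("new: mp is the m itself")
}

func main() {
	scheduleOld()
	scheduleNew()
}

Holding the m once in mp avoids the repeated _g_.m chains and makes it obvious that these functions never use the goroutine itself, only its M.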


@@ -751,9 +751,9 @@ func schedinit() {
 }
 
 func dumpgstatus(gp *g) {
-	_g_ := getg()
+	thisg := getg()
 	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
-	print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
+	print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
 }
 
 // sched.lock must be held.
@@ -848,7 +848,6 @@ func ready(gp *g, traceskip int, next bool) {
 	status := readgstatus(gp)
 
 	// Mark runnable.
-	_g_ := getg()
 	mp := acquirem() // disable preemption because it can be holding p in a local var
 	if status&^_Gscan != _Gwaiting {
 		dumpgstatus(gp)
@@ -857,7 +856,7 @@ func ready(gp *g, traceskip int, next bool) {
 	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
 	casgstatus(gp, _Gwaiting, _Grunnable)
-	runqput(_g_.m.p.ptr(), gp, next)
+	runqput(mp.p.ptr(), gp, next)
 	wakep()
 	releasem(mp)
 }
@@ -2453,10 +2452,8 @@ func stoplockedm() {
 //
 //go:nowritebarrierrec
 func startlockedm(gp *g) {
-	_g_ := getg()
 	mp := gp.lockedm.ptr()
-	if mp == _g_.m {
+	if mp == getg().m {
 		throw("startlockedm: locked to me")
 	}
 	if mp.nextp != 0 {
@@ -2507,7 +2504,7 @@ func gcstopm() {
 //
 //go:yeswritebarrierrec
 func execute(gp *g, inheritTime bool) {
-	_g_ := getg()
+	mp := getg().m
 
 	if goroutineProfile.active {
 		// Make sure that gp has had its stack written out to the goroutine
@@ -2518,19 +2515,19 @@ func execute(gp *g, inheritTime bool) {
 	// Assign gp.m before entering _Grunning so running Gs have an
 	// M.
-	_g_.m.curg = gp
-	gp.m = _g_.m
+	mp.curg = gp
+	gp.m = mp
 	casgstatus(gp, _Grunnable, _Grunning)
 	gp.waitsince = 0
 	gp.preempt = false
 	gp.stackguard0 = gp.stack.lo + _StackGuard
 	if !inheritTime {
-		_g_.m.p.ptr().schedtick++
+		mp.p.ptr().schedtick++
 	}
 
 	// Check whether the profiler needs to be turned on or off.
 	hz := sched.profilehz
-	if _g_.m.profilehz != hz {
+	if mp.profilehz != hz {
 		setThreadCPUProfiler(hz)
 	}
 
@@ -2551,14 +2548,14 @@ func execute(gp *g, inheritTime bool) {
 // tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
 // reader) so the caller should try to wake a P.
 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
-	_g_ := getg()
+	mp := getg().m
 
 	// The conditions here and in handoffp must agree: if
 	// findrunnable would return a G to run, handoffp must start
 	// an M.
 
 top:
-	pp := _g_.m.p.ptr()
+	pp := mp.p.ptr()
 	if sched.gcwaiting != 0 {
 		gcstopm()
 		goto top
@@ -2653,9 +2650,9 @@ top:
 	// This is necessary to prevent excessive CPU consumption when
 	// GOMAXPROCS>>1 but the program parallelism is low.
 	procs := uint32(gomaxprocs)
-	if _g_.m.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
-		if !_g_.m.spinning {
-			_g_.m.spinning = true
+	if mp.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
+		if !mp.spinning {
+			mp.spinning = true
 			atomic.Xadd(&sched.nmspinning, 1)
 		}
@@ -2757,9 +2754,9 @@ top:
 	// we also observe no idle Ps it is OK to skip unparking a new worker
 	// thread: the system is fully loaded so no spinning threads are required.
 	// Also see "Worker thread parking/unparking" comment at the top of the file.
-	wasSpinning := _g_.m.spinning
-	if _g_.m.spinning {
-		_g_.m.spinning = false
+	wasSpinning := mp.spinning
+	if mp.spinning {
+		mp.spinning = false
 		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
 			throw("findrunnable: negative nmspinning")
 		}
@@ -2774,7 +2771,7 @@ top:
 	pp = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
 	if pp != nil {
 		acquirep(pp)
-		_g_.m.spinning = true
+		mp.spinning = true
 		atomic.Xadd(&sched.nmspinning, 1)
 		goto top
 	}
@@ -2783,7 +2780,7 @@ top:
 	pp, gp = checkIdleGCNoP()
 	if pp != nil {
 		acquirep(pp)
-		_g_.m.spinning = true
+		mp.spinning = true
 		atomic.Xadd(&sched.nmspinning, 1)
 
 		// Run the idle worker.
@@ -2807,10 +2804,10 @@ top:
 	// Poll network until next timer.
 	if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
 		atomic.Store64(&sched.pollUntil, uint64(pollUntil))
-		if _g_.m.p != 0 {
+		if mp.p != 0 {
 			throw("findrunnable: netpoll with p")
 		}
-		if _g_.m.spinning {
+		if mp.spinning {
 			throw("findrunnable: netpoll with spinning")
 		}
 		// Refresh now.
@@ -2852,7 +2849,7 @@ top:
 		return gp, false, false
 	}
 	if wasSpinning {
-		_g_.m.spinning = true
+		mp.spinning = true
 		atomic.Xadd(&sched.nmspinning, 1)
 	}
 	goto top
@@ -3175,31 +3172,31 @@ func injectglist(glist *gList) {
 // One round of scheduler: find a runnable goroutine and execute it.
 // Never returns.
 func schedule() {
-	_g_ := getg()
+	mp := getg().m
 
-	if _g_.m.locks != 0 {
+	if mp.locks != 0 {
 		throw("schedule: holding locks")
 	}
 
-	if _g_.m.lockedg != 0 {
+	if mp.lockedg != 0 {
 		stoplockedm()
-		execute(_g_.m.lockedg.ptr(), false) // Never returns.
+		execute(mp.lockedg.ptr(), false) // Never returns.
 	}
 
 	// We should not schedule away from a g that is executing a cgo call,
 	// since the cgo call is using the m's g0 stack.
-	if _g_.m.incgo {
+	if mp.incgo {
 		throw("schedule: in cgo")
 	}
 
 top:
-	pp := _g_.m.p.ptr()
+	pp := mp.p.ptr()
 	pp.preempt = false
 
 	// Safety check: if we are spinning, the run queue should be empty.
 	// Check this before calling checkTimers, as that might call
 	// goready to put a ready goroutine on the local run queue.
-	if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
+	if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
 		throw("schedule: spinning with local work")
 	}
@@ -3208,7 +3205,7 @@ top:
 	// This thread is going to run a goroutine and is not spinning anymore,
 	// so if it was marked as spinning we need to reset it now and potentially
 	// start a new spinning M.
-	if _g_.m.spinning {
+	if mp.spinning {
 		resetspinning()
 	}
@@ -3331,19 +3328,19 @@ func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
 // park continuation on g0.
 func park_m(gp *g) {
-	_g_ := getg()
+	mp := getg().m
 
 	if trace.enabled {
-		traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
+		traceGoPark(mp.waittraceev, mp.waittraceskip)
 	}
 
 	casgstatus(gp, _Grunning, _Gwaiting)
 	dropg()
 
-	if fn := _g_.m.waitunlockf; fn != nil {
-		ok := fn(gp, _g_.m.waitlock)
-		_g_.m.waitunlockf = nil
-		_g_.m.waitlock = nil
+	if fn := mp.waitunlockf; fn != nil {
+		ok := fn(gp, mp.waitlock)
+		mp.waitunlockf = nil
+		mp.waitlock = nil
 		if !ok {
 			if trace.enabled {
 				traceGoUnpark(gp, 2)
@@ -3470,8 +3467,8 @@ func goexit1() {
 // goexit continuation on g0.
 func goexit0(gp *g) {
-	_g_ := getg()
-	pp := _g_.m.p.ptr()
+	mp := getg().m
+	pp := mp.p.ptr()
 
 	casgstatus(gp, _Grunning, _Gdead)
 	gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
@@ -3481,7 +3478,7 @@ func goexit0(gp *g) {
 	gp.m = nil
 	locked := gp.lockedm != 0
 	gp.lockedm = 0
-	_g_.m.lockedg = 0
+	mp.lockedg = 0
 	gp.preemptStop = false
 	gp.paniconfault = false
 	gp._defer = nil // should be true already but just in case.
@@ -3509,8 +3506,8 @@ func goexit0(gp *g) {
 		schedule() // never returns
 	}
 
-	if _g_.m.lockedInt != 0 {
-		print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
+	if mp.lockedInt != 0 {
+		print("invalid m->lockedInt = ", mp.lockedInt, "\n")
 		throw("internal lockOSThread error")
 	}
 	gfput(pp, gp)
@@ -3522,11 +3519,11 @@ func goexit0(gp *g) {
 		// Return to mstart, which will release the P and exit
 		// the thread.
 		if GOOS != "plan9" { // See golang.org/issue/22227.
-			gogo(&_g_.m.g0.sched)
+			gogo(&mp.g0.sched)
 		} else {
 			// Clear lockedExt on plan9 since we may end up re-using
 			// this thread.
-			_g_.m.lockedExt = 0
+			mp.lockedExt = 0
 		}
 	}
 	schedule()
@@ -4102,14 +4099,12 @@ func newproc(fn *funcval) {
 // address of the go statement that created this. The caller is responsible
 // for adding the new g to the scheduler.
 func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
-	_g_ := getg()
-
 	if fn == nil {
 		fatal("go of nil func value")
 	}
 
-	acquirem() // disable preemption because it can be holding p in a local var
-	pp := _g_.m.p.ptr()
+	mp := acquirem() // disable preemption because we hold M and P in local vars.
+	pp := mp.p.ptr()
 	newg := gfget(pp)
 	if newg == nil {
 		newg = malg(_StackMin)
@@ -4148,8 +4143,8 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
 		atomic.Xadd(&sched.ngsys, +1)
 	} else {
 		// Only user goroutines inherit pprof labels.
-		if _g_.m.curg != nil {
-			newg.labels = _g_.m.curg.labels
+		if mp.curg != nil {
+			newg.labels = mp.curg.labels
 		}
 		if goroutineProfile.active {
 			// A concurrent goroutine profile is running. It should include
@@ -4189,7 +4184,7 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
 	if trace.enabled {
 		traceGoCreate(newg, newg.startpc)
 	}
-	releasem(_g_.m)
+	releasem(mp)
 
 	return newg
 }