runtime: commit to spinbitmutex GOEXPERIMENT

Use the "spinbit" mutex implementation always (including on platforms
that need to emulate atomic.Xchg8), and delete the prior "tristate"
implementations.

The exception is GOARCH=wasm, where the Go runtime does not use multiple
threads.
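
[Illustration, not part of the change: one plausible way to emulate an 8-bit
atomic exchange on top of a 32-bit compare-and-swap, the kind of fallback the
spinbit implementation needs on platforms without a native atomic.Xchg8. The
names, the little-endian byte layout, and the uintptr/unsafe.Pointer juggling
here are assumptions for the sketch, not the runtime's actual code.]

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// xchg8 emulates an 8-bit atomic exchange with a CAS loop on the
// aligned 32-bit word containing addr (little-endian layout assumed).
func xchg8(addr *uint8, new uint8) uint8 {
	a := uintptr(unsafe.Pointer(addr))
	word := (*uint32)(unsafe.Pointer(a &^ 3)) // containing aligned word
	shift := (a & 3) * 8                      // bit offset of the target byte
	mask := uint32(0xff) << shift
	for {
		old := atomic.LoadUint32(word)
		if atomic.CompareAndSwapUint32(word, old, old&^mask|uint32(new)<<shift) {
			return uint8(old >> shift)
		}
	}
}

func main() {
	var word uint32 = 0x11223344
	b := (*uint8)(unsafe.Pointer(&word)) // lowest byte on little-endian
	fmt.Printf("old=%#x word=%#x\n", xchg8(b, 0xaa), word)
}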

For #68578

Change-Id: Ifc29bbfa05071d776c23a19ae185891a03a82417
Reviewed-on: https://go-review.googlesource.com/c/go/+/658456
Auto-Submit: Rhys Hiltner <rhys.hiltner@gmail.com>
Reviewed-by: Junyang Shao <shaojunyang@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Author: Rhys Hiltner, 2025-03-14 12:38:34 -07:00
Committed by: Gopher Robot
Parent: 7ce45a014c
Commit: 7a177114df

8 changed files with 4 additions and 314 deletions

src/internal/buildcfg/exp.go

@@ -67,8 +67,6 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) {
 		regabiSupported = true
 	}
 
-	haveThreads := goarch != "wasm"
-
 	// Older versions (anything before V16) of dsymutil don't handle
 	// the .debug_rnglists section in DWARF5. See
 	// https://github.com/golang/go/issues/26379#issuecomment-2677068742
@@ -85,7 +83,6 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) {
 		RegabiArgs:      regabiSupported,
 		AliasTypeParams: true,
 		SwissMap:        true,
-		SpinbitMutex:    haveThreads,
 		SyncHashTrieMap: true,
 		Dwarf5:          dwarf5Supported,
 	}

src/internal/goexperiment/exp_spinbitmutex_off.go

@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.spinbitmutex
-
-package goexperiment
-
-const SpinbitMutex = false
-const SpinbitMutexInt = 0

src/internal/goexperiment/exp_spinbitmutex_on.go

@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.spinbitmutex
-
-package goexperiment
-
-const SpinbitMutex = true
-const SpinbitMutexInt = 1
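
[Aside: the Int form of each generated experiment constant exists so a
GOEXPERIMENT can shape data structures at compile time; a zero value makes a
conditional field vanish entirely. A minimal sketch of the idiom, with Foo as
a stand-in experiment name:]

package main

import "unsafe"

const FooInt = 0 // generated as 0 or 1 depending on the experiment

type padded struct {
	// Zero-length when the experiment is off, so it costs nothing.
	_ [FooInt * 64]byte
}

func main() {
	println(unsafe.Sizeof(padded{})) // 0 with FooInt == 0
}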

src/internal/goexperiment/flags.go

@@ -115,10 +115,6 @@ type Flags struct {
 	// SwissMap enables the SwissTable-based map implementation.
 	SwissMap bool
 
-	// SpinbitMutex enables the new "spinbit" mutex implementation on supported
-	// platforms. See https://go.dev/issue/68578.
-	SpinbitMutex bool
-
 	// SyncHashTrieMap enables the HashTrieMap sync.Map implementation.
 	SyncHashTrieMap bool

src/runtime/lock_futex_tristate.go

@@ -1,138 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (dragonfly || freebsd || linux) && !goexperiment.spinbitmutex
-
-package runtime
-
-import (
-	"internal/runtime/atomic"
-)
-
-// This implementation depends on OS-specific implementations of
-//
-//	futexsleep(addr *uint32, val uint32, ns int64)
-//		Atomically,
-//			if *addr == val { sleep }
-//		Might be woken up spuriously; that's allowed.
-//		Don't sleep longer than ns; ns < 0 means forever.
-//
-//	futexwakeup(addr *uint32, cnt uint32)
-//		If any procs are sleeping on addr, wake up at most cnt.
-
-const (
-	mutex_unlocked = 0
-	mutex_locked   = 1
-	mutex_sleeping = 2
-
-	active_spin     = 4
-	active_spin_cnt = 30
-	passive_spin    = 1
-)
-
-// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
-// mutex_sleeping means that there is presumably at least one sleeping thread.
-// Note that there can be spinning threads during all states - they do not
-// affect mutex's state.
-
-type mWaitList struct{}
-
-func lockVerifyMSize() {}
-
-func mutexContended(l *mutex) bool {
-	return atomic.Load(key32(&l.key)) > mutex_locked
-}
-
-func lock(l *mutex) {
-	lockWithRank(l, getLockRank(l))
-}
-
-func lock2(l *mutex) {
-	gp := getg()
-
-	if gp.m.locks < 0 {
-		throw("runtime·lock: lock count")
-	}
-	gp.m.locks++
-
-	// Speculative grab for lock.
-	v := atomic.Xchg(key32(&l.key), mutex_locked)
-	if v == mutex_unlocked {
-		return
-	}
-
-	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
-	// depending on whether there is a thread sleeping
-	// on this mutex. If we ever change l->key from
-	// MUTEX_SLEEPING to some other value, we must be
-	// careful to change it back to MUTEX_SLEEPING before
-	// returning, to ensure that the sleeping thread gets
-	// its wakeup call.
-	wait := v
-
-	timer := &lockTimer{lock: l}
-	timer.begin()
-	// On uniprocessors, no point spinning.
-	// On multiprocessors, spin for ACTIVE_SPIN attempts.
-	spin := 0
-	if ncpu > 1 {
-		spin = active_spin
-	}
-	for {
-		// Try for lock, spinning.
-		for i := 0; i < spin; i++ {
-			for l.key == mutex_unlocked {
-				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
-					timer.end()
-					return
-				}
-			}
-			procyield(active_spin_cnt)
-		}
-
-		// Try for lock, rescheduling.
-		for i := 0; i < passive_spin; i++ {
-			for l.key == mutex_unlocked {
-				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
-					timer.end()
-					return
-				}
-			}
-			osyield()
-		}
-
-		// Sleep.
-		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
-		if v == mutex_unlocked {
-			timer.end()
-			return
-		}
-		wait = mutex_sleeping
-		futexsleep(key32(&l.key), mutex_sleeping, -1)
-	}
-}
-
-func unlock(l *mutex) {
-	unlockWithRank(l)
-}
-
-func unlock2(l *mutex) {
-	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
-	if v == mutex_unlocked {
-		throw("unlock of unlocked lock")
-	}
-	if v == mutex_sleeping {
-		futexwakeup(key32(&l.key), 1)
-	}
-
-	gp := getg()
-	gp.m.mLockProfile.recordUnlock(l)
-	gp.m.locks--
-	if gp.m.locks < 0 {
-		throw("runtime·unlock: lock count")
-	}
-	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
-		gp.stackguard0 = stackPreempt
-	}
-}
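
[Illustration, not part of the change: a Linux-only userspace sketch of the
futexsleep/futexwakeup contract described in the file above, using
golang.org/x/sys/unix rather than the runtime's raw syscalls. The wrapper
names mirror the comment; the timeout parameter is omitted for brevity, and
everything else here is an assumption for the sketch.]

package main

import (
	"fmt"
	"sync/atomic"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"
)

// futexsleep sleeps only while *addr still equals val; spurious wakeups allowed.
func futexsleep(addr *uint32, val uint32) {
	unix.Syscall6(unix.SYS_FUTEX, uintptr(unsafe.Pointer(addr)),
		uintptr(unix.FUTEX_WAIT|unix.FUTEX_PRIVATE_FLAG), uintptr(val), 0, 0, 0)
}

// futexwakeup wakes at most cnt sleepers on addr.
func futexwakeup(addr *uint32, cnt uint32) {
	unix.Syscall6(unix.SYS_FUTEX, uintptr(unsafe.Pointer(addr)),
		uintptr(unix.FUTEX_WAKE|unix.FUTEX_PRIVATE_FLAG), uintptr(cnt), 0, 0, 0)
}

func main() {
	var state uint32
	go func() {
		time.Sleep(10 * time.Millisecond)
		atomic.StoreUint32(&state, 1)
		futexwakeup(&state, 1)
	}()
	// Re-check after every wakeup, exactly as the contract requires.
	for atomic.LoadUint32(&state) == 0 {
		futexsleep(&state, 0) // returns when state changes, or spuriously
	}
	fmt.Println("woken with state =", atomic.LoadUint32(&state))
}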

src/runtime/lock_sema_tristate.go

@@ -1,148 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (aix || darwin || netbsd || openbsd || plan9 || solaris || windows) && !goexperiment.spinbitmutex
-
-package runtime
-
-import (
-	"internal/runtime/atomic"
-	"unsafe"
-)
-
-// This implementation depends on OS-specific implementations of
-//
-//	func semacreate(mp *m)
-//		Create a semaphore for mp, if it does not already have one.
-//
-//	func semasleep(ns int64) int32
-//		If ns < 0, acquire m's semaphore and return 0.
-//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
-//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
-//
-//	func semawakeup(mp *m)
-//		Wake up mp, which is or will soon be sleeping on its semaphore.
-
-const (
-	locked uintptr = 1
-
-	active_spin     = 4
-	active_spin_cnt = 30
-	passive_spin    = 1
-)
-
-// mWaitList is part of the M struct, and holds the list of Ms that are waiting
-// for a particular runtime.mutex.
-//
-// When an M is unable to immediately obtain a lock, it adds itself to the list
-// of Ms waiting for the lock. It does that via this struct's next field,
-// forming a singly-linked list with the mutex's key field pointing to the head
-// of the list.
-type mWaitList struct {
-	next muintptr // next m waiting for lock
-}
-
-func lockVerifyMSize() {}
-
-func mutexContended(l *mutex) bool {
-	return atomic.Loaduintptr(&l.key) > locked
-}
-
-func lock(l *mutex) {
-	lockWithRank(l, getLockRank(l))
-}
-
-func lock2(l *mutex) {
-	gp := getg()
-	if gp.m.locks < 0 {
-		throw("runtime·lock: lock count")
-	}
-	gp.m.locks++
-
-	// Speculative grab for lock.
-	if atomic.Casuintptr(&l.key, 0, locked) {
-		return
-	}
-	semacreate(gp.m)
-
-	timer := &lockTimer{lock: l}
-	timer.begin()
-	// On uniprocessors, no point spinning.
-	// On multiprocessors, spin for ACTIVE_SPIN attempts.
-	spin := 0
-	if ncpu > 1 {
-		spin = active_spin
-	}
-Loop:
-	for i := 0; ; i++ {
-		v := atomic.Loaduintptr(&l.key)
-		if v&locked == 0 {
-			// Unlocked. Try to lock.
-			if atomic.Casuintptr(&l.key, v, v|locked) {
-				timer.end()
-				return
-			}
-			i = 0
-		}
-		if i < spin {
-			procyield(active_spin_cnt)
-		} else if i < spin+passive_spin {
-			osyield()
-		} else {
-			// Someone else has it.
-			// l.key points to a linked list of M's waiting
-			// for this lock, chained through m.mWaitList.next.
-			// Queue this M.
-			for {
-				gp.m.mWaitList.next = muintptr(v &^ locked)
-				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
-					break
-				}
-				v = atomic.Loaduintptr(&l.key)
-				if v&locked == 0 {
-					continue Loop
-				}
-			}
-			if v&locked != 0 {
-				// Queued. Wait.
-				semasleep(-1)
-				i = 0
-			}
-		}
-	}
-}
-
-func unlock(l *mutex) {
-	unlockWithRank(l)
-}
-
-// We might not be holding a p in this code.
-//
-//go:nowritebarrier
-func unlock2(l *mutex) {
-	gp := getg()
-	var mp *m
-	for {
-		v := atomic.Loaduintptr(&l.key)
-		if v == locked {
-			if atomic.Casuintptr(&l.key, locked, 0) {
-				break
-			}
-		} else {
-			// Other M's are waiting for the lock.
-			// Dequeue an M.
-			mp = muintptr(v &^ locked).ptr()
-			if atomic.Casuintptr(&l.key, v, uintptr(mp.mWaitList.next)) {
-				// Dequeued an M. Wake it.
-				semawakeup(mp) // no use of mp after this point; it's awake
-				break
-			}
-		}
-	}
-	gp.m.mLockProfile.recordUnlock(l)
-	gp.m.locks--
-	if gp.m.locks < 0 {
-		throw("runtime·unlock: lock count")
-	}
-	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
-		gp.stackguard0 = stackPreempt
-	}
-}
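
[Again as illustration only: a loose userspace analogy for the
semacreate/semasleep/semawakeup contract above, with a buffered channel
standing in for each M's one-slot semaphore. The explicit mp parameter on
semasleep is a stand-in for the runtime's implicit getg().m.]

package main

import (
	"fmt"
	"time"
)

type mSim struct {
	sema chan struct{} // one-slot semaphore
}

// semacreate creates mp's semaphore, if it does not already have one.
func semacreate(mp *mSim) {
	if mp.sema == nil {
		mp.sema = make(chan struct{}, 1)
	}
}

// semasleep acquires mp's semaphore; ns < 0 waits forever, otherwise
// it gives up after ns nanoseconds and returns -1.
func semasleep(mp *mSim, ns int64) int32 {
	if ns < 0 {
		<-mp.sema
		return 0
	}
	select {
	case <-mp.sema:
		return 0
	case <-time.After(time.Duration(ns)):
		return -1
	}
}

// semawakeup wakes mp, which is or will soon be sleeping on its semaphore.
func semawakeup(mp *mSim) {
	mp.sema <- struct{}{}
}

func main() {
	mp := &mSim{}
	semacreate(mp)
	go func() {
		time.Sleep(time.Millisecond)
		semawakeup(mp)
	}()
	fmt.Println(semasleep(mp, -1)) // prints 0 once woken
}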

src/runtime/lock_spinbit.go

@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || plan9 || solaris || windows) && goexperiment.spinbitmutex
+//go:build !wasm
 
 package runtime

src/runtime/runtime2.go

@@ -8,7 +8,6 @@ import (
 	"internal/abi"
 	"internal/chacha8rand"
 	"internal/goarch"
-	"internal/goexperiment"
 	"internal/runtime/atomic"
 	"internal/runtime/sys"
 	"unsafe"
@@ -628,9 +627,9 @@ type mPadded struct {
 	// Size the runtime.m structure so it fits in the 2048-byte size class, and
 	// not in the next-smallest (1792-byte) size class. That leaves the 11 low
-	// bits of muintptr values available for flags, as required for
-	// GOEXPERIMENT=spinbitmutex.
-	_ [goexperiment.SpinbitMutexInt * (2048 - mallocHeaderSize - mRedZoneSize - unsafe.Sizeof(m{}))]byte
+	// bits of muintptr values available for flags, as required by
+	// lock_spinbit.go.
+	_ [(1 - goarch.IsWasm) * (2048 - mallocHeaderSize - mRedZoneSize - unsafe.Sizeof(m{}))]byte
 }
 
 type p struct {
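
[Illustration, not part of the change: why the 2048-byte size class matters.
Objects in that class sit at 2048-byte-aligned addresses, so an m's pointer
has 11 zero low bits, and lock_spinbit.go can pack mutex flags there. The
address and flag name below are made up for the sketch.]

package main

import "fmt"

func main() {
	const flagBits = 11 // 1 << 11 == 2048
	const mask = uintptr(1)<<flagBits - 1

	mAddr := uintptr(0xc000384000) // hypothetical 2048-byte-aligned m address
	const mutexLocked = uintptr(1) // hypothetical flag bit

	tagged := mAddr | mutexLocked // a muintptr carrying a flag in its low bits
	fmt.Printf("m: %#x, flags: %#x\n", tagged&^mask, tagged&mask)
}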