all: separate doc comment from //go: directives

A future change to gofmt will rewrite

	// Doc comment.
	//go:foo

to

	// Doc comment.
	//
	//go:foo

Apply that change preemptively to all comments (not necessarily just doc comments).
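
For illustration, here is the rewritten form on a complete function (the
function and its directive are hypothetical; the blank // line keeps the
directive from being rendered as part of the doc comment text):

	// Add returns the sum of x and y.
	//
	//go:noinline
	func Add(x, y int) int { return x + y }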

For #51082.

Change-Id: Iffe0285418d1e79d34526af3520b415a12203ca9
Reviewed-on: https://go-review.googlesource.com/c/go/+/384260
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
TryBot-Result: Gopher Robot <gobot@golang.org>
Russ Cox 2022-01-30 20:13:43 -05:00
parent 81431c7aa7
commit 9839668b56
148 changed files with 363 additions and 0 deletions


@ -228,6 +228,7 @@ func (v *Value) auxString() string {
// If/when midstack inlining is enabled (-l=4), the compiler gets both larger and slower.
// Not-inlining this method is a help (*Value.reset and *Block.NewValue0 are similar).
//
//go:noinline
func (v *Value) AddArg(w *Value) {
if v.Args == nil {
@ -331,6 +332,7 @@ func (v *Value) resetArgs() {
// reset is called from most rewrite rules.
// Allowing it to be inlined increases the size
// of cmd/compile by almost 10%, and slows it down.
//
//go:noinline
func (v *Value) reset(op Op) {
if v.InCache {
@ -377,6 +379,7 @@ func (v *Value) invalidateRecursively() bool {
// copyOf is called from rewrite rules.
// It modifies v to be (Copy a).
//
//go:noinline
func (v *Value) copyOf(a *Value) {
if v == a {


@ -170,6 +170,7 @@ func cvt8(a float32) int32 {
}
// make sure to cover int, uint cases (issue #16738)
//
//go:noinline
func cvt9(a float64) int {
return int(a)


@ -145,6 +145,7 @@ func (v V) val() int64 {
// and y.val() should be equal to which and y.p.val() should
// be equal to z.val(). Also, x(.p)**8 == x; that is, the
// autos are all linked into a ring.
//
//go:noinline
func (v V) autos_ssa(which, w1, x1, w2, x2 int64) (y, z V) {
fill_ssa(v.w, v.x, &v, v.p) // gratuitous no-op to force addressing
@ -191,6 +192,7 @@ func (v V) autos_ssa(which, w1, x1, w2, x2 int64) (y, z V) {
// gets is an address-mentioning way of implementing
// structure assignment.
//
//go:noinline
func (to *V) gets(from *V) {
*to = *from
@ -198,12 +200,14 @@ func (to *V) gets(from *V) {
// getsI is an address-and-interface-mentioning way of
// implementing structure assignment.
//
//go:noinline
func (to *V) getsI(from interface{}) {
*to = *from.(*V)
}
// fill_ssa initializes r with V{w:w, x:x, p:p}
//
//go:noinline
func fill_ssa(w, x int64, r, p *V) {
*r = V{w: w, x: x, p: p}


@ -225,6 +225,7 @@ func testArithConstShift(t *testing.T) {
// overflowConstShift_ssa verifies that constant folding for shift
// doesn't wrap (i.e. x << MAX_INT << 1 doesn't get folded to x << 0).
//
//go:noinline
func overflowConstShift64_ssa(x int64) int64 {
return x << uint64(0xffffffffffffffff) << uint64(1)


@ -117,6 +117,7 @@ type junk struct {
// flagOverwrite_ssa is intended to reproduce an issue seen where an XOR
// was scheduled between a compare and branch, clearing flags.
//
//go:noinline
func flagOverwrite_ssa(s *junk, c int) int {
if '0' <= c && c <= '9' {


@ -14,6 +14,7 @@ import (
// manysub_ssa is designed to tickle bugs that depend on register
// pressure or unfriendly operand ordering in registers (and at
// least once it succeeded in this).
//
//go:noinline
func manysub_ssa(a, b, c, d float64) (aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc, cd, da, db, dc, dd float64) {
aa = a + 11.0 - a
@ -37,6 +38,7 @@ func manysub_ssa(a, b, c, d float64) (aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc
// fpspill_ssa attempts to trigger a bug where phis with floating point values
// were stored in non-fp registers causing an error in doasm.
//
//go:noinline
func fpspill_ssa(a int) float64 {


@ -73,6 +73,7 @@ var b int
// testDeadStorePanic_ssa ensures that we don't optimize away stores
// that could be read after recover(). Modeled after fixedbugs/issue1304.
//
//go:noinline
func testDeadStorePanic_ssa(a int) (r int) {
defer func() {


@ -212,6 +212,7 @@ var runtimeDecls = [...]struct {
}
// Not inlining this function removes a significant chunk of init code.
//
//go:noinline
func newSig(params, results []*types.Field) *types.Type {
return types.NewSignature(types.NoPkg, nil, nil, params, results)


@ -105,6 +105,7 @@ func mkbuiltin(w io.Writer, name string) {
fmt.Fprintln(w, `
// Not inlining this function removes a significant chunk of init code.
//
//go:noinline
func newSig(params, results []*types.Field) *types.Type {
return types.NewSignature(types.NoPkg, nil, nil, params, results)


@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// js does not support inter-process file locking.
//
//go:build !js
package lockedfile_test


@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// js does not support inter-process file locking.
//
//go:build !js
package lockedfile_test


@ -37,6 +37,7 @@ type SymKind uint8
// These are used to index into cmd/link/internal/sym/AbiSymKindToSymKind
//
// TODO(rsc): Give idiomatic Go names.
//
//go:generate stringer -type=SymKind
const (
// An otherwise invalid zero value for the type


@ -13,6 +13,7 @@ import (
)
// Implemented in the syscall package.
//
//go:linkname fcntl syscall.fcntl
func fcntl(fd int, cmd int, arg int) (int, error)


@ -38,6 +38,7 @@ type SymKind uint8
// Defined SymKind values.
//
// TODO(rsc): Give idiomatic Go names.
//
//go:generate stringer -type=SymKind
const (
Sxxx SymKind = iota


@ -39,6 +39,7 @@ func (x *cbc) BlockSize() int { return BlockSize }
// cryptBlocksChain invokes the cipher message with chaining (KMC) instruction
// with the given function code. The length must be a multiple of BlockSize (16).
//
//go:noescape
func cryptBlocksChain(c code, iv, key, dst, src *byte, length int)


@ -28,6 +28,7 @@ type aesCipherAsm struct {
// cryptBlocks invokes the cipher message (KM) instruction with
// the given function code. This is equivalent to AES in ECB
// mode. The length must be a multiple of BlockSize (16).
//
//go:noescape
func cryptBlocks(c code, key, dst, src *byte, length int)


@ -17,6 +17,7 @@ var _ ctrAble = (*aesCipherAsm)(nil)
// dst. If a and b are not the same length then the number of bytes processed
// will be equal to the length of the shorter of the two. Returns the number
// of bytes processed.
//
//go:noescape
func xorBytes(dst, a, b []byte) int


@ -100,6 +100,7 @@ func sliceForAppend(in []byte, n int) (head, tail []byte) {
// ghash uses the GHASH algorithm to hash data with the given key. The initial
// hash value is given by hash which will be updated with the new hash value.
// The length of data must be a multiple of 16 bytes.
//
//go:noescape
func ghash(key *gcmHashKey, hash *[16]byte, data []byte)
@ -127,6 +128,7 @@ func (g *gcmAsm) paddedGHASH(hash *[16]byte, data []byte) {
// The lengths of both dst and buf must be greater than or equal to the length
// of src. buf may be partially or completely overwritten during the execution
// of the function.
//
//go:noescape
func cryptBlocksGCM(fn code, key, dst, src, buf []byte, cnt *gcmCount)
@ -295,6 +297,7 @@ const (
// will be calculated and written to tag. cnt should contain the current
// counter state and will be overwritten with the updated counter state.
// TODO(mundaym): could pass in hash subkey
//
//go:noescape
func kmaGCM(fn code, key, dst, src, aad []byte, tag *[16]byte, cnt *gcmCount)


@ -18,6 +18,7 @@ import (
// The return value corresponds to the condition code set by the
// instruction. Interrupted invocations are handled by the
// function.
//
//go:noescape
func kdsa(fc uint64, params *[4096]byte) (errn uint64)


@ -5,9 +5,11 @@
package field
// feMul sets out = a * b. It works like feMulGeneric.
//
//go:noescape
func feMul(out *Element, a *Element, b *Element)
// feSquare sets out = a * a. It works like feSquareGeneric.
//
//go:noescape
func feSquare(out *Element, a *Element)


@ -53,26 +53,32 @@ func (curve p256Curve) Params() *CurveParams {
// Functions implemented in p256_asm_*64.s
// Montgomery multiplication modulo P256
//
//go:noescape
func p256Mul(res, in1, in2 []uint64)
// Montgomery square modulo P256, repeated n times (n >= 1)
//
//go:noescape
func p256Sqr(res, in []uint64, n int)
// Montgomery multiplication by 1
//
//go:noescape
func p256FromMont(res, in []uint64)
// iff cond == 1 val <- -val
//
//go:noescape
func p256NegCond(val []uint64, cond int)
// if cond == 0 res <- b; else res <- a
//
//go:noescape
func p256MovCond(res, a, b []uint64, cond int)
// Endianness swap
//
//go:noescape
func p256BigToLittle(res []uint64, in []byte)
@ -80,6 +86,7 @@ func p256BigToLittle(res []uint64, in []byte)
func p256LittleToBig(res []byte, in []uint64)
// Constant time table access
//
//go:noescape
func p256Select(point, table []uint64, idx int)
@ -87,10 +94,12 @@ func p256Select(point, table []uint64, idx int)
func p256SelectBase(point *[12]uint64, table string, idx int)
// Montgomery multiplication modulo Ord(G)
//
//go:noescape
func p256OrdMul(res, in1, in2 []uint64)
// Montgomery square modulo Ord(G), repeated n times
//
//go:noescape
func p256OrdSqr(res, in []uint64, n int)
@ -98,16 +107,19 @@ func p256OrdSqr(res, in []uint64, n int)
// If sign == 1 -> in2 = -in2
// If sel == 0 -> res = in1
// If zero == 0 -> res = in2
//
//go:noescape
func p256PointAddAffineAsm(res, in1, in2 []uint64, sign, sel, zero int)
// Point add. Returns one if the two input points were equal and zero
// otherwise. (Note that, due to the way that the equations work out, some
// representations of ∞ are considered equal to everything by this function.)
//
//go:noescape
func p256PointAddAsm(res, in1, in2 []uint64) int
// Point double
//
//go:noescape
func p256PointDoubleAsm(res, in []uint64)


@ -19,6 +19,7 @@ import (
)
// Core Foundation linker flags for the external linker. See Issue 42459.
//
//go:cgo_ldflag "-framework"
//go:cgo_ldflag "CoreFoundation"


@ -15,6 +15,7 @@ import (
)
// Security.framework linker flags for the external linker. See Issue 42459.
//
//go:cgo_ldflag "-framework"
//go:cgo_ldflag "Security"


@ -8,6 +8,7 @@ package x509
// argument to the latest security_certificates version from
// https://opensource.apple.com/source/security_certificates/
// and run "go generate". See https://golang.org/issue/38843.
//
//go:generate go run root_ios_gen.go -version 55188.120.1.0.1
import "sync"


@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// Delete the next line to include in the gob package.
//
//go:build ignore
package gob


@ -18,11 +18,13 @@ import (
// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE 4.2 CRC32
// instruction.
//
//go:noescape
func castagnoliSSE42(crc uint32, p []byte) uint32
// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE 4.2 CRC32
// instruction.
//
//go:noescape
func castagnoliSSE42Triple(
crcA, crcB, crcC uint32,
@ -32,6 +34,7 @@ func castagnoliSSE42Triple(
// ieeeCLMUL is defined in crc32_amd64.s and uses the PCLMULQDQ
// instruction as well as SSE 4.1.
//
//go:noescape
func ieeeCLMUL(crc uint32, p []byte) uint32


@ -19,6 +19,7 @@ const (
func ppc64SlicingUpdateBy8(crc uint32, table8 *slicing8Table, p []byte) uint32
// This function requires the buffer to be 16-byte aligned and > 16 bytes long
//
//go:noescape
func vectorCrc32(crc uint32, poly uint32, p []byte) uint32


@ -17,11 +17,13 @@ var hasVX = cpu.S390X.HasVX
// vectorizedCastagnoli implements CRC32 using vector instructions.
// It is defined in crc32_s390x.s.
//
//go:noescape
func vectorizedCastagnoli(crc uint32, p []byte) uint32
// vectorizedIEEE implements CRC32 using vector instructions.
// It is defined in crc32_s390x.s.
//
//go:noescape
func vectorizedIEEE(crc uint32, p []byte) uint32


@ -9,6 +9,7 @@ package goarch
// per-arch information, including constants named $GOARCH for every
// GOARCH. Each constant is 1 on the current system, 0 otherwise; multiplying
// by them is useful for defining GOARCH-specific constants.
//
//go:generate go run gengoarch.go
type ArchFamilyType int


@ -9,4 +9,5 @@ package goos
// per-OS information, including constants named Is$GOOS for every
// known GOOS. Each constant is 1 on the current system, 0 otherwise;
// multiplying by them is useful for defining GOOS-specific constants.
//
//go:generate go run gengoos.go


@ -93,6 +93,7 @@ func GetByString(s string) *Value {
// We play unsafe games that violate Go's rules (and assume a non-moving
// collector). So we quiet Go here.
// See the comment below Get for more implementation details.
//
//go:nocheckptr
func get(k key) *Value {
mu.Lock()


@ -9,5 +9,6 @@ package poll
import _ "unsafe" // for go:linkname
// Implemented in the syscall package.
//
//go:linkname fcntl syscall.fcntl
func fcntl(fd int, cmd int, arg int) (int, error)


@ -34,5 +34,6 @@ func (fd *FD) OpenDir() (uintptr, string, error) {
}
// Implemented in syscall/syscall_darwin.go.
//
//go:linkname fdopendir syscall.fdopendir
func fdopendir(fd int) (dir uintptr, err error)


@ -15,6 +15,7 @@ import (
)
// runtimeNano returns the current value of the runtime clock in nanoseconds.
//
//go:linkname runtimeNano runtime.nanotime
func runtimeNano() int64


@ -12,5 +12,6 @@ import (
)
// Implemented in syscall/syscall_darwin.go.
//
//go:linkname writev syscall.writev
func writev(fd int, iovecs []syscall.Iovec) (uintptr, error)


@ -7,6 +7,7 @@ package poll
import "syscall"
// Not strictly needed, but very helpful for debugging; see issue #10221.
//
//go:cgo_import_dynamic _ _ "libsendfile.so"
//go:cgo_import_dynamic _ _ "libsocket.so"


@ -458,6 +458,7 @@ func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Po
func ifaceE2I(t *rtype, src any, dst unsafe.Pointer)
// typedmemmove copies a value of type t to dst from src.
//
//go:noescape
func typedmemmove(t *rtype, dst, src unsafe.Pointer)


@ -20,5 +20,6 @@ func IsNonblock(fd int) (nonblocking bool, err error) {
}
// Implemented in the syscall package.
//
//go:linkname fcntl syscall.fcntl
func fcntl(fd int, cmd int, arg int) (int, error)


@ -9,5 +9,6 @@ package net
import _ "unsafe" // for go:linkname
// Implemented in the syscall package.
//
//go:linkname fcntl syscall.fcntl
func fcntl(fd int, cmd int, arg int) (int, error)


@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// Test broken pipes on Unix systems.
//
//go:build !plan9 && !js
package os_test


@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// Test use of raw connections.
//
//go:build !plan9 && !js
package os_test


@ -150,5 +150,6 @@ var (
func lastmoduleinit() (pluginpath string, syms map[string]any, errstr string)
// doInit is defined in package runtime
//
//go:linkname doInit runtime.doInit
func doInit(t unsafe.Pointer) // t should be a *runtime.initTask


@ -545,6 +545,7 @@ func passEmptyStruct(a int, b struct{}, c float64) (int, struct{}, float64) {
// This test case forces a large argument to the stack followed by more
// in-register arguments.
//
//go:registerparams
//go:noinline
func passStruct10AndSmall(a Struct10, b byte, c uint) (Struct10, byte, uint) {


@ -158,6 +158,7 @@ type makeFuncCtxt struct {
// nosplit because pointers are being held in uintptr slots in args, so
// having our stack scanned now could lead to accidentally freeing
// memory.
//
//go:nosplit
func moveMakeFuncArgPtrs(ctxt *makeFuncCtxt, args *abi.RegArgs) {
for i, arg := range args.Ints {


@ -2757,6 +2757,7 @@ type runtimeSelect struct {
// If the case was a receive, val is filled in with the received value.
// The conventional OK bool indicates whether the receive corresponds
// to a sent value.
//
//go:noescape
func rselect([]runtimeSelect) (chosen int, recvOK bool)
@ -3493,6 +3494,7 @@ func maplen(m unsafe.Pointer) int
// Arguments passed through to call do not escape. The type is used only in a
// very limited callee of call, the stackArgs are copied, and regArgs is only
// used in the call frame.
//
//go:noescape
//go:linkname call runtime.reflectcall
func call(stackArgsType *rtype, f, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
@ -3500,29 +3502,35 @@ func call(stackArgsType *rtype, f, stackArgs unsafe.Pointer, stackArgsSize, stac
func ifaceE2I(t *rtype, src any, dst unsafe.Pointer)
// memmove copies size bytes to dst from src. No write barriers are used.
//
//go:noescape
func memmove(dst, src unsafe.Pointer, size uintptr)
// typedmemmove copies a value of type t to dst from src.
//
//go:noescape
func typedmemmove(t *rtype, dst, src unsafe.Pointer)
// typedmemmovepartial is like typedmemmove but assumes that
// dst and src point off bytes into the value and only copies size bytes.
//
//go:noescape
func typedmemmovepartial(t *rtype, dst, src unsafe.Pointer, off, size uintptr)
// typedmemclr zeros the value at ptr of type t.
//
//go:noescape
func typedmemclr(t *rtype, ptr unsafe.Pointer)
// typedmemclrpartial is like typedmemclr but assumes that
// dst points off bytes into the value and only clears size bytes.
//
//go:noescape
func typedmemclrpartial(t *rtype, ptr unsafe.Pointer, off, size uintptr)
// typedslicecopy copies a slice of elemType values from src to dst,
// returning the number of elements copied.
//
//go:noescape
func typedslicecopy(elemType *rtype, dst, src unsafeheader.Slice) int


@ -56,6 +56,7 @@ func asanunpoison(addr unsafe.Pointer, sz uintptr)
func asanpoison(addr unsafe.Pointer, sz uintptr)
// These are called from asan_GOARCH.s
//
//go:cgo_import_static __asan_read_go
//go:cgo_import_static __asan_write_go
//go:cgo_import_static __asan_unpoison_go


@ -21,6 +21,7 @@ import "unsafe"
// that pattern working. In particular, crosscall2 actually takes four
// arguments, but it works to call it with three arguments when
// calling _cgo_panic.
//
//go:cgo_export_static crosscall2
//go:cgo_export_dynamic crosscall2


@ -6,6 +6,7 @@ package cgo
// These functions must be exported in order to perform
// longcall on cgo programs (cf gcc_aix_ppc64.c).
//
//go:cgo_export_static __cgo_topofstack
//go:cgo_export_static runtime.rt0_go
//go:cgo_export_static _rt0_ppc64_aix_lib


@ -17,4 +17,5 @@ var _guard_local uintptr
// This is normally marked as hidden and placed in the
// .openbsd.randomdata section.
//
//go:cgo_export_dynamic __guard_local __guard_local


@ -12,11 +12,13 @@ import "unsafe"
// _cgo_mmap is filled in by runtime/cgo when it is linked into the
// program, so it is only non-nil when using cgo.
//
//go:linkname _cgo_mmap _cgo_mmap
var _cgo_mmap unsafe.Pointer
// _cgo_munmap is filled in by runtime/cgo when it is linked into the
// program, so it is only non-nil when using cgo.
//
//go:linkname _cgo_munmap _cgo_munmap
var _cgo_munmap unsafe.Pointer
@ -24,6 +26,7 @@ var _cgo_munmap unsafe.Pointer
// support sanitizer interceptors. Don't allow stack splits, since this function
// (used by sysAlloc) is called in a lot of low-level parts of the runtime and
// callers often assume it won't acquire any locks.
//
//go:nosplit
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
if _cgo_mmap != nil {


@ -9,4 +9,5 @@ package runtime
// crosscall_ppc64 calls into the runtime to set up the registers the
// Go runtime expects and so the symbol it calls needs to be exported
// for external linking to work.
//
//go:cgo_export_static _cgo_reginit


@ -12,6 +12,7 @@ import "unsafe"
// _cgo_sigaction is filled in by runtime/cgo when it is linked into the
// program, so it is only non-nil when using cgo.
//
//go:linkname _cgo_sigaction _cgo_sigaction
var _cgo_sigaction unsafe.Pointer
@ -88,5 +89,6 @@ func sigaction(sig uint32, new, old *sigactiont) {
// callCgoSigaction calls the sigaction function in the runtime/cgo package
// using the GCC calling convention. It is implemented in assembly.
//
//go:noescape
func callCgoSigaction(sig uintptr, new, old *sigactiont) int32


@ -102,6 +102,7 @@ type argset struct {
}
// wrapper for syscall package to call cgocall for libc (cgo) calls.
//
//go:linkname syscall_cgocaller syscall.cgocaller
//go:nosplit
//go:uintptrescapes
@ -199,6 +200,7 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
}
// Call from C back to Go. fn must point to an ABIInternal Go entry-point.
//
//go:nosplit
func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
gp := getg()
@ -598,6 +600,7 @@ func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
// cgoIsGoPointer reports whether the pointer is a Go pointer--a
// pointer to Go memory. We only care about Go memory that might
// contain pointers.
//
//go:nosplit
//go:nowritebarrierrec
func cgoIsGoPointer(p unsafe.Pointer) bool {
@ -619,6 +622,7 @@ func cgoIsGoPointer(p unsafe.Pointer) bool {
}
// cgoInRange reports whether p is between start and end.
//
//go:nosplit
//go:nowritebarrierrec
func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {


@ -61,6 +61,7 @@ func cgoCheckWriteBarrier(dst *uintptr, src uintptr) {
// size is the number of bytes to copy.
// It throws if the program is copying a block that contains a Go pointer
// into non-Go memory.
//
//go:nosplit
//go:nowritebarrier
func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
@ -81,6 +82,7 @@ func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
// typ is the element type of the slice.
// It throws if the program is copying slice elements that contain Go pointers
// into non-Go memory.
//
//go:nosplit
//go:nowritebarrier
func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) {
@ -103,6 +105,7 @@ func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) {
// cgoCheckTypedBlock checks the block of memory at src, for up to size bytes,
// and throws if it finds a Go pointer. The type of the memory is typ,
// and src is off bytes into that type.
//
//go:nosplit
//go:nowritebarrier
func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
@ -166,6 +169,7 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
// cgoCheckBits checks the block of memory at src, for up to size
// bytes, and throws if it finds a Go pointer. The gcbits mark each
// pointer value. The src pointer is off bytes into the gcbits.
//
//go:nosplit
//go:nowritebarrier
func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
@ -201,6 +205,7 @@ func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
// We only use this when looking at a value on the stack when the type
// uses a GC program, because otherwise it's more efficient to use the
// GC bits. This is called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {


@ -139,6 +139,7 @@ func full(c *hchan) bool {
}
// entry point for c <- x from compiled code
//
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
chansend(c, elem, true, getcallerpc())
@ -435,6 +436,7 @@ func empty(c *hchan) bool {
}
// entry points for <- c from compiled code
//
//go:nosplit
func chanrecv1(c *hchan, elem unsafe.Pointer) {
chanrecv(c, elem, true)


@ -88,6 +88,7 @@ func SetCPUProfileRate(hz int) {
// and cannot allocate memory or acquire locks that might be
// held at the time of the signal, nor can it use substantial amounts
// of stack.
//
//go:nowritebarrierrec
func (p *cpuProfile) add(tagPtr *unsafe.Pointer, stk []uintptr) {
// Simple cas-lock to coordinate with setcpuprofilerate.
@ -117,6 +118,7 @@ func (p *cpuProfile) add(tagPtr *unsafe.Pointer, stk []uintptr) {
// Instead, we copy the stack into cpuprof.extra,
// which will be drained the next time a Go thread
// gets the signal handling event.
//
//go:nosplit
//go:nowritebarrierrec
func (p *cpuProfile) addNonGo(stk []uintptr) {


@ -25,6 +25,7 @@ const (
// For Plan 9 shared environment semantics, instead of Getenv(key) and
// Setenv(key, value), one can use os.ReadFile("/env/" + key) and
// os.WriteFile("/env/" + key, value, 0666) respectively.
//
//go:nosplit
func goenvs() {
buf := make([]byte, envBufSize)
@ -71,6 +72,7 @@ func goenvs() {
// Dofiles reads the directory opened with file descriptor fd, applying function f
// to each filename in it.
//
//go:nosplit
func dofiles(dirfd int32, f func([]byte)) {
dirbuf := new([dirBufSize]byte)
@ -96,6 +98,7 @@ func dofiles(dirfd int32, f func([]byte)) {
// Gdirname returns the first filename from a buffer of directory entries,
// and a slice containing the remaining directory entries.
// If the buffer doesn't start with a valid directory entry, the returned name is nil.
//
//go:nosplit
func gdirname(buf []byte) (name []byte, rest []byte) {
if 2+nameOffset+2 > len(buf) {
@ -116,6 +119,7 @@ func gdirname(buf []byte) (name []byte, rest []byte) {
// Gbit16 reads a 16-bit little-endian binary number from b and returns it
// with the remaining slice of b.
//
//go:nosplit
func gbit16(b []byte) (int, []byte) {
return int(b[0]) | int(b[1])<<8, b[2:]


@ -49,6 +49,7 @@ var _cgo_unsetenv unsafe.Pointer // pointer to C function
// Update the C environment if cgo is loaded.
// Called from syscall.Setenv.
//
//go:linkname syscall_setenv_c syscall.setenv_c
func syscall_setenv_c(k string, v string) {
if _cgo_setenv == nil {
@ -60,6 +61,7 @@ func syscall_setenv_c(k string, v string) {
// Update the C environment if cgo is loaded.
// Called from syscall.unsetenv.
//
//go:linkname syscall_unsetenv_c syscall.unsetenv_c
func syscall_unsetenv_c(k string) {
if _cgo_unsetenv == nil {


@ -1143,6 +1143,7 @@ func SemNwait(addr *uint32) uint32 {
}
// mspan wrapper for testing.
//
//go:notinheap
type MSpan mspan


@ -84,6 +84,7 @@ type timeHistogram struct {
//
// Disallow preemptions and stack growths because this function
// may run in sensitive locations.
//
//go:nosplit
func (h *timeHistogram) record(duration int64) {
if duration < 0 {


@ -9,6 +9,7 @@ package atomic
import "unsafe"
// Export some functions via linkname to assembly in sync/atomic.
//
//go:linkname Load
//go:linkname Loadp


@ -7,6 +7,7 @@ package atomic
import "unsafe"
// Export some functions via linkname to assembly in sync/atomic.
//
//go:linkname Load
//go:linkname Loadp
//go:linkname Load64


@ -12,6 +12,7 @@ import (
)
// Export some functions via linkname to assembly in sync/atomic.
//
//go:linkname Xchg
//go:linkname Xchguintptr
@ -43,6 +44,7 @@ func addrLock(addr *uint64) *spinlock {
}
// Atomic add and return new value.
//
//go:nosplit
func Xadd(val *uint32, delta int32) uint32 {
for {


@ -5,6 +5,7 @@
//go:build mips || mipsle
// Export some functions via linkname to assembly in sync/atomic.
//
//go:linkname Xadd64
//go:linkname Xchg64
//go:linkname Cas64


@ -7,6 +7,7 @@ package atomic
import "unsafe"
// Export some functions via linkname to assembly in sync/atomic.
//
//go:linkname Load
//go:linkname Loadp
//go:linkname Load64


@ -6,6 +6,7 @@
// See https://github.com/WebAssembly/design/issues/1073
// Export some functions via linkname to assembly in sync/atomic.
//
//go:linkname Load
//go:linkname Loadp
//go:linkname Load64


@ -38,6 +38,7 @@ const (
// affect mutex's state.
// We use the uintptr mutex.key and note.key as a uint32.
//
//go:nosplit
func key32(p *uintptr) *uint32 {
return (*uint32)(unsafe.Pointer(p))


@ -23,6 +23,7 @@ func lockWithRank(l *mutex, rank lockRank) {
}
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func acquireLockRank(rank lockRank) {
}
@ -32,6 +33,7 @@ func unlockWithRank(l *mutex) {
}
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func releaseLockRank(rank lockRank) {
}


@ -82,6 +82,7 @@ func lockWithRank(l *mutex, rank lockRank) {
}
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func printHeldLocks(gp *g) {
if gp.m.locksHeldLen == 0 {
@ -97,6 +98,7 @@ func printHeldLocks(gp *g) {
// acquireLockRank acquires a rank which is not associated with a mutex lock
//
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func acquireLockRank(rank lockRank) {
gp := getg()
@ -181,6 +183,7 @@ func unlockWithRank(l *mutex) {
// releaseLockRank releases a rank which is not associated with a mutex lock
//
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func releaseLockRank(rank lockRank) {
gp := getg()
@ -226,6 +229,7 @@ func lockWithRankMayAcquire(l *mutex, rank lockRank) {
}
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func checkLockHeld(gp *g, l *mutex) bool {
for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
@ -239,6 +243,7 @@ func checkLockHeld(gp *g, l *mutex) bool {
// assertLockHeld throws if l is not held by the caller.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertLockHeld(l *mutex) {
gp := getg()
@ -264,6 +269,7 @@ func assertLockHeld(l *mutex) {
// pointer to the exact mutex is not available.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertRankHeld(r lockRank) {
gp := getg()
@ -289,6 +295,7 @@ func assertRankHeld(r lockRank) {
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func worldStopped() {
if stopped := atomic.Xadd(&worldIsStopped, 1); stopped != 1 {
@ -304,6 +311,7 @@ func worldStopped() {
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func worldStarted() {
if stopped := atomic.Xadd(&worldIsStopped, -1); stopped != 0 {
@ -315,6 +323,7 @@ func worldStarted() {
}
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func checkWorldStopped() bool {
stopped := atomic.Load(&worldIsStopped)
@ -332,6 +341,7 @@ func checkWorldStopped() bool {
// which M stopped the world.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertWorldStopped() {
if checkWorldStopped() {
@ -345,6 +355,7 @@ func assertWorldStopped() {
// passed lock is not held.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertWorldStoppedOrLockHeld(l *mutex) {
if checkWorldStopped() {


@ -1326,6 +1326,7 @@ func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
const (
@ -1395,6 +1396,7 @@ func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))


@ -199,6 +199,7 @@ func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
// typedmemmovepartial is like typedmemmove but assumes that
// dst and src point off bytes into the value and only copies size bytes.
// off must be a multiple of goarch.PtrSize.
//
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
if writeBarrier.needed && typ.ptrdata > off && size >= goarch.PtrSize {


@ -65,6 +65,7 @@ const (
)
// addb returns the byte pointer p+n.
//
//go:nowritebarrier
//go:nosplit
func addb(p *byte, n uintptr) *byte {
@ -75,6 +76,7 @@ func addb(p *byte, n uintptr) *byte {
}
// subtractb returns the byte pointer p-n.
//
//go:nowritebarrier
//go:nosplit
func subtractb(p *byte, n uintptr) *byte {
@ -85,6 +87,7 @@ func subtractb(p *byte, n uintptr) *byte {
}
// add1 returns the byte pointer p+1.
//
//go:nowritebarrier
//go:nosplit
func add1(p *byte) *byte {
@ -95,9 +98,11 @@ func add1(p *byte) *byte {
}
// subtract1 returns the byte pointer p-1.
//
//go:nowritebarrier
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func subtract1(p *byte) *byte {
// Note: wrote out full expression instead of calling subtractb(p, 1)
@ -314,6 +319,7 @@ func (m *markBits) advance() {
// In particular, be careful not to point past the end of an object.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func heapBitsForAddr(addr uintptr) (h heapBits) {
// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
@ -381,6 +387,7 @@ func badPointer(s *mspan, p, refBase, refOff uintptr) {
//
// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
// Since p is a uintptr, it would not be adjusted if the stack were to move.
//
//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
s = spanOf(p)
@ -418,6 +425,7 @@ func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex ui
}
// verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
//
//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
func reflect_verifyNotInHeapPtr(p uintptr) bool {
// Conversion to a pointer is ok as long as findObject above does not call badPointer.
@ -431,6 +439,7 @@ func reflect_verifyNotInHeapPtr(p uintptr) bool {
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (h heapBits) next() heapBits {
if h.shift < 3*heapBitsShift {
@ -477,6 +486,7 @@ func (h heapBits) nextArena() heapBits {
// h.forward(1) is equivalent to h.next(), just slower.
// Note that forward does not modify h. The caller must record the result.
// bits returns the heap bits for the current word.
//
//go:nosplit
func (h heapBits) forward(n uintptr) heapBits {
n += uintptr(h.shift) / heapBitsShift
@ -517,6 +527,7 @@ func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
// described by the same bitmap byte.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (h heapBits) bits() uint32 {
// The (shift & 31) eliminates a test and conditional branch
@ -534,6 +545,7 @@ func (h heapBits) morePointers() bool {
// isPointer reports whether the heap bits describe a pointer word.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (h heapBits) isPointer() bool {
return h.bits()&bitPointer != 0
@ -633,6 +645,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
if (dst|src|size)&(goarch.PtrSize-1) != 0 {
@ -1951,6 +1964,7 @@ func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
// gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x any) []byte {
ret := getgcmask(x)


@ -10,6 +10,7 @@ import (
// Don't split the stack as this method may be invoked without a valid G, which
// prevents us from allocating more stack.
//
//go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer {
p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
@ -39,6 +40,7 @@ func sysHugePageOS(v unsafe.Pointer, n uintptr) {
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr) {
munmap(v, n)


@ -12,6 +12,7 @@ import (
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer {
v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
@ -33,6 +34,7 @@ func sysHugePageOS(v unsafe.Pointer, n uintptr) {
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr) {
munmap(v, n)


@ -10,6 +10,7 @@ import (
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer {
v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
@ -37,6 +38,7 @@ func sysHugePageOS(v unsafe.Pointer, n uintptr) {
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr) {
munmap(v, n)


@ -12,6 +12,7 @@ import (
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer {
p := sysReserveOS(nil, n)
@ -30,6 +31,7 @@ func sysHugePageOS(v unsafe.Pointer, n uintptr) {
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr) {
}


@ -16,6 +16,7 @@ const (
// Don't split the stack as this method may be invoked without a valid G, which
// prevents us from allocating more stack.
//
//go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer {
p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
@ -162,6 +163,7 @@ func sysHugePageOS(v unsafe.Pointer, n uintptr) {
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr) {
munmap(v, n)


@ -23,6 +23,7 @@ const (
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer {
return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
@ -95,6 +96,7 @@ func sysHugePageOS(v unsafe.Pointer, n uintptr) {
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr) {
r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)


@ -439,6 +439,7 @@ okarg:
}
// Mark KeepAlive as noinline so that it is easily detectable as an intrinsic.
//
//go:noinline
// KeepAlive marks its argument as currently reachable.


@ -888,6 +888,7 @@ func scanstack(gp *g, gcw *gcWork) int64 {
}
// Scan a stack frame: local variables and function arguments/results.
//
//go:nowritebarrier
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
if _DebugGC > 1 && frame.continpc != 0 {
@ -1185,6 +1186,7 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 {
// gcw.bytesMarked or gcw.heapScanWork.
//
// If stk != nil, possible stack pointers are also reported to stk.putPtr.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
// Use local copies of original parameters, so that a stack trace
@ -1413,6 +1415,7 @@ func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackSca
// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//
//go:nowritebarrier
func shade(b uintptr) {
if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {


@ -158,6 +158,7 @@ type stackObject struct {
}
// obj.r = r, but with no write barrier.
//
//go:nowritebarrier
func (obj *stackObject) setRecord(r *stackObjectRecord) {
// Types of stack objects are always in read-only memory, not the heap.


@ -424,6 +424,7 @@ func isSweepDone() bool {
}
// Returns only when span s has been swept.
//
//go:nowritebarrier
func (s *mspan) ensureSwept() {
// Caller must disable preemption.


@ -107,6 +107,7 @@ func (w *gcWork) init() {
// put enqueues a pointer for the garbage collector to trace.
// obj must point to the beginning of a heap object or an oblet.
//
//go:nowritebarrierrec
func (w *gcWork) put(obj uintptr) {
flushed := false
@ -145,6 +146,7 @@ func (w *gcWork) put(obj uintptr) {
// putFast does a put and reports whether it can be done quickly;
// otherwise it returns false and the caller needs to call put.
//
//go:nowritebarrierrec
func (w *gcWork) putFast(obj uintptr) bool {
wbuf := w.wbuf1
@ -196,6 +198,7 @@ func (w *gcWork) putBatch(obj []uintptr) {
// If there are no pointers remaining in this gcWork or in the global
// queue, tryGet returns 0. Note that there may still be pointers in
// other gcWork instances or other caches.
//
//go:nowritebarrierrec
func (w *gcWork) tryGet() uintptr {
wbuf := w.wbuf1
@ -225,6 +228,7 @@ func (w *gcWork) tryGet() uintptr {
// tryGetFast dequeues a pointer for the garbage collector to trace
// if one is readily available. Otherwise it returns 0 and
// the caller is expected to call tryGet().
//
//go:nowritebarrierrec
func (w *gcWork) tryGetFast() uintptr {
wbuf := w.wbuf1
@ -278,6 +282,7 @@ func (w *gcWork) dispose() {
// balance moves some work that's cached in this gcWork back on the
// global queue.
//
//go:nowritebarrierrec
func (w *gcWork) balance() {
if w.wbuf1 == nil {
@ -300,6 +305,7 @@ func (w *gcWork) balance() {
}
// empty reports whether w has no mark work available.
//
//go:nowritebarrierrec
func (w *gcWork) empty() bool {
return w.wbuf1 == nil || (w.wbuf1.nobj == 0 && w.wbuf2.nobj == 0)
@ -340,6 +346,7 @@ func (b *workbuf) checkempty() {
// getempty pops an empty work buffer off the work.empty list,
// allocating new buffers if none are available.
//
//go:nowritebarrier
func getempty() *workbuf {
var b *workbuf
@ -395,6 +402,7 @@ func getempty() *workbuf {
// putempty puts a workbuf onto the work.empty list.
// Upon entry this goroutine owns b. The lfstack.push relinquishes ownership.
//
//go:nowritebarrier
func putempty(b *workbuf) {
b.checkempty()
@ -404,6 +412,7 @@ func putempty(b *workbuf) {
// putfull puts the workbuf on the work.full list for the GC.
// putfull accepts partially full buffers so the GC can avoid competing
// with the mutators for ownership of partially full buffers.
//
//go:nowritebarrier
func putfull(b *workbuf) {
b.checknonempty()
@ -412,6 +421,7 @@ func putfull(b *workbuf) {
// trygetfull tries to get a full or partially empty workbuffer.
// If one is not immediately available, return nil.
//
//go:nowritebarrier
func trygetfull() *workbuf {
b := (*workbuf)(work.full.pop())


@ -589,6 +589,7 @@ func (i arenaIdx) l2() uint {
// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into mSpanManual spans.
// Non-preemptible because it is used by write barriers.
//
//go:nowritebarrier
//go:nosplit
func inheap(b uintptr) bool {


@ -54,6 +54,7 @@ func msanfree(addr unsafe.Pointer, sz uintptr)
func msanmove(dst, src unsafe.Pointer, sz uintptr)
// These are called from msan_GOARCH.s
//
//go:cgo_import_static __msan_read_go
//go:cgo_import_static __msan_write_go
//go:cgo_import_static __msan_malloc_go


@ -550,6 +550,7 @@ func readGCStats(pauses *[]uint64) {
// readGCStats_m must be called on the system stack because it acquires the heap
// lock. See mheap for details.
//
//go:systemstack
func readGCStats_m(pauses *[]uint64) {
p := *pauses
@ -622,6 +623,7 @@ type sysMemStat uint64
// load atomically reads the value of the stat.
//
// Must be nosplit as it is called in runtime initialization, e.g. newosproc0.
//
//go:nosplit
func (s *sysMemStat) load() uint64 {
return atomic.Load64((*uint64)(s))
@ -630,6 +632,7 @@ func (s *sysMemStat) load() uint64 {
// add atomically adds n to the sysMemStat.
//
// Must be nosplit as it is called in runtime initialization, e.g. newosproc0.
//
//go:nosplit
func (s *sysMemStat) add(n int64) {
if s == nil {


@ -271,6 +271,7 @@ func (c *pollCache) free(pd *pollDesc) {
// poll_runtime_pollReset, which is internal/poll.runtime_pollReset,
// prepares a descriptor for polling in mode, which is 'r' or 'w'.
// This returns an error code; the codes are defined above.
//
//go:linkname poll_runtime_pollReset internal/poll.runtime_pollReset
func poll_runtime_pollReset(pd *pollDesc, mode int) int {
errcode := netpollcheckerr(pd, int32(mode))
@ -289,6 +290,7 @@ func poll_runtime_pollReset(pd *pollDesc, mode int) int {
// waits for a descriptor to be ready for reading or writing,
// according to mode, which is 'r' or 'w'.
// This returns an error code; the codes are defined above.
//
//go:linkname poll_runtime_pollWait internal/poll.runtime_pollWait
func poll_runtime_pollWait(pd *pollDesc, mode int) int {
errcode := netpollcheckerr(pd, int32(mode))
@ -438,6 +440,7 @@ func poll_runtime_pollUnblock(pd *pollDesc) {
// whether the fd is ready for reading or writing or both.
//
// This may run while the world is stopped, so write barriers are not allowed.
//
//go:nowritebarrier
func netpollready(toRun *gList, pd *pollDesc, mode int32) {
var rg, wg *g


@ -146,6 +146,7 @@ func netpollBreak() {
// delay < 0: blocks indefinitely
// delay == 0: does not block, just polls
// delay > 0: block for up to that many nanoseconds
//
//go:nowritebarrierrec
func netpoll(delay int64) gList {
var timeout uintptr


@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// The file contains tests that cannot run under the race detector for some reason.
//
//go:build !race
package runtime_test


@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// The file contains tests that cannot run under the race detector for some reason.
//
//go:build !race
package runtime_test


@ -452,6 +452,7 @@ func pipe() (r, w int32, errno int32) {
// assembly routine; the higher bits (if required) should be provided
// by the assembly routine as 0.
// The err result is an OS error code such as ENOMEM.
//
//go:nosplit
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
r, err0 := syscall6(&libc_mmap, uintptr(addr), uintptr(n), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(off))


@ -141,6 +141,7 @@ func osinit() {
func tstart_sysvicall(newm *m) uint32
// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
var (
@ -267,6 +268,7 @@ func getsig(i uint32) uintptr {
}
// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
*(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp


@ -150,6 +150,7 @@ var failthreadcreate = []byte("runtime: failed to create new OS thread\n")
// Called to do synchronous initialization of Go code built with
// -buildmode=c-archive or -buildmode=c-shared.
// None of the Go runtime is initialized.
//
//go:nosplit
//go:nowritebarrierrec
func libpreinit() {
@ -296,6 +297,7 @@ func getsig(i uint32) uintptr {
}
// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
*(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp


@ -195,6 +195,7 @@ func goenvs() {
}
// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrierrec
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
@ -292,6 +293,7 @@ var failthreadcreate = []byte("runtime: failed to create new OS thread\n")
// Called to do synchronous initialization of Go code built with
// -buildmode=c-archive or -buildmode=c-shared.
// None of the Go runtime is initialized.
//
//go:nosplit
//go:nowritebarrierrec
func libpreinit() {
@ -324,6 +326,7 @@ func minit() {
}
// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
// iOS does not support alternate signal stack.
@ -410,6 +413,7 @@ func getsig(i uint32) uintptr {
}
// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
*(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp


@ -142,6 +142,7 @@ func futexwakeup(addr *uint32, cnt uint32) {
func lwp_start(uintptr)
// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
@ -201,6 +202,7 @@ func minit() {
}
// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
unminitSignals()
@ -247,6 +249,7 @@ func getsig(i uint32) uintptr {
}
// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
s.ss_sp = sp


@ -192,6 +192,7 @@ func futexwakeup(addr *uint32, cnt uint32) {
func thr_start()
// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
@ -221,6 +222,7 @@ func newosproc(mp *m) {
}
// Version of newosproc that doesn't require a valid G.
//
//go:nosplit
func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
stack := sysAlloc(stacksize, &memstats.stacks_sys)
@ -261,6 +263,7 @@ var failthreadcreate = []byte("runtime: failed to create new OS thread\n")
// Called to do synchronous initialization of Go code built with
// -buildmode=c-archive or -buildmode=c-shared.
// None of the Go runtime is initialized.
//
//go:nosplit
//go:nowritebarrierrec
func libpreinit() {
@ -318,6 +321,7 @@ func minit() {
}
// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
unminitSignals()
@ -359,6 +363,7 @@ func getsig(i uint32) uintptr {
}
// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
s.ss_sp = sp
@ -431,6 +436,7 @@ func sysauxv(auxv []uintptr) {
}
// sysSigaction calls the sigaction system call.
//
//go:nosplit
func sysSigaction(sig uint32, new, old *sigactiont) {
// Use system stack to avoid split stack overflow on amd64
@ -442,6 +448,7 @@ func sysSigaction(sig uint32, new, old *sigactiont) {
}
// asmSigaction is implemented in assembly.
//
//go:noescape
func asmSigaction(sig uintptr, new, old *sigactiont) int32


@ -126,6 +126,7 @@ func initsig(preinit bool) {
}
// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
panic("newosproc: not implemented")


@ -55,6 +55,7 @@ const (
// if(*addr == val) sleep
// Might be woken up spuriously; that's allowed.
// Don't sleep longer than ns; ns < 0 means forever.
//
//go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64) {
// Some Linux kernels have a bug where futex of
@ -73,6 +74,7 @@ func futexsleep(addr *uint32, val uint32, ns int64) {
}
// If any procs are sleeping on addr, wake up at most cnt.
//
//go:nosplit
func futexwakeup(addr *uint32, cnt uint32) {
ret := futex(unsafe.Pointer(addr), _FUTEX_WAKE_PRIVATE, cnt, nil, nil, 0)
@ -157,6 +159,7 @@ const (
func clone(flags int32, stk, mp, gp, fn unsafe.Pointer) int32
// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
@ -184,6 +187,7 @@ func newosproc(mp *m) {
}
// Version of newosproc that doesn't require a valid G.
//
//go:nosplit
func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
stack := sysAlloc(stacksize, &memstats.stacks_sys)
@ -365,6 +369,7 @@ func goenvs() {
// Called to do synchronous initialization of Go code built with
// -buildmode=c-archive or -buildmode=c-shared.
// None of the Go runtime is initialized.
//
//go:nosplit
//go:nowritebarrierrec
func libpreinit() {
@ -392,6 +397,7 @@ func minit() {
}
// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
unminitSignals()
@ -497,6 +503,7 @@ func getsig(i uint32) uintptr {
}
// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
*(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp
@ -507,6 +514,7 @@ func (c *sigctxt) fixsigcode(sig uint32) {
}
// sysSigaction calls the rt_sigaction system call.
//
//go:nosplit
func sysSigaction(sig uint32, new, old *sigactiont) {
if rt_sigaction(uintptr(sig), new, old, unsafe.Sizeof(sigactiont{}.sa_mask)) != 0 {
@ -531,6 +539,7 @@ func sysSigaction(sig uint32, new, old *sigactiont) {
}
// rt_sigaction is implemented in assembly.
//
//go:noescape
func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32


@ -201,6 +201,7 @@ func semawakeup(mp *m) {
}
// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
@ -248,6 +249,7 @@ func netbsdMstart()
// baroque to remove a signal stack here only to add one in minit, but
// it's a simple change that keeps NetBSD working like other OS's.
// At this point all signals are blocked, so there is no race.
//
//go:nosplit
func netbsdMstart0() {
st := stackt{ss_flags: _SS_DISABLE}
@ -304,6 +306,7 @@ func minit() {
}
// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
unminitSignals()
@ -350,6 +353,7 @@ func getsig(i uint32) uintptr {
}
// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
s.ss_sp = sp


@ -168,6 +168,7 @@ func minit() {
}
// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
unminitSignals()
@ -214,6 +215,7 @@ func getsig(i uint32) uintptr {
}
// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
s.ss_sp = sp

Some files were not shown because too many files have changed in this diff.