all: gofmt main repo

[This CL is part of a sequence implementing the proposal #51082.
The design doc is at https://go.dev/s/godocfmt-design.]

Run the updated gofmt, which reformats doc comments, on the main
repository. Vendored files are excluded.

For #51082.

Change-Id: I7332f099b60f716295fb34719c98c04eb1a85407
Reviewed-on: https://go-review.googlesource.com/c/go/+/384268
Reviewed-by: Jonathan Amsterdam <jba@google.com>
Reviewed-by: Ian Lance Taylor <iant@golang.org>

This commit is contained in:
parent 017933163a
commit 19309779ac
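
The changes throughout this diff follow the doc comment layout the updated gofmt emits: code blocks indented with a single tab, list items written as "-" bullets or "1." numbered items, "# " section headings, and blank "//" lines separating these from surrounding prose. The snippet below is a minimal, hypothetical sketch of that output style; it is not taken from this CL, and the package and Classify function are invented for illustration.

	// Package example shows the doc comment layout produced by the
	// updated gofmt (hypothetical code, not part of this CL).
	package example

	// Classify reports how x compares to zero. The result is
	//
	//	-1 if x < 0
	//	 0 if x == 0
	//	+1 if x > 0
	//
	// following these rules:
	//   - negative values map to -1
	//   - zero maps to 0
	//   - positive values map to +1
	func Classify(x int) int {
		switch {
		case x < 0:
			return -1
		case x > 0:
			return 1
		}
		return 0
	}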
@@ -333,10 +333,9 @@ func (z *Int) Abs(x *Int) *Int {
// CmpInt compares x and y. The result is
//
// -1 if x < y
// 0 if x == y
// +1 if x > y
//
// -1 if x < y
// 0 if x == y
// +1 if x > y
func CmpInt(x, y *Int) int {
	x.doinit()
	y.doinit()
@@ -13,9 +13,11 @@
// binary.
//
// This script requires that three environment variables be set:
// GOIOS_DEV_ID: The codesigning developer id or certificate identifier
// GOIOS_APP_ID: The provisioning app id prefix. Must support wildcard app ids.
// GOIOS_TEAM_ID: The team id that owns the app id prefix.
//
// GOIOS_DEV_ID: The codesigning developer id or certificate identifier
// GOIOS_APP_ID: The provisioning app id prefix. Must support wildcard app ids.
// GOIOS_TEAM_ID: The team id that owns the app id prefix.
//
// $GOROOT/misc/ios contains a script, detect.go, that attempts to autodetect these.
package main
@@ -221,9 +221,11 @@ func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }
// that the file has no data in it, which is rather odd.
//
// As an example, if the underlying raw file contains the 10-byte data:
//
// var compactFile = "abcdefgh"
//
// And the sparse map has the following entries:
//
// var spd sparseDatas = []sparseEntry{
// {Offset: 2, Length: 5}, // Data fragment for 2..6
// {Offset: 18, Length: 3}, // Data fragment for 18..20
@@ -235,6 +237,7 @@ func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }
// }
//
// Then the content of the resulting sparse file with a Header.Size of 25 is:
//
// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
type (
	sparseDatas []sparseEntry
@@ -293,9 +296,9 @@ func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry {
// The input must have been already validated.
//
// This function mutates src and returns a normalized map where:
// * adjacent fragments are coalesced together
// * only the last fragment may be empty
// * the endOffset of the last fragment is the total size
// - adjacent fragments are coalesced together
// - only the last fragment may be empty
// - the endOffset of the last fragment is the total size
func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry {
	dst := src[:0]
	var pre sparseEntry
@@ -336,9 +336,9 @@ func parsePAX(r io.Reader) (map[string]string, error) {
// header in case further processing is required.
//
// The err will be set to io.EOF only when one of the following occurs:
// * Exactly 0 bytes are read and EOF is hit.
// * Exactly 1 block of zeros is read and EOF is hit.
// * At least 2 blocks of zeros are read.
// - Exactly 0 bytes are read and EOF is hit.
// - Exactly 1 block of zeros is read and EOF is hit.
// - At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() (*Header, *block, error) {
	// Two blocks of zero bytes marks the end of the archive.
	if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
@@ -306,6 +306,7 @@ func formatPAXRecord(k, v string) (string, error) {
// validPAXRecord reports whether the key-value pair is valid where each
// record is formatted as:
//
// "%d %s=%s\n" % (size, key, value)
//
// Keys and values should be UTF-8, but the number of bad writers out there
@@ -137,9 +137,12 @@ type ComplexType complex64
// new elements. If it does not, a new underlying array will be allocated.
// Append returns the updated slice. It is therefore necessary to store the
// result of append, often in the variable holding the slice itself:
//
// slice = append(slice, elem1, elem2)
// slice = append(slice, anotherSlice...)
//
// As a special case, it is legal to append a string to a byte slice, like this:
//
// slice = append([]byte("hello "), "world"...)
func append(slice []Type, elems ...Type) []Type
@@ -156,24 +159,28 @@ func copy(dst, src []Type) int
func delete(m map[Type]Type1, key Type)

// The len built-in function returns the length of v, according to its type:
//
// Array: the number of elements in v.
// Pointer to array: the number of elements in *v (even if v is nil).
// Slice, or map: the number of elements in v; if v is nil, len(v) is zero.
// String: the number of bytes in v.
// Channel: the number of elements queued (unread) in the channel buffer;
// if v is nil, len(v) is zero.
//
// For some arguments, such as a string literal or a simple array expression, the
// result can be a constant. See the Go language specification's "Length and
// capacity" section for details.
func len(v Type) int

// The cap built-in function returns the capacity of v, according to its type:
//
// Array: the number of elements in v (same as len(v)).
// Pointer to array: the number of elements in *v (same as len(v)).
// Slice: the maximum length the slice can reach when resliced;
// if v is nil, cap(v) is zero.
// Channel: the channel buffer capacity, in units of elements;
// if v is nil, cap(v) is zero.
//
// For some arguments, such as a simple array expression, the result can be a
// constant. See the Go language specification's "Length and capacity" section for
// details.
@@ -184,6 +191,7 @@ func cap(v Type) int
// value. Unlike new, make's return type is the same as the type of its
// argument, not a pointer to it. The specification of the result depends on
// the type:
//
// Slice: The size specifies the length. The capacity of the slice is
// equal to its length. A second integer argument may be provided to
// specify a different capacity; it must be no smaller than the
@@ -225,7 +233,9 @@ func imag(c ComplexType) FloatType
// the last sent value is received. After the last value has been received
// from a closed channel c, any receive from c will succeed without
// blocking, returning the zero value for the channel element. The form
//
// x, ok := <-c
//
// will also set ok to false for a closed channel.
func close(c chan<- Type)
@@ -372,9 +372,10 @@ func genSplit(s, sep []byte, sepSave, n int) [][]byte {
// the subslices between those separators.
// If sep is empty, SplitN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
// n == 0: the result is nil (zero subslices)
// n < 0: all subslices
//
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
// n == 0: the result is nil (zero subslices)
// n < 0: all subslices
//
// To split around the first instance of a separator, see Cut.
func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
@@ -383,9 +384,10 @@ func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
// returns a slice of those subslices.
// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
// n == 0: the result is nil (zero subslices)
// n < 0: all subslices
//
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
// n == 0: the result is nil (zero subslices)
// n < 0: all subslices
func SplitAfterN(s, sep []byte, n int) [][]byte {
	return genSplit(s, sep, len(sep), n)
}
@@ -6,6 +6,7 @@
// just enough to support pprof.
//
// Usage:
//
// go tool addr2line binary
//
// Addr2line reads hexadecimal addresses, one per line and with optional 0x prefix,
@@ -3,11 +3,11 @@
// license that can be found in the LICENSE file.

/*
Asm, typically invoked as ``go tool asm'', assembles the source file into an object
Asm, typically invoked as “go tool asm”, assembles the source file into an object
file named for the basename of the argument source file with a .o suffix. The
object file can then be combined with other objects into a package archive.

Command Line
# Command Line

Usage:
@@ -162,7 +162,7 @@ func (p *Parser) nextToken() lex.ScanToken {
// line consumes a single assembly line from p.lex of the form
//
// {label:} WORD[.cond] [ arg {, arg} ] (';' | '\n')
// {label:} WORD[.cond] [ arg {, arg} ] (';' | '\n')
//
// It adds any labels to p.pendingLabels and returns the word, cond,
// operand list, and true. If there is an error or EOF, it returns
@@ -891,7 +891,7 @@ func (p *Parser) symRefAttrs(name string, issueError bool) (bool, obj.ABI) {
// constrained form of the operand syntax that's always SB-based,
// non-static, and has at most a simple integer offset:
//
// [$|*]sym[<abi>][+Int](SB)
// [$|*]sym[<abi>][+Int](SB)
func (p *Parser) funcAddress() (string, obj.ABI, bool) {
	switch p.peek() {
	case '$', '*':
@@ -1041,9 +1041,13 @@ func (p *Parser) registerIndirect(a *obj.Addr, prefix rune) {
//
// For 386/AMD64 register list specifies 4VNNIW-style multi-source operand.
// For range of 4 elements, Intel manual uses "+3" notation, for example:
//
// VP4DPWSSDS zmm1{k1}{z}, zmm2+3, m128
//
// Given asm line:
//
// VP4DPWSSDS Z5, [Z10-Z13], (AX)
//
// zmm2 is Z10, and Z13 is the only valid value for it (Z10+3).
// Only simple ranges are accepted, like [Z0-Z3].
//
@@ -6,6 +6,7 @@
Buildid displays or updates the build ID stored in a Go package or binary.

Usage:

go tool buildid [-w] file

By default, buildid prints the build ID found in the named file.
@@ -3,10 +3,9 @@
// license that can be found in the LICENSE file.

/*

Cgo enables the creation of Go packages that call C code.

Using cgo with the go command
# Using cgo with the go command

To use cgo write normal Go code that imports a pseudo-package "C".
The Go code can then refer to types such as C.size_t, variables such
@@ -91,11 +90,11 @@ file. This allows pre-compiled static libraries to be included in the package
directory and linked properly.
For example if package foo is in the directory /go/src/foo:

// #cgo LDFLAGS: -L${SRCDIR}/libs -lfoo
// #cgo LDFLAGS: -L${SRCDIR}/libs -lfoo

Will be expanded to:

// #cgo LDFLAGS: -L/go/src/foo/libs -lfoo
// #cgo LDFLAGS: -L/go/src/foo/libs -lfoo

When the Go tool sees that one or more Go files use the special import
"C", it will look for other non-Go files in the directory and compile
|
||||
The CXX_FOR_TARGET, CXX_FOR_${GOOS}_${GOARCH}, and CXX
|
||||
environment variables work in a similar way for C++ code.
|
||||
|
||||
Go references to C
|
||||
# Go references to C
|
||||
|
||||
Within the Go file, C's struct field names that are keywords in Go
|
||||
can be accessed by prefixing them with an underscore: if x points at a C
|
||||
@@ -291,7 +290,7 @@ the helper function crashes the program, like when Go itself runs out
of memory. Because C.malloc cannot fail, it has no two-result form
that returns errno.

C references to Go
# C references to Go

Go functions can be exported for use by C code in the following way:
@@ -327,7 +326,7 @@ definitions and declarations, then the two output files will produce
duplicate symbols and the linker will fail. To avoid this, definitions
must be placed in preambles in other files, or in C source files.

Passing pointers
# Passing pointers

Go is a garbage collected language, and the garbage collector needs to
know the location of every pointer to Go memory. Because of this,
@@ -398,7 +397,7 @@ passing uninitialized C memory to Go code if the Go code is going to
store pointer values in it. Zero out the memory in C before passing it
to Go.

Special cases
# Special cases

A few special C types which would normally be represented by a pointer
type in Go are instead represented by a uintptr. Those include:
@@ -449,9 +448,10 @@ to auto-update code from Go 1.14 and earlier:
go tool fix -r eglconf <pkg>

Using cgo directly
# Using cgo directly

Usage:

go tool cgo [cgo options] [-- compiler options] gofiles...

Cgo transforms the specified input Go source files into several output
@@ -114,11 +114,11 @@ func (p *Package) addToFlag(flag string, args []string) {
//
// For example, the following string:
//
// `a b:"c d" 'e''f' "g\""`
// `a b:"c d" 'e''f' "g\""`
//
// Would be parsed as:
//
// []string{"a", "b:c d", "ef", `g"`}
// []string{"a", "b:c d", "ef", `g"`}
func splitQuoted(s string) (r []string, err error) {
	var args []string
	arg := make([]rune, len(s))
@ -1137,13 +1137,19 @@ func (p *Package) mangle(f *File, arg *ast.Expr, addPosition bool) (ast.Expr, bo
|
||||
|
||||
// checkIndex checks whether arg has the form &a[i], possibly inside
|
||||
// type conversions. If so, then in the general case it writes
|
||||
// _cgoIndexNN := a
|
||||
// _cgoNN := &cgoIndexNN[i] // with type conversions, if any
|
||||
//
|
||||
// _cgoIndexNN := a
|
||||
// _cgoNN := &cgoIndexNN[i] // with type conversions, if any
|
||||
//
|
||||
// to sb, and writes
|
||||
// _cgoCheckPointer(_cgoNN, _cgoIndexNN)
|
||||
//
|
||||
// _cgoCheckPointer(_cgoNN, _cgoIndexNN)
|
||||
//
|
||||
// to sbCheck, and returns true. If a is a simple variable or field reference,
|
||||
// it writes
|
||||
// _cgoIndexNN := &a
|
||||
//
|
||||
// _cgoIndexNN := &a
|
||||
//
|
||||
// and dereferences the uses of _cgoIndexNN. Taking the address avoids
|
||||
// making a copy of an array.
|
||||
//
|
||||
@ -1191,10 +1197,14 @@ func (p *Package) checkIndex(sb, sbCheck *bytes.Buffer, arg ast.Expr, i int) boo
|
||||
|
||||
// checkAddr checks whether arg has the form &x, possibly inside type
|
||||
// conversions. If so, it writes
|
||||
// _cgoBaseNN := &x
|
||||
// _cgoNN := _cgoBaseNN // with type conversions, if any
|
||||
//
|
||||
// _cgoBaseNN := &x
|
||||
// _cgoNN := _cgoBaseNN // with type conversions, if any
|
||||
//
|
||||
// to sb, and writes
|
||||
// _cgoCheckPointer(_cgoBaseNN, true)
|
||||
//
|
||||
// _cgoCheckPointer(_cgoBaseNN, true)
|
||||
//
|
||||
// to sbCheck, and returns true. This tells _cgoCheckPointer to check
|
||||
// just the contents of the pointer being passed, not any other part
|
||||
// of the memory allocation. This is run after checkIndex, which looks
|
||||
|
@ -111,7 +111,9 @@ func moveByType(t *types.Type) obj.As {
|
||||
}
|
||||
|
||||
// opregreg emits instructions for
|
||||
// dest := dest(To) op src(From)
|
||||
//
|
||||
// dest := dest(To) op src(From)
|
||||
//
|
||||
// and also returns the created obj.Prog so it
|
||||
// may be further adjusted (offset, scale, etc).
|
||||
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
|
||||
|
@ -522,7 +522,8 @@ func InlineCalls(fn *ir.Func) {
|
||||
// but then you may as well do it here. so this is cleaner and
|
||||
// shorter and less complicated.
|
||||
// The result of inlnode MUST be assigned back to n, e.g.
|
||||
// n.Left = inlnode(n.Left)
|
||||
//
|
||||
// n.Left = inlnode(n.Left)
|
||||
func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
|
||||
if n == nil {
|
||||
return n
|
||||
@ -657,7 +658,8 @@ var NewInline = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCa
|
||||
// inlined function body, and (List, Rlist) contain the (input, output)
|
||||
// parameters.
|
||||
// The result of mkinlcall MUST be assigned back to n, e.g.
|
||||
// n.Left = mkinlcall(n.Left, fn, isddd)
|
||||
//
|
||||
// n.Left = mkinlcall(n.Left, fn, isddd)
|
||||
func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
|
||||
if fn.Inl == nil {
|
||||
if logopt.Enabled() {
|
||||
|
@ -951,11 +951,11 @@ var IsIntrinsicCall = func(*CallExpr) bool { return false }
|
||||
// instead of computing both. SameSafeExpr assumes that l and r are
|
||||
// used in the same statement or expression. In order for it to be
|
||||
// safe to reuse l or r, they must:
|
||||
// * be the same expression
|
||||
// * not have side-effects (no function calls, no channel ops);
|
||||
// however, panics are ok
|
||||
// * not cause inappropriate aliasing; e.g. two string to []byte
|
||||
// conversions, must result in two distinct slices
|
||||
// - be the same expression
|
||||
// - not have side-effects (no function calls, no channel ops);
|
||||
// however, panics are ok
|
||||
// - not cause inappropriate aliasing; e.g. two string to []byte
|
||||
// conversions, must result in two distinct slices
|
||||
//
|
||||
// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both
|
||||
// as an lvalue (map assignment) and an rvalue (map access). This is
|
||||
|
@ -551,7 +551,8 @@ func SetPos(n Node) src.XPos {
|
||||
}
|
||||
|
||||
// The result of InitExpr MUST be assigned back to n, e.g.
|
||||
// n.X = InitExpr(init, n.X)
|
||||
//
|
||||
// n.X = InitExpr(init, n.X)
|
||||
func InitExpr(init []Node, expr Node) Node {
|
||||
if len(init) == 0 {
|
||||
return expr
|
||||
|
@ -244,8 +244,10 @@ func (lv *liveness) initcache() {
|
||||
// liveness effects on a variable.
|
||||
//
|
||||
// The possible flags are:
|
||||
//
|
||||
// uevar - used by the instruction
|
||||
// varkill - killed by the instruction (set)
|
||||
//
|
||||
// A kill happens after the use (for an instruction that updates a value, for example).
|
||||
type liveEffect int
|
||||
|
||||
@ -1460,14 +1462,14 @@ func (lv *liveness) emitStackObjects() *obj.LSym {
|
||||
// isfat reports whether a variable of type t needs multiple assignments to initialize.
|
||||
// For example:
|
||||
//
|
||||
// type T struct { x, y int }
|
||||
// x := T{x: 0, y: 1}
|
||||
// type T struct { x, y int }
|
||||
// x := T{x: 0, y: 1}
|
||||
//
|
||||
// Then we need:
|
||||
//
|
||||
// var t T
|
||||
// t.x = 0
|
||||
// t.y = 1
|
||||
// var t T
|
||||
// t.x = 0
|
||||
// t.y = 1
|
||||
//
|
||||
// to fully initialize t.
|
||||
func isfat(t *types.Type) bool {
|
||||
|
@ -33,38 +33,38 @@ var localPkgReader *pkgReader
|
||||
//
|
||||
// The pipeline contains 2 steps:
|
||||
//
|
||||
// 1) Generate package export data "stub".
|
||||
// 1. Generate package export data "stub".
|
||||
//
|
||||
// 2) Generate package IR from package export data.
|
||||
// 2. Generate package IR from package export data.
|
||||
//
|
||||
// The package data "stub" at step (1) contains everything from the local package,
|
||||
// but nothing that have been imported. When we're actually writing out export data
|
||||
// to the output files (see writeNewExport function), we run the "linker", which does
|
||||
// a few things:
|
||||
//
|
||||
// + Updates compiler extensions data (e.g., inlining cost, escape analysis results).
|
||||
// - Updates compiler extensions data (e.g., inlining cost, escape analysis results).
|
||||
//
|
||||
// + Handles re-exporting any transitive dependencies.
|
||||
// - Handles re-exporting any transitive dependencies.
|
||||
//
|
||||
// + Prunes out any unnecessary details (e.g., non-inlineable functions, because any
|
||||
// downstream importers only care about inlinable functions).
|
||||
// - Prunes out any unnecessary details (e.g., non-inlineable functions, because any
|
||||
// downstream importers only care about inlinable functions).
|
||||
//
|
||||
// The source files are typechecked twice, once before writing export data
|
||||
// using types2 checker, once after read export data using gc/typecheck.
|
||||
// This duplication of work will go away once we always use types2 checker,
|
||||
// we can remove the gc/typecheck pass. The reason it is still here:
|
||||
//
|
||||
// + It reduces engineering costs in maintaining a fork of typecheck
|
||||
// (e.g., no need to backport fixes like CL 327651).
|
||||
// - It reduces engineering costs in maintaining a fork of typecheck
|
||||
// (e.g., no need to backport fixes like CL 327651).
|
||||
//
|
||||
// + It makes it easier to pass toolstash -cmp.
|
||||
// - It makes it easier to pass toolstash -cmp.
|
||||
//
|
||||
// + Historically, we would always re-run the typechecker after import, even though
|
||||
// we know the imported data is valid. It's not ideal, but also not causing any
|
||||
// problem either.
|
||||
// - Historically, we would always re-run the typechecker after import, even though
|
||||
// we know the imported data is valid. It's not ideal, but also not causing any
|
||||
// problem either.
|
||||
//
|
||||
// + There's still transformation that being done during gc/typecheck, like rewriting
|
||||
// multi-valued function call, or transform ir.OINDEX -> ir.OINDEXMAP.
|
||||
// - There's still transformation that being done during gc/typecheck, like rewriting
|
||||
// multi-valued function call, or transform ir.OINDEX -> ir.OINDEXMAP.
|
||||
//
|
||||
// Using syntax+types2 tree, which already has a complete representation of generics,
|
||||
// the unified IR has the full typed AST for doing introspection during step (1).
|
||||
|
@ -65,9 +65,9 @@ func MakeInit() {
|
||||
// Task makes and returns an initialization record for the package.
|
||||
// See runtime/proc.go:initTask for its layout.
|
||||
// The 3 tasks for initialization are:
|
||||
// 1) Initialize all of the packages the current package depends on.
|
||||
// 2) Initialize all the variables that have initializers.
|
||||
// 3) Run any init functions.
|
||||
// 1. Initialize all of the packages the current package depends on.
|
||||
// 2. Initialize all the variables that have initializers.
|
||||
// 3. Run any init functions.
|
||||
func Task() *ir.Name {
|
||||
var deps []*obj.LSym // initTask records for packages the current package depends on
|
||||
var fns []*obj.LSym // functions to call for package initialization
|
||||
|
@ -681,7 +681,8 @@ func anyCall(fn *ir.Func) bool {
|
||||
}
|
||||
|
||||
// eqfield returns the node
|
||||
// p.field == q.field
|
||||
//
|
||||
// p.field == q.field
|
||||
func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
|
||||
nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
|
||||
ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
|
||||
@ -690,9 +691,13 @@ func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
|
||||
}
|
||||
|
||||
// EqString returns the nodes
|
||||
// len(s) == len(t)
|
||||
//
|
||||
// len(s) == len(t)
|
||||
//
|
||||
// and
|
||||
// memequal(s.ptr, t.ptr, len(s))
|
||||
//
|
||||
// memequal(s.ptr, t.ptr, len(s))
|
||||
//
|
||||
// which can be used to construct string equality comparison.
|
||||
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
|
||||
func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
|
||||
@ -714,9 +719,13 @@ func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
|
||||
}
|
||||
|
||||
// EqInterface returns the nodes
|
||||
// s.tab == t.tab (or s.typ == t.typ, as appropriate)
|
||||
//
|
||||
// s.tab == t.tab (or s.typ == t.typ, as appropriate)
|
||||
//
|
||||
// and
|
||||
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
|
||||
//
|
||||
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
|
||||
//
|
||||
// which can be used to construct interface equality comparison.
|
||||
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
|
||||
func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
|
||||
@ -750,7 +759,8 @@ func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
|
||||
}
|
||||
|
||||
// eqmem returns the node
|
||||
// memequal(&p.field, &q.field [, size])
|
||||
//
|
||||
// memequal(&p.field, &q.field [, size])
|
||||
func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
|
||||
nx := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)))
|
||||
ny := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)))
|
||||
|
@ -667,10 +667,10 @@ var kinds = []int{
|
||||
// tflag is documented in reflect/type.go.
|
||||
//
|
||||
// tflag values must be kept in sync with copies in:
|
||||
// - cmd/compile/internal/reflectdata/reflect.go
|
||||
// - cmd/link/internal/ld/decodesym.go
|
||||
// - reflect/type.go
|
||||
// - runtime/type.go
|
||||
// - cmd/compile/internal/reflectdata/reflect.go
|
||||
// - cmd/link/internal/ld/decodesym.go
|
||||
// - reflect/type.go
|
||||
// - runtime/type.go
|
||||
const (
|
||||
tflagUncommon = 1 << 0
|
||||
tflagExtraStar = 1 << 1
|
||||
@ -1794,13 +1794,17 @@ func NeedEmit(typ *types.Type) bool {
|
||||
// Also wraps methods on instantiated generic types for use in itab entries.
|
||||
// For an instantiated generic type G[int], we generate wrappers like:
|
||||
// G[int] pointer shaped:
|
||||
//
|
||||
// func (x G[int]) f(arg) {
|
||||
// .inst.G[int].f(dictionary, x, arg)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// G[int] not pointer shaped:
|
||||
//
|
||||
// func (x *G[int]) f(arg) {
|
||||
// .inst.G[int].f(dictionary, *x, arg)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// These wrappers are always fully stenciled.
|
||||
func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSym {
|
||||
orig := rcvr
|
||||
|
@ -132,7 +132,9 @@ func moveByType(t *types.Type) obj.As {
|
||||
}
|
||||
|
||||
// opregreg emits instructions for
|
||||
// dest := dest(To) op src(From)
|
||||
//
|
||||
// dest := dest(To) op src(From)
|
||||
//
|
||||
// and also returns the created obj.Prog so it
|
||||
// may be further adjusted (offset, scale, etc).
|
||||
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
|
||||
@ -145,7 +147,9 @@ func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
|
||||
}
|
||||
|
||||
// opregregimm emits instructions for
|
||||
//
|
||||
// dest := src(From) op off
|
||||
//
|
||||
// and also returns the created obj.Prog so it
|
||||
// may be further adjusted (offset, scale, etc).
|
||||
func opregregimm(s *ssagen.State, op obj.As, dest, src int16, off int64) *obj.Prog {
|
||||
|
@ -131,10 +131,14 @@ var needSplit = map[Op]bool{
|
||||
}
|
||||
|
||||
// For each entry k, v in this map, if we have a value x with:
|
||||
// x.Op == k[0]
|
||||
// x.Args[0].Op == k[1]
|
||||
//
|
||||
// x.Op == k[0]
|
||||
// x.Args[0].Op == k[1]
|
||||
//
|
||||
// then we can set x.Op to v and set x.Args like this:
|
||||
// x.Args[0].Args + x.Args[1:]
|
||||
//
|
||||
// x.Args[0].Args + x.Args[1:]
|
||||
//
|
||||
// Additionally, the Aux/AuxInt from x.Args[0] is merged into x.
|
||||
var combine = map[[2]Op]Op{
|
||||
// amd64
|
||||
|
@ -71,19 +71,25 @@ type Block struct {
|
||||
// Edge represents a CFG edge.
|
||||
// Example edges for b branching to either c or d.
|
||||
// (c and d have other predecessors.)
|
||||
// b.Succs = [{c,3}, {d,1}]
|
||||
// c.Preds = [?, ?, ?, {b,0}]
|
||||
// d.Preds = [?, {b,1}, ?]
|
||||
//
|
||||
// b.Succs = [{c,3}, {d,1}]
|
||||
// c.Preds = [?, ?, ?, {b,0}]
|
||||
// d.Preds = [?, {b,1}, ?]
|
||||
//
|
||||
// These indexes allow us to edit the CFG in constant time.
|
||||
// In addition, it informs phi ops in degenerate cases like:
|
||||
// b:
|
||||
// if k then c else c
|
||||
// c:
|
||||
// v = Phi(x, y)
|
||||
//
|
||||
// b:
|
||||
// if k then c else c
|
||||
// c:
|
||||
// v = Phi(x, y)
|
||||
//
|
||||
// Then the indexes tell you whether x is chosen from
|
||||
// the if or else branch from b.
|
||||
// b.Succs = [{c,0},{c,1}]
|
||||
// c.Preds = [{b,0},{b,1}]
|
||||
//
|
||||
// b.Succs = [{c,0},{c,1}]
|
||||
// c.Preds = [{b,0},{b,1}]
|
||||
//
|
||||
// means x is chosen if k is true.
|
||||
type Edge struct {
|
||||
// block edge goes to (in a Succs list) or from (in a Preds list)
|
||||
@ -106,12 +112,13 @@ func (e Edge) String() string {
|
||||
}
|
||||
|
||||
// BlockKind is the kind of SSA block.
|
||||
// kind controls successors
|
||||
// ------------------------------------------
|
||||
// Exit [return mem] []
|
||||
// Plain [] [next]
|
||||
// If [boolean Value] [then, else]
|
||||
// Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc)
|
||||
//
|
||||
// kind controls successors
|
||||
// ------------------------------------------
|
||||
// Exit [return mem] []
|
||||
// Plain [] [next]
|
||||
// If [boolean Value] [then, else]
|
||||
// Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc)
|
||||
type BlockKind int8
|
||||
|
||||
// short form print
|
||||
@ -330,10 +337,12 @@ func (b *Block) swapSuccessors() {
|
||||
//
|
||||
// b.removePred(i)
|
||||
// for _, v := range b.Values {
|
||||
// if v.Op != OpPhi {
|
||||
// continue
|
||||
// }
|
||||
// b.removeArg(v, i)
|
||||
//
|
||||
// if v.Op != OpPhi {
|
||||
// continue
|
||||
// }
|
||||
// b.removeArg(v, i)
|
||||
//
|
||||
// }
|
||||
func (b *Block) removePhiArg(phi *Value, i int) {
|
||||
n := len(b.Preds)
|
||||
|
@ -11,11 +11,11 @@ import "cmd/internal/src"
|
||||
//
|
||||
// Search for basic blocks that look like
|
||||
//
|
||||
// bb0 bb0
|
||||
// | \ / \
|
||||
// | bb1 or bb1 bb2 <- trivial if/else blocks
|
||||
// | / \ /
|
||||
// bb2 bb3
|
||||
// bb0 bb0
|
||||
// | \ / \
|
||||
// | bb1 or bb1 bb2 <- trivial if/else blocks
|
||||
// | / \ /
|
||||
// bb2 bb3
|
||||
//
|
||||
// where the intermediate blocks are mostly empty (with no side-effects);
|
||||
// rewrite Phis in the postdominator as CondSelects.
|
||||
|
@ -250,8 +250,8 @@ var GenssaDump map[string]bool = make(map[string]bool) // names of functions to
|
||||
// version is used as a regular expression to match the phase name(s).
|
||||
//
|
||||
// Special cases that have turned out to be useful:
|
||||
// - ssa/check/on enables checking after each phase
|
||||
// - ssa/all/time enables time reporting for all phases
|
||||
// - ssa/check/on enables checking after each phase
|
||||
// - ssa/all/time enables time reporting for all phases
|
||||
//
|
||||
// See gc/lex.go for dissection of the option string.
|
||||
// Example uses:
|
||||
|
@ -235,14 +235,15 @@ type eqclass []*Value
|
||||
|
||||
// partitionValues partitions the values into equivalence classes
|
||||
// based on having all the following features match:
|
||||
// - opcode
|
||||
// - type
|
||||
// - auxint
|
||||
// - aux
|
||||
// - nargs
|
||||
// - block # if a phi op
|
||||
// - first two arg's opcodes and auxint
|
||||
// - NOT first two arg's aux; that can break CSE.
|
||||
// - opcode
|
||||
// - type
|
||||
// - auxint
|
||||
// - aux
|
||||
// - nargs
|
||||
// - block # if a phi op
|
||||
// - first two arg's opcodes and auxint
|
||||
// - NOT first two arg's aux; that can break CSE.
|
||||
//
|
||||
// partitionValues returns a list of equivalence classes, each
|
||||
// being a sorted by ID list of *Values. The eqclass slices are
|
||||
// backed by the same storage as the input slice.
|
||||
|
@ -402,28 +402,28 @@ func (sc *slotCanonicalizer) canonSlot(idx SlKeyIdx) LocalSlot {
|
||||
// OpArg{Int,Float}Reg values, inserting additional values in
|
||||
// cases where they are missing. Example:
|
||||
//
|
||||
// func foo(s string, used int, notused int) int {
|
||||
// return len(s) + used
|
||||
// }
|
||||
// func foo(s string, used int, notused int) int {
|
||||
// return len(s) + used
|
||||
// }
|
||||
//
|
||||
// In the function above, the incoming parameter "used" is fully live,
|
||||
// "notused" is not live, and "s" is partially live (only the length
|
||||
// field of the string is used). At the point where debug value
|
||||
// analysis runs, we might expect to see an entry block with:
|
||||
//
|
||||
// b1:
|
||||
// v4 = ArgIntReg <uintptr> {s+8} [0] : BX
|
||||
// v5 = ArgIntReg <int> {used} [0] : CX
|
||||
// b1:
|
||||
// v4 = ArgIntReg <uintptr> {s+8} [0] : BX
|
||||
// v5 = ArgIntReg <int> {used} [0] : CX
|
||||
//
|
||||
// While this is an accurate picture of the live incoming params,
|
||||
// we also want to have debug locations for non-live params (or
|
||||
// their non-live pieces), e.g. something like
|
||||
//
|
||||
// b1:
|
||||
// v9 = ArgIntReg <*uint8> {s+0} [0] : AX
|
||||
// v4 = ArgIntReg <uintptr> {s+8} [0] : BX
|
||||
// v5 = ArgIntReg <int> {used} [0] : CX
|
||||
// v10 = ArgIntReg <int> {unused} [0] : DI
|
||||
// b1:
|
||||
// v9 = ArgIntReg <*uint8> {s+0} [0] : AX
|
||||
// v4 = ArgIntReg <uintptr> {s+8} [0] : BX
|
||||
// v5 = ArgIntReg <int> {used} [0] : CX
|
||||
// v10 = ArgIntReg <int> {unused} [0] : DI
|
||||
//
|
||||
// This function examines the live OpArg{Int,Float}Reg values and
|
||||
// synthesizes new (dead) values for the non-live params or the
|
||||
@ -1489,14 +1489,14 @@ func setupLocList(ctxt *obj.Link, f *Func, list []byte, st, en ID) ([]byte, int)
|
||||
// that spills a register arg. It returns the ID of that instruction
|
||||
// Example:
|
||||
//
|
||||
// b1:
|
||||
// v3 = ArgIntReg <int> {p1+0} [0] : AX
|
||||
// ... more arg regs ..
|
||||
// v4 = ArgFloatReg <float32> {f1+0} [0] : X0
|
||||
// v52 = MOVQstore <mem> {p1} v2 v3 v1
|
||||
// ... more stores ...
|
||||
// v68 = MOVSSstore <mem> {f4} v2 v67 v66
|
||||
// v38 = MOVQstoreconst <mem> {blob} [val=0,off=0] v2 v32
|
||||
// b1:
|
||||
// v3 = ArgIntReg <int> {p1+0} [0] : AX
|
||||
// ... more arg regs ..
|
||||
// v4 = ArgFloatReg <float32> {f1+0} [0] : X0
|
||||
// v52 = MOVQstore <mem> {p1} v2 v3 v1
|
||||
// ... more stores ...
|
||||
// v68 = MOVSSstore <mem> {f4} v2 v67 v66
|
||||
// v38 = MOVQstoreconst <mem> {blob} [val=0,off=0] v2 v32
|
||||
//
|
||||
// Important: locatePrologEnd is expected to work properly only with
|
||||
// optimization turned off (e.g. "-N"). If optimization is enabled
|
||||
|
@ -84,7 +84,7 @@ var optimizedLibs = (!strings.Contains(gogcflags, "-N") && !strings.Contains(gog
|
||||
// "O" is an explicit indication that we expect it to be optimized out.
|
||||
// For example:
|
||||
//
|
||||
// if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A)
|
||||
// if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A)
|
||||
//
|
||||
// TODO: not implemented for Delve yet, but this is the plan
|
||||
//
|
||||
|
@ -656,15 +656,16 @@ outer:
|
||||
// It decomposes a Load or an Arg into smaller parts and returns the new mem.
|
||||
// If the type does not match one of the expected aggregate types, it returns nil instead.
|
||||
// Parameters:
|
||||
// pos -- the location of any generated code.
|
||||
// b -- the block into which any generated code should normally be placed
|
||||
// source -- the value, possibly an aggregate, to be stored.
|
||||
// mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
|
||||
// t -- the type of the value to be stored
|
||||
// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + storeOffset
|
||||
// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
|
||||
// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
|
||||
// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
|
||||
//
|
||||
// pos -- the location of any generated code.
|
||||
// b -- the block into which any generated code should normally be placed
|
||||
// source -- the value, possibly an aggregate, to be stored.
|
||||
// mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
|
||||
// t -- the type of the value to be stored
|
||||
// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + storeOffset
|
||||
// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
|
||||
// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
|
||||
// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
|
||||
func (x *expandState) decomposeArg(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
|
||||
|
||||
pa := x.prAssignForArg(source)
|
||||
@ -777,15 +778,16 @@ func (x *expandState) splitSlotsIntoNames(locs []*LocalSlot, suffix string, off
|
||||
// It decomposes a Load into smaller parts and returns the new mem.
|
||||
// If the type does not match one of the expected aggregate types, it returns nil instead.
|
||||
// Parameters:
|
||||
// pos -- the location of any generated code.
|
||||
// b -- the block into which any generated code should normally be placed
|
||||
// source -- the value, possibly an aggregate, to be stored.
|
||||
// mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
|
||||
// t -- the type of the value to be stored
|
||||
// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + offset
|
||||
// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
|
||||
// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
|
||||
// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
|
||||
//
|
||||
// pos -- the location of any generated code.
|
||||
// b -- the block into which any generated code should normally be placed
|
||||
// source -- the value, possibly an aggregate, to be stored.
|
||||
// mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
|
||||
// t -- the type of the value to be stored
|
||||
// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + offset
|
||||
// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
|
||||
// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
|
||||
// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
|
||||
//
|
||||
// TODO -- this needs cleanup; it just works for SSA-able aggregates, and won't fully generalize to register-args aggregates.
|
||||
func (x *expandState) decomposeLoad(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
|
||||
|
@ -820,17 +820,22 @@ func (f *Func) invalidateCFG() {
|
||||
}
|
||||
|
||||
// DebugHashMatch reports whether environment variable evname
|
||||
// 1) is empty (this is a special more-quickly implemented case of 3)
|
||||
// 2) is "y" or "Y"
|
||||
// 3) is a suffix of the sha1 hash of name
|
||||
// 4) is a suffix of the environment variable
|
||||
// 1. is empty (this is a special more-quickly implemented case of 3)
|
||||
// 2. is "y" or "Y"
|
||||
// 3. is a suffix of the sha1 hash of name
|
||||
// 4. is a suffix of the environment variable
|
||||
// fmt.Sprintf("%s%d", evname, n)
|
||||
// provided that all such variables are nonempty for 0 <= i <= n
|
||||
//
|
||||
// Otherwise it returns false.
|
||||
// When true is returned the message
|
||||
// "%s triggered %s\n", evname, name
|
||||
//
|
||||
// "%s triggered %s\n", evname, name
|
||||
//
|
||||
// is printed on the file named in environment variable
|
||||
// GSHS_LOGFILE
|
||||
//
|
||||
// GSHS_LOGFILE
|
||||
//
|
||||
// or standard out if that is empty or there is an error
|
||||
// opening the file.
|
||||
func (f *Func) DebugHashMatch(evname string) bool {
|
||||
|
@ -55,19 +55,21 @@ func fuse(f *Func, typ fuseType) {
|
||||
|
||||
// fuseBlockIf handles the following cases where s0 and s1 are empty blocks.
|
||||
//
|
||||
// b b b b
|
||||
// \ / \ / | \ / \ / | | |
|
||||
// s0 s1 | s1 s0 | | |
|
||||
// \ / | / \ | | |
|
||||
// ss ss ss ss
|
||||
// b b b b
|
||||
// \ / \ / | \ / \ / | | |
|
||||
// s0 s1 | s1 s0 | | |
|
||||
// \ / | / \ | | |
|
||||
// ss ss ss ss
|
||||
//
|
||||
// If all Phi ops in ss have identical variables for slots corresponding to
|
||||
// s0, s1 and b then the branch can be dropped.
|
||||
// This optimization often comes up in switch statements with multiple
|
||||
// expressions in a case clause:
|
||||
// switch n {
|
||||
// case 1,2,3: return 4
|
||||
// }
|
||||
//
|
||||
// switch n {
|
||||
// case 1,2,3: return 4
|
||||
// }
|
||||
//
|
||||
// TODO: If ss doesn't contain any OpPhis, are s0 and s1 dead code anyway.
|
||||
func fuseBlockIf(b *Block) bool {
|
||||
if b.Kind != BlockIf {
|
||||
|
@ -8,21 +8,24 @@ package ssa
|
||||
// of an If block can be derived from its predecessor If block, in
|
||||
// some such cases, we can redirect the predecessor If block to the
|
||||
// corresponding successor block directly. For example:
|
||||
// p:
|
||||
// v11 = Less64 <bool> v10 v8
|
||||
// If v11 goto b else u
|
||||
// b: <- p ...
|
||||
// v17 = Leq64 <bool> v10 v8
|
||||
// If v17 goto s else o
|
||||
//
|
||||
// p:
|
||||
// v11 = Less64 <bool> v10 v8
|
||||
// If v11 goto b else u
|
||||
// b: <- p ...
|
||||
// v17 = Leq64 <bool> v10 v8
|
||||
// If v17 goto s else o
|
||||
//
|
||||
// We can redirect p to s directly.
|
||||
//
|
||||
// The implementation here borrows the framework of the prove pass.
|
||||
// 1, Traverse all blocks of function f to find If blocks.
|
||||
// 2, For any If block b, traverse all its predecessors to find If blocks.
|
||||
// 3, For any If block predecessor p, update relationship p->b.
|
||||
// 4, Traverse all successors of b.
|
||||
// 5, For any successor s of b, try to update relationship b->s, if a
|
||||
// contradiction is found then redirect p to another successor of b.
|
||||
//
|
||||
// 1, Traverse all blocks of function f to find If blocks.
|
||||
// 2, For any If block b, traverse all its predecessors to find If blocks.
|
||||
// 3, For any If block predecessor p, update relationship p->b.
|
||||
// 4, Traverse all successors of b.
|
||||
// 5, For any successor s of b, try to update relationship b->s, if a
|
||||
// contradiction is found then redirect p to another successor of b.
|
||||
func fuseBranchRedirect(f *Func) bool {
|
||||
ft := newFactsTable(f)
|
||||
ft.checkpoint()
|
||||
|
@ -9,22 +9,22 @@ package ssa
|
||||
//
|
||||
// Look for branch structure like:
|
||||
//
|
||||
// p
|
||||
// |\
|
||||
// | b
|
||||
// |/ \
|
||||
// s0 s1
|
||||
// p
|
||||
// |\
|
||||
// | b
|
||||
// |/ \
|
||||
// s0 s1
|
||||
//
|
||||
// In our example, p has control '1 <= x', b has control 'x < 5',
|
||||
// and s0 and s1 are the if and else results of the comparison.
|
||||
//
|
||||
// This will be optimized into:
|
||||
//
|
||||
// p
|
||||
// \
|
||||
// b
|
||||
// / \
|
||||
// s0 s1
|
||||
// p
|
||||
// \
|
||||
// b
|
||||
// / \
|
||||
// s0 s1
|
||||
//
|
||||
// where b has the combined control value 'unsigned(x-1) < 4'.
|
||||
// Later passes will then fuse p and b.
|
||||
|
@ -46,19 +46,19 @@ func (r *Register) GCNum() int16 {
|
||||
// variable that has been decomposed into multiple stack slots.
|
||||
// As an example, a string could have the following configurations:
|
||||
//
|
||||
// stack layout LocalSlots
|
||||
// stack layout LocalSlots
|
||||
//
|
||||
// Optimizations are disabled. s is on the stack and represented in its entirety.
|
||||
// [ ------- s string ---- ] { N: s, Type: string, Off: 0 }
|
||||
// Optimizations are disabled. s is on the stack and represented in its entirety.
|
||||
// [ ------- s string ---- ] { N: s, Type: string, Off: 0 }
|
||||
//
|
||||
// s was not decomposed, but the SSA operates on its parts individually, so
|
||||
// there is a LocalSlot for each of its fields that points into the single stack slot.
|
||||
// [ ------- s string ---- ] { N: s, Type: *uint8, Off: 0 }, {N: s, Type: int, Off: 8}
|
||||
// s was not decomposed, but the SSA operates on its parts individually, so
|
||||
// there is a LocalSlot for each of its fields that points into the single stack slot.
|
||||
// [ ------- s string ---- ] { N: s, Type: *uint8, Off: 0 }, {N: s, Type: int, Off: 8}
|
||||
//
|
||||
// s was decomposed. Each of its fields is in its own stack slot and has its own LocalSLot.
|
||||
// [ ptr *uint8 ] [ len int] { N: ptr, Type: *uint8, Off: 0, SplitOf: parent, SplitOffset: 0},
|
||||
// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
|
||||
// parent = &{N: s, Type: string}
|
||||
// s was decomposed. Each of its fields is in its own stack slot and has its own LocalSLot.
|
||||
// [ ptr *uint8 ] [ len int] { N: ptr, Type: *uint8, Off: 0, SplitOf: parent, SplitOffset: 0},
|
||||
// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
|
||||
// parent = &{N: s, Type: string}
|
||||
type LocalSlot struct {
|
||||
N *ir.Name // an ONAME *ir.Name representing a stack location.
|
||||
Type *types.Type // type of slot
|
||||
|
@ -31,9 +31,10 @@ type indVar struct {
|
||||
|
||||
// parseIndVar checks whether the SSA value passed as argument is a valid induction
|
||||
// variable, and, if so, extracts:
|
||||
// * the minimum bound
|
||||
// * the increment value
|
||||
// * the "next" value (SSA value that is Phi'd into the induction variable every loop)
|
||||
// - the minimum bound
|
||||
// - the increment value
|
||||
// - the "next" value (SSA value that is Phi'd into the induction variable every loop)
|
||||
//
|
||||
// Currently, we detect induction variables that match (Phi min nxt),
|
||||
// with nxt being (Add inc ind).
|
||||
// If it can't parse the induction variable correctly, it returns (nil, nil, nil).
|
||||
@ -66,19 +67,18 @@ func parseIndVar(ind *Value) (min, inc, nxt *Value) {
|
||||
//
|
||||
// Look for variables and blocks that satisfy the following
|
||||
//
|
||||
// loop:
|
||||
// ind = (Phi min nxt),
|
||||
// if ind < max
|
||||
// then goto enter_loop
|
||||
// else goto exit_loop
|
||||
// loop:
|
||||
// ind = (Phi min nxt),
|
||||
// if ind < max
|
||||
// then goto enter_loop
|
||||
// else goto exit_loop
|
||||
//
|
||||
// enter_loop:
|
||||
// do something
|
||||
// nxt = inc + ind
|
||||
// goto loop
|
||||
//
|
||||
// exit_loop:
|
||||
// enter_loop:
|
||||
// do something
|
||||
// nxt = inc + ind
|
||||
// goto loop
|
||||
//
|
||||
// exit_loop:
|
||||
//
|
||||
// TODO: handle 32 bit operations
|
||||
func findIndVar(f *Func) []indVar {
|
||||
|
@ -8,19 +8,19 @@ package ssa
|
||||
// to loops with a check-loop-condition-at-end.
|
||||
// This helps loops avoid extra unnecessary jumps.
|
||||
//
|
||||
// loop:
|
||||
// CMPQ ...
|
||||
// JGE exit
|
||||
// ...
|
||||
// JMP loop
|
||||
// exit:
|
||||
// loop:
|
||||
// CMPQ ...
|
||||
// JGE exit
|
||||
// ...
|
||||
// JMP loop
|
||||
// exit:
|
||||
//
|
||||
// JMP entry
|
||||
// loop:
|
||||
// ...
|
||||
// entry:
|
||||
// CMPQ ...
|
||||
// JLT loop
|
||||
// JMP entry
|
||||
// loop:
|
||||
// ...
|
||||
// entry:
|
||||
// CMPQ ...
|
||||
// JLT loop
|
||||
func loopRotate(f *Func) {
|
||||
loopnest := f.loopnest()
|
||||
if loopnest.hasIrreducible {
|
||||
|
@ -110,7 +110,8 @@ type umagicData struct {
|
||||
|
||||
// umagic computes the constants needed to strength reduce unsigned n-bit divides by the constant uint64(c).
|
||||
// The return values satisfy for all 0 <= x < 2^n
|
||||
// floor(x / uint64(c)) = x * (m + 2^n) >> (n+s)
|
||||
//
|
||||
// floor(x / uint64(c)) = x * (m + 2^n) >> (n+s)
|
||||
func umagic(n uint, c int64) umagicData {
|
||||
// Convert from ConstX auxint values to the real uint64 constant they represent.
|
||||
d := uint64(c) << (64 - n) >> (64 - n)
|
||||
@ -183,7 +184,8 @@ type smagicData struct {
|
||||
// magic computes the constants needed to strength reduce signed n-bit divides by the constant c.
|
||||
// Must have c>0.
|
||||
// The return values satisfy for all -2^(n-1) <= x < 2^(n-1)
|
||||
// trunc(x / c) = x * m >> (n+s) + (x < 0 ? 1 : 0)
|
||||
//
|
||||
// trunc(x / c) = x * m >> (n+s) + (x < 0 ? 1 : 0)
|
||||
func smagic(n uint, c int64) smagicData {
|
||||
C := new(big.Int).SetInt64(c)
|
||||
s := C.BitLen() - 1
|
||||
|
@ -391,9 +391,9 @@ const (
|
||||
|
||||
// A Sym represents a symbolic offset from a base register.
|
||||
// Currently a Sym can be one of 3 things:
|
||||
// - a *gc.Node, for an offset from SP (the stack pointer)
|
||||
// - a *obj.LSym, for an offset from SB (the global pointer)
|
||||
// - nil, for no offset
|
||||
// - a *gc.Node, for an offset from SP (the stack pointer)
|
||||
// - a *obj.LSym, for an offset from SB (the global pointer)
|
||||
// - nil, for no offset
|
||||
type Sym interface {
|
||||
CanBeAnSSASym()
|
||||
CanBeAnSSAAux()
|
||||
@ -479,12 +479,13 @@ const (
|
||||
)
|
||||
|
||||
// boundsAPI determines which register arguments a bounds check call should use. For an [a:b:c] slice, we do:
|
||||
// CMPQ c, cap
|
||||
// JA fail1
|
||||
// CMPQ b, c
|
||||
// JA fail2
|
||||
// CMPQ a, b
|
||||
// JA fail3
|
||||
//
|
||||
// CMPQ c, cap
|
||||
// JA fail1
|
||||
// CMPQ b, c
|
||||
// JA fail2
|
||||
// CMPQ a, b
|
||||
// JA fail3
|
||||
//
|
||||
// fail1: CALL panicSlice3Acap (c, cap)
|
||||
// fail2: CALL panicSlice3B (b, c)
|
||||
|
@ -8,13 +8,19 @@ package ssa
|
||||
// A phi is redundant if its arguments are all equal. For
|
||||
// purposes of counting, ignore the phi itself. Both of
|
||||
// these phis are redundant:
|
||||
// v = phi(x,x,x)
|
||||
// v = phi(x,v,x,v)
|
||||
//
|
||||
// v = phi(x,x,x)
|
||||
// v = phi(x,v,x,v)
|
||||
//
|
||||
// We repeat this process to also catch situations like:
|
||||
// v = phi(x, phi(x, x), phi(x, v))
|
||||
//
|
||||
// v = phi(x, phi(x, x), phi(x, v))
|
||||
//
|
||||
// TODO: Can we also simplify cases like:
|
||||
// v = phi(v, w, x)
|
||||
// w = phi(v, w, x)
|
||||
//
|
||||
// v = phi(v, w, x)
|
||||
// w = phi(v, w, x)
|
||||
//
|
||||
// and would that be useful?
|
||||
func phielim(f *Func) {
|
||||
for {
|
||||
|
@ -7,20 +7,22 @@ package ssa
|
||||
// phiopt eliminates boolean Phis based on the previous if.
|
||||
//
|
||||
// Main use case is to transform:
|
||||
// x := false
|
||||
// if b {
|
||||
// x = true
|
||||
// }
|
||||
//
|
||||
// x := false
|
||||
// if b {
|
||||
// x = true
|
||||
// }
|
||||
//
|
||||
// into x = b.
|
||||
//
|
||||
// In SSA code this appears as
|
||||
//
|
||||
// b0
|
||||
// If b -> b1 b2
|
||||
// b1
|
||||
// Plain -> b2
|
||||
// b2
|
||||
// x = (OpPhi (ConstBool [true]) (ConstBool [false]))
|
||||
// b0
|
||||
// If b -> b1 b2
|
||||
// b1
|
||||
// Plain -> b2
|
||||
// b2
|
||||
// x = (OpPhi (ConstBool [true]) (ConstBool [false]))
|
||||
//
|
||||
// In this case we can replace x with a copy of b.
|
||||
func phiopt(f *Func) {
|
||||
|
@ -140,11 +140,11 @@ type posetNode struct {
|
||||
// to record that A<I, A<J, A<K (with no known relation between I,J,K), we create the
|
||||
// following DAG:
|
||||
//
|
||||
// A
|
||||
// / \
|
||||
// I extra
|
||||
// / \
|
||||
// J K
|
||||
// A
|
||||
// / \
|
||||
// I extra
|
||||
// / \
|
||||
// J K
|
||||
type poset struct {
|
||||
lastidx uint32 // last generated dense index
|
||||
flags uint8 // internal flags
|
||||
|
@ -27,17 +27,17 @@ const (
|
||||
//
|
||||
// E.g.
|
||||
//
|
||||
// r := relation(...)
|
||||
// r := relation(...)
|
||||
//
|
||||
// if v < w {
|
||||
// newR := r & lt
|
||||
// }
|
||||
// if v >= w {
|
||||
// newR := r & (eq|gt)
|
||||
// }
|
||||
// if v != w {
|
||||
// newR := r & (lt|gt)
|
||||
// }
|
||||
// if v < w {
|
||||
// newR := r & lt
|
||||
// }
|
||||
// if v >= w {
|
||||
// newR := r & (eq|gt)
|
||||
// }
|
||||
// if v != w {
|
||||
// newR := r & (lt|gt)
|
||||
// }
|
||||
type relation uint
|
||||
|
||||
const (
|
||||
@ -746,19 +746,19 @@ func (ft *factsTable) cleanup(f *Func) {
|
||||
// By far, the most common redundant pair are generated by bounds checking.
|
||||
// For example for the code:
|
||||
//
|
||||
// a[i] = 4
|
||||
// foo(a[i])
|
||||
// a[i] = 4
|
||||
// foo(a[i])
|
||||
//
|
||||
// The compiler will generate the following code:
|
||||
//
|
||||
// if i >= len(a) {
|
||||
// panic("not in bounds")
|
||||
// }
|
||||
// a[i] = 4
|
||||
// if i >= len(a) {
|
||||
// panic("not in bounds")
|
||||
// }
|
||||
// foo(a[i])
|
||||
// if i >= len(a) {
|
||||
// panic("not in bounds")
|
||||
// }
|
||||
// a[i] = 4
|
||||
// if i >= len(a) {
|
||||
// panic("not in bounds")
|
||||
// }
|
||||
// foo(a[i])
|
||||
//
|
||||
// The second comparison i >= len(a) is clearly redundant because if the
|
||||
// else branch of the first comparison is executed, we already know that i < len(a).
|
||||
|
@ -962,8 +962,9 @@ found:
|
||||
|
||||
// clobber invalidates values. Returns true.
|
||||
// clobber is used by rewrite rules to:
|
||||
// A) make sure the values are really dead and never used again.
|
||||
// B) decrement use counts of the values' args.
|
||||
//
|
||||
// A) make sure the values are really dead and never used again.
|
||||
// B) decrement use counts of the values' args.
|
||||
func clobber(vv ...*Value) bool {
|
||||
for _, v := range vv {
|
||||
v.reset(OpInvalid)
|
||||
@ -985,7 +986,9 @@ func clobberIfDead(v *Value) bool {
|
||||
|
||||
// noteRule is an easy way to track if a rule is matched when writing
|
||||
// new ones. Make the rule of interest also conditional on
|
||||
// noteRule("note to self: rule of interest matched")
|
||||
//
|
||||
// noteRule("note to self: rule of interest matched")
|
||||
//
|
||||
// and that message will print when the rule matches.
|
||||
func noteRule(s string) bool {
|
||||
fmt.Println(s)
|
||||
@ -1789,9 +1792,11 @@ func sequentialAddresses(x, y *Value, n int64) bool {
|
||||
// We happen to match the semantics to those of arm/arm64.
|
||||
// Note that these semantics differ from x86: the carry flag has the opposite
|
||||
// sense on a subtraction!
|
||||
// On amd64, C=1 represents a borrow, e.g. SBB on amd64 does x - y - C.
|
||||
// On arm64, C=0 represents a borrow, e.g. SBC on arm64 does x - y - ^C.
|
||||
// (because it does x + ^y + C).
|
||||
//
|
||||
// On amd64, C=1 represents a borrow, e.g. SBB on amd64 does x - y - C.
|
||||
// On arm64, C=0 represents a borrow, e.g. SBC on arm64 does x - y - ^C.
|
||||
// (because it does x + ^y + C).
|
||||
//
|
||||
// See https://en.wikipedia.org/wiki/Carry_flag#Vs._borrow_flag
|
||||
type flagConstant uint8
|
||||
|
||||
|
@ -68,8 +68,10 @@ func TestCondRewrite(t *testing.T) {
|
||||
}
|
||||
|
||||
// Profile the aforementioned optimization from two angles:
|
||||
// SoloJump: generated branching code has one 'jump', for '<' and '>='
|
||||
// CombJump: generated branching code has two consecutive 'jump', for '<=' and '>'
|
||||
//
|
||||
// SoloJump: generated branching code has one 'jump', for '<' and '>='
|
||||
// CombJump: generated branching code has two consecutive 'jump', for '<=' and '>'
|
||||
//
|
||||
// We expect that 'CombJump' is generally on par with the non-optimized code, and
|
||||
// 'SoloJump' demonstrates some improvement.
|
||||
// It's for arm64 initially, please see https://github.com/golang/go/issues/38740
|
||||
|
@ -338,13 +338,15 @@ func schedule(f *Func) {
|
||||
// if v transitively depends on store s, v is ordered after s,
|
||||
// otherwise v is ordered before s.
|
||||
// Specifically, values are ordered like
|
||||
// store1
|
||||
// NilCheck that depends on store1
|
||||
// other values that depends on store1
|
||||
// store2
|
||||
// NilCheck that depends on store2
|
||||
// other values that depends on store2
|
||||
// ...
|
||||
//
|
||||
// store1
|
||||
// NilCheck that depends on store1
|
||||
// other values that depends on store1
|
||||
// store2
|
||||
// NilCheck that depends on store2
|
||||
// other values that depends on store2
|
||||
// ...
|
||||
//
|
||||
// The order of non-store and non-NilCheck values are undefined
|
||||
// (not necessarily dependency order). This should be cheaper
|
||||
// than a full scheduling as done above.
|
||||
|
@ -85,7 +85,7 @@ func TestShiftToExtensionAMD64(t *testing.T) {
|
||||
|
||||
// makeShiftExtensionFunc generates a function containing:
|
||||
//
|
||||
// (rshift (lshift (Const64 [amount])) (Const64 [amount]))
|
||||
// (rshift (lshift (Const64 [amount])) (Const64 [amount]))
|
||||
//
|
||||
// This may be equivalent to a sign or zero extension.
|
||||
func makeShiftExtensionFunc(c *Conf, amount int64, lshift, rshift Op, typ *types.Type) fun {
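At the Go source level, the shift pair corresponds to expressions such as x << 48 >> 48; the following standalone sketch (not part of this test file) shows the sign- and zero-extension equivalence being exercised:

	package main

	import "fmt"

	func main() {
		var x int64 = 0x1234_89AB // low 16 bits have their sign bit set

		// Left shift by 48 then arithmetic right shift by 48 keeps only
		// the low 16 bits and sign-extends them: same as int64(int16(x)).
		fmt.Println(x<<48>>48 == int64(int16(x))) // true

		// On an unsigned value the right shift is logical, so the same
		// shift pair zero-extends: same as uint64(uint16(u)).
		u := uint64(x)
		fmt.Println(u<<48>>48 == uint64(uint16(u))) // true
	}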
|
||||
|
@ -67,11 +67,11 @@ func shortcircuit(f *Func) {
|
||||
//
|
||||
// (1) Look for a CFG of the form
|
||||
//
|
||||
// p other pred(s)
|
||||
// \ /
|
||||
// b
|
||||
// / \
|
||||
// t other succ
|
||||
// p other pred(s)
|
||||
// \ /
|
||||
// b
|
||||
// / \
|
||||
// t other succ
|
||||
//
|
||||
// in which b is an If block containing a single phi value with a single use (b's Control),
|
||||
// which has a ConstBool arg.
|
||||
@ -80,21 +80,21 @@ func shortcircuit(f *Func) {
|
||||
//
|
||||
// Rewrite this into
|
||||
//
|
||||
// p other pred(s)
|
||||
// | /
|
||||
// | b
|
||||
// |/ \
|
||||
// t u
|
||||
// p other pred(s)
|
||||
// | /
|
||||
// | b
|
||||
// |/ \
|
||||
// t u
|
||||
//
|
||||
// and remove the appropriate phi arg(s).
|
||||
//
|
||||
// (2) Look for a CFG of the form
|
||||
//
|
||||
// p q
|
||||
// \ /
|
||||
// b
|
||||
// / \
|
||||
// t u
|
||||
// p q
|
||||
// \ /
|
||||
// b
|
||||
// / \
|
||||
// t u
|
||||
//
|
||||
// in which b is as described in (1).
|
||||
// However, b may also contain other phi values.
|
||||
|
@ -210,6 +210,7 @@ func (t SparseTree) isAncestor(x, y *Block) bool {
|
||||
// 1. If domorder(x) > domorder(y) then x does not dominate y.
|
||||
// 2. If domorder(x) < domorder(y) and domorder(y) < domorder(z) and x does not dominate y,
|
||||
// then x does not dominate z.
|
||||
//
|
||||
// Property (1) means that blocks sorted by domorder always have a maximal dominant block first.
|
||||
// Property (2) allows searches for dominated blocks to exit early.
|
||||
func (t SparseTree) domorder(x *Block) int32 {
|
||||
|
@ -130,11 +130,11 @@ func emptyBlock(b *Block) bool {
|
||||
|
||||
// trimmableBlock reports whether the block can be trimmed from the CFG,
|
||||
// subject to the following criteria:
|
||||
// - it should not be the first block
|
||||
// - it should be BlockPlain
|
||||
// - it should not loop back to itself
|
||||
// - it either is the single predecessor of the successor block or
|
||||
// contains no actual instructions
|
||||
// - it should not be the first block
|
||||
// - it should be BlockPlain
|
||||
// - it should not loop back to itself
|
||||
// - it either is the single predecessor of the successor block or
|
||||
// contains no actual instructions
|
||||
func trimmableBlock(b *Block) bool {
|
||||
if b.Kind != BlockPlain || b == b.Func.Entry {
|
||||
return false
|
||||
|
@ -286,10 +286,10 @@ func dvarint(x *obj.LSym, off int, v int64) int {
|
||||
// for stack variables are specified as the number of bytes below varp (pointer to the
|
||||
// top of the local variables) for their starting address. The format is:
|
||||
//
|
||||
// - Offset of the deferBits variable
|
||||
// - Number of defers in the function
|
||||
// - Information about each defer call, in reverse order of appearance in the function:
|
||||
// - Offset of the closure value to call
|
||||
// - Offset of the deferBits variable
|
||||
// - Number of defers in the function
|
||||
// - Information about each defer call, in reverse order of appearance in the function:
|
||||
// - Offset of the closure value to call
|
||||
func (s *state) emitOpenDeferInfo() {
|
||||
x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
|
||||
x.Set(obj.AttrContentAddressable, true)
|
||||
|
@ -11,10 +11,10 @@ import "fmt"
|
||||
// checkBranches checks correct use of labels and branch
|
||||
// statements (break, continue, goto) in a function body.
|
||||
// It catches:
|
||||
// - misplaced breaks and continues
|
||||
// - bad labeled breaks and continues
|
||||
// - invalid, unused, duplicate, and missing labels
|
||||
// - gotos jumping over variable declarations and into blocks
|
||||
// - misplaced breaks and continues
|
||||
// - bad labeled breaks and continues
|
||||
// - invalid, unused, duplicate, and missing labels
|
||||
// - gotos jumping over variable declarations and into blocks
|
||||
func checkBranches(body *BlockStmt, errh ErrorHandler) {
|
||||
if body == nil {
|
||||
return
|
||||
|
@ -620,7 +620,8 @@ func OrigInt(n ir.Node, v int64) ir.Node {
|
||||
// get the same type going out.
|
||||
// force means must assign concrete (non-ideal) type.
|
||||
// The results of defaultlit2 MUST be assigned back to l and r, e.g.
|
||||
// n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
|
||||
//
|
||||
// n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
|
||||
func defaultlit2(l ir.Node, r ir.Node, force bool) (ir.Node, ir.Node) {
|
||||
if l.Type() == nil || r.Type() == nil {
|
||||
return l, r
|
||||
|
@ -76,8 +76,9 @@ func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
|
||||
// tcArith typechecks operands of a binary arithmetic expression.
|
||||
// The result of tcArith MUST be assigned back to original operands,
|
||||
// t is the type of the expression, and should be set by the caller. e.g:
|
||||
// n.X, n.Y, t = tcArith(n, op, n.X, n.Y)
|
||||
// n.SetType(t)
|
||||
//
|
||||
// n.X, n.Y, t = tcArith(n, op, n.X, n.Y)
|
||||
// n.SetType(t)
|
||||
func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
|
||||
l, r = defaultlit2(l, r, false)
|
||||
if l.Type() == nil || r.Type() == nil {
|
||||
@ -194,7 +195,8 @@ func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type)
|
||||
}
|
||||
|
||||
// The result of tcCompLit MUST be assigned back to n, e.g.
|
||||
// n.Left = tcCompLit(n.Left)
|
||||
//
|
||||
// n.Left = tcCompLit(n.Left)
|
||||
func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
|
||||
if base.EnableTrace && base.Flag.LowerT {
|
||||
defer tracePrint("tcCompLit", n)(&res)
|
||||
|
@ -258,7 +258,7 @@ import (
|
||||
// 1: added column details to Pos
|
||||
// 2: added information for generic function/types. The export of non-generic
|
||||
// functions/types remains largely backward-compatible. Breaking changes include:
|
||||
// - a 'kind' byte is added to constant values
|
||||
// - a 'kind' byte is added to constant values
|
||||
const (
|
||||
iexportVersionGo1_11 = 0
|
||||
iexportVersionPosCol = 1
|
||||
|
@ -24,7 +24,8 @@ func LookupRuntime(name string) *ir.Name {
|
||||
// successive occurrences of the "any" placeholder in the
|
||||
// type syntax expression n.Type.
|
||||
// The result of SubstArgTypes MUST be assigned back to old, e.g.
|
||||
// n.Left = SubstArgTypes(n.Left, t1, t2)
|
||||
//
|
||||
// n.Left = SubstArgTypes(n.Left, t1, t2)
|
||||
func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
|
||||
for _, t := range types_ {
|
||||
types.CalcSize(t)
|
||||
|
@ -240,7 +240,8 @@ func typecheckNtype(n ir.Ntype) ir.Ntype {
|
||||
|
||||
// typecheck type checks node n.
|
||||
// The result of typecheck MUST be assigned back to n, e.g.
|
||||
// n.Left = typecheck(n.Left, top)
|
||||
//
|
||||
// n.Left = typecheck(n.Left, top)
|
||||
func typecheck(n ir.Node, top int) (res ir.Node) {
|
||||
// cannot type check until all the source has been parsed
|
||||
if !TypecheckAllowed {
|
||||
@ -414,7 +415,8 @@ func typecheck(n ir.Node, top int) (res ir.Node) {
|
||||
// but also accepts untyped numeric values representable as
|
||||
// value of type int (see also checkmake for comparison).
|
||||
// The result of indexlit MUST be assigned back to n, e.g.
|
||||
// n.Left = indexlit(n.Left)
|
||||
//
|
||||
// n.Left = indexlit(n.Left)
|
||||
func indexlit(n ir.Node) ir.Node {
|
||||
if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
|
||||
return DefaultLit(n, types.Types[types.TINT])
|
||||
@ -961,7 +963,8 @@ func checksliceconst(lo ir.Node, hi ir.Node) bool {
|
||||
}
|
||||
|
||||
// The result of implicitstar MUST be assigned back to n, e.g.
|
||||
// n.Left = implicitstar(n.Left)
|
||||
//
|
||||
// n.Left = implicitstar(n.Left)
|
||||
func implicitstar(n ir.Node) ir.Node {
|
||||
// insert implicit * if needed for fixed array
|
||||
t := n.Type()
|
||||
@ -1607,7 +1610,8 @@ func checkassignto(src *types.Type, dst ir.Node) {
|
||||
}
|
||||
|
||||
// The result of stringtoruneslit MUST be assigned back to n, e.g.
|
||||
// n.Left = stringtoruneslit(n.Left)
|
||||
//
|
||||
// n.Left = stringtoruneslit(n.Left)
|
||||
func stringtoruneslit(n *ir.ConvExpr) ir.Node {
|
||||
if n.X.Op() != ir.OLITERAL || n.X.Val().Kind() != constant.String {
|
||||
base.Fatalf("stringtoarraylit %v", n)
|
||||
|
@ -492,9 +492,9 @@ type Slice struct {
|
||||
|
||||
// A Field is a (Sym, Type) pairing along with some other information, and,
|
||||
// depending on the context, is used to represent:
|
||||
// - a field in a struct
|
||||
// - a method in an interface or associated with a named type
|
||||
// - a function parameter
|
||||
// - a field in a struct
|
||||
// - a method in an interface or associated with a named type
|
||||
// - a function parameter
|
||||
type Field struct {
|
||||
flags bitset8
|
||||
|
||||
@ -1121,9 +1121,10 @@ func (t *Type) SimpleString() string {
|
||||
}
|
||||
|
||||
// Cmp is a comparison between values a and b.
|
||||
// -1 if a < b
|
||||
// 0 if a == b
|
||||
// 1 if a > b
|
||||
//
|
||||
// -1 if a < b
|
||||
// 0 if a == b
|
||||
// 1 if a > b
|
||||
type Cmp int8
|
||||
|
||||
const (
|
||||
|
@ -21,7 +21,6 @@
|
||||
// Type inference computes the type (Type) of every expression (syntax.Expr)
|
||||
// and checks for compliance with the language specification.
|
||||
// Use Info.Types[expr].Type for the results of type inference.
|
||||
//
|
||||
package types2
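types2 itself is internal, but the exported go/types package exposes the same Info.Types mechanism; a minimal sketch of retrieving inferred expression types (assuming go/types as a stand-in):

	package main

	import (
		"fmt"
		"go/ast"
		"go/importer"
		"go/parser"
		"go/token"
		"go/types"
	)

	func main() {
		const src = "package p; var x = 1 + 2"

		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}

		// Info.Types records the inferred type of every expression.
		info := &types.Info{Types: make(map[ast.Expr]types.TypeAndValue)}
		conf := types.Config{Importer: importer.Default()}
		if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
			panic(err)
		}

		for expr, tv := range info.Types {
			fmt.Printf("%-8s %s\n", types.ExprString(expr), tv.Type)
		}
	}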
|
||||
|
||||
import (
|
||||
|
@ -263,7 +263,7 @@ func testFiles(t *testing.T, filenames []string, colDelta uint, manual bool) {
|
||||
// (and a separating "--"). For instance, to test the package made
|
||||
// of the files foo.go and bar.go, use:
|
||||
//
|
||||
// go test -run Manual -- foo.go bar.go
|
||||
// go test -run Manual -- foo.go bar.go
|
||||
//
|
||||
// If no source arguments are provided, the file testdata/manual.go
|
||||
// is used instead.
|
||||
|
@ -23,10 +23,10 @@ const useConstraintTypeInference = true
|
||||
//
|
||||
// Inference proceeds as follows. Starting with given type arguments:
|
||||
//
|
||||
// 1) apply FTI (function type inference) with typed arguments,
|
||||
// 2) apply CTI (constraint type inference),
|
||||
// 3) apply FTI with untyped function arguments,
|
||||
// 4) apply CTI.
|
||||
// 1. apply FTI (function type inference) with typed arguments,
|
||||
// 2. apply CTI (constraint type inference),
|
||||
// 3. apply FTI with untyped function arguments,
|
||||
// 4. apply CTI.
|
||||
//
|
||||
// The process stops as soon as all type arguments are known or an error occurs.
|
||||
func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand) (result []Type) {
|
||||
|
@ -25,9 +25,9 @@ import (
|
||||
// The last index entry is the field or method index in the (possibly embedded)
|
||||
// type where the entry was found, either:
|
||||
//
|
||||
// 1) the list of declared methods of a named type; or
|
||||
// 2) the list of all methods (method set) of an interface type; or
|
||||
// 3) the list of fields of a struct type.
|
||||
// 1. the list of declared methods of a named type; or
|
||||
// 2. the list of all methods (method set) of an interface type; or
|
||||
// 3. the list of fields of a struct type.
|
||||
//
|
||||
// The earlier index entries are the indices of the embedded struct fields
|
||||
// traversed to get to the found entry, starting at depth 0.
|
||||
@ -35,12 +35,12 @@ import (
|
||||
// If no entry is found, a nil object is returned. In this case, the returned
|
||||
// index and indirect values have the following meaning:
|
||||
//
|
||||
// - If index != nil, the index sequence points to an ambiguous entry
|
||||
// (the same name appeared more than once at the same embedding level).
|
||||
// - If index != nil, the index sequence points to an ambiguous entry
|
||||
// (the same name appeared more than once at the same embedding level).
|
||||
//
|
||||
// - If indirect is set, a method with a pointer receiver type was found
|
||||
// but there was no pointer on the path from the actual receiver type to
|
||||
// the method's formal receiver base type, nor was the receiver addressable.
|
||||
// - If indirect is set, a method with a pointer receiver type was found
|
||||
// but there was no pointer on the path from the actual receiver type to
|
||||
// the method's formal receiver base type, nor was the receiver addressable.
|
||||
func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
|
||||
if T == nil {
|
||||
panic("LookupFieldOrMethod on nil type")
|
||||
|
@ -92,9 +92,9 @@ func (s *Selection) Type() Type {
|
||||
// The last index entry is the field or method index of the type declaring f;
|
||||
// either:
|
||||
//
|
||||
// 1) the list of declared methods of a named type; or
|
||||
// 2) the list of methods of an interface type; or
|
||||
// 3) the list of fields of a struct type.
|
||||
// 1. the list of declared methods of a named type; or
|
||||
// 2. the list of methods of an interface type; or
|
||||
// 3. the list of fields of a struct type.
|
||||
//
|
||||
// The earlier index entries are the indices of the embedded fields implicitly
|
||||
// traversed to get from (the type of) x to f, starting at embedding depth 0.
|
||||
@ -111,6 +111,7 @@ func (s *Selection) String() string { return SelectionString(s, nil) }
|
||||
// package-level objects, and may be nil.
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// "field (T) f int"
|
||||
// "method (T) f(X) Y"
|
||||
// "method expr (T) f(X) Y"
|
||||
|
@ -24,19 +24,19 @@ type Sizes interface {
|
||||
// StdSizes is a convenience type for creating commonly used Sizes.
|
||||
// It makes the following simplifying assumptions:
|
||||
//
|
||||
// - The size of explicitly sized basic types (int16, etc.) is the
|
||||
// specified size.
|
||||
// - The size of strings and interfaces is 2*WordSize.
|
||||
// - The size of slices is 3*WordSize.
|
||||
// - The size of an array of n elements corresponds to the size of
|
||||
// a struct of n consecutive fields of the array's element type.
|
||||
// - The size of a struct is the offset of the last field plus that
|
||||
// field's size. As with all element types, if the struct is used
|
||||
// in an array its size must first be aligned to a multiple of the
|
||||
// struct's alignment.
|
||||
// - All other types have size WordSize.
|
||||
// - Arrays and structs are aligned per spec definition; all other
|
||||
// types are naturally aligned with a maximum alignment MaxAlign.
|
||||
// - The size of explicitly sized basic types (int16, etc.) is the
|
||||
// specified size.
|
||||
// - The size of strings and interfaces is 2*WordSize.
|
||||
// - The size of slices is 3*WordSize.
|
||||
// - The size of an array of n elements corresponds to the size of
|
||||
// a struct of n consecutive fields of the array's element type.
|
||||
// - The size of a struct is the offset of the last field plus that
|
||||
// field's size. As with all element types, if the struct is used
|
||||
// in an array its size must first be aligned to a multiple of the
|
||||
// struct's alignment.
|
||||
// - All other types have size WordSize.
|
||||
// - Arrays and structs are aligned per spec definition; all other
|
||||
// types are naturally aligned with a maximum alignment MaxAlign.
|
||||
//
|
||||
// *StdSizes implements Sizes.
|
||||
type StdSizes struct {
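The exported go/types analogue of StdSizes makes these rules easy to observe; a small sketch assuming a 64-bit word size and 8-byte maximum alignment:

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		// A 64-bit word with 8-byte maximum alignment, roughly matching
		// a typical 64-bit target.
		sizes := &types.StdSizes{WordSize: 8, MaxAlign: 8}

		fmt.Println(sizes.Sizeof(types.Typ[types.Int16]))                 // 2: explicitly sized basic type
		fmt.Println(sizes.Sizeof(types.Typ[types.String]))                // 16: strings are 2*WordSize
		fmt.Println(sizes.Sizeof(types.NewSlice(types.Typ[types.Byte]))) // 24: slices are 3*WordSize
	}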
|
||||
|
@ -6,10 +6,10 @@ package types2
|
||||
|
||||
// A term describes elementary type sets:
|
||||
//
|
||||
// ∅: (*term)(nil) == ∅ // set of no types (empty set)
|
||||
// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
|
||||
// T: &term{false, T} == {T} // set of type T
|
||||
// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
|
||||
// ∅: (*term)(nil) == ∅ // set of no types (empty set)
|
||||
// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
|
||||
// T: &term{false, T} == {T} // set of type T
|
||||
// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
|
||||
type term struct {
|
||||
tilde bool // valid if typ != nil
|
||||
typ Type
|
||||
|
@ -242,6 +242,7 @@ func walkReturn(n *ir.ReturnStmt) ir.Node {
|
||||
|
||||
// check assign type list to
|
||||
// an expression list. called in
|
||||
//
|
||||
// expr-list = func()
|
||||
func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
|
||||
if len(nl) != nr.NumFields() {
|
||||
@ -273,6 +274,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
|
||||
|
||||
// check assign expression list to
|
||||
// an expression list. called in
|
||||
//
|
||||
// expr-list = expr-list
|
||||
func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node {
|
||||
// cannot happen: should have been rejected during type checking
|
||||
@ -455,17 +457,18 @@ func readsMemory(n ir.Node) bool {
|
||||
}
|
||||
|
||||
// expand append(l1, l2...) to
|
||||
// init {
|
||||
// s := l1
|
||||
// n := len(s) + len(l2)
|
||||
// // Compare as uint so growslice can panic on overflow.
|
||||
// if uint(n) > uint(cap(s)) {
|
||||
// s = growslice(s, n)
|
||||
// }
|
||||
// s = s[:n]
|
||||
// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
|
||||
// }
|
||||
// s
|
||||
//
|
||||
// init {
|
||||
// s := l1
|
||||
// n := len(s) + len(l2)
|
||||
// // Compare as uint so growslice can panic on overflow.
|
||||
// if uint(n) > uint(cap(s)) {
|
||||
// s = growslice(s, n)
|
||||
// }
|
||||
// s = s[:n]
|
||||
// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
|
||||
// }
|
||||
// s
|
||||
//
|
||||
// l2 is allowed to be a string.
|
||||
func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
|
||||
@ -597,32 +600,33 @@ func isAppendOfMake(n ir.Node) bool {
|
||||
}
|
||||
|
||||
// extendSlice rewrites append(l1, make([]T, l2)...) to
|
||||
// init {
|
||||
// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
|
||||
// } else {
|
||||
// panicmakeslicelen()
|
||||
// }
|
||||
// s := l1
|
||||
// n := len(s) + l2
|
||||
// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
|
||||
// // cap is a positive int and n can become negative when len(s) + l2
|
||||
// // overflows int. Interpreting n when negative as uint makes it larger
|
||||
// // than cap(s). growslice will check the int n arg and panic if n is
|
||||
// // negative. This prevents the overflow from being undetected.
|
||||
// if uint(n) > uint(cap(s)) {
|
||||
// s = growslice(T, s, n)
|
||||
// }
|
||||
// s = s[:n]
|
||||
// lptr := &l1[0]
|
||||
// sptr := &s[0]
|
||||
// if lptr == sptr || !T.HasPointers() {
|
||||
// // growslice did not clear the whole underlying array (or did not get called)
|
||||
// hp := &s[len(l1)]
|
||||
// hn := l2 * sizeof(T)
|
||||
// memclr(hp, hn)
|
||||
// }
|
||||
// }
|
||||
// s
|
||||
//
|
||||
// init {
|
||||
// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
|
||||
// } else {
|
||||
// panicmakeslicelen()
|
||||
// }
|
||||
// s := l1
|
||||
// n := len(s) + l2
|
||||
// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
|
||||
// // cap is a positive int and n can become negative when len(s) + l2
|
||||
// // overflows int. Interpreting n when negative as uint makes it larger
|
||||
// // than cap(s). growslice will check the int n arg and panic if n is
|
||||
// // negative. This prevents the overflow from being undetected.
|
||||
// if uint(n) > uint(cap(s)) {
|
||||
// s = growslice(T, s, n)
|
||||
// }
|
||||
// s = s[:n]
|
||||
// lptr := &l1[0]
|
||||
// sptr := &s[0]
|
||||
// if lptr == sptr || !T.HasPointers() {
|
||||
// // growslice did not clear the whole underlying array (or did not get called)
|
||||
// hp := &s[len(l1)]
|
||||
// hn := l2 * sizeof(T)
|
||||
// memclr(hp, hn)
|
||||
// }
|
||||
// }
|
||||
// s
|
||||
func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
|
||||
// isAppendOfMake made sure all possible positive values of l2 fit into an uint.
|
||||
// The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
|
||||
|
@ -26,19 +26,19 @@ import (
|
||||
//
|
||||
// For race detector, expand append(src, a [, b]* ) to
|
||||
//
|
||||
// init {
|
||||
// s := src
|
||||
// const argc = len(args) - 1
|
||||
// if cap(s) - len(s) < argc {
|
||||
// s = growslice(s, len(s)+argc)
|
||||
// }
|
||||
// n := len(s)
|
||||
// s = s[:n+argc]
|
||||
// s[n] = a
|
||||
// s[n+1] = b
|
||||
// ...
|
||||
// }
|
||||
// s
|
||||
// init {
|
||||
// s := src
|
||||
// const argc = len(args) - 1
|
||||
// if cap(s) - len(s) < argc {
|
||||
// s = growslice(s, len(s)+argc)
|
||||
// }
|
||||
// n := len(s)
|
||||
// s = s[:n+argc]
|
||||
// s[n] = a
|
||||
// s[n+1] = b
|
||||
// ...
|
||||
// }
|
||||
// s
|
||||
func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
|
||||
if !ir.SameSafeExpr(dst, n.Args[0]) {
|
||||
n.Args[0] = safeExpr(n.Args[0], init)
|
||||
|
@ -16,7 +16,8 @@ import (
|
||||
)
|
||||
|
||||
// The result of walkCompare MUST be assigned back to n, e.g.
|
||||
// n.Left = walkCompare(n.Left, init)
|
||||
//
|
||||
// n.Left = walkCompare(n.Left, init)
|
||||
func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
|
||||
if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL {
|
||||
return walkCompareInterface(n, init)
|
||||
@ -404,7 +405,8 @@ func walkCompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
|
||||
}
|
||||
|
||||
// The result of finishCompare MUST be assigned back to n, e.g.
|
||||
// n.Left = finishCompare(n.Left, x, r, init)
|
||||
//
|
||||
// n.Left = finishCompare(n.Left, x, r, init)
|
||||
func finishCompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
|
||||
r = typecheck.Expr(r)
|
||||
r = typecheck.Conv(r, n.Type())
|
||||
|
@ -20,7 +20,8 @@ import (
|
||||
)
|
||||
|
||||
// The result of walkExpr MUST be assigned back to n, e.g.
|
||||
// n.Left = walkExpr(n.Left, init)
|
||||
//
|
||||
// n.Left = walkExpr(n.Left, init)
|
||||
func walkExpr(n ir.Node, init *ir.Nodes) ir.Node {
|
||||
if n == nil {
|
||||
return n
|
||||
|
@ -237,7 +237,8 @@ func isaddrokay(n ir.Node) bool {
|
||||
// If the original argument n is not okay, addrTemp creates a tmp, emits
|
||||
// tmp = n, and then returns tmp.
|
||||
// The result of addrTemp MUST be assigned back to n, e.g.
|
||||
// n.Left = o.addrTemp(n.Left)
|
||||
//
|
||||
// n.Left = o.addrTemp(n.Left)
|
||||
func (o *orderState) addrTemp(n ir.Node) ir.Node {
|
||||
if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
|
||||
// TODO: expand this to all static composite literal nodes?
|
||||
@ -316,8 +317,10 @@ func (o *orderState) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
|
||||
// Returns a bool that signals if a modification was made.
|
||||
//
|
||||
// For:
|
||||
// x = m[string(k)]
|
||||
// x = m[T1{... Tn{..., string(k), ...}]
|
||||
//
|
||||
// x = m[string(k)]
|
||||
// x = m[T1{... Tn{..., string(k), ...}]
|
||||
//
|
||||
// where k is []byte, T1 to Tn is a nesting of struct and array literals,
|
||||
// the allocation of backing bytes for the string can be avoided
|
||||
// by reusing the []byte backing array. These are special cases
|
||||
@ -400,9 +403,12 @@ func (o *orderState) stmtList(l ir.Nodes) {
|
||||
}
|
||||
|
||||
// orderMakeSliceCopy matches the pattern:
|
||||
// m = OMAKESLICE([]T, x); OCOPY(m, s)
|
||||
//
|
||||
// m = OMAKESLICE([]T, x); OCOPY(m, s)
|
||||
//
|
||||
// and rewrites it to:
|
||||
// m = OMAKESLICECOPY([]T, x, s); nil
|
||||
//
|
||||
// m = OMAKESLICECOPY([]T, x, s); nil
|
||||
func orderMakeSliceCopy(s []ir.Node) {
|
||||
if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
|
||||
return
|
||||
@ -473,7 +479,8 @@ func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
|
||||
// exprInPlace orders the side effects in *np and
|
||||
// leaves them as the init list of the final *np.
|
||||
// The result of exprInPlace MUST be assigned back to n, e.g.
|
||||
// n.Left = o.exprInPlace(n.Left)
|
||||
//
|
||||
// n.Left = o.exprInPlace(n.Left)
|
||||
func (o *orderState) exprInPlace(n ir.Node) ir.Node {
|
||||
var order orderState
|
||||
order.free = o.free
|
||||
@ -489,7 +496,9 @@ func (o *orderState) exprInPlace(n ir.Node) ir.Node {
|
||||
// orderStmtInPlace orders the side effects of the single statement *np
|
||||
// and replaces it with the resulting statement list.
|
||||
// The result of orderStmtInPlace MUST be assigned back to n, e.g.
|
||||
// n.Left = orderStmtInPlace(n.Left)
|
||||
//
|
||||
// n.Left = orderStmtInPlace(n.Left)
|
||||
//
|
||||
// free is a map that can be used to obtain temporary variables by type.
|
||||
func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
|
||||
var order orderState
|
||||
@ -1087,7 +1096,8 @@ func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
|
||||
// Otherwise lhs == nil. (When lhs != nil it may be possible
|
||||
// to avoid copying the result of the expression to a temporary.)
|
||||
// The result of expr MUST be assigned back to n, e.g.
|
||||
// n.Left = o.expr(n.Left, lhs)
|
||||
//
|
||||
// n.Left = o.expr(n.Left, lhs)
|
||||
func (o *orderState) expr(n, lhs ir.Node) ir.Node {
|
||||
if n == nil {
|
||||
return n
|
||||
@ -1451,10 +1461,14 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
|
||||
// as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
|
||||
// The caller should order the right-hand side of the assignment before calling order.as2func.
|
||||
// It rewrites,
|
||||
//
|
||||
// a, b, a = ...
|
||||
//
|
||||
// as
|
||||
//
|
||||
// tmp1, tmp2, tmp3 = ...
|
||||
// a, b, a = tmp1, tmp2, tmp3
|
||||
//
|
||||
// This is necessary to ensure left to right assignment order.
|
||||
func (o *orderState) as2func(n *ir.AssignListStmt) {
|
||||
results := n.Rhs[0].Type()
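The language-level guarantee this rewrite preserves can be seen directly in ordinary code; a standalone example (not compiler code):

	package main

	import "fmt"

	func f() (int, int, int) { return 1, 2, 3 }

	func main() {
		var a, b int
		// The assignments happen left to right, so a ends up with the
		// third result, not the first.
		a, b, a = f()
		fmt.Println(a, b) // 3 2
	}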
|
||||
|
@ -10,7 +10,8 @@ import (
|
||||
)
|
||||
|
||||
// The result of walkStmt MUST be assigned back to n, e.g.
|
||||
// n.Left = walkStmt(n.Left)
|
||||
//
|
||||
// n.Left = walkStmt(n.Left)
|
||||
func walkStmt(n ir.Node) ir.Node {
|
||||
if n == nil {
|
||||
return n
|
||||
|
@ -106,7 +106,9 @@ func moveByType(t *types.Type) obj.As {
|
||||
}
|
||||
|
||||
// opregreg emits instructions for
|
||||
// dest := dest(To) op src(From)
|
||||
//
|
||||
// dest := dest(To) op src(From)
|
||||
//
|
||||
// and also returns the created obj.Prog so it
|
||||
// may be further adjusted (offset, scale, etc).
|
||||
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
|
||||
|
@ -377,7 +377,7 @@ func (f *File) newCounter(start, end token.Pos, numStmt int) string {
|
||||
// S1
|
||||
// if cond {
|
||||
// S2
|
||||
// }
|
||||
// }
|
||||
// S3
|
||||
//
|
||||
// counters will be added before S1 and before S3. The block containing S2
|
||||
|
@ -162,8 +162,8 @@ func buildCover(t *testing.T) {
|
||||
// Run this shell script, but do it in Go so it can be run by "go test".
|
||||
//
|
||||
// replace the word LINE with the line number < testdata/test.go > testdata/test_line.go
|
||||
// go build -o testcover
|
||||
// testcover -mode=count -var=CoverTest -o ./testdata/test_cover.go testdata/test_line.go
|
||||
// go build -o testcover
|
||||
// testcover -mode=count -var=CoverTest -o ./testdata/test_cover.go testdata/test_line.go
|
||||
// go run ./testdata/main.go ./testdata/test.go
|
||||
func TestCover(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
@ -19,6 +19,7 @@ must be applied to the output of cgo preprocessing, not the input,
|
||||
because cover deletes comments that are significant to cgo.
|
||||
|
||||
For usage information, please see:
|
||||
|
||||
go help testflag
|
||||
go tool cover -help
|
||||
*/
|
||||
|
src/cmd/dist/build.go (vendored)
@ -1225,7 +1225,9 @@ var toolchain = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/link"}
|
||||
// commands (like "go tool dist test" in run.bash) can rely on bug fixes
|
||||
// made since Go 1.4, but this function cannot. In particular, the uses
|
||||
// of os/exec in this function cannot assume that
|
||||
//
|
||||
// cmd.Env = append(os.Environ(), "X=Y")
|
||||
//
|
||||
// sets $X to Y in the command's environment. That guarantee was
|
||||
// added after Go 1.4, and in fact in Go 1.4 it was typically the opposite:
|
||||
// if $X was already present in os.Environ(), most systems preferred
|
||||
|
src/cmd/dist/doc.go (vendored)
@ -5,15 +5,17 @@
|
||||
// Dist helps bootstrap, build, and test the Go distribution.
|
||||
//
|
||||
// Usage:
|
||||
// go tool dist [command]
|
||||
//
|
||||
// go tool dist [command]
|
||||
//
|
||||
// The commands are:
|
||||
// banner print installation banner
|
||||
// bootstrap rebuild everything
|
||||
// clean deletes all built files
|
||||
// env [-p] print environment (-p: include $PATH)
|
||||
// install [dir] install individual directory
|
||||
// list [-json] list all supported platforms
|
||||
// test [-h] run Go test(s)
|
||||
// version print Go version
|
||||
//
|
||||
// banner print installation banner
|
||||
// bootstrap rebuild everything
|
||||
// clean deletes all built files
|
||||
// env [-p] print environment (-p: include $PATH)
|
||||
// install [dir] install individual directory
|
||||
// list [-json] list all supported platforms
|
||||
// test [-h] run Go test(s)
|
||||
// version print Go version
|
||||
package main
|
||||
|
@ -882,7 +882,9 @@ func TestDoc(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test the code to try multiple packages. Our test case is
|
||||
//
|
||||
// go doc rand.Float64
|
||||
//
|
||||
// This needs to find math/rand.Float64; however crypto/rand, which doesn't
|
||||
// have the symbol, usually appears first in the directory listing.
|
||||
func TestMultiplePackages(t *testing.T) {
|
||||
@ -939,11 +941,15 @@ func TestMultiplePackages(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test the code to look up packages when given two args. First test case is
|
||||
//
|
||||
// go doc binary BigEndian
|
||||
//
|
||||
// This needs to find encoding/binary.BigEndian, which means
|
||||
// finding the package encoding/binary given only "binary".
|
||||
// Second case is
|
||||
//
|
||||
// go doc rand Float64
|
||||
//
|
||||
// which again needs to find math/rand and not give up after crypto/rand,
|
||||
// which has no such function.
|
||||
func TestTwoArgLookup(t *testing.T) {
|
||||
|
@ -5,20 +5,25 @@
|
||||
// Doc (usually run as go doc) accepts zero, one or two arguments.
|
||||
//
|
||||
// Zero arguments:
|
||||
//
|
||||
// go doc
|
||||
//
|
||||
// Show the documentation for the package in the current directory.
|
||||
//
|
||||
// One argument:
|
||||
//
|
||||
// go doc <pkg>
|
||||
// go doc <sym>[.<methodOrField>]
|
||||
// go doc [<pkg>.]<sym>[.<methodOrField>]
|
||||
// go doc [<pkg>.][<sym>.]<methodOrField>
|
||||
//
|
||||
// The first item in this list that succeeds is the one whose documentation
|
||||
// is printed. If there is a symbol but no package, the package in the current
|
||||
// directory is chosen. However, if the argument begins with a capital
|
||||
// letter it is always assumed to be a symbol in the current directory.
|
||||
//
|
||||
// Two arguments:
|
||||
//
|
||||
// go doc <pkg> <sym>[.<methodOrField>]
|
||||
//
|
||||
// Show the documentation for the package, symbol, and method or field. The
|
||||
|
@ -24,9 +24,13 @@ var cftypeFix = fix{
|
||||
}
|
||||
|
||||
// Old state:
|
||||
// type CFTypeRef unsafe.Pointer
|
||||
//
|
||||
// type CFTypeRef unsafe.Pointer
|
||||
//
|
||||
// New state:
|
||||
// type CFTypeRef uintptr
|
||||
//
|
||||
// type CFTypeRef uintptr
|
||||
//
|
||||
// and similar for other *Ref types.
|
||||
// This fix finds nils initializing these types and replaces the nils with 0s.
|
||||
func cftypefix(f *ast.File) bool {
|
||||
|
@ -8,6 +8,7 @@ newer ones. After you update to a new Go release, fix helps make
|
||||
the necessary changes to your programs.
|
||||
|
||||
Usage:
|
||||
|
||||
go tool fix [-r name,...] [path ...]
|
||||
|
||||
Without an explicit path, fix reads standard input and writes the
|
||||
@ -30,7 +31,7 @@ Fix prints the full list of fixes it can apply in its help output;
|
||||
to see them, run go tool fix -help.
|
||||
|
||||
Fix does not make backup copies of the files that it edits.
|
||||
Instead, use a version control system's ``diff'' functionality to inspect
|
||||
Instead, use a version control system's “diff” functionality to inspect
|
||||
the changes that fix makes before committing them.
|
||||
*/
|
||||
package main
|
||||
|
@ -22,9 +22,13 @@ var eglFixDisplay = fix{
|
||||
}
|
||||
|
||||
// Old state:
|
||||
// type EGLDisplay unsafe.Pointer
|
||||
//
|
||||
// type EGLDisplay unsafe.Pointer
|
||||
//
|
||||
// New state:
|
||||
// type EGLDisplay uintptr
|
||||
//
|
||||
// type EGLDisplay uintptr
|
||||
//
|
||||
// This fix finds nils initializing these types and replaces the nils with 0s.
|
||||
func eglfixDisp(f *ast.File) bool {
|
||||
return typefix(f, func(s string) bool {
|
||||
@ -41,9 +45,13 @@ var eglFixConfig = fix{
|
||||
}
|
||||
|
||||
// Old state:
|
||||
// type EGLConfig unsafe.Pointer
|
||||
//
|
||||
// type EGLConfig unsafe.Pointer
|
||||
//
|
||||
// New state:
|
||||
// type EGLConfig uintptr
|
||||
//
|
||||
// type EGLConfig uintptr
|
||||
//
|
||||
// This fix finds nils initializing these types and replaces the nils with 0s.
|
||||
func eglfixConfig(f *ast.File) bool {
|
||||
return typefix(f, func(s string) bool {
|
||||
|
@ -21,9 +21,13 @@ var jniFix = fix{
|
||||
}
|
||||
|
||||
// Old state:
|
||||
// type jobject *_jobject
|
||||
//
|
||||
// type jobject *_jobject
|
||||
//
|
||||
// New state:
|
||||
// type jobject uintptr
|
||||
//
|
||||
// type jobject uintptr
|
||||
//
|
||||
// and similar for subtypes of jobject.
|
||||
// This fix finds nils initializing these types and replaces the nils with 0s.
|
||||
func jnifix(f *ast.File) bool {
|
||||
|
@ -78,11 +78,11 @@ var defEnvMap = map[string]string{
|
||||
|
||||
// TestGenerateCommandShortHand - similar to TestGenerateCommandParse,
|
||||
// except:
|
||||
// 1. if the result starts with -command, record that shorthand
|
||||
// before moving on to the next test.
|
||||
// 2. If a source line number is specified, set that in the parser
|
||||
// before executing the test. i.e., execute the split as if it
|
||||
// processing that source line.
|
||||
// 1. if the result starts with -command, record that shorthand
|
||||
// before moving on to the next test.
|
||||
// 2. If a source line number is specified, set that in the parser
|
||||
// before executing the test. i.e., execute the split as if it
|
||||
// processing that source line.
|
||||
func TestGenerateCommandShorthand(t *testing.T) {
|
||||
g := &Generator{
|
||||
r: nil, // Unused here.
|
||||
@ -216,11 +216,11 @@ var splitTestsLines = []splitTestWithLine{
|
||||
|
||||
// TestGenerateCommandShortHand - similar to TestGenerateCommandParse,
|
||||
// except:
|
||||
// 1. if the result starts with -command, record that shorthand
|
||||
// before moving on to the next test.
|
||||
// 2. If a source line number is specified, set that in the parser
|
||||
// before executing the test. i.e., execute the split as if it
|
||||
// processing that source line.
|
||||
// 1. if the result starts with -command, record that shorthand
|
||||
// before moving on to the next test.
|
||||
// 2. If a source line number is specified, set that in the parser
|
||||
// before executing the test. i.e., execute the split as if it
|
||||
// processing that source line.
|
||||
func TestGenerateCommandShortHand2(t *testing.T) {
|
||||
g := &Generator{
|
||||
r: nil, // Unused here.
|
||||
|
@ -215,7 +215,9 @@ func matchTag(name string, tags map[string]bool, prefer bool) bool {
|
||||
}
|
||||
|
||||
// eval is like
|
||||
//
|
||||
// x.Eval(func(tag string) bool { return matchTag(tag, tags) })
|
||||
//
|
||||
// except that it implements the special case for tags["*"] meaning
|
||||
// all tags are both true and false at the same time.
|
||||
func eval(x constraint.Expr, tags map[string]bool, prefer bool) bool {
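A rough approximation of this special case can be written against the exported go/build/constraint package; the sketch below is only illustrative: the matchTag/prefer plumbing is omitted, and treating "*" as simply true does not capture the full "both true and false" behavior:

	package main

	import (
		"fmt"
		"go/build/constraint"
	)

	// evalTags evaluates a build-constraint expression against a tag set,
	// treating every tag as satisfied when tags["*"] is set.
	func evalTags(expr constraint.Expr, tags map[string]bool) bool {
		return expr.Eval(func(tag string) bool {
			if tags["*"] {
				return true
			}
			return tags[tag]
		})
	}

	func main() {
		expr, err := constraint.Parse("//go:build linux && amd64")
		if err != nil {
			panic(err)
		}
		fmt.Println(evalTags(expr, map[string]bool{"linux": true, "amd64": true})) // true
		fmt.Println(evalTags(expr, map[string]bool{"*": true}))                    // true
	}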
|
||||
@ -236,17 +238,18 @@ func eval(x constraint.Expr, tags map[string]bool, prefer bool) bool {
|
||||
// suffix which does not match the current system.
|
||||
// The recognized name formats are:
|
||||
//
|
||||
// name_$(GOOS).*
|
||||
// name_$(GOARCH).*
|
||||
// name_$(GOOS)_$(GOARCH).*
|
||||
// name_$(GOOS)_test.*
|
||||
// name_$(GOARCH)_test.*
|
||||
// name_$(GOOS)_$(GOARCH)_test.*
|
||||
// name_$(GOOS).*
|
||||
// name_$(GOARCH).*
|
||||
// name_$(GOOS)_$(GOARCH).*
|
||||
// name_$(GOOS)_test.*
|
||||
// name_$(GOARCH)_test.*
|
||||
// name_$(GOOS)_$(GOARCH)_test.*
|
||||
//
|
||||
// Exceptions:
|
||||
// if GOOS=android, then files with GOOS=linux are also matched.
|
||||
// if GOOS=illumos, then files with GOOS=solaris are also matched.
|
||||
// if GOOS=ios, then files with GOOS=darwin are also matched.
|
||||
//
|
||||
// if GOOS=android, then files with GOOS=linux are also matched.
|
||||
// if GOOS=illumos, then files with GOOS=solaris are also matched.
|
||||
// if GOOS=ios, then files with GOOS=darwin are also matched.
|
||||
//
|
||||
// If tags["*"] is true, then MatchFile will consider all possible
|
||||
// GOOS and GOARCH to be available and will consequently
|
||||
|
@ -76,9 +76,9 @@ func TestPackagesFor(ctx context.Context, opts PackageOpts, p *Package, cover *T
|
||||
}
|
||||
|
||||
// TestPackagesAndErrors returns three packages:
|
||||
// - pmain, the package main corresponding to the test binary (running tests in ptest and pxtest).
|
||||
// - ptest, the package p compiled with added "package p" test files.
|
||||
// - pxtest, the result of compiling any "package p_test" (external) test files.
|
||||
// - pmain, the package main corresponding to the test binary (running tests in ptest and pxtest).
|
||||
// - ptest, the package p compiled with added "package p" test files.
|
||||
// - pxtest, the result of compiling any "package p_test" (external) test files.
|
||||
//
|
||||
// If the package has no "package p_test" test files, pxtest will be nil.
|
||||
// If the non-test compilation of package p can be reused
|
||||
|
@ -17,9 +17,9 @@ import (
|
||||
// Opening an exclusive-use file returns an error.
|
||||
// The expected error strings are:
|
||||
//
|
||||
// - "open/create -- file is locked" (cwfs, kfs)
|
||||
// - "exclusive lock" (fossil)
|
||||
// - "exclusive use file already open" (ramfs)
|
||||
// - "open/create -- file is locked" (cwfs, kfs)
|
||||
// - "exclusive lock" (fossil)
|
||||
// - "exclusive use file already open" (ramfs)
|
||||
var lockedErrStrings = [...]string{
|
||||
"file is locked",
|
||||
"exclusive lock",
|
||||
|
@ -731,10 +731,10 @@ func (r *resolver) performWildcardQueries(ctx context.Context) {
|
||||
}
|
||||
|
||||
// queryWildcard adds a candidate set to q for each module for which:
|
||||
// - some version of the module is already in the build list, and
|
||||
// - that module exists at some version matching q.version, and
|
||||
// - either the module path itself matches q.pattern, or some package within
|
||||
// the module at q.version matches q.pattern.
|
||||
// - some version of the module is already in the build list, and
|
||||
// - that module exists at some version matching q.version, and
|
||||
// - either the module path itself matches q.pattern, or some package within
|
||||
// the module at q.version matches q.pattern.
|
||||
func (r *resolver) queryWildcard(ctx context.Context, q *query) {
|
||||
// For wildcard patterns, modload.QueryPattern only identifies modules
|
||||
// matching the prefix of the path before the wildcard. However, the build
|
||||
|
@ -676,11 +676,11 @@ func updateWorkspaceRoots(ctx context.Context, rs *Requirements, add []module.Ve
|
||||
// invariants of the go.mod file needed to support graph pruning for the given
|
||||
// packages:
|
||||
//
|
||||
// 1. For each package marked with pkgInAll, the module path that provided that
|
||||
// package is included as a root.
|
||||
// 2. For all packages, the module that provided that package either remains
|
||||
// selected at the same version or is upgraded by the dependencies of a
|
||||
// root.
|
||||
// 1. For each package marked with pkgInAll, the module path that provided that
|
||||
// package is included as a root.
|
||||
// 2. For all packages, the module that provided that package either remains
|
||||
// selected at the same version or is upgraded by the dependencies of a
|
||||
// root.
|
||||
//
|
||||
// If any module that provided a package has been upgraded above its previous
|
||||
// version, the caller may need to reload and recompute the package graph.
|
||||
@ -769,17 +769,17 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, direct map[
|
||||
// updatePrunedRoots returns a set of root requirements that maintains the
|
||||
// invariants of the go.mod file needed to support graph pruning:
|
||||
//
|
||||
// 1. The selected version of the module providing each package marked with
|
||||
// either pkgInAll or pkgIsRoot is included as a root.
|
||||
// Note that certain root patterns (such as '...') may explode the root set
|
||||
// to contain every module that provides any package imported (or merely
|
||||
// required) by any other module.
|
||||
// 2. Each root appears only once, at the selected version of its path
|
||||
// (if rs.graph is non-nil) or at the highest version otherwise present as a
|
||||
// root (otherwise).
|
||||
// 3. Every module path that appears as a root in rs remains a root.
|
||||
// 4. Every version in add is selected at its given version unless upgraded by
|
||||
// (the dependencies of) an existing root or another module in add.
|
||||
// 1. The selected version of the module providing each package marked with
|
||||
// either pkgInAll or pkgIsRoot is included as a root.
|
||||
// Note that certain root patterns (such as '...') may explode the root set
|
||||
// to contain every module that provides any package imported (or merely
|
||||
// required) by any other module.
|
||||
// 2. Each root appears only once, at the selected version of its path
|
||||
// (if rs.graph is non-nil) or at the highest version otherwise present as a
|
||||
// root (otherwise).
|
||||
// 3. Every module path that appears as a root in rs remains a root.
|
||||
// 4. Every version in add is selected at its given version unless upgraded by
|
||||
// (the dependencies of) an existing root or another module in add.
|
||||
//
|
||||
// The packages in pkgs are assumed to have been loaded from either the roots of
|
||||
// rs or the modules selected in the graph of rs.
|
||||
@ -787,26 +787,26 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, direct map[
|
||||
// The above invariants together imply the graph-pruning invariants for the
|
||||
// go.mod file:
|
||||
//
|
||||
// 1. (The import invariant.) Every module that provides a package transitively
|
||||
// imported by any package or test in the main module is included as a root.
|
||||
// This follows by induction from (1) and (3) above. Transitively-imported
|
||||
// packages loaded during this invocation are marked with pkgInAll (1),
|
||||
// and by hypothesis any transitively-imported packages loaded in previous
|
||||
// invocations were already roots in rs (3).
|
||||
// 1. (The import invariant.) Every module that provides a package transitively
|
||||
// imported by any package or test in the main module is included as a root.
|
||||
// This follows by induction from (1) and (3) above. Transitively-imported
|
||||
// packages loaded during this invocation are marked with pkgInAll (1),
|
||||
// and by hypothesis any transitively-imported packages loaded in previous
|
||||
// invocations were already roots in rs (3).
|
||||
//
|
||||
// 2. (The argument invariant.) Every module that provides a package matching
|
||||
// an explicit package pattern is included as a root. This follows directly
|
||||
// from (1): packages matching explicit package patterns are marked with
|
||||
// pkgIsRoot.
|
||||
// 2. (The argument invariant.) Every module that provides a package matching
|
||||
// an explicit package pattern is included as a root. This follows directly
|
||||
// from (1): packages matching explicit package patterns are marked with
|
||||
// pkgIsRoot.
|
||||
//
|
||||
// 3. (The completeness invariant.) Every module that contributed any package
|
||||
// to the build is required by either the main module or one of the modules
|
||||
// it requires explicitly. This invariant is left up to the caller, who must
|
||||
// not load packages from outside the module graph but may add roots to the
|
||||
// graph, but is facilited by (3). If the caller adds roots to the graph in
|
||||
// order to resolve missing packages, then updatePrunedRoots will retain them,
|
||||
// the selected versions of those roots cannot regress, and they will
|
||||
// eventually be written back to the main module's go.mod file.
|
||||
// 3. (The completeness invariant.) Every module that contributed any package
|
||||
// to the build is required by either the main module or one of the modules
|
||||
// it requires explicitly. This invariant is left up to the caller, who must
|
||||
// not load packages from outside the module graph but may add roots to the
|
||||
// graph, but is facilited by (3). If the caller adds roots to the graph in
|
||||
// order to resolve missing packages, then updatePrunedRoots will retain them,
|
||||
// the selected versions of those roots cannot regress, and they will
|
||||
// eventually be written back to the main module's go.mod file.
|
||||
//
|
||||
// (See https://golang.org/design/36460-lazy-module-loading#invariants for more
|
||||
// detail.)
|
||||
@ -1162,14 +1162,14 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, direct ma
|
||||
//
|
||||
// The roots are updated such that:
|
||||
//
|
||||
// 1. The selected version of every module path in direct is included as a root
|
||||
// (if it is not "none").
|
||||
// 2. Each root is the selected version of its path. (We say that such a root
|
||||
// set is “consistent”.)
|
||||
// 3. Every version selected in the graph of rs remains selected unless upgraded
|
||||
// by a dependency in add.
|
||||
// 4. Every version in add is selected at its given version unless upgraded by
|
||||
// (the dependencies of) an existing root or another module in add.
|
||||
// 1. The selected version of every module path in direct is included as a root
|
||||
// (if it is not "none").
|
||||
// 2. Each root is the selected version of its path. (We say that such a root
|
||||
// set is “consistent”.)
|
||||
// 3. Every version selected in the graph of rs remains selected unless upgraded
|
||||
// by a dependency in add.
|
||||
// 4. Every version in add is selected at its given version unless upgraded by
|
||||
// (the dependencies of) an existing root or another module in add.
|
||||
func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) {
|
||||
mg, err := rs.Graph(ctx)
|
||||
if err != nil {
|
||||
|
@ -16,20 +16,20 @@ import (
|
||||
|
||||
// editRequirements returns an edited version of rs such that:
|
||||
//
|
||||
// 1. Each module version in mustSelect is selected.
|
||||
// 1. Each module version in mustSelect is selected.
|
||||
//
|
||||
// 2. Each module version in tryUpgrade is upgraded toward the indicated
|
||||
// version as far as can be done without violating (1).
|
||||
// 2. Each module version in tryUpgrade is upgraded toward the indicated
|
||||
// version as far as can be done without violating (1).
|
||||
//
|
||||
// 3. Each module version in rs.rootModules (or rs.graph, if rs is unpruned)
|
||||
// is downgraded from its original version only to the extent needed to
|
||||
// satisfy (1), or upgraded only to the extent needed to satisfy (1) and
|
||||
// (2).
|
||||
// 3. Each module version in rs.rootModules (or rs.graph, if rs is unpruned)
|
||||
// is downgraded from its original version only to the extent needed to
|
||||
// satisfy (1), or upgraded only to the extent needed to satisfy (1) and
|
||||
// (2).
|
||||
//
|
||||
// 4. No module is upgraded above the maximum version of its path found in the
|
||||
// dependency graph of rs, the combined dependency graph of the versions in
|
||||
// mustSelect, or the dependencies of each individual module version in
|
||||
// tryUpgrade.
|
||||
// 4. No module is upgraded above the maximum version of its path found in the
|
||||
// dependency graph of rs, the combined dependency graph of the versions in
|
||||
// mustSelect, or the dependencies of each individual module version in
|
||||
// tryUpgrade.
|
||||
//
|
||||
// Generally, the module versions in mustSelect are due to the module or a
|
||||
// package within the module matching an explicit command line argument to 'go
|
||||
|
@ -1222,16 +1222,16 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
|
||||
//
|
||||
// In particular:
|
||||
//
|
||||
// - Modules that provide packages directly imported from the main module are
|
||||
// marked as direct, and are promoted to explicit roots. If a needed root
|
||||
// cannot be promoted due to -mod=readonly or -mod=vendor, the importing
|
||||
// package is marked with an error.
|
||||
// - Modules that provide packages directly imported from the main module are
|
||||
// marked as direct, and are promoted to explicit roots. If a needed root
|
||||
// cannot be promoted due to -mod=readonly or -mod=vendor, the importing
|
||||
// package is marked with an error.
|
||||
//
|
||||
// - If ld scanned the "all" pattern independent of build constraints, it is
|
||||
// guaranteed to have seen every direct import. Module dependencies that did
|
||||
// not provide any directly-imported package are then marked as indirect.
|
||||
// - If ld scanned the "all" pattern independent of build constraints, it is
|
||||
// guaranteed to have seen every direct import. Module dependencies that did
|
||||
// not provide any directly-imported package are then marked as indirect.
|
||||
//
|
||||
// - Root dependencies are updated to their selected versions.
|
||||
// - Root dependencies are updated to their selected versions.
|
||||
//
|
||||
// The "changed" return value reports whether the update changed the selected
|
||||
// version of any module that either provided a loaded package or may now
|
||||
|
@ -33,19 +33,27 @@ import (
|
||||
// The version must take one of the following forms:
|
||||
//
|
||||
// - the literal string "latest", denoting the latest available, allowed
|
||||
// tagged version, with non-prereleases preferred over prereleases.
|
||||
// If there are no tagged versions in the repo, latest returns the most
|
||||
// recent commit.
|
||||
//
|
||||
// tagged version, with non-prereleases preferred over prereleases.
|
||||
// If there are no tagged versions in the repo, latest returns the most
|
||||
// recent commit.
|
||||
//
|
||||
// - the literal string "upgrade", equivalent to "latest" except that if
|
||||
// current is a newer version, current will be returned (see below).
|
||||
//
|
||||
// current is a newer version, current will be returned (see below).
|
||||
//
|
||||
// - the literal string "patch", denoting the latest available tagged version
|
||||
// with the same major and minor number as current (see below).
|
||||
//
|
||||
// with the same major and minor number as current (see below).
|
||||
//
|
||||
// - v1, denoting the latest available tagged version v1.x.x.
|
||||
// - v1.2, denoting the latest available tagged version v1.2.x.
|
||||
// - v1.2.3, a semantic version string denoting that tagged version.
|
||||
// - <v1.2.3, <=v1.2.3, >v1.2.3, >=v1.2.3,
|
||||
// denoting the version closest to the target and satisfying the given operator,
|
||||
// with non-prereleases preferred over prereleases.
|
||||
//
|
||||
// denoting the version closest to the target and satisfying the given operator,
|
||||
// with non-prereleases preferred over prereleases.
|
||||
//
|
||||
// - a repository commit identifier or tag, denoting that commit.
|
||||
//
|
||||
// current denotes the currently-selected version of the module; it may be
|
||||
@ -433,9 +441,9 @@ func (qm *queryMatcher) allowsVersion(ctx context.Context, v string) bool {
|
||||
|
||||
// filterVersions classifies versions into releases and pre-releases, filtering
|
||||
// out:
|
||||
// 1. versions that do not satisfy the 'allowed' predicate, and
|
||||
// 2. "+incompatible" versions, if a compatible one satisfies the predicate
|
||||
// and the incompatible version is not preferred.
|
||||
// 1. versions that do not satisfy the 'allowed' predicate, and
|
||||
// 2. "+incompatible" versions, if a compatible one satisfies the predicate
|
||||
// and the incompatible version is not preferred.
|
||||
//
|
||||
// If the allowed predicate returns an error not equivalent to ErrDisallowed,
|
||||
// filterVersions returns that error.
|
||||
|
@ -42,9 +42,9 @@ func RemoveAll(path string) error {
|
||||
// in this package attempt to mitigate.
|
||||
//
|
||||
// Errors considered ephemeral include:
|
||||
// - syscall.ERROR_ACCESS_DENIED
|
||||
// - syscall.ERROR_FILE_NOT_FOUND
|
||||
// - internal/syscall/windows.ERROR_SHARING_VIOLATION
|
||||
// - syscall.ERROR_ACCESS_DENIED
|
||||
// - syscall.ERROR_FILE_NOT_FOUND
|
||||
// - internal/syscall/windows.ERROR_SHARING_VIOLATION
|
||||
//
|
||||
// This set may be expanded in the future; programs must not rely on the
|
||||
// non-ephemerality of any given error.
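The general mitigation is a bounded retry while the error still looks transient; the sketch below is only illustrative, and its isEphemeral predicate is an assumed stand-in rather than the real classifier:

	package main

	import (
		"errors"
		"fmt"
		"os"
		"time"
	)

	// isEphemeral is a stand-in predicate: it treats permission and
	// not-found errors as possibly transient, loosely mirroring the
	// Windows errors listed above.
	func isEphemeral(err error) bool {
		return errors.Is(err, os.ErrPermission) || errors.Is(err, os.ErrNotExist)
	}

	// retry runs op, retrying briefly while the returned error looks ephemeral.
	func retry(op func() error) error {
		deadline := time.Now().Add(500 * time.Millisecond)
		for {
			err := op()
			if err == nil || !isEphemeral(err) || time.Now().After(deadline) {
				return err
			}
			time.Sleep(10 * time.Millisecond)
		}
	}

	func main() {
		// This call keeps failing, then gives up and returns the final error.
		fmt.Println(retry(func() error { return os.Remove("no-such-file") }))
	}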
|
||||
|
@ -30,7 +30,9 @@ func StringList(args ...any) []string {
|
||||
}
|
||||
|
||||
// ToFold returns a string with the property that
|
||||
//
|
||||
// strings.EqualFold(s, t) iff ToFold(s) == ToFold(t)
|
||||
//
|
||||
// This lets us test a large set of strings for fold-equivalent
|
||||
// duplicates without making a quadratic number of calls
|
||||
// to EqualFold. Note that strings.ToUpper and strings.ToLower
|
||||
|
@ -270,6 +270,7 @@ func (f *shuffleFlag) Set(value string) error {
|
||||
// pkg.test's arguments.
|
||||
// We allow known flags both before and after the package name list,
|
||||
// to allow both
|
||||
//
|
||||
// go test fmt -custom-flag-for-fmt-test
|
||||
// go test -x math
|
||||
func testFlags(args []string) (packageNames, passToTest []string) {
|
||||
|
@ -532,16 +532,22 @@ See also: go build, go get, go clean.
|
||||
// libname returns the filename to use for the shared library when using
|
||||
// -buildmode=shared. The rules we use are:
|
||||
// Use arguments for special 'meta' packages:
|
||||
//
|
||||
// std --> libstd.so
|
||||
// std cmd --> libstd,cmd.so
|
||||
//
|
||||
// A single non-meta argument with trailing "/..." is special cased:
|
||||
//
|
||||
// foo/... --> libfoo.so
|
||||
// (A relative path like "./..." expands the "." first)
|
||||
//
|
||||
// Use import paths for other cases, changing '/' to '-':
|
||||
//
|
||||
// somelib --> libsubdir-somelib.so
|
||||
// ./ or ../ --> libsubdir-somelib.so
|
||||
// gopkg.in/tomb.v2 -> libgopkg.in-tomb.v2.so
|
||||
// a/... b/... ---> liba/c,b/d.so - all matching import paths
|
||||
//
|
||||
// Name parts are joined with ','.
|
||||
func libname(args []string, pkgs []*load.Package) (string, error) {
|
||||
var libname string
|
||||
|
@ -1884,6 +1884,7 @@ func (b *Builder) installHeader(ctx context.Context, a *Action) error {
|
||||
}
|
||||
|
||||
// cover runs, in effect,
|
||||
//
|
||||
// go tool cover -mode=b.coverMode -var="varName" -o dst.go src.go
|
||||
func (b *Builder) cover(a *Action, dst, src string, varName string) error {
|
||||
return b.run(a, a.Objdir, "cover "+a.Package.ImportPath, nil,
|
||||
|
@ -13,9 +13,11 @@ that directory, recursively. (Files starting with a period are ignored.)
|
||||
By default, gofmt prints the reformatted sources to standard output.
|
||||
|
||||
Usage:
|
||||
|
||||
gofmt [flags] [path ...]
|
||||
|
||||
The flags are:
|
||||
|
||||
-d
|
||||
Do not print reformatted sources to standard output.
|
||||
If a file's formatting is different than gofmt's, print diffs
|
||||
@ -37,10 +39,10 @@ The flags are:
|
||||
the original file is restored from an automatic backup.
|
||||
|
||||
Debugging support:
|
||||
|
||||
-cpuprofile filename
|
||||
Write cpu profile to the specified file.
|
||||
|
||||
|
||||
The rewrite rule specified with the -r flag must be a string of the form:
|
||||
|
||||
pattern -> replacement
|
||||
@ -57,7 +59,7 @@ such a fragment, gofmt preserves leading indentation as well as leading
|
||||
and trailing spaces, so that individual sections of a Go program can be
|
||||
formatted by piping them through gofmt.
|
||||
|
||||
Examples
|
||||
# Examples
|
||||
|
||||
To check files for unnecessary parentheses:
|
||||
|
||||
@ -71,7 +73,7 @@ To convert the package tree from explicit slice upper bounds to implicit ones:
|
||||
|
||||
gofmt -r 'α[β:len(α)] -> α[β:]' -w $GOROOT/src
|
||||
|
||||
The simplify command
|
||||
# The simplify command
|
||||
|
||||
When invoked with -s gofmt will make the following source transformations where possible.
|
||||
|
||||
|
@ -18,12 +18,12 @@ import (
|
||||
// because some operating systems place a limit on the number of
|
||||
// distinct mapped regions per process. As of this writing:
|
||||
//
|
||||
// Darwin unlimited
|
||||
// DragonFly 1000000 (vm.max_proc_mmap)
|
||||
// FreeBSD unlimited
|
||||
// Linux 65530 (vm.max_map_count) // TODO: query /proc/sys/vm/max_map_count?
|
||||
// NetBSD unlimited
|
||||
// OpenBSD unlimited
|
||||
// Darwin unlimited
|
||||
// DragonFly 1000000 (vm.max_proc_mmap)
|
||||
// FreeBSD unlimited
|
||||
// Linux 65530 (vm.max_map_count) // TODO: query /proc/sys/vm/max_map_count?
|
||||
// NetBSD unlimited
|
||||
// OpenBSD unlimited
|
||||
var mmapLimit int32 = 1<<31 - 1
|
||||
|
||||
func init() {
|
||||
|