[dev.typeparams] all: merge dev.regabi into dev.typeparams

The files below had conflicts that required manual resolution.
The unresolved conflict in noder.go was just in the import
declaration (trivial). All the other conflicts are in tests
where the ERROR regex patterns changed to accommodate gccgo
error messages (incoming from dev.regabi), and to accommodate
types2 in dev.typeparams. They were resolved by accepting the
dev.regabi changes (so as not to lose them) and then by re-
applying whatever changes were needed to make them pass with types2.
Finally, the new test mainsig.go was excluded from run.go when
using types2 due to issue #43308.
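
For illustration only (not part of the commit message; the file,
function, and message texts below are invented, not taken from the
files listed): a merged ERROR pattern in an errorcheck test typically
lists the alternative compiler wordings as a single regex, along the
lines of

	// errorcheck

	package p

	func f() {
		g() // ERROR "undefined: g|undefined name"
	}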

	src/cmd/compile/internal/gc/noder.go
	test/fixedbugs/bug13343.go
	test/fixedbugs/bug462.go
	test/fixedbugs/issue10975.go
	test/fixedbugs/issue11326.go
	test/fixedbugs/issue11361.go
	test/fixedbugs/issue11371.go
	test/fixedbugs/issue11674.go
	test/fixedbugs/issue13365.go
	test/fixedbugs/issue13471.go
	test/fixedbugs/issue14136.go
	test/fixedbugs/issue14321.go
	test/fixedbugs/issue14729.go
	test/fixedbugs/issue15898.go
	test/fixedbugs/issue16439.go
	test/fixedbugs/issue17588.go
	test/fixedbugs/issue19323.go
	test/fixedbugs/issue19482.go
	test/fixedbugs/issue19880.go
	test/fixedbugs/issue20185.go
	test/fixedbugs/issue20227.go
	test/fixedbugs/issue20415.go
	test/fixedbugs/issue20749.go
	test/fixedbugs/issue22794.go
	test/fixedbugs/issue22822.go
	test/fixedbugs/issue22921.go
	test/fixedbugs/issue23823.go
	test/fixedbugs/issue25727.go
	test/fixedbugs/issue26616.go
	test/fixedbugs/issue28079c.go
	test/fixedbugs/issue28450.go
	test/fixedbugs/issue30085.go
	test/fixedbugs/issue30087.go
	test/fixedbugs/issue35291.go
	test/fixedbugs/issue38745.go
	test/fixedbugs/issue41247.go
	test/fixedbugs/issue41440.go
	test/fixedbugs/issue41500.go
	test/fixedbugs/issue4215.go
	test/fixedbugs/issue6402.go
	test/fixedbugs/issue6772.go
	test/fixedbugs/issue7129.go
	test/fixedbugs/issue7150.go
	test/fixedbugs/issue7153.go
	test/fixedbugs/issue7310.go
	test/fixedbugs/issue8183.go
	test/fixedbugs/issue8385.go
	test/fixedbugs/issue8438.go
	test/fixedbugs/issue8440.go
	test/fixedbugs/issue8507.go
	test/fixedbugs/issue9370.go
	test/fixedbugs/issue9521.go

Change-Id: I26e6e326fde6e3fca5400711a253834d710ab7f4
Robert Griesemer 2020-12-21 13:41:23 -08:00
commit 53c4c17b09
327 changed files with 6195 additions and 4618 deletions

api/go1.16.txt Normal file

@@ -0,0 +1,481 @@
pkg archive/zip, method (*ReadCloser) Open(string) (fs.File, error)
pkg archive/zip, method (*Reader) Open(string) (fs.File, error)
pkg crypto/x509, method (SystemRootsError) Unwrap() error
pkg crypto/x509, type CertificateRequest struct, BasicConstraintsValid bool
pkg crypto/x509, type CertificateRequest struct, ExtKeyUsage []ExtKeyUsage
pkg crypto/x509, type CertificateRequest struct, IsCA bool
pkg crypto/x509, type CertificateRequest struct, KeyUsage KeyUsage
pkg crypto/x509, type CertificateRequest struct, MaxPathLen int
pkg crypto/x509, type CertificateRequest struct, MaxPathLenZero bool
pkg crypto/x509, type CertificateRequest struct, PolicyIdentifiers []asn1.ObjectIdentifier
pkg crypto/x509, type CertificateRequest struct, SubjectKeyId []uint8
pkg crypto/x509, type CertificateRequest struct, UnknownExtKeyUsage []asn1.ObjectIdentifier
pkg debug/elf, const DT_ADDRRNGHI = 1879047935
pkg debug/elf, const DT_ADDRRNGHI DynTag
pkg debug/elf, const DT_ADDRRNGLO = 1879047680
pkg debug/elf, const DT_ADDRRNGLO DynTag
pkg debug/elf, const DT_AUDIT = 1879047932
pkg debug/elf, const DT_AUDIT DynTag
pkg debug/elf, const DT_AUXILIARY = 2147483645
pkg debug/elf, const DT_AUXILIARY DynTag
pkg debug/elf, const DT_CHECKSUM = 1879047672
pkg debug/elf, const DT_CHECKSUM DynTag
pkg debug/elf, const DT_CONFIG = 1879047930
pkg debug/elf, const DT_CONFIG DynTag
pkg debug/elf, const DT_DEPAUDIT = 1879047931
pkg debug/elf, const DT_DEPAUDIT DynTag
pkg debug/elf, const DT_FEATURE = 1879047676
pkg debug/elf, const DT_FEATURE DynTag
pkg debug/elf, const DT_FILTER = 2147483647
pkg debug/elf, const DT_FILTER DynTag
pkg debug/elf, const DT_FLAGS_1 = 1879048187
pkg debug/elf, const DT_FLAGS_1 DynTag
pkg debug/elf, const DT_GNU_CONFLICT = 1879047928
pkg debug/elf, const DT_GNU_CONFLICT DynTag
pkg debug/elf, const DT_GNU_CONFLICTSZ = 1879047670
pkg debug/elf, const DT_GNU_CONFLICTSZ DynTag
pkg debug/elf, const DT_GNU_HASH = 1879047925
pkg debug/elf, const DT_GNU_HASH DynTag
pkg debug/elf, const DT_GNU_LIBLIST = 1879047929
pkg debug/elf, const DT_GNU_LIBLIST DynTag
pkg debug/elf, const DT_GNU_LIBLISTSZ = 1879047671
pkg debug/elf, const DT_GNU_LIBLISTSZ DynTag
pkg debug/elf, const DT_GNU_PRELINKED = 1879047669
pkg debug/elf, const DT_GNU_PRELINKED DynTag
pkg debug/elf, const DT_MIPS_AUX_DYNAMIC = 1879048241
pkg debug/elf, const DT_MIPS_AUX_DYNAMIC DynTag
pkg debug/elf, const DT_MIPS_BASE_ADDRESS = 1879048198
pkg debug/elf, const DT_MIPS_BASE_ADDRESS DynTag
pkg debug/elf, const DT_MIPS_COMPACT_SIZE = 1879048239
pkg debug/elf, const DT_MIPS_COMPACT_SIZE DynTag
pkg debug/elf, const DT_MIPS_CONFLICT = 1879048200
pkg debug/elf, const DT_MIPS_CONFLICT DynTag
pkg debug/elf, const DT_MIPS_CONFLICTNO = 1879048203
pkg debug/elf, const DT_MIPS_CONFLICTNO DynTag
pkg debug/elf, const DT_MIPS_CXX_FLAGS = 1879048226
pkg debug/elf, const DT_MIPS_CXX_FLAGS DynTag
pkg debug/elf, const DT_MIPS_DELTA_CLASS = 1879048215
pkg debug/elf, const DT_MIPS_DELTA_CLASS DynTag
pkg debug/elf, const DT_MIPS_DELTA_CLASSSYM = 1879048224
pkg debug/elf, const DT_MIPS_DELTA_CLASSSYM DynTag
pkg debug/elf, const DT_MIPS_DELTA_CLASSSYM_NO = 1879048225
pkg debug/elf, const DT_MIPS_DELTA_CLASSSYM_NO DynTag
pkg debug/elf, const DT_MIPS_DELTA_CLASS_NO = 1879048216
pkg debug/elf, const DT_MIPS_DELTA_CLASS_NO DynTag
pkg debug/elf, const DT_MIPS_DELTA_INSTANCE = 1879048217
pkg debug/elf, const DT_MIPS_DELTA_INSTANCE DynTag
pkg debug/elf, const DT_MIPS_DELTA_INSTANCE_NO = 1879048218
pkg debug/elf, const DT_MIPS_DELTA_INSTANCE_NO DynTag
pkg debug/elf, const DT_MIPS_DELTA_RELOC = 1879048219
pkg debug/elf, const DT_MIPS_DELTA_RELOC DynTag
pkg debug/elf, const DT_MIPS_DELTA_RELOC_NO = 1879048220
pkg debug/elf, const DT_MIPS_DELTA_RELOC_NO DynTag
pkg debug/elf, const DT_MIPS_DELTA_SYM = 1879048221
pkg debug/elf, const DT_MIPS_DELTA_SYM DynTag
pkg debug/elf, const DT_MIPS_DELTA_SYM_NO = 1879048222
pkg debug/elf, const DT_MIPS_DELTA_SYM_NO DynTag
pkg debug/elf, const DT_MIPS_DYNSTR_ALIGN = 1879048235
pkg debug/elf, const DT_MIPS_DYNSTR_ALIGN DynTag
pkg debug/elf, const DT_MIPS_FLAGS = 1879048197
pkg debug/elf, const DT_MIPS_FLAGS DynTag
pkg debug/elf, const DT_MIPS_GOTSYM = 1879048211
pkg debug/elf, const DT_MIPS_GOTSYM DynTag
pkg debug/elf, const DT_MIPS_GP_VALUE = 1879048240
pkg debug/elf, const DT_MIPS_GP_VALUE DynTag
pkg debug/elf, const DT_MIPS_HIDDEN_GOTIDX = 1879048231
pkg debug/elf, const DT_MIPS_HIDDEN_GOTIDX DynTag
pkg debug/elf, const DT_MIPS_HIPAGENO = 1879048212
pkg debug/elf, const DT_MIPS_HIPAGENO DynTag
pkg debug/elf, const DT_MIPS_ICHECKSUM = 1879048195
pkg debug/elf, const DT_MIPS_ICHECKSUM DynTag
pkg debug/elf, const DT_MIPS_INTERFACE = 1879048234
pkg debug/elf, const DT_MIPS_INTERFACE DynTag
pkg debug/elf, const DT_MIPS_INTERFACE_SIZE = 1879048236
pkg debug/elf, const DT_MIPS_INTERFACE_SIZE DynTag
pkg debug/elf, const DT_MIPS_IVERSION = 1879048196
pkg debug/elf, const DT_MIPS_IVERSION DynTag
pkg debug/elf, const DT_MIPS_LIBLIST = 1879048201
pkg debug/elf, const DT_MIPS_LIBLIST DynTag
pkg debug/elf, const DT_MIPS_LIBLISTNO = 1879048208
pkg debug/elf, const DT_MIPS_LIBLISTNO DynTag
pkg debug/elf, const DT_MIPS_LOCALPAGE_GOTIDX = 1879048229
pkg debug/elf, const DT_MIPS_LOCALPAGE_GOTIDX DynTag
pkg debug/elf, const DT_MIPS_LOCAL_GOTIDX = 1879048230
pkg debug/elf, const DT_MIPS_LOCAL_GOTIDX DynTag
pkg debug/elf, const DT_MIPS_LOCAL_GOTNO = 1879048202
pkg debug/elf, const DT_MIPS_LOCAL_GOTNO DynTag
pkg debug/elf, const DT_MIPS_MSYM = 1879048199
pkg debug/elf, const DT_MIPS_MSYM DynTag
pkg debug/elf, const DT_MIPS_OPTIONS = 1879048233
pkg debug/elf, const DT_MIPS_OPTIONS DynTag
pkg debug/elf, const DT_MIPS_PERF_SUFFIX = 1879048238
pkg debug/elf, const DT_MIPS_PERF_SUFFIX DynTag
pkg debug/elf, const DT_MIPS_PIXIE_INIT = 1879048227
pkg debug/elf, const DT_MIPS_PIXIE_INIT DynTag
pkg debug/elf, const DT_MIPS_PLTGOT = 1879048242
pkg debug/elf, const DT_MIPS_PLTGOT DynTag
pkg debug/elf, const DT_MIPS_PROTECTED_GOTIDX = 1879048232
pkg debug/elf, const DT_MIPS_PROTECTED_GOTIDX DynTag
pkg debug/elf, const DT_MIPS_RLD_MAP = 1879048214
pkg debug/elf, const DT_MIPS_RLD_MAP DynTag
pkg debug/elf, const DT_MIPS_RLD_MAP_REL = 1879048245
pkg debug/elf, const DT_MIPS_RLD_MAP_REL DynTag
pkg debug/elf, const DT_MIPS_RLD_TEXT_RESOLVE_ADDR = 1879048237
pkg debug/elf, const DT_MIPS_RLD_TEXT_RESOLVE_ADDR DynTag
pkg debug/elf, const DT_MIPS_RLD_VERSION = 1879048193
pkg debug/elf, const DT_MIPS_RLD_VERSION DynTag
pkg debug/elf, const DT_MIPS_RWPLT = 1879048244
pkg debug/elf, const DT_MIPS_RWPLT DynTag
pkg debug/elf, const DT_MIPS_SYMBOL_LIB = 1879048228
pkg debug/elf, const DT_MIPS_SYMBOL_LIB DynTag
pkg debug/elf, const DT_MIPS_SYMTABNO = 1879048209
pkg debug/elf, const DT_MIPS_SYMTABNO DynTag
pkg debug/elf, const DT_MIPS_TIME_STAMP = 1879048194
pkg debug/elf, const DT_MIPS_TIME_STAMP DynTag
pkg debug/elf, const DT_MIPS_UNREFEXTNO = 1879048210
pkg debug/elf, const DT_MIPS_UNREFEXTNO DynTag
pkg debug/elf, const DT_MOVEENT = 1879047674
pkg debug/elf, const DT_MOVEENT DynTag
pkg debug/elf, const DT_MOVESZ = 1879047675
pkg debug/elf, const DT_MOVESZ DynTag
pkg debug/elf, const DT_MOVETAB = 1879047934
pkg debug/elf, const DT_MOVETAB DynTag
pkg debug/elf, const DT_PLTPAD = 1879047933
pkg debug/elf, const DT_PLTPAD DynTag
pkg debug/elf, const DT_PLTPADSZ = 1879047673
pkg debug/elf, const DT_PLTPADSZ DynTag
pkg debug/elf, const DT_POSFLAG_1 = 1879047677
pkg debug/elf, const DT_POSFLAG_1 DynTag
pkg debug/elf, const DT_PPC64_GLINK = 1879048192
pkg debug/elf, const DT_PPC64_GLINK DynTag
pkg debug/elf, const DT_PPC64_OPD = 1879048193
pkg debug/elf, const DT_PPC64_OPD DynTag
pkg debug/elf, const DT_PPC64_OPDSZ = 1879048194
pkg debug/elf, const DT_PPC64_OPDSZ DynTag
pkg debug/elf, const DT_PPC64_OPT = 1879048195
pkg debug/elf, const DT_PPC64_OPT DynTag
pkg debug/elf, const DT_PPC_GOT = 1879048192
pkg debug/elf, const DT_PPC_GOT DynTag
pkg debug/elf, const DT_PPC_OPT = 1879048193
pkg debug/elf, const DT_PPC_OPT DynTag
pkg debug/elf, const DT_RELACOUNT = 1879048185
pkg debug/elf, const DT_RELACOUNT DynTag
pkg debug/elf, const DT_RELCOUNT = 1879048186
pkg debug/elf, const DT_RELCOUNT DynTag
pkg debug/elf, const DT_SPARC_REGISTER = 1879048193
pkg debug/elf, const DT_SPARC_REGISTER DynTag
pkg debug/elf, const DT_SYMINENT = 1879047679
pkg debug/elf, const DT_SYMINENT DynTag
pkg debug/elf, const DT_SYMINFO = 1879047935
pkg debug/elf, const DT_SYMINFO DynTag
pkg debug/elf, const DT_SYMINSZ = 1879047678
pkg debug/elf, const DT_SYMINSZ DynTag
pkg debug/elf, const DT_SYMTAB_SHNDX = 34
pkg debug/elf, const DT_SYMTAB_SHNDX DynTag
pkg debug/elf, const DT_TLSDESC_GOT = 1879047927
pkg debug/elf, const DT_TLSDESC_GOT DynTag
pkg debug/elf, const DT_TLSDESC_PLT = 1879047926
pkg debug/elf, const DT_TLSDESC_PLT DynTag
pkg debug/elf, const DT_USED = 2147483646
pkg debug/elf, const DT_USED DynTag
pkg debug/elf, const DT_VALRNGHI = 1879047679
pkg debug/elf, const DT_VALRNGHI DynTag
pkg debug/elf, const DT_VALRNGLO = 1879047424
pkg debug/elf, const DT_VALRNGLO DynTag
pkg debug/elf, const DT_VERDEF = 1879048188
pkg debug/elf, const DT_VERDEF DynTag
pkg debug/elf, const DT_VERDEFNUM = 1879048189
pkg debug/elf, const DT_VERDEFNUM DynTag
pkg debug/elf, const PT_AARCH64_ARCHEXT = 1879048192
pkg debug/elf, const PT_AARCH64_ARCHEXT ProgType
pkg debug/elf, const PT_AARCH64_UNWIND = 1879048193
pkg debug/elf, const PT_AARCH64_UNWIND ProgType
pkg debug/elf, const PT_ARM_ARCHEXT = 1879048192
pkg debug/elf, const PT_ARM_ARCHEXT ProgType
pkg debug/elf, const PT_ARM_EXIDX = 1879048193
pkg debug/elf, const PT_ARM_EXIDX ProgType
pkg debug/elf, const PT_GNU_EH_FRAME = 1685382480
pkg debug/elf, const PT_GNU_EH_FRAME ProgType
pkg debug/elf, const PT_GNU_MBIND_HI = 1685386580
pkg debug/elf, const PT_GNU_MBIND_HI ProgType
pkg debug/elf, const PT_GNU_MBIND_LO = 1685382485
pkg debug/elf, const PT_GNU_MBIND_LO ProgType
pkg debug/elf, const PT_GNU_PROPERTY = 1685382483
pkg debug/elf, const PT_GNU_PROPERTY ProgType
pkg debug/elf, const PT_GNU_RELRO = 1685382482
pkg debug/elf, const PT_GNU_RELRO ProgType
pkg debug/elf, const PT_GNU_STACK = 1685382481
pkg debug/elf, const PT_GNU_STACK ProgType
pkg debug/elf, const PT_MIPS_ABIFLAGS = 1879048195
pkg debug/elf, const PT_MIPS_ABIFLAGS ProgType
pkg debug/elf, const PT_MIPS_OPTIONS = 1879048194
pkg debug/elf, const PT_MIPS_OPTIONS ProgType
pkg debug/elf, const PT_MIPS_REGINFO = 1879048192
pkg debug/elf, const PT_MIPS_REGINFO ProgType
pkg debug/elf, const PT_MIPS_RTPROC = 1879048193
pkg debug/elf, const PT_MIPS_RTPROC ProgType
pkg debug/elf, const PT_OPENBSD_BOOTDATA = 1705253862
pkg debug/elf, const PT_OPENBSD_BOOTDATA ProgType
pkg debug/elf, const PT_OPENBSD_RANDOMIZE = 1705237478
pkg debug/elf, const PT_OPENBSD_RANDOMIZE ProgType
pkg debug/elf, const PT_OPENBSD_WXNEEDED = 1705237479
pkg debug/elf, const PT_OPENBSD_WXNEEDED ProgType
pkg debug/elf, const PT_PAX_FLAGS = 1694766464
pkg debug/elf, const PT_PAX_FLAGS ProgType
pkg debug/elf, const PT_S390_PGSTE = 1879048192
pkg debug/elf, const PT_S390_PGSTE ProgType
pkg debug/elf, const PT_SUNWSTACK = 1879048187
pkg debug/elf, const PT_SUNWSTACK ProgType
pkg debug/elf, const PT_SUNW_EH_FRAME = 1685382480
pkg debug/elf, const PT_SUNW_EH_FRAME ProgType
pkg embed, method (FS) Open(string) (fs.File, error)
pkg embed, method (FS) ReadDir(string) ([]fs.DirEntry, error)
pkg embed, method (FS) ReadFile(string) ([]uint8, error)
pkg embed, type FS struct
pkg flag, func Func(string, string, func(string) error)
pkg flag, method (*FlagSet) Func(string, string, func(string) error)
pkg go/build, type Package struct, EmbedPatterns []string
pkg go/build, type Package struct, IgnoredOtherFiles []string
pkg go/build, type Package struct, TestEmbedPatterns []string
pkg go/build, type Package struct, XTestEmbedPatterns []string
pkg html/template, func ParseFS(fs.FS, ...string) (*Template, error)
pkg html/template, method (*Template) ParseFS(fs.FS, ...string) (*Template, error)
pkg io, func NopCloser(Reader) ReadCloser
pkg io, func ReadAll(Reader) ([]uint8, error)
pkg io, type ReadSeekCloser interface { Close, Read, Seek }
pkg io, type ReadSeekCloser interface, Close() error
pkg io, type ReadSeekCloser interface, Read([]uint8) (int, error)
pkg io, type ReadSeekCloser interface, Seek(int64, int) (int64, error)
pkg io, var Discard Writer
pkg io/fs, const ModeAppend = 1073741824
pkg io/fs, const ModeAppend FileMode
pkg io/fs, const ModeCharDevice = 2097152
pkg io/fs, const ModeCharDevice FileMode
pkg io/fs, const ModeDevice = 67108864
pkg io/fs, const ModeDevice FileMode
pkg io/fs, const ModeDir = 2147483648
pkg io/fs, const ModeDir FileMode
pkg io/fs, const ModeExclusive = 536870912
pkg io/fs, const ModeExclusive FileMode
pkg io/fs, const ModeIrregular = 524288
pkg io/fs, const ModeIrregular FileMode
pkg io/fs, const ModeNamedPipe = 33554432
pkg io/fs, const ModeNamedPipe FileMode
pkg io/fs, const ModePerm = 511
pkg io/fs, const ModePerm FileMode
pkg io/fs, const ModeSetgid = 4194304
pkg io/fs, const ModeSetgid FileMode
pkg io/fs, const ModeSetuid = 8388608
pkg io/fs, const ModeSetuid FileMode
pkg io/fs, const ModeSocket = 16777216
pkg io/fs, const ModeSocket FileMode
pkg io/fs, const ModeSticky = 1048576
pkg io/fs, const ModeSticky FileMode
pkg io/fs, const ModeSymlink = 134217728
pkg io/fs, const ModeSymlink FileMode
pkg io/fs, const ModeTemporary = 268435456
pkg io/fs, const ModeTemporary FileMode
pkg io/fs, const ModeType = 2401763328
pkg io/fs, const ModeType FileMode
pkg io/fs, func Glob(FS, string) ([]string, error)
pkg io/fs, func ReadDir(FS, string) ([]DirEntry, error)
pkg io/fs, func ReadFile(FS, string) ([]uint8, error)
pkg io/fs, func Stat(FS, string) (FileInfo, error)
pkg io/fs, func Sub(FS, string) (FS, error)
pkg io/fs, func ValidPath(string) bool
pkg io/fs, func WalkDir(FS, string, WalkDirFunc) error
pkg io/fs, method (*PathError) Error() string
pkg io/fs, method (*PathError) Timeout() bool
pkg io/fs, method (*PathError) Unwrap() error
pkg io/fs, method (FileMode) IsDir() bool
pkg io/fs, method (FileMode) IsRegular() bool
pkg io/fs, method (FileMode) Perm() FileMode
pkg io/fs, method (FileMode) String() string
pkg io/fs, method (FileMode) Type() FileMode
pkg io/fs, type DirEntry interface { Info, IsDir, Name, Type }
pkg io/fs, type DirEntry interface, Info() (FileInfo, error)
pkg io/fs, type DirEntry interface, IsDir() bool
pkg io/fs, type DirEntry interface, Name() string
pkg io/fs, type DirEntry interface, Type() FileMode
pkg io/fs, type FS interface { Open }
pkg io/fs, type FS interface, Open(string) (File, error)
pkg io/fs, type File interface { Close, Read, Stat }
pkg io/fs, type File interface, Close() error
pkg io/fs, type File interface, Read([]uint8) (int, error)
pkg io/fs, type File interface, Stat() (FileInfo, error)
pkg io/fs, type FileInfo interface { IsDir, ModTime, Mode, Name, Size, Sys }
pkg io/fs, type FileInfo interface, IsDir() bool
pkg io/fs, type FileInfo interface, ModTime() time.Time
pkg io/fs, type FileInfo interface, Mode() FileMode
pkg io/fs, type FileInfo interface, Name() string
pkg io/fs, type FileInfo interface, Size() int64
pkg io/fs, type FileInfo interface, Sys() interface{}
pkg io/fs, type FileMode uint32
pkg io/fs, type GlobFS interface { Glob, Open }
pkg io/fs, type GlobFS interface, Glob(string) ([]string, error)
pkg io/fs, type GlobFS interface, Open(string) (File, error)
pkg io/fs, type PathError struct
pkg io/fs, type PathError struct, Err error
pkg io/fs, type PathError struct, Op string
pkg io/fs, type PathError struct, Path string
pkg io/fs, type ReadDirFS interface { Open, ReadDir }
pkg io/fs, type ReadDirFS interface, Open(string) (File, error)
pkg io/fs, type ReadDirFS interface, ReadDir(string) ([]DirEntry, error)
pkg io/fs, type ReadDirFile interface { Close, Read, ReadDir, Stat }
pkg io/fs, type ReadDirFile interface, Close() error
pkg io/fs, type ReadDirFile interface, Read([]uint8) (int, error)
pkg io/fs, type ReadDirFile interface, ReadDir(int) ([]DirEntry, error)
pkg io/fs, type ReadDirFile interface, Stat() (FileInfo, error)
pkg io/fs, type ReadFileFS interface { Open, ReadFile }
pkg io/fs, type ReadFileFS interface, Open(string) (File, error)
pkg io/fs, type ReadFileFS interface, ReadFile(string) ([]uint8, error)
pkg io/fs, type StatFS interface { Open, Stat }
pkg io/fs, type StatFS interface, Open(string) (File, error)
pkg io/fs, type StatFS interface, Stat(string) (FileInfo, error)
pkg io/fs, type SubFS interface { Open, Sub }
pkg io/fs, type SubFS interface, Open(string) (File, error)
pkg io/fs, type SubFS interface, Sub(string) (FS, error)
pkg io/fs, type WalkDirFunc func(string, DirEntry, error) error
pkg io/fs, var ErrClosed error
pkg io/fs, var ErrExist error
pkg io/fs, var ErrInvalid error
pkg io/fs, var ErrNotExist error
pkg io/fs, var ErrPermission error
pkg io/fs, var SkipDir error
pkg log, func Default() *Logger
pkg net, var ErrClosed error
pkg net/http, func FS(fs.FS) FileSystem
pkg net/http, type Transport struct, GetProxyConnectHeader func(context.Context, *url.URL, string) (Header, error)
pkg os, const ModeAppend fs.FileMode
pkg os, const ModeCharDevice fs.FileMode
pkg os, const ModeDevice fs.FileMode
pkg os, const ModeDir fs.FileMode
pkg os, const ModeExclusive fs.FileMode
pkg os, const ModeIrregular fs.FileMode
pkg os, const ModeNamedPipe fs.FileMode
pkg os, const ModePerm fs.FileMode
pkg os, const ModeSetgid fs.FileMode
pkg os, const ModeSetuid fs.FileMode
pkg os, const ModeSocket fs.FileMode
pkg os, const ModeSticky fs.FileMode
pkg os, const ModeSymlink fs.FileMode
pkg os, const ModeTemporary fs.FileMode
pkg os, const ModeType fs.FileMode
pkg os, func Chmod(string, fs.FileMode) error
pkg os, func CreateTemp(string, string) (*File, error)
pkg os, func DirFS(string) fs.FS
pkg os, func Lstat(string) (fs.FileInfo, error)
pkg os, func Mkdir(string, fs.FileMode) error
pkg os, func MkdirAll(string, fs.FileMode) error
pkg os, func MkdirTemp(string, string) (string, error)
pkg os, func OpenFile(string, int, fs.FileMode) (*File, error)
pkg os, func ReadDir(string) ([]fs.DirEntry, error)
pkg os, func ReadFile(string) ([]uint8, error)
pkg os, func SameFile(fs.FileInfo, fs.FileInfo) bool
pkg os, func Stat(string) (fs.FileInfo, error)
pkg os, func WriteFile(string, []uint8, fs.FileMode) error
pkg os, method (*File) Chmod(fs.FileMode) error
pkg os, method (*File) ReadDir(int) ([]fs.DirEntry, error)
pkg os, method (*File) Readdir(int) ([]fs.FileInfo, error)
pkg os, method (*File) Stat() (fs.FileInfo, error)
pkg os, type DirEntry = fs.DirEntry
pkg os, type FileInfo = fs.FileInfo
pkg os, type FileMode = fs.FileMode
pkg os, type PathError = fs.PathError
pkg os, var ErrProcessDone error
pkg os/signal, func NotifyContext(context.Context, ...os.Signal) (context.Context, context.CancelFunc)
pkg path/filepath, func WalkDir(string, fs.WalkDirFunc) error
pkg runtime/metrics, const KindBad = 0
pkg runtime/metrics, const KindBad ValueKind
pkg runtime/metrics, const KindFloat64 = 2
pkg runtime/metrics, const KindFloat64 ValueKind
pkg runtime/metrics, const KindFloat64Histogram = 3
pkg runtime/metrics, const KindFloat64Histogram ValueKind
pkg runtime/metrics, const KindUint64 = 1
pkg runtime/metrics, const KindUint64 ValueKind
pkg runtime/metrics, func All() []Description
pkg runtime/metrics, func Read([]Sample)
pkg runtime/metrics, method (Value) Float64() float64
pkg runtime/metrics, method (Value) Float64Histogram() *Float64Histogram
pkg runtime/metrics, method (Value) Kind() ValueKind
pkg runtime/metrics, method (Value) Uint64() uint64
pkg runtime/metrics, type Description struct
pkg runtime/metrics, type Description struct, Cumulative bool
pkg runtime/metrics, type Description struct, Description string
pkg runtime/metrics, type Description struct, Kind ValueKind
pkg runtime/metrics, type Description struct, Name string
pkg runtime/metrics, type Description struct, StopTheWorld bool
pkg runtime/metrics, type Float64Histogram struct
pkg runtime/metrics, type Float64Histogram struct, Buckets []float64
pkg runtime/metrics, type Float64Histogram struct, Counts []uint64
pkg runtime/metrics, type Sample struct
pkg runtime/metrics, type Sample struct, Name string
pkg runtime/metrics, type Sample struct, Value Value
pkg runtime/metrics, type Value struct
pkg runtime/metrics, type ValueKind int
pkg syscall (linux-386), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386), func Setegid(int) error
pkg syscall (linux-386), func Seteuid(int) error
pkg syscall (linux-386-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386-cgo), func Setegid(int) error
pkg syscall (linux-386-cgo), func Seteuid(int) error
pkg syscall (linux-amd64), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64), func Setegid(int) error
pkg syscall (linux-amd64), func Seteuid(int) error
pkg syscall (linux-amd64-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64-cgo), func Setegid(int) error
pkg syscall (linux-amd64-cgo), func Seteuid(int) error
pkg syscall (linux-arm), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm), func Setegid(int) error
pkg syscall (linux-arm), func Seteuid(int) error
pkg syscall (linux-arm-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm-cgo), func Setegid(int) error
pkg syscall (linux-arm-cgo), func Seteuid(int) error
pkg syscall (windows-386), func RtlGenRandom(*uint8, uint32) error
pkg syscall (windows-386), method (*DLLError) Unwrap() error
pkg syscall (windows-386), type SysProcAttr struct, NoInheritHandles bool
pkg syscall (windows-amd64), func RtlGenRandom(*uint8, uint32) error
pkg syscall (windows-amd64), method (*DLLError) Unwrap() error
pkg syscall (windows-amd64), type SysProcAttr struct, NoInheritHandles bool
pkg testing/fstest, func TestFS(fs.FS, ...string) error
pkg testing/fstest, method (MapFS) Glob(string) ([]string, error)
pkg testing/fstest, method (MapFS) Open(string) (fs.File, error)
pkg testing/fstest, method (MapFS) ReadDir(string) ([]fs.DirEntry, error)
pkg testing/fstest, method (MapFS) ReadFile(string) ([]uint8, error)
pkg testing/fstest, method (MapFS) Stat(string) (fs.FileInfo, error)
pkg testing/fstest, method (MapFS) Sub(string) (fs.FS, error)
pkg testing/fstest, type MapFS map[string]*MapFile
pkg testing/fstest, type MapFile struct
pkg testing/fstest, type MapFile struct, Data []uint8
pkg testing/fstest, type MapFile struct, ModTime time.Time
pkg testing/fstest, type MapFile struct, Mode fs.FileMode
pkg testing/fstest, type MapFile struct, Sys interface{}
pkg testing/iotest, func ErrReader(error) io.Reader
pkg testing/iotest, func TestReader(io.Reader, []uint8) error
pkg text/template, func ParseFS(fs.FS, ...string) (*Template, error)
pkg text/template, method (*Template) ParseFS(fs.FS, ...string) (*Template, error)
pkg text/template/parse, const NodeComment = 20
pkg text/template/parse, const NodeComment NodeType
pkg text/template/parse, const ParseComments = 1
pkg text/template/parse, const ParseComments Mode
pkg text/template/parse, method (*CommentNode) Copy() Node
pkg text/template/parse, method (*CommentNode) String() string
pkg text/template/parse, method (CommentNode) Position() Pos
pkg text/template/parse, method (CommentNode) Type() NodeType
pkg text/template/parse, type CommentNode struct
pkg text/template/parse, type CommentNode struct, Text string
pkg text/template/parse, type CommentNode struct, embedded NodeType
pkg text/template/parse, type CommentNode struct, embedded Pos
pkg text/template/parse, type Mode uint
pkg text/template/parse, type Tree struct, Mode Mode
pkg unicode, const Version = "13.0.0"
pkg unicode, var Chorasmian *RangeTable
pkg unicode, var Dives_Akuru *RangeTable
pkg unicode, var Khitan_Small_Script *RangeTable
pkg unicode, var Yezidi *RangeTable
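
An aside, not part of this commit: to give a feel for the io/fs
additions listed above, here is a minimal sketch (names and paths are
illustrative) combining fstest.MapFS, fs.ReadFile, and fs.WalkDir:

	package main

	import (
		"fmt"
		"io/fs"
		"testing/fstest"
	)

	func main() {
		// An in-memory fs.FS via the new testing/fstest.MapFS.
		fsys := fstest.MapFS{
			"docs/hello.txt": &fstest.MapFile{Data: []byte("hello")},
		}

		// fs.ReadFile and fs.WalkDir are among the io/fs helpers listed above.
		data, err := fs.ReadFile(fsys, "docs/hello.txt")
		if err != nil {
			panic(err)
		}
		fmt.Println(string(data))

		_ = fs.WalkDir(fsys, ".", func(p string, d fs.DirEntry, err error) error {
			fmt.Println(p, d.IsDir())
			return err
		})
	}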


@@ -1,452 +0,0 @@
pkg archive/zip, method (*ReadCloser) Open(string) (fs.File, error)
pkg archive/zip, method (*Reader) Open(string) (fs.File, error)
pkg debug/elf, const DT_ADDRRNGHI = 1879047935
pkg debug/elf, const DT_ADDRRNGHI DynTag
pkg debug/elf, const DT_ADDRRNGLO = 1879047680
pkg debug/elf, const DT_ADDRRNGLO DynTag
pkg debug/elf, const DT_AUDIT = 1879047932
pkg debug/elf, const DT_AUDIT DynTag
pkg debug/elf, const DT_AUXILIARY = 2147483645
pkg debug/elf, const DT_AUXILIARY DynTag
pkg debug/elf, const DT_CHECKSUM = 1879047672
pkg debug/elf, const DT_CHECKSUM DynTag
pkg debug/elf, const DT_CONFIG = 1879047930
pkg debug/elf, const DT_CONFIG DynTag
pkg debug/elf, const DT_DEPAUDIT = 1879047931
pkg debug/elf, const DT_DEPAUDIT DynTag
pkg debug/elf, const DT_FEATURE = 1879047676
pkg debug/elf, const DT_FEATURE DynTag
pkg debug/elf, const DT_FILTER = 2147483647
pkg debug/elf, const DT_FILTER DynTag
pkg debug/elf, const DT_FLAGS_1 = 1879048187
pkg debug/elf, const DT_FLAGS_1 DynTag
pkg debug/elf, const DT_GNU_CONFLICT = 1879047928
pkg debug/elf, const DT_GNU_CONFLICT DynTag
pkg debug/elf, const DT_GNU_CONFLICTSZ = 1879047670
pkg debug/elf, const DT_GNU_CONFLICTSZ DynTag
pkg debug/elf, const DT_GNU_HASH = 1879047925
pkg debug/elf, const DT_GNU_HASH DynTag
pkg debug/elf, const DT_GNU_LIBLIST = 1879047929
pkg debug/elf, const DT_GNU_LIBLIST DynTag
pkg debug/elf, const DT_GNU_LIBLISTSZ = 1879047671
pkg debug/elf, const DT_GNU_LIBLISTSZ DynTag
pkg debug/elf, const DT_GNU_PRELINKED = 1879047669
pkg debug/elf, const DT_GNU_PRELINKED DynTag
pkg debug/elf, const DT_MIPS_AUX_DYNAMIC = 1879048241
pkg debug/elf, const DT_MIPS_AUX_DYNAMIC DynTag
pkg debug/elf, const DT_MIPS_BASE_ADDRESS = 1879048198
pkg debug/elf, const DT_MIPS_BASE_ADDRESS DynTag
pkg debug/elf, const DT_MIPS_COMPACT_SIZE = 1879048239
pkg debug/elf, const DT_MIPS_COMPACT_SIZE DynTag
pkg debug/elf, const DT_MIPS_CONFLICT = 1879048200
pkg debug/elf, const DT_MIPS_CONFLICT DynTag
pkg debug/elf, const DT_MIPS_CONFLICTNO = 1879048203
pkg debug/elf, const DT_MIPS_CONFLICTNO DynTag
pkg debug/elf, const DT_MIPS_CXX_FLAGS = 1879048226
pkg debug/elf, const DT_MIPS_CXX_FLAGS DynTag
pkg debug/elf, const DT_MIPS_DELTA_CLASS = 1879048215
pkg debug/elf, const DT_MIPS_DELTA_CLASS DynTag
pkg debug/elf, const DT_MIPS_DELTA_CLASSSYM = 1879048224
pkg debug/elf, const DT_MIPS_DELTA_CLASSSYM DynTag
pkg debug/elf, const DT_MIPS_DELTA_CLASSSYM_NO = 1879048225
pkg debug/elf, const DT_MIPS_DELTA_CLASSSYM_NO DynTag
pkg debug/elf, const DT_MIPS_DELTA_CLASS_NO = 1879048216
pkg debug/elf, const DT_MIPS_DELTA_CLASS_NO DynTag
pkg debug/elf, const DT_MIPS_DELTA_INSTANCE = 1879048217
pkg debug/elf, const DT_MIPS_DELTA_INSTANCE DynTag
pkg debug/elf, const DT_MIPS_DELTA_INSTANCE_NO = 1879048218
pkg debug/elf, const DT_MIPS_DELTA_INSTANCE_NO DynTag
pkg debug/elf, const DT_MIPS_DELTA_RELOC = 1879048219
pkg debug/elf, const DT_MIPS_DELTA_RELOC DynTag
pkg debug/elf, const DT_MIPS_DELTA_RELOC_NO = 1879048220
pkg debug/elf, const DT_MIPS_DELTA_RELOC_NO DynTag
pkg debug/elf, const DT_MIPS_DELTA_SYM = 1879048221
pkg debug/elf, const DT_MIPS_DELTA_SYM DynTag
pkg debug/elf, const DT_MIPS_DELTA_SYM_NO = 1879048222
pkg debug/elf, const DT_MIPS_DELTA_SYM_NO DynTag
pkg debug/elf, const DT_MIPS_DYNSTR_ALIGN = 1879048235
pkg debug/elf, const DT_MIPS_DYNSTR_ALIGN DynTag
pkg debug/elf, const DT_MIPS_FLAGS = 1879048197
pkg debug/elf, const DT_MIPS_FLAGS DynTag
pkg debug/elf, const DT_MIPS_GOTSYM = 1879048211
pkg debug/elf, const DT_MIPS_GOTSYM DynTag
pkg debug/elf, const DT_MIPS_GP_VALUE = 1879048240
pkg debug/elf, const DT_MIPS_GP_VALUE DynTag
pkg debug/elf, const DT_MIPS_HIDDEN_GOTIDX = 1879048231
pkg debug/elf, const DT_MIPS_HIDDEN_GOTIDX DynTag
pkg debug/elf, const DT_MIPS_HIPAGENO = 1879048212
pkg debug/elf, const DT_MIPS_HIPAGENO DynTag
pkg debug/elf, const DT_MIPS_ICHECKSUM = 1879048195
pkg debug/elf, const DT_MIPS_ICHECKSUM DynTag
pkg debug/elf, const DT_MIPS_INTERFACE = 1879048234
pkg debug/elf, const DT_MIPS_INTERFACE DynTag
pkg debug/elf, const DT_MIPS_INTERFACE_SIZE = 1879048236
pkg debug/elf, const DT_MIPS_INTERFACE_SIZE DynTag
pkg debug/elf, const DT_MIPS_IVERSION = 1879048196
pkg debug/elf, const DT_MIPS_IVERSION DynTag
pkg debug/elf, const DT_MIPS_LIBLIST = 1879048201
pkg debug/elf, const DT_MIPS_LIBLIST DynTag
pkg debug/elf, const DT_MIPS_LIBLISTNO = 1879048208
pkg debug/elf, const DT_MIPS_LIBLISTNO DynTag
pkg debug/elf, const DT_MIPS_LOCALPAGE_GOTIDX = 1879048229
pkg debug/elf, const DT_MIPS_LOCALPAGE_GOTIDX DynTag
pkg debug/elf, const DT_MIPS_LOCAL_GOTIDX = 1879048230
pkg debug/elf, const DT_MIPS_LOCAL_GOTIDX DynTag
pkg debug/elf, const DT_MIPS_LOCAL_GOTNO = 1879048202
pkg debug/elf, const DT_MIPS_LOCAL_GOTNO DynTag
pkg debug/elf, const DT_MIPS_MSYM = 1879048199
pkg debug/elf, const DT_MIPS_MSYM DynTag
pkg debug/elf, const DT_MIPS_OPTIONS = 1879048233
pkg debug/elf, const DT_MIPS_OPTIONS DynTag
pkg debug/elf, const DT_MIPS_PERF_SUFFIX = 1879048238
pkg debug/elf, const DT_MIPS_PERF_SUFFIX DynTag
pkg debug/elf, const DT_MIPS_PIXIE_INIT = 1879048227
pkg debug/elf, const DT_MIPS_PIXIE_INIT DynTag
pkg debug/elf, const DT_MIPS_PLTGOT = 1879048242
pkg debug/elf, const DT_MIPS_PLTGOT DynTag
pkg debug/elf, const DT_MIPS_PROTECTED_GOTIDX = 1879048232
pkg debug/elf, const DT_MIPS_PROTECTED_GOTIDX DynTag
pkg debug/elf, const DT_MIPS_RLD_MAP = 1879048214
pkg debug/elf, const DT_MIPS_RLD_MAP DynTag
pkg debug/elf, const DT_MIPS_RLD_MAP_REL = 1879048245
pkg debug/elf, const DT_MIPS_RLD_MAP_REL DynTag
pkg debug/elf, const DT_MIPS_RLD_TEXT_RESOLVE_ADDR = 1879048237
pkg debug/elf, const DT_MIPS_RLD_TEXT_RESOLVE_ADDR DynTag
pkg debug/elf, const DT_MIPS_RLD_VERSION = 1879048193
pkg debug/elf, const DT_MIPS_RLD_VERSION DynTag
pkg debug/elf, const DT_MIPS_RWPLT = 1879048244
pkg debug/elf, const DT_MIPS_RWPLT DynTag
pkg debug/elf, const DT_MIPS_SYMBOL_LIB = 1879048228
pkg debug/elf, const DT_MIPS_SYMBOL_LIB DynTag
pkg debug/elf, const DT_MIPS_SYMTABNO = 1879048209
pkg debug/elf, const DT_MIPS_SYMTABNO DynTag
pkg debug/elf, const DT_MIPS_TIME_STAMP = 1879048194
pkg debug/elf, const DT_MIPS_TIME_STAMP DynTag
pkg debug/elf, const DT_MIPS_UNREFEXTNO = 1879048210
pkg debug/elf, const DT_MIPS_UNREFEXTNO DynTag
pkg debug/elf, const DT_MOVEENT = 1879047674
pkg debug/elf, const DT_MOVEENT DynTag
pkg debug/elf, const DT_MOVESZ = 1879047675
pkg debug/elf, const DT_MOVESZ DynTag
pkg debug/elf, const DT_MOVETAB = 1879047934
pkg debug/elf, const DT_MOVETAB DynTag
pkg debug/elf, const DT_PLTPAD = 1879047933
pkg debug/elf, const DT_PLTPAD DynTag
pkg debug/elf, const DT_PLTPADSZ = 1879047673
pkg debug/elf, const DT_PLTPADSZ DynTag
pkg debug/elf, const DT_POSFLAG_1 = 1879047677
pkg debug/elf, const DT_POSFLAG_1 DynTag
pkg debug/elf, const DT_PPC64_GLINK = 1879048192
pkg debug/elf, const DT_PPC64_GLINK DynTag
pkg debug/elf, const DT_PPC64_OPD = 1879048193
pkg debug/elf, const DT_PPC64_OPD DynTag
pkg debug/elf, const DT_PPC64_OPDSZ = 1879048194
pkg debug/elf, const DT_PPC64_OPDSZ DynTag
pkg debug/elf, const DT_PPC64_OPT = 1879048195
pkg debug/elf, const DT_PPC64_OPT DynTag
pkg debug/elf, const DT_PPC_GOT = 1879048192
pkg debug/elf, const DT_PPC_GOT DynTag
pkg debug/elf, const DT_PPC_OPT = 1879048193
pkg debug/elf, const DT_PPC_OPT DynTag
pkg debug/elf, const DT_RELACOUNT = 1879048185
pkg debug/elf, const DT_RELACOUNT DynTag
pkg debug/elf, const DT_RELCOUNT = 1879048186
pkg debug/elf, const DT_RELCOUNT DynTag
pkg debug/elf, const DT_SPARC_REGISTER = 1879048193
pkg debug/elf, const DT_SPARC_REGISTER DynTag
pkg debug/elf, const DT_SYMINENT = 1879047679
pkg debug/elf, const DT_SYMINENT DynTag
pkg debug/elf, const DT_SYMINFO = 1879047935
pkg debug/elf, const DT_SYMINFO DynTag
pkg debug/elf, const DT_SYMINSZ = 1879047678
pkg debug/elf, const DT_SYMINSZ DynTag
pkg debug/elf, const DT_SYMTAB_SHNDX = 34
pkg debug/elf, const DT_SYMTAB_SHNDX DynTag
pkg debug/elf, const DT_TLSDESC_GOT = 1879047927
pkg debug/elf, const DT_TLSDESC_GOT DynTag
pkg debug/elf, const DT_TLSDESC_PLT = 1879047926
pkg debug/elf, const DT_TLSDESC_PLT DynTag
pkg debug/elf, const DT_USED = 2147483646
pkg debug/elf, const DT_USED DynTag
pkg debug/elf, const DT_VALRNGHI = 1879047679
pkg debug/elf, const DT_VALRNGHI DynTag
pkg debug/elf, const DT_VALRNGLO = 1879047424
pkg debug/elf, const DT_VALRNGLO DynTag
pkg debug/elf, const DT_VERDEF = 1879048188
pkg debug/elf, const DT_VERDEF DynTag
pkg debug/elf, const DT_VERDEFNUM = 1879048189
pkg debug/elf, const DT_VERDEFNUM DynTag
pkg debug/elf, const PT_AARCH64_ARCHEXT = 1879048192
pkg debug/elf, const PT_AARCH64_ARCHEXT ProgType
pkg debug/elf, const PT_AARCH64_UNWIND = 1879048193
pkg debug/elf, const PT_AARCH64_UNWIND ProgType
pkg debug/elf, const PT_ARM_ARCHEXT = 1879048192
pkg debug/elf, const PT_ARM_ARCHEXT ProgType
pkg debug/elf, const PT_ARM_EXIDX = 1879048193
pkg debug/elf, const PT_ARM_EXIDX ProgType
pkg debug/elf, const PT_GNU_EH_FRAME = 1685382480
pkg debug/elf, const PT_GNU_EH_FRAME ProgType
pkg debug/elf, const PT_GNU_MBIND_HI = 1685386580
pkg debug/elf, const PT_GNU_MBIND_HI ProgType
pkg debug/elf, const PT_GNU_MBIND_LO = 1685382485
pkg debug/elf, const PT_GNU_MBIND_LO ProgType
pkg debug/elf, const PT_GNU_PROPERTY = 1685382483
pkg debug/elf, const PT_GNU_PROPERTY ProgType
pkg debug/elf, const PT_GNU_RELRO = 1685382482
pkg debug/elf, const PT_GNU_RELRO ProgType
pkg debug/elf, const PT_GNU_STACK = 1685382481
pkg debug/elf, const PT_GNU_STACK ProgType
pkg debug/elf, const PT_MIPS_ABIFLAGS = 1879048195
pkg debug/elf, const PT_MIPS_ABIFLAGS ProgType
pkg debug/elf, const PT_MIPS_OPTIONS = 1879048194
pkg debug/elf, const PT_MIPS_OPTIONS ProgType
pkg debug/elf, const PT_MIPS_REGINFO = 1879048192
pkg debug/elf, const PT_MIPS_REGINFO ProgType
pkg debug/elf, const PT_MIPS_RTPROC = 1879048193
pkg debug/elf, const PT_MIPS_RTPROC ProgType
pkg debug/elf, const PT_OPENBSD_BOOTDATA = 1705253862
pkg debug/elf, const PT_OPENBSD_BOOTDATA ProgType
pkg debug/elf, const PT_OPENBSD_RANDOMIZE = 1705237478
pkg debug/elf, const PT_OPENBSD_RANDOMIZE ProgType
pkg debug/elf, const PT_OPENBSD_WXNEEDED = 1705237479
pkg debug/elf, const PT_OPENBSD_WXNEEDED ProgType
pkg debug/elf, const PT_PAX_FLAGS = 1694766464
pkg debug/elf, const PT_PAX_FLAGS ProgType
pkg debug/elf, const PT_S390_PGSTE = 1879048192
pkg debug/elf, const PT_S390_PGSTE ProgType
pkg debug/elf, const PT_SUNWSTACK = 1879048187
pkg debug/elf, const PT_SUNWSTACK ProgType
pkg debug/elf, const PT_SUNW_EH_FRAME = 1685382480
pkg debug/elf, const PT_SUNW_EH_FRAME ProgType
pkg embed, method (FS) Open(string) (fs.File, error)
pkg embed, method (FS) ReadDir(string) ([]fs.DirEntry, error)
pkg embed, method (FS) ReadFile(string) ([]uint8, error)
pkg embed, type FS struct
pkg flag, func Func(string, string, func(string) error)
pkg flag, method (*FlagSet) Func(string, string, func(string) error)
pkg go/build, type Package struct, EmbedPatterns []string
pkg go/build, type Package struct, IgnoredOtherFiles []string
pkg go/build, type Package struct, TestEmbedPatterns []string
pkg go/build, type Package struct, XTestEmbedPatterns []string
pkg html/template, func ParseFS(fs.FS, ...string) (*Template, error)
pkg html/template, method (*Template) ParseFS(fs.FS, ...string) (*Template, error)
pkg io, func NopCloser(Reader) ReadCloser
pkg io, func ReadAll(Reader) ([]uint8, error)
pkg io, type ReadSeekCloser interface { Close, Read, Seek }
pkg io, type ReadSeekCloser interface, Close() error
pkg io, type ReadSeekCloser interface, Read([]uint8) (int, error)
pkg io, type ReadSeekCloser interface, Seek(int64, int) (int64, error)
pkg io, var Discard Writer
pkg io/fs, const ModeAppend = 1073741824
pkg io/fs, const ModeAppend FileMode
pkg io/fs, const ModeCharDevice = 2097152
pkg io/fs, const ModeCharDevice FileMode
pkg io/fs, const ModeDevice = 67108864
pkg io/fs, const ModeDevice FileMode
pkg io/fs, const ModeDir = 2147483648
pkg io/fs, const ModeDir FileMode
pkg io/fs, const ModeExclusive = 536870912
pkg io/fs, const ModeExclusive FileMode
pkg io/fs, const ModeIrregular = 524288
pkg io/fs, const ModeIrregular FileMode
pkg io/fs, const ModeNamedPipe = 33554432
pkg io/fs, const ModeNamedPipe FileMode
pkg io/fs, const ModePerm = 511
pkg io/fs, const ModePerm FileMode
pkg io/fs, const ModeSetgid = 4194304
pkg io/fs, const ModeSetgid FileMode
pkg io/fs, const ModeSetuid = 8388608
pkg io/fs, const ModeSetuid FileMode
pkg io/fs, const ModeSocket = 16777216
pkg io/fs, const ModeSocket FileMode
pkg io/fs, const ModeSticky = 1048576
pkg io/fs, const ModeSticky FileMode
pkg io/fs, const ModeSymlink = 134217728
pkg io/fs, const ModeSymlink FileMode
pkg io/fs, const ModeTemporary = 268435456
pkg io/fs, const ModeTemporary FileMode
pkg io/fs, const ModeType = 2401763328
pkg io/fs, const ModeType FileMode
pkg io/fs, func Glob(FS, string) ([]string, error)
pkg io/fs, func ReadDir(FS, string) ([]DirEntry, error)
pkg io/fs, func ReadFile(FS, string) ([]uint8, error)
pkg io/fs, func Stat(FS, string) (FileInfo, error)
pkg io/fs, func ValidPath(string) bool
pkg io/fs, method (*PathError) Error() string
pkg io/fs, method (*PathError) Timeout() bool
pkg io/fs, method (*PathError) Unwrap() error
pkg io/fs, method (FileMode) IsDir() bool
pkg io/fs, method (FileMode) IsRegular() bool
pkg io/fs, method (FileMode) Perm() FileMode
pkg io/fs, method (FileMode) String() string
pkg io/fs, method (FileMode) Type() FileMode
pkg io/fs, type DirEntry interface { Info, IsDir, Name, Type }
pkg io/fs, type DirEntry interface, Info() (FileInfo, error)
pkg io/fs, type DirEntry interface, IsDir() bool
pkg io/fs, type DirEntry interface, Name() string
pkg io/fs, type DirEntry interface, Type() FileMode
pkg io/fs, type FS interface { Open }
pkg io/fs, type FS interface, Open(string) (File, error)
pkg io/fs, type File interface { Close, Read, Stat }
pkg io/fs, type File interface, Close() error
pkg io/fs, type File interface, Read([]uint8) (int, error)
pkg io/fs, type File interface, Stat() (FileInfo, error)
pkg io/fs, type FileInfo interface { IsDir, ModTime, Mode, Name, Size, Sys }
pkg io/fs, type FileInfo interface, IsDir() bool
pkg io/fs, type FileInfo interface, ModTime() time.Time
pkg io/fs, type FileInfo interface, Mode() FileMode
pkg io/fs, type FileInfo interface, Name() string
pkg io/fs, type FileInfo interface, Size() int64
pkg io/fs, type FileInfo interface, Sys() interface{}
pkg io/fs, type FileMode uint32
pkg io/fs, type GlobFS interface { Glob, Open }
pkg io/fs, type GlobFS interface, Glob(string) ([]string, error)
pkg io/fs, type GlobFS interface, Open(string) (File, error)
pkg io/fs, type PathError struct
pkg io/fs, type PathError struct, Err error
pkg io/fs, type PathError struct, Op string
pkg io/fs, type PathError struct, Path string
pkg io/fs, type ReadDirFS interface { Open, ReadDir }
pkg io/fs, type ReadDirFS interface, Open(string) (File, error)
pkg io/fs, type ReadDirFS interface, ReadDir(string) ([]DirEntry, error)
pkg io/fs, type ReadDirFile interface { Close, Read, ReadDir, Stat }
pkg io/fs, type ReadDirFile interface, Close() error
pkg io/fs, type ReadDirFile interface, Read([]uint8) (int, error)
pkg io/fs, type ReadDirFile interface, ReadDir(int) ([]DirEntry, error)
pkg io/fs, type ReadDirFile interface, Stat() (FileInfo, error)
pkg io/fs, type ReadFileFS interface { Open, ReadFile }
pkg io/fs, type ReadFileFS interface, Open(string) (File, error)
pkg io/fs, type ReadFileFS interface, ReadFile(string) ([]uint8, error)
pkg io/fs, type StatFS interface { Open, Stat }
pkg io/fs, type StatFS interface, Open(string) (File, error)
pkg io/fs, type StatFS interface, Stat(string) (FileInfo, error)
pkg io/fs, var ErrClosed error
pkg io/fs, var ErrExist error
pkg io/fs, var ErrInvalid error
pkg io/fs, var ErrNotExist error
pkg io/fs, var ErrPermission error
pkg log, func Default() *Logger
pkg net, var ErrClosed error
pkg net/http, func FS(fs.FS) FileSystem
pkg net/http, type Transport struct, GetProxyConnectHeader func(context.Context, *url.URL, string) (Header, error)
pkg os, const ModeAppend fs.FileMode
pkg os, const ModeCharDevice fs.FileMode
pkg os, const ModeDevice fs.FileMode
pkg os, const ModeDir fs.FileMode
pkg os, const ModeExclusive fs.FileMode
pkg os, const ModeIrregular fs.FileMode
pkg os, const ModeNamedPipe fs.FileMode
pkg os, const ModePerm fs.FileMode
pkg os, const ModeSetgid fs.FileMode
pkg os, const ModeSetuid fs.FileMode
pkg os, const ModeSocket fs.FileMode
pkg os, const ModeSticky fs.FileMode
pkg os, const ModeSymlink fs.FileMode
pkg os, const ModeTemporary fs.FileMode
pkg os, const ModeType fs.FileMode
pkg os, func Chmod(string, fs.FileMode) error
pkg os, func DirFS(string) fs.FS
pkg os, func Lstat(string) (fs.FileInfo, error)
pkg os, func Mkdir(string, fs.FileMode) error
pkg os, func MkdirAll(string, fs.FileMode) error
pkg os, func OpenFile(string, int, fs.FileMode) (*File, error)
pkg os, func SameFile(fs.FileInfo, fs.FileInfo) bool
pkg os, func Stat(string) (fs.FileInfo, error)
pkg os, method (*File) Chmod(fs.FileMode) error
pkg os, method (*File) ReadDir(int) ([]fs.DirEntry, error)
pkg os, method (*File) Readdir(int) ([]fs.FileInfo, error)
pkg os, method (*File) Stat() (fs.FileInfo, error)
pkg os, type DirEntry = fs.DirEntry
pkg os, type FileInfo = fs.FileInfo
pkg os, type FileMode = fs.FileMode
pkg os, type PathError = fs.PathError
pkg os/signal, func NotifyContext(context.Context, ...os.Signal) (context.Context, context.CancelFunc)
pkg runtime/metrics, const KindBad = 0
pkg runtime/metrics, const KindBad ValueKind
pkg runtime/metrics, const KindFloat64 = 2
pkg runtime/metrics, const KindFloat64 ValueKind
pkg runtime/metrics, const KindFloat64Histogram = 3
pkg runtime/metrics, const KindFloat64Histogram ValueKind
pkg runtime/metrics, const KindUint64 = 1
pkg runtime/metrics, const KindUint64 ValueKind
pkg runtime/metrics, func All() []Description
pkg runtime/metrics, func Read([]Sample)
pkg runtime/metrics, method (Value) Float64() float64
pkg runtime/metrics, method (Value) Float64Histogram() *Float64Histogram
pkg runtime/metrics, method (Value) Kind() ValueKind
pkg runtime/metrics, method (Value) Uint64() uint64
pkg runtime/metrics, type Description struct
pkg runtime/metrics, type Description struct, Cumulative bool
pkg runtime/metrics, type Description struct, Description string
pkg runtime/metrics, type Description struct, Kind ValueKind
pkg runtime/metrics, type Description struct, Name string
pkg runtime/metrics, type Description struct, StopTheWorld bool
pkg runtime/metrics, type Float64Histogram struct
pkg runtime/metrics, type Float64Histogram struct, Buckets []float64
pkg runtime/metrics, type Float64Histogram struct, Counts []uint64
pkg runtime/metrics, type Sample struct
pkg runtime/metrics, type Sample struct, Name string
pkg runtime/metrics, type Sample struct, Value Value
pkg runtime/metrics, type Value struct
pkg runtime/metrics, type ValueKind int
pkg syscall (linux-386), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386), func Setegid(int) error
pkg syscall (linux-386), func Seteuid(int) error
pkg syscall (linux-386-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386-cgo), func Setegid(int) error
pkg syscall (linux-386-cgo), func Seteuid(int) error
pkg syscall (linux-amd64), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64), func Setegid(int) error
pkg syscall (linux-amd64), func Seteuid(int) error
pkg syscall (linux-amd64-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64-cgo), func Setegid(int) error
pkg syscall (linux-amd64-cgo), func Seteuid(int) error
pkg syscall (linux-arm), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm), func Setegid(int) error
pkg syscall (linux-arm), func Seteuid(int) error
pkg syscall (linux-arm-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm-cgo), func Setegid(int) error
pkg syscall (linux-arm-cgo), func Seteuid(int) error
pkg syscall (windows-386), func RtlGenRandom(*uint8, uint32) error
pkg syscall (windows-amd64), func RtlGenRandom(*uint8, uint32) error
pkg testing/fstest, func TestFS(fs.FS, ...string) error
pkg testing/fstest, method (MapFS) Glob(string) ([]string, error)
pkg testing/fstest, method (MapFS) Open(string) (fs.File, error)
pkg testing/fstest, method (MapFS) ReadDir(string) ([]fs.DirEntry, error)
pkg testing/fstest, method (MapFS) ReadFile(string) ([]uint8, error)
pkg testing/fstest, method (MapFS) Stat(string) (fs.FileInfo, error)
pkg testing/fstest, type MapFS map[string]*MapFile
pkg testing/fstest, type MapFile struct
pkg testing/fstest, type MapFile struct, Data []uint8
pkg testing/fstest, type MapFile struct, ModTime time.Time
pkg testing/fstest, type MapFile struct, Mode fs.FileMode
pkg testing/fstest, type MapFile struct, Sys interface{}
pkg testing/iotest, func ErrReader(error) io.Reader
pkg testing/iotest, func TestReader(io.Reader, []uint8) error
pkg text/template, func ParseFS(fs.FS, ...string) (*Template, error)
pkg text/template, method (*Template) ParseFS(fs.FS, ...string) (*Template, error)
pkg text/template/parse, const NodeComment = 20
pkg text/template/parse, const NodeComment NodeType
pkg text/template/parse, const ParseComments = 1
pkg text/template/parse, const ParseComments Mode
pkg text/template/parse, method (*CommentNode) Copy() Node
pkg text/template/parse, method (*CommentNode) String() string
pkg text/template/parse, method (CommentNode) Position() Pos
pkg text/template/parse, method (CommentNode) Type() NodeType
pkg text/template/parse, type CommentNode struct
pkg text/template/parse, type CommentNode struct, Text string
pkg text/template/parse, type CommentNode struct, embedded NodeType
pkg text/template/parse, type CommentNode struct, embedded Pos
pkg text/template/parse, type Mode uint
pkg text/template/parse, type Tree struct, Mode Mode
pkg unicode, const Version = "13.0.0"
pkg unicode, var Chorasmian *RangeTable
pkg unicode, var Dives_Akuru *RangeTable
pkg unicode, var Khitan_Small_Script *RangeTable
pkg unicode, var Yezidi *RangeTable


@@ -397,6 +397,19 @@ Do not send CLs removing the interior tags from such phrases.
documentation</a> for more information.
</p>
<p><!-- CL 250940 -->
In Go 1.15.3 and later, cgo will not permit Go code to allocate an
undefined struct type (a C struct defined as just <code>struct
S;</code> or similar) on the stack or heap.
Go code will only be permitted to use pointers to those types.
Allocating an instance of such a struct and passing a pointer, or a
full struct value, to C code was always unsafe and unlikely to work
correctly; it is now forbidden.
The fix is to either rewrite the Go code to use only pointers, or to
ensure that the Go code sees the full definition of the struct by
including the appropriate C header file.
</p>
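
An aside, not part of the diff: a minimal sketch of the pointer-only
pattern described above. The C helpers here are invented for the
sketch; only the "incomplete struct, pointers only" rule comes from
the text.

	package main

	/*
	struct S;                                   // opaque: declared but never defined here
	static struct S *new_s(void) { return 0; }  // toy helpers, invented for this sketch
	static void free_s(struct S *p) { (void)p; }
	*/
	import "C"

	func main() {
		// var s C.struct_S  // disallowed: Go code may not allocate the undefined struct type
		p := C.new_s() // fine: Go code only ever handles a pointer to it
		defer C.free_s(p)
		_ = p
	}
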
<h3 id="commonname">X.509 CommonName deprecation</h3>
<p><!-- CL 231379 -->


@@ -99,10 +99,6 @@ Do not send CLs removing the interior tags from such phrases.
<h2 id="tools">Tools</h2>
<p>
TODO
</p>
<h3 id="go-command">Go command</h3>
<h4 id="modules">Modules</h4>
@@ -275,6 +271,20 @@ Do not send CLs removing the interior tags from such phrases.
but without the extra step.
</p>
<h4 id="overlay-flag">The <code>-overlay</code> flag</h4>
<p><!-- golang.org/issue/39958 -->
The <code>-overlay</code> flag specifies a JSON configuration file containing
a set of file path replacements. The <code>-overlay</code> flag may be used
with all build commands and <code>go</code> <code>mod</code> subcommands.
It is primarily intended to be used by editor tooling such as gopls to
understand the effects of unsaved changes to source files. The config file
maps actual file paths to replacement file paths and the <code>go</code>
command and its builds will run as if the actual file paths exist with the
contents given by the replacement file paths, or don't exist if the replacement
file paths are empty.
</p>
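
An aside, not part of the diff: a rough sketch of how editor tooling
might drive the flag described above. The paths are invented, and the
overlay file's JSON shape (a single Replace object mapping on-disk
paths to replacement paths) is given from memory of the go command
documentation, so treat the details as approximate.

	package main

	import (
		"encoding/json"
		"os"
		"os/exec"
	)

	func main() {
		// Map the saved file path to a file holding the unsaved buffer contents.
		overlay := map[string]map[string]string{
			"Replace": {
				"/work/project/main.go": "/tmp/buffer-main.go",
			},
		}
		data, _ := json.Marshal(overlay)
		if err := os.WriteFile("/tmp/overlay.json", data, 0o644); err != nil {
			panic(err)
		}

		// Build as if the replacement contents were on disk.
		cmd := exec.Command("go", "build", "-overlay", "/tmp/overlay.json", "./...")
		cmd.Dir = "/work/project"
		cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
		_ = cmd.Run()
	}
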
<h3 id="cgo">Cgo</h3>
<p><!-- CL 252378 -->
@@ -287,12 +297,55 @@ Do not send CLs removing the interior tags from such phrases.
<h3 id="vet">Vet</h3>
<p>
TODO
<h4 id="vet-string-int">New warning for invalid testing.T use in
goroutines</h4>
<!-- CL 235677: https://golang.org/cl/235677: cmd/vet: bring in pass to catch invalid uses of testing.T in goroutines -->
<p><!-- CL 235677 -->
The vet tool now warns about invalid calls to the <code>testing.T</code>
method <code>Fatal</code> from within a goroutine created during the test.
This also warns on calls to <code>Fatalf</code>, <code>FailNow</code>, and
<code>Skip{,f,Now}</code> methods on <code>testing.T</code> tests or
<code>testing.B</code> benchmarks.
</p>
<p>
Calls to these methods stop the execution of the created goroutine and not
the <code>Test*</code> or <code>Benchmark*</code> function. So these are
<a href="/pkg/testing/#T.FailNow">required</a> to be called by the goroutine
running the test or benchmark function. For example:
</p>
<pre>
func TestFoo(t *testing.T) {
	go func() {
		if condition() {
			t.Fatal("oops") // This exits the inner func instead of TestFoo.
		}
		...
	}()
}
</pre>
<p>
Code calling <code>t.Fatal</code> (or a similar method) from a created
goroutine should be rewritten to signal the test failure using
<code>t.Error</code> and exit the goroutine early using an alternative
method, such as using a <code>return</code> statement. The previous example
could be rewritten as:
</p>
<pre>
func TestFoo(t *testing.T) {
	go func() {
		if condition() {
			t.Error("oops")
			return
		}
		...
	}()
}
</pre>
<p><!-- CL 248686, CL 276372 -->
The vet tool now warns about amd64 assembly that clobbers the BP
register (the frame pointer) without saving and restoring it,
@@ -326,7 +379,7 @@ Do not send CLs removing the interior tags from such phrases.
summarizing its execution time and memory allocation. This trace can
be used to find bottlenecks or regressions in Go startup
performance.
The <a href="/pkg/runtime/#hdr-Environment_Variables"><code>GODEBUG</code><
The <a href="/pkg/runtime/#hdr-Environment_Variables"><code>GODEBUG</code>
documentation</a> describes the format.
</p>
@@ -408,7 +461,7 @@ Do not send CLs removing the interior tags from such phrases.
<p>
On the producer side of the interface,
the new <a href="/pkg/embed/#FS">embed.FS</code></a> type
the new <a href="/pkg/embed/#FS"><code>embed.FS</code></a> type
implements <code>fs.FS</code>, as does
<a href="/pkg/archive/zip/#Reader"><code>zip.Reader</code></a>.
The new <a href="/pkg/os/#DirFS"><code>os.DirFS</code></a> function
@@ -438,10 +491,10 @@ Do not send CLs removing the interior tags from such phrases.
implementations.
</p>
<p>
TODO: when the "Minor changes to the library" section is close to completion,
decide if any changes are worth factoring out and highlighting in "Core library"
</p>
<!-- okay-after-beta1
TODO: decide if any additional changes are worth factoring out from
"Minor changes to the library" and highlighting in "Core library"
-->
<h3 id="minor_library_changes">Minor changes to the library</h3>
@@ -451,10 +504,6 @@ Do not send CLs removing the interior tags from such phrases.
in mind.
</p>
<p>
TODO: complete this section, resolve TODOs below, add missing entries
</p>
<dl id="crypto/dsa"><dt><a href="/pkg/crypto/dsa/">crypto/dsa</a></dt>
<dd>
<p><!-- CL 257939 -->
@@ -490,16 +539,6 @@ Do not send CLs removing the interior tags from such phrases.
indefinitely.
</p>
<p><!-- CL 246338 -->
The new <a href="/pkg/crypto/tls#Conn.HandshakeContext"><code>Conn.HandshakeContext</code></a>
method allows cancellation of an in-progress handshake. The provided
context is accessible through the new
<a href="/pkg/crypto/tls#ClientHelloInfo.Context"><code>ClientHelloInfo.Context</code></a>
and <a href="/pkg/crypto/tls#CertificateRequestInfo.Context">
<code>CertificateRequestInfo.Context</code></a> methods. Canceling the
context after the handshake has finished has no effect.
</p>
<p><!-- CL 239748 -->
Clients now return a handshake error if the server selects
<a href="/pkg/crypto/tls/#ConnectionState.NegotiatedProtocol">
@@ -672,8 +711,8 @@ Do not send CLs removing the interior tags from such phrases.
<p><!-- CL 250357 -->
The case of I/O on a closed network connection, or I/O on a network
connection that is closed before any of the I/O completes, can now
be detected using the new <a href="/pkg/net/#ErrClosed">ErrClosed</a> error.
A typical use would be <code>errors.Is(err, net.ErrClosed)</code>.
be detected using the new <a href="/pkg/net/#ErrClosed"><code>ErrClosed</code></a>
error. A typical use would be <code>errors.Is(err, net.ErrClosed)</code>.
In earlier releases the only way to reliably detect this case was to
match the string returned by the <code>Error</code> method
with <code>"use of closed network connection"</code>.
@ -722,13 +761,6 @@ Do not send CLs removing the interior tags from such phrases.
generating a SameSite key without a value.
</p>
<p><!-- CL 246338 -->
The <a href="/pkg/net/http/"><code>net/http</code></a> package now passes the
<a href="/pkg/net/http/#Request.Context"><code>Request</code> context</a> to
<a href="/pkg/crypto/tls#Conn.HandshakeContext"><code>tls.Conn.HandshakeContext</code></a>
when performing TLS handshakes.
</p>
<p><!-- CL 250039 -->
The <a href="/pkg/net/http/#Client">Client</a> now sends
an explicit <code>Content-Length:</code> <code>0</code>
@ -737,9 +769,10 @@ Do not send CLs removing the interior tags from such phrases.
</p>
<p><!-- CL 249440 -->
The <a href="/pkg/net/http/#ProxyFromEnvironment">ProxyFromEnvironment</a> function
no longer returns the setting of the <code>HTTP_PROXY</code> environment
variable for <code>https://</code> URLs when <code>HTTPS_PROXY</code> is unset.
The <a href="/pkg/net/http/#ProxyFromEnvironment"><code>ProxyFromEnvironment</code></a>
function no longer returns the setting of the <code>HTTP_PROXY</code>
environment variable for <code>https://</code> URLs when
<code>HTTPS_PROXY</code> is unset.
</p>
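<p>
A small sketch of the new behavior (the proxy address below is hypothetical,
and in practice the variables are set outside the program):
</p>
<pre>
package main
import (
	"fmt"
	"log"
	"net/http"
	"os"
)
func main() {
	// Only HTTP_PROXY is set; HTTPS_PROXY is left unset.
	os.Setenv("HTTP_PROXY", "http://proxy.example.com:8080")
	req, err := http.NewRequest("GET", "https://golang.org/", nil)
	if err != nil {
		log.Fatal(err)
	}
	proxyURL, err := http.ProxyFromEnvironment(req)
	fmt.Println(proxyURL, err) // no proxy (nil URL) and no error for https://
}
</pre>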
</dd>
</dl><!-- net/http -->
@ -747,7 +780,7 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="net/http/httputil"><dt><a href="/pkg/net/http/httputil/">net/http/httputil</a></dt>
<dd>
<p><!-- CL 260637 -->
The <a href="/pkg/net/http/httputil/#ReverseProxy">ReverseProxy</a>
<a href="/pkg/net/http/httputil/#ReverseProxy"><code>ReverseProxy</code></a>
now flushes buffered data more aggressively when proxying
streamed responses with unknown body lengths.
</p>
@ -790,9 +823,9 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="path"><dt><a href="/pkg/path/">path</a></dt>
<dd>
<p><!-- CL 264397, golang.org/issues/28614 -->
The <code>Match</code> and <code>Glob</code> functions now
return an error if the unmatched part of the pattern has a
syntax error. Previously, the functions returned early on a failed
The <a href="/pkg/path/#Match"><code>Match</code></a> function now
returns an error if the unmatched part of the pattern has a
syntax error. Previously, the function returned early on a failed
match, and thus did not report any later syntax error in the
pattern.
</p>
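<p>
A brief sketch of the stricter checking (the pattern and name here are only
illustrative):
</p>
<pre>
package main
import (
	"errors"
	"fmt"
	"path"
)
func main() {
	// The name does not match, and the unmatched remainder of the pattern
	// has an unterminated character class, so the error is now reported.
	matched, err := path.Match("a[", "b")
	fmt.Println(matched, errors.Is(err, path.ErrBadPattern)) // false true
}
</pre>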
@ -802,7 +835,8 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="path/filepath"><dt><a href="/pkg/path/filepath/">path/filepath</a></dt>
<dd>
<p><!-- CL 264397, golang.org/issues/28614 -->
The <code>Match</code> and <code>Glob</code> functions now
The <a href="/pkg/path/filepath#Match"><code>Match</code></a> and
<a href="/pkg/path/filepath#Glob"><code>Glob</code></a> functions now
return an error if the unmatched part of the pattern has a
syntax error. Previously, the functions returned early on a failed
match, and thus did not report any later syntax error in the
@ -814,9 +848,10 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="reflect"><dt><a href="/pkg/reflect/">reflect</a></dt>
<dd>
<p><!-- CL 248341, golang.org/issues/40281 -->
<code>StructTag</code> now allows multiple space-separated keys
in key:value pairs, as in <code>`json xml:"field1"`</code>
(equivalent to <code>`json:"field1" xml:"field1"`</code>).
<a href="/pkg/reflect/#StructTag"><code>StructTag</code></a>
now allows multiple space-separated keys in key:value pairs,
as in <code>`json xml:"field1"`</code> (equivalent to
<code>`json:"field1" xml:"field1"`</code>).
</p>
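<p>
A minimal sketch of reading such a tag (the struct and field names are
illustrative):
</p>
<pre>
package main
import (
	"fmt"
	"reflect"
)
type Record struct {
	Name string `json xml:"name"`
}
func main() {
	tag := reflect.TypeOf(Record{}).Field(0).Tag
	fmt.Println(tag.Get("json"), tag.Get("xml")) // name name
}
</pre>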
</dd>
</dl><!-- reflect -->

View File

@ -119,11 +119,26 @@ The Go toolchain is written in Go. To build it, you need a Go compiler installed
The scripts that do the initial build of the tools look for a "go" command
in <code>$PATH</code>, so as long as you have Go installed in your
system and configured in your <code>$PATH</code>, you are ready to build Go
from source.
from source.
Or if you prefer you can set <code>$GOROOT_BOOTSTRAP</code> to the
root of a Go installation to use to build the new Go toolchain;
<code>$GOROOT_BOOTSTRAP/bin/go</code> should be the go command to use.</p>
<p>
There are four possible ways to obtain a bootstrap toolchain:
</p>
<ul>
<li>Download a recent binary release of Go.
<li>Cross-compile a toolchain using a system with a working Go installation.
<li>Use gccgo.
<li>Compile a toolchain from Go 1.4, the last Go release with a compiler written in C.
</ul>
<p>
These approaches are detailed below.
</p>
<h3 id="bootstrapFromBinaryRelease">Bootstrap toolchain from binary release</h3>
<p>
@ -132,30 +147,6 @@ To use a binary release as a bootstrap toolchain, see
packaged Go distribution.
</p>
<h3 id="bootstrapFromSource">Bootstrap toolchain from source</h3>
<p>
To build a bootstrap toolchain from source, use
either the git branch <code>release-branch.go1.4</code> or
<a href="https://dl.google.com/go/go1.4-bootstrap-20171003.tar.gz">go1.4-bootstrap-20171003.tar.gz</a>,
which contains the Go 1.4 source code plus accumulated fixes
to keep the tools running on newer operating systems.
(Go 1.4 was the last distribution in which the toolchain was written in C.)
After unpacking the Go 1.4 source, <code>cd</code> to
the <code>src</code> subdirectory, set <code>CGO_ENABLED=0</code> in
the environment, and run <code>make.bash</code> (or,
on Windows, <code>make.bat</code>).
</p>
<p>
Once the Go 1.4 source has been unpacked into your GOROOT_BOOTSTRAP directory,
you must keep this git clone instance checked out to branch
<code>release-branch.go1.4</code>. Specifically, do not attempt to reuse
this git clone in the later step named "Fetch the repository." The go1.4
bootstrap toolchain <b>must be able</b> to properly traverse the go1.4 sources
that it assumes are present under this repository root.
</p>
<h3 id="bootstrapFromCrosscompiledSource">Bootstrap toolchain from cross-compiled source</h3>
<p>
@ -194,6 +185,36 @@ $ sudo update-alternatives --set go /usr/bin/go-5
$ GOROOT_BOOTSTRAP=/usr ./make.bash
</pre>
<h3 id="bootstrapFromSource">Bootstrap toolchain from C source code</h3>
<p>
To build a bootstrap toolchain from C source code, use
either the git branch <code>release-branch.go1.4</code> or
<a href="https://dl.google.com/go/go1.4-bootstrap-20171003.tar.gz">go1.4-bootstrap-20171003.tar.gz</a>,
which contains the Go 1.4 source code plus accumulated fixes
to keep the tools running on newer operating systems.
(Go 1.4 was the last distribution in which the toolchain was written in C.)
After unpacking the Go 1.4 source, <code>cd</code> to
the <code>src</code> subdirectory, set <code>CGO_ENABLED=0</code> in
the environment, and run <code>make.bash</code> (or,
on Windows, <code>make.bat</code>).
</p>
<p>
Once the Go 1.4 source has been unpacked into your GOROOT_BOOTSTRAP directory,
you must keep this git clone instance checked out to branch
<code>release-branch.go1.4</code>. Specifically, do not attempt to reuse
this git clone in the later step named "Fetch the repository." The go1.4
bootstrap toolchain <b>must be able</b> to properly traverse the go1.4 sources
that it assumes are present under this repository root.
</p>
<p>
Note that Go 1.4 does not run on all systems that later versions of Go do.
In particular, Go 1.4 does not support current versions of macOS.
On such systems, the bootstrap toolchain must be obtained using one of the other methods.
</p>
<h2 id="git">Install Git, if needed</h2>
<p>

View File

@ -20,6 +20,7 @@ var (
TrimPath = flag.String("trimpath", "", "remove prefix from recorded source file paths")
Shared = flag.Bool("shared", false, "generate code that can be linked into a shared library")
Dynlink = flag.Bool("dynlink", false, "support references to Go symbols defined in other shared libraries")
Linkshared = flag.Bool("linkshared", false, "generate code that will be linked against Go shared libraries")
AllErrors = flag.Bool("e", false, "no limit on number of errors reported")
SymABIs = flag.Bool("gensymabis", false, "write symbol ABI information to output file, don't assemble")
Importpath = flag.String("p", "", "set expected package import to path")

View File

@ -37,6 +37,7 @@ func main() {
ctxt := obj.Linknew(architecture.LinkArch)
ctxt.Debugasm = flags.PrintOut
ctxt.Flag_dynlink = *flags.Dynlink
ctxt.Flag_linkshared = *flags.Linkshared
ctxt.Flag_shared = *flags.Shared || *flags.Dynlink
ctxt.IsAsm = true
ctxt.Pkgpath = *flags.Importpath

View File

@ -75,11 +75,8 @@ func tokenize(src string) []string {
}
func verifyParamResultOffset(t *testing.T, f *types.Field, r ABIParamAssignment, which string, idx int) int {
n := ir.AsNode(f.Nname)
if n == nil {
panic("not expected")
}
if n.Offset() != int64(r.Offset) {
n := ir.AsNode(f.Nname).(*ir.Name)
if n.FrameOffset() != int64(r.Offset) {
t.Errorf("%s %d: got offset %d wanted %d t=%v",
which, idx, r.Offset, n.Offset(), f.Type)
return 1

View File

@ -310,25 +310,25 @@ func genhash(t *types.Type) *obj.LSym {
// pure memory.
hashel := hashfor(t.Elem())
n := ir.Nod(ir.ORANGE, nil, ir.Nod(ir.ODEREF, np, nil))
ni := ir.Node(NewName(lookup("i")))
ni.SetType(types.Types[types.TINT])
n.PtrList().Set1(ni)
n.SetColas(true)
colasdefn(n.List().Slice(), n)
ni = n.List().First()
// for i := 0; i < nelem; i++
ni := temp(types.Types[types.TINT])
init := ir.Nod(ir.OAS, ni, nodintconst(0))
cond := ir.Nod(ir.OLT, ni, nodintconst(t.NumElem()))
post := ir.Nod(ir.OAS, ni, ir.Nod(ir.OADD, ni, nodintconst(1)))
loop := ir.Nod(ir.OFOR, cond, post)
loop.PtrInit().Append(init)
// h = hashel(&p[i], h)
call := ir.Nod(ir.OCALL, hashel, nil)
nx := ir.Nod(ir.OINDEX, np, ni)
nx.SetBounded(true)
na := ir.Nod(ir.OADDR, nx, nil)
na := nodAddr(nx)
call.PtrList().Append(na)
call.PtrList().Append(nh)
n.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
loop.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
fn.PtrBody().Append(n)
fn.PtrBody().Append(loop)
case types.TSTRUCT:
// Walk the struct using memhash for runs of AMEM
@ -347,7 +347,7 @@ func genhash(t *types.Type) *obj.LSym {
hashel := hashfor(f.Type)
call := ir.Nod(ir.OCALL, hashel, nil)
nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
na := ir.Nod(ir.OADDR, nx, nil)
na := nodAddr(nx)
call.PtrList().Append(na)
call.PtrList().Append(nh)
fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
@ -362,7 +362,7 @@ func genhash(t *types.Type) *obj.LSym {
hashel := hashmem(f.Type)
call := ir.Nod(ir.OCALL, hashel, nil)
nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
na := ir.Nod(ir.OADDR, nx, nil)
na := nodAddr(nx)
call.PtrList().Append(na)
call.PtrList().Append(nh)
call.PtrList().Append(nodintconst(size))
@ -394,7 +394,7 @@ func genhash(t *types.Type) *obj.LSym {
}
fn.SetNilCheckDisabled(true)
xtop = append(xtop, fn)
Target.Decls = append(Target.Decls, fn)
// Build closure. It doesn't close over any variables, so
// it contains just the function pointer.
@ -741,7 +741,7 @@ func geneq(t *types.Type) *obj.LSym {
// return (or goto ret)
fn.PtrBody().Append(nodSym(ir.OLABEL, nil, neq))
fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(false)))
if EqCanPanic(t) || hasCall(fn) {
if EqCanPanic(t) || anyCall(fn) {
// Epilogue is large, so share it with the equal case.
fn.PtrBody().Append(nodSym(ir.OGOTO, nil, ret))
} else {
@ -774,7 +774,7 @@ func geneq(t *types.Type) *obj.LSym {
// neither of which can be nil, and our comparisons
// are shallow.
fn.SetNilCheckDisabled(true)
xtop = append(xtop, fn)
Target.Decls = append(Target.Decls, fn)
// Generate a closure which points at the function we just generated.
dsymptr(closure, 0, sym.Linksym(), 0)
@ -782,14 +782,12 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
func hasCall(fn *ir.Func) bool {
found := ir.Find(fn, func(n ir.Node) interface{} {
if op := n.Op(); op == ir.OCALL || op == ir.OCALLFUNC {
return n
}
return nil
func anyCall(fn *ir.Func) bool {
return ir.Any(fn, func(n ir.Node) bool {
// TODO(rsc): No methods?
op := n.Op()
return op == ir.OCALL || op == ir.OCALLFUNC
})
return found != nil
}
// eqfield returns the node
@ -807,7 +805,7 @@ func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
// memequal(s.ptr, t.ptr, len(s))
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
func eqstring(s, t ir.Node) (eqlen, eqmem ir.Node) {
func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
s = conv(s, types.Types[types.TSTRING])
t = conv(t, types.Types[types.TSTRING])
sptr := ir.Nod(ir.OSPTR, s, nil)
@ -817,12 +815,11 @@ func eqstring(s, t ir.Node) (eqlen, eqmem ir.Node) {
fn := syslook("memequal")
fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
call := ir.Nod(ir.OCALL, fn, nil)
call.PtrList().Append(sptr, tptr, ir.Copy(slen))
call = typecheck(call, ctxExpr|ctxMultiOK)
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{sptr, tptr, ir.Copy(slen)})
call = typecheck(call, ctxExpr|ctxMultiOK).(*ir.CallExpr)
cmp := ir.Nod(ir.OEQ, slen, tlen)
cmp = typecheck(cmp, ctxExpr)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr)
cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
@ -833,7 +830,7 @@ func eqstring(s, t ir.Node) (eqlen, eqmem ir.Node) {
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
func eqinterface(s, t ir.Node) (eqtab, eqdata ir.Node) {
func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
if !types.Identical(s.Type(), t.Type()) {
base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
}
@ -855,12 +852,11 @@ func eqinterface(s, t ir.Node) (eqtab, eqdata ir.Node) {
sdata.SetTypecheck(1)
tdata.SetTypecheck(1)
call := ir.Nod(ir.OCALL, fn, nil)
call.PtrList().Append(stab, sdata, tdata)
call = typecheck(call, ctxExpr|ctxMultiOK)
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{stab, sdata, tdata})
call = typecheck(call, ctxExpr|ctxMultiOK).(*ir.CallExpr)
cmp := ir.Nod(ir.OEQ, stab, ttab)
cmp = typecheck(cmp, ctxExpr)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab)
cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr)
cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
@ -868,10 +864,8 @@ func eqinterface(s, t ir.Node) (eqtab, eqdata ir.Node) {
// eqmem returns the node
// memequal(&p.field, &q.field [, size])
func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
nx := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, p, field), nil)
ny := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, q, field), nil)
nx = typecheck(nx, ctxExpr)
ny = typecheck(ny, ctxExpr)
nx := typecheck(nodAddr(nodSym(ir.OXDOT, p, field)), ctxExpr)
ny := typecheck(nodAddr(nodSym(ir.OXDOT, q, field)), ctxExpr)
fn, needsize := eqmemfunc(size, nx.Type().Elem())
call := ir.Nod(ir.OCALL, fn, nil)
@ -884,7 +878,7 @@ func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
return call
}
func eqmemfunc(size int64, t *types.Type) (fn ir.Node, needsize bool) {
func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
switch size {
default:
fn = syslook("memequal")

View File

@ -119,6 +119,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
}
f.Offset = o
if n := ir.AsNode(f.Nname); n != nil {
n := n.Name()
// addrescapes has similar code to update these offsets.
// Usually addrescapes runs after widstruct,
// in which case we could drop this,
@ -127,10 +128,10 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
// It's possible the ordering has changed and this is
// now the common case. I'm not sure.
if n.Name().Stackcopy != nil {
n.Name().Stackcopy.SetOffset(o)
n.SetOffset(0)
n.Name().Stackcopy.SetFrameOffset(o)
n.SetFrameOffset(0)
} else {
n.SetOffset(o)
n.SetFrameOffset(o)
}
}

View File

@ -15,8 +15,11 @@ type exporter struct {
// markObject visits a reachable object.
func (p *exporter) markObject(n ir.Node) {
if n.Op() == ir.ONAME && n.Class() == ir.PFUNC {
inlFlood(n.(*ir.Name))
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if n.Class() == ir.PFUNC {
inlFlood(n, exportsym)
}
}
p.markType(n.Type())

View File

@ -1,20 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
)
func npos(pos src.XPos, n ir.Node) ir.Node {
n.SetPos(pos)
return n
}
func builtinCall(op ir.Op) ir.Node {
return ir.Nod(ir.OCALL, mkname(types.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
}

View File

@ -89,7 +89,7 @@ func typecheckclosure(clo ir.Node, top int) {
fn.SetClosureCalled(top&ctxCallee != 0)
// Do not typecheck fn twice, otherwise, we will end up pushing
// fn to xtop multiple times, causing initLSym called twice.
// fn to Target.Decls multiple times, causing initLSym called twice.
// See #30709
if fn.Typecheck() == 1 {
return
@ -118,7 +118,7 @@ func typecheckclosure(clo ir.Node, top int) {
// Type check the body now, but only if we're inside a function.
// At top level (in a variable initialization: curfn==nil) we're not
// ready to type check code yet; we'll check it later, because the
// underlying closure function we create is added to xtop.
// underlying closure function we create is added to Target.Decls.
if Curfn != nil && clo.Type() != nil {
oldfn := Curfn
Curfn = fn
@ -129,7 +129,7 @@ func typecheckclosure(clo ir.Node, top int) {
Curfn = oldfn
}
xtop = append(xtop, fn)
Target.Decls = append(Target.Decls, fn)
}
// globClosgen is like Func.Closgen, but for the global scope.
@ -192,14 +192,14 @@ func capturevars(fn *ir.Func) {
var outer ir.Node
outer = v.Outer
outermost := v.Defn
outermost := v.Defn.(*ir.Name)
// out parameters will be assigned to implicitly upon return.
if outermost.Class() != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 {
v.SetByval(true)
} else {
outermost.Name().SetAddrtaken(true)
outer = ir.Nod(ir.OADDR, outer, nil)
outer = nodAddr(outer)
}
if base.Flag.LowerM > 1 {
@ -309,7 +309,7 @@ func transformclosure(fn *ir.Func) {
v.Heapaddr = addr
var src ir.Node = cr
if v.Byval() {
src = ir.Nod(ir.OADDR, cr, nil)
src = nodAddr(cr)
}
body = append(body, ir.Nod(ir.OAS, addr, src))
}
@ -378,7 +378,7 @@ func closureType(clo ir.Node) *types.Type {
return typ
}
func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node {
func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
fn := clo.Func()
// If no closure vars, don't bother wrapping.
@ -396,43 +396,44 @@ func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node {
clos.SetEsc(clo.Esc())
clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
clos = ir.Nod(ir.OADDR, clos, nil)
clos.SetEsc(clo.Esc())
addr := nodAddr(clos)
addr.SetEsc(clo.Esc())
// Force type conversion from *struct to the func type.
clos = convnop(clos, clo.Type())
cfn := convnop(addr, clo.Type())
// non-escaping temp to use, if any.
if x := prealloc[clo]; x != nil {
if x := clo.Prealloc; x != nil {
if !types.Identical(typ, x.Type()) {
panic("closure type does not match order's assigned type")
}
clos.Left().SetRight(x)
delete(prealloc, clo)
addr.SetRight(x)
clo.Prealloc = nil
}
return walkexpr(clos, init)
return walkexpr(cfn, init)
}
func typecheckpartialcall(dot ir.Node, sym *types.Sym) *ir.CallPartExpr {
switch dot.Op() {
func typecheckpartialcall(n ir.Node, sym *types.Sym) *ir.CallPartExpr {
switch n.Op() {
case ir.ODOTINTER, ir.ODOTMETH:
break
default:
base.Fatalf("invalid typecheckpartialcall")
}
dot := n.(*ir.SelectorExpr)
// Create top-level function.
fn := makepartialcall(dot, dot.Type(), sym)
fn.SetWrapper(true)
return ir.NewCallPartExpr(dot.Pos(), dot.Left(), dot.(*ir.SelectorExpr).Selection, fn)
return ir.NewCallPartExpr(dot.Pos(), dot.Left(), dot.Selection, fn)
}
// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func {
func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.Func {
rcvrtype := dot.Left().Type()
sym := methodSymSuffix(rcvrtype, meth, "-fm")
@ -475,18 +476,19 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func {
body = append(body, ir.Nod(ir.OAS, ptr, cr))
} else {
ptr.SetType(types.NewPtr(rcvrtype))
body = append(body, ir.Nod(ir.OAS, ptr, ir.Nod(ir.OADDR, cr, nil)))
body = append(body, ir.Nod(ir.OAS, ptr, nodAddr(cr)))
}
call := ir.Nod(ir.OCALL, nodSym(ir.OXDOT, ptr, meth), nil)
call.PtrList().Set(paramNnames(tfn.Type()))
call.SetIsDDD(tfn.Type().IsVariadic())
if t0.NumResults() != 0 {
n := ir.Nod(ir.ORETURN, nil, nil)
n.PtrList().Set1(call)
call = n
ret := ir.Nod(ir.ORETURN, nil, nil)
ret.PtrList().Set1(call)
body = append(body, ret)
} else {
body = append(body, call)
}
body = append(body, call)
fn.PtrBody().Set(body)
funcbody()
@ -497,7 +499,7 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func {
Curfn = fn
typecheckslice(fn.Body().Slice(), ctxStmt)
sym.Def = fn
xtop = append(xtop, fn)
Target.Decls = append(Target.Decls, fn)
Curfn = savecurfn
base.Pos = saveLineNo
@ -507,7 +509,7 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func {
// partialCallType returns the struct type used to hold all the information
// needed in the closure for n (n must be a OCALLPART node).
// The address of a variable of the returned type can be cast to a func.
func partialCallType(n ir.Node) *types.Type {
func partialCallType(n *ir.CallPartExpr) *types.Type {
t := tostruct([]*ir.Field{
namedfield("F", types.Types[types.TUINTPTR]),
namedfield("R", n.Left().Type()),
@ -530,8 +532,7 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
n.SetLeft(cheapexpr(n.Left(), init))
n.SetLeft(walkexpr(n.Left(), nil))
tab := ir.Nod(ir.OITAB, n.Left(), nil)
tab = typecheck(tab, ctxExpr)
tab := typecheck(ir.Nod(ir.OITAB, n.Left(), nil), ctxExpr)
c := ir.Nod(ir.OCHECKNIL, tab, nil)
c.SetTypecheck(1)
@ -544,22 +545,22 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
clos.SetEsc(n.Esc())
clos.PtrList().Set2(ir.Nod(ir.OCFUNC, n.Func().Nname, nil), n.Left())
clos = ir.Nod(ir.OADDR, clos, nil)
clos.SetEsc(n.Esc())
addr := nodAddr(clos)
addr.SetEsc(n.Esc())
// Force type conversion from *struct to the func type.
clos = convnop(clos, n.Type())
cfn := convnop(addr, n.Type())
// non-escaping temp to use, if any.
if x := prealloc[n]; x != nil {
if x := n.Prealloc; x != nil {
if !types.Identical(typ, x.Type()) {
panic("partial call type does not match order's assigned type")
}
clos.Left().SetRight(x)
delete(prealloc, n)
addr.SetRight(x)
n.Prealloc = nil
}
return walkexpr(clos, init)
return walkexpr(cfn, init)
}
// callpartMethod returns the *types.Field representing the method

View File

@ -162,6 +162,7 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir
break
}
n := n.(*ir.UnaryExpr)
n.SetLeft(convlit(n.Left(), ot))
if n.Left().Type() == nil {
n.SetType(nil)
@ -177,14 +178,24 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir
break
}
n.SetLeft(convlit(n.Left(), ot))
n.SetRight(convlit(n.Right(), ot))
if n.Left().Type() == nil || n.Right().Type() == nil {
var l, r ir.Node
switch n := n.(type) {
case *ir.BinaryExpr:
n.SetLeft(convlit(n.Left(), ot))
n.SetRight(convlit(n.Right(), ot))
l, r = n.Left(), n.Right()
case *ir.LogicalExpr:
n.SetLeft(convlit(n.Left(), ot))
n.SetRight(convlit(n.Right(), ot))
l, r = n.Left(), n.Right()
}
if l.Type() == nil || r.Type() == nil {
n.SetType(nil)
return n
}
if !types.Identical(n.Left().Type(), n.Right().Type()) {
base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, n.Left().Type(), n.Right().Type())
if !types.Identical(l.Type(), r.Type()) {
base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
n.SetType(nil)
return n
}
@ -435,48 +446,56 @@ var tokenForOp = [...]token.Token{
// Otherwise, evalConst returns a new OLITERAL with the same value as n,
// and with .Orig pointing back to n.
func evalConst(n ir.Node) ir.Node {
nl, nr := n.Left(), n.Right()
// Pick off just the opcodes that can be constant evaluated.
switch op := n.Op(); op {
switch n.Op() {
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
nl := n.Left()
if nl.Op() == ir.OLITERAL {
var prec uint
if n.Type().IsUnsigned() {
prec = uint(n.Type().Size() * 8)
}
return origConst(n, constant.UnaryOp(tokenForOp[op], nl.Val(), prec))
return origConst(n, constant.UnaryOp(tokenForOp[n.Op()], nl.Val(), prec))
}
case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND:
case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT:
nl, nr := n.Left(), n.Right()
if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
rval := nr.Val()
// check for divisor underflow in complex division (see issue 20227)
if op == ir.ODIV && n.Type().IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 {
if n.Op() == ir.ODIV && n.Type().IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 {
base.Errorf("complex division by zero")
n.SetType(nil)
return n
}
if (op == ir.ODIV || op == ir.OMOD) && constant.Sign(rval) == 0 {
if (n.Op() == ir.ODIV || n.Op() == ir.OMOD) && constant.Sign(rval) == 0 {
base.Errorf("division by zero")
n.SetType(nil)
return n
}
tok := tokenForOp[op]
if op == ir.ODIV && n.Type().IsInteger() {
tok := tokenForOp[n.Op()]
if n.Op() == ir.ODIV && n.Type().IsInteger() {
tok = token.QUO_ASSIGN // integer division
}
return origConst(n, constant.BinaryOp(nl.Val(), tok, rval))
}
case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
case ir.OOROR, ir.OANDAND:
nl, nr := n.Left(), n.Right()
if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[op], nr.Val()))
return origConst(n, constant.BinaryOp(nl.Val(), tokenForOp[n.Op()], nr.Val()))
}
case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
nl, nr := n.Left(), n.Right()
if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[n.Op()], nr.Val()))
}
case ir.OLSH, ir.ORSH:
nl, nr := n.Left(), n.Right()
if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
// shiftBound from go/types; "so we can express smallestFloat64"
const shiftBound = 1023 - 1 + 52
@ -486,15 +505,17 @@ func evalConst(n ir.Node) ir.Node {
n.SetType(nil)
break
}
return origConst(n, constant.Shift(toint(nl.Val()), tokenForOp[op], uint(s)))
return origConst(n, constant.Shift(toint(nl.Val()), tokenForOp[n.Op()], uint(s)))
}
case ir.OCONV, ir.ORUNESTR:
nl := n.Left()
if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
return origConst(n, convertVal(nl.Val(), n.Type(), true))
}
case ir.OCONVNOP:
nl := n.Left()
if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
// set so n.Orig gets OCONV instead of OCONVNOP
n.SetOp(ir.OCONV)
@ -532,28 +553,28 @@ func evalConst(n ir.Node) ir.Node {
i2++
}
nl := ir.Copy(n)
nl := ir.Copy(n).(*ir.AddStringExpr)
nl.PtrList().Set(s[i:i2])
nl = origConst(nl, constant.MakeString(strings.Join(strs, "")))
newList = append(newList, nl)
newList = append(newList, origConst(nl, constant.MakeString(strings.Join(strs, ""))))
i = i2 - 1
} else {
newList = append(newList, s[i])
}
}
n = ir.Copy(n)
n.PtrList().Set(newList)
return n
nn := ir.Copy(n).(*ir.AddStringExpr)
nn.PtrList().Set(newList)
return nn
case ir.OCAP, ir.OLEN:
nl := n.Left()
switch nl.Type().Kind() {
case types.TSTRING:
if ir.IsConst(nl, constant.String) {
return origIntConst(n, int64(len(ir.StringVal(nl))))
}
case types.TARRAY:
if !hasCallOrChan(nl) {
if !anyCallOrChan(nl) {
return origIntConst(n, nl.Type().NumElem())
}
}
@ -562,16 +583,19 @@ func evalConst(n ir.Node) ir.Node {
return origIntConst(n, evalunsafe(n))
case ir.OREAL:
nl := n.Left()
if nl.Op() == ir.OLITERAL {
return origConst(n, constant.Real(nl.Val()))
}
case ir.OIMAG:
nl := n.Left()
if nl.Op() == ir.OLITERAL {
return origConst(n, constant.Imag(nl.Val()))
}
case ir.OCOMPLEX:
nl, nr := n.Left(), n.Right()
if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
return origConst(n, makeComplex(nl.Val(), nr.Val()))
}
@ -779,9 +803,9 @@ func isGoConst(n ir.Node) bool {
return n.Op() == ir.OLITERAL
}
// hasCallOrChan reports whether n contains any calls or channel operations.
func hasCallOrChan(n ir.Node) bool {
found := ir.Find(n, func(n ir.Node) interface{} {
// anyCallOrChan reports whether n contains any calls or channel operations.
func anyCallOrChan(n ir.Node) bool {
return ir.Any(n, func(n ir.Node) bool {
switch n.Op() {
case ir.OAPPEND,
ir.OCALL,
@ -803,11 +827,10 @@ func hasCallOrChan(n ir.Node) bool {
ir.OREAL,
ir.ORECOVER,
ir.ORECV:
return n
return true
}
return nil
return false
})
return found != nil
}
// A constSet represents a set of Go constant expressions.
@ -830,8 +853,10 @@ type constSetKey struct {
//
// n must not be an untyped constant.
func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) {
if n.Op() == ir.OCONVIFACE && n.Implicit() {
n = n.Left()
if conv := n; conv.Op() == ir.OCONVIFACE {
if conv.Implicit() {
n = conv.Left()
}
}
if !isGoConst(n) {

View File

@ -15,9 +15,18 @@ import (
"strings"
)
// Declaration stack & operations
func EnableNoWriteBarrierRecCheck() {
nowritebarrierrecCheck = newNowritebarrierrecChecker()
}
var externdcl []ir.Node
func NoWriteBarrierRecCheck() {
// Write barriers are now known. Check the
// call graph.
nowritebarrierrecCheck.check()
nowritebarrierrecCheck = nil
}
var nowritebarrierrecCheck *nowritebarrierrecChecker
func testdclstack() {
if !types.IsDclstackValid() {
@ -28,12 +37,9 @@ func testdclstack() {
// redeclare emits a diagnostic about symbol s being redeclared at pos.
func redeclare(pos src.XPos, s *types.Sym, where string) {
if !s.Lastlineno.IsKnown() {
pkg := s.Origpkg
if pkg == nil {
pkg = s.Pkg
}
pkgName := dotImportRefs[s.Def.(*ir.Ident)]
base.ErrorfAt(pos, "%v redeclared %s\n"+
"\tprevious declaration during import %q", s, where, pkg.Path)
"\t%v: previous declaration during import %q", s, where, base.FmtPos(pkgName.Pos()), pkgName.Pkg.Path)
} else {
prevPos := s.Lastlineno
@ -46,7 +52,7 @@ func redeclare(pos src.XPos, s *types.Sym, where string) {
}
base.ErrorfAt(pos, "%v redeclared %s\n"+
"\tprevious declaration at %v", s, where, base.FmtPos(prevPos))
"\t%v: previous declaration", s, where, base.FmtPos(prevPos))
}
}
@ -78,7 +84,7 @@ func declare(n *ir.Name, ctxt ir.Class) {
if s.Name == "main" && s.Pkg.Name == "main" {
base.ErrorfAt(n.Pos(), "cannot declare main - must be func")
}
externdcl = append(externdcl, n)
Target.Externs = append(Target.Externs, n)
} else {
if Curfn == nil && ctxt == ir.PAUTO {
base.Pos = n.Pos()
@ -99,7 +105,7 @@ func declare(n *ir.Name, ctxt ir.Class) {
}
if ctxt == ir.PAUTO {
n.SetOffset(0)
n.SetFrameOffset(0)
}
if s.Block == types.Block {
@ -168,10 +174,10 @@ func variter(vl []ir.Node, t ir.Ntype, el []ir.Node) []ir.Node {
if Curfn != nil {
init = append(init, ir.Nod(ir.ODCL, v, nil))
}
e = ir.Nod(ir.OAS, v, e)
init = append(init, e)
if e.Right() != nil {
v.Defn = e
as := ir.Nod(ir.OAS, v, e)
init = append(init, as)
if e != nil {
v.Defn = as
}
}
}
@ -210,6 +216,10 @@ func symfield(s *types.Sym, typ *types.Type) *ir.Field {
// Automatically creates a new closure variable if the referenced symbol was
// declared in a different (containing) function.
func oldname(s *types.Sym) ir.Node {
if s.Pkg != types.LocalPkg {
return ir.NewIdent(base.Pos, s)
}
n := ir.AsNode(s.Def)
if n == nil {
// Maybe a top-level declaration will come along later to
@ -798,7 +808,7 @@ func makefuncsym(s *types.Sym) {
}
// setNodeNameFunc marks a node as a function.
func setNodeNameFunc(n ir.Node) {
func setNodeNameFunc(n *ir.Name) {
if n.Op() != ir.ONAME || n.Class() != ir.Pxxx {
base.Fatalf("expected ONAME/Pxxx node, got %v", n)
}
@ -849,27 +859,31 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
// important to handle it for this check, so we model it
// directly. This has to happen before transformclosure since
// it's a lot harder to work out the argument after.
for _, n := range xtop {
for _, n := range Target.Decls {
if n.Op() != ir.ODCLFUNC {
continue
}
c.curfn = n.(*ir.Func)
ir.Inspect(n, c.findExtraCalls)
ir.Visit(n, c.findExtraCalls)
}
c.curfn = nil
return c
}
func (c *nowritebarrierrecChecker) findExtraCalls(n ir.Node) bool {
if n.Op() != ir.OCALLFUNC {
return true
func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) {
if nn.Op() != ir.OCALLFUNC {
return
}
fn := n.Left()
if fn == nil || fn.Op() != ir.ONAME || fn.Class() != ir.PFUNC || fn.Name().Defn == nil {
return true
n := nn.(*ir.CallExpr)
if n.Left() == nil || n.Left().Op() != ir.ONAME {
return
}
fn := n.Left().(*ir.Name)
if fn.Class() != ir.PFUNC || fn.Name().Defn == nil {
return
}
if !isRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" {
return true
return
}
var callee *ir.Func
@ -886,7 +900,6 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n ir.Node) bool {
base.Fatalf("expected ODCLFUNC node, got %+v", callee)
}
c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()})
return true
}
// recordCall records a call from ODCLFUNC node "from", to function
@ -921,7 +934,7 @@ func (c *nowritebarrierrecChecker) check() {
// q is the queue of ODCLFUNC Nodes to visit in BFS order.
var q ir.NameQueue
for _, n := range xtop {
for _, n := range Target.Decls {
if n.Op() != ir.ODCLFUNC {
continue
}

View File

@ -17,8 +17,6 @@ import (
"strings"
)
var embedlist []ir.Node
const (
embedUnknown = iota
embedBytes
@ -26,8 +24,6 @@ const (
embedFiles
)
var numLocalEmbed int
func varEmbed(p *noder, names []ir.Node, typ ir.Ntype, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) {
haveEmbed := false
for _, decl := range p.file.DeclList {
@ -65,25 +61,39 @@ func varEmbed(p *noder, names []ir.Node, typ ir.Ntype, exprs []ir.Node, embeds [
p.errorAt(pos, "go:embed cannot apply to var without type")
return exprs
}
kind := embedKindApprox(typ)
if kind == embedUnknown {
p.errorAt(pos, "go:embed cannot apply to var of type %v", typ)
if dclcontext != ir.PEXTERN {
p.errorAt(pos, "go:embed cannot apply to var inside func")
return exprs
}
v := names[0].(*ir.Name)
Target.Embeds = append(Target.Embeds, v)
v.Embed = new([]ir.Embed)
for _, e := range embeds {
*v.Embed = append(*v.Embed, ir.Embed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns})
}
return exprs
}
func embedFileList(v *ir.Name) []string {
kind := embedKind(v.Type())
if kind == embedUnknown {
base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
return nil
}
// Build list of files to store.
have := make(map[string]bool)
var list []string
for _, e := range embeds {
for _, e := range *v.Embed {
for _, pattern := range e.Patterns {
files, ok := base.Flag.Cfg.Embed.Patterns[pattern]
if !ok {
p.errorAt(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
base.ErrorfAt(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
}
for _, file := range files {
if base.Flag.Cfg.Embed.Files[file] == "" {
p.errorAt(e.Pos, "invalid go:embed: build system did not map file: %s", file)
base.ErrorfAt(e.Pos, "invalid go:embed: build system did not map file: %s", file)
continue
}
if !have[file] {
@ -105,25 +115,12 @@ func varEmbed(p *noder, names []ir.Node, typ ir.Ntype, exprs []ir.Node, embeds [
if kind == embedString || kind == embedBytes {
if len(list) > 1 {
p.errorAt(pos, "invalid go:embed: multiple files for type %v", typ)
return exprs
base.ErrorfAt(v.Pos(), "invalid go:embed: multiple files for type %v", v.Type())
return nil
}
}
v := names[0]
if dclcontext != ir.PEXTERN {
numLocalEmbed++
v = ir.NewNameAt(v.Pos(), lookupN("embed.", numLocalEmbed))
v.Sym().Def = v
v.Name().Ntype = typ
v.SetClass(ir.PEXTERN)
externdcl = append(externdcl, v)
exprs = []ir.Node{v}
}
v.Name().SetEmbedFiles(list)
embedlist = append(embedlist, v)
return exprs
return list
}
// embedKindApprox determines the kind of embedding variable, approximately.
@ -187,15 +184,15 @@ func embedFileLess(x, y string) bool {
}
func dumpembeds() {
for _, v := range embedlist {
for _, v := range Target.Embeds {
initEmbed(v)
}
}
// initEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
func initEmbed(v ir.Node) {
files := v.Name().EmbedFiles()
func initEmbed(v *ir.Name) {
files := embedFileList(v)
switch kind := embedKind(v.Type()); kind {
case embedUnknown:
base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())

View File

@ -225,9 +225,10 @@ func (e *Escape) walkFunc(fn *ir.Func) {
fn.SetEsc(EscFuncStarted)
// Identify labels that mark the head of an unstructured loop.
ir.InspectList(fn.Body(), func(n ir.Node) bool {
ir.Visit(fn, func(n ir.Node) {
switch n.Op() {
case ir.OLABEL:
n := n.(*ir.LabelStmt)
if e.labels == nil {
e.labels = make(map[*types.Sym]labelState)
}
@ -236,12 +237,11 @@ func (e *Escape) walkFunc(fn *ir.Func) {
case ir.OGOTO:
// If we visited the label before the goto,
// then this is a looping label.
n := n.(*ir.BranchStmt)
if e.labels[n.Sym()] == nonlooping {
e.labels[n.Sym()] = looping
}
}
return true
})
e.curfn = fn
@ -307,15 +307,18 @@ func (e *Escape) stmt(n ir.Node) {
// TODO(mdempsky): Handle dead code?
case ir.OBLOCK:
n := n.(*ir.BlockStmt)
e.stmts(n.List())
case ir.ODCL:
// Record loop depth at declaration.
n := n.(*ir.Decl)
if !ir.IsBlank(n.Left()) {
e.dcl(n.Left())
}
case ir.OLABEL:
n := n.(*ir.LabelStmt)
switch e.labels[n.Sym()] {
case nonlooping:
if base.Flag.LowerM > 2 {
@ -332,11 +335,13 @@ func (e *Escape) stmt(n ir.Node) {
delete(e.labels, n.Sym())
case ir.OIF:
n := n.(*ir.IfStmt)
e.discard(n.Left())
e.block(n.Body())
e.block(n.Rlist())
case ir.OFOR, ir.OFORUNTIL:
n := n.(*ir.ForStmt)
e.loopDepth++
e.discard(n.Left())
e.stmt(n.Right())
@ -345,6 +350,7 @@ func (e *Escape) stmt(n ir.Node) {
case ir.ORANGE:
// for List = range Right { Nbody }
n := n.(*ir.RangeStmt)
e.loopDepth++
ks := e.addrs(n.List())
e.block(n.Body())
@ -362,11 +368,13 @@ func (e *Escape) stmt(n ir.Node) {
e.expr(e.later(k), n.Right())
case ir.OSWITCH:
n := n.(*ir.SwitchStmt)
typesw := n.Left() != nil && n.Left().Op() == ir.OTYPESW
var ks []EscHole
for _, cas := range n.List().Slice() { // cases
if typesw && n.Left().Left() != nil {
cas := cas.(*ir.CaseStmt)
if typesw && n.Left().(*ir.TypeSwitchGuard).Left() != nil {
cv := cas.Rlist().First()
k := e.dcl(cv) // type switch variables have no ODCL.
if cv.Type().HasPointers() {
@ -379,50 +387,62 @@ func (e *Escape) stmt(n ir.Node) {
}
if typesw {
e.expr(e.teeHole(ks...), n.Left().Right())
e.expr(e.teeHole(ks...), n.Left().(*ir.TypeSwitchGuard).Right())
} else {
e.discard(n.Left())
}
case ir.OSELECT:
n := n.(*ir.SelectStmt)
for _, cas := range n.List().Slice() {
cas := cas.(*ir.CaseStmt)
e.stmt(cas.Left())
e.block(cas.Body())
}
case ir.OSELRECV:
e.assign(n.Left(), n.Right(), "selrecv", n)
case ir.OSELRECV2:
n := n.(*ir.AssignListStmt)
e.assign(n.List().First(), n.Rlist().First(), "selrecv", n)
e.assign(n.List().Second(), nil, "selrecv", n)
case ir.ORECV:
// TODO(mdempsky): Consider e.discard(n.Left).
n := n.(*ir.UnaryExpr)
e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
case ir.OSEND:
n := n.(*ir.SendStmt)
e.discard(n.Left())
e.assignHeap(n.Right(), "send", n)
case ir.OAS, ir.OASOP:
case ir.OAS:
n := n.(*ir.AssignStmt)
e.assign(n.Left(), n.Right(), "assign", n)
case ir.OASOP:
n := n.(*ir.AssignOpStmt)
e.assign(n.Left(), n.Right(), "assign", n)
case ir.OAS2:
n := n.(*ir.AssignListStmt)
for i, nl := range n.List().Slice() {
e.assign(nl, n.Rlist().Index(i), "assign-pair", n)
}
case ir.OAS2DOTTYPE: // v, ok = x.(type)
n := n.(*ir.AssignListStmt)
e.assign(n.List().First(), n.Rlist().First(), "assign-pair-dot-type", n)
e.assign(n.List().Second(), nil, "assign-pair-dot-type", n)
case ir.OAS2MAPR: // v, ok = m[k]
n := n.(*ir.AssignListStmt)
e.assign(n.List().First(), n.Rlist().First(), "assign-pair-mapr", n)
e.assign(n.List().Second(), nil, "assign-pair-mapr", n)
case ir.OAS2RECV: // v, ok = <-ch
n := n.(*ir.AssignListStmt)
e.assign(n.List().First(), n.Rlist().First(), "assign-pair-receive", n)
e.assign(n.List().Second(), nil, "assign-pair-receive", n)
case ir.OAS2FUNC:
n := n.(*ir.AssignListStmt)
e.stmts(n.Rlist().First().Init())
e.call(e.addrs(n.List()), n.Rlist().First(), nil)
case ir.ORETURN:
n := n.(*ir.ReturnStmt)
results := e.curfn.Type().Results().FieldSlice()
for i, v := range n.List().Slice() {
e.assign(ir.AsNode(results[i].Nname), v, "return", n)
@ -430,6 +450,7 @@ func (e *Escape) stmt(n ir.Node) {
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
e.call(nil, n, nil)
case ir.OGO, ir.ODEFER:
n := n.(*ir.GoDeferStmt)
e.stmts(n.Left().Init())
e.call(nil, n.Left(), n)
@ -474,7 +495,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
uintptrEscapesHack := k.uintptrEscapesHack
k.uintptrEscapesHack = false
if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.Left().Type().IsUnsafePtr() {
if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).Left().Type().IsUnsafePtr() {
// nop
} else if k.derefs >= 0 && !n.Type().HasPointers() {
k = e.discardHole()
@ -488,28 +509,44 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
// nop
case ir.ONAME:
n := n.(*ir.Name)
if n.Class() == ir.PFUNC || n.Class() == ir.PEXTERN {
return
}
e.flow(k, e.oldLoc(n))
case ir.ONAMEOFFSET:
n := n.(*ir.NameOffsetExpr)
e.expr(k, n.Name_)
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
n := n.(*ir.UnaryExpr)
e.discard(n.Left())
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE, ir.OANDAND, ir.OOROR:
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
n := n.(*ir.BinaryExpr)
e.discard(n.Left())
e.discard(n.Right())
case ir.OANDAND, ir.OOROR:
n := n.(*ir.LogicalExpr)
e.discard(n.Left())
e.discard(n.Right())
case ir.OADDR:
n := n.(*ir.AddrExpr)
e.expr(k.addr(n, "address-of"), n.Left()) // "address-of"
case ir.ODEREF:
n := n.(*ir.StarExpr)
e.expr(k.deref(n, "indirection"), n.Left()) // "indirection"
case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
n := n.(*ir.SelectorExpr)
e.expr(k.note(n, "dot"), n.Left())
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
e.expr(k.deref(n, "dot of pointer"), n.Left()) // "dot of pointer"
case ir.ODOTTYPE, ir.ODOTTYPE2:
n := n.(*ir.TypeAssertExpr)
e.expr(k.dotType(n.Type(), n, "dot"), n.Left())
case ir.OINDEX:
n := n.(*ir.IndexExpr)
if n.Left().Type().IsArray() {
e.expr(k.note(n, "fixed-array-index-of"), n.Left())
} else {
@ -518,9 +555,11 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
}
e.discard(n.Right())
case ir.OINDEXMAP:
n := n.(*ir.IndexExpr)
e.discard(n.Left())
e.discard(n.Right())
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
n := n.(*ir.SliceExpr)
e.expr(k.note(n, "slice"), n.Left())
low, high, max := n.SliceBounds()
e.discard(low)
@ -528,6 +567,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
e.discard(max)
case ir.OCONV, ir.OCONVNOP:
n := n.(*ir.ConvExpr)
if checkPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.Left().Type().IsPtr() {
// When -d=checkptr=2 is enabled, treat
// conversions to unsafe.Pointer as an
@ -542,27 +582,33 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
e.expr(k, n.Left())
}
case ir.OCONVIFACE:
n := n.(*ir.ConvExpr)
if !n.Left().Type().IsInterface() && !isdirectiface(n.Left().Type()) {
k = e.spill(k, n)
}
e.expr(k.note(n, "interface-converted"), n.Left())
case ir.ORECV:
n := n.(*ir.UnaryExpr)
e.discard(n.Left())
case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY:
e.call([]EscHole{k}, n, nil)
case ir.ONEW:
n := n.(*ir.UnaryExpr)
e.spill(k, n)
case ir.OMAKESLICE:
n := n.(*ir.MakeExpr)
e.spill(k, n)
e.discard(n.Left())
e.discard(n.Right())
case ir.OMAKECHAN:
n := n.(*ir.MakeExpr)
e.discard(n.Left())
case ir.OMAKEMAP:
n := n.(*ir.MakeExpr)
e.spill(k, n)
e.discard(n.Left())
@ -573,6 +619,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
// Flow the receiver argument to both the closure and
// to the receiver parameter.
n := n.(*ir.CallPartExpr)
closureK := e.spill(k, n)
m := callpartMethod(n)
@ -593,37 +640,43 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
e.expr(e.teeHole(paramK, closureK), n.Left())
case ir.OPTRLIT:
n := n.(*ir.AddrExpr)
e.expr(e.spill(k, n), n.Left())
case ir.OARRAYLIT:
n := n.(*ir.CompLitExpr)
for _, elt := range n.List().Slice() {
if elt.Op() == ir.OKEY {
elt = elt.Right()
elt = elt.(*ir.KeyExpr).Right()
}
e.expr(k.note(n, "array literal element"), elt)
}
case ir.OSLICELIT:
n := n.(*ir.CompLitExpr)
k = e.spill(k, n)
k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
for _, elt := range n.List().Slice() {
if elt.Op() == ir.OKEY {
elt = elt.Right()
elt = elt.(*ir.KeyExpr).Right()
}
e.expr(k.note(n, "slice-literal-element"), elt)
}
case ir.OSTRUCTLIT:
n := n.(*ir.CompLitExpr)
for _, elt := range n.List().Slice() {
e.expr(k.note(n, "struct literal element"), elt.Left())
e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Left())
}
case ir.OMAPLIT:
n := n.(*ir.CompLitExpr)
e.spill(k, n)
// Map keys and values are always stored in the heap.
for _, elt := range n.List().Slice() {
elt := elt.(*ir.KeyExpr)
e.assignHeap(elt.Left(), "map literal key", n)
e.assignHeap(elt.Right(), "map literal value", n)
}
@ -642,10 +695,12 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
}
case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
n := n.(*ir.ConvExpr)
e.spill(k, n)
e.discard(n.Left())
case ir.OADDSTR:
n := n.(*ir.AddStringExpr)
e.spill(k, n)
// Arguments of OADDSTR never escape;
@ -665,23 +720,28 @@ func (e *Escape) unsafeValue(k EscHole, n ir.Node) {
switch n.Op() {
case ir.OCONV, ir.OCONVNOP:
n := n.(*ir.ConvExpr)
if n.Left().Type().IsUnsafePtr() {
e.expr(k, n.Left())
} else {
e.discard(n.Left())
}
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
if isReflectHeaderDataField(n) {
e.expr(k.deref(n, "reflect.Header.Data"), n.Left())
} else {
e.discard(n.Left())
}
case ir.OPLUS, ir.ONEG, ir.OBITNOT:
n := n.(*ir.UnaryExpr)
e.unsafeValue(k, n.Left())
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
n := n.(*ir.BinaryExpr)
e.unsafeValue(k, n.Left())
e.unsafeValue(k, n.Right())
case ir.OLSH, ir.ORSH:
n := n.(*ir.BinaryExpr)
e.unsafeValue(k, n.Left())
// RHS need not be uintptr-typed (#32959) and can't meaningfully
// flow pointers anyway.
@ -717,13 +777,19 @@ func (e *Escape) addr(n ir.Node) EscHole {
default:
base.Fatalf("unexpected addr: %v", n)
case ir.ONAME:
n := n.(*ir.Name)
if n.Class() == ir.PEXTERN {
break
}
k = e.oldLoc(n).asHole()
case ir.ONAMEOFFSET:
n := n.(*ir.NameOffsetExpr)
e.addr(n.Name_)
case ir.ODOT:
n := n.(*ir.SelectorExpr)
k = e.addr(n.Left())
case ir.OINDEX:
n := n.(*ir.IndexExpr)
e.discard(n.Right())
if n.Left().Type().IsArray() {
k = e.addr(n.Left())
@ -733,6 +799,7 @@ func (e *Escape) addr(n ir.Node) EscHole {
case ir.ODEREF, ir.ODOTPTR:
e.discard(n)
case ir.OINDEXMAP:
n := n.(*ir.IndexExpr)
e.discard(n.Left())
e.assignHeap(n.Right(), "key of map put", n)
}
@ -805,6 +872,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) {
base.Fatalf("unexpected call op: %v", call.Op())
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
call := call.(*ir.CallExpr)
fixVariadicCall(call)
// Pick out the function callee, if statically known.
@ -812,7 +880,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) {
switch call.Op() {
case ir.OCALLFUNC:
switch v := staticValue(call.Left()); {
case v.Op() == ir.ONAME && v.Class() == ir.PFUNC:
case v.Op() == ir.ONAME && v.(*ir.Name).Class() == ir.PFUNC:
fn = v.(*ir.Name)
case v.Op() == ir.OCLOSURE:
fn = v.Func().Nname
@ -833,7 +901,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) {
}
if r := fntype.Recv(); r != nil {
argument(e.tagHole(ks, fn, r), call.Left().Left())
argument(e.tagHole(ks, fn, r), call.Left().(*ir.SelectorExpr).Left())
} else {
// Evaluate callee function expression.
argument(e.discardHole(), call.Left())
@ -845,6 +913,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) {
}
case ir.OAPPEND:
call := call.(*ir.CallExpr)
args := call.List().Slice()
// Appendee slice may flow directly to the result, if
@ -870,6 +939,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) {
}
case ir.OCOPY:
call := call.(*ir.BinaryExpr)
argument(e.discardHole(), call.Left())
copiedK := e.discardHole()
@ -879,16 +949,20 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) {
argument(copiedK, call.Right())
case ir.OPANIC:
call := call.(*ir.UnaryExpr)
argument(e.heapHole(), call.Left())
case ir.OCOMPLEX:
call := call.(*ir.BinaryExpr)
argument(e.discardHole(), call.Left())
argument(e.discardHole(), call.Right())
case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
call := call.(*ir.CallExpr)
for _, arg := range call.List().Slice() {
argument(e.discardHole(), arg)
}
case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
call := call.(*ir.UnaryExpr)
argument(e.discardHole(), call.Left())
}
}
@ -1084,6 +1158,7 @@ func (e *Escape) newLoc(n ir.Node, transient bool) *EscLocation {
e.allLocs = append(e.allLocs, loc)
if n != nil {
if n.Op() == ir.ONAME && n.Name().Curfn != e.curfn {
n := n.(*ir.Name)
base.Fatalf("curfn mismatch: %v != %v", n.Name().Curfn, e.curfn)
}
@ -1468,14 +1543,24 @@ func (e *Escape) finish(fns []*ir.Func) {
}
n.SetEsc(EscNone)
if loc.transient {
n.SetTransient(true)
switch n.Op() {
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
n.SetTransient(true)
case ir.OCALLPART:
n := n.(*ir.CallPartExpr)
n.SetTransient(true)
case ir.OSLICELIT:
n := n.(*ir.CompLitExpr)
n.SetTransient(true)
}
}
}
}
}
func (l *EscLocation) isName(c ir.Class) bool {
return l.n != nil && l.n.Op() == ir.ONAME && l.n.Class() == c
return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class() == c
}
const numEscResults = 7
@ -1638,7 +1723,18 @@ func isSliceSelfAssign(dst, src ir.Node) bool {
// when we evaluate it for dst and for src.
// dst is ONAME dereference.
if dst.Op() != ir.ODEREF && dst.Op() != ir.ODOTPTR || dst.Left().Op() != ir.ONAME {
var dstX ir.Node
switch dst.Op() {
default:
return false
case ir.ODEREF:
dst := dst.(*ir.StarExpr)
dstX = dst.Left()
case ir.ODOTPTR:
dst := dst.(*ir.SelectorExpr)
dstX = dst.Left()
}
if dstX.Op() != ir.ONAME {
return false
}
// src is a slice operation.
@ -1655,6 +1751,7 @@ func isSliceSelfAssign(dst, src ir.Node) bool {
// Pointer to an array is OK since it's not stored inside b directly.
// For slicing an array (not pointer to array), there is an implicit OADDR.
// We check that to determine non-pointer array slicing.
src := src.(*ir.SliceExpr)
if src.Left().Op() == ir.OADDR {
return false
}
@ -1662,11 +1759,22 @@ func isSliceSelfAssign(dst, src ir.Node) bool {
return false
}
// slice is applied to ONAME dereference.
if src.Left().Op() != ir.ODEREF && src.Left().Op() != ir.ODOTPTR || src.Left().Left().Op() != ir.ONAME {
var baseX ir.Node
switch base := src.(*ir.SliceExpr).Left(); base.Op() {
default:
return false
case ir.ODEREF:
base := base.(*ir.StarExpr)
baseX = base.Left()
case ir.ODOTPTR:
base := base.(*ir.SelectorExpr)
baseX = base.Left()
}
if baseX.Op() != ir.ONAME {
return false
}
// dst and src reference the same base ONAME.
return dst.Left() == src.Left().Left()
return dstX.(*ir.Name) == baseX.(*ir.Name)
}
// isSelfAssign reports whether assignment from src to dst can
@ -1690,19 +1798,23 @@ func isSelfAssign(dst, src ir.Node) bool {
return false
}
// The expression prefix must be both "safe" and identical.
switch dst.Op() {
case ir.ODOT, ir.ODOTPTR:
// Safe trailing accessors that are permitted to differ.
dst := dst.(*ir.SelectorExpr)
src := src.(*ir.SelectorExpr)
return samesafeexpr(dst.Left(), src.Left())
case ir.OINDEX:
dst := dst.(*ir.IndexExpr)
src := src.(*ir.IndexExpr)
if mayAffectMemory(dst.Right()) || mayAffectMemory(src.Right()) {
return false
}
return samesafeexpr(dst.Left(), src.Left())
default:
return false
}
// The expression prefix must be both "safe" and identical.
return samesafeexpr(dst.Left(), src.Left())
}
// mayAffectMemory reports whether evaluation of n may affect the program's
@ -1715,17 +1827,36 @@ func mayAffectMemory(n ir.Node) bool {
//
// We're ignoring things like division by zero, index out of range,
// and nil pointer dereference here.
// TODO(rsc): It seems like it should be possible to replace this with
// an ir.Any looking for any op that's not the ones in the case statement.
// But that produces changes in the compiled output detected by buildall.
switch n.Op() {
case ir.ONAME, ir.OCLOSUREREAD, ir.OLITERAL, ir.ONIL:
return false
// Left+Right group.
case ir.OINDEX, ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
n := n.(*ir.BinaryExpr)
return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right())
// Left group.
case ir.ODOT, ir.ODOTPTR, ir.ODEREF, ir.OCONVNOP, ir.OCONV, ir.OLEN, ir.OCAP,
ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
case ir.OINDEX:
n := n.(*ir.IndexExpr)
return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right())
case ir.OCONVNOP, ir.OCONV:
n := n.(*ir.ConvExpr)
return mayAffectMemory(n.Left())
case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
n := n.(*ir.UnaryExpr)
return mayAffectMemory(n.Left())
case ir.ODOT, ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
return mayAffectMemory(n.Left())
case ir.ODEREF:
n := n.(*ir.StarExpr)
return mayAffectMemory(n.Left())
default:
@ -1741,8 +1872,11 @@ func heapAllocReason(n ir.Node) string {
}
// Parameters are always passed via the stack.
if n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) {
return ""
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
return ""
}
}
if n.Type().Width > maxStackVarSize {
@ -1756,11 +1890,12 @@ func heapAllocReason(n ir.Node) string {
if n.Op() == ir.OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
return "too large for stack"
}
if n.Op() == ir.OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
if n.Op() == ir.OCALLPART && partialCallType(n.(*ir.CallPartExpr)).Size() >= maxImplicitStackVarSize {
return "too large for stack"
}
if n.Op() == ir.OMAKESLICE {
n := n.(*ir.MakeExpr)
r := n.Right()
if r == nil {
r = n.Left()
@ -1835,10 +1970,20 @@ func addrescapes(n ir.Node) {
// In &x[0], if x is a slice, then x does not
// escape--the pointer inside x does, but that
// is always a heap pointer anyway.
case ir.ODOT, ir.OINDEX, ir.OPAREN, ir.OCONVNOP:
case ir.ODOT:
n := n.(*ir.SelectorExpr)
addrescapes(n.Left())
case ir.OINDEX:
n := n.(*ir.IndexExpr)
if !n.Left().Type().IsSlice() {
addrescapes(n.Left())
}
case ir.OPAREN:
n := n.(*ir.ParenExpr)
addrescapes(n.Left())
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
addrescapes(n.Left())
}
}
@ -1859,7 +2004,6 @@ func moveToHeap(n *ir.Name) {
// temp will add it to the function declaration list automatically.
heapaddr := temp(types.NewPtr(n.Type()))
heapaddr.SetSym(lookup("&" + n.Sym().Name))
ir.Orig(heapaddr).SetSym(heapaddr.Sym())
heapaddr.SetPos(n.Pos())
// Unset AutoTemp to persist the &foo variable name through SSA to
@ -1871,7 +2015,7 @@ func moveToHeap(n *ir.Name) {
// in addition to the copy in the heap that may live longer than
// the function.
if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
if n.Offset() == types.BADWIDTH {
if n.FrameOffset() == types.BADWIDTH {
base.Fatalf("addrescapes before param assignment")
}
@ -1881,7 +2025,7 @@ func moveToHeap(n *ir.Name) {
// so that analyses of the local (on-stack) variables use it.
stackcopy := NewName(n.Sym())
stackcopy.SetType(n.Type())
stackcopy.SetOffset(n.Offset())
stackcopy.SetFrameOffset(n.FrameOffset())
stackcopy.SetClass(n.Class())
stackcopy.Heapaddr = heapaddr
if n.Class() == ir.PPARAMOUT {
@ -1918,7 +2062,7 @@ func moveToHeap(n *ir.Name) {
// Modify n in place so that uses of n now mean indirection of the heapaddr.
n.SetClass(ir.PAUTOHEAP)
n.SetOffset(0)
n.SetFrameOffset(0)
n.Heapaddr = heapaddr
n.SetEsc(EscHeap)
if base.Flag.LowerM != 0 {
@ -1935,7 +2079,7 @@ const unsafeUintptrTag = "unsafe-uintptr"
// marked go:uintptrescapes.
const uintptrEscapesTag = "uintptr-escapes"
func (e *Escape) paramTag(fn ir.Node, narg int, f *types.Field) string {
func (e *Escape) paramTag(fn *ir.Func, narg int, f *types.Field) string {
name := func() string {
if f.Sym != nil {
return f.Sym.Name

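Throughout the escape-analysis hunks above, code that previously went through the generic ir.Node accessors now checks the Op first and asserts to the concrete expression type (ir.BinaryExpr, ir.UnaryExpr, ir.SelectorExpr, and so on). A minimal, self-contained sketch of that pattern, using toy types invented here rather than the compiler's real ir package:

	package main

	import "fmt"

	// Op identifies the kind of a toy IR node.
	type Op int

	const (
		ONAME Op = iota
		OADD
		ONEG
		OCALL
	)

	// Node is the generic interface; the operands live on concrete types.
	type Node interface{ Op() Op }

	type Name struct{}

	func (*Name) Op() Op { return ONAME }

	type BinaryExpr struct {
		op   Op
		X, Y Node
	}

	func (n *BinaryExpr) Op() Op { return n.op }

	type UnaryExpr struct {
		op Op
		X  Node
	}

	func (n *UnaryExpr) Op() Op { return n.op }

	type CallExpr struct{ Fun Node }

	func (*CallExpr) Op() Op { return OCALL }

	// mayAffectMemory mirrors the pattern in the hunks above: switch on
	// Op, assert once to the concrete node type, then recurse through
	// typed fields instead of generic Left/Right accessors.
	func mayAffectMemory(n Node) bool {
		switch n.Op() {
		case ONAME:
			return false
		case OADD:
			n := n.(*BinaryExpr)
			return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
		case ONEG:
			n := n.(*UnaryExpr)
			return mayAffectMemory(n.X)
		default: // calls and anything unrecognized may write to memory
			return true
		}
	}

	func main() {
		e := &BinaryExpr{op: OADD,
			X: &UnaryExpr{op: ONEG, X: &Name{}},
			Y: &CallExpr{Fun: &Name{}},
		}
		fmt.Println(mayAffectMemory(e)) // true, because of the call
	}
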

@ -21,8 +21,6 @@ func exportf(bout *bio.Writer, format string, args ...interface{}) {
}
}
var asmlist []ir.Node
// exportsym marks n for export (or reexport).
func exportsym(n *ir.Name) {
if n.Sym().OnExportList() {
@ -34,7 +32,7 @@ func exportsym(n *ir.Name) {
fmt.Printf("export symbol %v\n", n.Sym())
}
exportlist = append(exportlist, n)
Target.Exports = append(Target.Exports, n)
}
func initname(s string) bool {
@ -57,11 +55,16 @@ func autoexport(n *ir.Name, ctxt ir.Class) {
}
if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
n.Sym().SetAsm(true)
asmlist = append(asmlist, n)
Target.Asms = append(Target.Asms, n)
}
}
func dumpexport(bout *bio.Writer) {
p := &exporter{marked: make(map[*types.Type]bool)}
for _, n := range Target.Exports {
p.markObject(n)
}
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
exportf(bout, "\n$$B\n") // indicate binary export format
off := bout.Offset()
@ -74,7 +77,7 @@ func dumpexport(bout *bio.Writer) {
}
}
func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) ir.Node {
func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) *ir.Name {
n := ir.AsNode(s.PkgDef())
if n == nil {
// iimport should have created a stub ONONAME
@ -92,7 +95,7 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) ir.Node {
if n.Op() != ir.ONONAME && n.Op() != op {
redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return n
return n.(*ir.Name)
}
// importtype returns the named type declared by symbol s.
@ -102,7 +105,6 @@ func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
n := importsym(ipkg, s, ir.OTYPE)
if n.Op() != ir.OTYPE {
t := types.NewNamed(n)
n.SetOp(ir.OTYPE)
n.SetPos(pos)
n.SetType(t)
@ -121,7 +123,7 @@ func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) ir.Node {
n := importsym(ipkg, s, op)
if n.Op() != ir.ONONAME {
if n.Op() == op && (n.Class() != ctxt || !types.Identical(n.Type(), t)) {
if n.Op() == op && (op == ir.ONAME && n.Class() != ctxt || !types.Identical(n.Type(), t)) {
redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return nil
@ -203,7 +205,7 @@ func dumpasmhdr() {
base.Fatalf("%v", err)
}
fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
for _, n := range asmlist {
for _, n := range Target.Asms {
if n.Sym().IsBlank() {
continue
}


@ -31,13 +31,21 @@ func sysvar(name string) *obj.LSym {
// isParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
func isParamStackCopy(n ir.Node) bool {
return n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name().Heapaddr != nil
if n.Op() != ir.ONAME {
return false
}
name := n.(*ir.Name)
return (name.Class() == ir.PPARAM || name.Class() == ir.PPARAMOUT) && name.Heapaddr != nil
}
// isParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
func isParamHeapCopy(n ir.Node) bool {
return n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name().Stackcopy != nil
if n.Op() != ir.ONAME {
return false
}
name := n.(*ir.Name)
return name.Class() == ir.PAUTOHEAP && name.Name().Stackcopy != nil
}
// autotmpname returns the name for an autotmp variable numbered n.


@ -118,22 +118,19 @@ var (
okforadd [types.NTYPE]bool
okforand [types.NTYPE]bool
okfornone [types.NTYPE]bool
okforcmp [types.NTYPE]bool
okforbool [types.NTYPE]bool
okforcap [types.NTYPE]bool
okforlen [types.NTYPE]bool
okforarith [types.NTYPE]bool
)
var okforcmp [types.NTYPE]bool
var (
okfor [ir.OEND][]bool
iscmp [ir.OEND]bool
)
var xtop []ir.Node
var exportlist []*ir.Name
var importlist []*ir.Func // imported functions and methods with inlinable bodies
var (
@ -155,9 +152,6 @@ var typecheckok bool
// when the race detector is enabled.
var instrumenting bool
// Whether we are tracking lexical scopes for DWARF.
var trackScopes bool
var nodfp *ir.Name
var autogeneratedPos src.XPos
@ -194,8 +188,8 @@ type Arch struct {
var thearch Arch
var (
staticuint64s,
zerobase ir.Node
staticuint64s *ir.Name
zerobase *ir.Name
assertE2I,
assertE2I2,


@ -321,15 +321,9 @@ func ggloblsym(s *obj.LSym, width int32, flags int16) {
}
func Addrconst(a *obj.Addr, v int64) {
a.Sym = nil
a.Type = obj.TYPE_CONST
a.Offset = v
a.SetConst(v)
}
func Patch(p *obj.Prog, to *obj.Prog) {
if p.To.Type != obj.TYPE_BRANCH {
base.Fatalf("patch: not a branch")
}
p.To.SetTarget(to)
p.To.Offset = to.Pc
}


@ -246,16 +246,6 @@ const (
)
func iexport(out *bufio.Writer) {
// Mark inline bodies that are reachable through exported objects.
// (Phase 0 of bexport.go.)
{
// TODO(mdempsky): Separate from bexport logic.
p := &exporter{marked: make(map[*types.Type]bool)}
for _, n := range exportlist {
p.markObject(n)
}
}
p := iexporter{
allPkgs: map[*types.Pkg]bool{},
stringIndex: map[string]uint64{},
@ -272,7 +262,7 @@ func iexport(out *bufio.Writer) {
}
// Initialize work queue with exported declarations.
for _, n := range exportlist {
for _, n := range Target.Exports {
p.pushDecl(n)
}
@ -1069,7 +1059,7 @@ func (w *exportWriter) stmt(n ir.Node) {
}
}
switch op := n.Op(); op {
switch n.Op() {
case ir.OBLOCK:
// No OBLOCK in export data.
// Inline content into this statement list,
@ -1084,7 +1074,7 @@ func (w *exportWriter) stmt(n ir.Node) {
case ir.ODCL:
w.op(ir.ODCL)
w.pos(n.Left().Pos())
w.localName(n.Left())
w.localName(n.Left().(*ir.Name))
w.typ(n.Left().Type())
case ir.OAS:
@ -1099,9 +1089,10 @@ func (w *exportWriter) stmt(n ir.Node) {
}
case ir.OASOP:
n := n.(*ir.AssignOpStmt)
w.op(ir.OASOP)
w.pos(n.Pos())
w.op(n.SubOp())
w.op(n.AsOp)
w.expr(n.Left())
if w.bool(!n.Implicit()) {
w.expr(n.Right())
@ -1122,7 +1113,7 @@ func (w *exportWriter) stmt(n ir.Node) {
// unreachable - generated by compiler for trampoline routines
case ir.OGO, ir.ODEFER:
w.op(op)
w.op(n.Op())
w.pos(n.Pos())
w.expr(n.Left())
@ -1148,8 +1139,15 @@ func (w *exportWriter) stmt(n ir.Node) {
w.expr(n.Right())
w.stmtList(n.Body())
case ir.OSELECT, ir.OSWITCH:
w.op(op)
case ir.OSELECT:
w.op(n.Op())
w.pos(n.Pos())
w.stmtList(n.Init())
w.exprsOrNil(nil, nil) // TODO(rsc): Delete (and fix importer).
w.caseList(n)
case ir.OSWITCH:
w.op(n.Op())
w.pos(n.Pos())
w.stmtList(n.Init())
w.exprsOrNil(n.Left(), nil)
@ -1163,7 +1161,7 @@ func (w *exportWriter) stmt(n ir.Node) {
w.pos(n.Pos())
case ir.OBREAK, ir.OCONTINUE, ir.OGOTO, ir.OLABEL:
w.op(op)
w.op(n.Op())
w.pos(n.Pos())
label := ""
if sym := n.Sym(); sym != nil {
@ -1176,19 +1174,34 @@ func (w *exportWriter) stmt(n ir.Node) {
}
}
func (w *exportWriter) caseList(sw ir.Node) {
namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil
func isNamedTypeSwitch(n ir.Node) bool {
if n.Op() != ir.OSWITCH {
return false
}
sw := n.(*ir.SwitchStmt)
if sw.Left() == nil || sw.Left().Op() != ir.OTYPESW {
return false
}
guard := sw.Left().(*ir.TypeSwitchGuard)
return guard.Left() != nil
}
cases := sw.List().Slice()
func (w *exportWriter) caseList(sw ir.Node) {
namedTypeSwitch := isNamedTypeSwitch(sw)
var cases []ir.Node
if sw.Op() == ir.OSWITCH {
cases = sw.(*ir.SwitchStmt).List().Slice()
} else {
cases = sw.(*ir.SelectStmt).List().Slice()
}
w.uint64(uint64(len(cases)))
for _, cas := range cases {
if cas.Op() != ir.OCASE {
base.Fatalf("expected OCASE, got %v", cas)
}
cas := cas.(*ir.CaseStmt)
w.pos(cas.Pos())
w.stmtList(cas.List())
if namedTypeSwitch {
w.localName(cas.Rlist().First())
w.localName(cas.Rlist().First().(*ir.Name))
}
w.stmtList(cas.Body())
}
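
isNamedTypeSwitch above distinguishes the named form "switch x := v.(type)", whose cases each bind a variable, from the unnamed form. The same distinction is visible through the exported go/ast API, where a named guard is an assignment rather than a bare expression; a small sketch using only the standard library (an analogy, not the exporter's own representation):

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
	)

	const src = `package p

	func f(v interface{}) {
		switch x := v.(type) { // named: binds x in each case
		case int:
			_ = x
		}
		switch v.(type) { // unnamed
		}
	}`

	func main() {
		fset := token.NewFileSet()
		file, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		ast.Inspect(file, func(n ast.Node) bool {
			sw, ok := n.(*ast.TypeSwitchStmt)
			if !ok {
				return true
			}
			// In the named form the guard "x := v.(type)" is an
			// *ast.AssignStmt; the unnamed form is an *ast.ExprStmt.
			_, named := sw.Assign.(*ast.AssignStmt)
			fmt.Printf("type switch at %v named=%v\n", fset.Position(sw.Pos()), named)
			return true
		})
	}
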
@ -1201,22 +1214,29 @@ func (w *exportWriter) exprList(list ir.Nodes) {
w.op(ir.OEND)
}
func (w *exportWriter) expr(n ir.Node) {
// from nodefmt (fmt.go)
//
// nodefmt reverts nodes back to their original - we don't need to do
// it because we are not bound to produce valid Go syntax when exporting
//
// if (fmtmode != FExp || n.Op != OLITERAL) && n.Orig != nil {
// n = n.Orig
// }
// from exprfmt (fmt.go)
for n.Op() == ir.OPAREN || n.Implicit() && (n.Op() == ir.ODEREF || n.Op() == ir.OADDR || n.Op() == ir.ODOT || n.Op() == ir.ODOTPTR) {
n = n.Left()
func simplifyForExport(n ir.Node) ir.Node {
switch n.Op() {
case ir.OPAREN:
return simplifyForExport(n.Left())
case ir.ODEREF:
if n.Implicit() {
return simplifyForExport(n.Left())
}
case ir.OADDR:
if n.Implicit() {
return simplifyForExport(n.Left())
}
case ir.ODOT, ir.ODOTPTR:
if n.Implicit() {
return simplifyForExport(n.Left())
}
}
return n
}
switch op := n.Op(); op {
func (w *exportWriter) expr(n ir.Node) {
n = simplifyForExport(n)
switch n.Op() {
// expressions
// (somewhat closely following the structure of exprfmt in fmt.go)
case ir.ONIL:
@ -1236,13 +1256,16 @@ func (w *exportWriter) expr(n ir.Node) {
// Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
// but for export, this should be rendered as (*pkg.T).meth.
// These nodes have the special property that they are names with a left OTYPE and a right ONAME.
n := n.(*ir.MethodExpr)
w.op(ir.OXDOT)
w.pos(n.Pos())
w.expr(n.Left()) // n.Left.Op == OTYPE
w.selector(n.Right().Sym())
w.op(ir.OTYPE)
w.typ(n.T) // n.Left.Op == OTYPE
w.selector(n.Method.Sym)
case ir.ONAME:
// Package scope name.
n := n.(*ir.Name)
if (n.Class() == ir.PEXTERN || n.Class() == ir.PFUNC) && !ir.IsBlank(n) {
w.op(ir.ONONAME)
w.qualifiedIdent(n)
@ -1291,7 +1314,7 @@ func (w *exportWriter) expr(n ir.Node) {
w.op(ir.OSTRUCTLIT)
w.pos(n.Pos())
w.typ(n.Type())
w.elemList(n.List()) // special handling of field names
w.fieldList(n.List()) // special handling of field names
case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
w.op(ir.OCOMPLIT)
@ -1349,7 +1372,7 @@ func (w *exportWriter) expr(n ir.Node) {
case ir.OCOPY, ir.OCOMPLEX:
// treated like other builtin calls (see e.g., OREAL)
w.op(op)
w.op(n.Op())
w.pos(n.Pos())
w.expr(n.Left())
w.expr(n.Right())
@ -1361,20 +1384,21 @@ func (w *exportWriter) expr(n ir.Node) {
w.expr(n.Left())
w.typ(n.Type())
case ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
w.op(op)
case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC:
w.op(n.Op())
w.pos(n.Pos())
if n.Left() != nil {
w.expr(n.Left())
w.op(ir.OEND)
} else {
w.exprList(n.List()) // emits terminating OEND
}
w.expr(n.Left())
w.op(ir.OEND)
case ir.OAPPEND, ir.ODELETE, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
w.op(n.Op())
w.pos(n.Pos())
w.exprList(n.List()) // emits terminating OEND
// only append() calls may contain '...' arguments
if op == ir.OAPPEND {
if n.Op() == ir.OAPPEND {
w.bool(n.IsDDD())
} else if n.IsDDD() {
base.Fatalf("exporter: unexpected '...' with %v call", op)
base.Fatalf("exporter: unexpected '...' with %v call", n.Op())
}
case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG:
@ -1386,15 +1410,13 @@ func (w *exportWriter) expr(n ir.Node) {
w.bool(n.IsDDD())
case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
w.op(op) // must keep separate from OMAKE for importer
w.op(n.Op()) // must keep separate from OMAKE for importer
w.pos(n.Pos())
w.typ(n.Type())
switch {
default:
// empty list
w.op(ir.OEND)
case n.List().Len() != 0: // pre-typecheck
w.exprList(n.List()) // emits terminating OEND
case n.Right() != nil:
w.expr(n.Left())
w.expr(n.Right())
@ -1405,15 +1427,37 @@ func (w *exportWriter) expr(n ir.Node) {
}
// unary expressions
case ir.OPLUS, ir.ONEG, ir.OADDR, ir.OBITNOT, ir.ODEREF, ir.ONOT, ir.ORECV:
w.op(op)
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
w.op(n.Op())
w.pos(n.Pos())
w.expr(n.Left())
case ir.OADDR:
w.op(n.Op())
w.pos(n.Pos())
w.expr(n.Left())
case ir.ODEREF:
w.op(n.Op())
w.pos(n.Pos())
w.expr(n.Left())
case ir.OSEND:
w.op(n.Op())
w.pos(n.Pos())
w.expr(n.Left())
w.expr(n.Right())
// binary expressions
case ir.OADD, ir.OAND, ir.OANDAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.OOROR, ir.ORSH, ir.OSEND, ir.OSUB, ir.OXOR:
w.op(op)
case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR:
w.op(n.Op())
w.pos(n.Pos())
w.expr(n.Left())
w.expr(n.Right())
case ir.OANDAND, ir.OOROR:
w.op(n.Op())
w.pos(n.Pos())
w.expr(n.Left())
w.expr(n.Right())
@ -1454,15 +1498,16 @@ func (w *exportWriter) exprsOrNil(a, b ir.Node) {
}
}
func (w *exportWriter) elemList(list ir.Nodes) {
func (w *exportWriter) fieldList(list ir.Nodes) {
w.uint64(uint64(list.Len()))
for _, n := range list.Slice() {
n := n.(*ir.StructKeyExpr)
w.selector(n.Sym())
w.expr(n.Left())
}
}
func (w *exportWriter) localName(n ir.Node) {
func (w *exportWriter) localName(n *ir.Name) {
// Escape analysis happens after inline bodies are saved, but
// we're using the same ONAME nodes, so we might still see
// PAUTOHEAP here.

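The exporter hunks above follow one shape: write an op tag, then the node's position and operands, and terminate variable-length operand lists with OEND (see exprList and the OAPPEND/ODELETE case). A toy encoder showing that shape; the ops, types, and byte layout here are invented for illustration and are much simpler than the real export data format:

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
	)

	type Op uint8

	const (
		OEND Op = iota // terminates a variable-length list
		OLIT
		OADD
		OCALL
	)

	type Node struct {
		Op   Op
		Val  int64   // for OLIT
		Args []*Node // for OADD (two) and OCALL (variable)
	}

	type writer struct{ buf bytes.Buffer }

	func (w *writer) op(o Op)       { w.buf.WriteByte(byte(o)) }
	func (w *writer) int64(v int64) { binary.Write(&w.buf, binary.LittleEndian, v) }

	// expr writes an op tag followed by the node's operands. Fixed-arity
	// ops write exactly their operands; a variable-length argument list
	// is terminated with OEND, like exprList in the diff above.
	func (w *writer) expr(n *Node) {
		w.op(n.Op)
		switch n.Op {
		case OLIT:
			w.int64(n.Val)
		case OADD:
			w.expr(n.Args[0])
			w.expr(n.Args[1])
		case OCALL:
			for _, a := range n.Args {
				w.expr(a)
			}
			w.op(OEND)
		}
	}

	func main() {
		n := &Node{Op: OCALL, Args: []*Node{
			{Op: OADD, Args: []*Node{{Op: OLIT, Val: 1}, {Op: OLIT, Val: 2}}},
			{Op: OLIT, Val: 3},
		}}
		var w writer
		w.expr(n)
		fmt.Printf("% x\n", w.buf.Bytes())
	}
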

@ -165,17 +165,9 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType)
s := pkg.Lookup(p.stringAt(ird.uint64()))
off := ird.uint64()
if _, ok := declImporter[s]; ok {
continue
if _, ok := declImporter[s]; !ok {
declImporter[s] = iimporterAndOffset{p, off}
}
declImporter[s] = iimporterAndOffset{p, off}
// Create stub declaration. If used, this will
// be overwritten by expandDecl.
if s.Def != nil {
base.Fatalf("unexpected definition for %v: %v", s, ir.AsNode(s.Def))
}
s.Def = ir.NewDeclNameAt(src.NoXPos, s)
}
}
@ -187,10 +179,9 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType)
s := pkg.Lookup(p.stringAt(ird.uint64()))
off := ird.uint64()
if _, ok := inlineImporter[s]; ok {
continue
if _, ok := inlineImporter[s]; !ok {
inlineImporter[s] = iimporterAndOffset{p, off}
}
inlineImporter[s] = iimporterAndOffset{p, off}
}
}
@ -442,10 +433,16 @@ func (r *importReader) ident() *types.Sym {
return pkg.Lookup(name)
}
func (r *importReader) qualifiedIdent() *types.Sym {
func (r *importReader) qualifiedIdent() *ir.Name {
name := r.string()
pkg := r.pkg()
return pkg.Lookup(name)
sym := pkg.Lookup(name)
n := sym.PkgDef()
if n == nil {
n = ir.NewDeclNameAt(src.NoXPos, sym)
sym.SetPkgDef(n)
}
return n.(*ir.Name)
}
func (r *importReader) pos() src.XPos {
@ -501,9 +498,9 @@ func (r *importReader) typ1() *types.Type {
// support inlining functions with local defined
// types. Therefore, this must be a package-scope
// type.
n := ir.AsNode(r.qualifiedIdent().PkgDef())
n := r.qualifiedIdent()
if n.Op() == ir.ONONAME {
expandDecl(n.(*ir.Name))
expandDecl(n)
}
if n.Op() != ir.OTYPE {
base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n)
@ -756,7 +753,7 @@ func (r *importReader) stmtList() []ir.Node {
}
func (r *importReader) caseList(sw ir.Node) []ir.Node {
namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil
namedTypeSwitch := isNamedTypeSwitch(sw)
cases := make([]ir.Node, r.uint64())
for i := range cases {
@ -769,7 +766,7 @@ func (r *importReader) caseList(sw ir.Node) []ir.Node {
caseVar := ir.NewNameAt(cas.Pos(), r.ident())
declare(caseVar, dclcontext)
cas.PtrRlist().Set1(caseVar)
caseVar.Defn = sw.Left()
caseVar.Defn = sw.(*ir.SwitchStmt).Left()
}
cas.PtrBody().Set(r.stmtList())
cases[i] = cas
@ -821,10 +818,10 @@ func (r *importReader) node() ir.Node {
return n
case ir.ONONAME:
return mkname(r.qualifiedIdent())
return r.qualifiedIdent()
case ir.ONAME:
return mkname(r.ident())
return r.ident().Def.(*ir.Name)
// case OPACK, ONONAME:
// unreachable - should have been resolved by typechecking
@ -897,10 +894,10 @@ func (r *importReader) node() ir.Node {
// unreachable - mapped to cases below by exporter
case ir.OINDEX:
return ir.NodAt(r.pos(), op, r.expr(), r.expr())
return ir.NodAt(r.pos(), ir.OINDEX, r.expr(), r.expr())
case ir.OSLICE, ir.OSLICE3:
n := ir.NodAt(r.pos(), op, r.expr(), nil)
n := ir.NewSliceExpr(r.pos(), op, r.expr())
low, high := r.exprsOrNil()
var max ir.Node
if n.Op().IsSlice3() {
@ -918,14 +915,14 @@ func (r *importReader) node() ir.Node {
return n
case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
n := npos(r.pos(), builtinCall(op))
n := builtinCall(r.pos(), op)
n.PtrList().Set(r.exprList())
if op == ir.OAPPEND {
n.SetIsDDD(r.bool())
}
return n
// case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
// case OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
// unreachable - mapped to OCALL case below by exporter
case ir.OCALL:
@ -937,19 +934,31 @@ func (r *importReader) node() ir.Node {
return n
case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
n := npos(r.pos(), builtinCall(ir.OMAKE))
n := builtinCall(r.pos(), ir.OMAKE)
n.PtrList().Append(ir.TypeNode(r.typ()))
n.PtrList().Append(r.exprList()...)
return n
// unary expressions
case ir.OPLUS, ir.ONEG, ir.OADDR, ir.OBITNOT, ir.ODEREF, ir.ONOT, ir.ORECV:
return ir.NodAt(r.pos(), op, r.expr(), nil)
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
return ir.NewUnaryExpr(r.pos(), op, r.expr())
case ir.OADDR:
return nodAddrAt(r.pos(), r.expr())
case ir.ODEREF:
return ir.NewStarExpr(r.pos(), r.expr())
// binary expressions
case ir.OADD, ir.OAND, ir.OANDAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.OOROR, ir.ORSH, ir.OSEND, ir.OSUB, ir.OXOR:
return ir.NodAt(r.pos(), op, r.expr(), r.expr())
case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR:
return ir.NewBinaryExpr(r.pos(), op, r.expr(), r.expr())
case ir.OANDAND, ir.OOROR:
return ir.NewLogicalExpr(r.pos(), op, r.expr(), r.expr())
case ir.OSEND:
return ir.NewSendStmt(r.pos(), r.expr(), r.expr())
case ir.OADDSTR:
pos := r.pos()
@ -1004,7 +1013,7 @@ func (r *importReader) node() ir.Node {
// unreachable - generated by compiler for trampoline routines (not exported)
case ir.OGO, ir.ODEFER:
return ir.NodAt(r.pos(), op, r.expr(), nil)
return ir.NewGoDeferStmt(r.pos(), op, r.expr())
case ir.OIF:
n := ir.NodAt(r.pos(), ir.OIF, nil, nil)
@ -1030,8 +1039,15 @@ func (r *importReader) node() ir.Node {
n.PtrBody().Set(r.stmtList())
return n
case ir.OSELECT, ir.OSWITCH:
n := ir.NodAt(r.pos(), op, nil, nil)
case ir.OSELECT:
n := ir.NodAt(r.pos(), ir.OSELECT, nil, nil)
n.PtrInit().Set(r.stmtList())
r.exprsOrNil() // TODO(rsc): Delete (and fix exporter). These are always nil.
n.PtrList().Set(r.caseList(n))
return n
case ir.OSWITCH:
n := ir.NodAt(r.pos(), ir.OSWITCH, nil, nil)
n.PtrInit().Set(r.stmtList())
left, _ := r.exprsOrNil()
n.SetLeft(left)
@ -1048,12 +1064,16 @@ func (r *importReader) node() ir.Node {
// case OEMPTY:
// unreachable - not emitted by exporter
case ir.OBREAK, ir.OCONTINUE, ir.OGOTO, ir.OLABEL:
n := ir.NodAt(r.pos(), op, nil, nil)
case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
var sym *types.Sym
pos := r.pos()
if label := r.string(); label != "" {
n.SetSym(lookup(label))
sym = lookup(label)
}
return n
return ir.NewBranchStmt(pos, op, sym)
case ir.OLABEL:
return ir.NewLabelStmt(r.pos(), lookup(r.string()))
case ir.OEND:
return nil
@ -1089,3 +1109,12 @@ func (r *importReader) exprsOrNil() (a, b ir.Node) {
}
return
}
func builtinCall(pos src.XPos, op ir.Op) *ir.CallExpr {
return ir.NewCallExpr(pos, ir.OCALL, ir.NewIdent(base.Pos, types.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
}
func npos(pos src.XPos, n ir.Node) ir.Node {
n.SetPos(pos)
return n
}

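qualifiedIdent above now creates the stub declaration lazily, the first time a symbol is looked up, instead of pre-creating a stub for every index entry. Underneath it is a plain get-or-create keyed by symbol; a minimal sketch with invented stand-in types:

	package main

	import "fmt"

	// Decl is a stand-in for the compiler's stub declaration node.
	type Decl struct {
		Name     string
		Expanded bool
	}

	var pkgDefs = map[string]*Decl{}

	// lookup returns the declaration for name, creating a stub on first
	// use. A caller that needs the real definition can expand the stub
	// later (compare expandDecl in the hunks above).
	func lookup(name string) *Decl {
		if d, ok := pkgDefs[name]; ok {
			return d
		}
		d := &Decl{Name: name}
		pkgDefs[name] = d
		return d
	}

	func main() {
		a := lookup("io.Reader")
		b := lookup("io.Reader")
		fmt.Println(a == b, len(pkgDefs)) // true 1: one stub per symbol
	}
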

@ -27,31 +27,28 @@ func renameinit() *types.Sym {
return s
}
// List of imported packages, in source code order. See #31636.
var sourceOrderImports []*types.Pkg
// fninit makes an initialization record for the package.
// fninit makes and returns an initialization record for the package.
// See runtime/proc.go:initTask for its layout.
// The 3 tasks for initialization are:
// 1) Initialize all of the packages the current package depends on.
// 2) Initialize all the variables that have initializers.
// 3) Run any init functions.
func fninit(n []ir.Node) {
nf := initOrder(n)
func fninit() *ir.Name {
nf := initOrder(Target.Decls)
var deps []*obj.LSym // initTask records for packages the current package depends on
var fns []*obj.LSym // functions to call for package initialization
// Find imported packages with init tasks.
for _, pkg := range sourceOrderImports {
n := resolve(ir.AsNode(pkg.Lookup(".inittask").Def))
if n == nil {
for _, pkg := range Target.Imports {
n := resolve(ir.NewIdent(base.Pos, pkg.Lookup(".inittask")))
if n.Op() == ir.ONONAME {
continue
}
if n.Op() != ir.ONAME || n.Class() != ir.PEXTERN {
if n.Op() != ir.ONAME || n.(*ir.Name).Class() != ir.PEXTERN {
base.Fatalf("bad inittask: %v", n)
}
deps = append(deps, n.Sym().Linksym())
deps = append(deps, n.(*ir.Name).Sym().Linksym())
}
// Make a function that contains all the initialization statements.
@ -72,7 +69,7 @@ func fninit(n []ir.Node) {
Curfn = fn
typecheckslice(nf, ctxStmt)
Curfn = nil
xtop = append(xtop, fn)
Target.Decls = append(Target.Decls, fn)
fns = append(fns, initializers.Linksym())
}
if initTodo.Dcl != nil {
@ -84,29 +81,26 @@ func fninit(n []ir.Node) {
initTodo = nil
// Record user init functions.
for i := 0; i < renameinitgen; i++ {
s := lookupN("init.", i)
fn := ir.AsNode(s.Def).Name().Defn
for _, fn := range Target.Inits {
// Skip init functions with empty bodies.
if fn.Body().Len() == 1 {
if stmt := fn.Body().First(); stmt.Op() == ir.OBLOCK && stmt.List().Len() == 0 {
if stmt := fn.Body().First(); stmt.Op() == ir.OBLOCK && stmt.(*ir.BlockStmt).List().Len() == 0 {
continue
}
}
fns = append(fns, s.Linksym())
fns = append(fns, fn.Nname.Sym().Linksym())
}
if len(deps) == 0 && len(fns) == 0 && types.LocalPkg.Name != "main" && types.LocalPkg.Name != "runtime" {
return // nothing to initialize
return nil // nothing to initialize
}
// Make an .inittask structure.
sym := lookup(".inittask")
nn := NewName(sym)
nn.SetType(types.Types[types.TUINT8]) // fake type
nn.SetClass(ir.PEXTERN)
sym.Def = nn
exportsym(nn)
task := NewName(sym)
task.SetType(types.Types[types.TUINT8]) // fake type
task.SetClass(ir.PEXTERN)
sym.Def = task
lsym := sym.Linksym()
ot := 0
ot = duintptr(lsym, ot, 0) // state: not initialized yet
@ -121,4 +115,5 @@ func fninit(n []ir.Node) {
// An initTask has pointers, but none into the Go heap.
// It's not quite read only, the state field must be modifiable.
ggloblsym(lsym, int32(ot), obj.NOPTR)
return task
}
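
fninit above builds the package's .inittask from the three ingredients listed in its comment: the inittasks of imported packages, the generated function that runs package-level variable initializers, and the user init functions. Within one package the observable order is variable initializers first, then init functions in source order; a small program demonstrating that (imported packages, task 1, would run before both):

	package main

	import "fmt"

	// Package-level variable initializers run first (task 2 above).
	var a = report("var a initialized")

	func report(msg string) int {
		fmt.Println(msg)
		return 0
	}

	// init functions then run in source order (task 3 above).
	func init() { fmt.Println("init #1") }
	func init() { fmt.Println("init #2") }

	func main() {
		fmt.Println("main") // var a initialized, init #1, init #2, main
	}
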


@ -11,7 +11,6 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
// Package initialization
@ -69,6 +68,8 @@ type InitOrder struct {
// ready is the queue of Pending initialization assignments
// that are ready for initialization.
ready declOrder
order map[ir.Node]int
}
// initOrder computes initialization order for a list l of
@ -78,10 +79,11 @@ type InitOrder struct {
func initOrder(l []ir.Node) []ir.Node {
s := InitSchedule{
initplans: make(map[ir.Node]*InitPlan),
inittemps: make(map[ir.Node]ir.Node),
inittemps: make(map[ir.Node]*ir.Name),
}
o := InitOrder{
blocking: make(map[ir.Node][]ir.Node),
order: make(map[ir.Node]int),
}
// Process all package-level assignment in declaration order.
@ -102,7 +104,7 @@ func initOrder(l []ir.Node) []ir.Node {
for _, n := range l {
switch n.Op() {
case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
if n.Initorder() != InitDone {
if o.order[n] != orderDone {
// If there have already been errors
// printed, those errors may have
// confused us and there might not be
@ -110,7 +112,7 @@ func initOrder(l []ir.Node) []ir.Node {
// first.
base.ExitIfErrors()
findInitLoopAndExit(firstLHS(n), new([]*ir.Name))
o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name))
base.Fatalf("initialization unfinished, but failed to identify loop")
}
}
@ -126,12 +128,10 @@ func initOrder(l []ir.Node) []ir.Node {
}
func (o *InitOrder) processAssign(n ir.Node) {
if n.Initorder() != InitNotStarted || n.Offset() != types.BADWIDTH {
base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
if _, ok := o.order[n]; ok {
base.Fatalf("unexpected state: %v, %v", n, o.order[n])
}
n.SetInitorder(InitPending)
n.SetOffset(0)
o.order[n] = 0
// Compute number of variable dependencies and build the
// inverse dependency ("blocking") graph.
@ -139,38 +139,38 @@ func (o *InitOrder) processAssign(n ir.Node) {
defn := dep.Defn
// Skip dependencies on functions (PFUNC) and
// variables already initialized (InitDone).
if dep.Class() != ir.PEXTERN || defn.Initorder() == InitDone {
if dep.Class() != ir.PEXTERN || o.order[defn] == orderDone {
continue
}
n.SetOffset(n.Offset() + 1)
o.order[n]++
o.blocking[defn] = append(o.blocking[defn], n)
}
if n.Offset() == 0 {
if o.order[n] == 0 {
heap.Push(&o.ready, n)
}
}
const orderDone = -1000
// flushReady repeatedly applies initialize to the earliest (in
// declaration order) assignment ready for initialization and updates
// the inverse dependency ("blocking") graph.
func (o *InitOrder) flushReady(initialize func(ir.Node)) {
for o.ready.Len() != 0 {
n := heap.Pop(&o.ready).(ir.Node)
if n.Initorder() != InitPending || n.Offset() != 0 {
base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
if order, ok := o.order[n]; !ok || order != 0 {
base.Fatalf("unexpected state: %v, %v, %v", n, ok, order)
}
initialize(n)
n.SetInitorder(InitDone)
n.SetOffset(types.BADWIDTH)
o.order[n] = orderDone
blocked := o.blocking[n]
delete(o.blocking, n)
for _, m := range blocked {
m.SetOffset(m.Offset() - 1)
if m.Offset() == 0 {
if o.order[m]--; o.order[m] == 0 {
heap.Push(&o.ready, m)
}
}
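
The InitOrder hunks above move the per-assignment scheduling state out of the nodes (Initorder/Offset) and into an order map, with a negative sentinel marking finished work. The scheduling itself is ordinary dependency counting: each assignment tracks how many dependencies are still pending and becomes ready at zero. A self-contained sketch of that algorithm; it uses string keys and a plain FIFO ready queue for brevity, where the compiler uses a heap keyed on declaration order:

	package main

	import "fmt"

	const orderDone = -1000 // sentinel: already initialized

	// initOrder returns the names in an order where every name comes
	// after everything it depends on.
	func initOrder(all []string, deps map[string][]string) []string {
		order := map[string]int{}         // pending-dependency count, or orderDone
		blocking := map[string][]string{} // inverse dependency graph
		var ready, out []string

		for _, n := range all {
			order[n] = 0
			for _, d := range deps[n] {
				order[n]++
				blocking[d] = append(blocking[d], n)
			}
			if order[n] == 0 {
				ready = append(ready, n)
			}
		}
		for len(ready) > 0 {
			n := ready[0]
			ready = ready[1:]
			out = append(out, n)
			order[n] = orderDone
			for _, m := range blocking[n] {
				if order[m]--; order[m] == 0 {
					ready = append(ready, m)
				}
			}
			delete(blocking, n)
		}
		return out
	}

	func main() {
		all := []string{"a", "b", "c"}
		deps := map[string][]string{"a": {"b"}, "b": {"c"}}
		fmt.Println(initOrder(all, deps)) // [c b a]
	}
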
@ -183,7 +183,7 @@ func (o *InitOrder) flushReady(initialize func(ir.Node)) {
// path points to a slice used for tracking the sequence of
// variables/functions visited. Using a pointer to a slice allows the
// slice capacity to grow and limit reallocations.
func findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) {
func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) {
// We implement a simple DFS loop-finding algorithm. This
// could be faster, but initialization cycles are rare.
@ -203,11 +203,11 @@ func findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) {
*path = append(*path, n)
for _, ref := range refers {
// Short-circuit variables that were initialized.
if ref.Class() == ir.PEXTERN && ref.Defn.Initorder() == InitDone {
if ref.Class() == ir.PEXTERN && o.order[ref.Defn] == orderDone {
continue
}
findInitLoopAndExit(ref, path)
o.findInitLoopAndExit(ref, path)
}
*path = (*path)[:len(*path)-1]
}
@ -268,23 +268,31 @@ func collectDeps(n ir.Node, transitive bool) ir.NameSet {
type initDeps struct {
transitive bool
seen ir.NameSet
cvisit func(ir.Node)
}
func (d *initDeps) inspect(n ir.Node) { ir.Inspect(n, d.visit) }
func (d *initDeps) inspectList(l ir.Nodes) { ir.InspectList(l, d.visit) }
func (d *initDeps) cachedVisit() func(ir.Node) {
if d.cvisit == nil {
d.cvisit = d.visit // cache closure
}
return d.cvisit
}
func (d *initDeps) inspect(n ir.Node) { ir.Visit(n, d.cachedVisit()) }
func (d *initDeps) inspectList(l ir.Nodes) { ir.VisitList(l, d.cachedVisit()) }
// visit calls foundDep on any package-level functions or variables
// referenced by n, if any.
func (d *initDeps) visit(n ir.Node) bool {
func (d *initDeps) visit(n ir.Node) {
switch n.Op() {
case ir.OMETHEXPR:
d.foundDep(methodExprName(n))
return false
case ir.ONAME:
n := n.(*ir.Name)
switch n.Class() {
case ir.PEXTERN, ir.PFUNC:
d.foundDep(n.(*ir.Name))
d.foundDep(n)
}
case ir.OCLOSURE:
@ -293,8 +301,6 @@ func (d *initDeps) visit(n ir.Node) bool {
case ir.ODOTMETH, ir.OCALLPART:
d.foundDep(methodExprName(n))
}
return true
}
// foundDep records that we've found a dependency on n by adding it to
@ -317,7 +323,7 @@ func (d *initDeps) foundDep(n *ir.Name) {
}
d.seen.Add(n)
if d.transitive && n.Class() == ir.PFUNC {
d.inspectList(n.Defn.Body())
d.inspectList(n.Defn.(*ir.Func).Body())
}
}
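
cachedVisit above stores the d.visit method value in a field so that repeated walks reuse one closure instead of evaluating a fresh method value (and allocating) on every inspect/inspectList call. A tiny illustration of the pattern with invented types:

	package main

	import "fmt"

	type deps struct {
		cvisit func(string)
		seen   []string
	}

	func (d *deps) visit(name string) { d.seen = append(d.seen, name) }

	// cachedVisit hands out one function value per receiver instead of
	// building a fresh closure on every walk.
	func (d *deps) cachedVisit() func(string) {
		if d.cvisit == nil {
			d.cvisit = d.visit // method value: captures d once
		}
		return d.cvisit
	}

	func walk(names []string, f func(string)) {
		for _, n := range names {
			f(n)
		}
	}

	func main() {
		d := &deps{}
		walk([]string{"a", "b"}, d.cachedVisit())
		walk([]string{"c"}, d.cachedVisit()) // reuses the same closure
		fmt.Println(d.seen)                  // [a b c]
	}
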


@ -39,6 +39,9 @@ import (
"strings"
)
// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation.
var IsIntrinsicCall = func(*ir.CallExpr) bool { return false }
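
IsIntrinsicCall here (and SSADumpInline further down) is a package-level function variable with a harmless default; Main later assigns the real implementation, which lets this file avoid importing the backend. A schematic of that hook pattern, collapsed into a single file for illustration:

	package main

	import "fmt"

	// IsIntrinsicCall is a hook filled in by a "backend"; the default is
	// a safe no-op so this code works without it.
	var IsIntrinsicCall = func(name string) bool { return false }

	func inline(callee string) {
		if IsIntrinsicCall(callee) {
			fmt.Println("skip inlining intrinsic", callee)
			return
		}
		fmt.Println("consider inlining", callee)
	}

	// installBackend stands in for the assignment Main makes; the
	// package that owns the concrete check installs itself here.
	func installBackend() {
		IsIntrinsicCall = func(name string) bool { return name == "math.Sqrt" }
	}

	func main() {
		inline("math.Sqrt") // default hook: consider inlining
		installBackend()
		inline("math.Sqrt") // backend hook: skip inlining intrinsic
	}
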
// Inlining budget parameters, gathered in one place
const (
inlineMaxBudget = 80
@ -230,7 +233,7 @@ func caninl(fn *ir.Func) {
// inlFlood marks n's inline body for export and recursively ensures
// all called functions are marked too.
func inlFlood(n *ir.Name) {
func inlFlood(n *ir.Name, exportsym func(*ir.Name)) {
if n == nil {
return
}
@ -255,16 +258,16 @@ func inlFlood(n *ir.Name) {
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
ir.InspectList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) bool {
ir.VisitList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) {
switch n.Op() {
case ir.OMETHEXPR, ir.ODOTMETH:
inlFlood(methodExprName(n))
inlFlood(methodExprName(n), exportsym)
case ir.ONAME:
n := n.(*ir.Name)
switch n.Class() {
case ir.PFUNC:
inlFlood(n)
inlFlood(n, exportsym)
exportsym(n)
case ir.PEXTERN:
exportsym(n)
@ -282,7 +285,6 @@ func inlFlood(n *ir.Name) {
// inlFlood(n.Func.Closure.Func.Nname)
base.Fatalf("unexpected closure in inlinable function")
}
return true
})
}
@ -321,22 +323,26 @@ func (v *hairyVisitor) doNode(n ir.Node) error {
switch n.Op() {
// Call is okay if inlinable and we have the budget for the body.
case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
// Functions that call runtime.getcaller{pc,sp} can not be inlined
// because getcaller{pc,sp} expect a pointer to the caller's first argument.
//
// runtime.throw is a "cheap call" like panic in normal code.
if n.Left().Op() == ir.ONAME && n.Left().Class() == ir.PFUNC && isRuntimePkg(n.Left().Sym().Pkg) {
fn := n.Left().Sym().Name
if fn == "getcallerpc" || fn == "getcallersp" {
return errors.New("call to " + fn)
}
if fn == "throw" {
v.budget -= inlineExtraThrowCost
break
if n.Left().Op() == ir.ONAME {
name := n.Left().(*ir.Name)
if name.Class() == ir.PFUNC && isRuntimePkg(name.Sym().Pkg) {
fn := name.Sym().Name
if fn == "getcallerpc" || fn == "getcallersp" {
return errors.New("call to " + fn)
}
if fn == "throw" {
v.budget -= inlineExtraThrowCost
break
}
}
}
if isIntrinsicCall(n) {
if IsIntrinsicCall(n) {
// Treat like any other node.
break
}
@ -402,11 +408,15 @@ func (v *hairyVisitor) doNode(n ir.Node) error {
// These nodes don't produce code; omit from inlining budget.
return nil
case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH:
// ORANGE, OSELECT in "unhandled" above
case ir.OFOR, ir.OFORUNTIL:
if n.Sym() != nil {
return errors.New("labeled control")
}
case ir.OSWITCH:
if n.Sym() != nil {
return errors.New("labeled control")
}
// case ir.ORANGE, ir.OSELECT in "unhandled" above
case ir.OBREAK, ir.OCONTINUE:
if n.Sym() != nil {
@ -458,14 +468,10 @@ func (v *hairyVisitor) doNode(n ir.Node) error {
func isBigFunc(fn *ir.Func) bool {
budget := inlineBigFunctionNodes
over := ir.Find(fn, func(n ir.Node) interface{} {
return ir.Any(fn, func(n ir.Node) bool {
budget--
if budget <= 0 {
return n
}
return nil
return budget <= 0
})
return over != nil
}
// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
@ -493,7 +499,7 @@ func inlcalls(fn *ir.Func) {
}
// Turn an OINLCALL into a statement.
func inlconv2stmt(inlcall ir.Node) ir.Node {
func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node {
n := ir.NodAt(inlcall.Pos(), ir.OBLOCK, nil, nil)
n.SetList(inlcall.Init())
n.PtrList().AppendNodes(inlcall.PtrBody())
@ -503,7 +509,7 @@ func inlconv2stmt(inlcall ir.Node) ir.Node {
// Turn an OINLCALL into a single valued expression.
// The result of inlconv2expr MUST be assigned back to n, e.g.
// n.Left = inlconv2expr(n.Left)
func inlconv2expr(n ir.Node) ir.Node {
func inlconv2expr(n *ir.InlinedCallExpr) ir.Node {
r := n.Rlist().First()
return initExpr(append(n.Init().Slice(), n.Body().Slice()...), r)
}
@ -513,7 +519,7 @@ func inlconv2expr(n ir.Node) ir.Node {
// containing the inlined statements on the first list element so
// order will be preserved. Used in return, oas2func and call
// statements.
func inlconv2list(n ir.Node) []ir.Node {
func inlconv2list(n *ir.InlinedCallExpr) []ir.Node {
if n.Op() != ir.OINLCALL || n.Rlist().Len() == 0 {
base.Fatalf("inlconv2list %+v\n", n)
}
@ -543,9 +549,9 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
switch n.Op() {
case ir.ODEFER, ir.OGO:
switch n.Left().Op() {
switch call := n.Left(); call.Op() {
case ir.OCALLFUNC, ir.OCALLMETH:
n.Left().SetNoInline(true)
call.SetNoInline(true)
}
// TODO do them here (or earlier),
@ -564,11 +570,13 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
ir.EditChildren(n, edit)
if n.Op() == ir.OAS2FUNC && n.Rlist().First().Op() == ir.OINLCALL {
n.PtrRlist().Set(inlconv2list(n.Rlist().First()))
n.SetOp(ir.OAS2)
n.SetTypecheck(0)
n = typecheck(n, ctxStmt)
if as := n; as.Op() == ir.OAS2FUNC {
if as.Rlist().First().Op() == ir.OINLCALL {
as.PtrRlist().Set(inlconv2list(as.Rlist().First().(*ir.InlinedCallExpr)))
as.SetOp(ir.OAS2)
as.SetTypecheck(0)
n = typecheck(as, ctxStmt)
}
}
// with all the branches out of the way, it is now time to
@ -581,45 +589,46 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
}
}
var call ir.Node
var call *ir.CallExpr
switch n.Op() {
case ir.OCALLFUNC:
call = n
call = n.(*ir.CallExpr)
if base.Flag.LowerM > 3 {
fmt.Printf("%v:call to func %+v\n", ir.Line(n), n.Left())
fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.Left())
}
if isIntrinsicCall(n) {
if IsIntrinsicCall(call) {
break
}
if fn := inlCallee(n.Left()); fn != nil && fn.Inl != nil {
n = mkinlcall(n, fn, maxCost, inlMap, edit)
if fn := inlCallee(call.Left()); fn != nil && fn.Inl != nil {
n = mkinlcall(call, fn, maxCost, inlMap, edit)
}
case ir.OCALLMETH:
call = n
call = n.(*ir.CallExpr)
if base.Flag.LowerM > 3 {
fmt.Printf("%v:call to meth %L\n", ir.Line(n), n.Left().Right())
fmt.Printf("%v:call to meth %v\n", ir.Line(n), call.Left().(*ir.SelectorExpr).Sel)
}
// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
if n.Left().Type() == nil {
base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left())
if call.Left().Type() == nil {
base.Fatalf("no function type for [%p] %+v\n", call.Left(), call.Left())
}
n = mkinlcall(n, methodExprName(n.Left()).Func(), maxCost, inlMap, edit)
n = mkinlcall(call, methodExprName(call.Left()).Func(), maxCost, inlMap, edit)
}
base.Pos = lno
if n.Op() == ir.OINLCALL {
switch call.(*ir.CallExpr).Use {
ic := n.(*ir.InlinedCallExpr)
switch call.Use {
default:
ir.Dump("call", call)
base.Fatalf("call missing use")
case ir.CallUseExpr:
n = inlconv2expr(n)
n = inlconv2expr(ic)
case ir.CallUseStmt:
n = inlconv2stmt(n)
n = inlconv2stmt(ic)
case ir.CallUseList:
// leave for caller to convert
}
@ -632,19 +641,22 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
// that it refers to if statically known. Otherwise, it returns nil.
func inlCallee(fn ir.Node) *ir.Func {
fn = staticValue(fn)
switch {
case fn.Op() == ir.OMETHEXPR:
switch fn.Op() {
case ir.OMETHEXPR:
fn := fn.(*ir.MethodExpr)
n := methodExprName(fn)
// Check that receiver type matches fn.Left.
// TODO(mdempsky): Handle implicit dereference
// of pointer receiver argument?
if n == nil || !types.Identical(n.Type().Recv().Type, fn.Left().Type()) {
if n == nil || !types.Identical(n.Type().Recv().Type, fn.T) {
return nil
}
return n.Func()
case fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC:
return fn.Func()
case fn.Op() == ir.OCLOSURE:
case ir.ONAME:
if fn.Class() == ir.PFUNC {
return fn.Func()
}
case ir.OCLOSURE:
c := fn.Func()
caninl(c)
return c
@ -655,7 +667,7 @@ func inlCallee(fn ir.Node) *ir.Func {
func staticValue(n ir.Node) ir.Node {
for {
if n.Op() == ir.OCONVNOP {
n = n.Left()
n = n.(*ir.ConvExpr).Left()
continue
}
@ -670,8 +682,12 @@ func staticValue(n ir.Node) ir.Node {
// staticValue1 implements a simple SSA-like optimization. If n is a local variable
// that is initialized and never reassigned, staticValue1 returns the initializer
// expression. Otherwise, it returns nil.
func staticValue1(n ir.Node) ir.Node {
if n.Op() != ir.ONAME || n.Class() != ir.PAUTO || n.Name().Addrtaken() {
func staticValue1(nn ir.Node) ir.Node {
if nn.Op() != ir.ONAME {
return nil
}
n := nn.(*ir.Name)
if n.Class() != ir.PAUTO || n.Name().Addrtaken() {
return nil
}
@ -700,15 +716,13 @@ FindRHS:
base.Fatalf("RHS is nil: %v", defn)
}
if reassigned(n.(*ir.Name)) {
if reassigned(n) {
return nil
}
return rhs
}
var errFound = errors.New("found")
// reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean
// indicating whether the name has any assignments other than its declaration.
// The second return value is the first such assignment encountered in the walk, if any. It is mostly
@ -723,22 +737,21 @@ func reassigned(name *ir.Name) bool {
if name.Curfn == nil {
return true
}
a := ir.Find(name.Curfn, func(n ir.Node) interface{} {
return ir.Any(name.Curfn, func(n ir.Node) bool {
switch n.Op() {
case ir.OAS:
if n.Left() == name && n != name.Defn {
return n
return true
}
case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE:
case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OSELRECV2:
for _, p := range n.List().Slice() {
if p == name && n != name.Defn {
return n
return true
}
}
}
return nil
return false
})
return a != nil
}
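
reassigned above walks the enclosing function with ir.Any and reports whether the name is ever assigned outside its defining statement, which is what allows staticValue to treat it as effectively constant. The same check can be sketched against the exported go/ast API (standard library only, not the compiler's internal walker):

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
	)

	const src = `package p

	func f() int {
		x := 1
		y := 2
		y = x + 1
		return y
	}`

	// assignCount reports how many assignment statements mention name on
	// their left-hand side, counting the := declaration too.
	func assignCount(fn ast.Node, name string) int {
		count := 0
		ast.Inspect(fn, func(n ast.Node) bool {
			as, ok := n.(*ast.AssignStmt)
			if !ok {
				return true
			}
			for _, lhs := range as.Lhs {
				if id, ok := lhs.(*ast.Ident); ok && id.Name == name {
					count++
				}
			}
			return true
		})
		return count
	}

	func main() {
		fset := token.NewFileSet()
		file, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		fn := file.Decls[0].(*ast.FuncDecl)
		// x is assigned only at its definition; y is reassigned.
		fmt.Println("x reassigned:", assignCount(fn, "x") > 1)
		fmt.Println("y reassigned:", assignCount(fn, "y") > 1)
	}
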
func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node {
@ -758,6 +771,10 @@ func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node
var inlgen int
// SSADumpInline gives the SSA back end a chance to dump the function
// when producing output for debugging the compiler itself.
var SSADumpInline = func(*ir.Func) {}
// If n is a call node (OCALLFUNC or OCALLMETH), and fn is an ONAME node for a
// function with an inlinable body, return an OINLCALL node that can replace n.
// The returned node's Ninit has the parameter assignments, the Nbody is the
@ -765,7 +782,7 @@ var inlgen int
// parameters.
// The result of mkinlcall MUST be assigned back to n, e.g.
// n.Left = mkinlcall(n.Left, fn, isddd)
func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
if fn.Inl == nil {
if logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
@ -825,9 +842,7 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool,
fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
}
if ssaDump != "" && ssaDump == ir.FuncName(Curfn) {
ssaDumpInlined = append(ssaDumpInlined, fn)
}
SSADumpInline(fn)
ninit := n.Init()
@ -838,8 +853,9 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool,
if n.Op() == ir.OCALLFUNC {
callee := n.Left()
for callee.Op() == ir.OCONVNOP {
ninit.AppendNodes(callee.PtrInit())
callee = callee.Left()
conv := callee.(*ir.ConvExpr)
ninit.AppendNodes(conv.PtrInit())
callee = conv.Left()
}
if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR {
base.Fatalf("unexpected callee expression: %v", callee)
@ -878,7 +894,7 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool,
addr.SetType(types.NewPtr(v.Type()))
ia := typecheck(inlvar(addr), ctxExpr)
ninit.Append(ir.Nod(ir.ODCL, ia, nil))
ninit.Append(typecheck(ir.Nod(ir.OAS, ia, ir.Nod(ir.OADDR, o, nil)), ctxStmt))
ninit.Append(typecheck(ir.Nod(ir.OAS, ia, nodAddr(o)), ctxStmt))
inlvars[addr] = ia
// When capturing by reference, all occurrence of the captured var
@ -916,11 +932,10 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool,
}
nreturns := 0
ir.InspectList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) bool {
ir.VisitList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) {
if n != nil && n.Op() == ir.ORETURN {
nreturns++
}
return true
})
// We can delay declaring+initializing result parameters if:
@ -961,16 +976,17 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool,
as := ir.Nod(ir.OAS2, nil, nil)
as.SetColas(true)
if n.Op() == ir.OCALLMETH {
if n.Left().Left() == nil {
sel := n.Left().(*ir.SelectorExpr)
if sel.Left() == nil {
base.Fatalf("method call without receiver: %+v", n)
}
as.PtrRlist().Append(n.Left().Left())
as.PtrRlist().Append(sel.Left())
}
as.PtrRlist().Append(n.List().Slice()...)
// For non-dotted calls to variadic functions, we assign the
// variadic parameter's temp name separately.
var vas ir.Node
var vas *ir.AssignStmt
if recv := fn.Type().Recv(); recv != nil {
as.PtrList().Append(inlParam(recv, as, inlvars))
@ -993,25 +1009,24 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool,
}
varargs := as.List().Slice()[x:]
vas = ir.Nod(ir.OAS, nil, nil)
vas = ir.NewAssignStmt(base.Pos, nil, nil)
vas.SetLeft(inlParam(param, vas, inlvars))
if len(varargs) == 0 {
vas.SetRight(nodnil())
vas.Right().SetType(param.Type)
} else {
vas.SetRight(ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(param.Type)))
vas.Right().PtrList().Set(varargs)
lit := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(param.Type))
lit.PtrList().Set(varargs)
vas.SetRight(lit)
}
}
if as.Rlist().Len() != 0 {
as = typecheck(as, ctxStmt)
ninit.Append(as)
ninit.Append(typecheck(as, ctxStmt))
}
if vas != nil {
vas = typecheck(vas, ctxStmt)
ninit.Append(vas)
ninit.Append(typecheck(vas, ctxStmt))
}
if !delayretvars {
@ -1019,8 +1034,7 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool,
for _, n := range retvars {
ninit.Append(ir.Nod(ir.ODCL, n, nil))
ras := ir.Nod(ir.OAS, n, nil)
ras = typecheck(ras, ctxStmt)
ninit.Append(ras)
ninit.Append(typecheck(ras, ctxStmt))
}
}
@ -1211,11 +1225,19 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
if n.Sym() != nil {
return n
}
if n, ok := n.(*ir.Name); ok && n.Op() == ir.OLITERAL {
// This happens for an unnamed OLITERAL,
// which should really not be a *Name, but for now it is.
// ir.Copy(n) is not allowed generally and would panic below,
// but it's OK in this situation.
n = n.CloneName()
n.SetPos(subst.updatedPos(n.Pos()))
return n
}
// Since we don't handle bodies with closures, this return is guaranteed to belong to the current inlined function.
// dump("Return before substitution", n);
case ir.ORETURN:
// Since we don't handle bodies with closures,
// this return is guaranteed to belong to the current inlined function.
init := subst.list(n.Init())
if len(subst.retvars) != 0 && n.List().Len() != 0 {
as := ir.Nod(ir.OAS2, nil, nil)
@ -1235,20 +1257,26 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
}
}
as = typecheck(as, ctxStmt)
init = append(init, as)
init = append(init, typecheck(as, ctxStmt))
}
init = append(init, nodSym(ir.OGOTO, nil, subst.retlabel))
typecheckslice(init, ctxStmt)
return ir.NewBlockStmt(base.Pos, init)
case ir.OGOTO, ir.OLABEL:
m := ir.Copy(n)
case ir.OGOTO:
m := ir.Copy(n).(*ir.BranchStmt)
m.SetPos(subst.updatedPos(m.Pos()))
m.PtrInit().Set(nil)
p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen)
m.SetSym(lookup(p))
return m
case ir.OLABEL:
m := ir.Copy(n).(*ir.LabelStmt)
m.SetPos(subst.updatedPos(m.Pos()))
m.PtrInit().Set(nil)
p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen)
m.SetSym(lookup(p))
return m
}
@ -1291,40 +1319,40 @@ func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
// concrete-type method calls where applicable.
func devirtualize(fn *ir.Func) {
Curfn = fn
ir.InspectList(fn.Body(), func(n ir.Node) bool {
ir.VisitList(fn.Body(), func(n ir.Node) {
if n.Op() == ir.OCALLINTER {
devirtualizeCall(n)
devirtualizeCall(n.(*ir.CallExpr))
}
return true
})
}
func devirtualizeCall(call ir.Node) {
recv := staticValue(call.Left().Left())
if recv.Op() != ir.OCONVIFACE {
func devirtualizeCall(call *ir.CallExpr) {
sel := call.Left().(*ir.SelectorExpr)
r := staticValue(sel.Left())
if r.Op() != ir.OCONVIFACE {
return
}
recv := r.(*ir.ConvExpr)
typ := recv.Left().Type()
if typ.IsInterface() {
return
}
x := ir.NodAt(call.Left().Pos(), ir.ODOTTYPE, call.Left().Left(), nil)
x.SetType(typ)
x = nodlSym(call.Left().Pos(), ir.OXDOT, x, call.Left().Sym())
x = typecheck(x, ctxExpr|ctxCallee)
dt := ir.NodAt(sel.Pos(), ir.ODOTTYPE, sel.Left(), nil)
dt.SetType(typ)
x := typecheck(nodlSym(sel.Pos(), ir.OXDOT, dt, sel.Sym()), ctxExpr|ctxCallee)
switch x.Op() {
case ir.ODOTMETH:
if base.Flag.LowerM != 0 {
base.WarnfAt(call.Pos(), "devirtualizing %v to %v", call.Left(), typ)
base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ)
}
call.SetOp(ir.OCALLMETH)
call.SetLeft(x)
case ir.ODOTINTER:
// Promoted method from embedded interface-typed field (#42279).
if base.Flag.LowerM != 0 {
base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", call.Left(), typ)
base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ)
}
call.SetOp(ir.OCALLINTER)
call.SetLeft(x)

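devirtualizeCall above rewrites an interface method call into a concrete method call when staticValue proves the receiver's dynamic type. Source-level Go can express the same transformation with a type assertion, which is roughly the ODOTTYPE plus direct method selection the compiler inserts; a sketch:

	package main

	import (
		"bytes"
		"fmt"
		"io"
	)

	// writeVia calls Write through the interface: a dynamic dispatch.
	func writeVia(w io.Writer, p []byte) {
		w.Write(p)
	}

	// writeDevirtualized mirrors what the compiler does when it knows
	// the dynamic type: assert to the concrete type and call its method
	// directly, falling back to the interface call otherwise.
	func writeDevirtualized(w io.Writer, p []byte) {
		if b, ok := w.(*bytes.Buffer); ok {
			b.Write(p) // direct (devirtualized) call
			return
		}
		w.Write(p)
	}

	func main() {
		var buf bytes.Buffer
		writeVia(&buf, []byte("a"))
		writeDevirtualized(&buf, []byte("b"))
		fmt.Println(buf.String()) // ab
	}
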

@ -51,11 +51,12 @@ func hidePanic() {
}
}
// Target is the package being compiled.
var Target *ir.Package
// timing data for compiler phases
var timings Timings
var nowritebarrierrecCheck *nowritebarrierrecChecker
// Main parses flags and Go source files specified in the command-line
// arguments, type-checks the parsed Go package, compiles functions to machine
// code, and finally writes the compiled package definition to disk.
@ -188,6 +189,9 @@ func Main(archInit func(*Arch)) {
logopt.LogJsonOption(base.Flag.JSON)
}
IsIntrinsicCall = isIntrinsicCall
SSADumpInline = ssaDumpInline
ssaDump = os.Getenv("GOSSAFUNC")
ssaDir = os.Getenv("GOSSADIR")
if ssaDump != "" {
@ -202,11 +206,11 @@ func Main(archInit func(*Arch)) {
}
}
trackScopes = base.Flag.Dwarf
Widthptr = thearch.LinkArch.PtrSize
Widthreg = thearch.LinkArch.RegSize
Target = new(ir.Package)
// initialize types package
// (we need to do this to break dependencies that otherwise
// would lead to import cycles)
@ -221,6 +225,7 @@ func Main(archInit func(*Arch)) {
timings.Start("fe", "parse")
lines := parseFiles(flag.Args())
cgoSymABIs()
timings.Stop()
timings.AddEvent(int64(lines), "lines")
if base.Flag.G != 0 && base.Flag.G < 3 {
@ -245,33 +250,33 @@ func Main(archInit func(*Arch)) {
// to avoid cycles like #18640.
// TODO(gri) Remove this again once we have a fix for #25838.
// Don't use range--typecheck can add closures to xtop.
// Don't use range--typecheck can add closures to Target.Decls.
timings.Start("fe", "typecheck", "top1")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.Left().Name().Alias()) {
xtop[i] = typecheck(n, ctxStmt)
for i := 0; i < len(Target.Decls); i++ {
n := Target.Decls[i]
if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).Left().Name().Alias()) {
Target.Decls[i] = typecheck(n, ctxStmt)
}
}
// Phase 2: Variable assignments.
// To check interface assignments, depends on phase 1.
// Don't use range--typecheck can add closures to xtop.
// Don't use range--typecheck can add closures to Target.Decls.
timings.Start("fe", "typecheck", "top2")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.Left().Name().Alias() {
xtop[i] = typecheck(n, ctxStmt)
for i := 0; i < len(Target.Decls); i++ {
n := Target.Decls[i]
if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).Left().Name().Alias() {
Target.Decls[i] = typecheck(n, ctxStmt)
}
}
// Phase 3: Type check function bodies.
// Don't use range--typecheck can add closures to xtop.
// Don't use range--typecheck can add closures to Target.Decls.
timings.Start("fe", "typecheck", "func")
var fcount int64
for i := 0; i < len(xtop); i++ {
n := xtop[i]
for i := 0; i < len(Target.Decls); i++ {
n := Target.Decls[i]
if n.Op() == ir.ODCLFUNC {
Curfn = n.(*ir.Func)
decldepth = 1
@ -287,21 +292,34 @@ func Main(archInit func(*Arch)) {
fcount++
}
}
// With all types checked, it's now safe to verify map keys. One single
// check past phase 9 isn't sufficient, as we may exit with other errors
// before then, thus skipping map key errors.
// Phase 3.11: Check external declarations.
// TODO(mdempsky): This should be handled when type checking their
// corresponding ODCL nodes.
timings.Start("fe", "typecheck", "externdcls")
for i, n := range Target.Externs {
if n.Op() == ir.ONAME {
Target.Externs[i] = typecheck(Target.Externs[i], ctxExpr)
}
}
// Phase 3.14: With all user code type-checked, it's now safe to verify map keys
// and unused dot imports.
checkMapKeys()
checkDotImports()
base.ExitIfErrors()
timings.AddEvent(fcount, "funcs")
fninit(xtop)
if initTask := fninit(); initTask != nil {
exportsym(initTask)
}
// Phase 4: Decide how to capture closed variables.
// This needs to run before escape analysis,
// because variables captured by value do not escape.
timings.Start("fe", "capturevars")
for _, n := range xtop {
for _, n := range Target.Decls {
if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil {
Curfn = n.(*ir.Func)
capturevars(Curfn)
@ -326,7 +344,7 @@ func Main(archInit func(*Arch)) {
if base.Flag.LowerL != 0 {
// Find functions that can be inlined and clone them before walk expands them.
visitBottomUp(xtop, func(list []*ir.Func, recursive bool) {
visitBottomUp(Target.Decls, func(list []*ir.Func, recursive bool) {
numfns := numNonClosures(list)
for _, n := range list {
if !recursive || numfns > 1 {
@ -344,7 +362,7 @@ func Main(archInit func(*Arch)) {
})
}
for _, n := range xtop {
for _, n := range Target.Decls {
if n.Op() == ir.ODCLFUNC {
devirtualize(n.(*ir.Func))
}
@ -360,21 +378,21 @@ func Main(archInit func(*Arch)) {
// Large values are also moved off stack in escape analysis;
// because large values may contain pointers, it must happen early.
timings.Start("fe", "escapes")
escapes(xtop)
escapes(Target.Decls)
// Collect information for go:nowritebarrierrec
// checking. This must happen before transformclosure.
// We'll do the final check after write barriers are
// inserted.
if base.Flag.CompilingRuntime {
nowritebarrierrecCheck = newNowritebarrierrecChecker()
EnableNoWriteBarrierRecCheck()
}
// Phase 7: Transform closure bodies to properly reference captured variables.
// This needs to happen before walk, because closures must be transformed
// before walk reaches a call of a closure.
timings.Start("fe", "xclosures")
for _, n := range xtop {
for _, n := range Target.Decls {
if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil {
Curfn = n.(*ir.Func)
transformclosure(Curfn)
@ -393,11 +411,11 @@ func Main(archInit func(*Arch)) {
peekitabs()
// Phase 8: Compile top level functions.
// Don't use range--walk can add functions to xtop.
// Don't use range--walk can add functions to Target.Decls.
timings.Start("be", "compilefuncs")
fcount = 0
for i := 0; i < len(xtop); i++ {
n := xtop[i]
for i := 0; i < len(Target.Decls); i++ {
n := Target.Decls[i]
if n.Op() == ir.ODCLFUNC {
funccompile(n.(*ir.Func))
fcount++
@ -407,11 +425,9 @@ func Main(archInit func(*Arch)) {
compileFunctions()
if nowritebarrierrecCheck != nil {
// Write barriers are now known. Check the
// call graph.
nowritebarrierrecCheck.check()
nowritebarrierrecCheck = nil
if base.Flag.CompilingRuntime {
// Write barriers are now known. Check the call graph.
NoWriteBarrierRecCheck()
}
// Finalize DWARF inline routine DIEs, then explicitly turn off
@ -423,18 +439,6 @@ func Main(archInit func(*Arch)) {
base.Flag.GenDwarfInl = 0
}
// Phase 9: Check external declarations.
timings.Start("be", "externaldcls")
for i, n := range externdcl {
if n.Op() == ir.ONAME {
externdcl[i] = typecheck(externdcl[i], ctxExpr)
}
}
// Check the map keys again, since we typechecked the external
// declarations.
checkMapKeys()
base.ExitIfErrors()
// Write object data to disk.
timings.Start("be", "dumpobj")
dumpdata()
@ -476,6 +480,20 @@ func Main(archInit func(*Arch)) {
}
}
func cgoSymABIs() {
// The linker expects an ABI0 wrapper for all cgo-exported
// functions.
for _, prag := range Target.CgoPragmas {
switch prag[0] {
case "cgo_export_static", "cgo_export_dynamic":
if symabiRefs == nil {
symabiRefs = make(map[string]obj.ABI)
}
symabiRefs[prag[1]] = obj.ABI0
}
}
}
// numNonClosures returns the number of functions in list which are not closures.
func numNonClosures(list []*ir.Func) int {
count := 0
@ -961,10 +979,7 @@ func clearImports() {
if IsAlias(s) {
// throw away top-level name left over
// from previous import . "x"
if name := n.Name(); name != nil && name.PkgName != nil && !name.PkgName.Used && base.SyntaxErrors() == 0 {
unused = append(unused, importedPkg{name.PkgName.Pos(), name.PkgName.Pkg.Path, ""})
name.PkgName.Used = true
}
// We'll report errors after type checking in checkDotImports.
s.Def = nil
continue
}

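A recurring change in main.go and the noder above is folding scattered package-level slices (xtop, exportlist, asmlist, sourceOrderImports, pragcgobuf) into fields of a single Target *ir.Package. A condensed sketch of that consolidation with invented stand-in types:

	package main

	import "fmt"

	// Before: independent globals, appended to from different files.
	//
	//	var xtop []Decl
	//	var exportlist []Name
	//	var sourceOrderImports []Pkg
	//
	// After: one value that owns all per-compilation state.
	type Package struct {
		Decls   []string // top-level declarations (was xtop)
		Exports []string // exported symbols (was exportlist)
		Imports []string // imported packages, in source order
		Inits   []string // user init functions
	}

	var Target *Package

	func declare(name string)   { Target.Decls = append(Target.Decls, name) }
	func exportSym(name string) { Target.Exports = append(Target.Exports, name) }

	func main() {
		Target = new(Package)
		declare("func main")
		exportSym("Foo")
		fmt.Printf("%+v\n", *Target)
	}
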

@ -23,14 +23,13 @@ import (
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"cmd/compile/internal/types2"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
)
// parseFiles concurrently parses files into *syntax.File structures.
// Each declaration in every *syntax.File is converted to a syntax tree
// and its root represented by *Node is appended to xtop.
// and its root represented by *Node is appended to Target.Decls.
// Returns the total count of parsed lines.
func parseFiles(filenames []string) (lines uint) {
noders := make([]*noder, 0, len(filenames))
@ -39,8 +38,9 @@ func parseFiles(filenames []string) (lines uint) {
for _, filename := range filenames {
p := &noder{
basemap: make(map[*syntax.PosBase]*src.PosBase),
err: make(chan syntax.Error),
basemap: make(map[*syntax.PosBase]*src.PosBase),
err: make(chan syntax.Error),
trackScopes: base.Flag.Dwarf,
}
noders = append(noders, p)
@ -253,7 +253,8 @@ type noder struct {
// scopeVars is a stack tracking the number of variables declared in the
// current function at the moment each open scope was opened.
scopeVars []int
trackScopes bool
scopeVars []int
// typeInfo provides access to the type information computed by the new
// typechecker. It is only present if -G is set, and all noders point to
@ -312,7 +313,7 @@ func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) {
func (p *noder) openScope(pos syntax.Pos) {
types.Markdcl()
if trackScopes {
if p.trackScopes {
Curfn.Parents = append(Curfn.Parents, p.scope)
p.scopeVars = append(p.scopeVars, len(Curfn.Dcl))
p.scope = ir.ScopeID(len(Curfn.Parents))
@ -325,7 +326,7 @@ func (p *noder) closeScope(pos syntax.Pos) {
p.lastCloseScopePos = pos
types.Popdcl()
if trackScopes {
if p.trackScopes {
scopeVars := p.scopeVars[len(p.scopeVars)-1]
p.scopeVars = p.scopeVars[:len(p.scopeVars)-1]
if scopeVars == len(Curfn.Dcl) {
@ -393,7 +394,7 @@ func (p *noder) node() {
p.checkUnused(pragma)
}
xtop = append(xtop, p.decls(p.file.DeclList)...)
Target.Decls = append(Target.Decls, p.decls(p.file.DeclList)...)
base.Pos = src.NoXPos
clearImports()
@ -417,20 +418,7 @@ func (p *noder) processPragmas() {
}
n.Sym().Linkname = l.remote
}
// The linker expects an ABI0 wrapper for all cgo-exported
// functions.
for _, prag := range p.pragcgobuf {
switch prag[0] {
case "cgo_export_static", "cgo_export_dynamic":
if symabiRefs == nil {
symabiRefs = make(map[string]obj.ABI)
}
symabiRefs[prag[1]] = obj.ABI0
}
}
pragcgobuf = append(pragcgobuf, p.pragcgobuf...)
Target.CgoPragmas = append(Target.CgoPragmas, p.pragcgobuf...)
}
func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) {
@ -487,7 +475,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
}
if !ipkg.Direct {
sourceOrderImports = append(sourceOrderImports, ipkg)
Target.Imports = append(Target.Imports, ipkg)
}
ipkg.Direct = true
@ -502,7 +490,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
switch my.Name {
case ".":
importdot(ipkg, pack)
importDot(pack)
return
case "init":
base.ErrorfAt(pack.Pos(), "cannot import package as init - init must be a func")
@ -660,13 +648,14 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
if fun.Recv == nil {
if name.Name == "init" {
name = renameinit()
if t.List().Len() > 0 || t.Rlist().Len() > 0 {
if len(t.Params) > 0 || len(t.Results) > 0 {
base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values")
}
Target.Inits = append(Target.Inits, f)
}
if types.LocalPkg.Name == "main" && name.Name == "main" {
if t.List().Len() > 0 || t.Rlist().Len() > 0 {
if len(t.Params) > 0 || len(t.Results) > 0 {
base.ErrorfAt(f.Pos(), "func main must have no arguments and no return values")
}
}
@ -832,7 +821,7 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
if expr.Full {
op = ir.OSLICE3
}
n := p.nod(expr, op, p.expr(expr.X), nil)
n := ir.NewSliceExpr(p.pos(expr), op, p.expr(expr.X))
var index [3]ir.Node
for i, x := range &expr.Index {
if x != nil {
@ -849,9 +838,22 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
}
x := p.expr(expr.X)
if expr.Y == nil {
return p.nod(expr, p.unOp(expr.Op), x, nil)
pos, op := p.pos(expr), p.unOp(expr.Op)
switch op {
case ir.OADDR:
return nodAddrAt(pos, x)
case ir.ODEREF:
return ir.NewStarExpr(pos, x)
}
return ir.NewUnaryExpr(pos, op, x)
}
return p.nod(expr, p.binOp(expr.Op), x, p.expr(expr.Y))
pos, op, y := p.pos(expr), p.binOp(expr.Op), p.expr(expr.Y)
switch op {
case ir.OANDAND, ir.OOROR:
return ir.NewLogicalExpr(pos, op, x, y)
}
return ir.NewBinaryExpr(pos, op, x, y)
case *syntax.CallExpr:
n := p.nod(expr, ir.OCALL, p.expr(expr.Fun), nil)
n.PtrList().Set(p.exprs(expr.ArgList))
@ -1103,10 +1105,10 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node {
for i, stmt := range stmts {
s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
if s == nil {
} else if s.Op() == ir.OBLOCK && s.List().Len() > 0 {
} else if s.Op() == ir.OBLOCK && s.(*ir.BlockStmt).List().Len() > 0 {
// Inline non-empty block.
// Empty blocks must be preserved for checkreturn.
nodes = append(nodes, s.List().Slice()...)
nodes = append(nodes, s.(*ir.BlockStmt).List().Slice()...)
} else {
nodes = append(nodes, s)
}
@ -1140,22 +1142,23 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
return liststmt(p.decls(stmt.DeclList))
case *syntax.AssignStmt:
if stmt.Op != 0 && stmt.Op != syntax.Def {
n := p.nod(stmt, ir.OASOP, p.expr(stmt.Lhs), p.expr(stmt.Rhs))
n := ir.NewAssignOpStmt(p.pos(stmt), p.binOp(stmt.Op), p.expr(stmt.Lhs), p.expr(stmt.Rhs))
n.SetImplicit(stmt.Rhs == syntax.ImplicitOne)
n.SetSubOp(p.binOp(stmt.Op))
return n
}
rhs := p.exprList(stmt.Rhs)
if list, ok := stmt.Lhs.(*syntax.ListExpr); ok && len(list.ElemList) != 1 || len(rhs) != 1 {
n := p.nod(stmt, ir.OAS2, nil, nil)
n.PtrList().Set(p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def))
n.SetColas(stmt.Op == syntax.Def)
n.PtrList().Set(p.assignList(stmt.Lhs, n, n.Colas()))
n.PtrRlist().Set(rhs)
return n
}
n := p.nod(stmt, ir.OAS, nil, nil)
n.SetLeft(p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def)[0])
n.SetColas(stmt.Op == syntax.Def)
n.SetLeft(p.assignList(stmt.Lhs, n, n.Colas())[0])
n.SetRight(rhs[0])
return n
@ -1176,11 +1179,11 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
default:
panic("unhandled BranchStmt")
}
n := p.nod(stmt, op, nil, nil)
var sym *types.Sym
if stmt.Label != nil {
n.SetSym(p.name(stmt.Label))
sym = p.name(stmt.Label)
}
return n
return ir.NewBranchStmt(p.pos(stmt), op, sym)
case *syntax.CallStmt:
var op ir.Op
switch stmt.Tok {
@ -1191,7 +1194,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
default:
panic("unhandled CallStmt")
}
return p.nod(stmt, op, p.expr(stmt.Call), nil)
return ir.NewGoDeferStmt(p.pos(stmt), op, p.expr(stmt.Call))
case *syntax.ReturnStmt:
var results []ir.Node
if stmt.Results != nil {
@ -1230,8 +1233,6 @@ func (p *noder) assignList(expr syntax.Expr, defn ir.Node, colas bool) []ir.Node
return p.exprList(expr)
}
defn.SetColas(true)
var exprs []syntax.Expr
if list, ok := expr.(*syntax.ListExpr); ok {
exprs = list.ElemList
@ -1316,27 +1317,30 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
p.openScope(stmt.Pos())
var n ir.Node
if r, ok := stmt.Init.(*syntax.RangeClause); ok {
if stmt.Cond != nil || stmt.Post != nil {
panic("unexpected RangeClause")
}
n = p.nod(r, ir.ORANGE, nil, p.expr(r.X))
n := p.nod(r, ir.ORANGE, nil, p.expr(r.X))
if r.Lhs != nil {
n.PtrList().Set(p.assignList(r.Lhs, n, r.Def))
}
} else {
n = p.nod(stmt, ir.OFOR, nil, nil)
if stmt.Init != nil {
n.PtrInit().Set1(p.stmt(stmt.Init))
}
if stmt.Cond != nil {
n.SetLeft(p.expr(stmt.Cond))
}
if stmt.Post != nil {
n.SetRight(p.stmt(stmt.Post))
n.SetColas(r.Def)
n.PtrList().Set(p.assignList(r.Lhs, n, n.Colas()))
}
n.PtrBody().Set(p.blockStmt(stmt.Body))
p.closeAnotherScope()
return n
}
n := p.nod(stmt, ir.OFOR, nil, nil)
if stmt.Init != nil {
n.PtrInit().Set1(p.stmt(stmt.Init))
}
if stmt.Cond != nil {
n.SetLeft(p.expr(stmt.Cond))
}
if stmt.Post != nil {
n.SetRight(p.stmt(stmt.Post))
}
n.PtrBody().Set(p.blockStmt(stmt.Body))
p.closeAnotherScope()
@ -1353,9 +1357,9 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
n.SetLeft(p.expr(stmt.Tag))
}
tswitch := n.Left()
if tswitch != nil && tswitch.Op() != ir.OTYPESW {
tswitch = nil
var tswitch *ir.TypeSwitchGuard
if l := n.Left(); l != nil && l.Op() == ir.OTYPESW {
tswitch = l.(*ir.TypeSwitchGuard)
}
n.PtrList().Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
@ -1363,7 +1367,7 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
return n
}
func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch ir.Node, rbrace syntax.Pos) []ir.Node {
func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitchGuard, rbrace syntax.Pos) []ir.Node {
nodes := make([]ir.Node, 0, len(clauses))
for i, clause := range clauses {
p.setlineno(clause)
@ -1448,10 +1452,18 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
var ls ir.Node
if label.Stmt != nil { // TODO(mdempsky): Should always be present.
ls = p.stmtFall(label.Stmt, fallOK)
switch label.Stmt.(type) {
case *syntax.ForStmt, *syntax.SwitchStmt, *syntax.SelectStmt:
// Attach label directly to control statement too.
ls.SetSym(sym)
// Attach label directly to control statement too.
if ls != nil {
switch ls.Op() {
case ir.OFOR:
ls.SetSym(sym)
case ir.ORANGE:
ls.SetSym(sym)
case ir.OSWITCH:
ls.SetSym(sym)
case ir.OSELECT:
ls.SetSym(sym)
}
}
}
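The labeledStmt change above attaches the label symbol directly to the for/range/switch/select node as well as to the label itself. A minimal illustrative Go program (not part of this CL) showing why that matters: a labeled break or continue must be able to target the control statement, not just the label.

package main

import "fmt"

func main() {
Outer:
	for i := 0; i < 3; i++ {
		for j := 0; j < 3; j++ {
			if j == 1 {
				continue Outer // targets the outer loop, which carries the label
			}
			fmt.Println(i, j)
		}
	}
}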
@ -1603,8 +1615,9 @@ func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
}
fallthrough
case ir.ONAME, ir.ONONAME, ir.OPACK:
x = p.nod(n, ir.OPAREN, x, nil)
x.SetImplicit(true)
p := p.nod(n, ir.OPAREN, x, nil)
p.SetImplicit(true)
return p
}
return x
}


@ -117,13 +117,14 @@ func dumpCompilerObj(bout *bio.Writer) {
}
func dumpdata() {
externs := len(externdcl)
xtops := len(xtop)
numExterns := len(Target.Externs)
numDecls := len(Target.Decls)
dumpglobls()
dumpglobls(Target.Externs)
dumpfuncsyms()
addptabs()
exportlistLen := len(exportlist)
addsignats(externdcl)
numExports := len(Target.Exports)
addsignats(Target.Externs)
dumpsignats()
dumptabs()
ptabsLen := len(ptabs)
@ -140,28 +141,22 @@ func dumpdata() {
// In the typical case, we loop 0 or 1 times.
// It was not until issue 24761 that we found any code that required a loop at all.
for {
for i := xtops; i < len(xtop); i++ {
n := xtop[i]
for i := numDecls; i < len(Target.Decls); i++ {
n := Target.Decls[i]
if n.Op() == ir.ODCLFUNC {
funccompile(n.(*ir.Func))
}
}
xtops = len(xtop)
numDecls = len(Target.Decls)
compileFunctions()
dumpsignats()
if xtops == len(xtop) {
if numDecls == len(Target.Decls) {
break
}
}
// Dump extra globals.
tmp := externdcl
if externdcl != nil {
externdcl = externdcl[externs:]
}
dumpglobls()
externdcl = tmp
dumpglobls(Target.Externs[numExterns:])
if zerosize > 0 {
zero := mappkg.Lookup("zero")
@ -170,8 +165,8 @@ func dumpdata() {
addGCLocals()
if exportlistLen != len(exportlist) {
base.Fatalf("exportlist changed after compile functions loop")
if numExports != len(Target.Exports) {
base.Fatalf("Target.Exports changed after compile functions loop")
}
if ptabsLen != len(ptabs) {
base.Fatalf("ptabs changed after compile functions loop")
@ -184,11 +179,11 @@ func dumpdata() {
func dumpLinkerObj(bout *bio.Writer) {
printObjHeader(bout)
if len(pragcgobuf) != 0 {
if len(Target.CgoPragmas) != 0 {
// write empty export section; must be before cgo section
fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
fmt.Fprintf(bout, "\n$$ // cgo\n")
if err := json.NewEncoder(bout).Encode(pragcgobuf); err != nil {
if err := json.NewEncoder(bout).Encode(Target.CgoPragmas); err != nil {
base.Fatalf("serializing pragcgobuf: %v", err)
}
fmt.Fprintf(bout, "\n$$\n\n")
@ -203,15 +198,16 @@ func addptabs() {
if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
return
}
for _, exportn := range exportlist {
for _, exportn := range Target.Exports {
s := exportn.Sym()
n := ir.AsNode(s.Def)
if n == nil {
nn := ir.AsNode(s.Def)
if nn == nil {
continue
}
if n.Op() != ir.ONAME {
if nn.Op() != ir.ONAME {
continue
}
n := nn.(*ir.Name)
if !types.IsExported(s.Name) {
continue
}
@ -228,7 +224,7 @@ func addptabs() {
}
}
func dumpGlobal(n ir.Node) {
func dumpGlobal(n *ir.Name) {
if n.Type() == nil {
base.Fatalf("external %v nil type\n", n)
}
@ -266,17 +262,19 @@ func dumpGlobalConst(n ir.Node) {
base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, typesymname(t), ir.IntVal(t, v))
}
func dumpglobls() {
func dumpglobls(externs []ir.Node) {
// add globals
for _, n := range externdcl {
for _, n := range externs {
switch n.Op() {
case ir.ONAME:
dumpGlobal(n)
dumpGlobal(n.(*ir.Name))
case ir.OLITERAL:
dumpGlobalConst(n)
}
}
}
func dumpfuncsyms() {
sort.Slice(funcsyms, func(i, j int) bool {
return funcsyms[i].LinksymName() < funcsyms[j].LinksymName()
})
@ -285,9 +283,6 @@ func dumpglobls() {
dsymptr(sf, 0, s.Linksym(), 0)
ggloblsym(sf, int32(Widthptr), obj.DUPOK|obj.RODATA)
}
// Do not reprocess funcsyms on next dumpglobls call.
funcsyms = nil
}
// addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data.
@ -475,7 +470,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
var slicedataGen int
func slicedata(pos src.XPos, s string) ir.Node {
func slicedata(pos src.XPos, s string) *ir.Name {
slicedataGen++
symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
sym := types.LocalPkg.Lookup(symname)
@ -489,11 +484,11 @@ func slicedata(pos src.XPos, s string) ir.Node {
return symnode
}
func slicebytes(nam ir.Node, s string) {
func slicebytes(nam *ir.Name, off int64, s string) {
if nam.Op() != ir.ONAME {
base.Fatalf("slicebytes %v", nam)
}
slicesym(nam, slicedata(nam.Pos(), s), int64(len(s)))
slicesym(nam, off, slicedata(nam.Pos(), s), int64(len(s)))
}
func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
@ -528,22 +523,21 @@ func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
return off
}
// slicesym writes a static slice symbol {&arr, lencap, lencap} to n.
// arr must be an ONAME. slicesym does not modify n.
func slicesym(n, arr ir.Node, lencap int64) {
// slicesym writes a static slice symbol {&arr, lencap, lencap} to n+noff.
// slicesym does not modify n.
func slicesym(n *ir.Name, noff int64, arr *ir.Name, lencap int64) {
s := n.Sym().Linksym()
off := n.Offset()
if arr.Op() != ir.ONAME {
base.Fatalf("slicesym non-name arr %v", arr)
}
s.WriteAddr(base.Ctxt, off, Widthptr, arr.Sym().Linksym(), arr.Offset())
s.WriteInt(base.Ctxt, off+sliceLenOffset, Widthptr, lencap)
s.WriteInt(base.Ctxt, off+sliceCapOffset, Widthptr, lencap)
s.WriteAddr(base.Ctxt, noff, Widthptr, arr.Sym().Linksym(), 0)
s.WriteInt(base.Ctxt, noff+sliceLenOffset, Widthptr, lencap)
s.WriteInt(base.Ctxt, noff+sliceCapOffset, Widthptr, lencap)
}
// addrsym writes the static address of a to n. a must be an ONAME.
// Neither n nor a is modified.
func addrsym(n, a ir.Node) {
func addrsym(n *ir.Name, noff int64, a *ir.Name, aoff int64) {
if n.Op() != ir.ONAME {
base.Fatalf("addrsym n op %v", n.Op())
}
@ -554,12 +548,12 @@ func addrsym(n, a ir.Node) {
base.Fatalf("addrsym a op %v", a.Op())
}
s := n.Sym().Linksym()
s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, a.Sym().Linksym(), a.Offset())
s.WriteAddr(base.Ctxt, noff, Widthptr, a.Sym().Linksym(), aoff)
}
// pfuncsym writes the static address of f to n. f must be a global function.
// Neither n nor f is modified.
func pfuncsym(n, f ir.Node) {
func pfuncsym(n *ir.Name, noff int64, f *ir.Name) {
if n.Op() != ir.ONAME {
base.Fatalf("pfuncsym n op %v", n.Op())
}
@ -570,21 +564,18 @@ func pfuncsym(n, f ir.Node) {
base.Fatalf("pfuncsym class not PFUNC %d", f.Class())
}
s := n.Sym().Linksym()
s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, funcsym(f.Sym()).Linksym(), f.Offset())
s.WriteAddr(base.Ctxt, noff, Widthptr, funcsym(f.Sym()).Linksym(), 0)
}
// litsym writes the static literal c to n.
// Neither n nor c is modified.
func litsym(n, c ir.Node, wid int) {
func litsym(n *ir.Name, noff int64, c ir.Node, wid int) {
if n.Op() != ir.ONAME {
base.Fatalf("litsym n op %v", n.Op())
}
if n.Sym() == nil {
base.Fatalf("litsym nil n sym")
}
if !types.Identical(n.Type(), c.Type()) {
base.Fatalf("litsym: type mismatch: %v has type %v, but %v has type %v", n, n.Type(), c, c.Type())
}
if c.Op() == ir.ONIL {
return
}
@ -595,37 +586,37 @@ func litsym(n, c ir.Node, wid int) {
switch u := c.Val(); u.Kind() {
case constant.Bool:
i := int64(obj.Bool2int(constant.BoolVal(u)))
s.WriteInt(base.Ctxt, n.Offset(), wid, i)
s.WriteInt(base.Ctxt, noff, wid, i)
case constant.Int:
s.WriteInt(base.Ctxt, n.Offset(), wid, ir.IntVal(n.Type(), u))
s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u))
case constant.Float:
f, _ := constant.Float64Val(u)
switch n.Type().Kind() {
switch c.Type().Kind() {
case types.TFLOAT32:
s.WriteFloat32(base.Ctxt, n.Offset(), float32(f))
s.WriteFloat32(base.Ctxt, noff, float32(f))
case types.TFLOAT64:
s.WriteFloat64(base.Ctxt, n.Offset(), f)
s.WriteFloat64(base.Ctxt, noff, f)
}
case constant.Complex:
re, _ := constant.Float64Val(constant.Real(u))
im, _ := constant.Float64Val(constant.Imag(u))
switch n.Type().Kind() {
switch c.Type().Kind() {
case types.TCOMPLEX64:
s.WriteFloat32(base.Ctxt, n.Offset(), float32(re))
s.WriteFloat32(base.Ctxt, n.Offset()+4, float32(im))
s.WriteFloat32(base.Ctxt, noff, float32(re))
s.WriteFloat32(base.Ctxt, noff+4, float32(im))
case types.TCOMPLEX128:
s.WriteFloat64(base.Ctxt, n.Offset(), re)
s.WriteFloat64(base.Ctxt, n.Offset()+8, im)
s.WriteFloat64(base.Ctxt, noff, re)
s.WriteFloat64(base.Ctxt, noff+8, im)
}
case constant.String:
i := constant.StringVal(u)
symdata := stringsym(n.Pos(), i)
s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, symdata, 0)
s.WriteInt(base.Ctxt, n.Offset()+int64(Widthptr), Widthptr, int64(len(i)))
s.WriteAddr(base.Ctxt, noff, Widthptr, symdata, 0)
s.WriteInt(base.Ctxt, noff+int64(Widthptr), Widthptr, int64(len(i)))
default:
base.Fatalf("litsym unhandled OLITERAL %v", c)


@ -47,6 +47,7 @@ type Order struct {
out []ir.Node // list of generated statements
temp []*ir.Name // stack of temporary variables
free map[string][]*ir.Name // free list of unused temporaries, by type.LongString().
edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
}
// Order rewrites fn.Nbody to apply the ordering constraints
@ -60,6 +61,11 @@ func order(fn *ir.Func) {
orderBlock(fn.PtrBody(), map[string][]*ir.Name{})
}
// append typechecks stmt and appends it to out.
func (o *Order) append(stmt ir.Node) {
o.out = append(o.out, typecheck(stmt, ctxStmt))
}
// newTemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, newTemp emits code to zero the temporary.
@ -82,9 +88,7 @@ func (o *Order) newTemp(t *types.Type, clear bool) *ir.Name {
v = temp(t)
}
if clear {
a := ir.Nod(ir.OAS, v, nil)
a = typecheck(a, ctxStmt)
o.out = append(o.out, a)
o.append(ir.Nod(ir.OAS, v, nil))
}
o.temp = append(o.temp, v)
@ -114,9 +118,7 @@ func (o *Order) copyExprClear(n ir.Node) *ir.Name {
func (o *Order) copyExpr1(n ir.Node, clear bool) *ir.Name {
t := n.Type()
v := o.newTemp(t, clear)
a := ir.Nod(ir.OAS, v, n)
a = typecheck(a, ctxStmt)
o.out = append(o.out, a)
o.append(ir.Nod(ir.OAS, v, n))
return v
}
@ -137,7 +139,7 @@ func (o *Order) cheapExpr(n ir.Node) ir.Node {
if l == n.Left() {
return n
}
a := ir.SepCopy(n)
a := ir.SepCopy(n).(*ir.UnaryExpr)
a.SetLeft(l)
return typecheck(a, ctxExpr)
}
@ -157,21 +159,39 @@ func (o *Order) safeExpr(n ir.Node) ir.Node {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
case ir.ODOT, ir.OLEN, ir.OCAP:
case ir.OLEN, ir.OCAP:
l := o.safeExpr(n.Left())
if l == n.Left() {
return n
}
a := ir.SepCopy(n)
a := ir.SepCopy(n).(*ir.UnaryExpr)
a.SetLeft(l)
return typecheck(a, ctxExpr)
case ir.ODOTPTR, ir.ODEREF:
case ir.ODOT:
l := o.safeExpr(n.Left())
if l == n.Left() {
return n
}
a := ir.SepCopy(n).(*ir.SelectorExpr)
a.SetLeft(l)
return typecheck(a, ctxExpr)
case ir.ODOTPTR:
l := o.cheapExpr(n.Left())
if l == n.Left() {
return n
}
a := ir.SepCopy(n)
a := ir.SepCopy(n).(*ir.SelectorExpr)
a.SetLeft(l)
return typecheck(a, ctxExpr)
case ir.ODEREF:
l := o.cheapExpr(n.Left())
if l == n.Left() {
return n
}
a := ir.SepCopy(n).(*ir.StarExpr)
a.SetLeft(l)
return typecheck(a, ctxExpr)
@ -186,7 +206,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node {
if l == n.Left() && r == n.Right() {
return n
}
a := ir.SepCopy(n)
a := ir.SepCopy(n).(*ir.IndexExpr)
a.SetLeft(l)
a.SetRight(r)
return typecheck(a, ctxExpr)
@ -204,7 +224,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node {
// because we emit explicit VARKILL instructions marking the end of those
// temporaries' lifetimes.
func isaddrokay(n ir.Node) bool {
return islvalue(n) && (n.Op() != ir.ONAME || n.Class() == ir.PEXTERN || ir.IsAutoTmp(n))
return islvalue(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class() == ir.PEXTERN || ir.IsAutoTmp(n))
}
// addrTemp ensures that n is okay to pass by address to runtime routines.
@ -219,11 +239,11 @@ func (o *Order) addrTemp(n ir.Node) ir.Node {
dowidth(n.Type())
vstat := readonlystaticname(n.Type())
var s InitSchedule
s.staticassign(vstat, n)
s.staticassign(vstat, 0, n, n.Type())
if s.out != nil {
base.Fatalf("staticassign of const generated code: %+v", n)
}
vstat = typecheck(vstat, ctxExpr)
vstat = typecheck(vstat, ctxExpr).(*ir.Name)
return vstat
}
if isaddrokay(n) {
@ -265,6 +285,7 @@ func mapKeyReplaceStrConv(n ir.Node) bool {
replaced = true
case ir.OSTRUCTLIT:
for _, elem := range n.List().Slice() {
elem := elem.(*ir.StructKeyExpr)
if mapKeyReplaceStrConv(elem.Left()) {
replaced = true
}
@ -272,7 +293,7 @@ func mapKeyReplaceStrConv(n ir.Node) bool {
case ir.OARRAYLIT:
for _, elem := range n.List().Slice() {
if elem.Op() == ir.OKEY {
elem = elem.Right()
elem = elem.(*ir.KeyExpr).Right()
}
if mapKeyReplaceStrConv(elem) {
replaced = true
@ -306,9 +327,7 @@ func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node {
var out []ir.Node
for i := len(o.temp) - 1; i >= int(mark); i-- {
n := o.temp[i]
kill := ir.Nod(ir.OVARKILL, n, nil)
kill = typecheck(kill, ctxStmt)
out = append(out, kill)
out = append(out, typecheck(ir.Nod(ir.OVARKILL, n, nil), ctxStmt))
}
return out
}
@ -337,60 +356,31 @@ func orderMakeSliceCopy(s []ir.Node) {
if base.Flag.N != 0 || instrumenting {
return
}
if len(s) < 2 {
if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
return
}
asn := s[0]
copyn := s[1]
if asn == nil || asn.Op() != ir.OAS {
return
}
if asn.Left().Op() != ir.ONAME {
return
}
if ir.IsBlank(asn.Left()) {
return
}
maken := asn.Right()
if maken == nil || maken.Op() != ir.OMAKESLICE {
return
}
if maken.Esc() == EscNone {
return
}
if maken.Left() == nil || maken.Right() != nil {
return
}
if copyn.Op() != ir.OCOPY {
return
}
if copyn.Left().Op() != ir.ONAME {
return
}
if asn.Left().Sym() != copyn.Left().Sym() {
return
}
if copyn.Right().Op() != ir.ONAME {
as := s[0].(*ir.AssignStmt)
cp := s[1].(*ir.BinaryExpr)
if as.Right() == nil || as.Right().Op() != ir.OMAKESLICE || ir.IsBlank(as.Left()) ||
as.Left().Op() != ir.ONAME || cp.Left().Op() != ir.ONAME || cp.Right().Op() != ir.ONAME ||
as.Left().Name() != cp.Left().Name() || cp.Left().Name() == cp.Right().Name() {
// The line above this one is correct with the differing equality operators:
// we want as.X and cp.X to be the same name,
// but we want the initial data to be coming from a different name.
return
}
if copyn.Left().Sym() == copyn.Right().Sym() {
mk := as.Right().(*ir.MakeExpr)
if mk.Esc() == EscNone || mk.Left() == nil || mk.Right() != nil {
return
}
maken.SetOp(ir.OMAKESLICECOPY)
maken.SetRight(copyn.Right())
mk.SetOp(ir.OMAKESLICECOPY)
mk.SetRight(cp.Right())
// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
maken.SetBounded(maken.Left().Op() == ir.OLEN && samesafeexpr(maken.Left().Left(), copyn.Right()))
maken = typecheck(maken, ctxExpr)
mk.SetBounded(mk.Left().Op() == ir.OLEN && samesafeexpr(mk.Left().(*ir.UnaryExpr).Left(), cp.Right()))
as.SetRight(typecheck(mk, ctxExpr))
s[1] = nil // remove separate copy call
return
}
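For reference, the source-level pattern that orderMakeSliceCopy fuses into a single OMAKESLICECOPY node looks like the following; this is an illustrative sketch, not code from this CL.

package main

import "fmt"

func main() {
	src := []int{1, 2, 3}
	// make-then-copy from a different slice: recognized and fused into one
	// allocate-and-copy operation (OMAKESLICECOPY) by orderMakeSliceCopy.
	dst := make([]int, len(src))
	copy(dst, src)
	fmt.Println(dst) // [1 2 3]
}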
// edge inserts coverage instrumentation for libfuzzer.
@ -405,11 +395,8 @@ func (o *Order) edge() {
counter.Name().SetLibfuzzerExtraCounter(true)
// counter += 1
incr := ir.Nod(ir.OASOP, counter, nodintconst(1))
incr.SetSubOp(ir.OADD)
incr = typecheck(incr, ctxStmt)
o.out = append(o.out, incr)
incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, nodintconst(1))
o.append(incr)
}
// orderBlock orders the block of statements in n into a new slice,
@ -471,20 +458,34 @@ func (o *Order) init(n ir.Node) {
// call orders the call expression n.
// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
func (o *Order) call(n ir.Node) {
if n.Init().Len() > 0 {
// Caller should have already called o.init(n).
base.Fatalf("%v with unexpected ninit", n.Op())
func (o *Order) call(nn ir.Node) {
if nn.Init().Len() > 0 {
// Caller should have already called o.init(nn).
base.Fatalf("%v with unexpected ninit", nn.Op())
}
// Builtin functions.
if n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER {
n.SetLeft(o.expr(n.Left(), nil))
n.SetRight(o.expr(n.Right(), nil))
o.exprList(n.List())
if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLMETH && nn.Op() != ir.OCALLINTER {
switch n := nn.(type) {
default:
base.Fatalf("unexpected call: %+v", n)
case *ir.UnaryExpr:
n.SetLeft(o.expr(n.Left(), nil))
case *ir.ConvExpr:
n.SetLeft(o.expr(n.Left(), nil))
case *ir.BinaryExpr:
n.SetLeft(o.expr(n.Left(), nil))
n.SetRight(o.expr(n.Right(), nil))
case *ir.MakeExpr:
n.SetLeft(o.expr(n.Left(), nil))
n.SetRight(o.expr(n.Right(), nil))
case *ir.CallExpr:
o.exprList(n.List())
}
return
}
n := nn.(*ir.CallExpr)
fixVariadicCall(n)
n.SetLeft(o.expr(n.Left(), nil))
o.exprList(n.List())
@ -497,11 +498,13 @@ func (o *Order) call(n ir.Node) {
// arrange for the pointer to be kept alive until the call returns,
// by copying it into a temp and marking that temp
// still alive when we pop the temp stack.
if arg.Op() == ir.OCONVNOP && arg.Left().Type().IsUnsafePtr() {
x := o.copyExpr(arg.Left())
arg.SetLeft(x)
x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable
n.PtrBody().Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt))
if arg.Op() == ir.OCONVNOP {
if arg.Left().Type().IsUnsafePtr() {
x := o.copyExpr(arg.Left())
arg.SetLeft(x)
x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable
n.PtrBody().Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt))
}
}
}
@ -539,18 +542,14 @@ func (o *Order) mapAssign(n ir.Node) {
default:
base.Fatalf("order.mapAssign %v", n.Op())
case ir.OAS, ir.OASOP:
case ir.OAS:
if n.Left().Op() == ir.OINDEXMAP {
// Make sure we evaluate the RHS before starting the map insert.
// We need to make sure the RHS won't panic. See issue 22881.
if n.Right().Op() == ir.OAPPEND {
s := n.Right().List().Slice()[1:]
for i, n := range s {
s[i] = o.cheapExpr(n)
}
} else {
n.SetRight(o.cheapExpr(n.Right()))
}
n.SetRight(o.safeMapRHS(n.Right()))
}
o.out = append(o.out, n)
case ir.OASOP:
if n.Left().Op() == ir.OINDEXMAP {
n.SetRight(o.safeMapRHS(n.Right()))
}
o.out = append(o.out, n)
@ -559,6 +558,7 @@ func (o *Order) mapAssign(n ir.Node) {
for i, m := range n.List().Slice() {
switch {
case m.Op() == ir.OINDEXMAP:
m := m.(*ir.IndexExpr)
if !ir.IsAutoTmp(m.Left()) {
m.SetLeft(o.copyExpr(m.Left()))
}
@ -570,8 +570,7 @@ func (o *Order) mapAssign(n ir.Node) {
t := o.newTemp(m.Type(), false)
n.List().SetIndex(i, t)
a := ir.Nod(ir.OAS, m, t)
a = typecheck(a, ctxStmt)
post = append(post, a)
post = append(post, typecheck(a, ctxStmt))
}
}
@ -580,6 +579,19 @@ func (o *Order) mapAssign(n ir.Node) {
}
}
func (o *Order) safeMapRHS(r ir.Node) ir.Node {
// Make sure we evaluate the RHS before starting the map insert.
// We need to make sure the RHS won't panic. See issue 22881.
if r.Op() == ir.OAPPEND {
s := r.List().Slice()[1:]
for i, n := range s {
s[i] = o.cheapExpr(n)
}
return r
}
return o.cheapExpr(r)
}
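safeMapRHS preserves the rule that the right-hand side is fully evaluated before the map insert starts (issue 22881). A minimal user-level sketch of the semantics being preserved (illustrative only, not from this CL):

package main

import "fmt"

func main() {
	m := map[int][]int{}
	func() {
		defer func() { recover() }()
		var s []int
		// The right-hand side panics (index out of range) before the map
		// insert can happen, so no key may be left behind in m.
		m[0] = append(m[0], s[1])
	}()
	fmt.Println(len(m)) // 0
}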
// stmt orders the statement n, appending to o.out.
// Temporaries created during the statement are cleaned
// up using VARKILL instructions as possible.
@ -619,15 +631,15 @@ func (o *Order) stmt(n ir.Node) {
// makes sure there is nothing too deep being copied.
l1 := o.safeExpr(n.Left())
l2 := ir.DeepCopy(src.NoXPos, l1)
if l1.Op() == ir.OINDEXMAP {
if l2.Op() == ir.OINDEXMAP {
l2.SetIndexMapLValue(false)
}
l2 = o.copyExpr(l2)
r := ir.NodAt(n.Pos(), n.SubOp(), l2, n.Right())
r = typecheck(r, ctxExpr)
r = o.expr(r, nil)
n = ir.NodAt(n.Pos(), ir.OAS, l1, r)
n = typecheck(n, ctxStmt)
r := o.expr(typecheck(ir.NewBinaryExpr(n.Pos(), n.SubOp(), l2, n.Right()), ctxExpr), nil)
as := typecheck(ir.NodAt(n.Pos(), ir.OAS, l1, r), ctxStmt)
o.mapAssign(as)
o.cleanTemp(t)
return
}
o.mapAssign(n)
@ -642,6 +654,7 @@ func (o *Order) stmt(n ir.Node) {
// Special: avoid copy of func call n.Right
case ir.OAS2FUNC:
n := n.(*ir.AssignListStmt)
t := o.markTemp()
o.exprList(n.List())
o.init(n.Rlist().First())
@ -656,11 +669,14 @@ func (o *Order) stmt(n ir.Node) {
// OAS2MAPR: make sure key is addressable if needed,
// and make sure OINDEXMAP is not copied out.
case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
n := n.(*ir.AssignListStmt)
t := o.markTemp()
o.exprList(n.List())
switch r := n.Rlist().First(); r.Op() {
case ir.ODOTTYPE2, ir.ORECV:
case ir.ODOTTYPE2:
r.SetLeft(o.expr(r.Left(), nil))
case ir.ORECV:
r.SetLeft(o.expr(r.Left(), nil))
case ir.OINDEXMAP:
r.SetLeft(o.expr(r.Left(), nil))
@ -698,17 +714,22 @@ func (o *Order) stmt(n ir.Node) {
o.out = append(o.out, n)
o.cleanTemp(t)
case ir.OCLOSE,
ir.OCOPY,
ir.OPRINT,
ir.OPRINTN,
ir.ORECOVER,
ir.ORECV:
case ir.OCLOSE, ir.ORECV:
t := o.markTemp()
n.SetLeft(o.expr(n.Left(), nil))
o.out = append(o.out, n)
o.cleanTemp(t)
case ir.OCOPY:
t := o.markTemp()
n.SetLeft(o.expr(n.Left(), nil))
n.SetRight(o.expr(n.Right(), nil))
o.out = append(o.out, n)
o.cleanTemp(t)
case ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
t := o.markTemp()
o.exprList(n.List())
o.exprList(n.Rlist())
o.out = append(o.out, n)
o.cleanTemp(t)
@ -776,8 +797,9 @@ func (o *Order) stmt(n ir.Node) {
// Mark []byte(str) range expression to reuse string backing storage.
// It is safe because the storage cannot be mutated.
n := n.(*ir.RangeStmt)
if n.Right().Op() == ir.OSTR2BYTES {
n.Right().SetOp(ir.OSTR2BYTESTMP)
n.Right().(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
}
t := o.markTemp()
@ -824,9 +846,9 @@ func (o *Order) stmt(n ir.Node) {
r := n.Right()
n.SetRight(o.copyExpr(r))
// prealloc[n] is the temp for the iterator.
// n.Prealloc is the temp for the iterator.
// hiter contains pointers and needs to be zeroed.
prealloc[n] = o.newTemp(hiter(n.Type()), true)
n.Prealloc = o.newTemp(hiter(n.Type()), true)
}
o.exprListInPlace(n.List())
if orderBody {
@ -850,17 +872,14 @@ func (o *Order) stmt(n ir.Node) {
// give this away).
case ir.OSELECT:
t := o.markTemp()
for _, n2 := range n.List().Slice() {
if n2.Op() != ir.OCASE {
base.Fatalf("order select case %v", n2.Op())
}
r := n2.Left()
setlineno(n2)
for _, ncas := range n.List().Slice() {
ncas := ncas.(*ir.CaseStmt)
r := ncas.Left()
setlineno(ncas)
// Append any new body prologue to ninit.
// The next loop will insert ninit into nbody.
if n2.Init().Len() != 0 {
if ncas.Init().Len() != 0 {
base.Fatalf("order select ninit")
}
if r == nil {
@ -871,84 +890,46 @@ func (o *Order) stmt(n ir.Node) {
ir.Dump("select case", r)
base.Fatalf("unknown op in select %v", r.Op())
case ir.OSELRECV, ir.OSELRECV2:
var dst, ok, recv ir.Node
if r.Op() == ir.OSELRECV {
// case x = <-c
// case <-c (dst is ir.BlankNode)
dst, ok, recv = r.Left(), ir.BlankNode, r.Right()
} else {
// case x, ok = <-c
dst, ok, recv = r.List().First(), r.List().Second(), r.Rlist().First()
case ir.OSELRECV2:
// case x, ok = <-c
r := r.(*ir.AssignListStmt)
recv := r.Rlist().First().(*ir.UnaryExpr)
recv.SetLeft(o.expr(recv.Left(), nil))
if !ir.IsAutoTmp(recv.Left()) {
recv.SetLeft(o.copyExpr(recv.Left()))
}
init := r.PtrInit().Slice()
r.PtrInit().Set(nil)
// If this is case x := <-ch or case x, y := <-ch, the case has
// the ODCL nodes to declare x and y. We want to delay that
// declaration (and possible allocation) until inside the case body.
// Delete the ODCL nodes here and recreate them inside the body below.
if r.Colas() {
init := r.Init().Slice()
if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].Left() == dst {
init = init[1:]
colas := r.Colas()
do := func(i int, t *types.Type) {
n := r.List().Index(i)
if ir.IsBlank(n) {
return
}
if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].Left() == ok {
init = init[1:]
// If this is case x := <-ch or case x, y := <-ch, the case has
// the ODCL nodes to declare x and y. We want to delay that
// declaration (and possible allocation) until inside the case body.
// Delete the ODCL nodes here and recreate them inside the body below.
if colas {
if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).Left() == n {
init = init[1:]
}
dcl := typecheck(ir.Nod(ir.ODCL, n, nil), ctxStmt)
ncas.PtrInit().Append(dcl)
}
r.PtrInit().Set(init)
tmp := o.newTemp(t, t.HasPointers())
as := typecheck(ir.Nod(ir.OAS, n, conv(tmp, n.Type())), ctxStmt)
ncas.PtrInit().Append(as)
r.PtrList().SetIndex(i, tmp)
}
if r.Init().Len() != 0 {
do(0, recv.Left().Type().Elem())
do(1, types.Types[types.TBOOL])
if len(init) != 0 {
ir.DumpList("ninit", r.Init())
base.Fatalf("ninit on select recv")
}
recv.SetLeft(o.expr(recv.Left(), nil))
if recv.Left().Op() != ir.ONAME {
recv.SetLeft(o.copyExpr(recv.Left()))
}
// Introduce temporary for receive and move actual copy into case body.
// avoids problems with target being addressed, as usual.
// NOTE: If we wanted to be clever, we could arrange for just one
// temporary per distinct type, sharing the temp among all receives
// with that temp. Similarly one ok bool could be shared among all
// the x,ok receives. Not worth doing until there's a clear need.
if !ir.IsBlank(dst) {
// use channel element type for temporary to avoid conversions,
// such as in case interfacevalue = <-intchan.
// the conversion happens in the OAS instead.
if r.Colas() {
dcl := ir.Nod(ir.ODCL, dst, nil)
dcl = typecheck(dcl, ctxStmt)
n2.PtrInit().Append(dcl)
}
tmp := o.newTemp(recv.Left().Type().Elem(), recv.Left().Type().Elem().HasPointers())
as := ir.Nod(ir.OAS, dst, tmp)
as = typecheck(as, ctxStmt)
n2.PtrInit().Append(as)
dst = tmp
}
if !ir.IsBlank(ok) {
if r.Colas() {
dcl := ir.Nod(ir.ODCL, ok, nil)
dcl = typecheck(dcl, ctxStmt)
n2.PtrInit().Append(dcl)
}
tmp := o.newTemp(types.Types[types.TBOOL], false)
as := ir.Nod(ir.OAS, ok, conv(tmp, ok.Type()))
as = typecheck(as, ctxStmt)
n2.PtrInit().Append(as)
ok = tmp
}
if r.Op() == ir.OSELRECV {
r.SetLeft(dst)
} else {
r.List().SetIndex(0, dst)
r.List().SetIndex(1, ok)
}
orderBlock(n2.PtrInit(), o.free)
orderBlock(ncas.PtrInit(), o.free)
case ir.OSEND:
if r.Init().Len() != 0 {
@ -972,14 +953,15 @@ func (o *Order) stmt(n ir.Node) {
// Now that we have accumulated all the temporaries, clean them.
// Also insert any ninit queued during the previous loop.
// (The temporary cleaning must follow that ninit work.)
for _, n3 := range n.List().Slice() {
orderBlock(n3.PtrBody(), o.free)
n3.PtrBody().Prepend(o.cleanTempNoPop(t)...)
for _, cas := range n.List().Slice() {
cas := cas.(*ir.CaseStmt)
orderBlock(cas.PtrBody(), o.free)
cas.PtrBody().Prepend(o.cleanTempNoPop(t)...)
// TODO(mdempsky): Is this actually necessary?
// walkselect appears to walk Ninit.
n3.PtrBody().Prepend(n3.Init().Slice()...)
n3.PtrInit().Set(nil)
cas.PtrBody().Prepend(cas.Init().Slice()...)
cas.PtrInit().Set(nil)
}
o.out = append(o.out, n)
@ -1008,6 +990,7 @@ func (o *Order) stmt(n ir.Node) {
// For now just clean all the temporaries at the end.
// In practice that's fine.
case ir.OSWITCH:
n := n.(*ir.SwitchStmt)
if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
// Add empty "default:" case for instrumentation.
n.PtrList().Append(ir.Nod(ir.OCASE, nil, nil))
@ -1016,9 +999,7 @@ func (o *Order) stmt(n ir.Node) {
t := o.markTemp()
n.SetLeft(o.expr(n.Left(), nil))
for _, ncas := range n.List().Slice() {
if ncas.Op() != ir.OCASE {
base.Fatalf("order switch case %v", ncas.Op())
}
ncas := ncas.(*ir.CaseStmt)
o.exprListInPlace(ncas.List())
orderBlock(ncas.PtrBody(), o.free)
}
@ -1030,11 +1011,9 @@ func (o *Order) stmt(n ir.Node) {
base.Pos = lno
}
func hasDefaultCase(n ir.Node) bool {
func hasDefaultCase(n *ir.SwitchStmt) bool {
for _, ncas := range n.List().Slice() {
if ncas.Op() != ir.OCASE {
base.Fatalf("expected case, found %v", ncas.Op())
}
ncas := ncas.(*ir.CaseStmt)
if ncas.List().Len() == 0 {
return true
}
@ -1059,9 +1038,6 @@ func (o *Order) exprListInPlace(l ir.Nodes) {
}
}
// prealloc[x] records the allocation to use for x.
var prealloc = map[ir.Node]ir.Node{}
func (o *Order) exprNoLHS(n ir.Node) ir.Node {
return o.expr(n, nil)
}
@ -1077,23 +1053,33 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
if n == nil {
return n
}
lno := setlineno(n)
n = o.expr1(n, lhs)
base.Pos = lno
return n
}
func (o *Order) expr1(n, lhs ir.Node) ir.Node {
o.init(n)
switch n.Op() {
default:
ir.EditChildren(n, o.exprNoLHS)
if o.edit == nil {
o.edit = o.exprNoLHS // create closure once
}
ir.EditChildren(n, o.edit)
return n
// Addition of strings turns into a function call.
// Allocate a temporary to hold the strings.
// Fewer than 5 strings use direct runtime helpers.
case ir.OADDSTR:
n := n.(*ir.AddStringExpr)
o.exprList(n.List())
if n.List().Len() > 5 {
t := types.NewArray(types.Types[types.TSTRING], int64(n.List().Len()))
prealloc[n] = o.newTemp(t, false)
n.Prealloc = o.newTemp(t, false)
}
// Mark string(byteSlice) arguments to reuse byteSlice backing
@ -1118,6 +1104,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
}
}
}
return n
case ir.OINDEXMAP:
n.SetLeft(o.expr(n.Left(), nil))
@ -1140,15 +1127,16 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
// key must be addressable
n.SetRight(o.mapKeyTemp(n.Left().Type(), n.Right()))
if needCopy {
n = o.copyExpr(n)
return o.copyExpr(n)
}
return n
// concrete type (not interface) argument might need an addressable
// temporary to pass to the runtime conversion routine.
case ir.OCONVIFACE:
n.SetLeft(o.expr(n.Left(), nil))
if n.Left().Type().IsInterface() {
break
return n
}
if _, needsaddr := convFuncName(n.Left().Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.Left()) {
// Need a temp if we need to pass the address to the conversion function.
@ -1156,20 +1144,23 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
// whose address we can put directly in an interface (see OCONVIFACE case in walk).
n.SetLeft(o.addrTemp(n.Left()))
}
return n
case ir.OCONVNOP:
if n.Type().IsKind(types.TUNSAFEPTR) && n.Left().Type().IsKind(types.TUINTPTR) && (n.Left().Op() == ir.OCALLFUNC || n.Left().Op() == ir.OCALLINTER || n.Left().Op() == ir.OCALLMETH) {
call := n.Left().(*ir.CallExpr)
// When reordering unsafe.Pointer(f()) into a separate
// statement, the conversion and function call must stay
// together. See golang.org/issue/15329.
o.init(n.Left())
o.call(n.Left())
o.init(call)
o.call(call)
if lhs == nil || lhs.Op() != ir.ONAME || instrumenting {
n = o.copyExpr(n)
return o.copyExpr(n)
}
} else {
n.SetLeft(o.expr(n.Left(), nil))
}
return n
case ir.OANDAND, ir.OOROR:
// ... = LHS && RHS
@ -1206,7 +1197,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
nif.PtrRlist().Set(gen)
}
o.out = append(o.out, nif)
n = r
return r
case ir.OCALLFUNC,
ir.OCALLINTER,
@ -1229,27 +1220,31 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
if isRuneCount(n) {
// len([]rune(s)) is rewritten to runtime.countrunes(s) later.
n.Left().SetLeft(o.expr(n.Left().Left(), nil))
conv := n.(*ir.UnaryExpr).Left().(*ir.ConvExpr)
conv.SetLeft(o.expr(conv.Left(), nil))
} else {
o.call(n)
}
if lhs == nil || lhs.Op() != ir.ONAME || instrumenting {
n = o.copyExpr(n)
return o.copyExpr(n)
}
return n
case ir.OAPPEND:
// Check for append(x, make([]T, y)...) .
if isAppendOfMake(n) {
n.List().SetFirst(o.expr(n.List().First(), nil)) // order x
n.List().Second().SetLeft(o.expr(n.List().Second().Left(), nil)) // order y
n.List().SetFirst(o.expr(n.List().First(), nil)) // order x
mk := n.List().Second().(*ir.MakeExpr)
mk.SetLeft(o.expr(mk.Left(), nil)) // order y
} else {
o.exprList(n.List())
}
if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.List().First()) {
n = o.copyExpr(n)
return o.copyExpr(n)
}
return n
case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
n.SetLeft(o.expr(n.Left(), nil))
@ -1262,39 +1257,45 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
max = o.cheapExpr(max)
n.SetSliceBounds(low, high, max)
if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Left()) {
n = o.copyExpr(n)
return o.copyExpr(n)
}
return n
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
if n.Transient() && len(n.Func().ClosureVars) > 0 {
prealloc[n] = o.newTemp(closureType(n), false)
n.Prealloc = o.newTemp(closureType(n), false)
}
return n
case ir.OSLICELIT, ir.OCALLPART:
case ir.OCALLPART:
n := n.(*ir.CallPartExpr)
n.SetLeft(o.expr(n.Left(), nil))
n.SetRight(o.expr(n.Right(), nil))
o.exprList(n.List())
o.exprList(n.Rlist())
if n.Transient() {
var t *types.Type
switch n.Op() {
case ir.OSLICELIT:
t = types.NewArray(n.Type().Elem(), ir.Int64Val(n.Right()))
case ir.OCALLPART:
t = partialCallType(n)
}
prealloc[n] = o.newTemp(t, false)
t := partialCallType(n)
n.Prealloc = o.newTemp(t, false)
}
return n
case ir.OSLICELIT:
n := n.(*ir.CompLitExpr)
o.exprList(n.List())
if n.Transient() {
t := types.NewArray(n.Type().Elem(), ir.Int64Val(n.Right()))
n.Prealloc = o.newTemp(t, false)
}
return n
case ir.ODOTTYPE, ir.ODOTTYPE2:
n.SetLeft(o.expr(n.Left(), nil))
if !isdirectiface(n.Type()) || instrumenting {
n = o.copyExprClear(n)
return o.copyExprClear(n)
}
return n
case ir.ORECV:
n.SetLeft(o.expr(n.Left(), nil))
n = o.copyExprClear(n)
return o.copyExprClear(n)
case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
n.SetLeft(o.expr(n.Left(), nil))
@ -1307,10 +1308,10 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
// buffer during conversion. String comparison does not
// memorize the strings for later use, so it is safe.
if n.Left().Op() == ir.OBYTES2STR {
n.Left().SetOp(ir.OBYTES2STRTMP)
n.Left().(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
}
if n.Right().Op() == ir.OBYTES2STR {
n.Right().SetOp(ir.OBYTES2STRTMP)
n.Right().(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
}
case t.IsStruct() || t.IsArray():
@ -1319,6 +1320,8 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
n.SetLeft(o.addrTemp(n.Left()))
n.SetRight(o.addrTemp(n.Right()))
}
return n
case ir.OMAPLIT:
// Order map by converting:
// map[int]int{
@ -1337,11 +1340,9 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
// See issue 26552.
entries := n.List().Slice()
statics := entries[:0]
var dynamics []ir.Node
var dynamics []*ir.KeyExpr
for _, r := range entries {
if r.Op() != ir.OKEY {
base.Fatalf("OMAPLIT entry not OKEY: %v\n", r)
}
r := r.(*ir.KeyExpr)
if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
dynamics = append(dynamics, r)
@ -1350,7 +1351,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
// Recursively ordering some static entries can change them to dynamic;
// e.g., OCONVIFACE nodes. See #31777.
r = o.expr(r, nil)
r = o.expr(r, nil).(*ir.KeyExpr)
if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
dynamics = append(dynamics, r)
continue
@ -1361,7 +1362,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
n.PtrList().Set(statics)
if len(dynamics) == 0 {
break
return n
}
// Emit the creation of the map (with all its static entries).
@ -1369,18 +1370,17 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
as := ir.Nod(ir.OAS, m, n)
typecheck(as, ctxStmt)
o.stmt(as)
n = m
// Emit eval+insert of dynamic entries, one at a time.
for _, r := range dynamics {
as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, n, r.Left()), r.Right())
as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, r.Left()), r.Right())
typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP
o.stmt(as)
}
return m
}
base.Pos = lno
return n
// No return - type-assertions above. Each case must return for itself.
}
// as2 orders OAS2XXXX nodes. It creates temporaries to ensure left-to-right assignment.
@ -1391,7 +1391,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
// tmp1, tmp2, tmp3 = ...
// a, b, a = tmp1, tmp2, tmp3
// This is necessary to ensure left to right assignment order.
func (o *Order) as2(n ir.Node) {
func (o *Order) as2(n *ir.AssignListStmt) {
tmplist := []ir.Node{}
left := []ir.Node{}
for ni, l := range n.List().Slice() {
@ -1408,13 +1408,12 @@ func (o *Order) as2(n ir.Node) {
as := ir.Nod(ir.OAS2, nil, nil)
as.PtrList().Set(left)
as.PtrRlist().Set(tmplist)
as = typecheck(as, ctxStmt)
o.stmt(as)
o.stmt(typecheck(as, ctxStmt))
}
// okAs2 orders OAS2XXX with ok.
// Just like as2, this also adds temporaries to ensure left-to-right assignment.
func (o *Order) okAs2(n ir.Node) {
func (o *Order) okAs2(n *ir.AssignListStmt) {
var tmp1, tmp2 ir.Node
if !ir.IsBlank(n.List().First()) {
typ := n.Rlist().First().Type()
@ -1429,14 +1428,12 @@ func (o *Order) okAs2(n ir.Node) {
if tmp1 != nil {
r := ir.Nod(ir.OAS, n.List().First(), tmp1)
r = typecheck(r, ctxStmt)
o.mapAssign(r)
o.mapAssign(typecheck(r, ctxStmt))
n.List().SetFirst(tmp1)
}
if tmp2 != nil {
r := ir.Nod(ir.OAS, n.List().Second(), conv(tmp2, n.List().Second().Type()))
r = typecheck(r, ctxStmt)
o.mapAssign(r)
o.mapAssign(typecheck(r, ctxStmt))
n.List().SetSecond(tmp2)
}
}
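as2 and okAs2 route every value through a temporary so that all right-hand sides are evaluated before any assignment is carried out. An illustrative program (not part of this CL) where that left-to-right ordering is observable:

package main

import "fmt"

func main() {
	a, b := 1, 2
	// All right-hand sides are evaluated first; the assignments then happen
	// left to right, so the repeated target a ends up with the last value.
	a, b, a = 10, 20, 30
	fmt.Println(a, b) // 30 20
}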


@ -74,7 +74,7 @@ func cmpstackvarlt(a, b *ir.Name) bool {
}
if a.Class() != ir.PAUTO {
return a.Offset() < b.Offset()
return a.FrameOffset() < b.FrameOffset()
}
if a.Used() != b.Used() {
@ -186,7 +186,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
s.stksize = Rnd(s.stksize, int64(Widthptr))
}
n.SetOffset(-s.stksize)
n.SetFrameOffset(-s.stksize)
}
s.stksize = Rnd(s.stksize, int64(Widthreg))
@ -287,7 +287,7 @@ func compilenow(fn *ir.Func) bool {
// candidate AND was not inlined (yet), put it onto the compile
// queue instead of compiling it immediately. This is in case we
// wind up inlining it into a method wrapper that is generated by
// compiling a function later on in the xtop list.
// compiling a function later on in the Target.Decls list.
if ir.IsMethod(fn) && isInlinableButNotInlined(fn) {
return false
}
@ -536,10 +536,11 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf
func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
var abbrev int
offs := n.Offset()
var offs int64
switch n.Class() {
case ir.PAUTO:
offs = n.FrameOffset()
abbrev = dwarf.DW_ABRV_AUTO
if base.Ctxt.FixedFrameSize() == 0 {
offs -= int64(Widthptr)
@ -551,7 +552,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM
offs += base.Ctxt.FixedFrameSize()
offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
default:
base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
}
@ -693,7 +694,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
Name: n.Sym().Name,
IsReturnValue: isReturnValue,
Abbrev: abbrev,
StackOffset: int32(n.Offset()),
StackOffset: int32(n.FrameOffset()),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
@ -737,6 +738,7 @@ func stackOffset(slot ssa.LocalSlot) int32 {
var off int64
switch n.Class() {
case ir.PAUTO:
off = n.FrameOffset()
if base.Ctxt.FixedFrameSize() == 0 {
off -= int64(Widthptr)
}
@ -745,9 +747,9 @@ func stackOffset(slot ssa.LocalSlot) int32 {
off -= int64(Widthptr)
}
case ir.PPARAM, ir.PPARAMOUT:
off += base.Ctxt.FixedFrameSize()
off = n.FrameOffset() + base.Ctxt.FixedFrameSize()
}
return int32(off + n.Offset() + slot.Off)
return int32(off + slot.Off)
}
// createComplexVar builds a single DWARF variable entry and location list.


@ -43,7 +43,7 @@ func TestCmpstackvar(t *testing.T) {
}
n := NewName(s)
n.SetType(t)
n.SetOffset(xoffset)
n.SetFrameOffset(xoffset)
n.SetClass(cl)
return n
}
@ -158,7 +158,7 @@ func TestStackvarSort(t *testing.T) {
nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name {
n := NewName(s)
n.SetType(t)
n.SetOffset(xoffset)
n.SetFrameOffset(xoffset)
n.SetClass(cl)
return n
}


@ -254,7 +254,9 @@ func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *ty
hasPhi.add(c.ID)
v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right?
// Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building.
s.s.addNamedValue(var_, v)
if var_.Op() == ir.ONAME {
s.s.addNamedValue(var_.(*ir.Name), v)
}
for range c.Preds {
v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs.
}
@ -546,7 +548,9 @@ func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.
// Generate a FwdRef for the variable and return that.
v := b.NewValue0A(line, ssa.OpFwdRef, t, FwdRefAux{N: var_})
s.defvars[b.ID][var_] = v
s.s.addNamedValue(var_, v)
if var_.Op() == ir.ONAME {
s.s.addNamedValue(var_.(*ir.Name), v)
}
s.fwdrefs = append(s.fwdrefs, v)
return v
}


@ -206,8 +206,12 @@ type progeffectscache struct {
// nor do we care about non-local variables,
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
func livenessShouldTrack(n ir.Node) bool {
return n.Op() == ir.ONAME && (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers()
func livenessShouldTrack(nn ir.Node) bool {
if nn.Op() != ir.ONAME {
return false
}
n := nn.(*ir.Name)
return (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers()
}
// getvariables returns the list of on-stack variables that we need to track
@ -492,10 +496,10 @@ func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Name, args, locals bvec)
node := vars[i]
switch node.Class() {
case ir.PAUTO:
onebitwalktype1(node.Type(), node.Offset()+lv.stkptrsize, locals)
onebitwalktype1(node.Type(), node.FrameOffset()+lv.stkptrsize, locals)
case ir.PPARAM, ir.PPARAMOUT:
onebitwalktype1(node.Type(), node.Offset(), args)
onebitwalktype1(node.Type(), node.FrameOffset(), args)
}
}
}
@ -1165,11 +1169,11 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// Size args bitmaps to be just large enough to hold the largest pointer.
// First, find the largest Xoffset node we care about.
// (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
var maxArgNode ir.Node
var maxArgNode *ir.Name
for _, n := range lv.vars {
switch n.Class() {
case ir.PPARAM, ir.PPARAMOUT:
if maxArgNode == nil || n.Offset() > maxArgNode.Offset() {
if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() {
maxArgNode = n
}
}
@ -1177,7 +1181,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// Next, find the offset of the largest pointer in the largest node.
var maxArgs int64
if maxArgNode != nil {
maxArgs = maxArgNode.Offset() + typeptrdata(maxArgNode.Type())
maxArgs = maxArgNode.FrameOffset() + typeptrdata(maxArgNode.Type())
}
// Size locals bitmaps to be stkptrsize sized.
@ -1229,10 +1233,10 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// pointer variables in the function and emits a runtime data
// structure read by the garbage collector.
// Returns a map from GC safe points to their corresponding stack map index.
func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) LivenessMap {
// Construct the global liveness state.
vars, idx := getvariables(e.curfn)
lv := newliveness(e.curfn, f, vars, idx, e.stkptrsize)
vars, idx := getvariables(curfn)
lv := newliveness(curfn, f, vars, idx, stkptrsize)
// Run the dataflow framework.
lv.prologue()
@ -1267,7 +1271,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
}
// Emit the live pointer map data structures
ls := e.curfn.LSym
ls := curfn.LSym
fninfo := ls.Func()
fninfo.GCArgs, fninfo.GCLocals = lv.emit()


@ -83,9 +83,9 @@ func instrument(fn *ir.Func) {
// This only works for amd64. This will not
// work on arm or others that might support
// race in the future.
nodpc := ir.Copy(nodfp).(*ir.Name)
nodpc := nodfp.CloneName()
nodpc.SetType(types.Types[types.TUINTPTR])
nodpc.SetOffset(int64(-Widthptr))
nodpc.SetFrameOffset(int64(-Widthptr))
fn.Dcl = append(fn.Dcl, nodpc)
fn.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
fn.Exit.Append(mkcall("racefuncexit", nil, nil))


@ -13,7 +13,7 @@ import (
)
// range
func typecheckrange(n ir.Node) {
func typecheckrange(n *ir.RangeStmt) {
// Typechecking order is important here:
// 0. first typecheck range expression (slice/map/chan),
// it is evaluated only once and so logically it is not part of the loop.
@ -39,7 +39,7 @@ func typecheckrange(n ir.Node) {
decldepth--
}
func typecheckrangeExpr(n ir.Node) {
func typecheckrangeExpr(n *ir.RangeStmt) {
n.SetRight(typecheck(n.Right(), ctxExpr))
t := n.Right().Type()
@ -157,7 +157,7 @@ func cheapComputableIndex(width int64) bool {
// simpler forms. The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
func walkrange(nrange ir.Node) ir.Node {
func walkrange(nrange *ir.RangeStmt) ir.Node {
if isMapClear(nrange) {
m := nrange.Right()
lno := setlineno(m)
@ -204,7 +204,7 @@ func walkrange(nrange ir.Node) ir.Node {
base.Fatalf("walkrange: v2 != nil while v1 == nil")
}
var ifGuard ir.Node
var ifGuard *ir.IfStmt
var body []ir.Node
var init []ir.Node
@ -267,14 +267,14 @@ func walkrange(nrange ir.Node) ir.Node {
// TODO(austin): OFORUNTIL inhibits bounds-check
// elimination on the index variable (see #20711).
// Enhance the prove pass to understand this.
ifGuard = ir.Nod(ir.OIF, nil, nil)
ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil)
ifGuard.SetLeft(ir.Nod(ir.OLT, hv1, hn))
nfor.SetOp(ir.OFORUNTIL)
hp := temp(types.NewPtr(nrange.Type().Elem()))
tmp := ir.Nod(ir.OINDEX, ha, nodintconst(0))
tmp.SetBounded(true)
init = append(init, ir.Nod(ir.OAS, hp, ir.Nod(ir.OADDR, tmp, nil)))
init = append(init, ir.Nod(ir.OAS, hp, nodAddr(tmp)))
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
@ -288,16 +288,15 @@ func walkrange(nrange ir.Node) ir.Node {
// This runs *after* the condition check, so we know
// advancing the pointer is safe and won't go past the
// end of the allocation.
a = ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width))
a = typecheck(a, ctxStmt)
nfor.PtrList().Set1(a)
as := ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width))
nfor.PtrList().Set1(typecheck(as, ctxStmt))
case types.TMAP:
// order.stmt allocated the iterator for us.
// we only use a once, so no copy needed.
ha := a
hit := prealloc[nrange]
hit := nrange.Prealloc
th := hit.Type()
keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
elemsym := th.Field(1).Sym // ditto
@ -305,22 +304,20 @@ func walkrange(nrange ir.Node) ir.Node {
fn := syslook("mapiterinit")
fn = substArgTypes(fn, t.Key(), t.Elem(), th)
init = append(init, mkcall1(fn, nil, nil, typename(t), ha, ir.Nod(ir.OADDR, hit, nil)))
init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nodAddr(hit)))
nfor.SetLeft(ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil()))
fn = syslook("mapiternext")
fn = substArgTypes(fn, th)
nfor.SetRight(mkcall1(fn, nil, nil, ir.Nod(ir.OADDR, hit, nil)))
nfor.SetRight(mkcall1(fn, nil, nil, nodAddr(hit)))
key := nodSym(ir.ODOT, hit, keysym)
key = ir.Nod(ir.ODEREF, key, nil)
key := ir.Nod(ir.ODEREF, nodSym(ir.ODOT, hit, keysym), nil)
if v1 == nil {
body = nil
} else if v2 == nil {
body = []ir.Node{ir.Nod(ir.OAS, v1, key)}
} else {
elem := nodSym(ir.ODOT, hit, elemsym)
elem = ir.Nod(ir.ODEREF, elem, nil)
elem := ir.Nod(ir.ODEREF, nodSym(ir.ODOT, hit, elemsym), nil)
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(key, elem)
@ -429,7 +426,7 @@ func walkrange(nrange ir.Node) ir.Node {
if ifGuard != nil {
ifGuard.PtrInit().Append(init...)
ifGuard = typecheck(ifGuard, ctxStmt)
ifGuard = typecheck(ifGuard, ctxStmt).(*ir.IfStmt)
} else {
nfor.PtrInit().Append(init...)
}
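The OAS2 handling above also covers range clauses that assign to arbitrary addressable operands, the "v1, a[v1] = range" form mentioned in the comments. An illustrative example (not from this CL):

package main

import "fmt"

func main() {
	a := []int{3, 1, 2}
	var i int
	// The index variable and an element indexed by it are both assigned on
	// each iteration; walkrange emits a single OAS2 so the two stay in sync.
	for i, a[i] = range a {
	}
	fmt.Println(i, a)
}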
@ -462,7 +459,7 @@ func walkrange(nrange ir.Node) ir.Node {
// }
//
// where == for keys of map m is reflexive.
func isMapClear(n ir.Node) bool {
func isMapClear(n *ir.RangeStmt) bool {
if base.Flag.N != 0 || instrumenting {
return false
}
@ -491,7 +488,7 @@ func isMapClear(n ir.Node) bool {
}
m := n.Right()
if !samesafeexpr(stmt.List().First(), m) || !samesafeexpr(stmt.List().Second(), k) {
if delete := stmt.(*ir.CallExpr); !samesafeexpr(delete.List().First(), m) || !samesafeexpr(delete.List().Second(), k) {
return false
}
@ -511,11 +508,7 @@ func mapClear(m ir.Node) ir.Node {
fn := syslook("mapclear")
fn = substArgTypes(fn, t.Key(), t.Elem())
n := mkcall1(fn, nil, nil, typename(t), m)
n = typecheck(n, ctxStmt)
n = walkstmt(n)
return n
return walkstmt(typecheck(n, ctxStmt))
}
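isMapClear recognizes the idiomatic map-clearing loop and mapClear replaces it with one runtime call. The source pattern, as an illustrative program (not part of this CL):

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2, "c": 3}
	// A range loop whose body only deletes the current key is lowered to a
	// single runtime mapclear call instead of iterating entry by entry.
	for k := range m {
		delete(m, k)
	}
	fmt.Println(len(m)) // 0
}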
// Lower n into runtime·memclr if possible, for
@ -529,7 +522,7 @@ func mapClear(m ir.Node) ir.Node {
// in which the evaluation of a is side-effect-free.
//
// Parameters are as in walkrange: "for v1, v2 = range a".
func arrayClear(loop, v1, v2, a ir.Node) ir.Node {
func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
if base.Flag.N != 0 || instrumenting {
return nil
}
@ -542,12 +535,17 @@ func arrayClear(loop, v1, v2, a ir.Node) ir.Node {
return nil
}
stmt := loop.Body().First() // only stmt in body
if stmt.Op() != ir.OAS || stmt.Left().Op() != ir.OINDEX {
stmt1 := loop.Body().First() // only stmt in body
if stmt1.Op() != ir.OAS {
return nil
}
stmt := stmt1.(*ir.AssignStmt)
if stmt.Left().Op() != ir.OINDEX {
return nil
}
lhs := stmt.Left().(*ir.IndexExpr)
if !samesafeexpr(stmt.Left().Left(), a) || !samesafeexpr(stmt.Left().Right(), v1) {
if !samesafeexpr(lhs.Left(), a) || !samesafeexpr(lhs.Right(), v1) {
return nil
}
@ -570,19 +568,15 @@ func arrayClear(loop, v1, v2, a ir.Node) ir.Node {
// hp = &a[0]
hp := temp(types.Types[types.TUNSAFEPTR])
tmp := ir.Nod(ir.OINDEX, a, nodintconst(0))
tmp.SetBounded(true)
tmp = ir.Nod(ir.OADDR, tmp, nil)
tmp = convnop(tmp, types.Types[types.TUNSAFEPTR])
n.PtrBody().Append(ir.Nod(ir.OAS, hp, tmp))
ix := ir.Nod(ir.OINDEX, a, nodintconst(0))
ix.SetBounded(true)
addr := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR])
n.PtrBody().Append(ir.Nod(ir.OAS, hp, addr))
// hn = len(a) * sizeof(elem(a))
hn := temp(types.Types[types.TUINTPTR])
tmp = ir.Nod(ir.OLEN, a, nil)
tmp = ir.Nod(ir.OMUL, tmp, nodintconst(elemsize))
tmp = conv(tmp, types.Types[types.TUINTPTR])
n.PtrBody().Append(ir.Nod(ir.OAS, hn, tmp))
mul := conv(ir.Nod(ir.OMUL, ir.Nod(ir.OLEN, a, nil), nodintconst(elemsize)), types.Types[types.TUINTPTR])
n.PtrBody().Append(ir.Nod(ir.OAS, hn, mul))
var fn ir.Node
if a.Type().Elem().HasPointers() {
@ -604,8 +598,7 @@ func arrayClear(loop, v1, v2, a ir.Node) ir.Node {
n.SetLeft(typecheck(n.Left(), ctxExpr))
n.SetLeft(defaultlit(n.Left(), nil))
typecheckslice(n.Body().Slice(), ctxStmt)
n = walkstmt(n)
return n
return walkstmt(n)
}
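arrayClear handles the zeroing loop described in the comment above and lowers it to a memclr call. The source-level pattern, as an illustrative program (not from this CL):

package main

import "fmt"

func main() {
	a := []int{1, 2, 3, 4}
	// "for i := range a { a[i] = 0 }" with a side-effect-free a is lowered
	// to a single memclr of the backing storage.
	for i := range a {
		a[i] = 0
	}
	fmt.Println(a) // [0 0 0 0]
}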
// addptr returns (*T)(uintptr(p) + n).


@ -986,7 +986,7 @@ func typenamesym(t *types.Type) *types.Sym {
return s
}
func typename(t *types.Type) ir.Node {
func typename(t *types.Type) *ir.AddrExpr {
s := typenamesym(t)
if s.Def == nil {
n := ir.NewNameAt(src.NoXPos, s)
@ -996,13 +996,13 @@ func typename(t *types.Type) ir.Node {
s.Def = n
}
n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil)
n := nodAddr(ir.AsNode(s.Def))
n.SetType(types.NewPtr(s.Def.Type()))
n.SetTypecheck(1)
return n
}
func itabname(t, itype *types.Type) ir.Node {
func itabname(t, itype *types.Type) *ir.AddrExpr {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
base.Fatalf("itabname(%v, %v)", t, itype)
}
@ -1016,7 +1016,7 @@ func itabname(t, itype *types.Type) ir.Node {
itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
}
n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil)
n := nodAddr(ir.AsNode(s.Def))
n.SetType(types.NewPtr(s.Def.Type()))
n.SetTypecheck(1)
return n
@ -1880,7 +1880,7 @@ func zeroaddr(size int64) ir.Node {
x.SetTypecheck(1)
s.Def = x
}
z := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil)
z := nodAddr(ir.AsNode(s.Def))
z.SetType(types.NewPtr(types.Types[types.TUINT8]))
z.SetTypecheck(1)
return z


@ -75,7 +75,7 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 {
min := v.visitgen
v.stack = append(v.stack, n)
ir.InspectList(n.Body(), func(n ir.Node) bool {
ir.Visit(n, func(n ir.Node) {
switch n.Op() {
case ir.ONAME:
if n.Class() == ir.PFUNC {
@ -101,9 +101,11 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 {
}
case ir.OCALLPART:
fn := ir.AsNode(callpartMethod(n).Nname)
if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
if m := v.visit(fn.Name().Defn.(*ir.Func)); m < min {
min = m
if fn != nil && fn.Op() == ir.ONAME {
if fn := fn.(*ir.Name); fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
if m := v.visit(fn.Name().Defn.(*ir.Func)); m < min {
min = m
}
}
}
case ir.OCLOSURE:
@ -111,7 +113,6 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 {
min = m
}
}
return true
})
if (min == id || min == id+1) && !n.IsHiddenClosure() {
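The walk above moves from ir.InspectList, whose callback returns a bool controlling whether children are visited, to ir.Visit, whose callback observes every node and returns nothing (hence the deleted "return true"). A rough sketch of the two callback shapes over a toy tree; the types and helpers are invented for illustration:

	package main

	import "fmt"

	type node struct {
		op   string
		kids []*node
	}

	// inspect mimics a pre-order walk whose callback can stop the descent.
	func inspect(n *node, f func(*node) bool) {
		if n == nil || !f(n) {
			return
		}
		for _, k := range n.kids {
			inspect(k, f)
		}
	}

	// visit mimics a walk that always descends; the callback just observes.
	func visit(n *node, f func(*node)) {
		if n == nil {
			return
		}
		f(n)
		for _, k := range n.kids {
			visit(k, f)
		}
	}

	func main() {
		tree := &node{op: "OCALLFUNC", kids: []*node{{op: "ONAME"}, {op: "OCLOSURE"}}}
		inspect(tree, func(n *node) bool { fmt.Println("inspect", n.op); return true })
		visit(tree, func(n *node) { fmt.Println("visit", n.op) })
	}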


@ -11,15 +11,12 @@ import (
)
// select
func typecheckselect(sel ir.Node) {
func typecheckselect(sel *ir.SelectStmt) {
var def ir.Node
lno := setlineno(sel)
typecheckslice(sel.Init().Slice(), ctxStmt)
for _, ncase := range sel.List().Slice() {
if ncase.Op() != ir.OCASE {
setlineno(ncase)
base.Fatalf("typecheckselect %v", ncase.Op())
}
ncase := ncase.(*ir.CaseStmt)
if ncase.List().Len() == 0 {
// default
@ -35,6 +32,14 @@ func typecheckselect(sel ir.Node) {
n := ncase.List().First()
ncase.SetLeft(n)
ncase.PtrList().Set(nil)
oselrecv2 := func(dst, recv ir.Node, colas bool) {
n := ir.NodAt(n.Pos(), ir.OSELRECV2, nil, nil)
n.PtrList().Set2(dst, ir.BlankNode)
n.PtrRlist().Set1(recv)
n.SetColas(colas)
n.SetTypecheck(1)
ncase.SetLeft(n)
}
switch n.Op() {
default:
pos := n.Pos()
@ -48,20 +53,21 @@ func typecheckselect(sel ir.Node) {
base.ErrorfAt(pos, "select case must be receive, send or assign recv")
case ir.OAS:
// convert x = <-c into OSELRECV(x, <-c).
// convert x = <-c into x, _ = <-c
// remove implicit conversions; the eventual assignment
// will reintroduce them.
if (n.Right().Op() == ir.OCONVNOP || n.Right().Op() == ir.OCONVIFACE) && n.Right().Implicit() {
n.SetRight(n.Right().Left())
if r := n.Right(); r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE {
if r.Implicit() {
n.SetRight(r.Left())
}
}
if n.Right().Op() != ir.ORECV {
base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
break
}
n.SetOp(ir.OSELRECV)
oselrecv2(n.Left(), n.Right(), n.Colas())
case ir.OAS2RECV:
// convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
if n.Rlist().First().Op() != ir.ORECV {
base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
break
@ -69,10 +75,8 @@ func typecheckselect(sel ir.Node) {
n.SetOp(ir.OSELRECV2)
case ir.ORECV:
// convert <-c into OSELRECV(_, <-c)
n = ir.NodAt(n.Pos(), ir.OSELRECV, ir.BlankNode, n)
n.SetTypecheck(1)
ncase.SetLeft(n)
// convert <-c into _, _ = <-c
oselrecv2(ir.BlankNode, n, false)
case ir.OSEND:
break
@ -85,7 +89,7 @@ func typecheckselect(sel ir.Node) {
base.Pos = lno
}
func walkselect(sel ir.Node) {
func walkselect(sel *ir.SelectStmt) {
lno := setlineno(sel)
if sel.Body().Len() != 0 {
base.Fatalf("double walkselect")
@ -94,8 +98,8 @@ func walkselect(sel ir.Node) {
init := sel.Init().Slice()
sel.PtrInit().Set(nil)
init = append(init, walkselectcases(sel.PtrList())...)
sel.PtrList().Set(nil)
init = append(init, walkselectcases(sel.List())...)
sel.SetList(ir.Nodes{})
sel.PtrBody().Set(init)
walkstmtlist(sel.Body().Slice())
@ -103,7 +107,7 @@ func walkselect(sel ir.Node) {
base.Pos = lno
}
func walkselectcases(cases *ir.Nodes) []ir.Node {
func walkselectcases(cases ir.Nodes) []ir.Node {
ncas := cases.Len()
sellineno := base.Pos
@ -114,7 +118,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
// optimization: one-case select: single op.
if ncas == 1 {
cas := cases.First()
cas := cases.First().(*ir.CaseStmt)
setlineno(cas)
l := cas.Init().Slice()
if cas.Left() != nil { // not default:
@ -128,19 +132,13 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
case ir.OSEND:
// already ok
case ir.OSELRECV:
if ir.IsBlank(n.Left()) {
n = n.Right()
break
}
n.SetOp(ir.OAS)
case ir.OSELRECV2:
if ir.IsBlank(n.List().First()) && ir.IsBlank(n.List().Second()) {
n = n.Rlist().First()
r := n.(*ir.AssignListStmt)
if ir.IsBlank(r.List().First()) && ir.IsBlank(r.List().Second()) {
n = r.Rlist().First()
break
}
n.SetOp(ir.OAS2RECV)
r.SetOp(ir.OAS2RECV)
}
l = append(l, n)
@ -153,36 +151,23 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
// convert case value arguments to addresses.
// this rewrite is used by both the general code and the next optimization.
var dflt ir.Node
var dflt *ir.CaseStmt
for _, cas := range cases.Slice() {
cas := cas.(*ir.CaseStmt)
setlineno(cas)
n := cas.Left()
if n == nil {
dflt = cas
continue
}
// Lower x, _ = <-c to x = <-c.
if n.Op() == ir.OSELRECV2 && ir.IsBlank(n.List().Second()) {
n = ir.NodAt(n.Pos(), ir.OSELRECV, n.List().First(), n.Rlist().First())
n.SetTypecheck(1)
cas.SetLeft(n)
}
switch n.Op() {
case ir.OSEND:
n.SetRight(ir.Nod(ir.OADDR, n.Right(), nil))
n.SetRight(nodAddr(n.Right()))
n.SetRight(typecheck(n.Right(), ctxExpr))
case ir.OSELRECV:
if !ir.IsBlank(n.Left()) {
n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil))
n.SetLeft(typecheck(n.Left(), ctxExpr))
}
case ir.OSELRECV2:
if !ir.IsBlank(n.List().First()) {
n.List().SetIndex(0, ir.Nod(ir.OADDR, n.List().First(), nil))
n.List().SetIndex(0, nodAddr(n.List().First()))
n.List().SetIndex(0, typecheck(n.List().First(), ctxExpr))
}
}
@ -190,9 +175,9 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
// optimization: two-case select but one is default: single non-blocking op.
if ncas == 2 && dflt != nil {
cas := cases.First()
cas := cases.First().(*ir.CaseStmt)
if cas == dflt {
cas = cases.Second()
cas = cases.Second().(*ir.CaseStmt)
}
n := cas.Left()
@ -209,25 +194,22 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
ch := n.Left()
call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right())
case ir.OSELRECV:
// if selectnbrecv(&v, c) { body } else { default body }
ch := n.Right().Left()
elem := n.Left()
if ir.IsBlank(elem) {
elem = nodnil()
}
call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch)
case ir.OSELRECV2:
// if selectnbrecv2(&v, &received, c) { body } else { default body }
ch := n.Rlist().First().Left()
recv := n.Rlist().First().(*ir.UnaryExpr)
ch := recv.Left()
elem := n.List().First()
if ir.IsBlank(elem) {
elem = nodnil()
}
receivedp := ir.Nod(ir.OADDR, n.List().Second(), nil)
receivedp = typecheck(receivedp, ctxExpr)
call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch)
if ir.IsBlank(n.List().Second()) {
// if selectnbrecv(&v, c) { body } else { default body }
call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch)
} else {
// TODO(cuonglm): make this use selectnbrecv()
// if selectnbrecv2(&v, &received, c) { body } else { default body }
receivedp := typecheck(nodAddr(n.List().Second()), ctxExpr)
call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch)
}
}
r.SetLeft(typecheck(call, ctxExpr))
@ -239,7 +221,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
if dflt != nil {
ncas--
}
casorder := make([]ir.Node, ncas)
casorder := make([]*ir.CaseStmt, ncas)
nsends, nrecvs := 0, 0
var init []ir.Node
@ -247,9 +229,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
// generate sel-struct
base.Pos = sellineno
selv := temp(types.NewArray(scasetype(), int64(ncas)))
r := ir.Nod(ir.OAS, selv, nil)
r = typecheck(r, ctxStmt)
init = append(init, r)
init = append(init, typecheck(ir.Nod(ir.OAS, selv, nil), ctxStmt))
// No initialization for order; runtime.selectgo is responsible for that.
order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
@ -257,13 +237,14 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
var pc0, pcs ir.Node
if base.Flag.Race {
pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
pc0 = typecheck(ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(0)), nil), ctxExpr)
pc0 = typecheck(nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(0))), ctxExpr)
} else {
pc0 = nodnil()
}
// register cases
for _, cas := range cases.Slice() {
cas := cas.(*ir.CaseStmt)
setlineno(cas)
init = append(init, cas.Init().Slice()...)
@ -284,15 +265,11 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
nsends++
c = n.Left()
elem = n.Right()
case ir.OSELRECV:
nrecvs++
i = ncas - nrecvs
c = n.Right().Left()
elem = n.Left()
case ir.OSELRECV2:
nrecvs++
i = ncas - nrecvs
c = n.Rlist().First().Left()
recv := n.Rlist().First().(*ir.UnaryExpr)
c = recv.Left()
elem = n.List().First()
}
@ -300,8 +277,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
setField := func(f string, val ir.Node) {
r := ir.Nod(ir.OAS, nodSym(ir.ODOT, ir.Nod(ir.OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
r = typecheck(r, ctxStmt)
init = append(init, r)
init = append(init, typecheck(r, ctxStmt))
}
c = convnop(c, types.Types[types.TUNSAFEPTR])
@ -314,7 +290,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
// TODO(mdempsky): There should be a cleaner way to
// handle this.
if base.Flag.Race {
r = mkcall("selectsetpc", nil, nil, ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i))), nil))
r := mkcall("selectsetpc", nil, nil, nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i)))))
init = append(init, r)
}
}
@ -326,12 +302,11 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
base.Pos = sellineno
chosen := temp(types.Types[types.TINT])
recvOK := temp(types.Types[types.TBOOL])
r = ir.Nod(ir.OAS2, nil, nil)
r := ir.Nod(ir.OAS2, nil, nil)
r.PtrList().Set2(chosen, recvOK)
fn := syslook("selectgo")
r.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
r = typecheck(r, ctxStmt)
init = append(init, r)
init = append(init, typecheck(r, ctxStmt))
// selv and order are no longer alive after selectgo.
init = append(init, ir.Nod(ir.OVARKILL, selv, nil))
@ -341,16 +316,17 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
}
// dispatch cases
dispatch := func(cond, cas ir.Node) {
dispatch := func(cond ir.Node, cas *ir.CaseStmt) {
cond = typecheck(cond, ctxExpr)
cond = defaultlit(cond, nil)
r := ir.Nod(ir.OIF, cond, nil)
if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 {
x := ir.Nod(ir.OAS, n.List().Second(), recvOK)
x = typecheck(x, ctxStmt)
r.PtrBody().Append(x)
if !ir.IsBlank(n.List().Second()) {
x := ir.Nod(ir.OAS, n.List().Second(), recvOK)
r.PtrBody().Append(typecheck(x, ctxStmt))
}
}
r.PtrBody().AppendNodes(cas.PtrBody())
@ -372,7 +348,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node {
// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
func bytePtrToIndex(n ir.Node, i int64) ir.Node {
s := ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, n, nodintconst(i)), nil)
s := nodAddr(ir.Nod(ir.OINDEX, n, nodintconst(i)))
t := types.NewPtr(types.Types[types.TUINT8])
return convnop(s, t)
}
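The select changes drop the one-result OSELRECV form: every receive case is normalized to the two-result `x, _ = <-c` shape, and a two-case select whose other case is `default` is lowered to a single non-blocking channel operation (selectnbsend, selectnbrecv or selectnbrecv2). A source-level illustration of the pattern that optimization applies to; the if/else in the comment is only a rough picture of the rewrite, not actual compiler output:

	package main

	import "fmt"

	func main() {
		c := make(chan int, 1)
		c <- 42

		// A two-case select where one case is "default" ...
		select {
		case v := <-c:
			fmt.Println("received", v)
		default:
			fmt.Println("channel empty")
		}

		// ... behaves like a single non-blocking receive, roughly:
		//	if v, ok := tryRecv(c); ok { ... } else { ... }
		// which is what the selectnbrecv/selectnbrecv2 lowering implements.
		// (tryRecv is hypothetical shorthand, not a real function.)
	}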


@ -32,7 +32,7 @@ type InitSchedule struct {
out []ir.Node
initplans map[ir.Node]*InitPlan
inittemps map[ir.Node]ir.Node
inittemps map[ir.Node]*ir.Name
}
func (s *InitSchedule) append(n ir.Node) {
@ -51,61 +51,69 @@ func (s *InitSchedule) staticInit(n ir.Node) {
// tryStaticInit attempts to statically execute an initialization
// statement and reports whether it succeeded.
func (s *InitSchedule) tryStaticInit(n ir.Node) bool {
func (s *InitSchedule) tryStaticInit(nn ir.Node) bool {
// Only worry about simple "l = r" assignments. Multiple
// variable/expression OAS2 assignments have already been
// replaced by multiple simple OAS assignments, and the other
// OAS2* assignments mostly necessitate dynamic execution
// anyway.
if n.Op() != ir.OAS {
if nn.Op() != ir.OAS {
return false
}
if ir.IsBlank(n.Left()) && !hasSideEffects(n.Right()) {
n := nn.(*ir.AssignStmt)
if ir.IsBlank(n.Left()) && !anySideEffects(n.Right()) {
// Discard.
return true
}
lno := setlineno(n)
defer func() { base.Pos = lno }()
return s.staticassign(n.Left(), n.Right())
nam := n.Left().(*ir.Name)
return s.staticassign(nam, 0, n.Right(), nam.Type())
}
// like staticassign but we are copying an already
// initialized value r.
func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool {
if r.Op() != ir.ONAME && r.Op() != ir.OMETHEXPR {
return false
}
if r.Class() == ir.PFUNC {
pfuncsym(l, r)
func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool {
if rn.Class() == ir.PFUNC {
// TODO if roff != 0 { panic }
pfuncsym(l, loff, rn)
return true
}
if r.Class() != ir.PEXTERN || r.Sym().Pkg != types.LocalPkg {
if rn.Class() != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
return false
}
if r.Name().Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
if rn.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
return false
}
if r.Name().Defn.Op() != ir.OAS {
if rn.Defn.Op() != ir.OAS {
return false
}
if r.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
return false
}
orig := r
r = r.Name().Defn.Right()
orig := rn
r := rn.Defn.(*ir.AssignStmt).Right()
for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), l.Type()) {
r = r.Left()
for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) {
r = r.(*ir.ConvExpr).Left()
}
switch r.Op() {
case ir.ONAME, ir.OMETHEXPR:
if s.staticcopy(l, r) {
case ir.OMETHEXPR:
r = r.(*ir.MethodExpr).FuncName()
fallthrough
case ir.ONAME:
r := r.(*ir.Name)
if s.staticcopy(l, loff, r, typ) {
return true
}
// We may have skipped past one or more OCONVNOPs, so
// use conv to ensure r is assignable to l (#13263).
s.append(ir.Nod(ir.OAS, l, conv(r, l.Type())))
dst := ir.Node(l)
if loff != 0 || !types.Identical(typ, l.Type()) {
dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ)
}
s.append(ir.Nod(ir.OAS, dst, conv(r, typ)))
return true
case ir.ONIL:
@ -115,12 +123,13 @@ func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool {
if isZero(r) {
return true
}
litsym(l, r, int(l.Type().Width))
litsym(l, loff, r, int(typ.Width))
return true
case ir.OADDR:
if a := r.Left(); a.Op() == ir.ONAME {
addrsym(l, a)
a := a.(*ir.Name)
addrsym(l, loff, a, 0)
return true
}
@ -128,37 +137,35 @@ func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool {
switch r.Left().Op() {
case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
// copy pointer
addrsym(l, s.inittemps[r])
addrsym(l, loff, s.inittemps[r], 0)
return true
}
case ir.OSLICELIT:
// copy slice
a := s.inittemps[r]
slicesym(l, a, ir.Int64Val(r.Right()))
slicesym(l, loff, s.inittemps[r], ir.Int64Val(r.Right()))
return true
case ir.OARRAYLIT, ir.OSTRUCTLIT:
p := s.initplans[r]
n := ir.Copy(l)
for i := range p.E {
e := &p.E[i]
n.SetOffset(l.Offset() + e.Xoffset)
n.SetType(e.Expr.Type())
typ := e.Expr.Type()
if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
litsym(n, e.Expr, int(n.Type().Width))
litsym(l, loff+e.Xoffset, e.Expr, int(typ.Width))
continue
}
ll := ir.SepCopy(n)
if s.staticcopy(ll, e.Expr) {
x := e.Expr
if x.Op() == ir.OMETHEXPR {
x = x.(*ir.MethodExpr).FuncName()
}
if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) {
continue
}
// Requires computation, but we're
// copying someone else's computation.
rr := ir.SepCopy(orig)
rr.SetType(ll.Type())
rr.SetOffset(rr.Offset() + e.Xoffset)
ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ)
rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ)
setlineno(rr)
s.append(ir.Nod(ir.OAS, ll, rr))
}
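staticcopy and staticassign no longer mutate copies of the destination node: the destination is a base *ir.Name plus a byte offset, and helpers such as litsym and addrsym now take that (name, offset) pair. A standalone sketch of writing constants into a static object addressed as (symbol, offset); the sym type and the temp name below are toys, not compiler APIs:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// A toy "data symbol": a named byte image being filled in ahead of time.
	type sym struct {
		name string
		data []byte
	}

	// writeUint64 plays the role of litsym: store a constant at (symbol, offset).
	func writeUint64(s *sym, off int64, v uint64) {
		binary.LittleEndian.PutUint64(s.data[off:off+8], v)
	}

	func main() {
		// struct { a, b uint64 } laid out as two 8-byte words.
		s := &sym{name: "stmp_0", data: make([]byte, 16)}
		writeUint64(s, 0, 1) // field a at offset 0
		writeUint64(s, 8, 2) // field b at offset 8
		fmt.Printf("%s: %v\n", s.name, s.data)
	}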
@ -169,14 +176,19 @@ func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool {
return false
}
func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool {
func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool {
for r.Op() == ir.OCONVNOP {
r = r.Left()
r = r.(*ir.ConvExpr).Left()
}
switch r.Op() {
case ir.ONAME, ir.OMETHEXPR:
return s.staticcopy(l, r)
case ir.ONAME:
r := r.(*ir.Name)
return s.staticcopy(l, loff, r, typ)
case ir.OMETHEXPR:
r := r.(*ir.MethodExpr)
return s.staticcopy(l, loff, r.FuncName(), typ)
case ir.ONIL:
return true
@ -185,12 +197,12 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool {
if isZero(r) {
return true
}
litsym(l, r, int(l.Type().Width))
litsym(l, loff, r, int(typ.Width))
return true
case ir.OADDR:
if nam := stataddr(r.Left()); nam != nil {
addrsym(l, nam)
if name, offset, ok := stataddr(r.Left()); ok {
addrsym(l, loff, name, offset)
return true
}
fallthrough
@ -202,10 +214,10 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool {
a := staticname(r.Left().Type())
s.inittemps[r] = a
addrsym(l, a)
addrsym(l, loff, a, 0)
// Init underlying literal.
if !s.staticassign(a, r.Left()) {
if !s.staticassign(a, 0, r.Left(), a.Type()) {
s.append(ir.Nod(ir.OAS, a, r.Left()))
}
return true
@ -215,7 +227,7 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool {
case ir.OSTR2BYTES:
if l.Class() == ir.PEXTERN && r.Left().Op() == ir.OLITERAL {
sval := ir.StringVal(r.Left())
slicebytes(l, sval)
slicebytes(l, loff, sval)
return true
}
@ -227,27 +239,25 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool {
ta.SetNoalg(true)
a := staticname(ta)
s.inittemps[r] = a
slicesym(l, a, bound)
slicesym(l, loff, a, bound)
// Fall through to init underlying array.
l = a
loff = 0
fallthrough
case ir.OARRAYLIT, ir.OSTRUCTLIT:
s.initplan(r)
p := s.initplans[r]
n := ir.Copy(l)
for i := range p.E {
e := &p.E[i]
n.SetOffset(l.Offset() + e.Xoffset)
n.SetType(e.Expr.Type())
if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
litsym(n, e.Expr, int(n.Type().Width))
litsym(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Width))
continue
}
setlineno(e.Expr)
a := ir.SepCopy(n)
if !s.staticassign(a, e.Expr) {
if !s.staticassign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) {
a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type())
s.append(ir.Nod(ir.OAS, a, e.Expr))
}
}
@ -264,7 +274,8 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool {
}
// Closures with no captured variables are globals,
// so the assignment can be done at link time.
pfuncsym(l, r.Func().Nname)
// TODO if roff != 0 { panic }
pfuncsym(l, loff, r.Func().Nname)
return true
}
closuredebugruntimecheck(r)
@ -274,9 +285,9 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool {
// If you change something here, change it there, and vice versa.
// Determine the underlying concrete type and value we are converting from.
val := r
val := ir.Node(r)
for val.Op() == ir.OCONVIFACE {
val = val.Left()
val = val.(*ir.ConvExpr).Left()
}
if val.Type().IsInterface() {
@ -290,19 +301,17 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool {
markTypeUsedInInterface(val.Type(), l.Sym().Linksym())
var itab ir.Node
if l.Type().IsEmptyInterface() {
var itab *ir.AddrExpr
if typ.IsEmptyInterface() {
itab = typename(val.Type())
} else {
itab = itabname(val.Type(), l.Type())
itab = itabname(val.Type(), typ)
}
// Create a copy of l to modify while we emit data.
n := ir.Copy(l)
// Emit itab, advance offset.
addrsym(n, itab.Left()) // itab is an OADDR node
n.SetOffset(n.Offset() + int64(Widthptr))
addrsym(l, loff, itab.Left().(*ir.Name), 0)
// Emit data.
if isdirectiface(val.Type()) {
@ -311,20 +320,19 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool {
return true
}
// Copy val directly into n.
n.SetType(val.Type())
setlineno(val)
a := ir.SepCopy(n)
if !s.staticassign(a, val) {
if !s.staticassign(l, loff+int64(Widthptr), val, val.Type()) {
a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(Widthptr), val.Type())
s.append(ir.Nod(ir.OAS, a, val))
}
} else {
// Construct temp to hold val, write pointer to temp into n.
a := staticname(val.Type())
s.inittemps[val] = a
if !s.staticassign(a, val) {
if !s.staticassign(a, 0, val, val.Type()) {
s.append(ir.Nod(ir.OAS, a, val))
}
addrsym(n, a)
addrsym(l, loff+int64(Widthptr), a, 0)
}
return true
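The OCONVIFACE branch above emits a statically initialized interface value as two words: the type or itab pointer at offset 0 and the data word one pointer width later (loff+Widthptr). A hedged illustration that an interface value really does occupy that two-word layout; the ifaceWords struct is written only for demonstration and is not a public API:

	package main

	import (
		"fmt"
		"unsafe"
	)

	// ifaceWords mirrors the two-word layout of an interface value:
	// a type/itab word followed by a data word.
	type ifaceWords struct {
		typ  unsafe.Pointer
		data unsafe.Pointer
	}

	func main() {
		x := 7
		var i interface{} = &x // direct interface: the data word holds &x itself

		w := (*ifaceWords)(unsafe.Pointer(&i))
		fmt.Printf("type word %p, data word %p, &x %p\n", w.typ, w.data, unsafe.Pointer(&x))
	}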
@ -368,7 +376,7 @@ var statuniqgen int // name generator for static temps
// staticname returns a name backed by a (writable) static data symbol.
// Use readonlystaticname for read-only node.
func staticname(t *types.Type) ir.Node {
func staticname(t *types.Type) *ir.Name {
// Don't use lookupN; it interns the resulting string, but these are all unique.
n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
statuniqgen++
@ -379,22 +387,23 @@ func staticname(t *types.Type) ir.Node {
}
// readonlystaticname returns a name backed by a (writable) static data symbol.
func readonlystaticname(t *types.Type) ir.Node {
func readonlystaticname(t *types.Type) *ir.Name {
n := staticname(t)
n.MarkReadonly()
n.Sym().Linksym().Set(obj.AttrContentAddressable, true)
return n
}
func isSimpleName(n ir.Node) bool {
return (n.Op() == ir.ONAME || n.Op() == ir.OMETHEXPR) && n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN
func isSimpleName(nn ir.Node) bool {
if nn.Op() != ir.ONAME {
return false
}
n := nn.(*ir.Name)
return n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN
}
func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
a := ir.Nod(ir.OAS, l, r)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
appendWalkStmt(init, ir.Nod(ir.OAS, l, r))
}
// initGenType is a bitmap indicating the types of generation that will occur for a static value.
@ -431,14 +440,15 @@ func getdyn(n ir.Node, top bool) initGenType {
case ir.OARRAYLIT, ir.OSTRUCTLIT:
}
lit := n.(*ir.CompLitExpr)
var mode initGenType
for _, n1 := range n.List().Slice() {
for _, n1 := range lit.List().Slice() {
switch n1.Op() {
case ir.OKEY:
n1 = n1.Right()
n1 = n1.(*ir.KeyExpr).Right()
case ir.OSTRUCTKEY:
n1 = n1.Left()
n1 = n1.(*ir.StructKeyExpr).Left()
}
mode |= getdyn(n1, false)
if mode == initDynamic|initConst {
@ -456,7 +466,7 @@ func isStaticCompositeLiteral(n ir.Node) bool {
case ir.OARRAYLIT:
for _, r := range n.List().Slice() {
if r.Op() == ir.OKEY {
r = r.Right()
r = r.(*ir.KeyExpr).Right()
}
if !isStaticCompositeLiteral(r) {
return false
@ -465,9 +475,7 @@ func isStaticCompositeLiteral(n ir.Node) bool {
return true
case ir.OSTRUCTLIT:
for _, r := range n.List().Slice() {
if r.Op() != ir.OSTRUCTKEY {
base.Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r)
}
r := r.(*ir.StructKeyExpr)
if !isStaticCompositeLiteral(r.Left()) {
return false
}
@ -477,9 +485,9 @@ func isStaticCompositeLiteral(n ir.Node) bool {
return true
case ir.OCONVIFACE:
// See staticassign's OCONVIFACE case for comments.
val := n
val := ir.Node(n)
for val.Op() == ir.OCONVIFACE {
val = val.Left()
val = val.(*ir.ConvExpr).Left()
}
if val.Type().IsInterface() {
return val.Op() == ir.ONIL
@ -511,7 +519,7 @@ const (
// fixedlit handles struct, array, and slice literals.
// TODO: expand documentation.
func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir.Nodes) {
func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
isBlank := var_ == ir.BlankNode
var splitnode func(ir.Node) (a ir.Node, value ir.Node)
switch n.Op() {
@ -519,24 +527,23 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir
var k int64
splitnode = func(r ir.Node) (ir.Node, ir.Node) {
if r.Op() == ir.OKEY {
k = indexconst(r.Left())
kv := r.(*ir.KeyExpr)
k = indexconst(kv.Left())
if k < 0 {
base.Fatalf("fixedlit: invalid index %v", r.Left())
base.Fatalf("fixedlit: invalid index %v", kv.Left())
}
r = r.Right()
r = kv.Right()
}
a := ir.Nod(ir.OINDEX, var_, nodintconst(k))
k++
if isBlank {
a = ir.BlankNode
return ir.BlankNode, r
}
return a, r
}
case ir.OSTRUCTLIT:
splitnode = func(r ir.Node) (ir.Node, ir.Node) {
if r.Op() != ir.OSTRUCTKEY {
base.Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r)
}
splitnode = func(rn ir.Node) (ir.Node, ir.Node) {
r := rn.(*ir.StructKeyExpr)
if r.Sym().IsBlank() || isBlank {
return ir.BlankNode, r.Left()
}
@ -549,19 +556,21 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir
for _, r := range n.List().Slice() {
a, value := splitnode(r)
if a == ir.BlankNode && !hasSideEffects(value) {
if a == ir.BlankNode && !anySideEffects(value) {
// Discard.
continue
}
switch value.Op() {
case ir.OSLICELIT:
value := value.(*ir.CompLitExpr)
if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
slicelit(ctxt, value, a, init)
continue
}
case ir.OARRAYLIT, ir.OSTRUCTLIT:
value := value.(*ir.CompLitExpr)
fixedlit(ctxt, kind, value, a, init)
continue
}
@ -573,13 +582,13 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir
// build list of assignments: var[index] = expr
setlineno(a)
a = ir.Nod(ir.OAS, a, value)
a = typecheck(a, ctxStmt)
as := ir.NewAssignStmt(base.Pos, a, value)
as = typecheck(as, ctxStmt).(*ir.AssignStmt)
switch kind {
case initKindStatic:
genAsStatic(a)
genAsStatic(as)
case initKindDynamic, initKindLocalCode:
a = orderStmtInPlace(a, map[string][]*ir.Name{})
a = orderStmtInPlace(as, map[string][]*ir.Name{})
a = walkstmt(a)
init.Append(a)
default:
@ -589,7 +598,7 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir
}
}
func isSmallSliceLit(n ir.Node) bool {
func isSmallSliceLit(n *ir.CompLitExpr) bool {
if n.Op() != ir.OSLICELIT {
return false
}
@ -599,7 +608,7 @@ func isSmallSliceLit(n ir.Node) bool {
return smallintconst(r) && (n.Type().Elem().Width == 0 || ir.Int64Val(r) <= smallArrayBytes/n.Type().Elem().Width)
}
func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) {
func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
// make an array type corresponding the number of elements we have
t := types.NewArray(n.Type().Elem(), ir.Int64Val(n.Right()))
dowidth(t)
@ -613,11 +622,11 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) {
// copy static to slice
var_ = typecheck(var_, ctxExpr|ctxAssign)
nam := stataddr(var_)
if nam == nil || nam.Class() != ir.PEXTERN {
name, offset, ok := stataddr(var_)
if !ok || name.Class() != ir.PEXTERN {
base.Fatalf("slicelit: %v", var_)
}
slicesym(nam, vstat, t.NumElem())
slicesym(name, offset, vstat, t.NumElem())
return
}
@ -659,7 +668,7 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) {
// set auto to point at new temp or heap (3 assign)
var a ir.Node
if x := prealloc[n]; x != nil {
if x := n.Prealloc; x != nil {
// temp allocated during order.go for dddarg
if !types.Identical(t, x.Type()) {
panic("dotdotdot base type does not match order's assigned type")
@ -675,47 +684,40 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) {
init.Append(ir.Nod(ir.OVARDEF, x, nil))
}
a = ir.Nod(ir.OADDR, x, nil)
a = nodAddr(x)
} else if n.Esc() == EscNone {
a = temp(t)
if vstat == nil {
a = ir.Nod(ir.OAS, temp(t), nil)
a = typecheck(a, ctxStmt)
init.Append(a) // zero new temp
a = a.Left()
a = a.(*ir.AssignStmt).Left()
} else {
init.Append(ir.Nod(ir.OVARDEF, a, nil))
}
a = ir.Nod(ir.OADDR, a, nil)
a = nodAddr(a)
} else {
a = ir.Nod(ir.ONEW, ir.TypeNode(t), nil)
}
a = ir.Nod(ir.OAS, vauto, a)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
appendWalkStmt(init, ir.Nod(ir.OAS, vauto, a))
if vstat != nil {
// copy static to heap (4)
a = ir.Nod(ir.ODEREF, vauto, nil)
a = ir.Nod(ir.OAS, a, vstat)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
appendWalkStmt(init, ir.Nod(ir.OAS, a, vstat))
}
// put dynamics into array (5)
var index int64
for _, value := range n.List().Slice() {
if value.Op() == ir.OKEY {
index = indexconst(value.Left())
kv := value.(*ir.KeyExpr)
index = indexconst(kv.Left())
if index < 0 {
base.Fatalf("slicelit: invalid index %v", value.Left())
base.Fatalf("slicelit: invalid index %v", kv.Left())
}
value = value.Right()
value = kv.Right()
}
a := ir.Nod(ir.OINDEX, vauto, nodintconst(index))
a.SetBounded(true)
@ -728,6 +730,7 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) {
break
case ir.OARRAYLIT, ir.OSTRUCTLIT:
value := value.(*ir.CompLitExpr)
k := initKindDynamic
if vstat == nil {
// Generate both static and dynamic initializations.
@ -744,12 +747,10 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) {
// build list of vauto[c] = expr
setlineno(value)
a = ir.Nod(ir.OAS, a, value)
a = typecheck(a, ctxStmt)
a = orderStmtInPlace(a, map[string][]*ir.Name{})
a = walkstmt(a)
init.Append(a)
as := typecheck(ir.Nod(ir.OAS, a, value), ctxStmt)
as = orderStmtInPlace(as, map[string][]*ir.Name{})
as = walkstmt(as)
init.Append(as)
}
// make slice out of heap (6)
@ -761,7 +762,7 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) {
init.Append(a)
}
func maplit(n ir.Node, m ir.Node, init *ir.Nodes) {
func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
// make the map var
a := ir.Nod(ir.OMAKE, nil, nil)
a.SetEsc(n.Esc())
@ -773,6 +774,7 @@ func maplit(n ir.Node, m ir.Node, init *ir.Nodes) {
// The order pass already removed any dynamic (runtime-computed) entries.
// All remaining entries are static. Double-check that.
for _, r := range entries {
r := r.(*ir.KeyExpr)
if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
base.Fatalf("maplit: entry is not a literal: %v", r)
}
@ -795,9 +797,10 @@ func maplit(n ir.Node, m ir.Node, init *ir.Nodes) {
vstatk := readonlystaticname(tk)
vstate := readonlystaticname(te)
datak := ir.Nod(ir.OARRAYLIT, nil, nil)
datae := ir.Nod(ir.OARRAYLIT, nil, nil)
datak := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
datae := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
for _, r := range entries {
r := r.(*ir.KeyExpr)
datak.PtrList().Append(r.Left())
datae.PtrList().Append(r.Right())
}
@ -825,9 +828,7 @@ func maplit(n ir.Node, m ir.Node, init *ir.Nodes) {
loop.PtrBody().Set1(body)
loop.PtrInit().Set1(zero)
loop = typecheck(loop, ctxStmt)
loop = walkstmt(loop)
init.Append(loop)
appendWalkStmt(init, loop)
return
}
// For a small number of entries, just add them directly.
@ -839,33 +840,21 @@ func maplit(n ir.Node, m ir.Node, init *ir.Nodes) {
tmpelem := temp(m.Type().Elem())
for _, r := range entries {
r := r.(*ir.KeyExpr)
index, elem := r.Left(), r.Right()
setlineno(index)
a := ir.Nod(ir.OAS, tmpkey, index)
a = typecheck(a, ctxStmt)
a = walkstmt(a)
init.Append(a)
appendWalkStmt(init, ir.Nod(ir.OAS, tmpkey, index))
setlineno(elem)
a = ir.Nod(ir.OAS, tmpelem, elem)
a = typecheck(a, ctxStmt)
a = walkstmt(a)
init.Append(a)
appendWalkStmt(init, ir.Nod(ir.OAS, tmpelem, elem))
setlineno(tmpelem)
a = ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, tmpkey), tmpelem)
a = typecheck(a, ctxStmt)
a = walkstmt(a)
init.Append(a)
appendWalkStmt(init, ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, tmpkey), tmpelem))
}
a = ir.Nod(ir.OVARKILL, tmpkey, nil)
a = typecheck(a, ctxStmt)
init.Append(a)
a = ir.Nod(ir.OVARKILL, tmpelem, nil)
a = typecheck(a, ctxStmt)
init.Append(a)
appendWalkStmt(init, ir.Nod(ir.OVARKILL, tmpkey, nil))
appendWalkStmt(init, ir.Nod(ir.OVARKILL, tmpelem, nil))
}
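maplit lowers a map composite literal either into a loop over two parallel read-only key/element arrays (for many entries) or into direct inserts through temporaries (for a few). A source-level example of what such a literal amounts to; the entry-count threshold and the exact generated shape are compiler details not reproduced here:

	package main

	import "fmt"

	func main() {
		// A map composite literal with constant keys and elements ...
		m := map[string]int{"a": 1, "b": 2, "c": 3}

		// ... means roughly this: make the map, then insert every entry,
		// either directly or by looping over static key/element arrays.
		n := make(map[string]int, 3)
		keys := [...]string{"a", "b", "c"}
		elems := [...]int{1, 2, 3}
		for i := range keys {
			n[keys[i]] = elems[i]
		}

		fmt.Println(m, n)
	}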
func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
@ -874,10 +863,12 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
default:
base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n)
case ir.ONAME, ir.OMETHEXPR:
a := ir.Nod(ir.OAS, var_, n)
a = typecheck(a, ctxStmt)
init.Append(a)
case ir.ONAME:
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, n))
case ir.OMETHEXPR:
n := n.(*ir.MethodExpr)
anylit(n.FuncName(), var_, init)
case ir.OPTRLIT:
if !t.IsPtr() {
@ -887,26 +878,20 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
var r ir.Node
if n.Right() != nil {
// n.Right is stack temporary used as backing store.
init.Append(ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410)
r = ir.Nod(ir.OADDR, n.Right(), nil)
r = typecheck(r, ctxExpr)
appendWalkStmt(init, ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410)
r = nodAddr(n.Right())
} else {
r = ir.Nod(ir.ONEW, ir.TypeNode(n.Left().Type()), nil)
r = typecheck(r, ctxExpr)
r.SetEsc(n.Esc())
}
r = walkexpr(r, init)
a := ir.Nod(ir.OAS, var_, r)
a = typecheck(a, ctxStmt)
init.Append(a)
appendWalkStmt(init, ir.Nod(ir.OAS, var_, r))
var_ = ir.Nod(ir.ODEREF, var_, nil)
var_ = typecheck(var_, ctxExpr|ctxAssign)
anylit(n.Left(), var_, init)
case ir.OSTRUCTLIT, ir.OARRAYLIT:
n := n.(*ir.CompLitExpr)
if !t.IsStruct() && !t.IsArray() {
base.Fatalf("anylit: not struct/array")
}
@ -922,11 +907,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
fixedlit(ctxt, initKindStatic, n, vstat, init)
// copy static to var
a := ir.Nod(ir.OAS, var_, vstat)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
appendWalkStmt(init, ir.Nod(ir.OAS, var_, vstat))
// add expressions to automatic
fixedlit(inInitFunction, initKindDynamic, n, var_, init)
@ -941,18 +922,17 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
}
// initialization of an array or struct with unspecified components (missing fields or arrays)
if isSimpleName(var_) || int64(n.List().Len()) < components {
a := ir.Nod(ir.OAS, var_, nil)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
appendWalkStmt(init, ir.Nod(ir.OAS, var_, nil))
}
fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
case ir.OSLICELIT:
n := n.(*ir.CompLitExpr)
slicelit(inInitFunction, n, var_, init)
case ir.OMAPLIT:
n := n.(*ir.CompLitExpr)
if !t.IsMap() {
base.Fatalf("anylit: not map")
}
@ -963,7 +943,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
// oaslit handles special composite literal assignments.
// It returns true if n's effects have been added to init,
// in which case n should be dropped from the program by the caller.
func oaslit(n ir.Node, init *ir.Nodes) bool {
func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool {
if n.Left() == nil || n.Right() == nil {
// not a special composite literal assignment
return false
@ -987,7 +967,7 @@ func oaslit(n ir.Node, init *ir.Nodes) bool {
return false
case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
if vmatch1(n.Left(), n.Right()) {
if refersToCommonName(n.Left(), n.Right()) {
// not a special composite literal assignment
return false
}
@ -1005,30 +985,32 @@ func getlit(lit ir.Node) int {
}
// stataddr returns the static address of n, if n has one, or else nil.
func stataddr(n ir.Node) ir.Node {
func stataddr(n ir.Node) (name *ir.Name, offset int64, ok bool) {
if n == nil {
return nil
return nil, 0, false
}
switch n.Op() {
case ir.ONAME, ir.OMETHEXPR:
return ir.SepCopy(n)
case ir.ONAME:
n := n.(*ir.Name)
return n, 0, true
case ir.OMETHEXPR:
n := n.(*ir.MethodExpr)
return stataddr(n.FuncName())
case ir.ODOT:
nam := stataddr(n.Left())
if nam == nil {
if name, offset, ok = stataddr(n.Left()); !ok {
break
}
nam.SetOffset(nam.Offset() + n.Offset())
nam.SetType(n.Type())
return nam
offset += n.Offset()
return name, offset, true
case ir.OINDEX:
if n.Left().Type().IsSlice() {
break
}
nam := stataddr(n.Left())
if nam == nil {
if name, offset, ok = stataddr(n.Left()); !ok {
break
}
l := getlit(n.Right())
@ -1040,12 +1022,11 @@ func stataddr(n ir.Node) ir.Node {
if n.Type().Width != 0 && thearch.MAXWIDTH/n.Type().Width <= int64(l) {
break
}
nam.SetOffset(nam.Offset() + int64(l)*n.Type().Width)
nam.SetType(n.Type())
return nam
offset += int64(l) * n.Type().Width
return name, offset, true
}
return nil
return nil, 0, false
}
func (s *InitSchedule) initplan(n ir.Node) {
@ -1062,11 +1043,12 @@ func (s *InitSchedule) initplan(n ir.Node) {
var k int64
for _, a := range n.List().Slice() {
if a.Op() == ir.OKEY {
k = indexconst(a.Left())
kv := a.(*ir.KeyExpr)
k = indexconst(kv.Left())
if k < 0 {
base.Fatalf("initplan arraylit: invalid index %v", a.Left())
base.Fatalf("initplan arraylit: invalid index %v", kv.Left())
}
a = a.Right()
a = kv.Right()
}
s.addvalue(p, k*n.Type().Elem().Width, a)
k++
@ -1077,6 +1059,7 @@ func (s *InitSchedule) initplan(n ir.Node) {
if a.Op() != ir.OSTRUCTKEY {
base.Fatalf("initplan structlit")
}
a := a.(*ir.StructKeyExpr)
if a.Sym().IsBlank() {
continue
}
@ -1088,6 +1071,7 @@ func (s *InitSchedule) initplan(n ir.Node) {
if a.Op() != ir.OKEY {
base.Fatalf("initplan maplit")
}
a := a.(*ir.KeyExpr)
s.addvalue(p, -1, a.Right())
}
}
@ -1133,7 +1117,7 @@ func isZero(n ir.Node) bool {
case ir.OARRAYLIT:
for _, n1 := range n.List().Slice() {
if n1.Op() == ir.OKEY {
n1 = n1.Right()
n1 = n1.(*ir.KeyExpr).Right()
}
if !isZero(n1) {
return false
@ -1143,6 +1127,7 @@ func isZero(n ir.Node) bool {
case ir.OSTRUCTLIT:
for _, n1 := range n.List().Slice() {
n1 := n1.(*ir.StructKeyExpr)
if !isZero(n1.Left()) {
return false
}
@ -1157,22 +1142,33 @@ func isvaluelit(n ir.Node) bool {
return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
}
func genAsStatic(as ir.Node) {
func genAsStatic(as *ir.AssignStmt) {
if as.Left().Type() == nil {
base.Fatalf("genAsStatic as.Left not typechecked")
}
nam := stataddr(as.Left())
if nam == nil || (nam.Class() != ir.PEXTERN && as.Left() != ir.BlankNode) {
name, offset, ok := stataddr(as.Left())
if !ok || (name.Class() != ir.PEXTERN && as.Left() != ir.BlankNode) {
base.Fatalf("genAsStatic: lhs %v", as.Left())
}
switch {
case as.Right().Op() == ir.OLITERAL:
litsym(nam, as.Right(), int(as.Right().Type().Width))
case (as.Right().Op() == ir.ONAME || as.Right().Op() == ir.OMETHEXPR) && as.Right().Class() == ir.PFUNC:
pfuncsym(nam, as.Right())
default:
base.Fatalf("genAsStatic: rhs %v", as.Right())
switch r := as.Right(); r.Op() {
case ir.OLITERAL:
litsym(name, offset, r, int(r.Type().Width))
return
case ir.OMETHEXPR:
r := r.(*ir.MethodExpr)
pfuncsym(name, offset, r.FuncName())
return
case ir.ONAME:
r := r.(*ir.Name)
if r.Offset() != 0 {
base.Fatalf("genAsStatic %+v", as)
}
if r.Class() == ir.PFUNC {
pfuncsym(name, offset, r)
return
}
}
base.Fatalf("genAsStatic: rhs %v", as.Right())
}

(File diff suppressed because it is too large.)


@ -100,13 +100,26 @@ func autolabel(prefix string) *types.Sym {
return lookupN(prefix, int(n))
}
// find all the exported symbols in package opkg
// dotImports tracks all PkgNames that have been dot-imported.
var dotImports []*ir.PkgName
// dotImportRefs maps idents introduced by importDot back to the
// ir.PkgName they were dot-imported through.
var dotImportRefs map[*ir.Ident]*ir.PkgName
// find all the exported symbols in package referenced by PkgName,
// and make them available in the current package
func importdot(opkg *types.Pkg, pack *ir.PkgName) {
n := 0
func importDot(pack *ir.PkgName) {
if dotImportRefs == nil {
dotImportRefs = make(map[*ir.Ident]*ir.PkgName)
}
opkg := pack.Pkg
for _, s := range opkg.Syms {
if s.Def == nil {
continue
if _, ok := declImporter[s]; !ok {
continue
}
}
if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
continue
@ -118,21 +131,36 @@ func importdot(opkg *types.Pkg, pack *ir.PkgName) {
continue
}
s1.Def = s.Def
s1.Block = s.Block
if ir.AsNode(s1.Def).Name() == nil {
ir.Dump("s1def", ir.AsNode(s1.Def))
base.Fatalf("missing Name")
}
ir.AsNode(s1.Def).Name().PkgName = pack
s1.Origpkg = opkg
n++
id := ir.NewIdent(src.NoXPos, s)
dotImportRefs[id] = pack
s1.Def = id
s1.Block = 1
}
if n == 0 {
// can't possibly be used - there were no symbols
base.ErrorfAt(pack.Pos(), "imported and not used: %q", opkg.Path)
dotImports = append(dotImports, pack)
}
// checkDotImports reports errors for any unused dot imports.
func checkDotImports() {
for _, pack := range dotImports {
if !pack.Used {
base.ErrorfAt(pack.Pos(), "imported and not used: %q", pack.Pkg.Path)
}
}
// No longer needed; release memory.
dotImports = nil
dotImportRefs = nil
}
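importDot now records each dot-imported identifier in dotImportRefs and resolves it lazily; checkDotImports reports "imported and not used" only if nothing from the package was ever resolved. A small program showing the user-visible behavior being tracked, dot-importing the standard strings package:

	package main

	// Dot-importing strings makes its exported names visible unqualified.
	// If none of them were used, the compiler would report
	// "imported and not used", which is what checkDotImports enforces.
	import (
		"fmt"
		. "strings"
	)

	func main() {
		fmt.Println(ToUpper("dot import")) // ToUpper comes from strings
	}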
// nodAddr returns a node representing &n at base.Pos.
func nodAddr(n ir.Node) *ir.AddrExpr {
return nodAddrAt(base.Pos, n)
}
// nodAddrAt returns a node representing &n at position pos.
func nodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr {
return ir.NewAddrExpr(pos, n)
}
// newname returns a new ONAME Node associated with symbol s.
@ -519,8 +547,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
op = ir.OCONV
}
r := ir.Nod(op, n, nil)
r.SetType(t)
r := ir.NewConvExpr(base.Pos, op, t, n)
r.SetTypecheck(1)
r.SetImplicit(true)
return r
@ -528,7 +555,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
// backingArrayPtrLen extracts the pointer and length from a slice or string.
// This constructs two nodes referring to n, so n must be a cheapexpr.
func backingArrayPtrLen(n ir.Node) (ptr, len ir.Node) {
func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
var init ir.Nodes
c := cheapexpr(n, &init)
if c != n || init.Len() != 0 {
@ -540,17 +567,17 @@ func backingArrayPtrLen(n ir.Node) (ptr, len ir.Node) {
} else {
ptr.SetType(n.Type().Elem().PtrTo())
}
len = ir.Nod(ir.OLEN, n, nil)
len.SetType(types.Types[types.TINT])
return ptr, len
length = ir.Nod(ir.OLEN, n, nil)
length.SetType(types.Types[types.TINT])
return ptr, length
}
func syslook(name string) ir.Node {
func syslook(name string) *ir.Name {
s := Runtimepkg.Lookup(name)
if s == nil || s.Def == nil {
base.Fatalf("syslook: can't find runtime.%s", name)
}
return ir.AsNode(s.Def)
return ir.AsNode(s.Def).(*ir.Name)
}
// typehash computes a hash value for type t to use in type switch statements.
@ -578,7 +605,11 @@ func calcHasCall(n ir.Node) bool {
}
switch n.Op() {
case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE:
default:
base.Fatalf("calcHasCall %+v", n)
panic("unreachable")
case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE, ir.ONAMEOFFSET:
if n.HasCall() {
base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
}
@ -590,6 +621,7 @@ func calcHasCall(n ir.Node) bool {
if instrumenting {
return true
}
return n.Left().HasCall() || n.Right().HasCall()
case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD:
// These ops might panic, make sure they are done
@ -598,27 +630,68 @@ func calcHasCall(n ir.Node) bool {
// When using soft-float, these ops might be rewritten to function calls
// so we ensure they are evaluated first.
case ir.OADD, ir.OSUB, ir.ONEG, ir.OMUL:
case ir.OADD, ir.OSUB, ir.OMUL:
if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) {
return true
}
return n.Left().HasCall() || n.Right().HasCall()
case ir.ONEG:
if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) {
return true
}
return n.Left().HasCall()
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
if thearch.SoftFloat && (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()]) {
return true
}
return n.Left().HasCall() || n.Right().HasCall()
case ir.OCONV:
if thearch.SoftFloat && ((isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) || (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()])) {
return true
}
}
return n.Left().HasCall()
if n.Left() != nil && n.Left().HasCall() {
return true
case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE:
return n.Left().HasCall() || n.Right().HasCall()
case ir.OAS:
return n.Left().HasCall() || n.Right() != nil && n.Right().HasCall()
case ir.OADDR:
return n.Left().HasCall()
case ir.OPAREN:
return n.Left().HasCall()
case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV,
ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW,
ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF,
ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE:
return n.Left().HasCall()
case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
return n.Left().HasCall()
case ir.OGETG, ir.OCLOSUREREAD, ir.OMETHEXPR:
return false
// TODO(rsc): These look wrong in various ways but are what calcHasCall has always done.
case ir.OADDSTR:
// TODO(rsc): This used to check left and right, which are not part of OADDSTR.
return false
case ir.OBLOCK:
// TODO(rsc): Surely the block's statements matter.
return false
case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR:
// TODO(rsc): Some conversions are themselves calls, no?
return n.Left().HasCall()
case ir.ODOTTYPE2:
// TODO(rsc): Shouldn't this be up with ODOTTYPE above?
return n.Left().HasCall()
case ir.OSLICEHEADER:
// TODO(rsc): What about len and cap?
return n.Left().HasCall()
case ir.OAS2DOTTYPE, ir.OAS2FUNC:
// TODO(rsc): Surely we need to check List and Rlist.
return false
}
if n.Right() != nil && n.Right().HasCall() {
return true
}
return false
}
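calcHasCall is now an explicit per-op case list deciding whether an expression contains, or may be rewritten into, a call, with the flag propagated bottom-up from the operands (ops such as soft-float arithmetic or possibly-panicking division are forced to true). A simplified sketch of that bottom-up propagation over a toy expression type; the op names and rules are stand-ins:

	package main

	import "fmt"

	type expr struct {
		op          string
		left, right *expr
	}

	// hasCall mirrors the bottom-up propagation: a call node is true,
	// leaves are false, and an operator is true iff an operand is.
	// (The real code also forces some ops to true unconditionally.)
	func hasCall(e *expr) bool {
		if e == nil {
			return false
		}
		switch e.op {
		case "CALL":
			return true
		case "LITERAL", "NAME":
			return false
		default:
			return hasCall(e.left) || hasCall(e.right)
		}
	}

	func main() {
		e := &expr{op: "ADD",
			left:  &expr{op: "NAME"},
			right: &expr{op: "CALL"},
		}
		fmt.Println(hasCall(e)) // true: the right operand is a call
	}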
func badtype(op ir.Op, tl, tr *types.Type) {
@ -697,29 +770,35 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node {
}
switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
return n
case ir.ODOT, ir.OLEN, ir.OCAP:
case ir.OLEN, ir.OCAP:
l := safeexpr(n.Left(), init)
if l == n.Left() {
return n
}
r := ir.Copy(n)
r.SetLeft(l)
r = typecheck(r, ctxExpr)
r = walkexpr(r, init)
return r
case ir.ODOTPTR, ir.ODEREF:
l := safeexpr(n.Left(), init)
if l == n.Left() {
return n
}
a := ir.Copy(n)
a := ir.Copy(n).(*ir.UnaryExpr)
a.SetLeft(l)
a = walkexpr(a, init)
return a
return walkexpr(typecheck(a, ctxExpr), init)
case ir.ODOT, ir.ODOTPTR:
l := safeexpr(n.Left(), init)
if l == n.Left() {
return n
}
a := ir.Copy(n).(*ir.SelectorExpr)
a.SetLeft(l)
return walkexpr(typecheck(a, ctxExpr), init)
case ir.ODEREF:
l := safeexpr(n.Left(), init)
if l == n.Left() {
return n
}
a := ir.Copy(n).(*ir.StarExpr)
a.SetLeft(l)
return walkexpr(typecheck(a, ctxExpr), init)
case ir.OINDEX, ir.OINDEXMAP:
l := safeexpr(n.Left(), init)
@ -727,11 +806,10 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node {
if l == n.Left() && r == n.Right() {
return n
}
a := ir.Copy(n)
a := ir.Copy(n).(*ir.IndexExpr)
a.SetLeft(l)
a.SetRight(r)
a = walkexpr(a, init)
return a
return walkexpr(typecheck(a, ctxExpr), init)
case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
if isStaticCompositeLiteral(n) {
@ -748,10 +826,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node {
func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
l := temp(t)
a := ir.Nod(ir.OAS, l, n)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
appendWalkStmt(init, ir.Nod(ir.OAS, l, n))
return l
}
@ -903,7 +978,7 @@ func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (
// find missing fields that
// will give shortest unique addressing.
// modify the tree with missing type names.
func adddot(n ir.Node) ir.Node {
func adddot(n *ir.SelectorExpr) *ir.SelectorExpr {
n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
if n.Left().Diag() {
n.SetDiag(true)
@ -926,8 +1001,9 @@ func adddot(n ir.Node) ir.Node {
case path != nil:
// rebuild elided dots
for c := len(path) - 1; c >= 0; c-- {
n.SetLeft(nodSym(ir.ODOT, n.Left(), path[c].field.Sym))
n.Left().SetImplicit(true)
dot := nodSym(ir.ODOT, n.Left(), path[c].field.Sym)
dot.SetImplicit(true)
n.SetLeft(dot)
}
case ambig:
base.Errorf("ambiguous selector %v", n)
@ -1144,7 +1220,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
fn.PtrBody().Append(n)
}
dot := adddot(nodSym(ir.OXDOT, nthis, method.Sym))
dot := adddot(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
// generate call
// It's not possible to use a tail call when dynamic linking on ppc64le. The
@ -1155,12 +1231,12 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
// value for that function.
if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
// generate tail call: adjust pointer receiver and jump to embedded method.
dot = dot.Left() // skip final .M
left := dot.Left() // skip final .M
// TODO(mdempsky): Remove dependency on dotlist.
if !dotlist[0].field.Type.IsPtr() {
dot = ir.Nod(ir.OADDR, dot, nil)
left = nodAddr(left)
}
as := ir.Nod(ir.OAS, nthis, convnop(dot, rcvr))
as := ir.Nod(ir.OAS, nthis, convnop(left, rcvr))
fn.PtrBody().Append(as)
fn.PtrBody().Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym)))
} else {
@ -1169,11 +1245,12 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
call.PtrList().Set(paramNnames(tfn.Type()))
call.SetIsDDD(tfn.Type().IsVariadic())
if method.Type.NumResults() > 0 {
n := ir.Nod(ir.ORETURN, nil, nil)
n.PtrList().Set1(call)
call = n
ret := ir.Nod(ir.ORETURN, nil, nil)
ret.PtrList().Set1(call)
fn.PtrBody().Append(ret)
} else {
fn.PtrBody().Append(call)
}
fn.PtrBody().Append(call)
}
if false && base.Flag.LowerR != 0 {
@ -1198,7 +1275,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
escapeFuncs([]*ir.Func{fn}, false)
Curfn = nil
xtop = append(xtop, fn)
Target.Decls = append(Target.Decls, fn)
}
func paramNnames(ft *types.Type) []ir.Node {
@ -1362,8 +1439,9 @@ func initExpr(init []ir.Node, n ir.Node) ir.Node {
}
if ir.MayBeShared(n) {
// Introduce OCONVNOP to hold init list.
n = ir.Nod(ir.OCONVNOP, n, nil)
n.SetType(n.Left().Type())
old := n
n = ir.Nod(ir.OCONVNOP, old, nil)
n.SetType(old.Type())
n.SetTypecheck(1)
}
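genwrapper builds the wrapper bodies behind promoted methods: it reconstructs the elided embedded-field selectors via adddot and, when possible, turns the call into a tail call after adjusting the receiver. A source-level example of the promotion such a wrapper implements; the types are invented for illustration:

	package main

	import "fmt"

	type base struct{ n int }

	func (b *base) Bump() { b.n++ }

	// outer embeds *base, so Bump is promoted to outer through a
	// compiler-generated wrapper that adjusts the receiver from outer
	// to *base and then calls (or tail-calls) base.Bump.
	type outer struct {
		*base
	}

	func main() {
		o := outer{base: &base{}}
		o.Bump() // effectively o.base.Bump() via the generated wrapper
		fmt.Println(o.n)
	}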


@ -15,7 +15,7 @@ import (
)
// typecheckswitch typechecks a switch statement.
func typecheckswitch(n ir.Node) {
func typecheckswitch(n *ir.SwitchStmt) {
typecheckslice(n.Init().Slice(), ctxStmt)
if n.Left() != nil && n.Left().Op() == ir.OTYPESW {
typecheckTypeSwitch(n)
@ -24,24 +24,26 @@ func typecheckswitch(n ir.Node) {
}
}
func typecheckTypeSwitch(n ir.Node) {
n.Left().SetRight(typecheck(n.Left().Right(), ctxExpr))
t := n.Left().Right().Type()
func typecheckTypeSwitch(n *ir.SwitchStmt) {
guard := n.Left().(*ir.TypeSwitchGuard)
guard.SetRight(typecheck(guard.Right(), ctxExpr))
t := guard.Right().Type()
if t != nil && !t.IsInterface() {
base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", n.Left().Right())
base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", guard.Right())
t = nil
}
// We don't actually declare the type switch's guarded
// declaration itself. So if there are no cases, we won't
// notice that it went unused.
if v := n.Left().Left(); v != nil && !ir.IsBlank(v) && n.List().Len() == 0 {
if v := guard.Left(); v != nil && !ir.IsBlank(v) && n.List().Len() == 0 {
base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym())
}
var defCase, nilCase ir.Node
var ts typeSet
for _, ncase := range n.List().Slice() {
ncase := ncase.(*ir.CaseStmt)
ls := ncase.List().Slice()
if len(ls) == 0 { // default:
if defCase != nil {
@ -60,31 +62,33 @@ func typecheckTypeSwitch(n ir.Node) {
var missing, have *types.Field
var ptr int
switch {
case ir.IsNil(n1): // case nil:
if ir.IsNil(n1) { // case nil:
if nilCase != nil {
base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
} else {
nilCase = ncase
}
case n1.Op() != ir.OTYPE:
continue
}
if n1.Op() != ir.OTYPE {
base.ErrorfAt(ncase.Pos(), "%L is not a type", n1)
case !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke():
continue
}
if !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke() {
if have != nil && !have.Broke() {
base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
" (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left().Right(), n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
" (wrong type for %v method)\n\thave %v%S\n\twant %v%S", guard.Right(), n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else if ptr != 0 {
base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
" (%v method has pointer receiver)", n.Left().Right(), n1.Type(), missing.Sym)
" (%v method has pointer receiver)", guard.Right(), n1.Type(), missing.Sym)
} else {
base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
" (missing %v method)", n.Left().Right(), n1.Type(), missing.Sym)
" (missing %v method)", guard.Right(), n1.Type(), missing.Sym)
}
continue
}
if n1.Op() == ir.OTYPE {
ts.add(ncase.Pos(), n1.Type())
}
ts.add(ncase.Pos(), n1.Type())
}
if ncase.Rlist().Len() != 0 {
@ -144,7 +148,7 @@ func (s *typeSet) add(pos src.XPos, typ *types.Type) {
s.m[ls] = append(prevs, typeSetEntry{pos, typ})
}
func typecheckExprSwitch(n ir.Node) {
func typecheckExprSwitch(n *ir.SwitchStmt) {
t := types.Types[types.TBOOL]
if n.Left() != nil {
n.SetLeft(typecheck(n.Left(), ctxExpr))
@ -175,6 +179,7 @@ func typecheckExprSwitch(n ir.Node) {
var defCase ir.Node
var cs constSet
for _, ncase := range n.List().Slice() {
ncase := ncase.(*ir.CaseStmt)
ls := ncase.List().Slice()
if len(ls) == 0 { // default:
if defCase != nil {
@ -225,7 +230,7 @@ func typecheckExprSwitch(n ir.Node) {
}
// walkswitch walks a switch statement.
func walkswitch(sw ir.Node) {
func walkswitch(sw *ir.SwitchStmt) {
// Guard against double walk, see #25776.
if sw.List().Len() == 0 && sw.Body().Len() > 0 {
return // Was fatal, but eliminating every possible source of double-walking is hard
@ -240,7 +245,7 @@ func walkswitch(sw ir.Node) {
// walkExprSwitch generates an AST implementing sw. sw is an
// expression switch.
func walkExprSwitch(sw ir.Node) {
func walkExprSwitch(sw *ir.SwitchStmt) {
lno := setlineno(sw)
cond := sw.Left()
@ -278,6 +283,7 @@ func walkExprSwitch(sw ir.Node) {
var defaultGoto ir.Node
var body ir.Nodes
for _, ncase := range sw.List().Slice() {
ncase := ncase.(*ir.CaseStmt)
label := autolabel(".s")
jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
@ -296,7 +302,7 @@ func walkExprSwitch(sw ir.Node) {
// Process body.
body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
body.Append(ncase.Body().Slice()...)
if fall, pos := hasFall(ncase.Body().Slice()); !fall {
if fall, pos := endsInFallthrough(ncase.Body().Slice()); !fall {
br := ir.Nod(ir.OBREAK, nil, nil)
br.SetPos(pos)
body.Append(br)
@ -393,7 +399,7 @@ func (s *exprSwitch) flush() {
func(i int) ir.Node {
return ir.Nod(ir.OLE, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1])))
},
func(i int, nif ir.Node) {
func(i int, nif *ir.IfStmt) {
run := runs[i]
nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run))))
s.search(run, nif.PtrBody())
@ -428,7 +434,7 @@ func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
func(i int) ir.Node {
return ir.Nod(ir.OLE, s.exprname, cc[i-1].hi)
},
func(i int, nif ir.Node) {
func(i int, nif *ir.IfStmt) {
c := &cc[i]
nif.SetLeft(c.test(s.exprname))
nif.PtrBody().Set1(c.jmp)
@ -456,7 +462,7 @@ func (c *exprClause) test(exprname ir.Node) ir.Node {
return ir.NodAt(c.pos, ir.OEQ, exprname, c.lo)
}
func allCaseExprsAreSideEffectFree(sw ir.Node) bool {
func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool {
// In theory, we could be more aggressive, allowing any
// side-effect-free expressions in cases, but it's a bit
// tricky because some of that information is unavailable due
@ -465,9 +471,7 @@ func allCaseExprsAreSideEffectFree(sw ir.Node) bool {
// enough.
for _, ncase := range sw.List().Slice() {
if ncase.Op() != ir.OCASE {
base.Fatalf("switch string(byteslice) bad op: %v", ncase.Op())
}
ncase := ncase.(*ir.CaseStmt)
for _, v := range ncase.List().Slice() {
if v.Op() != ir.OLITERAL {
return false
@ -477,8 +481,8 @@ func allCaseExprsAreSideEffectFree(sw ir.Node) bool {
return true
}
// hasFall reports whether stmts ends with a "fallthrough" statement.
func hasFall(stmts []ir.Node) (bool, src.XPos) {
// endsInFallthrough reports whether stmts ends with a "fallthrough" statement.
func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) {
// Search backwards for the index of the fallthrough
// statement. Do not assume it'll be in the last
// position, since in some cases (e.g. when the statement
@ -497,9 +501,9 @@ func hasFall(stmts []ir.Node) (bool, src.XPos) {
// walkTypeSwitch generates an AST that implements sw, where sw is a
// type switch.
func walkTypeSwitch(sw ir.Node) {
func walkTypeSwitch(sw *ir.SwitchStmt) {
var s typeSwitch
s.facename = sw.Left().Right()
s.facename = sw.Left().(*ir.TypeSwitchGuard).Right()
sw.SetLeft(nil)
s.facename = walkexpr(s.facename, sw.PtrInit())
@ -541,6 +545,7 @@ func walkTypeSwitch(sw ir.Node) {
var defaultGoto, nilGoto ir.Node
var body ir.Nodes
for _, ncase := range sw.List().Slice() {
ncase := ncase.(*ir.CaseStmt)
var caseVar ir.Node
if ncase.Rlist().Len() != 0 {
caseVar = ncase.Rlist().First()
@ -654,9 +659,7 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) {
dot := ir.NodAt(pos, ir.ODOTTYPE, s.facename, nil)
dot.SetType(typ) // iface.(type)
as.PtrRlist().Set1(dot)
as = typecheck(as, ctxStmt)
as = walkexpr(as, &body)
body.Append(as)
appendWalkStmt(&body, as)
// if ok { goto label }
nif := ir.NodAt(pos, ir.OIF, nil, nil)
@ -706,7 +709,7 @@ func (s *typeSwitch) flush() {
func(i int) ir.Node {
return ir.Nod(ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
},
func(i int, nif ir.Node) {
func(i int, nif *ir.IfStmt) {
// TODO(mdempsky): Omit hash equality check if
// there's only one type.
c := cc[i]
@ -725,7 +728,7 @@ func (s *typeSwitch) flush() {
//
// leaf(i, nif) should setup nif (an OIF node) to test case i. In
// particular, it should set nif.Left and nif.Nbody.
func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif ir.Node)) {
func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif *ir.IfStmt)) {
const binarySearchMin = 4 // minimum number of cases for binary search
var do func(lo, hi int, out *ir.Nodes)
@ -733,7 +736,7 @@ func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i in
n := hi - lo
if n < binarySearchMin {
for i := lo; i < hi; i++ {
nif := ir.Nod(ir.OIF, nil, nil)
nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
leaf(i, nif)
base.Pos = base.Pos.WithNotStmt()
nif.SetLeft(typecheck(nif.Left(), ctxExpr))
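The binarySearch change above threads *ir.IfStmt through the less/leaf callbacks without altering the shape of the generated code. As a minimal, standalone sketch (plain Go, hypothetical gen/cases names, not the compiler's ir nodes), the codegen pattern is: below a small threshold emit a linear chain of "if" tests, otherwise split the sorted cases and recurse, giving O(log n) comparisons.

package main

import "fmt"

const binarySearchMin = 4 // same threshold the compiler uses

// gen prints pseudo-code for dispatching on sorted case values,
// mirroring the linear-vs-binary split in walk/switch.go.
func gen(cases []int, lo, hi, indent int) {
	pad := fmt.Sprintf("%*s", indent*2, "")
	if hi-lo < binarySearchMin {
		for i := lo; i < hi; i++ {
			fmt.Printf("%sif x == %d { goto case%d }\n", pad, cases[i], i)
		}
		return
	}
	mid := lo + (hi-lo)/2
	fmt.Printf("%sif x <= %d {\n", pad, cases[mid-1])
	gen(cases, lo, mid, indent+1)
	fmt.Printf("%s} else {\n", pad)
	gen(cases, mid, hi, indent+1)
	fmt.Printf("%s}\n", pad)
}

func main() {
	gen([]int{1, 3, 5, 7, 9, 11, 13, 15}, 0, 8, 0)
}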

View File

@ -8,6 +8,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"go/constant"
"go/token"
@ -90,11 +91,24 @@ func resolve(n ir.Node) (res ir.Node) {
defer tracePrint("resolve", n)(&res)
}
// Stub ir.Name left for us by iimport.
if n, ok := n.(*ir.Name); ok {
if n.Sym().Pkg == types.LocalPkg {
base.Fatalf("unexpected Name: %+v", n)
if sym := n.Sym(); sym.Pkg != types.LocalPkg {
// We might have an ir.Ident from oldname or importDot.
if id, ok := n.(*ir.Ident); ok {
if pkgName := dotImportRefs[id]; pkgName != nil {
pkgName.Used = true
}
if sym.Def == nil {
if _, ok := declImporter[sym]; !ok {
return n // undeclared name
}
sym.Def = ir.NewDeclNameAt(src.NoXPos, sym)
}
n = ir.AsNode(sym.Def)
}
// Stub ir.Name left for us by iimport.
n := n.(*ir.Name)
if inimport {
base.Fatalf("recursive inimport")
}
@ -236,7 +250,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) {
// Skip over parens.
for n.Op() == ir.OPAREN {
n = n.Left()
n = n.(*ir.ParenExpr).Left()
}
// Resolve definition of name and value of iota lazily.
@ -425,10 +439,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
return n
}
if n.Op() == ir.ONAME && n.SubOp() != 0 && top&ctxCallee == 0 {
base.Errorf("use of builtin %v not in function call", n.Sym())
n.SetType(nil)
return n
if n.Op() == ir.ONAME {
if n.SubOp() != 0 && top&ctxCallee == 0 {
base.Errorf("use of builtin %v not in function call", n.Sym())
n.SetType(nil)
return n
}
}
typecheckdef(n)
@ -472,6 +488,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
}
return n
case ir.ONAMEOFFSET:
// type already set
return n
case ir.OPACK:
base.Errorf("use of package %v without selector", n.Sym())
n.SetType(nil)
@ -637,19 +657,29 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
ir.OOROR,
ir.OSUB,
ir.OXOR:
var l ir.Node
var op ir.Op
var r ir.Node
var l, r ir.Node
var setLR func()
switch n := n.(type) {
case *ir.AssignOpStmt:
l, r = n.Left(), n.Right()
setLR = func() { n.SetLeft(l); n.SetRight(r) }
case *ir.BinaryExpr:
l, r = n.Left(), n.Right()
setLR = func() { n.SetLeft(l); n.SetRight(r) }
case *ir.LogicalExpr:
l, r = n.Left(), n.Right()
setLR = func() { n.SetLeft(l); n.SetRight(r) }
}
l = typecheck(l, ctxExpr)
r = typecheck(r, ctxExpr)
setLR()
if l.Type() == nil || r.Type() == nil {
n.SetType(nil)
return n
}
op := n.Op()
if n.Op() == ir.OASOP {
n.SetLeft(typecheck(n.Left(), ctxExpr))
n.SetRight(typecheck(n.Right(), ctxExpr))
l = n.Left()
r = n.Right()
checkassign(n, n.Left())
if l.Type() == nil || r.Type() == nil {
n.SetType(nil)
return n
}
checkassign(n, l)
if n.Implicit() && !okforarith[l.Type().Kind()] {
base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type())
n.SetType(nil)
@ -657,20 +687,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
}
// TODO(marvin): Fix Node.EType type union.
op = n.SubOp()
} else {
n.SetLeft(typecheck(n.Left(), ctxExpr))
n.SetRight(typecheck(n.Right(), ctxExpr))
l = n.Left()
r = n.Right()
if l.Type() == nil || r.Type() == nil {
n.SetType(nil)
return n
}
op = n.Op()
}
if op == ir.OLSH || op == ir.ORSH {
r = defaultlit(r, types.Types[types.TUINT])
n.SetRight(r)
setLR()
t := r.Type()
if !t.IsInteger() {
base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type())
@ -716,9 +736,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
// ideal mixed with non-ideal
l, r = defaultlit2(l, r, false)
setLR()
n.SetLeft(l)
n.SetRight(r)
if l.Type() == nil || r.Type() == nil {
n.SetType(nil)
return n
@ -752,10 +771,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
dowidth(l.Type())
if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 {
l = ir.Nod(aop, l, nil)
l.SetType(r.Type())
l = ir.NewConvExpr(base.Pos, aop, r.Type(), l)
l.SetTypecheck(1)
n.SetLeft(l)
setLR()
}
t = r.Type()
@ -774,10 +792,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
dowidth(r.Type())
if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 {
r = ir.Nod(aop, r, nil)
r.SetType(l.Type())
r = ir.NewConvExpr(base.Pos, aop, l.Type(), r)
r.SetTypecheck(1)
n.SetRight(r)
setLR()
}
t = l.Type()
@ -846,29 +863,30 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
if iscmp[n.Op()] {
t = types.UntypedBool
n.SetType(t)
n = evalConst(n)
if n.Op() != ir.OLITERAL {
l, r = defaultlit2(l, r, true)
n.SetLeft(l)
n.SetRight(r)
if con := evalConst(n); con.Op() == ir.OLITERAL {
return con
}
l, r = defaultlit2(l, r, true)
setLR()
return n
}
if et == types.TSTRING && n.Op() == ir.OADD {
// create or update OADDSTR node with list of strings in x + y + z + (w + v) + ...
var add *ir.AddStringExpr
if l.Op() == ir.OADDSTR {
orig := n
n = l
n.SetPos(orig.Pos())
add = l.(*ir.AddStringExpr)
add.SetPos(n.Pos())
} else {
n = ir.NodAt(n.Pos(), ir.OADDSTR, nil, nil)
n.PtrList().Set1(l)
add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l})
}
if r.Op() == ir.OADDSTR {
n.PtrList().AppendNodes(r.PtrList())
add.PtrList().AppendNodes(r.PtrList())
} else {
n.PtrList().Append(r)
add.PtrList().Append(r)
}
add.SetType(t)
return add
}
if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) {
@ -938,9 +956,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
return n
case ir.OCOMPLIT:
return typecheckcomplit(n)
return typecheckcomplit(n.(*ir.CompLitExpr))
case ir.OXDOT, ir.ODOT:
n := n.(*ir.SelectorExpr)
if n.Op() == ir.OXDOT {
n = adddot(n)
n.SetOp(ir.ODOT)
@ -1009,7 +1028,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
}
if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 {
n = typecheckpartialcall(n, s)
return typecheckpartialcall(n, s)
}
return n
@ -1274,9 +1293,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
return n
}
n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil))
n.Left().SetImplicit(true)
n.SetLeft(typecheck(n.Left(), ctxExpr))
addr := nodAddr(n.Left())
addr.SetImplicit(true)
n.SetLeft(typecheck(addr, ctxExpr))
l = n.Left()
}
t := l.Type()
@ -1326,9 +1345,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
// call and call like
case ir.OCALL:
n.(*ir.CallExpr).Use = ir.CallUseExpr
n := n.(*ir.CallExpr)
n.Use = ir.CallUseExpr
if top == ctxStmt {
n.(*ir.CallExpr).Use = ir.CallUseStmt
n.Use = ir.CallUseStmt
}
typecheckslice(n.Init().Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907)
n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType|ctxCallee))
@ -1338,7 +1358,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
l := n.Left()
if l.Op() == ir.ONAME && l.SubOp() != 0 {
if l.Op() == ir.ONAME && l.(*ir.Name).SubOp() != 0 {
if n.IsDDD() && l.SubOp() != ir.OAPPEND {
base.Errorf("invalid use of ... with builtin %v", l)
}
@ -1347,12 +1367,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
switch l.SubOp() {
default:
base.Fatalf("unknown builtin %v", l)
return n
case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
n.SetOp(l.SubOp())
n.SetLeft(nil)
n.SetTypecheck(0) // re-typechecking new op is OK, not a loop
return typecheck(n, top)
case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL:
typecheckargs(n)
@ -1363,9 +1383,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
n.SetType(nil)
return n
}
old := n
n = ir.NodAt(n.Pos(), l.SubOp(), arg, nil)
n = initExpr(old.Init().Slice(), n) // typecheckargs can add to old.Init
u := ir.NewUnaryExpr(n.Pos(), l.SubOp(), arg)
return typecheck(initExpr(n.Init().Slice(), u), top) // typecheckargs can add to old.Init
case ir.OCOMPLEX, ir.OCOPY:
typecheckargs(n)
@ -1374,11 +1393,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
n.SetType(nil)
return n
}
old := n
n = ir.NodAt(n.Pos(), l.SubOp(), arg1, arg2)
n = initExpr(old.Init().Slice(), n) // typecheckargs can add to old.Init
b := ir.NewBinaryExpr(n.Pos(), l.SubOp(), arg1, arg2)
return typecheck(initExpr(n.Init().Slice(), b), top) // typecheckargs can add to old.Init
}
return typecheck(n, top)
panic("unreachable")
}
n.SetLeft(defaultlit(n.Left(), nil))
@ -1398,7 +1416,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
return n
}
n = ir.NodAt(n.Pos(), ir.OCONV, arg, nil)
n := ir.NodAt(n.Pos(), ir.OCONV, arg, nil)
n.SetType(l.Type())
return typecheck1(n, top)
}
@ -1453,14 +1471,16 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
if t.NumResults() == 1 {
n.SetType(l.Type().Results().Field(0).Type)
if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && isRuntimePkg(n.Left().Sym().Pkg) && n.Left().Sym().Name == "getg" {
// Emit code for runtime.getg() directly instead of calling function.
// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
// so that the ordering pass can make sure to preserve the semantics of the original code
// (in particular, the exact time of the function call) by introducing temporaries.
// In this case, we know getg() always returns the same result within a given function
// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
n.SetOp(ir.OGETG)
if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME {
if sym := n.Left().(*ir.Name).Sym(); isRuntimePkg(sym.Pkg) && sym.Name == "getg" {
// Emit code for runtime.getg() directly instead of calling function.
// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
// so that the ordering pass can make sure to preserve the semantics of the original code
// (in particular, the exact time of the function call) by introducing temporaries.
// In this case, we know getg() always returns the same result within a given function
// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
n.SetOp(ir.OGETG)
}
}
return n
}
@ -1723,6 +1743,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
return n
case ir.OCONV:
n := n.(*ir.ConvExpr)
checkwidth(n.Type()) // ensure width is calculated for backend
n.SetLeft(typecheck(n.Left(), ctxExpr))
n.SetLeft(convlit1(n.Left(), n.Type(), true, nil))
@ -1761,7 +1782,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
case ir.OSTR2RUNES:
if n.Left().Op() == ir.OLITERAL {
n = stringtoruneslit(n)
return stringtoruneslit(n)
}
}
return n
@ -1871,8 +1892,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
}
nn.SetType(t)
n = nn
return n
return nn
case ir.ONEW:
if n.Left() == nil {
@ -1980,6 +2000,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
// statements
case ir.OAS:
n := n.(*ir.AssignStmt)
typecheckas(n)
// Code that creates temps does not bother to set defn, so do it here.
@ -1989,7 +2010,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
return n
case ir.OAS2:
typecheckas2(n)
typecheckas2(n.(*ir.AssignListStmt))
return n
case ir.OBREAK,
@ -2016,6 +2037,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
return n
case ir.ODEFER, ir.OGO:
n := n.(*ir.GoDeferStmt)
n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr))
if !n.Left().Diag() {
checkdefergo(n)
@ -2073,15 +2095,15 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
return n
case ir.OSELECT:
typecheckselect(n)
typecheckselect(n.(*ir.SelectStmt))
return n
case ir.OSWITCH:
typecheckswitch(n)
typecheckswitch(n.(*ir.SwitchStmt))
return n
case ir.ORANGE:
typecheckrange(n)
typecheckrange(n.(*ir.RangeStmt))
return n
case ir.OTYPESW:
@ -2109,13 +2131,26 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
}
func typecheckargs(n ir.Node) {
if n.List().Len() != 1 || n.IsDDD() {
typecheckslice(n.List().Slice(), ctxExpr)
var list []ir.Node
switch n := n.(type) {
default:
base.Fatalf("typecheckargs %+v", n.Op())
case *ir.CallExpr:
list = n.List().Slice()
if n.IsDDD() {
typecheckslice(list, ctxExpr)
return
}
case *ir.ReturnStmt:
list = n.List().Slice()
}
if len(list) != 1 {
typecheckslice(list, ctxExpr)
return
}
typecheckslice(n.List().Slice(), ctxExpr|ctxMultiOK)
t := n.List().First().Type()
typecheckslice(list, ctxExpr|ctxMultiOK)
t := list[0].Type()
if t == nil || !t.IsFuncArgStruct() {
return
}
@ -2128,7 +2163,7 @@ func typecheckargs(n ir.Node) {
}
as := ir.Nod(ir.OAS2, nil, nil)
as.PtrRlist().AppendNodes(n.PtrList())
as.PtrRlist().Append(list...)
// If we're outside of function context, then this call will
// be executed during the generated init function. However,
@ -2139,18 +2174,25 @@ func typecheckargs(n ir.Node) {
if static {
Curfn = initTodo
}
list = nil
for _, f := range t.FieldSlice() {
t := temp(f.Type)
as.PtrInit().Append(ir.Nod(ir.ODCL, t, nil))
as.PtrList().Append(t)
n.PtrList().Append(t)
list = append(list, t)
}
if static {
Curfn = nil
}
as = typecheck(as, ctxStmt)
n.PtrInit().Append(as)
switch n := n.(type) {
case *ir.CallExpr:
n.PtrList().Set(list)
case *ir.ReturnStmt:
n.PtrList().Set(list)
}
n.PtrInit().Append(typecheck(as, ctxStmt))
}
func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool {
@ -2192,7 +2234,7 @@ func checksliceconst(lo ir.Node, hi ir.Node) bool {
return true
}
func checkdefergo(n ir.Node) {
func checkdefergo(n *ir.GoDeferStmt) {
what := "defer"
if n.Op() == ir.OGO {
what = "go"
@ -2260,13 +2302,12 @@ func implicitstar(n ir.Node) ir.Node {
if !t.IsArray() {
return n
}
n = ir.Nod(ir.ODEREF, n, nil)
n.SetImplicit(true)
n = typecheck(n, ctxExpr)
return n
star := ir.Nod(ir.ODEREF, n, nil)
star.SetImplicit(true)
return typecheck(star, ctxExpr)
}
func needOneArg(n ir.Node, f string, args ...interface{}) (ir.Node, bool) {
func needOneArg(n *ir.CallExpr, f string, args ...interface{}) (ir.Node, bool) {
if n.List().Len() == 0 {
p := fmt.Sprintf(f, args...)
base.Errorf("missing argument to %s: %v", p, n)
@ -2282,7 +2323,7 @@ func needOneArg(n ir.Node, f string, args ...interface{}) (ir.Node, bool) {
return n.List().First(), true
}
func needTwoArgs(n ir.Node) (ir.Node, ir.Node, bool) {
func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) {
if n.List().Len() != 2 {
if n.List().Len() < 2 {
base.Errorf("not enough arguments in call to %v", n)
@ -2325,7 +2366,7 @@ func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, do
// typecheckMethodExpr checks selector expressions (ODOT) where the
// base expression is a type expression (OTYPE).
func typecheckMethodExpr(n ir.Node) (res ir.Node) {
func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckMethodExpr", n)(&res)
}
@ -2378,16 +2419,16 @@ func typecheckMethodExpr(n ir.Node) (res ir.Node) {
return n
}
me := ir.NodAt(n.Pos(), ir.OMETHEXPR, n.Left(), NewName(n.Sym()))
me.SetSym(methodSym(t, n.Sym()))
me := ir.NewMethodExpr(n.Pos(), n.Left().Type(), m)
me.SetType(methodfunc(m.Type, n.Left().Type()))
me.SetOffset(0)
me.SetClass(ir.PFUNC)
me.(*ir.MethodExpr).Method = m
f := NewName(methodSym(t, m.Sym))
f.SetClass(ir.PFUNC)
f.SetType(me.Type())
me.FuncName_ = f
// Issue 25065. Make sure that we emit the symbol for a local method.
if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == types.LocalPkg) {
makefuncsym(me.Sym())
makefuncsym(me.FuncName_.Sym())
}
return me
@ -2408,7 +2449,7 @@ func derefall(t *types.Type) *types.Type {
return t
}
func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field {
func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field {
s := n.Sym()
dowidth(t)
@ -2440,14 +2481,14 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field {
n.SetType(f1.Type)
if t.IsInterface() {
if n.Left().Type().IsPtr() {
n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil)) // implicitstar
n.Left().SetImplicit(true)
n.SetLeft(typecheck(n.Left(), ctxExpr))
star := ir.Nod(ir.ODEREF, n.Left(), nil)
star.SetImplicit(true)
n.SetLeft(typecheck(star, ctxExpr))
}
n.SetOp(ir.ODOTINTER)
}
n.(*ir.SelectorExpr).Selection = f1
n.Selection = f1
return f1
}
@ -2462,13 +2503,13 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field {
if !types.Identical(rcvr, tt) {
if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
checklvalue(n.Left(), "call pointer method on")
n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil))
n.Left().SetImplicit(true)
n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
addr := nodAddr(n.Left())
addr.SetImplicit(true)
n.SetLeft(typecheck(addr, ctxType|ctxExpr))
} else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil))
n.Left().SetImplicit(true)
n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
star := ir.Nod(ir.ODEREF, n.Left(), nil)
star.SetImplicit(true)
n.SetLeft(typecheck(star, ctxType|ctxExpr))
} else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sym(), n.Left())
for tt.IsPtr() {
@ -2476,9 +2517,9 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field {
if rcvr.IsPtr() && !tt.Elem().IsPtr() {
break
}
n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil))
n.Left().SetImplicit(true)
n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
star := ir.Nod(ir.ODEREF, n.Left(), nil)
star.SetImplicit(true)
n.SetLeft(typecheck(star, ctxType|ctxExpr))
tt = tt.Elem()
}
} else {
@ -2486,13 +2527,16 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field {
}
}
pll := n
ll := n.Left()
for ll.Left() != nil && (ll.Op() == ir.ODOT || ll.Op() == ir.ODOTPTR || ll.Op() == ir.ODEREF) {
pll = ll
ll = ll.Left()
implicit, ll := n.Implicit(), n.Left()
for ll != nil && (ll.Op() == ir.ODOT || ll.Op() == ir.ODOTPTR || ll.Op() == ir.ODEREF) {
switch l := ll.(type) {
case *ir.SelectorExpr:
implicit, ll = l.Implicit(), l.Left()
case *ir.StarExpr:
implicit, ll = l.Implicit(), l.Left()
}
}
if pll.Implicit() && ll.Type().IsPtr() && ll.Type().Sym() != nil && ll.Type().Sym().Def != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE {
if implicit && ll.Type().IsPtr() && ll.Type().Sym() != nil && ll.Type().Sym().Def != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE {
// It is invalid to automatically dereference a named pointer type when selecting a method.
// Make n.Left == ll to clarify error message.
n.SetLeft(ll)
@ -2503,7 +2547,7 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field {
n.SetOffset(f2.Offset)
n.SetType(f2.Type)
n.SetOp(ir.ODOTMETH)
n.(*ir.SelectorExpr).Selection = f2
n.Selection = f2
return f2
}
@ -2733,8 +2777,12 @@ func iscomptype(t *types.Type) bool {
// pushtype adds elided type information for composite literals if
// appropriate, and returns the resulting expression.
func pushtype(n ir.Node, t *types.Type) ir.Node {
if n == nil || n.Op() != ir.OCOMPLIT || n.Right() != nil {
func pushtype(nn ir.Node, t *types.Type) ir.Node {
if nn == nil || nn.Op() != ir.OCOMPLIT {
return nn
}
n := nn.(*ir.CompLitExpr)
if n.Right() != nil {
return n
}
@ -2747,16 +2795,16 @@ func pushtype(n ir.Node, t *types.Type) ir.Node {
// For *T, return &T{...}.
n.SetRight(ir.TypeNode(t.Elem()))
n = ir.NodAt(n.Pos(), ir.OADDR, n, nil)
n.SetImplicit(true)
addr := nodAddrAt(n.Pos(), n)
addr.SetImplicit(true)
return addr
}
return n
}
// The result of typecheckcomplit MUST be assigned back to n, e.g.
// n.Left = typecheckcomplit(n.Left)
func typecheckcomplit(n ir.Node) (res ir.Node) {
func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckcomplit", n)(&res)
}
@ -2773,7 +2821,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) {
}
// Save original node (including n.Right)
n.(ir.OrigNode).SetOrig(ir.Copy(n))
n.SetOrig(ir.Copy(n))
setlineno(n.Right())
@ -2824,6 +2872,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) {
base.Errorf("missing key in map literal")
continue
}
l := l.(*ir.KeyExpr)
r := l.Left()
r = pushtype(r, t.Key())
@ -2867,9 +2916,9 @@ func typecheckcomplit(n ir.Node) (res ir.Node) {
}
// No pushtype allowed here. Must name fields for that.
n1 = assignconv(n1, f.Type, "field value")
n1 = nodSym(ir.OSTRUCTKEY, n1, f.Sym)
n1.SetOffset(f.Offset)
ls[i] = n1
sk := nodSym(ir.OSTRUCTKEY, n1, f.Sym)
sk.SetOffset(f.Offset)
ls[i] = sk
}
if len(ls) < t.NumFields() {
base.Errorf("too few values in %v", n)
@ -2883,33 +2932,28 @@ func typecheckcomplit(n ir.Node) (res ir.Node) {
setlineno(l)
if l.Op() == ir.OKEY {
key := l.Left()
sk := ir.NewStructKeyExpr(l.Pos(), nil, l.Right())
ls[i] = sk
l = sk
// An OXDOT uses the Sym field to hold
// the field to the right of the dot,
// so s will be non-nil, but an OXDOT
// is never a valid struct literal key.
if key.Sym() == nil || key.Op() == ir.OXDOT || key.Sym().IsBlank() {
base.Errorf("invalid field name %v in struct initializer", key)
sk.SetLeft(typecheck(sk.Left(), ctxExpr))
continue
}
kv := l.(*ir.KeyExpr)
key := kv.Left()
// Sym might have resolved to name in other top-level
// package, because of import dot. Redirect to correct sym
// before we do the lookup.
s := key.Sym()
if s.Pkg != types.LocalPkg && types.IsExported(s.Name) {
s1 := lookup(s.Name)
if s1.Origpkg == s.Pkg {
s = s1
}
if id, ok := key.(*ir.Ident); ok && dotImportRefs[id] != nil {
s = lookup(s.Name)
}
sk.SetSym(s)
// An OXDOT uses the Sym field to hold
// the field to the right of the dot,
// so s will be non-nil, but an OXDOT
// is never a valid struct literal key.
if s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank() {
base.Errorf("invalid field name %v in struct initializer", key)
continue
}
l = ir.NewStructKeyExpr(l.Pos(), s, kv.Right())
ls[i] = l
}
if l.Op() != ir.OSTRUCTKEY {
@ -2920,6 +2964,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) {
ls[i] = typecheck(ls[i], ctxExpr)
continue
}
l := l.(*ir.StructKeyExpr)
f := lookdot1(nil, l.Sym(), t, t.Fields(), 0)
if f == nil {
@ -2980,8 +3025,9 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx st
for i, elt := range elts {
setlineno(elt)
r := elts[i]
var kv ir.Node
var kv *ir.KeyExpr
if elt.Op() == ir.OKEY {
elt := elt.(*ir.KeyExpr)
elt.SetLeft(typecheck(elt.Left(), ctxExpr))
key = indexconst(elt.Left())
if key < 0 {
@ -3064,6 +3110,9 @@ func islvalue(n ir.Node) bool {
return false
}
return true
case ir.ONAMEOFFSET:
return true
}
return false
@ -3101,9 +3150,9 @@ func checkassign(stmt ir.Node, n ir.Node) {
}
switch {
case n.Op() == ir.ODOT && n.Left().Op() == ir.OINDEXMAP:
case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).Left().Op() == ir.OINDEXMAP:
base.Errorf("cannot assign to struct field %v in map", n)
case (n.Op() == ir.OINDEX && n.Left().Type().IsString()) || n.Op() == ir.OSLICESTR:
case (n.Op() == ir.OINDEX && n.(*ir.IndexExpr).Left().Type().IsString()) || n.Op() == ir.OSLICESTR:
base.Errorf("cannot assign to %v (strings are immutable)", n)
case n.Op() == ir.OLITERAL && n.Sym() != nil && isGoConst(n):
base.Errorf("cannot assign to %v (declared const)", n)
@ -3144,19 +3193,40 @@ func samesafeexpr(l ir.Node, r ir.Node) bool {
return l == r
case ir.ODOT, ir.ODOTPTR:
l := l.(*ir.SelectorExpr)
r := r.(*ir.SelectorExpr)
return l.Sym() != nil && r.Sym() != nil && l.Sym() == r.Sym() && samesafeexpr(l.Left(), r.Left())
case ir.ODEREF, ir.OCONVNOP,
ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG:
case ir.ODEREF:
l := l.(*ir.StarExpr)
r := r.(*ir.StarExpr)
return samesafeexpr(l.Left(), r.Left())
case ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG:
l := l.(*ir.UnaryExpr)
r := r.(*ir.UnaryExpr)
return samesafeexpr(l.Left(), r.Left())
case ir.OCONVNOP:
l := l.(*ir.ConvExpr)
r := r.(*ir.ConvExpr)
return samesafeexpr(l.Left(), r.Left())
case ir.OCONV:
l := l.(*ir.ConvExpr)
r := r.(*ir.ConvExpr)
// Some conversions can't be reused, such as []byte(str).
// Allow only numeric-ish types. This is a bit conservative.
return issimple[l.Type().Kind()] && samesafeexpr(l.Left(), r.Left())
case ir.OINDEX, ir.OINDEXMAP,
ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
case ir.OINDEX, ir.OINDEXMAP:
l := l.(*ir.IndexExpr)
r := r.(*ir.IndexExpr)
return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right())
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
l := l.(*ir.BinaryExpr)
r := r.(*ir.BinaryExpr)
return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right())
case ir.OLITERAL:
@ -3172,7 +3242,7 @@ func samesafeexpr(l ir.Node, r ir.Node) bool {
// type check assignment.
// if this assignment is the definition of a var on the left side,
// fill in the var's type.
func typecheckas(n ir.Node) {
func typecheckas(n *ir.AssignStmt) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckas", n)(nil)
}
@ -3196,7 +3266,7 @@ func typecheckas(n ir.Node) {
checkassign(n, n.Left())
if n.Right() != nil && n.Right().Type() != nil {
if n.Right().Type().IsFuncArgStruct() {
base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right().Left(), n.Right().Type().NumFields())
base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right().(*ir.CallExpr).Left(), n.Right().Type().NumFields())
// Multi-value RHS isn't actually valid for OAS; nil out
// to indicate failed typechecking.
n.Right().SetType(nil)
@ -3230,7 +3300,7 @@ func checkassignto(src *types.Type, dst ir.Node) {
}
}
func typecheckas2(n ir.Node) {
func typecheckas2(n *ir.AssignListStmt) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckas2", n)(nil)
}
@ -3397,7 +3467,7 @@ func typecheckfunc(n *ir.Func) {
// The result of stringtoruneslit MUST be assigned back to n, e.g.
// n.Left = stringtoruneslit(n.Left)
func stringtoruneslit(n ir.Node) ir.Node {
func stringtoruneslit(n *ir.ConvExpr) ir.Node {
if n.Left().Op() != ir.OLITERAL || n.Left().Val().Kind() != constant.String {
base.Fatalf("stringtoarraylit %v", n)
}
@ -3411,8 +3481,7 @@ func stringtoruneslit(n ir.Node) ir.Node {
nn := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(n.Type()))
nn.PtrList().Set(l)
nn = typecheck(nn, ctxExpr)
return nn
return typecheck(nn, ctxExpr)
}
var mapqueue []*ir.MapType
@ -3681,19 +3750,25 @@ func markBreak(fn *ir.Func) {
case ir.OBREAK:
if n.Sym() == nil {
if implicit != nil {
implicit.SetHasBreak(true)
}
setHasBreak(implicit)
} else {
if lab := labels[n.Sym()]; lab != nil {
lab.SetHasBreak(true)
}
setHasBreak(labels[n.Sym()])
}
case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE:
case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT, ir.ORANGE:
old := implicit
implicit = n
sym := n.Sym()
var sym *types.Sym
switch n := n.(type) {
case *ir.ForStmt:
sym = n.Sym()
case *ir.RangeStmt:
sym = n.Sym()
case *ir.SelectStmt:
sym = n.Sym()
case *ir.SwitchStmt:
sym = n.Sym()
}
if sym != nil {
if labels == nil {
// Map creation delayed until we need it - most functions don't.
@ -3713,6 +3788,39 @@ func markBreak(fn *ir.Func) {
mark(fn)
}
func controlLabel(n ir.Node) *types.Sym {
switch n := n.(type) {
default:
base.Fatalf("controlLabel %+v", n.Op())
return nil
case *ir.ForStmt:
return n.Sym()
case *ir.RangeStmt:
return n.Sym()
case *ir.SelectStmt:
return n.Sym()
case *ir.SwitchStmt:
return n.Sym()
}
}
func setHasBreak(n ir.Node) {
switch n := n.(type) {
default:
base.Fatalf("setHasBreak %+v", n.Op())
case nil:
// ignore
case *ir.ForStmt:
n.SetHasBreak(true)
case *ir.RangeStmt:
n.SetHasBreak(true)
case *ir.SelectStmt:
n.SetHasBreak(true)
case *ir.SwitchStmt:
n.SetHasBreak(true)
}
}
// isTermNodes reports whether the Nodes list ends with a terminating statement.
func isTermNodes(l ir.Nodes) bool {
s := l.Slice()
@ -3750,23 +3858,32 @@ func isTermNode(n ir.Node) bool {
case ir.OIF:
return isTermNodes(n.Body()) && isTermNodes(n.Rlist())
case ir.OSWITCH, ir.OTYPESW, ir.OSELECT:
case ir.OSWITCH:
if n.HasBreak() {
return false
}
def := false
for _, n1 := range n.List().Slice() {
if !isTermNodes(n1.Body()) {
for _, cas := range n.List().Slice() {
cas := cas.(*ir.CaseStmt)
if !isTermNodes(cas.Body()) {
return false
}
if n1.List().Len() == 0 { // default
if cas.List().Len() == 0 { // default
def = true
}
}
return def
if n.Op() != ir.OSELECT && !def {
case ir.OSELECT:
if n.HasBreak() {
return false
}
for _, cas := range n.List().Slice() {
cas := cas.(*ir.CaseStmt)
if !isTermNodes(cas.Body()) {
return false
}
}
return true
}
@ -3913,7 +4030,7 @@ func deadcodeexpr(n ir.Node) ir.Node {
func getIotaValue() int64 {
if i := len(typecheckdefstack); i > 0 {
if x := typecheckdefstack[i-1]; x.Op() == ir.OLITERAL {
return x.Iota()
return x.(*ir.Name).Iota()
}
}
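A recurring shape in the typecheck changes above (setHasBreak, controlLabel, typecheckargs) is replacing a generic accessor on ir.Node with a type switch over the concrete statement types that actually carry the field. A minimal sketch of that pattern, with hypothetical Node/ForStmt/SwitchStmt types rather than the real ir package:

package main

import "fmt"

type Node interface{ Op() string }

type ForStmt struct{ hasBreak bool }
type SwitchStmt struct{ hasBreak bool }

func (*ForStmt) Op() string    { return "OFOR" }
func (*SwitchStmt) Op() string { return "OSWITCH" }

// setHasBreak mirrors the shape of the compiler helper of the same name:
// only the node kinds that have the flag are handled, everything else
// is a hard error.
func setHasBreak(n Node) {
	switch n := n.(type) {
	case nil:
		// ignore
	case *ForStmt:
		n.hasBreak = true
	case *SwitchStmt:
		n.hasBreak = true
	default:
		panic(fmt.Sprintf("setHasBreak %v", n.Op()))
	}
}

func main() {
	f := &ForStmt{}
	setHasBreak(f)
	fmt.Println(f.hasBreak) // true
}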

View File

@ -152,23 +152,27 @@ func initUniverse() {
for _, s := range &builtinFuncs {
s2 := types.BuiltinPkg.Lookup(s.name)
s2.Def = NewName(s2)
ir.AsNode(s2.Def).SetSubOp(s.op)
def := NewName(s2)
def.SetSubOp(s.op)
s2.Def = def
}
for _, s := range &unsafeFuncs {
s2 := unsafepkg.Lookup(s.name)
s2.Def = NewName(s2)
ir.AsNode(s2.Def).SetSubOp(s.op)
def := NewName(s2)
def.SetSubOp(s.op)
s2.Def = def
}
s = types.BuiltinPkg.Lookup("true")
s.Def = nodbool(true)
ir.AsNode(s.Def).SetSym(lookup("true"))
b := nodbool(true)
b.(*ir.Name).SetSym(lookup("true"))
s.Def = b
s = types.BuiltinPkg.Lookup("false")
s.Def = nodbool(false)
ir.AsNode(s.Def).SetSym(lookup("false"))
b = nodbool(false)
b.(*ir.Name).SetSym(lookup("false"))
s.Def = b
s = lookup("_")
types.BlankSym = s
@ -187,8 +191,9 @@ func initUniverse() {
types.Types[types.TNIL] = types.New(types.TNIL)
s = types.BuiltinPkg.Lookup("nil")
s.Def = nodnil()
ir.AsNode(s.Def).SetSym(s)
nnil := nodnil()
nnil.(*ir.NilExpr).SetSym(s)
s.Def = nnil
s = types.BuiltinPkg.Lookup("iota")
s.Def = ir.NewIota(base.Pos, s)
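The universe changes above swap the ir.AsNode(s2.Def) dance for building the *ir.Name first and installing it as the symbol's definition. A tiny, hypothetical sketch of that registration pattern (a map stands in for sym.Def; the Op values and Name type are illustrative only):

package main

import "fmt"

type Op int

const (
	OAPPEND Op = iota + 1
	OLEN
	OCAP
)

type Name struct {
	sym   string
	subOp Op
}

var defs = map[string]*Name{} // stands in for sym.Def

// register builds the definition node, tags it with the builtin op,
// then installs it, in that order.
func register(name string, op Op) {
	def := &Name{sym: name}
	def.subOp = op // SetSubOp in the real code
	defs[name] = def
}

func main() {
	builtins := []struct {
		name string
		op   Op
	}{{"append", OAPPEND}, {"len", OLEN}, {"cap", OCAP}}
	for _, b := range builtins {
		register(b.name, b.op)
	}
	fmt.Println(defs["len"].subOp == OLEN) // true
}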

View File

@ -31,18 +31,20 @@ func evalunsafe(n ir.Node) int64 {
base.Errorf("invalid expression %v", n)
return 0
}
sel := n.Left().(*ir.SelectorExpr)
// Remember base of selector to find it back after dot insertion.
// Since r->left may be mutated by typechecking, check it explicitly
// first to track it correctly.
n.Left().SetLeft(typecheck(n.Left().Left(), ctxExpr))
sbase := n.Left().Left()
sel.SetLeft(typecheck(sel.Left(), ctxExpr))
sbase := sel.Left()
n.SetLeft(typecheck(n.Left(), ctxExpr))
if n.Left().Type() == nil {
tsel := typecheck(sel, ctxExpr)
n.SetLeft(tsel)
if tsel.Type() == nil {
return 0
}
switch n.Left().Op() {
switch tsel.Op() {
case ir.ODOT, ir.ODOTPTR:
break
case ir.OCALLPART:
@ -55,7 +57,8 @@ func evalunsafe(n ir.Node) int64 {
// Sum offsets for dots until we reach sbase.
var v int64
for r := n.Left(); r != sbase; r = r.Left() {
var next ir.Node
for r := tsel; r != sbase; r = next {
switch r.Op() {
case ir.ODOTPTR:
// For Offsetof(s.f), s may itself be a pointer,
@ -68,8 +71,9 @@ func evalunsafe(n ir.Node) int64 {
fallthrough
case ir.ODOT:
v += r.Offset()
next = r.Left()
default:
ir.Dump("unsafenmagic", n.Left())
ir.Dump("unsafenmagic", tsel)
base.Fatalf("impossible %v node after dot insertion", r.Op())
}
}
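The "sum offsets for dots" loop above computes unsafe.Offsetof for a nested selector by adding each field's offset until it reaches the selector base. The same arithmetic can be checked from ordinary Go with the reflect package; this standalone illustration (Inner/Outer are made-up types, not anything from the compiler) shows the sum matching unsafe.Offsetof:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type Inner struct {
	pad int32
	B   int64
}

type Outer struct {
	pad int16
	A   Inner
}

func main() {
	t := reflect.TypeOf(Outer{})
	a, _ := t.FieldByName("A")
	b, _ := a.Type.FieldByName("B")
	sum := a.Offset + b.Offset // offsets summed along the selector chain

	var s Outer
	fmt.Println(sum == unsafe.Offsetof(s.A.B)) // true
}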

File diff suppressed because it is too large

View File

@ -64,12 +64,6 @@ func Copy(n Node) Node {
return c
}
func copyList(x Nodes) Nodes {
c := make([]Node, x.Len())
copy(c, x.Slice())
return AsNodes(c)
}
// DeepCopy returns a “deep” copy of n, with its entire structure copied
// (except for shared nodes like ONAME, ONONAME, OLITERAL, and OTYPE).
// If pos.IsKnown(), it sets the source position of newly allocated Nodes to pos.

View File

@ -140,15 +140,8 @@ func (p *dumper) dump(x reflect.Value, depth int) {
return
}
// special cases
switch v := x.Interface().(type) {
case Nodes:
// unpack Nodes since reflect cannot look inside
// due to the unexported field in its struct
x = reflect.ValueOf(v.Slice())
case src.XPos:
p.printf("%s", base.FmtPos(v))
if pos, ok := x.Interface().(src.XPos); ok {
p.printf("%s", base.FmtPos(pos))
return
}

View File

@ -52,10 +52,10 @@ type miniExpr struct {
const (
miniExprHasCall = 1 << iota
miniExprImplicit
miniExprNonNil
miniExprTransient
miniExprBounded
miniExprImplicit // for use by implementations; not supported by every Expr
)
func (*miniExpr) isExpr() {}
@ -66,8 +66,6 @@ func (n *miniExpr) Opt() interface{} { return n.opt }
func (n *miniExpr) SetOpt(x interface{}) { n.opt = x }
func (n *miniExpr) HasCall() bool { return n.flags&miniExprHasCall != 0 }
func (n *miniExpr) SetHasCall(b bool) { n.flags.set(miniExprHasCall, b) }
func (n *miniExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *miniExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
func (n *miniExpr) NonNil() bool { return n.flags&miniExprNonNil != 0 }
func (n *miniExpr) MarkNonNil() { n.flags |= miniExprNonNil }
func (n *miniExpr) Transient() bool { return n.flags&miniExprTransient != 0 }
@ -91,7 +89,8 @@ func toNtype(x Node) Ntype {
// An AddStringExpr is a string concatenation Expr[0] + Exprs[1] + ... + Expr[len(Expr)-1].
type AddStringExpr struct {
miniExpr
List_ Nodes
List_ Nodes
Prealloc *Name
}
func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr {
@ -121,10 +120,12 @@ func NewAddrExpr(pos src.XPos, x Node) *AddrExpr {
return n
}
func (n *AddrExpr) Left() Node { return n.X }
func (n *AddrExpr) SetLeft(x Node) { n.X = x }
func (n *AddrExpr) Right() Node { return n.Alloc }
func (n *AddrExpr) SetRight(x Node) { n.Alloc = x }
func (n *AddrExpr) Left() Node { return n.X }
func (n *AddrExpr) SetLeft(x Node) { n.X = x }
func (n *AddrExpr) Right() Node { return n.Alloc }
func (n *AddrExpr) SetRight(x Node) { n.Alloc = x }
func (n *AddrExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *AddrExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
func (n *AddrExpr) SetOp(op Op) {
switch op {
@ -233,9 +234,10 @@ func (n *CallExpr) SetOp(op Op) {
// A CallPartExpr is a method expression X.Method (uncalled).
type CallPartExpr struct {
miniExpr
Func_ *Func
X Node
Method *types.Field
Func_ *Func
X Node
Method *types.Field
Prealloc *Name
}
func NewCallPartExpr(pos src.XPos, x Node, method *types.Field, fn *Func) *CallPartExpr {
@ -255,7 +257,8 @@ func (n *CallPartExpr) SetLeft(x Node) { n.X = x }
// A ClosureExpr is a function literal expression.
type ClosureExpr struct {
miniExpr
Func_ *Func
Func_ *Func
Prealloc *Name
}
func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr {
@ -287,9 +290,10 @@ func (n *ClosureReadExpr) Offset() int64 { return n.Offset_ }
// Before type-checking, the type is Ntype.
type CompLitExpr struct {
miniExpr
orig Node
Ntype Ntype
List_ Nodes // initialized values
orig Node
Ntype Ntype
List_ Nodes // initialized values
Prealloc *Name
}
func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr {
@ -301,13 +305,15 @@ func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr {
return n
}
func (n *CompLitExpr) Orig() Node { return n.orig }
func (n *CompLitExpr) SetOrig(x Node) { n.orig = x }
func (n *CompLitExpr) Right() Node { return n.Ntype }
func (n *CompLitExpr) SetRight(x Node) { n.Ntype = toNtype(x) }
func (n *CompLitExpr) List() Nodes { return n.List_ }
func (n *CompLitExpr) PtrList() *Nodes { return &n.List_ }
func (n *CompLitExpr) SetList(x Nodes) { n.List_ = x }
func (n *CompLitExpr) Orig() Node { return n.orig }
func (n *CompLitExpr) SetOrig(x Node) { n.orig = x }
func (n *CompLitExpr) Right() Node { return n.Ntype }
func (n *CompLitExpr) SetRight(x Node) { n.Ntype = toNtype(x) }
func (n *CompLitExpr) List() Nodes { return n.List_ }
func (n *CompLitExpr) PtrList() *Nodes { return &n.List_ }
func (n *CompLitExpr) SetList(x Nodes) { n.List_ = x }
func (n *CompLitExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *CompLitExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
func (n *CompLitExpr) SetOp(op Op) {
switch op {
@ -354,8 +360,10 @@ func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr {
return n
}
func (n *ConvExpr) Left() Node { return n.X }
func (n *ConvExpr) SetLeft(x Node) { n.X = x }
func (n *ConvExpr) Left() Node { return n.X }
func (n *ConvExpr) SetLeft(x Node) { n.X = x }
func (n *ConvExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *ConvExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
func (n *ConvExpr) SetOp(op Op) {
switch op {
@ -522,35 +530,31 @@ func (n *MakeExpr) SetOp(op Op) {
}
}
// A MethodExpr is a method value X.M (where X is an expression, not a type).
// A MethodExpr is a method expression T.M (where T is a type).
type MethodExpr struct {
miniExpr
X Node
M Node
Sym_ *types.Sym
Offset_ int64
Class_ Class
Method *types.Field
T *types.Type
Method *types.Field
FuncName_ *Name
}
func NewMethodExpr(pos src.XPos, x, m Node) *MethodExpr {
n := &MethodExpr{X: x, M: m}
func NewMethodExpr(pos src.XPos, t *types.Type, method *types.Field) *MethodExpr {
n := &MethodExpr{T: t, Method: method}
n.pos = pos
n.op = OMETHEXPR
n.Offset_ = types.BADWIDTH
return n
}
func (n *MethodExpr) Left() Node { return n.X }
func (n *MethodExpr) SetLeft(x Node) { n.X = x }
func (n *MethodExpr) Right() Node { return n.M }
func (n *MethodExpr) SetRight(y Node) { n.M = y }
func (n *MethodExpr) Sym() *types.Sym { return n.Sym_ }
func (n *MethodExpr) SetSym(x *types.Sym) { n.Sym_ = x }
func (n *MethodExpr) Offset() int64 { return n.Offset_ }
func (n *MethodExpr) SetOffset(x int64) { n.Offset_ = x }
func (n *MethodExpr) Class() Class { return n.Class_ }
func (n *MethodExpr) SetClass(x Class) { n.Class_ = x }
func (n *MethodExpr) FuncName() *Name { return n.FuncName_ }
func (n *MethodExpr) Left() Node { panic("MethodExpr.Left") }
func (n *MethodExpr) SetLeft(x Node) { panic("MethodExpr.SetLeft") }
func (n *MethodExpr) Right() Node { panic("MethodExpr.Right") }
func (n *MethodExpr) SetRight(x Node) { panic("MethodExpr.SetRight") }
func (n *MethodExpr) Sym() *types.Sym { panic("MethodExpr.Sym") }
func (n *MethodExpr) Offset() int64 { panic("MethodExpr.Offset") }
func (n *MethodExpr) SetOffset(x int64) { panic("MethodExpr.SetOffset") }
func (n *MethodExpr) Class() Class { panic("MethodExpr.Class") }
func (n *MethodExpr) SetClass(x Class) { panic("MethodExpr.SetClass") }
// A NilExpr represents the predefined untyped constant nil.
// (It may be copied and assigned a type, though.)
@ -583,8 +587,10 @@ func NewParenExpr(pos src.XPos, x Node) *ParenExpr {
return n
}
func (n *ParenExpr) Left() Node { return n.X }
func (n *ParenExpr) SetLeft(x Node) { n.X = x }
func (n *ParenExpr) Left() Node { return n.X }
func (n *ParenExpr) SetLeft(x Node) { n.X = x }
func (n *ParenExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *ParenExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
func (*ParenExpr) CanBeNtype() {}
@ -613,6 +619,21 @@ func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr {
func (n *ResultExpr) Offset() int64 { return n.Offset_ }
func (n *ResultExpr) SetOffset(x int64) { n.Offset_ = x }
// A NameOffsetExpr refers to an offset within a variable.
// It is like a SelectorExpr but without the field name.
type NameOffsetExpr struct {
miniExpr
Name_ *Name
Offset_ int64
}
func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *NameOffsetExpr {
n := &NameOffsetExpr{Name_: name, Offset_: offset}
n.typ = typ
n.op = ONAMEOFFSET
return n
}
// A SelectorExpr is a selector expression X.Sym.
type SelectorExpr struct {
miniExpr
@ -645,6 +666,8 @@ func (n *SelectorExpr) Sym() *types.Sym { return n.Sel }
func (n *SelectorExpr) SetSym(x *types.Sym) { n.Sel = x }
func (n *SelectorExpr) Offset() int64 { return n.Offset_ }
func (n *SelectorExpr) SetOffset(x int64) { n.Offset_ = x }
func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
// Before type-checking, bytes.Buffer is a SelectorExpr.
// After type-checking it becomes a Name.
@ -783,8 +806,10 @@ func NewStarExpr(pos src.XPos, x Node) *StarExpr {
return n
}
func (n *StarExpr) Left() Node { return n.X }
func (n *StarExpr) SetLeft(x Node) { n.X = x }
func (n *StarExpr) Left() Node { return n.X }
func (n *StarExpr) SetLeft(x Node) { n.X = x }
func (n *StarExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *StarExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
func (*StarExpr) CanBeNtype() {}
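The expr.go hunks above move Implicit/SetImplicit off miniExpr and onto only the expression kinds that support an implicit form, all sharing the same flag bit. A simplified sketch of that layout (bitset8, AddrExpr, BinaryExpr here are stand-ins, not the ir package's definitions):

package main

import "fmt"

type bitset8 uint8

func (f *bitset8) set(mask bitset8, b bool) {
	if b {
		*f |= mask
	} else {
		*f &^= mask
	}
}

const (
	miniExprHasCall bitset8 = 1 << iota
	miniExprNonNil
	miniExprImplicit // only meaningful for some expression kinds
)

type miniExpr struct{ flags bitset8 }

// AddrExpr supports the implicit form (&x inserted by the compiler),
// so it exposes accessors over the shared bit.
type AddrExpr struct{ miniExpr }

func (n *AddrExpr) Implicit() bool     { return n.flags&miniExprImplicit != 0 }
func (n *AddrExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }

// BinaryExpr deliberately exposes no Implicit accessor.
type BinaryExpr struct{ miniExpr }

func main() {
	addr := &AddrExpr{}
	addr.SetImplicit(true)
	fmt.Println(addr.Implicit()) // true
}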

View File

@ -9,6 +9,7 @@ import (
"fmt"
"go/constant"
"io"
"math"
"os"
"path/filepath"
"reflect"
@ -141,7 +142,7 @@ func FmtNode(n Node, s fmt.State, verb rune) {
}
if n == nil {
fmt.Fprint(s, "<N>")
fmt.Fprint(s, "<nil>")
return
}
@ -330,12 +331,14 @@ func stmtFmt(n Node, s fmt.State) {
switch n.Op() {
case ODCL:
n := n.(*Decl)
fmt.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type())
// Don't export "v = <N>" initializing statements, hope they're always
// preceded by the DCL which will be re-parsed and typechecked to reproduce
// the "v = <N>" again.
case OAS:
n := n.(*AssignStmt)
if n.Colas() && !complexinit {
fmt.Fprintf(s, "%v := %v", n.Left(), n.Right())
} else {
@ -343,6 +346,7 @@ func stmtFmt(n Node, s fmt.State) {
}
case OASOP:
n := n.(*AssignOpStmt)
if n.Implicit() {
if n.SubOp() == OADD {
fmt.Fprintf(s, "%v++", n.Left())
@ -355,6 +359,7 @@ func stmtFmt(n Node, s fmt.State) {
fmt.Fprintf(s, "%v %v= %v", n.Left(), n.SubOp(), n.Right())
case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
n := n.(*AssignListStmt)
if n.Colas() && !complexinit {
fmt.Fprintf(s, "%.v := %.v", n.List(), n.Rlist())
} else {
@ -362,26 +367,33 @@ func stmtFmt(n Node, s fmt.State) {
}
case OBLOCK:
n := n.(*BlockStmt)
if n.List().Len() != 0 {
fmt.Fprintf(s, "%v", n.List())
}
case ORETURN:
n := n.(*ReturnStmt)
fmt.Fprintf(s, "return %.v", n.List())
case ORETJMP:
n := n.(*BranchStmt)
fmt.Fprintf(s, "retjmp %v", n.Sym())
case OINLMARK:
n := n.(*InlineMarkStmt)
fmt.Fprintf(s, "inlmark %d", n.Offset())
case OGO:
n := n.(*GoDeferStmt)
fmt.Fprintf(s, "go %v", n.Left())
case ODEFER:
n := n.(*GoDeferStmt)
fmt.Fprintf(s, "defer %v", n.Left())
case OIF:
n := n.(*IfStmt)
if simpleinit {
fmt.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body())
} else {
@ -392,6 +404,7 @@ func stmtFmt(n Node, s fmt.State) {
}
case OFOR, OFORUNTIL:
n := n.(*ForStmt)
opname := "for"
if n.Op() == OFORUNTIL {
opname = "foruntil"
@ -425,6 +438,7 @@ func stmtFmt(n Node, s fmt.State) {
fmt.Fprintf(s, " { %v }", n.Body())
case ORANGE:
n := n.(*RangeStmt)
if !exportFormat {
fmt.Fprint(s, "for loop")
break
@ -437,23 +451,31 @@ func stmtFmt(n Node, s fmt.State) {
fmt.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body())
case OSELECT, OSWITCH:
case OSELECT:
n := n.(*SelectStmt)
if !exportFormat {
fmt.Fprintf(s, "%v statement", n.Op())
break
}
fmt.Fprintf(s, "select { %v }", n.List())
fmt.Fprintf(s, "%v", n.Op())
case OSWITCH:
n := n.(*SwitchStmt)
if !exportFormat {
fmt.Fprintf(s, "%v statement", n.Op())
break
}
fmt.Fprintf(s, "switch")
if simpleinit {
fmt.Fprintf(s, " %v;", n.Init().First())
}
if n.Left() != nil {
fmt.Fprintf(s, " %v ", n.Left())
}
fmt.Fprintf(s, " { %v }", n.List())
case OCASE:
n := n.(*CaseStmt)
if n.List().Len() != 0 {
fmt.Fprintf(s, "case %.v", n.List())
} else {
@ -462,6 +484,7 @@ func stmtFmt(n Node, s fmt.State) {
fmt.Fprintf(s, ": %v", n.Body())
case OBREAK, OCONTINUE, OGOTO, OFALL:
n := n.(*BranchStmt)
if n.Sym() != nil {
fmt.Fprintf(s, "%v %v", n.Op(), n.Sym())
} else {
@ -469,6 +492,7 @@ func stmtFmt(n Node, s fmt.State) {
}
case OLABEL:
n := n.(*LabelStmt)
fmt.Fprintf(s, "%v: ", n.Sym())
}
@ -488,7 +512,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
for {
if n == nil {
fmt.Fprint(s, "<N>")
fmt.Fprint(s, "<nil>")
return
}
@ -499,10 +523,23 @@ func exprFmt(n Node, s fmt.State, prec int) {
}
// Skip implicit operations introduced during typechecking.
switch n.Op() {
case OADDR, ODEREF, OCONV, OCONVNOP, OCONVIFACE:
if n.Implicit() {
n = n.Left()
switch nn := n; nn.Op() {
case OADDR:
nn := nn.(*AddrExpr)
if nn.Implicit() {
n = nn.Left()
continue
}
case ODEREF:
nn := nn.(*StarExpr)
if nn.Implicit() {
n = nn.Left()
continue
}
case OCONV, OCONVNOP, OCONVIFACE:
nn := nn.(*ConvExpr)
if nn.Implicit() {
n = nn.Left()
continue
}
}
@ -522,6 +559,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
switch n.Op() {
case OPAREN:
n := n.(*ParenExpr)
fmt.Fprintf(s, "(%v)", n.Left())
case ONIL:
@ -570,6 +608,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
}
case ODCLFUNC:
n := n.(*Func)
if sym := n.Sym(); sym != nil {
fmt.Fprint(s, sym)
return
@ -577,6 +616,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, "<unnamed Func>")
case ONAME:
n := n.(*Name)
// Special case: name used as local variable in export.
// _ becomes ~b%d internally; print as _ for export
if !exportFormat && n.Sym() != nil && n.Sym().Name[0] == '~' && n.Sym().Name[1] == 'b' {
@ -584,9 +624,17 @@ func exprFmt(n Node, s fmt.State, prec int) {
return
}
fallthrough
case OPACK, ONONAME, OMETHEXPR:
case OPACK, ONONAME:
fmt.Fprint(s, n.Sym())
case OMETHEXPR:
n := n.(*MethodExpr)
fmt.Fprint(s, n.FuncName().Sym())
case ONAMEOFFSET:
n := n.(*NameOffsetExpr)
fmt.Fprintf(s, "(%v)(%v@%d)", n.Type(), n.Name_, n.Offset_)
case OTYPE:
if n.Type() == nil && n.Sym() != nil {
fmt.Fprint(s, n.Sym())
@ -641,17 +689,15 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprint(s, "<func>")
case OCLOSURE:
n := n.(*ClosureExpr)
if !exportFormat {
fmt.Fprint(s, "func literal")
return
}
if n.Body().Len() != 0 {
fmt.Fprintf(s, "%v { %v }", n.Type(), n.Body())
return
}
fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func().Body())
case OCOMPLIT:
n := n.(*CompLitExpr)
if !exportFormat {
if n.Implicit() {
fmt.Fprintf(s, "... argument")
@ -668,9 +714,11 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, "(%v{ %.v })", n.Right(), n.List())
case OPTRLIT:
n := n.(*AddrExpr)
fmt.Fprintf(s, "&%v", n.Left())
case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
n := n.(*CompLitExpr)
if !exportFormat {
fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List().Len() != 0))
return
@ -678,6 +726,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List())
case OKEY:
n := n.(*KeyExpr)
if n.Left() != nil && n.Right() != nil {
fmt.Fprintf(s, "%v:%v", n.Left(), n.Right())
return
@ -694,9 +743,11 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprint(s, ":")
case OSTRUCTKEY:
n := n.(*StructKeyExpr)
fmt.Fprintf(s, "%v:%v", n.Sym(), n.Left())
case OCALLPART:
n := n.(*CallPartExpr)
exprFmt(n.Left(), s, nprec)
if n.Sym() == nil {
fmt.Fprint(s, ".<nil>")
@ -705,6 +756,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym()))
case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
n := n.(*SelectorExpr)
exprFmt(n.Left(), s, nprec)
if n.Sym() == nil {
fmt.Fprint(s, ".<nil>")
@ -713,6 +765,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym()))
case ODOTTYPE, ODOTTYPE2:
n := n.(*TypeAssertExpr)
exprFmt(n.Left(), s, nprec)
if n.Right() != nil {
fmt.Fprintf(s, ".(%v)", n.Right())
@ -721,10 +774,12 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, ".(%v)", n.Type())
case OINDEX, OINDEXMAP:
n := n.(*IndexExpr)
exprFmt(n.Left(), s, nprec)
fmt.Fprintf(s, "[%v]", n.Right())
case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
n := n.(*SliceExpr)
exprFmt(n.Left(), s, nprec)
fmt.Fprint(s, "[")
low, high, max := n.SliceBounds()
@ -744,17 +799,15 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprint(s, "]")
case OSLICEHEADER:
n := n.(*SliceHeaderExpr)
if n.List().Len() != 2 {
base.Fatalf("bad OSLICEHEADER list length %d", n.List().Len())
}
fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left(), n.List().First(), n.List().Second())
case OCOMPLEX, OCOPY:
if n.Left() != nil {
fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.Left(), n.Right())
} else {
fmt.Fprintf(s, "%v(%.v)", n.Op(), n.List())
}
n := n.(*BinaryExpr)
fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.Left(), n.Right())
case OCONV,
OCONVIFACE,
@ -764,37 +817,34 @@ func exprFmt(n Node, s fmt.State, prec int) {
OSTR2BYTES,
OSTR2RUNES,
ORUNESTR:
n := n.(*ConvExpr)
if n.Type() == nil || n.Type().Sym() == nil {
fmt.Fprintf(s, "(%v)", n.Type())
} else {
fmt.Fprintf(s, "%v", n.Type())
}
if n.Left() != nil {
fmt.Fprintf(s, "(%v)", n.Left())
} else {
fmt.Fprintf(s, "(%.v)", n.List())
}
fmt.Fprintf(s, "(%v)", n.Left())
case OREAL,
OIMAG,
OAPPEND,
OCAP,
OCLOSE,
ODELETE,
OLEN,
OMAKE,
ONEW,
OPANIC,
ORECOVER,
OALIGNOF,
OOFFSETOF,
OSIZEOF,
OSIZEOF:
n := n.(*UnaryExpr)
fmt.Fprintf(s, "%v(%v)", n.Op(), n.Left())
case OAPPEND,
ODELETE,
OMAKE,
ORECOVER,
OPRINT,
OPRINTN:
if n.Left() != nil {
fmt.Fprintf(s, "%v(%v)", n.Op(), n.Left())
return
}
n := n.(*CallExpr)
if n.IsDDD() {
fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.List())
return
@ -802,6 +852,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, "%v(%.v)", n.Op(), n.List())
case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
n := n.(*CallExpr)
exprFmt(n.Left(), s, nprec)
if n.IsDDD() {
fmt.Fprintf(s, "(%.v...)", n.List())
@ -810,10 +861,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, "(%.v)", n.List())
case OMAKEMAP, OMAKECHAN, OMAKESLICE:
if n.List().Len() != 0 { // pre-typecheck
fmt.Fprintf(s, "make(%v, %.v)", n.Type(), n.List())
return
}
n := n.(*MakeExpr)
if n.Right() != nil {
fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Left(), n.Right())
return
@ -825,20 +873,34 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, "make(%v)", n.Type())
case OMAKESLICECOPY:
n := n.(*MakeExpr)
fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Left(), n.Right())
case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
case OPLUS, ONEG, OBITNOT, ONOT, ORECV:
// Unary
n := n.(*UnaryExpr)
fmt.Fprintf(s, "%v", n.Op())
if n.Left() != nil && n.Left().Op() == n.Op() {
fmt.Fprint(s, " ")
}
exprFmt(n.Left(), s, nprec+1)
case OADDR:
n := n.(*AddrExpr)
fmt.Fprintf(s, "%v", n.Op())
if n.Left() != nil && n.Left().Op() == n.Op() {
fmt.Fprint(s, " ")
}
exprFmt(n.Left(), s, nprec+1)
case ODEREF:
n := n.(*StarExpr)
fmt.Fprintf(s, "%v", n.Op())
exprFmt(n.Left(), s, nprec+1)
// Binary
case OADD,
OAND,
OANDAND,
OANDNOT,
ODIV,
OEQ,
@ -851,16 +913,29 @@ func exprFmt(n Node, s fmt.State, prec int) {
OMUL,
ONE,
OOR,
OOROR,
ORSH,
OSEND,
OSUB,
OXOR:
n := n.(*BinaryExpr)
exprFmt(n.Left(), s, nprec)
fmt.Fprintf(s, " %v ", n.Op())
exprFmt(n.Right(), s, nprec+1)
case OANDAND,
OOROR:
n := n.(*LogicalExpr)
exprFmt(n.Left(), s, nprec)
fmt.Fprintf(s, " %v ", n.Op())
exprFmt(n.Right(), s, nprec+1)
case OSEND:
n := n.(*SendStmt)
exprFmt(n.Left(), s, nprec)
fmt.Fprintf(s, " <- ")
exprFmt(n.Right(), s, nprec+1)
case OADDSTR:
n := n.(*AddStringExpr)
for i, n1 := range n.List().Slice() {
if i != 0 {
fmt.Fprint(s, " + ")
@ -951,27 +1026,12 @@ func dumpNodeHeader(w io.Writer, n Node) {
if base.Debug.DumpPtrs != 0 {
fmt.Fprintf(w, " p(%p)", n)
}
if n.Name() != nil && n.Name().Vargen != 0 {
fmt.Fprintf(w, " g(%d)", n.Name().Vargen)
}
if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Defn != nil {
// Useful to see where Defn is set and what node it points to
fmt.Fprintf(w, " defn(%p)", n.Name().Defn)
}
if n.Offset() != types.BADWIDTH {
fmt.Fprintf(w, " x(%d)", n.Offset())
}
if n.Class() != 0 {
fmt.Fprintf(w, " class(%v)", n.Class())
}
if n.Colas() {
fmt.Fprintf(w, " colas(%v)", n.Colas())
}
if EscFmt != nil {
if esc := EscFmt(n); esc != "" {
fmt.Fprintf(w, " %s", esc)
@ -982,47 +1042,62 @@ func dumpNodeHeader(w io.Writer, n Node) {
fmt.Fprintf(w, " tc(%d)", n.Typecheck())
}
if n.IsDDD() {
fmt.Fprintf(w, " isddd(%v)", n.IsDDD())
// Print Node-specific fields of basic type in header line.
v := reflect.ValueOf(n).Elem()
t := v.Type()
nf := t.NumField()
for i := 0; i < nf; i++ {
tf := t.Field(i)
if tf.PkgPath != "" {
// skip unexported field - Interface will fail
continue
}
k := tf.Type.Kind()
if reflect.Bool <= k && k <= reflect.Complex128 {
name := strings.TrimSuffix(tf.Name, "_")
vf := v.Field(i)
vfi := vf.Interface()
if name == "Offset" && vfi == types.BADWIDTH || name != "Offset" && isZero(vf) {
continue
}
if vfi == true {
fmt.Fprintf(w, " %s", name)
} else {
fmt.Fprintf(w, " %s:%+v", name, vf.Interface())
}
}
}
if n.Implicit() {
fmt.Fprintf(w, " implicit(%v)", n.Implicit())
}
if n.Op() == ONAME {
if n.Name().Addrtaken() {
fmt.Fprint(w, " addrtaken")
// Print Node-specific booleans by looking for methods.
// Different v, t from above - want *Struct not Struct, for methods.
v = reflect.ValueOf(n)
t = v.Type()
nm := t.NumMethod()
for i := 0; i < nm; i++ {
tm := t.Method(i)
if tm.PkgPath != "" {
// skip unexported method - call will fail
continue
}
if n.Name().Assigned() {
fmt.Fprint(w, " assigned")
m := v.Method(i)
mt := m.Type()
if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Kind() == reflect.Bool {
// TODO(rsc): Remove the func/defer/recover wrapping,
// which is guarding against panics in miniExpr,
// once we get down to the simpler state in which
// nodes have no getter methods that aren't allowed to be called.
func() {
defer func() { recover() }()
if m.Call(nil)[0].Bool() {
name := strings.TrimSuffix(tm.Name, "_")
fmt.Fprintf(w, " %s", name)
}
}()
}
if n.Name().IsClosureVar() {
fmt.Fprint(w, " closurevar")
}
if n.Name().Captured() {
fmt.Fprint(w, " captured")
}
if n.Name().IsOutputParamHeapAddr() {
fmt.Fprint(w, " outputparamheapaddr")
}
}
if n.Bounded() {
fmt.Fprint(w, " bounded")
}
if n.NonNil() {
fmt.Fprint(w, " nonnil")
}
if n.HasCall() {
fmt.Fprint(w, " hascall")
}
if n.Name() != nil && n.Name().Used() {
fmt.Fprint(w, " used")
}
if n.Op() == OCLOSURE {
n := n.(*ClosureExpr)
if fn := n.Func(); fn != nil && fn.Nname.Sym() != nil {
fmt.Fprintf(w, " fnName(%+v)", fn.Nname.Sym())
}
@ -1072,7 +1147,7 @@ func dumpNode(w io.Writer, n Node, depth int) {
dumpNodeHeader(w, n)
return
case ONAME, ONONAME, OMETHEXPR:
case ONAME, ONONAME:
if n.Sym() != nil {
fmt.Fprintf(w, "%+v-%+v", n.Op(), n.Sym())
} else {
@ -1086,7 +1161,14 @@ func dumpNode(w io.Writer, n Node, depth int) {
}
return
case OMETHEXPR:
n := n.(*MethodExpr)
fmt.Fprintf(w, "%+v-%+v", n.Op(), n.FuncName().Sym())
dumpNodeHeader(w, n)
return
case OASOP:
n := n.(*AssignOpStmt)
fmt.Fprintf(w, "%+v-%+v", n.Op(), n.SubOp())
dumpNodeHeader(w, n)
@ -1120,7 +1202,7 @@ func dumpNode(w io.Writer, n Node, depth int) {
if fn.Body().Len() > 0 {
indent(w, depth)
fmt.Fprintf(w, "%+v-body", n.Op())
dumpNodes(w, n.Body(), depth+1)
dumpNodes(w, fn.Body(), depth+1)
}
return
}
@ -1186,3 +1268,40 @@ func dumpNodes(w io.Writer, list Nodes, depth int) {
dumpNode(w, n, depth)
}
}
// reflect.IsZero is not available in Go 1.4 (added in Go 1.13), so we use this copy instead.
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return math.Float64bits(v.Float()) == 0
case reflect.Complex64, reflect.Complex128:
c := v.Complex()
return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
case reflect.Array:
for i := 0; i < v.Len(); i++ {
if !isZero(v.Index(i)) {
return false
}
}
return true
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
return v.IsNil()
case reflect.String:
return v.Len() == 0
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if !isZero(v.Field(i)) {
return false
}
}
return true
default:
return false
}
}
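The dumpNodeHeader rewrite above replaces hand-written flag printing with reflection: it walks a node's exported niladic methods and prints the name of each one that returns true. A minimal standalone sketch of that trick (node, Implicit, Bounded are made-up here; the real code also skips unexported methods and guards against panicking getters):

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type node struct{ implicit, bounded bool }

func (n *node) Implicit() bool { return n.implicit }
func (n *node) Bounded() bool  { return n.bounded }

// dumpBools prints the name of every exported bool-returning,
// zero-argument method that reports true.
func dumpBools(x interface{}) {
	v := reflect.ValueOf(x)
	t := v.Type()
	for i := 0; i < t.NumMethod(); i++ {
		m := v.Method(i)
		mt := m.Type()
		if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Kind() == reflect.Bool {
			if m.Call(nil)[0].Bool() {
				fmt.Printf(" %s", strings.TrimSuffix(t.Method(i).Name, "_"))
			}
		}
	}
	fmt.Println()
}

func main() {
	dumpBools(&node{implicit: true}) // prints " Implicit"
}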

View File

@ -61,14 +61,12 @@ func (n *miniNode) SetEsc(x uint16) { n.esc = x }
const (
miniWalkdefShift = 0
miniTypecheckShift = 2
miniInitorderShift = 4
miniDiag = 1 << 6
miniHasCall = 1 << 7 // for miniStmt
miniDiag = 1 << 4
miniHasCall = 1 << 5 // for miniStmt
)
func (n *miniNode) Walkdef() uint8 { return n.bits.get2(miniWalkdefShift) }
func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) }
func (n *miniNode) Initorder() uint8 { return n.bits.get2(miniInitorderShift) }
func (n *miniNode) SetWalkdef(x uint8) {
if x > 3 {
panic(fmt.Sprintf("cannot SetWalkdef %d", x))
@ -81,12 +79,6 @@ func (n *miniNode) SetTypecheck(x uint8) {
}
n.bits.set2(miniTypecheckShift, x)
}
func (n *miniNode) SetInitorder(x uint8) {
if x > 3 {
panic(fmt.Sprintf("cannot SetInitorder %d", x))
}
n.bits.set2(miniInitorderShift, x)
}
func (n *miniNode) Diag() bool { return n.bits&miniDiag != 0 }
func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) }
@ -114,22 +106,22 @@ func (n *miniNode) SetRight(x Node) {
}
}
func (n *miniNode) SetInit(x Nodes) {
if x != (Nodes{}) {
if x != nil {
panic(n.no("SetInit"))
}
}
func (n *miniNode) SetBody(x Nodes) {
if x != (Nodes{}) {
if x != nil {
panic(n.no("SetBody"))
}
}
func (n *miniNode) SetList(x Nodes) {
if x != (Nodes{}) {
if x != nil {
panic(n.no("SetList"))
}
}
func (n *miniNode) SetRlist(x Nodes) {
if x != (Nodes{}) {
if x != nil {
panic(n.no("SetRlist"))
}
}

View File

@ -67,18 +67,23 @@ func main() {
fmt.Fprintf(&buf, "\n")
fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }\n", name)
fmt.Fprintf(&buf, "func (n *%s) copy() Node { c := *n\n", name)
forNodeFields(typName, typ, func(name string, is func(types.Type) bool) {
switch {
case is(nodesType):
fmt.Fprintf(&buf, "c.%s = c.%s.Copy()\n", name, name)
case is(ptrFieldType):
fmt.Fprintf(&buf, "if c.%s != nil { c.%s = c.%s.copy() }\n", name, name, name)
case is(slicePtrFieldType):
fmt.Fprintf(&buf, "c.%s = copyFields(c.%s)\n", name, name)
}
})
fmt.Fprintf(&buf, "return &c }\n")
switch name {
case "Name":
fmt.Fprintf(&buf, "func (n *%s) copy() Node {panic(\"%s.copy\")}\n", name, name)
default:
fmt.Fprintf(&buf, "func (n *%s) copy() Node { c := *n\n", name)
forNodeFields(typName, typ, func(name string, is func(types.Type) bool) {
switch {
case is(nodesType):
fmt.Fprintf(&buf, "c.%s = c.%s.Copy()\n", name, name)
case is(ptrFieldType):
fmt.Fprintf(&buf, "if c.%s != nil { c.%s = c.%s.copy() }\n", name, name, name)
case is(slicePtrFieldType):
fmt.Fprintf(&buf, "c.%s = copyFields(c.%s)\n", name, name)
}
})
fmt.Fprintf(&buf, "return &c }\n")
}
fmt.Fprintf(&buf, "func (n *%s) doChildren(do func(Node) error) error { var err error\n", name)
forNodeFields(typName, typ, func(name string, is func(types.Type) bool) {

View File

@ -16,8 +16,7 @@ import (
// An Ident is an identifier, possibly qualified.
type Ident struct {
miniExpr
sym *types.Sym
Used bool
sym *types.Sym
}
func NewIdent(pos src.XPos, sym *types.Sym) *Ident {
@ -35,16 +34,16 @@ func (*Ident) CanBeNtype() {}
// Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL).
type Name struct {
miniExpr
subOp Op // uint8
class Class // uint8
flags bitset16
pragma PragmaFlag // int16
sym *types.Sym
fn *Func
offset int64
val constant.Value
orig Node
embedFiles *[]string // list of embedded files, for ONAME var
BuiltinOp Op // uint8
Class_ Class // uint8
flags bitset16
pragma PragmaFlag // int16
sym *types.Sym
fn *Func
Offset_ int64
val constant.Value
orig Node
Embed *[]Embed // list of embedded files, for ONAME var
PkgName *PkgName // real package for import . names
// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
@ -142,6 +141,12 @@ type Name struct {
func (n *Name) isExpr() {}
// CloneName makes a cloned copy of the name.
// It's not ir.Copy(n) because in general that operation is a mistake on names,
// which uniquely identify variables.
// Callers must use n.CloneName to make clear they intend to create a separate name.
func (n *Name) CloneName() *Name { c := *n; return &c }
// NewNameAt returns a new ONAME Node associated with symbol s at position pos.
// The caller is responsible for setting Curfn.
func NewNameAt(pos src.XPos, sym *types.Sym) *Name {
@ -181,16 +186,22 @@ func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
func (n *Name) Name() *Name { return n }
func (n *Name) Sym() *types.Sym { return n.sym }
func (n *Name) SetSym(x *types.Sym) { n.sym = x }
func (n *Name) SubOp() Op { return n.subOp }
func (n *Name) SetSubOp(x Op) { n.subOp = x }
func (n *Name) Class() Class { return n.class }
func (n *Name) SetClass(x Class) { n.class = x }
func (n *Name) SubOp() Op { return n.BuiltinOp }
func (n *Name) SetSubOp(x Op) { n.BuiltinOp = x }
func (n *Name) Class() Class { return n.Class_ }
func (n *Name) SetClass(x Class) { n.Class_ = x }
func (n *Name) Func() *Func { return n.fn }
func (n *Name) SetFunc(x *Func) { n.fn = x }
func (n *Name) Offset() int64 { return n.offset }
func (n *Name) SetOffset(x int64) { n.offset = x }
func (n *Name) Iota() int64 { return n.offset }
func (n *Name) SetIota(x int64) { n.offset = x }
func (n *Name) Offset() int64 { panic("Name.Offset") }
func (n *Name) SetOffset(x int64) {
if x != 0 {
panic("Name.SetOffset")
}
}
func (n *Name) FrameOffset() int64 { return n.Offset_ }
func (n *Name) SetFrameOffset(x int64) { n.Offset_ = x }
func (n *Name) Iota() int64 { return n.Offset_ }
func (n *Name) SetIota(x int64) { n.Offset_ = x }
func (*Name) CanBeNtype() {}
func (*Name) CanBeAnSSASym() {}
@ -220,27 +231,6 @@ func (n *Name) Alias() bool { return n.flags&nameAlias != 0 }
// SetAlias sets whether p, which must be for an OTYPE, is a type alias.
func (n *Name) SetAlias(alias bool) { n.flags.set(nameAlias, alias) }
// EmbedFiles returns the list of embedded files for p,
// which must be for an ONAME var.
func (n *Name) EmbedFiles() []string {
if n.embedFiles == nil {
return nil
}
return *n.embedFiles
}
// SetEmbedFiles sets the list of embedded files for p,
// which must be for an ONAME var.
func (n *Name) SetEmbedFiles(list []string) {
if n.embedFiles == nil && list == nil {
return
}
if n.embedFiles == nil {
n.embedFiles = new([]string)
}
*n.embedFiles = list
}
const (
nameCaptured = 1 << iota // is the variable captured by a closure
nameReadonly
@ -378,6 +368,11 @@ const (
_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
)
type Embed struct {
Pos src.XPos
Patterns []string
}
// A PkgName is an identifier referring to an imported package.
type PkgName struct {
miniNode

View File

@ -102,8 +102,6 @@ type Node interface {
SetBounded(x bool)
Typecheck() uint8
SetTypecheck(x uint8)
Initorder() uint8
SetInitorder(x uint8)
NonNil() bool
MarkNonNil()
HasCall() bool
@ -276,7 +274,6 @@ const (
ORECOVER // recover()
ORECV // <-Left
ORUNESTR // Type(Left) (Type is string, Left is rune)
OSELRECV // like OAS: Left = Right where Right.Op = ORECV (appears as .Left of OCASE)
OSELRECV2 // like OAS2: List = Rlist where len(List)=2, len(Rlist)=1, Rlist[0].Op = ORECV (appears as .Left of OCASE)
OIOTA // iota
OREAL // real(Left)
@ -348,6 +345,7 @@ const (
OVARLIVE // variable is alive
ORESULT // result of a function call; Xoffset is stack offset
OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
ONAMEOFFSET // offset within a name
// arch-specific opcodes
ORETJMP // return to other function
@ -359,7 +357,7 @@ const (
// Nodes is a pointer to a slice of *Node.
// For fields that are not used in most nodes, this is used instead of
// a slice to save space.
type Nodes struct{ slice *[]Node }
type Nodes []Node
// immutableEmptyNodes is an immutable, empty Nodes list.
// The methods that would modify it panic instead.
@ -367,43 +365,37 @@ var immutableEmptyNodes = Nodes{}
// AsNodes returns a slice of Node as a Nodes value.
func AsNodes(s []Node) Nodes {
return Nodes{&s}
return s
}
// Slice returns the entries in Nodes as a slice.
// Changes to the slice entries (as in s[i] = n) will be reflected in
// the Nodes.
func (n Nodes) Slice() []Node {
if n.slice == nil {
return nil
}
return *n.slice
return n
}
// Len returns the number of entries in Nodes.
func (n Nodes) Len() int {
if n.slice == nil {
return 0
}
return len(*n.slice)
return len(n)
}
// Index returns the i'th element of Nodes.
// It panics if n does not have at least i+1 elements.
func (n Nodes) Index(i int) Node {
return (*n.slice)[i]
return n[i]
}
// First returns the first element of Nodes (same as n.Index(0)).
// It panics if n has no elements.
func (n Nodes) First() Node {
return (*n.slice)[0]
return n[0]
}
// Second returns the second element of Nodes (same as n.Index(1)).
// It panics if n has fewer than two elements.
func (n Nodes) Second() Node {
return (*n.slice)[1]
return n[1]
}
func (n *Nodes) mutate() {
@ -422,64 +414,56 @@ func (n *Nodes) Set(s []Node) {
}
n.mutate()
}
if len(s) == 0 {
n.slice = nil
} else {
// Copy s and take address of t rather than s to avoid
// allocation in the case where len(s) == 0 (which is
// over 3x more common, dynamically, for make.bash).
t := s
n.slice = &t
}
*n = s
}
// Set1 sets n to a slice containing a single node.
func (n *Nodes) Set1(n1 Node) {
n.mutate()
n.slice = &[]Node{n1}
*n = []Node{n1}
}
// Set2 sets n to a slice containing two nodes.
func (n *Nodes) Set2(n1, n2 Node) {
n.mutate()
n.slice = &[]Node{n1, n2}
*n = []Node{n1, n2}
}
// Set3 sets n to a slice containing three nodes.
func (n *Nodes) Set3(n1, n2, n3 Node) {
n.mutate()
n.slice = &[]Node{n1, n2, n3}
*n = []Node{n1, n2, n3}
}
// MoveNodes sets n to the contents of n2, then clears n2.
func (n *Nodes) MoveNodes(n2 *Nodes) {
n.mutate()
n.slice = n2.slice
n2.slice = nil
*n = *n2
*n2 = nil
}
// SetIndex sets the i'th element of Nodes to node.
// It panics if n does not have at least i+1 elements.
func (n Nodes) SetIndex(i int, node Node) {
(*n.slice)[i] = node
n[i] = node
}
// SetFirst sets the first element of Nodes to node.
// It panics if n does not have at least one element.
func (n Nodes) SetFirst(node Node) {
(*n.slice)[0] = node
n[0] = node
}
// SetSecond sets the second element of Nodes to node.
// It panics if n does not have at least two elements.
func (n Nodes) SetSecond(node Node) {
(*n.slice)[1] = node
n[1] = node
}
// Addr returns the address of the i'th element of Nodes.
// It panics if n does not have at least i+1 elements.
func (n Nodes) Addr(i int) *Node {
return &(*n.slice)[i]
return &n[i]
}
// Append appends entries to Nodes.
@ -488,13 +472,7 @@ func (n *Nodes) Append(a ...Node) {
return
}
n.mutate()
if n.slice == nil {
s := make([]Node, len(a))
copy(s, a)
n.slice = &s
return
}
*n.slice = append(*n.slice, a...)
*n = append(*n, a...)
}
// Prepend prepends entries to Nodes.
@ -504,38 +482,29 @@ func (n *Nodes) Prepend(a ...Node) {
return
}
n.mutate()
if n.slice == nil {
n.slice = &a
} else {
*n.slice = append(a, *n.slice...)
}
*n = append(a, *n...)
}
// Take clears n, returning its former contents.
func (n *Nodes) Take() []Node {
ret := *n
*n = nil
return ret
}
// AppendNodes appends the contents of *n2 to n, then clears n2.
func (n *Nodes) AppendNodes(n2 *Nodes) {
n.mutate()
switch {
case n2.slice == nil:
case n.slice == nil:
n.slice = n2.slice
default:
*n.slice = append(*n.slice, *n2.slice...)
}
n2.slice = nil
*n = append(*n, n2.Take()...)
}
// Copy returns a copy of the content of the slice.
func (n Nodes) Copy() Nodes {
var c Nodes
if n.slice == nil {
return c
if n == nil {
return nil
}
c.slice = new([]Node)
if *n.slice == nil {
return c
}
*c.slice = make([]Node, n.Len())
copy(*c.slice, n.Slice())
c := make(Nodes, n.Len())
copy(c, n)
return c
}
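With Nodes now defined as a plain slice (type Nodes []Node), a nil value is a valid empty list and callers can range over it directly, while the accessor methods above are kept for compatibility. A small hedged sketch (the helper is invented, written as if inside this package):

    // Sketch only: direct slice-style use of the new Nodes representation.
    func firstOrNil(list Nodes) Node {
        for _, x := range list { // no list.Slice() indirection needed
            return x
        }
        return nil // a nil Nodes ranges zero times
    }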
@ -697,12 +666,8 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node {
typ = nright.(Ntype)
}
return NewCompLitExpr(pos, op, typ, nil)
case OAS, OSELRECV:
n := NewAssignStmt(pos, nleft, nright)
if op != OAS {
n.SetOp(op)
}
return n
case OAS:
return NewAssignStmt(pos, nleft, nright)
case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2:
n := NewAssignListStmt(pos, op, nil, nil)
return n
@ -769,8 +734,6 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node {
return newNameAt(pos, op, nil)
case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY:
return NewMakeExpr(pos, op, nleft, nright)
case OMETHEXPR:
return NewMethodExpr(pos, nleft, nright)
case ONIL:
return NewNilExpr(pos)
case OPACK:

View File

@ -632,21 +632,14 @@ func (n *MethodExpr) copy() Node {
func (n *MethodExpr) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDo(n.X, err, do)
err = maybeDo(n.M, err, do)
return err
}
func (n *MethodExpr) editChildren(edit func(Node) Node) {
editList(n.init, edit)
n.X = maybeEdit(n.X, edit)
n.M = maybeEdit(n.M, edit)
}
func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *Name) copy() Node {
c := *n
return &c
}
func (n *Name) copy() Node { panic("Name.copy") }
func (n *Name) doChildren(do func(Node) error) error {
var err error
return err
@ -654,6 +647,21 @@ func (n *Name) doChildren(do func(Node) error) error {
func (n *Name) editChildren(edit func(Node) Node) {
}
func (n *NameOffsetExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *NameOffsetExpr) copy() Node {
c := *n
c.init = c.init.Copy()
return &c
}
func (n *NameOffsetExpr) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
return err
}
func (n *NameOffsetExpr) editChildren(edit func(Node) Node) {
editList(n.init, edit)
}
func (n *NilExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *NilExpr) copy() Node {
c := *n

View File

@ -111,62 +111,62 @@ func _() {
_ = x[ORECOVER-100]
_ = x[ORECV-101]
_ = x[ORUNESTR-102]
_ = x[OSELRECV-103]
_ = x[OSELRECV2-104]
_ = x[OIOTA-105]
_ = x[OREAL-106]
_ = x[OIMAG-107]
_ = x[OCOMPLEX-108]
_ = x[OALIGNOF-109]
_ = x[OOFFSETOF-110]
_ = x[OSIZEOF-111]
_ = x[OMETHEXPR-112]
_ = x[OSTMTEXPR-113]
_ = x[OBLOCK-114]
_ = x[OBREAK-115]
_ = x[OCASE-116]
_ = x[OCONTINUE-117]
_ = x[ODEFER-118]
_ = x[OFALL-119]
_ = x[OFOR-120]
_ = x[OFORUNTIL-121]
_ = x[OGOTO-122]
_ = x[OIF-123]
_ = x[OLABEL-124]
_ = x[OGO-125]
_ = x[ORANGE-126]
_ = x[ORETURN-127]
_ = x[OSELECT-128]
_ = x[OSWITCH-129]
_ = x[OTYPESW-130]
_ = x[OTCHAN-131]
_ = x[OTMAP-132]
_ = x[OTSTRUCT-133]
_ = x[OTINTER-134]
_ = x[OTFUNC-135]
_ = x[OTARRAY-136]
_ = x[OTSLICE-137]
_ = x[OINLCALL-138]
_ = x[OEFACE-139]
_ = x[OITAB-140]
_ = x[OIDATA-141]
_ = x[OSPTR-142]
_ = x[OCLOSUREREAD-143]
_ = x[OCFUNC-144]
_ = x[OCHECKNIL-145]
_ = x[OVARDEF-146]
_ = x[OVARKILL-147]
_ = x[OVARLIVE-148]
_ = x[ORESULT-149]
_ = x[OINLMARK-150]
_ = x[OSELRECV2-103]
_ = x[OIOTA-104]
_ = x[OREAL-105]
_ = x[OIMAG-106]
_ = x[OCOMPLEX-107]
_ = x[OALIGNOF-108]
_ = x[OOFFSETOF-109]
_ = x[OSIZEOF-110]
_ = x[OMETHEXPR-111]
_ = x[OSTMTEXPR-112]
_ = x[OBLOCK-113]
_ = x[OBREAK-114]
_ = x[OCASE-115]
_ = x[OCONTINUE-116]
_ = x[ODEFER-117]
_ = x[OFALL-118]
_ = x[OFOR-119]
_ = x[OFORUNTIL-120]
_ = x[OGOTO-121]
_ = x[OIF-122]
_ = x[OLABEL-123]
_ = x[OGO-124]
_ = x[ORANGE-125]
_ = x[ORETURN-126]
_ = x[OSELECT-127]
_ = x[OSWITCH-128]
_ = x[OTYPESW-129]
_ = x[OTCHAN-130]
_ = x[OTMAP-131]
_ = x[OTSTRUCT-132]
_ = x[OTINTER-133]
_ = x[OTFUNC-134]
_ = x[OTARRAY-135]
_ = x[OTSLICE-136]
_ = x[OINLCALL-137]
_ = x[OEFACE-138]
_ = x[OITAB-139]
_ = x[OIDATA-140]
_ = x[OSPTR-141]
_ = x[OCLOSUREREAD-142]
_ = x[OCFUNC-143]
_ = x[OCHECKNIL-144]
_ = x[OVARDEF-145]
_ = x[OVARKILL-146]
_ = x[OVARLIVE-147]
_ = x[ORESULT-148]
_ = x[OINLMARK-149]
_ = x[ONAMEOFFSET-150]
_ = x[ORETJMP-151]
_ = x[OGETG-152]
_ = x[OEND-153]
}
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND"
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKNAMEOFFSETRETJMPGETGEND"
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 594, 602, 606, 610, 614, 621, 628, 636, 642, 650, 658, 663, 668, 672, 680, 685, 689, 692, 700, 704, 706, 711, 713, 718, 724, 730, 736, 742, 747, 751, 758, 764, 769, 775, 781, 788, 793, 797, 802, 806, 817, 822, 830, 836, 843, 850, 856, 863, 869, 873, 876}
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 595, 599, 603, 607, 614, 621, 629, 635, 643, 651, 656, 661, 665, 673, 678, 682, 685, 693, 697, 699, 704, 706, 711, 717, 723, 729, 735, 740, 744, 751, 757, 762, 768, 774, 781, 786, 790, 795, 799, 810, 815, 823, 829, 836, 843, 849, 856, 866, 872, 876, 879}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {

View File

@ -0,0 +1,35 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import "cmd/compile/internal/types"
// A Package holds information about the package being compiled.
type Package struct {
// Imports, listed in source order.
// See golang.org/issue/31636.
Imports []*types.Pkg
// Init functions, listed in source order.
Inits []*Func
// Top-level declarations.
Decls []Node
// Extern (package global) declarations.
Externs []Node
// Assembly function declarations.
Asms []*Name
// Cgo directives.
CgoPragmas [][]string
// Variables with //go:embed lines.
Embeds []*Name
// Exported (or re-exported) symbols.
Exports []*Name
}

View File

@ -20,8 +20,8 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
{Func{}, 168, 288},
{Name{}, 124, 216},
{Func{}, 200, 352},
{Name{}, 132, 232},
}
for _, tt := range tests {

View File

@ -63,10 +63,9 @@ func (n *miniStmt) SetHasCall(b bool) { n.bits.set(miniHasCall, b) }
// If Def is true, the assignment is a :=.
type AssignListStmt struct {
miniStmt
Lhs Nodes
Def bool
Rhs Nodes
Offset_ int64 // for initorder
Lhs Nodes
Def bool
Rhs Nodes
}
func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt {
@ -75,20 +74,17 @@ func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt {
n.SetOp(op)
n.Lhs.Set(lhs)
n.Rhs.Set(rhs)
n.Offset_ = types.BADWIDTH
return n
}
func (n *AssignListStmt) List() Nodes { return n.Lhs }
func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs }
func (n *AssignListStmt) SetList(x Nodes) { n.Lhs = x }
func (n *AssignListStmt) Rlist() Nodes { return n.Rhs }
func (n *AssignListStmt) PtrRlist() *Nodes { return &n.Rhs }
func (n *AssignListStmt) SetRlist(x Nodes) { n.Rhs = x }
func (n *AssignListStmt) Colas() bool { return n.Def }
func (n *AssignListStmt) SetColas(x bool) { n.Def = x }
func (n *AssignListStmt) Offset() int64 { return n.Offset_ }
func (n *AssignListStmt) SetOffset(x int64) { n.Offset_ = x }
func (n *AssignListStmt) List() Nodes { return n.Lhs }
func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs }
func (n *AssignListStmt) SetList(x Nodes) { n.Lhs = x }
func (n *AssignListStmt) Rlist() Nodes { return n.Rhs }
func (n *AssignListStmt) PtrRlist() *Nodes { return &n.Rhs }
func (n *AssignListStmt) SetRlist(x Nodes) { n.Rhs = x }
func (n *AssignListStmt) Colas() bool { return n.Def }
func (n *AssignListStmt) SetColas(x bool) { n.Def = x }
func (n *AssignListStmt) SetOp(op Op) {
switch op {
@ -103,34 +99,30 @@ func (n *AssignListStmt) SetOp(op Op) {
// If Def is true, the assignment is a :=.
type AssignStmt struct {
miniStmt
X Node
Def bool
Y Node
Offset_ int64 // for initorder
X Node
Def bool
Y Node
}
func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt {
n := &AssignStmt{X: x, Y: y}
n.pos = pos
n.op = OAS
n.Offset_ = types.BADWIDTH
return n
}
func (n *AssignStmt) Left() Node { return n.X }
func (n *AssignStmt) SetLeft(x Node) { n.X = x }
func (n *AssignStmt) Right() Node { return n.Y }
func (n *AssignStmt) SetRight(y Node) { n.Y = y }
func (n *AssignStmt) Colas() bool { return n.Def }
func (n *AssignStmt) SetColas(x bool) { n.Def = x }
func (n *AssignStmt) Offset() int64 { return n.Offset_ }
func (n *AssignStmt) SetOffset(x int64) { n.Offset_ = x }
func (n *AssignStmt) Left() Node { return n.X }
func (n *AssignStmt) SetLeft(x Node) { n.X = x }
func (n *AssignStmt) Right() Node { return n.Y }
func (n *AssignStmt) SetRight(y Node) { n.Y = y }
func (n *AssignStmt) Colas() bool { return n.Def }
func (n *AssignStmt) SetColas(x bool) { n.Def = x }
func (n *AssignStmt) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
case OAS, OSELRECV:
case OAS:
n.op = op
}
}
@ -145,8 +137,8 @@ type AssignOpStmt struct {
IncDec bool // actually ++ or --
}
func NewAssignOpStmt(pos src.XPos, op Op, x, y Node) *AssignOpStmt {
n := &AssignOpStmt{AsOp: op, X: x, Y: y}
func NewAssignOpStmt(pos src.XPos, asOp Op, x, y Node) *AssignOpStmt {
n := &AssignOpStmt{AsOp: asOp, X: x, Y: y}
n.pos = pos
n.op = OASOP
return n
@ -376,6 +368,7 @@ type RangeStmt struct {
Body_ Nodes
HasBreak_ bool
typ *types.Type // TODO(rsc): Remove - use X.Type() instead
Prealloc *Name
}
func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt {

View File

@ -57,46 +57,40 @@ import (
// }
// do(root)
//
// The Inspect function illustrates a further simplification of the pattern,
// only considering processing before visiting children, and letting
// that processing decide whether children are visited at all:
// The Visit function illustrates a further simplification of the pattern,
// only processing before visiting children and never stopping:
//
// func Inspect(n ir.Node, inspect func(ir.Node) bool) {
// func Visit(n ir.Node, visit func(ir.Node)) {
// var do func(ir.Node) error
// do = func(x ir.Node) error {
// if inspect(x) {
// ir.DoChildren(x, do)
// }
// return nil
// visit(x)
// return ir.DoChildren(x, do)
// }
// if n != nil {
// do(n)
// visit(n)
// }
// }
//
// The Find function illustrates a different simplification of the pattern,
// The Any function illustrates a different simplification of the pattern,
// visiting each node and then its children, recursively, until finding
// a node x such that find(x) returns a non-nil result,
// at which point the entire traversal stops:
// a node x for which cond(x) returns true, at which point the entire
// traversal stops and returns true.
//
// func Find(n ir.Node, find func(ir.Node) interface{}) interface{} {
// func Any(n ir.Node, cond func(ir.Node) bool) bool {
// stop := errors.New("stop")
// var found interface{}
// var do func(ir.Node) error
// do = func(x ir.Node) error {
// if v := find(x); v != nil {
// found = v
// if cond(x) {
// return stop
// }
// return ir.DoChildren(x, do)
// }
// do(n)
// return found
// return do(n) == stop
// }
//
// Inspect and Find are presented above as examples of how to use
// Visit and Any are presented above as examples of how to use
// DoChildren effectively, but of course, usage that fits within the
// simplifications captured by Inspect or Find will be best served
// simplifications captured by Visit or Any will be best served
// by directly calling the ones provided by this package.
func DoChildren(n Node, do func(Node) error) error {
if n == nil {
@ -122,71 +116,59 @@ func DoList(list Nodes, do func(Node) error) error {
return nil
}
// Inspect visits each node x in the IR tree rooted at n
// in a depth-first preorder traversal, calling inspect on each node visited.
// If inspect(x) returns false, then Inspect skips over x's children.
//
// Note that the meaning of the boolean result in the callback function
// passed to Inspect differs from that of Scan.
// During Scan, if scan(x) returns false, then Scan stops the scan.
// During Inspect, if inspect(x) returns false, then Inspect skips x's children
// but continues with the remainder of the tree (x's siblings and so on).
func Inspect(n Node, inspect func(Node) bool) {
// Visit visits each non-nil node x in the IR tree rooted at n
// in a depth-first preorder traversal, calling visit on each node visited.
func Visit(n Node, visit func(Node)) {
var do func(Node) error
do = func(x Node) error {
if inspect(x) {
DoChildren(x, do)
}
return nil
visit(x)
return DoChildren(x, do)
}
if n != nil {
do(n)
}
}
// InspectList calls Inspect(x, inspect) for each node x in the list.
func InspectList(list Nodes, inspect func(Node) bool) {
// VisitList calls Visit(x, visit) for each node x in the list.
func VisitList(list Nodes, visit func(Node)) {
for _, x := range list.Slice() {
Inspect(x, inspect)
Visit(x, visit)
}
}
var stop = errors.New("stop")
// Find looks for a non-nil node x in the IR tree rooted at n
// for which find(x) returns a non-nil value.
// Find considers nodes in a depth-first, preorder traversal.
// When Find finds a node x such that find(x) != nil,
// Find ends the traversal and returns the value of find(x) immediately.
// Otherwise Find returns nil.
func Find(n Node, find func(Node) interface{}) interface{} {
// Any looks for a non-nil node x in the IR tree rooted at n
// for which cond(x) returns true.
// Any considers nodes in a depth-first, preorder traversal.
// When Any finds a node x such that cond(x) is true,
// Any ends the traversal and returns true immediately.
// Otherwise Any returns false after completing the entire traversal.
func Any(n Node, cond func(Node) bool) bool {
if n == nil {
return nil
return false
}
var found interface{}
var do func(Node) error
do = func(x Node) error {
if v := find(x); v != nil {
found = v
if cond(x) {
return stop
}
return DoChildren(x, do)
}
do(n)
return found
return do(n) == stop
}
// FindList calls Find(x, ok) for each node x in the list, in order.
// If any call find(x) returns a non-nil result, FindList stops and
// returns that result, skipping the remainder of the list.
// Otherwise FindList returns nil.
func FindList(list Nodes, find func(Node) interface{}) interface{} {
// AnyList calls Any(x, cond) for each node x in the list, in order.
// If any call returns true, AnyList stops and returns true.
// Otherwise, AnyList returns false after calling Any(x, cond)
// for every x in the list.
func AnyList(list Nodes, cond func(Node) bool) bool {
for _, x := range list.Slice() {
if v := Find(x, find); v != nil {
return v
if Any(x, cond) {
return true
}
}
return nil
return false
}
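For illustration, callers of the renamed helpers look roughly like this; the helpers and the OCALLFUNC predicate are examples, not taken from this change:

    // Sketch only: Visit replaces Inspect for unconditional preorder walks,
    // and Any replaces Find when only a yes/no answer is needed.
    func countNodes(fn *Func) int {
        n := 0
        Visit(fn, func(Node) { n++ })
        return n
    }

    func containsCall(fn *Func) bool {
        return Any(fn, func(x Node) bool { return x.Op() == OCALLFUNC })
    }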
// EditChildren edits the child nodes of n, replacing each child x with edit(x).

View File

@ -147,6 +147,11 @@ func checkFunc(f *Func) {
canHaveAuxInt = true
case auxInt128:
// AuxInt must be zero, so leave canHaveAuxInt set to false.
case auxUInt8:
if v.AuxInt != int64(uint8(v.AuxInt)) {
f.Fatalf("bad uint8 AuxInt value for %v", v)
}
canHaveAuxInt = true
case auxFloat32:
canHaveAuxInt = true
if math.IsNaN(v.AuxFloat()) {

View File

@ -196,9 +196,6 @@ func expandCalls(f *Func) {
}
if leaf.Op == OpIData {
leafType = removeTrivialWrapperTypes(leaf.Type)
if leafType.IsEmptyInterface() {
leafType = typ.BytePtr
}
}
aux := selector.Aux
auxInt := selector.AuxInt + offset
@ -247,12 +244,9 @@ func expandCalls(f *Func) {
// i.e., the struct select is generated and remains in because it is not applied to an actual structure.
// The OpLoad was created to load the single field of the IData
// This case removes that StructSelect.
if leafType != selector.Type && !selector.Type.IsEmptyInterface() { // empty interface for #42727
if leafType != selector.Type {
f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
}
if selector.Type.IsEmptyInterface() {
selector.Type = typ.BytePtr
}
leaf.copyOf(selector)
for _, s := range namedSelects[selector] {
locs = append(locs, f.Names[s.locIndex])

View File

@ -663,8 +663,8 @@
((OR|XOR)W x (MOVDconst [c])) => ((OR|XOR)Wconst [int32(c)] x)
// Constant shifts.
(S(LD|RD|RAD) x (MOVDconst [c])) => (S(LD|RD|RAD)const x [int8(c&63)])
(S(LW|RW|RAW) x (MOVDconst [c])) && c&32 == 0 => (S(LW|RW|RAW)const x [int8(c&31)])
(S(LD|RD|RAD) x (MOVDconst [c])) => (S(LD|RD|RAD)const x [uint8(c&63)])
(S(LW|RW|RAW) x (MOVDconst [c])) && c&32 == 0 => (S(LW|RW|RAW)const x [uint8(c&31)])
(S(LW|RW) _ (MOVDconst [c])) && c&32 != 0 => (MOVDconst [0])
(SRAW x (MOVDconst [c])) && c&32 != 0 => (SRAWconst x [31])
@ -685,8 +685,8 @@
(SRAW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAW x y)
// Match rotate by constant.
(RLLG x (MOVDconst [c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, int8(c&63))})
(RLL x (MOVDconst [c])) => (RLLconst x [int8(c&31)])
(RLLG x (MOVDconst [c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))})
(RLL x (MOVDconst [c])) => (RLLconst x [uint8(c&31)])
// Match rotate by constant pattern.
((ADD|OR|XOR) (SLDconst x [c]) (SRDconst x [64-c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
@ -705,10 +705,10 @@
(CMP(W|WU) (MOVDconst [c]) x) => (InvertFlags (CMP(W|WU)const x [int32(c)]))
// Match (x >> c) << d to 'rotate then insert selected bits [into zero]'.
(SLDconst (SRDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(max8(0, c-d), 63-d, (d-c)&63)})
(SLDconst (SRDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))})
// Match (x << c) >> d to 'rotate then insert selected bits [into zero]'.
(SRDconst (SLDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(d, min8(63, 63-c+d), (c-d)&63)})
(SRDconst (SLDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63))})
// Absorb input zero extension into 'rotate then insert selected bits [into zero]'.
(RISBGZ (MOVWZreg x) {r}) && r.InMerge(0xffffffff) != nil => (RISBGZ x {*r.InMerge(0xffffffff)})
@ -818,18 +818,18 @@
// c = 2ˣ + 2ʸ => c - 2ˣ = 2ʸ
(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(c&(c-1))
=> ((ADD|ADDW) (SL(D|W)const <t> x [int8(log32(c&(c-1)))])
(SL(D|W)const <t> x [int8(log32(c&^(c-1)))]))
=> ((ADD|ADDW) (SL(D|W)const <t> x [uint8(log32(c&(c-1)))])
(SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))
// c = 2ʸ - 2ˣ => c + 2ˣ = 2ʸ
(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(c+(c&^(c-1)))
=> ((SUB|SUBW) (SL(D|W)const <t> x [int8(log32(c+(c&^(c-1))))])
(SL(D|W)const <t> x [int8(log32(c&^(c-1)))]))
=> ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(c+(c&^(c-1))))])
(SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))
// c = 2ˣ - 2ʸ => -c + 2ˣ = 2ʸ
(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(-c+(-c&^(-c-1)))
=> ((SUB|SUBW) (SL(D|W)const <t> x [int8(log32(-c&^(-c-1)))])
(SL(D|W)const <t> x [int8(log32(-c+(-c&^(-c-1))))]))
=> ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(-c&^(-c-1)))])
(SL(D|W)const <t> x [uint8(log32(-c+(-c&^(-c-1))))]))
// Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them).
(ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)

View File

@ -330,27 +330,27 @@ func init() {
{name: "LTDBR", argLength: 1, reg: fp1flags, asm: "LTDBR", typ: "Flags"}, // arg0 compare to 0, f64
{name: "LTEBR", argLength: 1, reg: fp1flags, asm: "LTEBR", typ: "Flags"}, // arg0 compare to 0, f32
{name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64
{name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 64
{name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int8"}, // arg0 << auxint, shift amount 0-63
{name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int8"}, // arg0 << auxint, shift amount 0-31
{name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64
{name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 64
{name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "UInt8"}, // arg0 << auxint, shift amount 0-63
{name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "UInt8"}, // arg0 << auxint, shift amount 0-31
{name: "SRD", argLength: 2, reg: sh21, asm: "SRD"}, // unsigned arg0 >> arg1, shift amount is mod 64
{name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned uint32(arg0) >> arg1, shift amount is mod 64
{name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int8"}, // unsigned arg0 >> auxint, shift amount 0-63
{name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int8"}, // unsigned uint32(arg0) >> auxint, shift amount 0-31
{name: "SRD", argLength: 2, reg: sh21, asm: "SRD"}, // unsigned arg0 >> arg1, shift amount is mod 64
{name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned uint32(arg0) >> arg1, shift amount is mod 64
{name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "UInt8"}, // unsigned arg0 >> auxint, shift amount 0-63
{name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "UInt8"}, // unsigned uint32(arg0) >> auxint, shift amount 0-31
// Arithmetic shifts clobber flags.
{name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
{name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed int32(arg0) >> arg1, shift amount is mod 64
{name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int8", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
{name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int8", clobberFlags: true}, // signed int32(arg0) >> auxint, shift amount 0-31
{name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
{name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed int32(arg0) >> arg1, shift amount is mod 64
{name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "UInt8", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
{name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "UInt8", clobberFlags: true}, // signed int32(arg0) >> auxint, shift amount 0-31
// Rotate instructions.
// Note: no RLLGconst - use RISBGZ instead.
{name: "RLLG", argLength: 2, reg: sh21, asm: "RLLG"}, // arg0 rotate left arg1, rotate amount 0-63
{name: "RLL", argLength: 2, reg: sh21, asm: "RLL"}, // arg0 rotate left arg1, rotate amount 0-31
{name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "Int8"}, // arg0 rotate left auxint, rotate amount 0-31
{name: "RLLG", argLength: 2, reg: sh21, asm: "RLLG"}, // arg0 rotate left arg1, rotate amount 0-63
{name: "RLL", argLength: 2, reg: sh21, asm: "RLL"}, // arg0 rotate left arg1, rotate amount 0-31
{name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "UInt8"}, // arg0 rotate left auxint, rotate amount 0-31
// Rotate then (and|or|xor|insert) selected bits instructions.
//

View File

@ -1395,7 +1395,7 @@ func parseValue(val string, arch arch, loc string) (op opData, oparch, typ, auxi
func opHasAuxInt(op opData) bool {
switch op.aux {
case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64",
case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "UInt8", "Float32", "Float64",
"SymOff", "CallOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop":
return true
}
@ -1780,6 +1780,8 @@ func (op opData) auxIntType() string {
return "int64"
case "Int128":
return "int128"
case "UInt8":
return "uint8"
case "Float32":
return "float32"
case "Float64":

View File

@ -207,6 +207,7 @@ const (
auxInt32 // auxInt is a 32-bit integer
auxInt64 // auxInt is a 64-bit integer
auxInt128 // auxInt represents a 128-bit integer. Always 0.
auxUInt8 // auxInt is an 8-bit unsigned integer
auxFloat32 // auxInt is a float32 (encoded with math.Float64bits)
auxFloat64 // auxInt is a float64 (encoded with math.Float64bits)
auxFlagConstant // auxInt is a flagConstant

View File

@ -30569,7 +30569,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "SLDconst",
auxType: auxInt8,
auxType: auxUInt8,
argLen: 1,
asm: s390x.ASLD,
reg: regInfo{
@ -30583,7 +30583,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "SLWconst",
auxType: auxInt8,
auxType: auxUInt8,
argLen: 1,
asm: s390x.ASLW,
reg: regInfo{
@ -30625,7 +30625,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "SRDconst",
auxType: auxInt8,
auxType: auxUInt8,
argLen: 1,
asm: s390x.ASRD,
reg: regInfo{
@ -30639,7 +30639,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "SRWconst",
auxType: auxInt8,
auxType: auxUInt8,
argLen: 1,
asm: s390x.ASRW,
reg: regInfo{
@ -30683,7 +30683,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "SRADconst",
auxType: auxInt8,
auxType: auxUInt8,
argLen: 1,
clobberFlags: true,
asm: s390x.ASRAD,
@ -30698,7 +30698,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "SRAWconst",
auxType: auxInt8,
auxType: auxUInt8,
argLen: 1,
clobberFlags: true,
asm: s390x.ASRAW,
@ -30741,7 +30741,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "RLLconst",
auxType: auxInt8,
auxType: auxUInt8,
argLen: 1,
asm: s390x.ARLL,
reg: regInfo{

File diff suppressed because it is too large

View File

@ -94,7 +94,8 @@ func (s *Sym) SetPkgDef(n Object) {
func (s *Sym) pkgDefPtr() *Object {
// Look for outermost saved declaration, which must be the
// package scope definition, if present.
for _, d := range dclstack {
for i := range dclstack {
d := &dclstack[i]
if s == d.sym {
return &d.def
}
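Indexing into dclstack matters here because pkgDefPtr returns a pointer that callers write through; ranging by value would return the address of a loop-local copy. A stand-alone sketch of the difference (types invented):

    // Sketch only: why the pointer must come from &slice[i], not from a range copy.
    type entry struct{ key, val int }

    func ptrWrong(s []entry, k int) *int {
        for _, d := range s { // d is a copy of the element
            if d.key == k {
                return &d.val // points at the copy; writes never reach s
            }
        }
        return nil
    }

    func ptrRight(s []entry, k int) *int {
        for i := range s {
            d := &s[i] // points into the slice's backing array
            if d.key == k {
                return &d.val // writes through this pointer update s[i]
            }
        }
        return nil
    }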

View File

@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
{Sym{}, 52, 88},
{Sym{}, 48, 80},
{Type{}, 56, 96},
{Map{}, 20, 40},
{Forward{}, 20, 32},

View File

@ -38,8 +38,7 @@ type Sym struct {
Block int32 // blocknumber to catch redeclaration
Lastlineno src.XPos // last declaration for diagnostic
flags bitset8
Origpkg *Pkg // original package for . import
flags bitset8
}
const (

View File

@ -8,5 +8,5 @@ require (
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897
golang.org/x/mod v0.4.0
golang.org/x/sys v0.0.0-20201204225414-ed752295db88 // indirect
golang.org/x/tools v0.0.0-20201208211828-de58e7c01d49
golang.org/x/tools v0.0.0-20201211025543-abf6a1d87e11
)

View File

@ -31,8 +31,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201208211828-de58e7c01d49 h1:K1QAOVIWIvmQ66F1Z3AEa9Wzp0bj+xU3YzLkvROk2Ds=
golang.org/x/tools v0.0.0-20201208211828-de58e7c01d49/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201211025543-abf6a1d87e11 h1:9j/upNXDRpADUw2RpUfJ7E7GHtfhDih62kX6JM8vs2c=
golang.org/x/tools v0.0.0-20201211025543-abf6a1d87e11/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=

View File

@ -164,6 +164,17 @@
// directory, but it is not accessed. When -modfile is specified, an
// alternate go.sum file is also used: its path is derived from the
// -modfile flag by trimming the ".mod" extension and appending ".sum".
// -overlay file
// read a JSON config file that provides an overlay for build operations.
// The file is a JSON struct with a single field, named 'Replace', that
// maps each disk file path (a string) to its backing file path, so that
// a build will run as if the disk file path exists with the contents
// given by the backing file paths, or as if the disk file path does not
// exist if its backing file path is empty. Support for the -overlay flag
// has some limitations: importantly, cgo files included from outside the
// include path must be in the same directory as the Go package they are
// included from, and overlays will not appear when binaries and tests are
// run through go run and go test respectively.
// -pkgdir dir
// install and load all packages from dir instead of the usual locations.
// For example, when building with a non-standard configuration,
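The -overlay help text above describes a JSON file holding a single Replace map. Purely as an illustration (all paths are invented), such a file could be produced programmatically like this:

    // Illustration only: generating a -overlay config file as described above.
    package main

    import (
        "encoding/json"
        "log"
        "os"
    )

    func main() {
        overlay := struct {
            Replace map[string]string
        }{Replace: map[string]string{
            "/src/app/config.go": "/tmp/generated/config.go", // serve replacement contents
            "/src/app/legacy.go": "",                         // treat the file as deleted
        }}
        data, err := json.MarshalIndent(overlay, "", "\t")
        if err != nil {
            log.Fatal(err)
        }
        if err := os.WriteFile("overlay.json", data, 0o644); err != nil {
            log.Fatal(err)
        }
        // Then: go build -overlay overlay.json ./...
    }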

View File

@ -31,6 +31,7 @@ import (
"cmd/go/internal/cache"
"cmd/go/internal/cfg"
"cmd/go/internal/robustio"
"cmd/go/internal/work"
"cmd/internal/sys"
)
@ -1365,6 +1366,30 @@ func TestLdflagsArgumentsWithSpacesIssue3941(t *testing.T) {
tg.grepStderr("^hello world", `ldflags -X "main.extern=hello world"' failed`)
}
func TestLdFlagsLongArgumentsIssue42295(t *testing.T) {
// Test the extremely long command line arguments that contain '\n' characters
// get encoded and passed correctly.
skipIfGccgo(t, "gccgo does not support -ldflags -X")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("main.go", `package main
var extern string
func main() {
print(extern)
}`)
testStr := "test test test test test \n\\ "
var buf bytes.Buffer
for buf.Len() < work.ArgLengthForResponseFile+1 {
buf.WriteString(testStr)
}
tg.run("run", "-ldflags", fmt.Sprintf(`-X "main.extern=%s"`, buf.String()), tg.path("main.go"))
if tg.stderr.String() != buf.String() {
t.Errorf("strings differ")
}
}
func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)

View File

@ -33,8 +33,20 @@ See also: go fmt, go vet.
}
func runFix(ctx context.Context, cmd *base.Command, args []string) {
pkgs := load.PackagesAndErrors(ctx, args)
w := 0
for _, pkg := range pkgs {
if pkg.Error != nil {
base.Errorf("%v", pkg.Error)
continue
}
pkgs[w] = pkg
w++
}
pkgs = pkgs[:w]
printed := false
for _, pkg := range load.Packages(ctx, args) {
for _, pkg := range pkgs {
if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main {
if !printed {
fmt.Fprintf(os.Stderr, "go: not fixing packages in dependency modules\n")

View File

@ -180,13 +180,14 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
// everything.
load.ClearPackageCache()
pkgs := load.PackagesForBuild(ctx, args)
pkgs := load.PackagesAndErrors(ctx, args)
load.CheckPackageErrors(pkgs)
// Phase 3. Install.
if *getD {
// Download only.
// Check delayed until now so that importPaths
// and packagesForBuild have a chance to print errors.
// Check delayed until now so that downloadPaths
// and CheckPackageErrors have a chance to print errors.
return
}

View File

@ -471,11 +471,18 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
}
load.IgnoreImports = *listFind
var pkgs []*load.Package
if *listE {
pkgs = load.PackagesAndErrors(ctx, args)
} else {
pkgs = load.Packages(ctx, args)
pkgs := load.PackagesAndErrors(ctx, args)
if !*listE {
w := 0
for _, pkg := range pkgs {
if pkg.Error != nil {
base.Errorf("%v", pkg.Error)
continue
}
pkgs[w] = pkg
w++
}
pkgs = pkgs[:w]
base.ExitIfErrors()
}
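Both runFix and runList now drop errored packages with the same write-index idiom instead of calling a separate loader. A generic hedged sketch of that idiom (the helper name is a placeholder):

    // Sketch only: in-place filtering as used above. Kept elements are
    // compacted to the front of the same backing array and the slice is
    // re-sliced to the new length, so no extra allocation is needed.
    func keepLoadable(pkgs []*load.Package) []*load.Package {
        w := 0
        for _, pkg := range pkgs {
            if pkg.Error != nil {
                continue // report or skip errored packages
            }
            pkgs[w] = pkg
            w++
        }
        return pkgs[:w]
    }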

View File

@ -2314,30 +2314,14 @@ func LoadImportWithFlags(path, srcDir string, parent *Package, stk *ImportStack,
// argument where needed.
var ModResolveTests bool
// Packages returns the packages named by the
// command line arguments 'args'. If a named package
// cannot be loaded at all (for example, if the directory does not exist),
// then packages prints an error and does not include that
// package in the results. However, if errors occur trying
// to load dependencies of a named package, the named
// package is still returned, with p.Incomplete = true
// and details in p.DepsErrors.
func Packages(ctx context.Context, args []string) []*Package {
var pkgs []*Package
for _, pkg := range PackagesAndErrors(ctx, args) {
if pkg.Error != nil {
base.Errorf("%v", pkg.Error)
continue
}
pkgs = append(pkgs, pkg)
}
return pkgs
}
// PackagesAndErrors is like 'packages' but returns a
// *Package for every argument, even the ones that
// cannot be loaded at all.
// The packages that fail to load will have p.Error != nil.
// PackagesAndErrors returns the packages named by the command line arguments
// 'patterns'. If a named package cannot be loaded, PackagesAndErrors returns
// a *Package with the Error field describing the failure. If errors are found
// loading imported packages, the DepsErrors field is set. The Incomplete field
// may be set as well.
//
// To obtain a flat list of packages, use PackageList.
// To report errors loading packages, use CheckPackageErrors.
func PackagesAndErrors(ctx context.Context, patterns []string) []*Package {
ctx, span := trace.StartSpan(ctx, "load.PackagesAndErrors")
defer span.Done()
@ -2427,20 +2411,9 @@ func PackagesAndErrors(ctx context.Context, patterns []string) []*Package {
return pkgs
}
func setToolFlags(pkgs ...*Package) {
for _, p := range PackageList(pkgs) {
p.Internal.Asmflags = BuildAsmflags.For(p)
p.Internal.Gcflags = BuildGcflags.For(p)
p.Internal.Ldflags = BuildLdflags.For(p)
p.Internal.Gccgoflags = BuildGccgoflags.For(p)
}
}
// PackagesForBuild is like Packages but exits
// if any of the packages or their dependencies have errors
// (cannot be built).
func PackagesForBuild(ctx context.Context, args []string) []*Package {
pkgs := PackagesAndErrors(ctx, args)
// CheckPackageErrors prints errors encountered loading pkgs and their
// dependencies, then exits with a non-zero status if any errors were found.
func CheckPackageErrors(pkgs []*Package) {
printed := map[*PackageError]bool{}
for _, pkg := range pkgs {
if pkg.Error != nil {
@ -2475,8 +2448,15 @@ func PackagesForBuild(ctx context.Context, args []string) []*Package {
seen[pkg.ImportPath] = true
}
base.ExitIfErrors()
}
return pkgs
func setToolFlags(pkgs ...*Package) {
for _, p := range PackageList(pkgs) {
p.Internal.Asmflags = BuildAsmflags.For(p)
p.Internal.Gcflags = BuildGcflags.For(p)
p.Internal.Ldflags = BuildLdflags.For(p)
p.Internal.Gccgoflags = BuildGccgoflags.For(p)
}
}
// GoFilesPackage creates a package for building a collection of Go files

View File

@ -434,11 +434,13 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
// directory.
if !*getD && len(pkgPatterns) > 0 {
work.BuildInit()
pkgs := load.PackagesForBuild(ctx, pkgPatterns)
pkgs := load.PackagesAndErrors(ctx, pkgPatterns)
load.CheckPackageErrors(pkgs)
work.InstallPackages(ctx, pkgPatterns, pkgs)
// TODO(#40276): After Go 1.16, print a deprecation notice when building
// and installing main packages. 'go install pkg' or
// 'go install pkg@version' should be used instead.
// Give the specific argument to use if possible.
}
if !modload.HasModRoot() {

View File

@ -595,7 +595,8 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) {
work.VetFlags = testVet.flags
work.VetExplicit = testVet.explicit
pkgs = load.PackagesForBuild(ctx, pkgArgs)
pkgs = load.PackagesAndErrors(ctx, pkgArgs)
load.CheckPackageErrors(pkgs)
if len(pkgs) == 0 {
base.Fatalf("no packages to test")
}
@ -678,7 +679,9 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) {
sort.Strings(all)
a := &work.Action{Mode: "go test -i"}
for _, p := range load.PackagesForBuild(ctx, all) {
pkgs := load.PackagesAndErrors(ctx, all)
load.CheckPackageErrors(pkgs)
for _, p := range pkgs {
if cfg.BuildToolchainName == "gccgo" && p.Standard {
// gccgo's standard library packages
// can not be reinstalled.

View File

@ -87,7 +87,8 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) {
}
}
pkgs := load.PackagesForBuild(ctx, pkgArgs)
pkgs := load.PackagesAndErrors(ctx, pkgArgs)
load.CheckPackageErrors(pkgs)
if len(pkgs) == 0 {
base.Fatalf("no packages to vet")
}

View File

@ -124,6 +124,17 @@ and test commands:
directory, but it is not accessed. When -modfile is specified, an
alternate go.sum file is also used: its path is derived from the
-modfile flag by trimming the ".mod" extension and appending ".sum".
-overlay file
read a JSON config file that provides an overlay for build operations.
The file is a JSON struct with a single field, named 'Replace', that
maps each disk file path (a string) to its backing file path, so that
a build will run as if the disk file path exists with the contents
given by the backing file paths, or as if the disk file path does not
exist if its backing file path is empty. Support for the -overlay flag
has some limitations: importantly, cgo files included from outside the
include path must be in the same directory as the Go package they are
included from, and overlays will not appear when binaries and tests are
run through go run and go test respectively.
-pkgdir dir
install and load all packages from dir instead of the usual locations.
For example, when building with a non-standard configuration,
@ -358,7 +369,8 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) {
var b Builder
b.Init()
pkgs := load.PackagesForBuild(ctx, args)
pkgs := load.PackagesAndErrors(ctx, args)
load.CheckPackageErrors(pkgs)
explicitO := len(cfg.BuildO) > 0
@ -388,7 +400,7 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) {
fmt.Fprint(os.Stderr, "go build: -i flag is deprecated\n")
}
pkgs = omitTestOnly(pkgsFilter(load.Packages(ctx, args)))
pkgs = omitTestOnly(pkgsFilter(pkgs))
// Special case -o /dev/null by not writing at all.
if cfg.BuildO == os.DevNull {
@ -571,8 +583,32 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) {
return
}
}
BuildInit()
pkgs := load.PackagesForBuild(ctx, args)
pkgs := load.PackagesAndErrors(ctx, args)
if cfg.ModulesEnabled && !modload.HasModRoot() {
haveErrors := false
allMissingErrors := true
for _, pkg := range pkgs {
if pkg.Error == nil {
continue
}
haveErrors = true
if missingErr := (*modload.ImportMissingError)(nil); !errors.As(pkg.Error, &missingErr) {
allMissingErrors = false
break
}
}
if haveErrors && allMissingErrors {
latestArgs := make([]string, len(args))
for i := range args {
latestArgs[i] = args[i] + "@latest"
}
hint := strings.Join(latestArgs, " ")
base.Fatalf("go install: version is required when current directory is not in a module\n\tTry 'go install %s' to install the latest version", hint)
}
}
load.CheckPackageErrors(pkgs)
if cfg.BuildI {
allGoroot := true
for _, pkg := range pkgs {
@ -585,6 +621,7 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) {
fmt.Fprint(os.Stderr, "go install: -i flag is deprecated\n")
}
}
InstallPackages(ctx, args, pkgs)
}
@ -802,7 +839,7 @@ func installOutsideModule(ctx context.Context, args []string) {
// Load packages for all arguments. Ignore non-main packages.
// Print a warning if an argument contains "..." and matches no main packages.
// PackagesForBuild already prints warnings for patterns that don't match any
// PackagesAndErrors already prints warnings for patterns that don't match any
// packages, so be careful not to double print.
matchers := make([]func(string) bool, len(patterns))
for i, p := range patterns {
@ -813,7 +850,8 @@ func installOutsideModule(ctx context.Context, args []string) {
// TODO(golang.org/issue/40276): don't report errors loading non-main packages
// matched by a pattern.
pkgs := load.PackagesForBuild(ctx, patterns)
pkgs := load.PackagesAndErrors(ctx, patterns)
load.CheckPackageErrors(pkgs)
mainPkgs := make([]*load.Package, 0, len(pkgs))
mainCount := make([]int, len(patterns))
nonMainCount := make([]int, len(patterns))

View File

@ -3236,7 +3236,7 @@ func passLongArgsInResponseFiles(cmd *exec.Cmd) (cleanup func()) {
cleanup = func() { os.Remove(tf.Name()) }
var buf bytes.Buffer
for _, arg := range cmd.Args[1:] {
fmt.Fprintf(&buf, "%s\n", arg)
fmt.Fprintf(&buf, "%s\n", encodeArg(arg))
}
if _, err := tf.Write(buf.Bytes()); err != nil {
tf.Close()
@ -3251,6 +3251,12 @@ func passLongArgsInResponseFiles(cmd *exec.Cmd) (cleanup func()) {
return cleanup
}
// Windows has a limit of 32 KB arguments. To be conservative and not worry
// about whether that includes spaces or not, just use 30 KB. Darwin's limit is
// less clear. The OS claims 256KB, but we've seen failures with arglen as
// small as 50KB.
const ArgLengthForResponseFile = (30 << 10)
func useResponseFile(path string, argLen int) bool {
// Unless the program uses objabi.Flagparse, which understands
// response files, don't use response files.
@ -3262,11 +3268,7 @@ func useResponseFile(path string, argLen int) bool {
return false
}
// Windows has a limit of 32 KB arguments. To be conservative and not
// worry about whether that includes spaces or not, just use 30 KB.
// Darwin's limit is less clear. The OS claims 256KB, but we've seen
// failures with arglen as small as 50KB.
if argLen > (30 << 10) {
if argLen > ArgLengthForResponseFile {
return true
}
@ -3279,3 +3281,25 @@ func useResponseFile(path string, argLen int) bool {
return false
}
// encodeArg encodes an argument for response file writing.
func encodeArg(arg string) string {
// If there aren't any characters we need to reencode, fastpath out.
if !strings.ContainsAny(arg, "\\\n") {
return arg
}
var b strings.Builder
for _, r := range arg {
switch r {
case '\\':
b.WriteByte('\\')
b.WriteByte('\\')
case '\n':
b.WriteByte('\\')
b.WriteByte('n')
default:
b.WriteRune(r)
}
}
return b.String()
}

View File

@ -0,0 +1,86 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package work
import (
"bytes"
"cmd/internal/objabi"
"fmt"
"math/rand"
"testing"
"time"
"unicode/utf8"
)
func TestEncodeArgs(t *testing.T) {
t.Parallel()
tests := []struct {
arg, want string
}{
{"", ""},
{"hello", "hello"},
{"hello\n", "hello\\n"},
{"hello\\", "hello\\\\"},
{"hello\nthere", "hello\\nthere"},
{"\\\n", "\\\\\\n"},
}
for _, test := range tests {
if got := encodeArg(test.arg); got != test.want {
t.Errorf("encodeArg(%q) = %q, want %q", test.arg, got, test.want)
}
}
}
func TestEncodeDecode(t *testing.T) {
t.Parallel()
tests := []string{
"",
"hello",
"hello\\there",
"hello\nthere",
"hello 中国",
"hello \n中\\国",
}
for _, arg := range tests {
if got := objabi.DecodeArg(encodeArg(arg)); got != arg {
t.Errorf("objabi.DecodeArg(encodeArg(%q)) = %q", arg, got)
}
}
}
func TestEncodeDecodeFuzz(t *testing.T) {
if testing.Short() {
t.Skip("fuzz test is slow")
}
t.Parallel()
nRunes := ArgLengthForResponseFile + 100
rBuffer := make([]rune, nRunes)
buf := bytes.NewBuffer([]byte(string(rBuffer)))
seed := time.Now().UnixNano()
t.Logf("rand seed: %v", seed)
rng := rand.New(rand.NewSource(seed))
for i := 0; i < 50; i++ {
// Generate a random string of runes.
buf.Reset()
for buf.Len() < ArgLengthForResponseFile+1 {
var r rune
for {
r = rune(rng.Intn(utf8.MaxRune + 1))
if utf8.ValidRune(r) {
break
}
}
fmt.Fprintf(buf, "%c", r)
}
arg := buf.String()
if got := objabi.DecodeArg(encodeArg(arg)); got != arg {
t.Errorf("[%d] objabi.DecodeArg(encodeArg(%q)) = %q [seed: %v]", i, arg, got, seed)
}
}
}

View File

@ -241,7 +241,8 @@ func buildModeInit() {
if gccgo {
codegenArg = "-fPIC"
} else {
forcedAsmflags = append(forcedAsmflags, "-D=GOBUILDMODE_shared=1")
forcedAsmflags = append(forcedAsmflags, "-D=GOBUILDMODE_shared=1",
"-linkshared")
codegenArg = "-dynlink"
forcedGcflags = append(forcedGcflags, "-linkshared")
// TODO(mwhudson): remove -w when that gets fixed in linker.

View File

@ -189,13 +189,16 @@ exists $GOPATH/bin/printversion$GOEXE
# 'go install' should fail if a package argument must be resolved to a module.
! go install example.com/printversion
stderr 'no required module provides package example.com/printversion: working directory is not part of a module'
stderr '^go install: version is required when current directory is not in a module\n\tTry ''go install example.com/printversion@latest'' to install the latest version$'
# 'go install' should fail if a source file imports a package that must be
# resolved to a module.
! go install ./needmod/needmod.go
stderr 'needmod[/\\]needmod.go:10:2: no required module provides package example.com/version: working directory is not part of a module'
# 'go install' should succeed with a package in GOROOT.
go install cmd/addr2line
! stderr .
# 'go run' with a version should fail due to syntax.
! go run example.com/printversion@v1.0.0


@ -483,6 +483,11 @@ func (r *RefFlags) SetFlag2(x uint8) { r[9] = x }
func (r *RefFlags) Write(w *Writer) { w.Bytes(r[:]) }
// Used to construct an artificially large array type when reading an
// item from the object file relocs section or aux sym section (needs
// to work on 32-bit as well as 64-bit). See issue 41621.
const huge = (1<<31 - 1) / RelocSize
// Referenced symbol name.
//
// Serialized format:
@ -792,7 +797,7 @@ func (r *Reader) Reloc(i uint32, j int) *Reloc {
func (r *Reader) Relocs(i uint32) []Reloc {
off := r.RelocOff(i, 0)
n := r.NReloc(i)
return (*[1 << 20]Reloc)(unsafe.Pointer(&r.b[off]))[:n:n]
return (*[huge]Reloc)(unsafe.Pointer(&r.b[off]))[:n:n]
}
// NAux returns the number of aux symbols of the i-th symbol.
@ -818,7 +823,7 @@ func (r *Reader) Aux(i uint32, j int) *Aux {
func (r *Reader) Auxs(i uint32) []Aux {
off := r.AuxOff(i, 0)
n := r.NAux(i)
return (*[1 << 20]Aux)(unsafe.Pointer(&r.b[off]))[:n:n]
return (*[huge]Aux)(unsafe.Pointer(&r.b[off]))[:n:n]
}
// DataOff returns the offset of the i-th symbol's data.
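The huge constant replaces the fixed 1<<20 bound in the (*[N]T)(unsafe.Pointer(...))[:n:n] pattern, which reinterprets bytes of the mapped object file as a typed slice without copying; the bound is picked so the fictitious array type still has a representable size on 32-bit hosts. Below is a standalone sketch of the same pattern, with illustrative names (record, view) and the usual caveat that the caller must guarantee the underlying buffer is large enough and suitably aligned.

// Sketch only: record and view are illustrative names.
package main

import (
	"fmt"
	"unsafe"
)

type record struct {
	Off  uint32
	Size uint32
}

const recordSize = 8 // bytes per record, playing the role of RelocSize above

// hugeRecords bounds the fictitious array type so its total size is
// still representable on 32-bit platforms, mirroring `huge` above.
const hugeRecords = (1<<31 - 1) / recordSize

// view reinterprets n records starting at b[off] without copying.
// The caller must guarantee that b is big enough and suitably aligned.
func view(b []byte, off uint32, n int) []record {
	return (*[hugeRecords]record)(unsafe.Pointer(&b[off]))[:n:n]
}

func main() {
	buf := make([]byte, 64)
	recs := view(buf, 0, 4)
	recs[1].Size = 42
	fmt.Println(recs[1].Size) // 42, written straight into buf
}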


@ -250,6 +250,12 @@ func (a *Addr) SetTarget(t *Prog) {
a.Val = t
}
func (a *Addr) SetConst(v int64) {
a.Sym = nil
a.Type = TYPE_CONST
a.Offset = v
}
// Prog describes a single machine instruction.
//
// The general instruction form is:


@ -28,9 +28,9 @@ import (
// input left by. Note that this rotation is performed
// before the masked region is used.
type RotateParams struct {
Start int8 // big-endian start bit index [0..63]
End int8 // big-endian end bit index [0..63]
Amount int8 // amount to rotate left
Start uint8 // big-endian start bit index [0..63]
End uint8 // big-endian end bit index [0..63]
Amount uint8 // amount to rotate left
}
// NewRotateParams creates a set of parameters representing a
@ -39,7 +39,7 @@ type RotateParams struct {
//
// The start and end indexes and the rotation amount must all
// be in the range 0-63 inclusive or this function will panic.
func NewRotateParams(start, end, amount int8) RotateParams {
func NewRotateParams(start, end, amount uint8) RotateParams {
if start&^63 != 0 {
panic("start out of bounds")
}
@ -58,7 +58,7 @@ func NewRotateParams(start, end, amount int8) RotateParams {
// RotateLeft generates a new set of parameters with the rotation amount
// increased by the given value. The selected bits are left unchanged.
func (r RotateParams) RotateLeft(amount int8) RotateParams {
func (r RotateParams) RotateLeft(amount uint8) RotateParams {
r.Amount += amount
r.Amount &= 63
return r
@ -100,8 +100,8 @@ func (r RotateParams) OutMerge(mask uint64) *RotateParams {
}
// update start and end positions (rotation amount remains the same)
r.Start = int8(o+z) & 63
r.End = (r.Start + int8(l) - 1) & 63
r.Start = uint8(o+z) & 63
r.End = (r.Start + uint8(l) - 1) & 63
return &r
}
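RotateParams describes a rotate-then-select operation: the input is rotated left by Amount, then the bits from Start through End are kept, with both indices big-endian, inclusive, and allowed to wrap when Start > End. The sketch below derives the selection mask from those two indices under that reading of the comments; it is an illustration, not the helper the s390x package actually uses.

// Sketch only: selectionMask is an illustration, not the s390x helper.
package main

import "fmt"

// selectionMask returns the 64-bit mask of bits selected by an
// inclusive big-endian (start, end) pair, where bit index 0 is the
// most significant bit and start > end wraps around bit 63.
func selectionMask(start, end uint8) uint64 {
	var mask uint64
	i := start & 63
	for {
		mask |= uint64(1) << (63 - i)
		if i == end&63 {
			break
		}
		i = (i + 1) & 63
	}
	return mask
}

func main() {
	fmt.Printf("%#x\n", selectionMask(0, 7))   // 0xff00000000000000
	fmt.Printf("%#x\n", selectionMask(48, 63)) // 0xffff
	fmt.Printf("%#x\n", selectionMask(60, 3))  // wraps: 0xf00000000000000f
}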


@ -10,7 +10,7 @@ import (
func TestRotateParamsMask(t *testing.T) {
tests := []struct {
start, end, amount int8
start, end, amount uint8
inMask, outMask uint64
}{
// start before end, no rotation


@ -5,6 +5,7 @@
package objabi
import (
"bytes"
"flag"
"fmt"
"io"
@ -59,6 +60,9 @@ func expandArgs(in []string) (out []string) {
log.Fatal(err)
}
args := strings.Split(strings.TrimSpace(strings.Replace(string(slurp), "\r", "", -1)), "\n")
for i, arg := range args {
args[i] = DecodeArg(arg)
}
out = append(out, expandArgs(args)...)
} else if out != nil {
out = append(out, s)
@ -160,3 +164,38 @@ func (f fn1) Set(s string) error {
}
func (f fn1) String() string { return "" }
// DecodeArg decodes an argument.
//
// This function is public for testing with the parallel encoder.
func DecodeArg(arg string) string {
// If no encoding, fastpath out.
if !strings.ContainsAny(arg, "\\\n") {
return arg
}
// We can't use strings.Builder as this must work at bootstrap.
var b bytes.Buffer
var wasBS bool
for _, r := range arg {
if wasBS {
switch r {
case '\\':
b.WriteByte('\\')
case 'n':
b.WriteByte('\n')
default:
// This shouldn't happen. The only backslashes that reach here
// should encode '\n' and '\\' exclusively.
panic("badly formatted input")
}
} else if r == '\\' {
wasBS = true
continue
} else {
b.WriteRune(r)
}
wasBS = false
}
return b.String()
}


@ -0,0 +1,26 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package objabi
import "testing"
func TestDecodeArg(t *testing.T) {
t.Parallel()
tests := []struct {
arg, want string
}{
{"", ""},
{"hello", "hello"},
{"hello\\n", "hello\n"},
{"hello\\nthere", "hello\nthere"},
{"hello\\\\there", "hello\\there"},
{"\\\\\\n", "\\\n"},
}
for _, test := range tests {
if got := DecodeArg(test.arg); got != test.want {
t.Errorf("decodoeArg(%q) = %q, want %q", test.arg, got, test.want)
}
}
}


@ -37,6 +37,7 @@ import (
"cmd/link/internal/loader"
"cmd/link/internal/sym"
"debug/elf"
"fmt"
"log"
)
@ -463,12 +464,29 @@ func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym,
return true
}
// sign-extends from 24-bit.
func signext24(x int64) int64 { return x << 40 >> 40 }
func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, sectoff int64) bool {
var v uint32
rs := r.Xsym
rt := r.Type
siz := r.Size
xadd := r.Xadd
if xadd != signext24(xadd) {
// If the relocation target would overflow the addend, then target
// a linker-manufactured label symbol with a smaller addend instead.
label := ldr.Lookup(machoLabelName(ldr, rs, xadd), ldr.SymVersion(rs))
if label != 0 {
xadd = ldr.SymValue(rs) + xadd - ldr.SymValue(label)
rs = label
}
if xadd != signext24(xadd) {
ldr.Errorf(s, "internal error: relocation addend overflow: %s+0x%x", ldr.SymName(rs), xadd)
}
}
if ldr.SymType(rs) == sym.SHOSTOBJ || rt == objabi.R_CALLARM64 || rt == objabi.R_ADDRARM64 || rt == objabi.R_ARM64_GOTPCREL {
if ldr.SymDynid(rs) < 0 {
@ -492,8 +510,8 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy
case objabi.R_ADDR:
v |= ld.MACHO_ARM64_RELOC_UNSIGNED << 28
case objabi.R_CALLARM64:
if r.Xadd != 0 {
ldr.Errorf(s, "ld64 doesn't allow BR26 reloc with non-zero addend: %s+%d", ldr.SymName(rs), r.Xadd)
if xadd != 0 {
ldr.Errorf(s, "ld64 doesn't allow BR26 reloc with non-zero addend: %s+%d", ldr.SymName(rs), xadd)
}
v |= 1 << 24 // pc-relative bit
@ -504,13 +522,13 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy
// if r.Xadd is non-zero, add two MACHO_ARM64_RELOC_ADDEND.
if r.Xadd != 0 {
out.Write32(uint32(sectoff + 4))
out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(xadd&0xffffff))
}
out.Write32(uint32(sectoff + 4))
out.Write32(v | (ld.MACHO_ARM64_RELOC_PAGEOFF12 << 28) | (2 << 25))
if r.Xadd != 0 {
out.Write32(uint32(sectoff))
out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(xadd&0xffffff))
}
v |= 1 << 24 // pc-relative bit
v |= ld.MACHO_ARM64_RELOC_PAGE21 << 28
@ -520,13 +538,13 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy
// if r.Xadd is non-zero, add two MACHO_ARM64_RELOC_ADDEND.
if r.Xadd != 0 {
out.Write32(uint32(sectoff + 4))
out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(xadd&0xffffff))
}
out.Write32(uint32(sectoff + 4))
out.Write32(v | (ld.MACHO_ARM64_RELOC_GOT_LOAD_PAGEOFF12 << 28) | (2 << 25))
if r.Xadd != 0 {
out.Write32(uint32(sectoff))
out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(xadd&0xffffff))
}
v |= 1 << 24 // pc-relative bit
v |= ld.MACHO_ARM64_RELOC_GOT_LOAD_PAGE21 << 28
@ -965,3 +983,66 @@ func addpltsym(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade
ldr.Errorf(s, "addpltsym: unsupported binary format")
}
}
const machoRelocLimit = 1 << 23
func gensymlate(ctxt *ld.Link, ldr *loader.Loader) {
// When external linking on darwin, a Mach-O relocation has only a signed 24-bit
// addend. For large symbols, we generate "label" symbols in the middle, so
// that relocations can target them with smaller addends.
if !ctxt.IsDarwin() || !ctxt.IsExternal() {
return
}
big := false
for _, seg := range ld.Segments {
if seg.Length >= machoRelocLimit {
big = true
break
}
}
if !big {
return // skip work if nothing big
}
// addLabelSyms adds "label" symbols at s+machoRelocLimit, s+2*machoRelocLimit, etc.
addLabelSyms := func(s loader.Sym, sz int64) {
v := ldr.SymValue(s)
for off := int64(machoRelocLimit); off < sz; off += machoRelocLimit {
p := ldr.LookupOrCreateSym(machoLabelName(ldr, s, off), ldr.SymVersion(s))
ldr.SetAttrReachable(p, true)
ldr.SetSymValue(p, v+off)
ldr.SetSymSect(p, ldr.SymSect(s))
ld.AddMachoSym(ldr, p)
//fmt.Printf("gensymlate %s %x\n", ldr.SymName(p), ldr.SymValue(p))
}
}
for s, n := loader.Sym(1), loader.Sym(ldr.NSym()); s < n; s++ {
if !ldr.AttrReachable(s) {
continue
}
if ldr.SymType(s) == sym.STEXT {
continue // we don't target the middle of a function
}
sz := ldr.SymSize(s)
if sz <= machoRelocLimit {
continue
}
addLabelSyms(s, sz)
}
// Also for carrier symbols (for which SymSize is 0)
for _, ss := range ld.CarrierSymByType {
if ss.Sym != 0 && ss.Size > machoRelocLimit {
addLabelSyms(ss.Sym, ss.Size)
}
}
}
// machoLabelName returns the name of the "label" symbol used for a
// relocation targeting s+off. The label symbol is used on darwin
// when external linking, so that the addend fits in a Mach-O relocation.
func machoLabelName(ldr *loader.Loader, s loader.Sym, off int64) string {
return fmt.Sprintf("%s.%d", ldr.SymExtname(s), off/machoRelocLimit)
}
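Taken together, gensymlate drops a label symbol every machoRelocLimit (8 MiB) bytes inside any sufficiently large data or carrier symbol, and machoreloc1 retargets a relocation whose addend fails the signext24 check at the nearest label, leaving a residual addend that fits in 24 signed bits. The sketch below shows that remapping arithmetic; remap and labelIndex are illustrative names, while machoRelocLimit and signext24 mirror the definitions above.

// Sketch only: remap is an illustration; machoRelocLimit and signext24
// mirror the definitions above.
package main

import "fmt"

// Mach-O relocations carry only a signed 24-bit addend (see gensymlate above).
const machoRelocLimit = 1 << 23

// signext24 returns x sign-extended from its low 24 bits.
func signext24(x int64) int64 { return x << 40 >> 40 }

// remap picks the label index (0 means "use the symbol itself") so that
// the remaining addend fits in 24 signed bits, assuming label k sits at
// offset k*machoRelocLimit from the start of the symbol.
func remap(addend int64) (labelIndex, newAddend int64, ok bool) {
	if addend == signext24(addend) {
		return 0, addend, true // already fits, no label needed
	}
	labelIndex = addend / machoRelocLimit
	newAddend = addend - labelIndex*machoRelocLimit
	return labelIndex, newAddend, newAddend == signext24(newAddend)
}

func main() {
	for _, a := range []int64{100, 1 << 23, 3<<23 + 42} {
		k, na, ok := remap(a)
		fmt.Printf("addend %d -> label %d, residual %d (fits: %v)\n", a, k, na, ok)
	}
}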


@ -55,6 +55,7 @@ func Init() (*sys.Arch, ld.Arch) {
ElfrelocSize: 24,
Elfsetupplt: elfsetupplt,
Gentext: gentext,
GenSymsLate: gensymlate,
Machoreloc1: machoreloc1,
MachorelocSize: 8,


@ -1815,6 +1815,7 @@ func (state *dodataState) allocateDataSections(ctxt *Link) {
for _, symn := range sym.ReadOnly {
symnStartValue := state.datsize
state.assignToSection(sect, symn, sym.SRODATA)
setCarrierSize(symn, state.datsize-symnStartValue)
if ctxt.HeadType == objabi.Haix {
// Read-only symbols might be wrapped inside their outer
// symbol.
@ -1902,6 +1903,7 @@ func (state *dodataState) allocateDataSections(ctxt *Link) {
}
}
state.assignToSection(sect, symn, sym.SRODATA)
setCarrierSize(symn, state.datsize-symnStartValue)
if ctxt.HeadType == objabi.Haix {
// Read-only symbols might be wrapped inside their outer
// symbol.
@ -1949,6 +1951,7 @@ func (state *dodataState) allocateDataSections(ctxt *Link) {
ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pctab", 0), sect)
ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.functab", 0), sect)
ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.epclntab", 0), sect)
setCarrierSize(sym.SPCLNTAB, int64(sect.Length))
if ctxt.HeadType == objabi.Haix {
xcoffUpdateOuterSize(ctxt, int64(sect.Length), sym.SPCLNTAB)
}


@ -1458,7 +1458,7 @@ func (ctxt *Link) hostlink() {
}
const compressDWARF = "-Wl,--compress-debug-sections=zlib-gnu"
if ctxt.compressDWARF && linkerFlagSupported(argv[0], altLinker, compressDWARF) {
if ctxt.compressDWARF && linkerFlagSupported(ctxt.Arch, argv[0], altLinker, compressDWARF) {
argv = append(argv, compressDWARF)
}
@ -1548,7 +1548,7 @@ func (ctxt *Link) hostlink() {
if ctxt.BuildMode == BuildModeExe && !ctxt.linkShared && !(ctxt.IsDarwin() && ctxt.IsARM64()) {
// GCC uses -no-pie, clang uses -nopie.
for _, nopie := range []string{"-no-pie", "-nopie"} {
if linkerFlagSupported(argv[0], altLinker, nopie) {
if linkerFlagSupported(ctxt.Arch, argv[0], altLinker, nopie) {
argv = append(argv, nopie)
break
}
@ -1560,10 +1560,22 @@ func (ctxt *Link) hostlink() {
checkStatic(p)
}
if ctxt.HeadType == objabi.Hwindows {
// Determine which linker we're using. Add in the extldflags in
// case the user has specified "-fuse-ld=...".
cmd := exec.Command(*flagExtld, *flagExtldflags, "-Wl,--version")
usingLLD := false
if out, err := cmd.CombinedOutput(); err == nil {
if bytes.Contains(out, []byte("LLD ")) {
usingLLD = true
}
}
// use gcc linker script to work around gcc bug
// (see https://golang.org/issue/20183 for details).
p := writeGDBLinkerScript()
argv = append(argv, "-Wl,-T,"+p)
if !usingLLD {
p := writeGDBLinkerScript()
argv = append(argv, "-Wl,-T,"+p)
}
// libmingw32 and libmingwex have some inter-dependencies,
// so must use linker groups.
argv = append(argv, "-Wl,--start-group", "-lmingwex", "-lmingw32", "-Wl,--end-group")
@ -1657,7 +1669,7 @@ func (ctxt *Link) hostlink() {
var createTrivialCOnce sync.Once
func linkerFlagSupported(linker, altLinker, flag string) bool {
func linkerFlagSupported(arch *sys.Arch, linker, altLinker, flag string) bool {
createTrivialCOnce.Do(func() {
src := filepath.Join(*flagTmpdir, "trivial.c")
if err := ioutil.WriteFile(src, []byte("int main() { return 0; }"), 0666); err != nil {
@ -1691,7 +1703,7 @@ func linkerFlagSupported(linker, altLinker, flag string) bool {
"-target",
}
var flags []string
flags := hostlinkArchArgs(arch)
keep := false
skip := false
extldflags := strings.Fields(*flagExtldflags)
@ -1801,7 +1813,7 @@ func ldobj(ctxt *Link, f *bio.Reader, lib *sym.Library, length int64, pn string,
return ldhostobj(ldmacho, ctxt.HeadType, f, pkg, length, pn, file)
}
if c1 == 0x4c && c2 == 0x01 || c1 == 0x64 && c2 == 0x86 {
if /* x86 */ c1 == 0x4c && c2 == 0x01 || /* x86_64 */ c1 == 0x64 && c2 == 0x86 || /* armv7 */ c1 == 0xc4 && c2 == 0x01 {
ldpe := func(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) {
textp, rsrc, err := loadpe.Load(ctxt.loader, ctxt.Arch, ctxt.IncVersion(), f, pkg, length, pn)
if err != nil {
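linkerFlagSupported now takes the target architecture so the probe can pass the architecture-selection arguments from hostlinkArchArgs when it test-builds trivial.c with the candidate flag; a flag counts as supported if that build succeeds. Below is a minimal sketch of the probing pattern outside the linker; flagSupported is an illustrative stand-in and omits the extldflags filtering the real function performs.

// Sketch only: flagSupported is a stand-in for the probe done by
// linkerFlagSupported; it omits the real function's flag filtering.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// flagSupported builds a trivial C program with the candidate flag plus
// any architecture-selection arguments (what hostlinkArchArgs supplies
// in the linker); a zero exit status means the flag is accepted.
func flagSupported(cc string, archArgs []string, flag string) bool {
	dir, err := os.MkdirTemp("", "flagprobe")
	if err != nil {
		return false
	}
	defer os.RemoveAll(dir)

	src := filepath.Join(dir, "trivial.c")
	if err := os.WriteFile(src, []byte("int main() { return 0; }"), 0666); err != nil {
		return false
	}

	args := append([]string{}, archArgs...)
	args = append(args, flag, "-o", filepath.Join(dir, "a.out"), src)
	return exec.Command(cc, args...).Run() == nil
}

func main() {
	fmt.Println(flagSupported("cc", []string{"-m64"}, "-no-pie"))
}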


@ -969,6 +969,15 @@ func machosymorder(ctxt *Link) {
}
}
// AddMachoSym adds s to the Mach-O symbol table, used in GenSymsLate.
// Currently only used on ARM64 when external linking.
func AddMachoSym(ldr *loader.Loader, s loader.Sym) {
ldr.SetSymDynid(s, int32(nsortsym))
sortsym = append(sortsym, s)
nsortsym++
nkind[symkind(ldr, s)]++
}
// machoShouldExport reports whether a symbol needs to be exported.
//
// When dynamically linking, all non-local variables and plugin-exported
@ -1474,6 +1483,17 @@ func machoCodeSign(ctxt *Link, fname string) error {
// Skip.
return nil
}
fi, err := f.Stat()
if err != nil {
return err
}
if sigOff+sigSz != fi.Size() {
// We don't expect anything after the signature (this will invalidate
// the signature anyway).
return fmt.Errorf("unexpected content after code signature")
}
sz := codesign.Size(sigOff, "a.out")
if sz != sigSz {
// Update the load command,
@ -1500,5 +1520,9 @@ func machoCodeSign(ctxt *Link, fname string) error {
cs := make([]byte, sz)
codesign.Sign(cs, f, "a.out", sigOff, int64(textSeg.Offset), int64(textSeg.Filesz), ctxt.IsExe() || ctxt.IsPIE())
_, err = f.WriteAt(cs, sigOff)
if err != nil {
return err
}
err = f.Truncate(sigOff + sz)
return err
}


@ -859,6 +859,7 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab {
state.carrier = ldr.LookupOrCreateSym("runtime.pclntab", 0)
ldr.MakeSymbolUpdater(state.carrier).SetType(sym.SPCLNTAB)
ldr.SetAttrReachable(state.carrier, true)
setCarrierSym(sym.SPCLNTAB, state.carrier)
state.generatePCHeader(ctxt)
nameOffsets := state.generateFuncnametab(ctxt, funcs)

Some files were not shown because too many files have changed in this diff Show More