Mirror of https://github.com/golang/go.git (last synced 2025-05-08 09:03:04 +00:00)

Compare commits (54 commits)
Commits (SHA1):
c390a1c22e
1207de4f6c
a0d15cb9c8
958f3a0309
6ba3a8a6ba
5472853843
cfe0ae0b70
58babf6e0b
8d79bf799b
35c010ad6d
6495ce0495
7fc8312673
cc16cdf48f
9563300f6e
f8080edefd
ed07b321ae
3b2e846e11
fbddfae62f
c8c6f9abfb
a74951c5af
e6598e7baa
82575f76b8
a886959aa2
80ff7cd35a
69234ded30
032ac075c2
fa8ff1a46d
53487e5477
3d1f1f27cf
6de5a7180c
9625a7faae
9c939a1e60
7afe17bbdb
8002845759
9166d2feec
76346b3543
3c9340557c
dbecb416d1
6885bad7dd
ec7d6094e6
63b0f805cd
7adb012205
c9940fe2a9
3509415eca
559c77592f
f5e4e45ef7
30b6fd60a6
7e4d6c2bcb
8bd4ed6cbb
7dff7439dc
62c3a6350b
eba9e08766
f3bdcda88a
362f22d2d2
@@ -1,7 +1,7 @@
 name: Language Change Proposals
 description: Changes to the language
-labels: ["Proposal", "LanguageChange", "LanguageChangeReview"]
-title: "proposal: spec: proposal title"
+labels: ["Proposal", "v2", "LanguageChange"]
+title: "proposal: Go 2: proposal title"
 
 body:
.gitignore (vendored, 2 changed lines)
@@ -37,7 +37,7 @@ _testmain.go
 /src/go/build/zcgo.go
 /src/go/doc/headscan
 /src/internal/buildcfg/zbootstrap.go
-/src/internal/runtime/sys/zversion.go
+/src/runtime/internal/sys/zversion.go
 /src/unicode/maketables
 /src/time/tzdata/zzipdata.go
 /test.out
LICENSE (4 changed lines)
@@ -1,4 +1,4 @@
-Copyright 2009 The Go Authors.
+Copyright (c) 2009 The Go Authors. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google LLC nor the names of its
+   * Neither the name of Google Inc. nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
api/go1.24.txt (223 lines deleted)
@@ -1,223 +0,0 @@
pkg bytes, func FieldsFuncSeq([]uint8, func(int32) bool) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func FieldsSeq([]uint8) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func Lines([]uint8) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func SplitAfterSeq([]uint8, []uint8) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func SplitSeq([]uint8, []uint8) iter.Seq[[]uint8] #61901
|
||||
pkg crypto/cipher, func NewCFBDecrypter //deprecated #69445
|
||||
pkg crypto/cipher, func NewCFBEncrypter //deprecated #69445
|
||||
pkg crypto/cipher, func NewGCMWithRandomNonce(Block) (AEAD, error) #69981
|
||||
pkg crypto/cipher, func NewOFB //deprecated #69445
|
||||
pkg crypto/fips140, func Enabled() bool #70123
|
||||
pkg crypto/hkdf, func Expand[$0 hash.Hash](func() $0, []uint8, string, int) ([]uint8, error) #61477
|
||||
pkg crypto/hkdf, func Extract[$0 hash.Hash](func() $0, []uint8, []uint8) ([]uint8, error) #61477
|
||||
pkg crypto/hkdf, func Key[$0 hash.Hash](func() $0, []uint8, []uint8, string, int) ([]uint8, error) #61477
|
||||
pkg crypto/mlkem, const CiphertextSize1024 = 1568 #70122
|
||||
pkg crypto/mlkem, const CiphertextSize1024 ideal-int #70122
|
||||
pkg crypto/mlkem, const CiphertextSize768 = 1088 #70122
|
||||
pkg crypto/mlkem, const CiphertextSize768 ideal-int #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize1024 = 1568 #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize1024 ideal-int #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize768 = 1184 #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize768 ideal-int #70122
|
||||
pkg crypto/mlkem, const SeedSize = 64 #70122
|
||||
pkg crypto/mlkem, const SeedSize ideal-int #70122
|
||||
pkg crypto/mlkem, const SharedKeySize = 32 #70122
|
||||
pkg crypto/mlkem, const SharedKeySize ideal-int #70122
|
||||
pkg crypto/mlkem, func GenerateKey1024() (*DecapsulationKey1024, error) #70122
|
||||
pkg crypto/mlkem, func GenerateKey768() (*DecapsulationKey768, error) #70122
|
||||
pkg crypto/mlkem, func NewDecapsulationKey1024([]uint8) (*DecapsulationKey1024, error) #70122
|
||||
pkg crypto/mlkem, func NewDecapsulationKey768([]uint8) (*DecapsulationKey768, error) #70122
|
||||
pkg crypto/mlkem, func NewEncapsulationKey1024([]uint8) (*EncapsulationKey1024, error) #70122
|
||||
pkg crypto/mlkem, func NewEncapsulationKey768([]uint8) (*EncapsulationKey768, error) #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) Decapsulate([]uint8) ([]uint8, error) #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) EncapsulationKey() *EncapsulationKey1024 #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) Decapsulate([]uint8) ([]uint8, error) #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) EncapsulationKey() *EncapsulationKey768 #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey1024) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey1024) Encapsulate() ([]uint8, []uint8) #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey768) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey768) Encapsulate() ([]uint8, []uint8) #70122
|
||||
pkg crypto/mlkem, type DecapsulationKey1024 struct #70122
|
||||
pkg crypto/mlkem, type DecapsulationKey768 struct #70122
|
||||
pkg crypto/mlkem, type EncapsulationKey1024 struct #70122
|
||||
pkg crypto/mlkem, type EncapsulationKey768 struct #70122
|
||||
pkg crypto/pbkdf2, func Key[$0 hash.Hash](func() $0, string, []uint8, int, int) ([]uint8, error) #69488
|
||||
pkg crypto/rand, func Text() string #67057
|
||||
pkg crypto/sha3, func New224() *SHA3 #69982
|
||||
pkg crypto/sha3, func New256() *SHA3 #69982
|
||||
pkg crypto/sha3, func New384() *SHA3 #69982
|
||||
pkg crypto/sha3, func New512() *SHA3 #69982
|
||||
pkg crypto/sha3, func NewCSHAKE128([]uint8, []uint8) *SHAKE #69982
|
||||
pkg crypto/sha3, func NewCSHAKE256([]uint8, []uint8) *SHAKE #69982
|
||||
pkg crypto/sha3, func NewSHAKE128() *SHAKE #69982
|
||||
pkg crypto/sha3, func NewSHAKE256() *SHAKE #69982
|
||||
pkg crypto/sha3, func Sum224([]uint8) [28]uint8 #69982
|
||||
pkg crypto/sha3, func Sum256([]uint8) [32]uint8 #69982
|
||||
pkg crypto/sha3, func Sum384([]uint8) [48]uint8 #69982
|
||||
pkg crypto/sha3, func Sum512([]uint8) [64]uint8 #69982
|
||||
pkg crypto/sha3, func SumSHAKE128([]uint8, int) []uint8 #69982
|
||||
pkg crypto/sha3, func SumSHAKE256([]uint8, int) []uint8 #69982
|
||||
pkg crypto/sha3, method (*SHA3) AppendBinary([]uint8) ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHA3) BlockSize() int #69982
|
||||
pkg crypto/sha3, method (*SHA3) MarshalBinary() ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHA3) Reset() #69982
|
||||
pkg crypto/sha3, method (*SHA3) Size() int #69982
|
||||
pkg crypto/sha3, method (*SHA3) Sum([]uint8) []uint8 #69982
|
||||
pkg crypto/sha3, method (*SHA3) UnmarshalBinary([]uint8) error #69982
|
||||
pkg crypto/sha3, method (*SHA3) Write([]uint8) (int, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) AppendBinary([]uint8) ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) BlockSize() int #69982
|
||||
pkg crypto/sha3, method (*SHAKE) MarshalBinary() ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) Read([]uint8) (int, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) Reset() #69982
|
||||
pkg crypto/sha3, method (*SHAKE) UnmarshalBinary([]uint8) error #69982
|
||||
pkg crypto/sha3, method (*SHAKE) Write([]uint8) (int, error) #69982
|
||||
pkg crypto/sha3, type SHA3 struct #69982
|
||||
pkg crypto/sha3, type SHAKE struct #69982
|
||||
pkg crypto/subtle, func WithDataIndependentTiming(func()) #66450
|
||||
pkg crypto/tls, const X25519MLKEM768 = 4588 #69985
|
||||
pkg crypto/tls, const X25519MLKEM768 CurveID #69985
|
||||
pkg crypto/tls, type ClientHelloInfo struct, Extensions []uint16 #32936
|
||||
pkg crypto/tls, type Config struct, EncryptedClientHelloKeys []EncryptedClientHelloKey #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct, Config []uint8 #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct, PrivateKey []uint8 #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct, SendAsRetry bool #68500
|
||||
pkg crypto/x509, const NoValidChains = 10 #68484
|
||||
pkg crypto/x509, const NoValidChains InvalidReason #68484
|
||||
pkg crypto/x509, method (OID) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg crypto/x509, method (OID) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg crypto/x509, type Certificate struct, InhibitAnyPolicy int #68484
|
||||
pkg crypto/x509, type Certificate struct, InhibitAnyPolicyZero bool #68484
|
||||
pkg crypto/x509, type Certificate struct, InhibitPolicyMapping int #68484
|
||||
pkg crypto/x509, type Certificate struct, InhibitPolicyMappingZero bool #68484
|
||||
pkg crypto/x509, type Certificate struct, PolicyMappings []PolicyMapping #68484
|
||||
pkg crypto/x509, type Certificate struct, RequireExplicitPolicy int #68484
|
||||
pkg crypto/x509, type Certificate struct, RequireExplicitPolicyZero bool #68484
|
||||
pkg crypto/x509, type PolicyMapping struct #68484
|
||||
pkg crypto/x509, type PolicyMapping struct, IssuerDomainPolicy OID #68484
|
||||
pkg crypto/x509, type PolicyMapping struct, SubjectDomainPolicy OID #68484
|
||||
pkg crypto/x509, type VerifyOptions struct, CertificatePolicies []OID #68484
|
||||
pkg debug/elf, const VER_FLG_BASE = 1 #63952
|
||||
pkg debug/elf, const VER_FLG_BASE DynamicVersionFlag #63952
|
||||
pkg debug/elf, const VER_FLG_INFO = 4 #63952
|
||||
pkg debug/elf, const VER_FLG_INFO DynamicVersionFlag #63952
|
||||
pkg debug/elf, const VER_FLG_WEAK = 2 #63952
|
||||
pkg debug/elf, const VER_FLG_WEAK DynamicVersionFlag #63952
|
||||
pkg debug/elf, method (*File) DynamicVersionNeeds() ([]DynamicVersionNeed, error) #63952
|
||||
pkg debug/elf, method (*File) DynamicVersions() ([]DynamicVersion, error) #63952
|
||||
pkg debug/elf, type DynamicVersion struct #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Deps []string #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Flags DynamicVersionFlag #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Name string #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Index uint16 #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct, Dep string #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct, Flags DynamicVersionFlag #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct, Index uint16 #63952
|
||||
pkg debug/elf, type DynamicVersionFlag uint16 #63952
|
||||
pkg debug/elf, type DynamicVersionNeed struct #63952
|
||||
pkg debug/elf, type DynamicVersionNeed struct, Name string #63952
|
||||
pkg debug/elf, type DynamicVersionNeed struct, Needs []DynamicVersionDep #63952
|
||||
pkg debug/elf, type Symbol struct, HasVersion bool #63952
|
||||
pkg debug/elf, type Symbol struct, VersionIndex VersionIndex #63952
|
||||
pkg debug/elf, method (VersionIndex) Index() uint16 #63952
|
||||
pkg debug/elf, method (VersionIndex) IsHidden() bool #63952
|
||||
pkg debug/elf, type VersionIndex uint16 #63952
|
||||
pkg encoding, type BinaryAppender interface { AppendBinary } #62384
|
||||
pkg encoding, type BinaryAppender interface, AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg encoding, type TextAppender interface { AppendText } #62384
|
||||
pkg encoding, type TextAppender interface, AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg go/types, method (*Interface) EmbeddedTypes() iter.Seq[Type] #66626
|
||||
pkg go/types, method (*Interface) ExplicitMethods() iter.Seq[*Func] #66626
|
||||
pkg go/types, method (*Interface) Methods() iter.Seq[*Func] #66626
|
||||
pkg go/types, method (*MethodSet) Methods() iter.Seq[*Selection] #66626
|
||||
pkg go/types, method (*Named) Methods() iter.Seq[*Func] #66626
|
||||
pkg go/types, method (*Scope) Children() iter.Seq[*Scope] #66626
|
||||
pkg go/types, method (*Struct) Fields() iter.Seq[*Var] #66626
|
||||
pkg go/types, method (*Tuple) Variables() iter.Seq[*Var] #66626
|
||||
pkg go/types, method (*TypeList) Types() iter.Seq[Type] #66626
|
||||
pkg go/types, method (*TypeParamList) TypeParams() iter.Seq[*TypeParam] #66626
|
||||
pkg go/types, method (*Union) Terms() iter.Seq[*Term] #66626
|
||||
pkg hash/maphash, func Comparable[$0 comparable](Seed, $0) uint64 #54670
|
||||
pkg hash/maphash, func WriteComparable[$0 comparable](*Hash, $0) #54670
|
||||
pkg log/slog, method (*LevelVar) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg log/slog, method (Level) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg log/slog, var DiscardHandler Handler #62005
|
||||
pkg math/big, method (*Float) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg math/big, method (*Int) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg math/big, method (*Rat) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg math/rand/v2, method (*ChaCha8) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg math/rand/v2, method (*PCG) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net, method (IP) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/http, method (*Protocols) SetHTTP1(bool) #67814
|
||||
pkg net/http, method (*Protocols) SetHTTP2(bool) #67814
|
||||
pkg net/http, method (*Protocols) SetUnencryptedHTTP2(bool) #67816
|
||||
pkg net/http, method (Protocols) HTTP1() bool #67814
|
||||
pkg net/http, method (Protocols) HTTP2() bool #67814
|
||||
pkg net/http, method (Protocols) String() string #67814
|
||||
pkg net/http, method (Protocols) UnencryptedHTTP2() bool #67816
|
||||
pkg net/http, type HTTP2Config struct #67813
|
||||
pkg net/http, type HTTP2Config struct, CountError func(string) #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxConcurrentStreams int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxDecoderHeaderTableSize int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxEncoderHeaderTableSize int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxReadFrameSize int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxReceiveBufferPerConnection int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxReceiveBufferPerStream int #67813
|
||||
pkg net/http, type HTTP2Config struct, PermitProhibitedCipherSuites bool #67813
|
||||
pkg net/http, type HTTP2Config struct, PingTimeout time.Duration #67813
|
||||
pkg net/http, type HTTP2Config struct, SendPingTimeout time.Duration #67813
|
||||
pkg net/http, type HTTP2Config struct, WriteByteTimeout time.Duration #67813
|
||||
pkg net/http, type Protocols struct #67814
|
||||
pkg net/http, type Server struct, HTTP2 *HTTP2Config #67813
|
||||
pkg net/http, type Server struct, Protocols *Protocols #67814
|
||||
pkg net/http, type Transport struct, HTTP2 *HTTP2Config #67813
|
||||
pkg net/http, type Transport struct, Protocols *Protocols #67814
|
||||
pkg net/netip, method (Addr) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (Addr) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (AddrPort) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (AddrPort) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (Prefix) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (Prefix) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/url, method (*URL) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg os, func OpenInRoot(string, string) (*File, error) #67002
|
||||
pkg os, func OpenRoot(string) (*Root, error) #67002
|
||||
pkg os, method (*Root) Close() error #67002
|
||||
pkg os, method (*Root) Create(string) (*File, error) #67002
|
||||
pkg os, method (*Root) FS() fs.FS #67002
|
||||
pkg os, method (*Root) Lstat(string) (fs.FileInfo, error) #67002
|
||||
pkg os, method (*Root) Mkdir(string, fs.FileMode) error #67002
|
||||
pkg os, method (*Root) Name() string #67002
|
||||
pkg os, method (*Root) Open(string) (*File, error) #67002
|
||||
pkg os, method (*Root) OpenFile(string, int, fs.FileMode) (*File, error) #67002
|
||||
pkg os, method (*Root) OpenRoot(string) (*Root, error) #67002
|
||||
pkg os, method (*Root) Remove(string) error #67002
|
||||
pkg os, method (*Root) Stat(string) (fs.FileInfo, error) #67002
|
||||
pkg os, type Root struct #67002
|
||||
pkg regexp, method (*Regexp) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg runtime, func AddCleanup[$0 interface{}, $1 interface{}](*$0, func($1), $1) Cleanup #67535
|
||||
pkg runtime, func GOROOT //deprecated #51473
|
||||
pkg runtime, method (Cleanup) Stop() #67535
|
||||
pkg runtime, type Cleanup struct #67535
|
||||
pkg strings, func FieldsFuncSeq(string, func(int32) bool) iter.Seq[string] #61901
|
||||
pkg strings, func FieldsSeq(string) iter.Seq[string] #61901
|
||||
pkg strings, func Lines(string) iter.Seq[string] #61901
|
||||
pkg strings, func SplitAfterSeq(string, string) iter.Seq[string] #61901
|
||||
pkg strings, func SplitSeq(string, string) iter.Seq[string] #61901
|
||||
pkg testing, method (*B) Chdir(string) #62516
|
||||
pkg testing, method (*B) Context() context.Context #36532
|
||||
pkg testing, method (*B) Loop() bool #61515
|
||||
pkg testing, method (*F) Chdir(string) #62516
|
||||
pkg testing, method (*F) Context() context.Context #36532
|
||||
pkg testing, method (*T) Chdir(string) #62516
|
||||
pkg testing, method (*T) Context() context.Context #36532
|
||||
pkg testing, type TB interface, Chdir(string) #62516
|
||||
pkg testing, type TB interface, Context() context.Context #36532
|
||||
pkg time, method (Time) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg time, method (Time) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg weak, func Make[$0 interface{}](*$0) Pointer[$0] #67552
|
||||
pkg weak, method (Pointer[$0]) Value() *$0 #67552
|
||||
pkg weak, type Pointer[$0 interface{}] struct #67552
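Two of the additions listed above compose naturally: testing.(*B).Loop (#61515) and the new strings iterator functions (#61901). The snippet below is illustrative only, not part of this diff; it assumes the Go 1.24 signatures shown above, and the benchmark name and input string are made up.

```go
package split_test

import (
	"strings"
	"testing"
)

// BenchmarkSplitSeq exercises strings.SplitSeq, which yields fields lazily
// as an iter.Seq[string] instead of allocating a []string like strings.Split.
func BenchmarkSplitSeq(b *testing.B) {
	const csv = "alpha,beta,gamma,delta"
	// b.Loop (new in Go 1.24) replaces the classic `for i := 0; i < b.N; i++`
	// loop and keeps the loop body from being optimized away.
	for b.Loop() {
		n := 0
		for field := range strings.SplitSeq(csv, ",") {
			n += len(field)
		}
		_ = n
	}
}
```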
@@ -1 +0,0 @@
pkg mime/multipart, func FileContentDisposition(string, string) string #46771
@@ -1,8 +0,0 @@
pkg io/fs, func Lstat(FS, string) (FileInfo, error) #49580
pkg io/fs, func ReadLink(FS, string) (string, error) #49580
pkg io/fs, type ReadLinkFS interface { Lstat, Open, ReadLink } #49580
pkg io/fs, type ReadLinkFS interface, Lstat(string) (FileInfo, error) #49580
pkg io/fs, type ReadLinkFS interface, Open(string) (File, error) #49580
pkg io/fs, type ReadLinkFS interface, ReadLink(string) (string, error) #49580
pkg testing/fstest, method (MapFS) Lstat(string) (fs.FileInfo, error) #49580
pkg testing/fstest, method (MapFS) ReadLink(string) (string, error) #49580
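A minimal sketch of how the io/fs additions above (#49580) are meant to be used; the helper name is made up, and whether a particular FS (for example os.DirFS) implements ReadLinkFS depends on the Go version.

```go
package main

import (
	"fmt"
	"io/fs"
	"os"
)

// listSymlinks walks fsys and prints each symbolic link with its target,
// using fs.Lstat (which does not follow links) and fs.ReadLink.
func listSymlinks(fsys fs.FS) error {
	return fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		info, err := fs.Lstat(fsys, path)
		if err != nil {
			return err
		}
		if info.Mode()&fs.ModeSymlink == 0 {
			return nil
		}
		target, err := fs.ReadLink(fsys, path)
		if err != nil {
			return err
		}
		fmt.Printf("%s -> %s\n", path, target)
		return nil
	})
}

func main() {
	if err := listSymlinks(os.DirFS(".")); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```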
@@ -1 +0,0 @@
pkg sync, method (*WaitGroup) Go(func()) #63769
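WaitGroup.Go (#63769) bundles the Add(1)/Done bookkeeping with starting the goroutine. A small illustrative sketch, not part of the diff; the URLs and the work done per URL are made up.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	urls := []string{"https://example.com/a", "https://example.com/b"}

	var wg sync.WaitGroup
	for _, u := range urls {
		// wg.Go increments the counter, runs the function in a new
		// goroutine, and calls Done when it returns.
		wg.Go(func() {
			fmt.Println("fetching", u) // placeholder for real work
		})
	}
	wg.Wait()
}
```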
@@ -1,8 +0,0 @@
pkg os, method (*Root) Chmod(string, fs.FileMode) error #67002
pkg os, method (*Root) Chown(string, int, int) error #67002
pkg os, method (*Root) Chtimes(string, time.Time, time.Time) error #67002
pkg os, method (*Root) Lchown(string, int, int) error #67002
pkg os, method (*Root) Link(string, string) error #67002
pkg os, method (*Root) Readlink(string) (string, error) #67002
pkg os, method (*Root) Rename(string, string) error #67002
pkg os, method (*Root) Symlink(string, string) error #67002
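os.Root (#67002) confines filesystem operations to a single directory; paths that escape the root fail instead of resolving outside it. A hedged sketch assuming the Go 1.24 os.Root API plus the methods listed above; the directory and file names are made up.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// OpenRoot confines all subsequent operations to ./data.
	root, err := os.OpenRoot("data")
	if err != nil {
		panic(err)
	}
	defer root.Close()

	f, err := root.Create("report.txt") // creates data/report.txt
	if err != nil {
		panic(err)
	}
	f.Close()

	// Paths that escape the root are rejected rather than silently
	// resolving to a file outside the directory.
	if _, err := root.Open("../etc/passwd"); err != nil {
		fmt.Println("blocked:", err)
	}
}
```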
@@ -1 +0,0 @@
pkg crypto/tls, type ConnectionState struct, CurveID CurveID #67516
@@ -1,17 +0,0 @@
pkg go/types, const FieldVar = 6 #70250
pkg go/types, const FieldVar VarKind #70250
pkg go/types, const LocalVar = 2 #70250
pkg go/types, const LocalVar VarKind #70250
pkg go/types, const PackageVar = 1 #70250
pkg go/types, const PackageVar VarKind #70250
pkg go/types, const ParamVar = 4 #70250
pkg go/types, const ParamVar VarKind #70250
pkg go/types, const RecvVar = 3 #70250
pkg go/types, const RecvVar VarKind #70250
pkg go/types, const ResultVar = 5 #70250
pkg go/types, const ResultVar VarKind #70250
pkg go/types, func LookupSelection(Type, bool, *Package, string) (Selection, bool) #70737
pkg go/types, method (*Var) Kind() VarKind #70250
pkg go/types, method (*Var) SetKind(VarKind) #70250
pkg go/types, method (VarKind) String() string #70250
pkg go/types, type VarKind uint8 #70250
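A sketch of what the go/types VarKind additions above (#70250) enable: type-check a snippet and report what kind of variable each identifier declares. Illustrative only; the source snippet is made up.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p
var global int
func f(param int) (result int) { local := param; return local }
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{Defs: map[*ast.Ident]types.Object{}}
	var conf types.Config
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	for ident, obj := range info.Defs {
		if v, ok := obj.(*types.Var); ok {
			// Kind reports whether v is a package-level, local, receiver,
			// parameter, result, or struct-field variable.
			fmt.Printf("%s: %s\n", ident.Name, v.Kind())
		}
	}
}
```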
@@ -1 +0,0 @@
pkg log/slog, method (Record) Source() *Source #70280
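Record.Source (#70280) gives handlers direct access to the record's source location. The wrapper handler below is an assumption for illustration, not an API from the diff.

```go
package main

import (
	"context"
	"fmt"
	"log/slog"
	"os"
)

// callerHandler decorates each record with a short "caller" attribute
// derived from Record.Source before delegating to the wrapped handler.
type callerHandler struct{ slog.Handler }

func (h callerHandler) Handle(ctx context.Context, r slog.Record) error {
	if src := r.Source(); src != nil {
		r.AddAttrs(slog.String("caller", fmt.Sprintf("%s:%d", src.File, src.Line)))
	}
	return h.Handler.Handle(ctx, r)
}

func main() {
	logger := slog.New(callerHandler{slog.NewTextHandler(os.Stderr, nil)})
	logger.Info("hello")
}
```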
@@ -1,3 +0,0 @@
pkg unicode, var CategoryAliases map[string]string #70780
pkg unicode, var Cn *RangeTable #70780
pkg unicode, var LC *RangeTable #70780
@@ -1 +0,0 @@
pkg go/parser, func ParseDir //deprecated #71122
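go/parser.ParseDir is marked deprecated above (#71122). A hedged sketch of the usual replacement, parsing each .go file in a directory with parser.ParseFile; the directory name is made up.

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	const dir = "./mypkg" // hypothetical package directory
	fset := token.NewFileSet()

	entries, err := os.ReadDir(dir)
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		if e.IsDir() || !strings.HasSuffix(e.Name(), ".go") {
			continue
		}
		f, err := parser.ParseFile(fset, filepath.Join(dir, e.Name()), nil, parser.SkipObjectResolution)
		if err != nil {
			panic(err)
		}
		fmt.Println(f.Name.Name, e.Name())
	}
}
```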
@@ -1,4 +0,0 @@
pkg debug/elf, const PT_RISCV_ATTRIBUTES = 1879048195 #72843
pkg debug/elf, const PT_RISCV_ATTRIBUTES ProgType #72843
pkg debug/elf, const SHT_RISCV_ATTRIBUTES = 1879048195 #72843
pkg debug/elf, const SHT_RISCV_ATTRIBUTES SectionType #72843
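A small sketch using the debug/elf constants above (#72843) to locate a RISC-V attributes section; the binary path is made up.

```go
package main

import (
	"debug/elf"
	"fmt"
)

func main() {
	f, err := elf.Open("hello-riscv64") // hypothetical RISC-V binary
	if err != nil {
		panic(err)
	}
	defer f.Close()

	for _, s := range f.Sections {
		if s.Type == elf.SHT_RISCV_ATTRIBUTES {
			fmt.Printf("found %s (%d bytes of attributes)\n", s.Name, s.Size)
		}
	}
}
```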
@@ -1 +1,2 @@
-branch: master
+branch: release-branch.go1.23
+parent-branch: master

@@ -70,6 +70,6 @@ To begin the next release development cycle, populate the contents of `next`
 with those of `initial`. From the repo root:
 
 	> cd doc
-	> cp -R initial/ next
+	> cp -r initial/* next
 
 Then edit `next/1-intro.md` to refer to the next version.

doc/go1.17_spec.html (new file, 6864 lines; diff suppressed because it is too large)
@@ -82,7 +82,7 @@ while still insisting that races are errors and that tools can diagnose and report them.
 <p>
 The following formal definition of Go's memory model closely follows
 the approach presented by Hans-J. Boehm and Sarita V. Adve in
-“<a href="https://dl.acm.org/doi/10.1145/1375581.1375591">Foundations of the C++ Concurrency Memory Model</a>”,
+“<a href="https://www.hpl.hp.com/techreports/2008/HPL-2008-56.pdf">Foundations of the C++ Concurrency Memory Model</a>”,
 published in PLDI 2008.
 The definition of data-race-free programs and the guarantee of sequential consistency
 for race-free programs are equivalent to the ones in that work.
@@ -453,7 +453,7 @@ crash, or do something else.)
 </p>
 
 <p class="rule">
-The <i>k</i>th receive from a channel with capacity <i>C</i> is synchronized before the completion of the <i>k</i>+<i>C</i>th send on that channel.
+The <i>k</i>th receive on a channel with capacity <i>C</i> is synchronized before the completion of the <i>k</i>+<i>C</i>th send from that channel completes.
 </p>
 
 <p>
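The buffered-channel rule in the second hunk above is what makes a channel with capacity C usable as a counting semaphore: the k-th receive is synchronized before the (k+C)-th send completes, so at most C sends can be in flight at once. A small illustrative sketch, not taken from the diff; the worker body and limit are made up.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const limit = 3 // capacity C: at most 3 workers run at once

	sem := make(chan struct{}, limit)
	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		wg.Add(1)
		sem <- struct{}{} // the (k+C)-th send blocks until the k-th receive
		go func(id int) {
			defer wg.Done()
			defer func() { <-sem }() // receive frees a slot for a later send
			fmt.Println("working", id)
		}(i)
	}
	wg.Wait()
}
```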
doc/go_spec.html (474 changed lines)
@@ -1,6 +1,6 @@
 <!--{
 	"Title": "The Go Programming Language Specification",
-	"Subtitle": "Language version go1.25 (Feb 25, 2025)",
+	"Subtitle": "Language version go1.23 (June 13, 2024)",
 	"Path": "/ref/spec"
 }-->
 
@ -8,6 +8,8 @@
|
||||
|
||||
<p>
|
||||
This is the reference manual for the Go programming language.
|
||||
The pre-Go1.18 version, without generics, can be found
|
||||
<a href="/doc/go1.17_spec.html">here</a>.
|
||||
For more information and other documents, see <a href="/">go.dev</a>.
|
||||
</p>
|
||||
|
||||
@ -796,6 +798,7 @@ If a variable has not yet been assigned a value, its value is the
|
||||
<a href="#The_zero_value">zero value</a> for its type.
|
||||
</p>
|
||||
|
||||
|
||||
<h2 id="Types">Types</h2>
|
||||
|
||||
<p>
|
||||
@ -1083,7 +1086,7 @@ A field declared with a type but no explicit field name is called an <i>embedded
|
||||
An embedded field must be specified as
|
||||
a type name <code>T</code> or as a pointer to a non-interface type name <code>*T</code>,
|
||||
and <code>T</code> itself may not be
|
||||
a pointer type or type parameter. The unqualified type name acts as the field name.
|
||||
a pointer type. The unqualified type name acts as the field name.
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
@ -1124,7 +1127,7 @@ of a struct except that they cannot be used as field names in
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Given a struct type <code>S</code> and a type name
|
||||
Given a struct type <code>S</code> and a <a href="#Types">named type</a>
|
||||
<code>T</code>, promoted methods are included in the method set of the struct as follows:
|
||||
</p>
|
||||
<ul>
|
||||
@ -1197,7 +1200,7 @@ type (
|
||||
<p>
|
||||
A pointer type denotes the set of all pointers to <a href="#Variables">variables</a> of a given
|
||||
type, called the <i>base type</i> of the pointer.
|
||||
The <a href="#Representation_of_values">value</a> of an uninitialized pointer is <code>nil</code>.
|
||||
The value of an uninitialized pointer is <code>nil</code>.
|
||||
</p>
|
||||
|
||||
<pre class="ebnf">
|
||||
@ -1213,9 +1216,9 @@ BaseType = Type .
|
||||
<h3 id="Function_types">Function types</h3>
|
||||
|
||||
<p>
|
||||
A function type denotes the set of all functions with the same parameter and result types.
|
||||
The <a href="#Representation_of_values">value</a> of an uninitialized variable of function
|
||||
type is <code>nil</code>.
|
||||
A function type denotes the set of all functions with the same parameter
|
||||
and result types. The value of an uninitialized variable of function type
|
||||
is <code>nil</code>.
|
||||
</p>
|
||||
|
||||
<pre class="ebnf">
|
||||
@ -1264,8 +1267,7 @@ An interface type defines a <i>type set</i>.
|
||||
A variable of interface type can store a value of any type that is in the type
|
||||
set of the interface. Such a type is said to
|
||||
<a href="#Implementing_an_interface">implement the interface</a>.
|
||||
The <a href="#Representation_of_values">value</a> of an uninitialized variable of
|
||||
interface type is <code>nil</code>.
|
||||
The value of an uninitialized variable of interface type is <code>nil</code>.
|
||||
</p>
|
||||
|
||||
<pre class="ebnf">
|
||||
@ -1628,7 +1630,7 @@ implements the interface.
|
||||
A map is an unordered group of elements of one type, called the
|
||||
element type, indexed by a set of unique <i>keys</i> of another type,
|
||||
called the key type.
|
||||
The <a href="#Representation_of_values">value</a> of an uninitialized map is <code>nil</code>.
|
||||
The value of an uninitialized map is <code>nil</code>.
|
||||
</p>
|
||||
|
||||
<pre class="ebnf">
|
||||
@ -1691,7 +1693,7 @@ to communicate by
|
||||
<a href="#Send_statements">sending</a> and
|
||||
<a href="#Receive_operator">receiving</a>
|
||||
values of a specified element type.
|
||||
The <a href="#Representation_of_values">value</a> of an uninitialized channel is <code>nil</code>.
|
||||
The value of an uninitialized channel is <code>nil</code>.
|
||||
</p>
|
||||
|
||||
<pre class="ebnf">
|
||||
@ -1770,57 +1772,6 @@ received in the order sent.
|
||||
|
||||
<h2 id="Properties_of_types_and_values">Properties of types and values</h2>
|
||||
|
||||
<h3 id="Representation_of_values">Representation of values</h3>
|
||||
|
||||
<p>
|
||||
Values of predeclared types (see below for the interfaces <code>any</code>
|
||||
and <code>error</code>), arrays, and structs are self-contained:
|
||||
Each such value contains a complete copy of all its data,
|
||||
and <a href="#Variables">variables</a> of such types store the entire value.
|
||||
For instance, an array variable provides the storage (the variables)
|
||||
for all elements of the array.
|
||||
The respective <a href="#The_zero_value">zero values</a> are specific to the
|
||||
value's types; they are never <code>nil</code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Non-nil pointer, function, slice, map, and channel values contain references
|
||||
to underlying data which may be shared by multiple values:
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
A pointer value is a reference to the variable holding
|
||||
the pointer base type value.
|
||||
</li>
|
||||
<li>
|
||||
A function value contains references to the (possibly
|
||||
<a href="#Function_literals">anonymous</a>) function
|
||||
and enclosed variables.
|
||||
</li>
|
||||
<li>
|
||||
A slice value contains the slice length, capacity, and
|
||||
a reference to its <a href="#Slice_types">underlying array</a>.
|
||||
</li>
|
||||
<li>
|
||||
A map or channel value is a reference to the implementation-specific
|
||||
data structure of the map or channel.
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<p>
|
||||
An interface value may be self-contained or contain references to underlying data
|
||||
depending on the interface's <a href="#Variables">dynamic type</a>.
|
||||
The predeclared identifier <code>nil</code> is the zero value for types whose values
|
||||
can contain references.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
When multiple values share underlying data, changing one value may change another.
|
||||
For instance, changing an element of a <a href="#Slice_types">slice</a> will change
|
||||
that element in the underlying array for all slices that share the array.
|
||||
</p>
|
||||
|
||||
<h3 id="Underlying_types">Underlying types</h3>
|
||||
|
||||
<p>
|
||||
@ -1856,10 +1807,110 @@ The underlying type of <code>[]B1</code>, <code>B3</code>, and <code>B4</code> i
|
||||
The underlying type of <code>P</code> is <code>interface{}</code>.
|
||||
</p>
|
||||
|
||||
<h3 id="Core_types">Core types</h3>
|
||||
|
||||
<p>
|
||||
Each non-interface type <code>T</code> has a <i>core type</i>, which is the same as the
|
||||
<a href="#Underlying_types">underlying type</a> of <code>T</code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
An interface <code>T</code> has a core type if one of the following
|
||||
conditions is satisfied:
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<li>
|
||||
There is a single type <code>U</code> which is the <a href="#Underlying_types">underlying type</a>
|
||||
of all types in the <a href="#Interface_types">type set</a> of <code>T</code>; or
|
||||
</li>
|
||||
<li>
|
||||
the type set of <code>T</code> contains only <a href="#Channel_types">channel types</a>
|
||||
with identical element type <code>E</code>, and all directional channels have the same
|
||||
direction.
|
||||
</li>
|
||||
</ol>
|
||||
|
||||
<p>
|
||||
No other interfaces have a core type.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
The core type of an interface is, depending on the condition that is satisfied, either:
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<li>
|
||||
the type <code>U</code>; or
|
||||
</li>
|
||||
<li>
|
||||
the type <code>chan E</code> if <code>T</code> contains only bidirectional
|
||||
channels, or the type <code>chan<- E</code> or <code><-chan E</code>
|
||||
depending on the direction of the directional channels present.
|
||||
</li>
|
||||
</ol>
|
||||
|
||||
<p>
|
||||
By definition, a core type is never a <a href="#Type_definitions">defined type</a>,
|
||||
<a href="#Type_parameter_declarations">type parameter</a>, or
|
||||
<a href="#Interface_types">interface type</a>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Examples of interfaces with core types:
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
type Celsius float32
|
||||
type Kelvin float32
|
||||
|
||||
interface{ int } // int
|
||||
interface{ Celsius|Kelvin } // float32
|
||||
interface{ ~chan int } // chan int
|
||||
interface{ ~chan int|~chan<- int } // chan<- int
|
||||
interface{ ~[]*data; String() string } // []*data
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
Examples of interfaces without core types:
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
interface{} // no single underlying type
|
||||
interface{ Celsius|float64 } // no single underlying type
|
||||
interface{ chan int | chan<- string } // channels have different element types
|
||||
interface{ <-chan int | chan<- int } // directional channels have different directions
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
Some operations (<a href="#Slice_expressions">slice expressions</a>,
|
||||
<a href="#Appending_and_copying_slices"><code>append</code> and <code>copy</code></a>)
|
||||
rely on a slightly more loose form of core types which accept byte slices and strings.
|
||||
Specifically, if there are exactly two types, <code>[]byte</code> and <code>string</code>,
|
||||
which are the underlying types of all types in the type set of interface <code>T</code>,
|
||||
the core type of <code>T</code> is called <code>bytestring</code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Examples of interfaces with <code>bytestring</code> core types:
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
interface{ int } // int (same as ordinary core type)
|
||||
interface{ []byte | string } // bytestring
|
||||
interface{ ~[]byte | myString } // bytestring
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
Note that <code>bytestring</code> is not a real type; it cannot be used to declare
|
||||
variables or compose other types. It exists solely to describe the behavior of some
|
||||
operations that read from a sequence of bytes, which may be a byte slice or a string.
|
||||
</p>
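The bytestring notion described above is what lets a single generic function accept both byte slices and strings. A hedged sketch, not part of the spec text; the function name is made up.

```go
// hasHashPrefix works for any type whose underlying type is []byte or string:
// len and indexing are defined for every type in the constraint's type set,
// and the element type is byte in both cases.
func hasHashPrefix[T ~[]byte | ~string](s T) bool {
	return len(s) > 0 && s[0] == '#'
}

var (
	_ = hasHashPrefix("# heading")
	_ = hasHashPrefix([]byte("#!/bin/sh"))
)
```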
|
||||
|
||||
<h3 id="Type_identity">Type identity</h3>
|
||||
|
||||
<p>
|
||||
Two types are either <i>identical</i> ("the same") or <i>different</i>.
|
||||
Two types are either <i>identical</i> or <i>different</i>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
@ -1876,8 +1927,8 @@ components have identical types. In detail:
|
||||
<li>Two slice types are identical if they have identical element types.</li>
|
||||
|
||||
<li>Two struct types are identical if they have the same sequence of fields,
|
||||
and if corresponding pairs of fields have the same names, identical types,
|
||||
and identical tags, and are either both embedded or both not embedded.
|
||||
and if corresponding fields have the same names, and identical types,
|
||||
and identical tags.
|
||||
<a href="#Exported_identifiers">Non-exported</a> field names from different
|
||||
packages are always different.</li>
|
||||
|
||||
@ -2460,12 +2511,12 @@ An alias declaration binds an identifier to the given type
|
||||
</p>
|
||||
|
||||
<pre class="ebnf">
|
||||
AliasDecl = identifier [ TypeParameters ] "=" Type .
|
||||
AliasDecl = identifier "=" Type .
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
Within the <a href="#Declarations_and_scope">scope</a> of
|
||||
the identifier, it serves as an <i>alias</i> for the given type.
|
||||
the identifier, it serves as an <i>alias</i> for the type.
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
@ -2475,24 +2526,6 @@ type (
|
||||
)
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
If the alias declaration specifies <a href="#Type_parameter_declarations">type parameters</a>
|
||||
[<a href="#Go_1.24">Go 1.24</a>], the type name denotes a <i>generic alias</i>.
|
||||
Generic aliases must be <a href="#Instantiations">instantiated</a> when they
|
||||
are used.
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
type set[P comparable] = map[P]bool
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
In an alias declaration the given type cannot be a type parameter.
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
type A[P any] = P // illegal: P is a type parameter
|
||||
</pre>
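The generic alias declarations removed in this hunk (a Go 1.24 feature, absent from the go1.23 spec) can be exercised as below. A small sketch, not spec text.

```go
package main

import "fmt"

// set is a generic alias: set[P] denotes map[P]bool, not a new defined type.
type set[P comparable] = map[P]bool

func main() {
	s := set[string]{"go": true}
	s["spec"] = true

	// Because set[string] is an alias for map[string]bool, a plain map of
	// that type is assignable to and from it without conversion.
	var m map[string]bool = s
	fmt.Println(len(m), m["go"])
}
```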
|
||||
|
||||
<h4 id="Type_definitions">Type definitions</h4>
|
||||
|
||||
@ -2768,7 +2801,7 @@ values or variables, or components of other, non-interface types.
|
||||
|
||||
<p>
|
||||
A type argument <code>T</code><i> satisfies</i> a type constraint <code>C</code>
|
||||
if <code>T</code> is an element of the type set defined by <code>C</code>; in other words,
|
||||
if <code>T</code> is an element of the type set defined by <code>C</code>; i.e.,
|
||||
if <code>T</code> <a href="#Implementing_an_interface">implements</a> <code>C</code>.
|
||||
As an exception, a <a href="#Comparison_operators">strictly comparable</a>
|
||||
type constraint may also be satisfied by a <a href="#Comparison_operators">comparable</a>
|
||||
@ -2848,7 +2881,7 @@ initialization value in the assignment.
|
||||
If that value is an untyped constant, it is first implicitly
|
||||
<a href="#Conversions">converted</a> to its <a href="#Constants">default type</a>;
|
||||
if it is an untyped boolean value, it is first implicitly converted to type <code>bool</code>.
|
||||
The predeclared identifier <code>nil</code> cannot be used to initialize a variable
|
||||
The predeclared value <code>nil</code> cannot be used to initialize a variable
|
||||
with no explicit type.
|
||||
</p>
|
||||
|
||||
@ -3042,7 +3075,7 @@ to the base type <code>Point</code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
If the receiver base type is a <a href="#Type_definitions">generic type</a>, the
|
||||
If the receiver base type is a <a href="#Type_declarations">generic type</a>, the
|
||||
receiver specification must declare corresponding type parameters for the method
|
||||
to use. This makes the receiver type parameters available to the method.
|
||||
Syntactically, this type parameter declaration looks like an
|
||||
@ -3066,22 +3099,6 @@ func (p Pair[A, B]) Swap() Pair[B, A] { … } // receiver declares A, B
|
||||
func (p Pair[First, _]) First() First { … } // receiver declares First, corresponds to A in Pair
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
If the receiver type is denoted by (a pointer to) an <a href="#Alias_declarations">alias</a>,
|
||||
the alias must not be generic and it must not denote an instantiated generic type, neither
|
||||
directly nor indirectly via another alias, and irrespective of pointer indirections.
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
type GPoint[P any] = Point
|
||||
type HPoint = *GPoint[int]
|
||||
type IPair = Pair[int, int]
|
||||
|
||||
func (*GPoint[P]) Draw(P) { … } // illegal: alias must not be generic
|
||||
func (HPoint) Draw(P) { … } // illegal: alias must not denote instantiated type GPoint[int]
|
||||
func (*IPair) Second() int { … } // illegal: alias must not denote instantiated type Pair[int, int]
|
||||
</pre>
|
||||
|
||||
<h2 id="Expressions">Expressions</h2>
|
||||
|
||||
<p>
|
||||
@ -3153,8 +3170,7 @@ math.Sin // denotes the Sin function in package math
|
||||
<h3 id="Composite_literals">Composite literals</h3>
|
||||
|
||||
<p>
|
||||
Composite literals construct new values for structs, arrays, slices, and maps
|
||||
each time they are evaluated.
|
||||
Composite literals construct new composite values each time they are evaluated.
|
||||
They consist of the type of the literal followed by a brace-bound list of elements.
|
||||
Each element may optionally be preceded by a corresponding key.
|
||||
</p>
|
||||
@ -3172,14 +3188,10 @@ Element = Expression | LiteralValue .
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
Unless the LiteralType is a type parameter,
|
||||
its <a href="#Underlying_types">underlying type
|
||||
The LiteralType's <a href="#Core_types">core type</a> <code>T</code>
|
||||
must be a struct, array, slice, or map type
|
||||
(the syntax enforces this constraint except when the type is given
|
||||
as a TypeName).
|
||||
If the LiteralType is a type parameter, all types in its type set
|
||||
must have the same underlying type which must be
|
||||
a valid composite literal type.
|
||||
The types of the elements and keys must be <a href="#Assignability">assignable</a>
|
||||
to the respective field, element, and key types of type <code>T</code>;
|
||||
there is no additional conversion.
|
||||
@ -3364,6 +3376,7 @@ noteFrequency := map[string]float32{
|
||||
}
|
||||
</pre>
|
||||
|
||||
|
||||
<h3 id="Function_literals">Function literals</h3>
|
||||
|
||||
<p>
|
||||
@ -3403,7 +3416,8 @@ Primary expressions are the operands for unary and binary expressions.
|
||||
</p>
|
||||
|
||||
<pre class="ebnf">
|
||||
PrimaryExpr = Operand |
|
||||
PrimaryExpr =
|
||||
Operand |
|
||||
Conversion |
|
||||
MethodExpr |
|
||||
PrimaryExpr Selector |
|
||||
@ -3836,12 +3850,11 @@ The following rules apply:
|
||||
</p>
|
||||
|
||||
<p>
|
||||
If <code>a</code> is neither a map nor a <a href="#Type_parameter_declarations">type parameter</a>:
|
||||
If <code>a</code> is neither a map nor a type parameter:
|
||||
</p>
|
||||
<ul>
|
||||
<li>the index <code>x</code> must be an untyped constant, or its type must be
|
||||
an <a href="#Numeric_types">integer</a> or a type parameter whose type set
|
||||
contains only integer types</li>
|
||||
<li>the index <code>x</code> must be an untyped constant or its
|
||||
<a href="#Core_types">core type</a> must be an <a href="#Numeric_types">integer</a></li>
|
||||
<li>a constant index must be non-negative and
|
||||
<a href="#Representability">representable</a> by a value of type <code>int</code></li>
|
||||
<li>a constant index that is untyped is given type <code>int</code></li>
|
||||
@ -3955,26 +3968,14 @@ Assigning to an element of a <code>nil</code> map causes a
|
||||
|
||||
<p>
|
||||
Slice expressions construct a substring or slice from a string, array, pointer
|
||||
to array, or slice operand.
|
||||
There are two variants: a simple form that specifies a low
|
||||
to array, or slice. There are two variants: a simple form that specifies a low
|
||||
and high bound, and a full form that also specifies a bound on the capacity.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
If the operand type is a <a href="#Type_parameter_declarations">type parameter</a>,
|
||||
unless its type set contains string types,
|
||||
all types in the type set must have the same underlying type, and the slice expression
|
||||
must be valid for an operand of that type.
|
||||
If the type set contains string types it may also contain byte slices with underlying
|
||||
type <code>[]byte</code>.
|
||||
In this case, the slice expression must be valid for an operand of <code>string</code>
|
||||
type.
|
||||
</p>
|
||||
|
||||
<h4>Simple slice expressions</h4>
|
||||
|
||||
<p>
|
||||
For a string, array, pointer to array, or slice <code>a</code>, the primary expression
|
||||
The primary expression
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
@ -3982,7 +3983,9 @@ a[low : high]
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
constructs a substring or slice.
|
||||
constructs a substring or slice. The <a href="#Core_types">core type</a> of
|
||||
<code>a</code> must be a string, array, pointer to array, slice, or a
|
||||
<a href="#Core_types"><code>bytestring</code></a>.
|
||||
The <i>indices</i> <code>low</code> and
|
||||
<code>high</code> select which elements of operand <code>a</code> appear
|
||||
in the result. The result has indices starting at 0 and length equal to
|
||||
@ -4062,7 +4065,7 @@ s3 := s[:0] // s3 == nil
|
||||
<h4>Full slice expressions</h4>
|
||||
|
||||
<p>
|
||||
For an array, pointer to array, or slice <code>a</code> (but not a string), the primary expression
|
||||
The primary expression
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
@ -4073,6 +4076,8 @@ a[low : high : max]
|
||||
constructs a slice of the same type, and with the same length and elements as the simple slice
|
||||
expression <code>a[low : high]</code>. Additionally, it controls the resulting slice's capacity
|
||||
by setting it to <code>max - low</code>. Only the first index may be omitted; it defaults to 0.
|
||||
The <a href="#Core_types">core type</a> of <code>a</code> must be an array, pointer to array,
|
||||
or slice (but not a string).
|
||||
After slicing the array <code>a</code>
|
||||
</p>
|
||||
|
||||
@ -4178,8 +4183,8 @@ No <a href="#Run_time_panics">run-time panic</a> occurs in this case.
|
||||
<h3 id="Calls">Calls</h3>
|
||||
|
||||
<p>
|
||||
Given an expression <code>f</code> of <a href="#Function_types">function type</a>
|
||||
<code>F</code>,
|
||||
Given an expression <code>f</code> with a <a href="#Core_types">core type</a>
|
||||
<code>F</code> of <a href="#Function_types">function type</a>,
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
@ -4191,7 +4196,8 @@ calls <code>f</code> with arguments <code>a1, a2, … an</code>.
|
||||
Except for one special case, arguments must be single-valued expressions
|
||||
<a href="#Assignability">assignable</a> to the parameter types of
|
||||
<code>F</code> and are evaluated before the function is called.
|
||||
The type of the expression is the result type of <code>F</code>.
|
||||
The type of the expression is the result type
|
||||
of <code>F</code>.
|
||||
A method invocation is similar but the method itself
|
||||
is specified as a selector upon a value of the receiver type for
|
||||
the method.
|
||||
@ -4209,23 +4215,12 @@ If <code>f</code> denotes a generic function, it must be
|
||||
or used as a function value.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
If the type of <code>f</code> is a <a href="#Type_parameter_declarations">type parameter</a>,
|
||||
all types in its type set must have the same underlying type, which must be a function type,
|
||||
and the function call must be valid for that type.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
In a function call, the function value and arguments are evaluated in
|
||||
<a href="#Order_of_evaluation">the usual order</a>.
|
||||
After they are evaluated, new storage is allocated for the function's
|
||||
<a href="#Variables">variables</a>, which includes its parameters
|
||||
and results.
|
||||
Then, the arguments of the call are <i>passed</i> to the function,
|
||||
which means that they are <a href="#Assignment_statements">assigned</a>
|
||||
to their corresponding function parameters,
|
||||
After they are evaluated, the parameters of the call are passed by value to the function
|
||||
and the called function begins execution.
|
||||
The return parameters of the function are passed
|
||||
The return parameters of the function are passed by value
|
||||
back to the caller when the function returns.
|
||||
</p>
|
||||
|
||||
@ -4239,9 +4234,9 @@ As a special case, if the return values of a function or method
|
||||
<code>g</code> are equal in number and individually
|
||||
assignable to the parameters of another function or method
|
||||
<code>f</code>, then the call <code>f(g(<i>parameters_of_g</i>))</code>
|
||||
will invoke <code>f</code> after passing the return values of
|
||||
<code>g</code> to the parameters of <code>f</code> in order.
|
||||
The call of <code>f</code> must contain no parameters other than the call of <code>g</code>,
|
||||
will invoke <code>f</code> after binding the return values of
|
||||
<code>g</code> to the parameters of <code>f</code> in order. The call
|
||||
of <code>f</code> must contain no parameters other than the call of <code>g</code>,
|
||||
and <code>g</code> must have at least one return value.
|
||||
If <code>f</code> has a final <code>...</code> parameter, it is
|
||||
assigned the return values of <code>g</code> that remain after
|
||||
@ -4287,7 +4282,7 @@ If <code>f</code> is <a href="#Function_types">variadic</a> with a final
|
||||
parameter <code>p</code> of type <code>...T</code>, then within <code>f</code>
|
||||
the type of <code>p</code> is equivalent to type <code>[]T</code>.
|
||||
If <code>f</code> is invoked with no actual arguments for <code>p</code>,
|
||||
the value <a href="#Calls">passed</a> to <code>p</code> is <code>nil</code>.
|
||||
the value passed to <code>p</code> is <code>nil</code>.
|
||||
Otherwise, the value passed is a new slice
|
||||
of type <code>[]T</code> with a new underlying array whose successive elements
|
||||
are the actual arguments, which all must be <a href="#Assignability">assignable</a>
|
||||
@ -4728,28 +4723,17 @@ more complicated:
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
If all types in <code>C</code>'s type set have the same
|
||||
underlying type <code>U</code>,
|
||||
If <code>C</code> has a <a href="#Core_types">core type</a>
|
||||
<code>core(C)</code>
|
||||
and <code>P</code> has a known type argument <code>A</code>,
|
||||
<code>U</code> and <code>A</code> must unify loosely.
|
||||
</li>
|
||||
<li>
|
||||
Similarly, if all types in <code>C</code>'s type set are
|
||||
channel types with the same element type and non-conflicting
|
||||
channel directions,
|
||||
and <code>P</code> has a known type argument <code>A</code>,
|
||||
the most restrictive channel type in <code>C</code>'s type
|
||||
set and <code>A</code> must unify loosely.
|
||||
</li>
|
||||
<li>
|
||||
<code>core(C)</code> and <code>A</code> must unify loosely.
|
||||
If <code>P</code> does not have a known type argument
|
||||
and <code>C</code> contains exactly one type term <code>T</code>
|
||||
that is not an underlying (tilde) type, unification adds the
|
||||
mapping <code>P ➞ T</code> to the map.
|
||||
</li>
|
||||
<li>
|
||||
If <code>C</code> does not have a type <code>U</code>
|
||||
as described above
|
||||
If <code>C</code> does not have a core type
|
||||
and <code>P</code> has a known type argument <code>A</code>,
|
||||
<code>A</code> must have all methods of <code>C</code>, if any,
|
||||
and corresponding method types must unify exactly.
|
||||
@ -5300,10 +5284,10 @@ var x *int = nil
|
||||
<h3 id="Receive_operator">Receive operator</h3>
|
||||
|
||||
<p>
|
||||
For an operand <code>ch</code> of <a href="#Channel_types">channel type</a>,
|
||||
For an operand <code>ch</code> whose <a href="#Core_types">core type</a> is a
|
||||
<a href="#Channel_types">channel</a>,
|
||||
the value of the receive operation <code><-ch</code> is the value received
|
||||
from the channel <code>ch</code>.
|
||||
The channel direction must permit receive operations,
|
||||
from the channel <code>ch</code>. The channel direction must permit receive operations,
|
||||
and the type of the receive operation is the element type of the channel.
|
||||
The expression blocks until a value is available.
|
||||
Receiving from a <code>nil</code> channel blocks forever.
|
||||
@ -5319,12 +5303,6 @@ f(<-ch)
|
||||
<-strobe // wait until clock pulse and discard received value
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
If the operand type is a <a href="#Type_parameter_declarations">type parameter</a>,
|
||||
all types in its type set must be channel types that permit receive operations, and
|
||||
they must all have the same element type, which is the type of the receive operation.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
A receive expression used in an <a href="#Assignment_statements">assignment statement</a> or initialization of the special form
|
||||
</p>
|
||||
@ -5620,8 +5598,6 @@ myString([]myRune{0x1f30e}) // "\U0001f30e" == "🌎"
|
||||
<li>
|
||||
Converting a value of a string type to a slice of bytes type
|
||||
yields a non-nil slice whose successive elements are the bytes of the string.
|
||||
The <a href="#Length_and_capacity">capacity</a> of the resulting slice is
|
||||
implementation-specific and may be larger than the slice length.
|
||||
|
||||
<pre>
|
||||
[]byte("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
|
||||
@ -5637,8 +5613,6 @@ bytes("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
|
||||
<li>
|
||||
Converting a value of a string type to a slice of runes type
|
||||
yields a slice containing the individual Unicode code points of the string.
|
||||
The <a href="#Length_and_capacity">capacity</a> of the resulting slice is
|
||||
implementation-specific and may be larger than the slice length.
|
||||
|
||||
<pre>
|
||||
[]rune(myString("白鵬翔")) // []rune{0x767d, 0x9d6c, 0x7fd4}
|
||||
@ -5840,7 +5814,7 @@ Otherwise, when evaluating the <a href="#Operands">operands</a> of an
|
||||
expression, assignment, or
|
||||
<a href="#Return_statements">return statement</a>,
|
||||
all function calls, method calls,
|
||||
<a href="#Receive_operator">receive operations</a>,
|
||||
<a href="#Receive operator">receive operations</a>,
|
||||
and <a href="#Logical_operators">binary logical operations</a>
|
||||
are evaluated in lexical left-to-right order.
|
||||
</p>
|
||||
@ -5908,7 +5882,8 @@ Statements control execution.
|
||||
</p>
|
||||
|
||||
<pre class="ebnf">
|
||||
Statement = Declaration | LabeledStmt | SimpleStmt |
|
||||
Statement =
|
||||
Declaration | LabeledStmt | SimpleStmt |
|
||||
GoStmt | ReturnStmt | BreakStmt | ContinueStmt | GotoStmt |
|
||||
FallthroughStmt | Block | IfStmt | SwitchStmt | SelectStmt | ForStmt |
|
||||
DeferStmt .
|
||||
@ -6060,7 +6035,8 @@ len("foo") // illegal if len is the built-in function
|
||||
|
||||
<p>
|
||||
A send statement sends a value on a channel.
|
||||
The channel expression must be of <a href="#Channel_types">channel type</a>,
|
||||
The channel expression's <a href="#Core_types">core type</a>
|
||||
must be a <a href="#Channel_types">channel</a>,
|
||||
the channel direction must permit send operations,
|
||||
and the type of the value to be sent must be <a href="#Assignability">assignable</a>
|
||||
to the channel's element type.
|
||||
@ -6084,13 +6060,6 @@ A send on a <code>nil</code> channel blocks forever.
|
||||
ch <- 3 // send value 3 to channel ch
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
If the type of the channel expression is a
|
||||
<a href="#Type_parameter_declarations">type parameter</a>,
|
||||
all types in its type set must be channel types that permit send operations,
|
||||
they must all have the same element type,
|
||||
and the type of the value to be sent must be assignable to that element type.
|
||||
</p>
|
||||
|
||||
<h3 id="IncDec_statements">IncDec statements</h3>
|
||||
|
||||
@ -6258,26 +6227,6 @@ to the type of the operand to which it is assigned, with the following special c
|
||||
</li>
|
||||
</ol>
|
||||
|
||||
<p>
|
||||
When a value is assigned to a variable, only the data that is stored in the variable
|
||||
is replaced. If the value contains a <a href="#Representation_of_values">reference</a>,
|
||||
the assignment copies the reference but does not make a copy of the referenced data
|
||||
(such as the underlying array of a slice).
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
var s1 = []int{1, 2, 3}
|
||||
var s2 = s1 // s2 stores the slice descriptor of s1
|
||||
s1 = s1[:1] // s1's length is 1 but it still shares its underlying array with s2
|
||||
s2[0] = 42 // setting s2[0] changes s1[0] as well
|
||||
fmt.Println(s1, s2) // prints [42] [42 2 3]
|
||||
|
||||
var m1 = make(map[string]int)
|
||||
var m2 = m1 // m2 stores the map descriptor of m1
|
||||
m1["foo"] = 42 // setting m1["foo"] changes m2["foo"] as well
|
||||
fmt.Println(m2["foo"]) // prints 42
|
||||
</pre>
|
||||
|
||||
<h3 id="If_statements">If statements</h3>
|
||||
|
||||
<p>
|
||||
@ -6683,7 +6632,8 @@ RangeClause = [ ExpressionList "=" | IdentifierList ":=" ] "range" Expression .
|
||||
|
||||
<p>
|
||||
The expression on the right in the "range" clause is called the <i>range expression</i>,
|
||||
which may be an array, pointer to an array, slice, string, map, channel permitting
|
||||
its <a href="#Core_types">core type</a> must be
|
||||
an array, pointer to an array, slice, string, map, channel permitting
|
||||
<a href="#Receive_operator">receive operations</a>, an integer, or
|
||||
a function with specific signature (see below).
|
||||
As with an assignment, if present the operands on the left must be
|
||||
@ -6897,12 +6847,6 @@ for k, v := range t.Walk {
|
||||
}
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
If the type of the range expression is a <a href="#Type_parameter_declarations">type parameter</a>,
|
||||
all types in its type set must have the same underlying type and the range expression must be valid
|
||||
for that type, or, if the type set contains channel types, it must only contain channel types with
|
||||
identical element types, and all channel types must permit receive operations.
|
||||
</p>
|
||||
|
||||
<h3 id="Go_statements">Go statements</h3>
|
||||
|
||||
@ -7376,28 +7320,23 @@ by the arguments overlaps.
|
||||
|
||||
<p>
|
||||
The <a href="#Function_types">variadic</a> function <code>append</code>
|
||||
appends zero or more values <code>x</code> to a slice <code>s</code> of
|
||||
type <code>S</code> and returns the resulting slice, also of type
|
||||
<code>S</code>.
|
||||
appends zero or more values <code>x</code> to a slice <code>s</code>
|
||||
and returns the resulting slice of the same type as <code>s</code>.
|
||||
The <a href="#Core_types">core type</a> of <code>s</code> must be a slice
|
||||
of type <code>[]E</code>.
|
||||
The values <code>x</code> are passed to a parameter of type <code>...E</code>
|
||||
where <code>E</code> is the element type of <code>S</code>
|
||||
and the respective <a href="#Passing_arguments_to_..._parameters">parameter
|
||||
passing rules</a> apply.
|
||||
As a special case, <code>append</code> also accepts a first argument assignable
|
||||
to type <code>[]byte</code> with a second argument of string type followed by
|
||||
<code>...</code>.
|
||||
This form appends the bytes of the string.
|
||||
As a special case, if the core type of <code>s</code> is <code>[]byte</code>,
|
||||
<code>append</code> also accepts a second argument with core type
|
||||
<a href="#Core_types"><code>bytestring</code></a> followed by <code>...</code>.
|
||||
This form appends the bytes of the byte slice or string.
|
||||
</p>
|
||||
|
||||
<pre class="grammar">
|
||||
append(s S, x ...E) S // E is the element type of S
|
||||
append(s S, x ...E) S // core type of S is []E
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
If <code>S</code> is a <a href="#Type_parameter_declarations">type parameter</a>,
|
||||
all types in its type set must have the same underlying slice type <code>[]E</code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
If the capacity of <code>s</code> is not large enough to fit the additional
|
||||
values, <code>append</code> <a href="#Allocation">allocates</a> a new, sufficiently large underlying
|
||||
@ -7423,14 +7362,14 @@ b = append(b, "bar"...) // append string contents b is []byte{'b
|
||||
The function <code>copy</code> copies slice elements from
|
||||
a source <code>src</code> to a destination <code>dst</code> and returns the
|
||||
number of elements copied.
|
||||
Both arguments must have <a href="#Type_identity">identical</a> element type
|
||||
<code>E</code> and must be assignable to a slice of type <code>[]E</code>.
|
||||
The <a href="#Core_types">core types</a> of both arguments must be slices
|
||||
with <a href="#Type_identity">identical</a> element type.
|
||||
The number of elements copied is the minimum of
|
||||
<code>len(src)</code> and <code>len(dst)</code>.
|
||||
As a special case, <code>copy</code> also accepts a destination argument
|
||||
assignable to type <code>[]byte</code> with a source argument of a
|
||||
<code>string</code> type.
|
||||
This form copies the bytes from the string into the byte slice.
|
||||
As a special case, if the destination's core type is <code>[]byte</code>,
|
||||
<code>copy</code> also accepts a source argument with core type
|
||||
<a href="#Core_types"><code>bytestring</code></a>.
|
||||
This form copies the bytes from the byte slice or string into the byte slice.
|
||||
</p>
|
||||
|
||||
<pre class="grammar">
|
||||
@ -7438,11 +7377,6 @@ copy(dst, src []T) int
|
||||
copy(dst []byte, src string) int
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
If the type of one or both arguments is a <a href="#Type_parameter_declarations">type parameter</a>,
|
||||
all types in their respective type sets must have the same underlying slice type <code>[]E</code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Examples:
|
||||
</p>
|
||||
@ -7493,7 +7427,8 @@ If the map or slice is <code>nil</code>, <code>clear</code> is a no-op.
|
||||
<h3 id="Close">Close</h3>
|
||||
|
||||
<p>
|
||||
For a channel <code>ch</code>, the built-in function <code>close(ch)</code>
|
||||
For an argument <code>ch</code> with a <a href="#Core_types">core type</a>
|
||||
that is a <a href="#Channel_types">channel</a>, the built-in function <code>close</code>
|
||||
records that no more values will be sent on the channel.
|
||||
It is an error if <code>ch</code> is a receive-only channel.
|
||||
Sending to or closing a closed channel causes a <a href="#Run_time_panics">run-time panic</a>.
|
||||
@ -7505,12 +7440,6 @@ The multi-valued <a href="#Receive_operator">receive operation</a>
|
||||
returns a received value along with an indication of whether the channel is closed.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
If the type of the argument to <code>close</code> is a
|
||||
<a href="#Type_parameter_declarations">type parameter</a>,
|
||||
all types in its type set must be channels with the same element type.
|
||||
It is an error if any of those channels is a receive-only channel.
|
||||
</p>
|
||||
|
||||
<h3 id="Complex_numbers">Manipulating complex numbers</h3>
|
||||
|
||||
@ -7680,15 +7609,16 @@ var z complex128
|
||||
|
||||
<p>
|
||||
The built-in function <code>make</code> takes a type <code>T</code>,
|
||||
which must be a slice, map or channel type, or a type parameter,
|
||||
optionally followed by a type-specific list of expressions.
|
||||
The <a href="#Core_types">core type</a> of <code>T</code> must
|
||||
be a slice, map or channel.
|
||||
It returns a value of type <code>T</code> (not <code>*T</code>).
|
||||
The memory is initialized as described in the section on
|
||||
<a href="#The_zero_value">initial values</a>.
|
||||
</p>
|
||||
|
||||
<pre class="grammar">
|
||||
Call Type T Result
|
||||
Call Core type Result
|
||||
|
||||
make(T, n) slice slice of type T with length n and capacity n
|
||||
make(T, n, m) slice slice of type T with length n and capacity m
|
||||
@ -7698,18 +7628,8 @@ make(T, n) map map of type T with initial space for approxim
|
||||
|
||||
make(T) channel unbuffered channel of type T
|
||||
make(T, n) channel buffered channel of type T, buffer size n
|
||||
|
||||
make(T, n) type parameter see below
|
||||
make(T, n, m) type parameter see below
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
If the first argument is a <a href="#Type_parameter_declarations">type parameter</a>,
|
||||
all types in its type set must have the same underlying type, which must be a slice
|
||||
or map type, or, if there are channel types, there must only be channel types, they
|
||||
must all have the same element type, and the channel directions must not conflict.
|
||||
</p>
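<p>
For illustration (a sketch, not part of the spec diff): <code>make</code> applied to a type-parameter operand whose type set shares the underlying slice type <code>[]int</code>.
</p>

<pre>
package main

import "fmt"

type Ints []int

// Every type in S's type set has underlying type []int,
// so make(S, n) is permitted and yields a value of type S.
func newSlice[S ~[]int](n int) S {
	return make(S, n)
}

func main() {
	s := newSlice[Ints](3)
	fmt.Println(len(s), cap(s)) // 3 3
}
</pre>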
|
||||
|
||||
<p>
|
||||
Each of the size arguments <code>n</code> and <code>m</code> must be of <a href="#Numeric_types">integer type</a>,
|
||||
have a <a href="#Interface_types">type set</a> containing only integer types,
|
||||
@ -7894,7 +7814,7 @@ causes a <a href="#Run_time_panics">run-time panic</a>.
|
||||
<p>
|
||||
The <code>protect</code> function in the example below invokes
|
||||
the function argument <code>g</code> and protects callers from
|
||||
run-time panics caused by <code>g</code>.
|
||||
run-time panics raised by <code>g</code>.
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
@ -8452,14 +8372,17 @@ func String(ptr *byte, len IntegerType) string
|
||||
func StringData(str string) *byte
|
||||
</pre>
|
||||
|
||||
<!--
|
||||
These conversions also apply to type parameters with suitable core types.
|
||||
Determine if we can simply use core type instead of underlying type here,
|
||||
or if the general conversion rules take care of this.
|
||||
-->
|
||||
|
||||
<p>
|
||||
A <code>Pointer</code> is a <a href="#Pointer_types">pointer type</a> but a <code>Pointer</code>
|
||||
value may not be <a href="#Address_operators">dereferenced</a>.
|
||||
Any pointer or value of <a href="#Underlying_types">underlying type</a> <code>uintptr</code> can be
|
||||
<a href="#Conversions">converted</a> to a type of underlying type <code>Pointer</code> and vice versa.
|
||||
If the respective types are <a href="#Type_parameter_declarations">type parameters</a>, all types in
|
||||
their respective type sets must have the same underlying type, which must be <code>uintptr</code> and
|
||||
<code>Pointer</code>, respectively.
|
||||
Any pointer or value of <a href="#Core_types">core type</a> <code>uintptr</code> can be
|
||||
<a href="#Conversions">converted</a> to a type of core type <code>Pointer</code> and vice versa.
|
||||
The effect of converting between <code>Pointer</code> and <code>uintptr</code> is implementation-defined.
|
||||
</p>
|
||||
|
||||
@ -8480,7 +8403,7 @@ var p ptr = nil
|
||||
<p>
|
||||
The functions <code>Alignof</code> and <code>Sizeof</code> take an expression <code>x</code>
|
||||
of any type and return the alignment or size, respectively, of a hypothetical variable <code>v</code>
|
||||
as if <code>v</code> were declared via <code>var v = x</code>.
|
||||
as if <code>v</code> was declared via <code>var v = x</code>.
|
||||
</p>
|
||||
<p>
|
||||
The function <code>Offsetof</code> takes a (possibly parenthesized) <a href="#Selectors">selector</a>
|
||||
@ -8764,15 +8687,6 @@ integer values from zero to an upper limit.
|
||||
function as range expression.
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<h4 id="Go_1.24">Go 1.24</h4>
|
||||
<ul>
|
||||
<li>
|
||||
An <a href="#Alias_declarations">alias declaration</a> may declare
|
||||
<a href="#Type_parameter_declarations">type parameters</a>.
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<h3 id="Type_unification_rules">Type unification rules</h3>
|
||||
|
||||
<p>
|
||||
@ -8813,9 +8727,9 @@ following conditions is true:
|
||||
</li>
|
||||
<li>
|
||||
Exactly one type is an <a href="#Type_inference">unbound</a>
|
||||
type parameter, and all the types in its type set unify with
|
||||
the other type
|
||||
per the unification rules for <code>≡<sub>A</sub></code>
|
||||
type parameter with a <a href="#Core_types">core type</a>,
|
||||
and that core type unifies with the other type per the
|
||||
unification rules for <code>≡<sub>A</sub></code>
|
||||
(loose unification at the top level and exact unification
|
||||
for element types).
|
||||
</li>
|
||||
|
105
doc/godebug.md
@ -34,7 +34,6 @@ For example, if a Go program is running in an environment that contains
|
||||
|
||||
then that Go program will disable the use of HTTP/2 by default in both
|
||||
the HTTP client and the HTTP server.
|
||||
Unrecognized settings in the `GODEBUG` environment variable are ignored.
|
||||
It is also possible to set the default `GODEBUG` for a given program
|
||||
(discussed below).
|
||||
|
||||
@ -109,9 +108,7 @@ Only the work module's `go.mod` is consulted for `godebug` directives.
|
||||
Any directives in required dependency modules are ignored.
|
||||
It is an error to list a `godebug` with an unrecognized setting.
|
||||
(Toolchains older than Go 1.23 reject all `godebug` lines, since they do not
|
||||
understand `godebug` at all.) When a workspace is in use, `godebug`
|
||||
directives in `go.mod` files are ignored, and `go.work` will be consulted
|
||||
for `godebug` directives instead.
|
||||
understand `godebug` at all.)
|
||||
|
||||
The defaults from the `go` and `godebug` lines apply to all main
|
||||
packages that are built. For more fine-grained control,
|
||||
@ -153,100 +150,6 @@ for example,
|
||||
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
|
||||
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
|
||||
|
||||
### Go 1.25
|
||||
|
||||
Go 1.25 added a new `decoratemappings` setting that controls whether the Go
|
||||
runtime annotates OS anonymous memory mappings with context about their
|
||||
purpose. These annotations appear in /proc/self/maps and /proc/self/smaps as
|
||||
"[anon: Go: ...]". This setting is only used on Linux. For Go 1.25, it defaults
|
||||
to `decoratemappings=1`, enabling annotations. Using `decoratemappings=0`
|
||||
reverts to the pre-Go 1.25 behavior. This setting is fixed at program startup
|
||||
time, and can't be modified by changing the `GODEBUG` environment variable
|
||||
after the program starts.
|
||||
|
||||
Go 1.25 added a new `embedfollowsymlinks` setting that controls whether the
|
||||
Go command will follow symlinks to regular files embedding files.
|
||||
The default value `embedfollowsymlinks=0` does not allow following
|
||||
symlinks. `embedfollowsymlinks=1` will allow following symlinks.
|
||||
|
||||
Go 1.25 corrected the semantics of contention reports for runtime-internal locks,
|
||||
and so removed the [`runtimecontentionstacks` setting](/pkg/runtime#hdr-Environment_Variable).
|
||||
|
||||
### Go 1.24
|
||||
|
||||
Go 1.24 added a new `fips140` setting that controls whether the Go
|
||||
Cryptographic Module operates in FIPS 140-3 mode.
|
||||
The possible values are:
|
||||
- "off": no special support for FIPS 140-3 mode. This is the default.
|
||||
- "on": the Go Cryptographic Module operates in FIPS 140-3 mode.
|
||||
- "only": like "on", but cryptographic algorithms not approved by
|
||||
FIPS 140-3 return an error or panic.
|
||||
For more information, see [FIPS 140-3 Compliance](/doc/security/fips140).
|
||||
This setting is fixed at program startup time, and can't be modified
|
||||
by changing the `GODEBUG` environment variable after the program starts.
|
||||
|
||||
Go 1.24 changed the global [`math/rand.Seed`](/pkg/math/rand/#Seed) to be a
|
||||
no-op. This behavior is controlled by the `randseednop` setting.
|
||||
For Go 1.24 it defaults to `randseednop=1`.
|
||||
Using `randseednop=0` reverts to the pre-Go 1.24 behavior.
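As an illustrative sketch (not from the diff), a main package can opt back into the old behavior at build time with a `//go:debug` directive, assuming `randseednop` is accepted by the toolchain's known-settings check:

```
//go:debug randseednop=0

// Hypothetical example: with the directive above, the deprecated global
// rand.Seed call is honored again instead of being a no-op.
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	rand.Seed(1)
	fmt.Println(rand.Intn(10))
}
```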
|
||||
|
||||
Go 1.24 added new values for the `multipathtcp` setting.
|
||||
The possible values for `multipathtcp` are now:
|
||||
- "0": disable MPTCP on dialers and listeners by default
|
||||
- "1": enable MPTCP on dialers and listeners by default
|
||||
- "2": enable MPTCP on listeners only by default
|
||||
- "3": enable MPTCP on dialers only by default
|
||||
|
||||
For Go 1.24, it now defaults to multipathtcp="2", thus
|
||||
enabled by default on listeners. Using multipathtcp="0" reverts to the
|
||||
pre-Go 1.24 behavior.
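A minimal sketch (not part of the doc): programs that need a specific behavior regardless of the `multipathtcp` default can set it explicitly on a `net.Dialer` or `net.ListenConfig`:

```
package main

import (
	"fmt"
	"net"
)

func main() {
	// Explicit per-object settings override the GODEBUG default.
	var lc net.ListenConfig
	lc.SetMultipathTCP(true) // always offer MPTCP on listeners

	var d net.Dialer
	d.SetMultipathTCP(false) // never use MPTCP on this dialer

	fmt.Println(lc.MultipathTCP(), d.MultipathTCP()) // true false
}
```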
|
||||
|
||||
Go 1.24 changed the behavior of `go test -json` to emit build errors as JSON
|
||||
instead of text.
|
||||
These new JSON events are distinguished by new `Action` values,
|
||||
but can still cause problems with CI systems that aren't robust to these events.
|
||||
This behavior can be controlled with the `gotestjsonbuildtext` setting.
|
||||
Using `gotestjsonbuildtext=1` restores the 1.23 behavior.
|
||||
This setting will be removed in a future release, Go 1.28 at the earliest.
|
||||
|
||||
Go 1.24 changed [`crypto/rsa`](/pkg/crypto/rsa) to require RSA keys to be at
|
||||
least 1024 bits. This behavior can be controlled with the `rsa1024min` setting.
|
||||
Using `rsa1024min=0` restores the Go 1.23 behavior.
|
||||
|
||||
Go 1.24 introduced a mechanism for enabling platform specific Data Independent
|
||||
Timing (DIT) modes in the [`crypto/subtle`](/pkg/crypto/subtle) package. This
|
||||
mode can be enabled for an entire program with the `dataindependenttiming` setting.
|
||||
For Go 1.24 it defaults to `dataindependenttiming=0`. There is no change in default
|
||||
behavior from Go 1.23 when `dataindependenttiming` is unset.
|
||||
Using `dataindependenttiming=1` enables the DIT mode for the entire Go program.
|
||||
When enabled, DIT will be enabled when calling into C from Go. When enabled,
|
||||
calling into Go code from C will enable DIT, and disable it before returning to
|
||||
C if it was not enabled when Go code was entered.
|
||||
This currently only affects arm64 programs. For all other platforms it is a no-op.
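For comparison, a hedged sketch (not from the doc): instead of the program-wide GODEBUG setting, `crypto/subtle` in Go 1.24 also offers a scoped helper, `WithDataIndependentTiming`:

```
package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	var equal int
	// Enables DIT only for the duration of the callback (arm64 only;
	// a no-op elsewhere), rather than via dataindependenttiming=1.
	subtle.WithDataIndependentTiming(func() {
		equal = subtle.ConstantTimeCompare([]byte("abc"), []byte("abc"))
	})
	fmt.Println(equal == 1) // true
}
```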
|
||||
|
||||
Go 1.24 removed the `x509sha1` setting. `crypto/x509` no longer supports verifying
|
||||
signatures on certificates that use SHA-1 based signature algorithms.
|
||||
|
||||
Go 1.24 changes the default value of the [`x509usepolicies`
|
||||
setting.](/pkg/crypto/x509/#CreateCertificate) from `0` to `1`. When marshalling
|
||||
certificates, policies are now taken from the
|
||||
[`Certificate.Policies`](/pkg/crypto/x509/#Certificate.Policies) field rather
|
||||
than the
|
||||
[`Certificate.PolicyIdentifiers`](/pkg/crypto/x509/#Certificate.PolicyIdentifiers)
|
||||
field by default.
|
||||
|
||||
Go 1.24 enabled the post-quantum key exchange mechanism
|
||||
X25519MLKEM768 by default. The default can be reverted using the
|
||||
[`tlsmlkem` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
||||
This can be useful when dealing with buggy TLS servers that do not handle large records correctly,
|
||||
causing a timeout during the handshake (see [TLS post-quantum TL;DR fail](https://tldr.fail/)).
|
||||
Go 1.24 also removed X25519Kyber768Draft00 and the Go 1.23 `tlskyber` setting.
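A hedged sketch (not from the doc): the same opt-out can be made per connection in code by listing `CurvePreferences` without `X25519MLKEM768`:

```
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	cfg := &tls.Config{
		// Omitting tls.X25519MLKEM768 disables the post-quantum key
		// exchange for this Config, similar in effect to tlsmlkem=0.
		CurvePreferences: []tls.CurveID{tls.X25519, tls.CurveP256},
	}
	fmt.Println(cfg.CurvePreferences)
}
```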
|
||||
|
||||
Go 1.24 made [`ParsePKCS1PrivateKey`](/pkg/crypto/x509/#ParsePKCS1PrivateKey)
|
||||
use and validate the CRT parameters in the encoded private key. This behavior
|
||||
can be controlled with the `x509rsacrt` setting. Using `x509rsacrt=0` restores
|
||||
the Go 1.23 behavior.
|
||||
|
||||
### Go 1.23
|
||||
|
||||
Go 1.23 changed the channels created by package time to be unbuffered
|
||||
@ -276,8 +179,6 @@ Previous versions default to `winreadlinkvolume=0`.
|
||||
Go 1.23 enabled the experimental post-quantum key exchange mechanism
|
||||
X25519Kyber768Draft00 by default. The default can be reverted using the
|
||||
[`tlskyber` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
||||
This can be useful when dealing with buggy TLS servers that do not handle large records correctly,
|
||||
causing a timeout during the handshake (see [TLS post-quantum TL;DR fail](https://tldr.fail/)).
|
||||
|
||||
Go 1.23 changed the behavior of
|
||||
[crypto/x509.ParseCertificate](/pkg/crypto/x509/#ParseCertificate) to reject
|
||||
@ -373,7 +274,7 @@ certificate policy OIDs with components larger than 31 bits. By default this
|
||||
field is only used during parsing, when it is populated with policy OIDs, but
|
||||
not used during marshaling. It can be used to marshal these larger OIDs, instead
|
||||
of the existing PolicyIdentifiers field, by using the
|
||||
[`x509usepolicies` setting](/pkg/crypto/x509/#CreateCertificate).
|
||||
[`x509usepolicies` setting.](/pkg/crypto/x509/#CreateCertificate).
|
||||
|
||||
|
||||
### Go 1.21
|
||||
@ -441,7 +342,7 @@ There is no plan to remove this setting.
|
||||
|
||||
Go 1.18 removed support for SHA1 in most X.509 certificates,
|
||||
controlled by the [`x509sha1` setting](/pkg/crypto/x509#InsecureAlgorithmError).
|
||||
This setting was removed in Go 1.24.
|
||||
This setting will be removed in a future release, Go 1.22 at the earliest.
|
||||
|
||||
### Go 1.10
|
||||
|
||||
|
@ -1,3 +1,9 @@
|
||||
<!--
|
||||
NOTE: In this document and others in this directory, the convention is to
|
||||
set fixed-width phrases with non-fixed-width spaces, as in
|
||||
`hello` `world`.
|
||||
-->
|
||||
|
||||
<style>
|
||||
main ul li { margin: 0.5em 0; }
|
||||
</style>
|
||||
|
@ -1,10 +1,3 @@
|
||||
### Minor changes to the library {#minor_library_changes}
|
||||
|
||||
#### go/types
|
||||
|
||||
The `Var.Kind` method returns an enumeration of type `VarKind` that
|
||||
classifies the variable (package-level, local, receiver, parameter,
|
||||
result, or struct field). See issue #70250.
|
||||
|
||||
Callers of `NewVar` or `NewParam` are encouraged to call `Var.SetKind`
|
||||
to ensure that this attribute is set correctly in all cases.
|
||||
|
@ -1,8 +0,0 @@
|
||||
<style>
|
||||
main ul li { margin: 0.5em 0; }
|
||||
</style>
|
||||
|
||||
## DRAFT RELEASE NOTES — Introduction to Go 1.N {#introduction}
|
||||
|
||||
**Go 1.25 is not yet released. These are work-in-progress release notes.
|
||||
Go 1.25 is expected to be released in August 2025.**
|
@ -1,3 +0,0 @@
|
||||
## Changes to the language {#language}
|
||||
|
||||
|
@ -1,42 +0,0 @@
|
||||
## Tools {#tools}
|
||||
|
||||
### Go command {#go-command}
|
||||
|
||||
The `go build` `-asan` option now defaults to doing leak detection at
|
||||
program exit.
|
||||
This will report an error if memory allocated by C is not freed and is
|
||||
not referenced by any other memory allocated by either C or Go.
|
||||
These new error reports may be disabled by setting
|
||||
`ASAN_OPTIONS=detect_leaks=0` in the environment when running the
|
||||
program.
|
||||
|
||||
<!-- go.dev/issue/71294 -->
|
||||
|
||||
The new `work` package pattern matches all packages in the work (formerly called main)
|
||||
modules: either the single work module in module mode or the set of workspace modules
|
||||
in workspace mode.
|
||||
|
||||
<!-- go.dev/issue/65847 -->
|
||||
|
||||
When the go command updates the `go` line in a `go.mod` or `go.work` file,
|
||||
it [no longer](/ref/mod#go-mod-file-toolchain) adds a toolchain line
|
||||
specifying the command's current version.
|
||||
|
||||
### Cgo {#cgo}
|
||||
|
||||
### Vet {#vet}
|
||||
|
||||
The `go vet` command includes new analyzers:
|
||||
|
||||
<!-- go.dev/issue/18022 -->
|
||||
|
||||
- [waitgroup](https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/waitgroup),
|
||||
which reports misplaced calls to [sync.WaitGroup.Add]; and
|
||||
|
||||
<!-- go.dev/issue/28308 -->
|
||||
|
||||
- [hostport](https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/hostport),
|
||||
which reports uses of `fmt.Sprintf("%s:%d", host, port)` to
|
||||
construct addresses for [net.Dial], as these will not work with
|
||||
IPv6; instead it suggests using [net.JoinHostPort].
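A minimal sketch (illustrative values, not from the release notes) of the pattern the analyzer flags and the suggested replacement:

```
package main

import (
	"fmt"
	"net"
)

func main() {
	host, port := "2001:db8::1", 8080 // hypothetical IPv6 host

	bad := fmt.Sprintf("%s:%d", host, port)          // flagged: wrong for IPv6 literals
	good := net.JoinHostPort(host, fmt.Sprint(port)) // suggested fix

	fmt.Println(bad)  // 2001:db8::1:8080
	fmt.Println(good) // [2001:db8::1]:8080
}
```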
|
||||
|
@ -1,26 +0,0 @@
|
||||
## Runtime {#runtime}
|
||||
|
||||
<!-- go.dev/issue/71517 -->
|
||||
|
||||
The message printed when a program exits due to an unhandled panic
|
||||
that was recovered and repanicked no longer repeats the text of
|
||||
the panic value.
|
||||
|
||||
Previously, a program which panicked with `panic("PANIC")`,
|
||||
recovered the panic, and then repanicked with the original
|
||||
value would print:
|
||||
|
||||
panic: PANIC [recovered]
|
||||
panic: PANIC
|
||||
|
||||
This program will now print:
|
||||
|
||||
panic: PANIC [recovered, repanicked]
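The program shape in question looks roughly like this sketch (not taken from the notes):

```
package main

func main() {
	defer func() {
		if v := recover(); v != nil {
			panic(v) // repanic with the original value
		}
	}()
	panic("PANIC")
}
```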
|
||||
|
||||
<!-- go.dev/issue/71546 -->
|
||||
|
||||
On Linux systems with kernel support for anonymous VMA names
|
||||
(`CONFIG_ANON_VMA_NAME`), the Go runtime will annotate anonymous memory
|
||||
mappings with context about their purpose. e.g., `[anon: Go: heap]` for heap
|
||||
memory. This can be disabled with the [GODEBUG setting](/doc/godebug)
|
||||
`decoratemappings=0`.
|
@ -1,44 +0,0 @@
|
||||
## Compiler {#compiler}
|
||||
|
||||
<!-- https://go.dev/issue/26379 -->
|
||||
|
||||
The compiler and linker in Go 1.25 now generate debug information
|
||||
using [DWARF version 5](https://dwarfstd.org/dwarf5std.html); the
|
||||
newer DWARF version reduces the space required for debugging
|
||||
information in Go binaries.
|
||||
DWARF 5 generation is gated by the "dwarf5" GOEXPERIMENT; this
|
||||
functionality can be disabled (for now) using GOEXPERIMENT=nodwarf5.
|
||||
|
||||
<!-- https://go.dev/issue/72860, CL 657715 -->
|
||||
|
||||
The compiler [has been fixed](/cl/657715)
|
||||
to ensure that nil pointer checks are performed promptly. Programs like the following,
|
||||
which used to execute successfully, will now panic with a nil-pointer exception:
|
||||
|
||||
```
|
||||
package main
|
||||
|
||||
import "os"
|
||||
|
||||
func main() {
|
||||
f, err := os.Open("nonExistentFile")
|
||||
name := f.Name()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
println(name)
|
||||
}
|
||||
```
|
||||
|
||||
This program is incorrect in that it uses the result of `os.Open` before checking
|
||||
the error. The main result of `os.Open` can be a nil pointer if the error result is non-nil.
|
||||
But because of [a compiler bug](/issue/72860), this program ran successfully under
|
||||
Go versions 1.21 through 1.24 (in violation of the Go spec). It will no longer run
|
||||
successfully in Go 1.25. If this change is affecting your code, the solution is to put
|
||||
the non-nil error check earlier in your code, preferably immediately after
|
||||
the error-generating statement.
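A corrected version of the example, as a sketch of the recommended fix (check the error before using the file):

```
package main

import "os"

func main() {
	f, err := os.Open("nonExistentFile")
	if err != nil {
		return
	}
	name := f.Name() // f is known non-nil here
	println(name)
}
```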
|
||||
|
||||
## Assembler {#assembler}
|
||||
|
||||
## Linker {#linker}
|
||||
|
@ -1,2 +0,0 @@
|
||||
## Standard library {#library}
|
||||
|
@ -1,3 +0,0 @@
|
||||
### Minor changes to the library {#minor_library_changes}
|
||||
|
||||
|
@ -1 +0,0 @@
|
||||
API changes and other small changes to the standard library go here.
|
@ -1,2 +0,0 @@
|
||||
The [*Writer.AddFS] implementation now supports symbolic links
|
||||
for filesystems that implement [io/fs.ReadLinkFS].
|
@ -1,2 +0,0 @@
|
||||
The hidden and undocumented `Inverse` and `CombinedMult` methods on some [Curve]
|
||||
implementations have been removed.
|
@ -1,2 +0,0 @@
|
||||
The new [ConnectionState.CurveID] field exposes the key exchange mechanism used
|
||||
to establish the connection.
|
@ -1,2 +0,0 @@
|
||||
When [FIPS 140-3 mode](/doc/security/fips140) is enabled, Extended Master Secret
|
||||
is now required in TLS 1.2, and Ed25519 and X25519MLKEM768 are now allowed.
|
@ -1,4 +0,0 @@
|
||||
The [debug/elf] package adds two new constants:
|
||||
- [PT_RISCV_ATTRIBUTES]
|
||||
- [SHT_RISCV_ATTRIBUTES]
|
||||
for RISC-V ELF parsing.
|
@ -1 +0,0 @@
|
||||
The [ParseDir] function is deprecated.
|
@ -1,3 +0,0 @@
|
||||
[Var] now has a [Var.Kind] method that classifies the variable as one
|
||||
of: package-level, receiver, parameter, result, or local variable, or
|
||||
a struct field.
|
@ -1,3 +0,0 @@
|
||||
The new [LookupSelection] function looks up the field or method of a
|
||||
given name and receiver type, like the existing [LookupFieldOrMethod]
|
||||
function, but returns the result in the form of a [Selection].
|
@ -1 +0,0 @@
|
||||
A new [ReadLinkFS] interface provides the ability to read symbolic links in a filesystem.
|
@ -1 +0,0 @@
|
||||
[Record] now has a Source() method, returning its source location or nil if unavailable.
|
@ -1,2 +0,0 @@
|
||||
The new helper function [FieldContentDisposition] builds multipart
|
||||
Content-Disposition header fields.
|
@ -1,3 +0,0 @@
|
||||
On Windows, the [TCPConn.File], [UDPConn.File], [UnixConn.File],
|
||||
[IPConn.File], [TCPListener.File], and [UnixListener.File]
|
||||
methods are now supported.
|
@ -1,5 +0,0 @@
|
||||
[LookupMX] and [*Resolver.LookupMX] now return DNS names that look
|
||||
like valid IP addresses, as well as valid domain names.
|
||||
Previously if a name server returned an IP address as a DNS name,
|
||||
LookupMX would discard it, as required by the RFCs.
|
||||
However, name servers in practice do sometimes return IP addresses.
|
@ -1 +0,0 @@
|
||||
On Windows, [ListenMulticastUDP] now supports IPv6 addresses.
|
@ -1,2 +0,0 @@
|
||||
On Windows, the [FileConn], [FilePacketConn], [FileListener]
|
||||
functions are now supported.
|
@ -1,14 +0,0 @@
|
||||
On Windows, [NewFile] now supports handles opened for asynchronous I/O (that is,
|
||||
[syscall.FILE_FLAG_OVERLAPPED] is specified in the [syscall.CreateFile] call).
|
||||
These handles are associated with the Go runtime's I/O completion port,
|
||||
which provides the following benefits for the resulting [File]:
|
||||
|
||||
- I/O methods ([File.Read], [File.Write], [File.ReadAt], and [File.WriteAt]) do not block an OS thread.
|
||||
- Deadline methods ([File.SetDeadline], [File.SetReadDeadline], and [File.SetWriteDeadline]) are supported.
|
||||
|
||||
This enhancement is especially beneficial for applications that communicate via named pipes on Windows.
|
||||
|
||||
Note that a handle can only be associated with one completion port at a time.
|
||||
If the handle provided to [NewFile] is already associated with a completion port,
|
||||
the returned [File] is downgraded to synchronous I/O mode.
|
||||
In this case, I/O methods will block an OS thread, and the deadline methods have no effect.
|
@ -1,2 +0,0 @@
|
||||
The filesystem returned by [DirFS] implements the new [io/fs.ReadLinkFS] interface.
|
||||
[CopyFS] supports symlinks when copying filesystems that implement [io/fs.ReadLinkFS].
|
@ -1,10 +0,0 @@
|
||||
The [os.Root] type supports the following additional methods:
|
||||
|
||||
* [os.Root.Chmod]
|
||||
* [os.Root.Chown]
|
||||
* [os.Root.Chtimes]
|
||||
* [os.Root.Lchown]
|
||||
* [os.Root.Link]
|
||||
* [os.Root.Readlink]
|
||||
* [os.Root.Rename]
|
||||
* [os.Root.Symlink]
|
@ -1,4 +0,0 @@
|
||||
The `\p{name}` and `\P{name}` character class syntaxes now accept the names
|
||||
Any, ASCII, Assigned, Cn, and LC, as well as Unicode category aliases like `\p{Letter}` for `\pL`.
|
||||
Following [Unicode TR18](https://unicode.org/reports/tr18/), they also now use
|
||||
case-insensitive name lookups, ignoring spaces, underscores, and hyphens.
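A small sketch exercising the new names (requires a Go release with this change):

```
package main

import (
	"fmt"
	"regexp"
)

func main() {
	letters := regexp.MustCompile(`^\p{Letter}+$`) // alias for \pL
	fmt.Println(letters.MatchString("Résumé"))     // true

	anyRune := regexp.MustCompile(`\p{Any}`) // newly accepted class name
	fmt.Println(anyRune.MatchString("x"))    // true
}
```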
|
@ -1,6 +0,0 @@
|
||||
The mutex profile for contention on runtime-internal locks now correctly points
|
||||
to the end of the critical section that caused the delay. This matches the
|
||||
profile's behavior for contention on `sync.Mutex` values. The
|
||||
`runtimecontentionstacks` setting for `GODEBUG`, which allowed opting in to the
|
||||
unusual behavior of Go 1.22 through 1.24 for this part of the profile, is now
|
||||
gone.
|
@ -1,2 +0,0 @@
|
||||
[WaitGroup] has added a new method [WaitGroup.Go],
|
||||
that makes the common pattern of creating and counting goroutines more convenient.
|
@ -1,3 +0,0 @@
|
||||
[MapFS] implements the new [io/fs.ReadLinkFS] interface.
|
||||
[TestFS] will verify the functionality of the [io/fs.ReadLinkFS] interface if implemented.
|
||||
[TestFS] will no longer follow symlinks to avoid unbounded recursion.
|
@ -1,4 +0,0 @@
|
||||
The new [CategoryAliases] map provides access to category alias names, such as “Letter” for “L”.
|
||||
The new categories [Cn] and [LC] define unassigned codepoints and cased letters, respectively.
|
||||
These have always been defined by Unicode but were inadvertently omitted in earlier versions of Go.
|
||||
The [C] category now includes [Cn], meaning it has added all unassigned code points.
|
@ -1,11 +0,0 @@
|
||||
## Ports {#ports}
|
||||
|
||||
### Darwin
|
||||
|
||||
<!-- go.dev/issue/69839 -->
|
||||
As [announced](/doc/go1.24#darwin) in the Go 1.24 release notes, Go 1.25 requires macOS 12 Monterey or later; support for previous versions has been discontinued.
|
||||
|
||||
### Windows
|
||||
|
||||
<!-- go.dev/issue/71671 -->
|
||||
Go 1.25 is the last release that contains the [broken](/doc/go1.24#windows) 32-bit windows/arm port (`GOOS=windows` `GOARCH=arm`). It will be removed in Go 1.26.
|
@ -1,46 +0,0 @@
|
||||
# Copyright 2024 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
# Rules for building and testing new FIPS snapshots.
|
||||
# For example:
|
||||
#
|
||||
# make v1.2.3.zip
|
||||
# make v1.2.3.test
|
||||
#
|
||||
# and then if changes are needed, check them into master
|
||||
# and run 'make v1.2.3.rm' and repeat.
|
||||
#
|
||||
# Note that once published a snapshot zip file should never
|
||||
# be modified. We record the sha256 hashes of the zip files
|
||||
# in fips140.sum, and the cmd/go/internal/fips140 test checks
|
||||
# that the zips match.
|
||||
#
|
||||
# When the zip file is finalized, run 'make updatesum' to update
|
||||
# fips140.sum.
|
||||
|
||||
default:
|
||||
@echo nothing to make
|
||||
|
||||
# make v1.2.3.zip builds a v1.2.3.zip file
|
||||
# from the current origin/master.
|
||||
# copy and edit the 'go run' command by hand to use a different branch.
|
||||
v%.zip:
|
||||
git fetch origin master
|
||||
go run ../../src/cmd/go/internal/fips140/mkzip.go v$*
|
||||
|
||||
# normally mkzip refuses to overwrite an existing zip file.
|
||||
# make v1.2.3.rm removes the zip file and the unpacked
|
||||
# copy from the module cache.
|
||||
v%.rm:
|
||||
rm -f v$*.zip
|
||||
chmod -R u+w $$(go env GOMODCACHE)/golang.org/fips140@v$* 2>/dev/null || true
|
||||
rm -rf $$(go env GOMODCACHE)/golang.org/fips140@v$*
|
||||
|
||||
# make v1.2.3.test runs the crypto tests using that snapshot.
|
||||
v%.test:
|
||||
GOFIPS140=v$* go test -short crypto...
|
||||
|
||||
# make updatesum updates the fips140.sum file.
|
||||
updatesum:
|
||||
go test cmd/go/internal/fips140 -update
|
@ -1,9 +0,0 @@
|
||||
This directory holds snapshots of the crypto/internal/fips140 tree
|
||||
that are being validated and certified for FIPS-140 use.
|
||||
The file x.txt (for example, inprocess.txt, certified.txt)
|
||||
defines the meaning of the FIPS version alias x, listing
|
||||
the exact version to use.
|
||||
|
||||
The zip files are created by cmd/go/internal/fips140/mkzip.go.
|
||||
The fips140.sum file lists checksums for the zip files.
|
||||
See the Makefile for recipes.
|
@ -1,12 +0,0 @@
|
||||
# SHA256 checksums of snapshot zip files in this directory.
|
||||
# These checksums are included in the FIPS security policy
|
||||
# (validation instructions sent to the lab) and MUST NOT CHANGE.
|
||||
# That is, the zip files themselves must not change.
|
||||
#
|
||||
# It is okay to add new zip files to the list, and it is okay to
|
||||
# remove zip files from the list when they are removed from
|
||||
# this directory. To update this file:
|
||||
#
|
||||
# go test cmd/go/internal/fips140 -update
|
||||
#
|
||||
v1.0.0.zip b50508feaeff05d22516b21e1fd210bbf5d6a1e422eaf2cfa23fe379342713b8
|
Binary file not shown.
@ -24,8 +24,8 @@
|
||||
# in the CL match the update.bash in the CL.
|
||||
|
||||
# Versions to use.
|
||||
CODE=2025a
|
||||
DATA=2025a
|
||||
CODE=2024a
|
||||
DATA=2024a
|
||||
|
||||
set -e
|
||||
|
||||
@ -40,12 +40,7 @@ curl -sS -L -O https://www.iana.org/time-zones/repository/releases/tzdata$DATA.t
|
||||
tar xzf tzcode$CODE.tar.gz
|
||||
tar xzf tzdata$DATA.tar.gz
|
||||
|
||||
# The PACKRATLIST and PACKRATDATA options are copied from Ubuntu:
|
||||
# https://git.launchpad.net/ubuntu/+source/tzdata/tree/debian/rules?h=debian/sid
|
||||
#
|
||||
# You can see the description of these make variables in the tzdata Makefile:
|
||||
# https://github.com/eggert/tz/blob/main/Makefile
|
||||
if ! make CFLAGS=-DSTD_INSPIRED AWK=awk TZDIR=zoneinfo PACKRATDATA=backzone PACKRATLIST=zone.tab posix_only >make.out 2>&1; then
|
||||
if ! make CFLAGS=-DSTD_INSPIRED AWK=awk TZDIR=zoneinfo posix_only >make.out 2>&1; then
|
||||
cat make.out
|
||||
exit 2
|
||||
fi
|
||||
|
Binary file not shown.
191
misc/linkcheck/linkcheck.go
Normal file
@ -0,0 +1,191 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The linkcheck command finds missing links in the godoc website.
|
||||
// It crawls a URL recursively and notes URLs and URL fragments
|
||||
// that it's seen and prints a report of missing links at the end.
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
root = flag.String("root", "http://localhost:6060", "Root to crawl")
|
||||
verbose = flag.Bool("verbose", false, "verbose")
|
||||
)
|
||||
|
||||
var wg sync.WaitGroup // outstanding fetches
|
||||
var urlq = make(chan string) // URLs to crawl
|
||||
|
||||
// urlFrag is a URL and its optional #fragment (without the #)
|
||||
type urlFrag struct {
|
||||
url, frag string
|
||||
}
|
||||
|
||||
var (
|
||||
mu sync.Mutex
|
||||
crawled = make(map[string]bool) // URL without fragment -> true
|
||||
neededFrags = make(map[urlFrag][]string) // URL#frag -> who needs it
|
||||
)
|
||||
|
||||
var aRx = regexp.MustCompile(`<a href=['"]?(/[^\s'">]+)`)
|
||||
|
||||
// Owned by crawlLoop goroutine:
|
||||
var (
|
||||
linkSources = make(map[string][]string) // url no fragment -> sources
|
||||
fragExists = make(map[urlFrag]bool)
|
||||
problems []string
|
||||
)
|
||||
|
||||
func localLinks(body string) (links []string) {
|
||||
seen := map[string]bool{}
|
||||
mv := aRx.FindAllStringSubmatch(body, -1)
|
||||
for _, m := range mv {
|
||||
ref := m[1]
|
||||
if strings.HasPrefix(ref, "/src/") {
|
||||
continue
|
||||
}
|
||||
if !seen[ref] {
|
||||
seen[ref] = true
|
||||
links = append(links, m[1])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var idRx = regexp.MustCompile(`\bid=['"]?([^\s'">]+)`)
|
||||
|
||||
func pageIDs(body string) (ids []string) {
|
||||
mv := idRx.FindAllStringSubmatch(body, -1)
|
||||
for _, m := range mv {
|
||||
ids = append(ids, m[1])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// url may contain a #fragment, and the fragment is then noted as needing to exist.
|
||||
func crawl(url string, sourceURL string) {
|
||||
if strings.Contains(url, "/devel/release") {
|
||||
return
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if u, frag, ok := strings.Cut(url, "#"); ok {
|
||||
url = u
|
||||
if frag != "" {
|
||||
uf := urlFrag{url, frag}
|
||||
neededFrags[uf] = append(neededFrags[uf], sourceURL)
|
||||
}
|
||||
}
|
||||
if crawled[url] {
|
||||
return
|
||||
}
|
||||
crawled[url] = true
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
urlq <- url
|
||||
}()
|
||||
}
|
||||
|
||||
func addProblem(url, errmsg string) {
|
||||
msg := fmt.Sprintf("Error on %s: %s (from %s)", url, errmsg, linkSources[url])
|
||||
if *verbose {
|
||||
log.Print(msg)
|
||||
}
|
||||
problems = append(problems, msg)
|
||||
}
|
||||
|
||||
func crawlLoop() {
|
||||
for url := range urlq {
|
||||
if err := doCrawl(url); err != nil {
|
||||
addProblem(url, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doCrawl(url string) error {
|
||||
defer wg.Done()
|
||||
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := http.DefaultTransport.RoundTrip(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Handle redirects.
|
||||
if res.StatusCode/100 == 3 {
|
||||
newURL, err := res.Location()
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolving redirect: %v", err)
|
||||
}
|
||||
if !strings.HasPrefix(newURL.String(), *root) {
|
||||
// Skip off-site redirects.
|
||||
return nil
|
||||
}
|
||||
crawl(newURL.String(), url)
|
||||
return nil
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
return errors.New(res.Status)
|
||||
}
|
||||
slurp, err := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Error reading %s body: %v", url, err)
|
||||
}
|
||||
if *verbose {
|
||||
log.Printf("Len of %s: %d", url, len(slurp))
|
||||
}
|
||||
body := string(slurp)
|
||||
for _, ref := range localLinks(body) {
|
||||
if *verbose {
|
||||
log.Printf(" links to %s", ref)
|
||||
}
|
||||
dest := *root + ref
|
||||
linkSources[dest] = append(linkSources[dest], url)
|
||||
crawl(dest, url)
|
||||
}
|
||||
for _, id := range pageIDs(body) {
|
||||
if *verbose {
|
||||
log.Printf(" url %s has #%s", url, id)
|
||||
}
|
||||
fragExists[urlFrag{url, id}] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
go crawlLoop()
|
||||
crawl(*root, "")
|
||||
|
||||
wg.Wait()
|
||||
close(urlq)
|
||||
for uf, needers := range neededFrags {
|
||||
if !fragExists[uf] {
|
||||
problems = append(problems, fmt.Sprintf("Missing fragment for %+v from %v", uf, needers))
|
||||
}
|
||||
}
|
||||
|
||||
for _, s := range problems {
|
||||
fmt.Println(s)
|
||||
}
|
||||
if len(problems) > 0 {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
@ -17,7 +17,7 @@ license that can be found in the LICENSE file.
|
||||
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script>
|
||||
(see https://caniuse.com/#feat=textencoder)
|
||||
-->
|
||||
<script src="../../lib/wasm/wasm_exec.js"></script>
|
||||
<script src="wasm_exec.js"></script>
|
||||
<script>
|
||||
if (!WebAssembly.instantiateStreaming) { // polyfill
|
||||
WebAssembly.instantiateStreaming = async (resp, importObject) => {
|
||||
|
@ -14,7 +14,7 @@
|
||||
if (!globalThis.fs) {
|
||||
let outputBuf = "";
|
||||
globalThis.fs = {
|
||||
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1, O_DIRECTORY: -1 }, // unused
|
||||
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1 }, // unused
|
||||
writeSync(fd, buf) {
|
||||
outputBuf += decoder.decode(buf);
|
||||
const nl = outputBuf.lastIndexOf("\n");
|
||||
@ -73,14 +73,6 @@
|
||||
}
|
||||
}
|
||||
|
||||
if (!globalThis.path) {
|
||||
globalThis.path = {
|
||||
resolve(...pathSegments) {
|
||||
return pathSegments.join("/");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!globalThis.crypto) {
|
||||
throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");
|
||||
}
|
||||
@ -216,16 +208,10 @@
|
||||
return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len));
|
||||
}
|
||||
|
||||
const testCallExport = (a, b) => {
|
||||
this._inst.exports.testExport0();
|
||||
return this._inst.exports.testExport(a, b);
|
||||
}
|
||||
|
||||
const timeOrigin = Date.now() - performance.now();
|
||||
this.importObject = {
|
||||
_gotest: {
|
||||
add: (a, b) => a + b,
|
||||
callExport: testCallExport,
|
||||
},
|
||||
gojs: {
|
||||
// Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters)
|
@ -11,7 +11,6 @@ if (process.argv.length < 3) {
|
||||
|
||||
globalThis.require = require;
|
||||
globalThis.fs = require("fs");
|
||||
globalThis.path = require("path");
|
||||
globalThis.TextEncoder = require("util").TextEncoder;
|
||||
globalThis.TextDecoder = require("util").TextDecoder;
|
||||
|
@ -33,10 +33,6 @@ Before updating vendor directories, ensure that module mode is enabled.
|
||||
Make sure that GO111MODULE is not set in the environment, or that it is
|
||||
set to 'on' or 'auto', and if you use a go.work file, set GOWORK=off.
|
||||
|
||||
Also, ensure that 'go env GOROOT' shows the root of this Go source
|
||||
tree. Otherwise, the results are undefined. It's recommended to build
|
||||
Go from source and use that 'go' binary to update its source tree.
|
||||
|
||||
Requirements may be added, updated, and removed with 'go get'.
|
||||
The vendor directory may be updated with 'go mod vendor'.
|
||||
A typical sequence might be:
|
||||
|
18
src/all.bat
@ -6,11 +6,17 @@
|
||||
|
||||
setlocal
|
||||
|
||||
if not exist make.bat (
|
||||
if exist make.bat goto ok
|
||||
echo all.bat must be run from go\src
|
||||
exit /b 1
|
||||
)
|
||||
:: cannot exit: would kill parent command interpreter
|
||||
goto end
|
||||
:ok
|
||||
|
||||
call .\make.bat --no-banner || exit /b 1
|
||||
call .\run.bat --no-rebuild || exit /b 1
|
||||
..\bin\go tool dist banner
|
||||
call .\make.bat --no-banner --no-local
|
||||
if %GOBUILDFAIL%==1 goto end
|
||||
call .\run.bat --no-rebuild --no-local
|
||||
if %GOBUILDFAIL%==1 goto end
|
||||
"%GOTOOLDIR%/dist" banner
|
||||
|
||||
:end
|
||||
if x%GOBUILDEXIT%==x1 exit %GOBUILDFAIL%
|
||||
|
@ -15,7 +15,6 @@ import (
|
||||
"fmt"
|
||||
"internal/godebug"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"math"
|
||||
"path"
|
||||
"reflect"
|
||||
@ -697,14 +696,24 @@ func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
|
||||
h.Gname = sys.Gname
|
||||
h.AccessTime = sys.AccessTime
|
||||
h.ChangeTime = sys.ChangeTime
|
||||
h.Xattrs = maps.Clone(sys.Xattrs)
|
||||
if sys.Xattrs != nil {
|
||||
h.Xattrs = make(map[string]string)
|
||||
for k, v := range sys.Xattrs {
|
||||
h.Xattrs[k] = v
|
||||
}
|
||||
}
|
||||
if sys.Typeflag == TypeLink {
|
||||
// hard link
|
||||
h.Typeflag = TypeLink
|
||||
h.Size = 0
|
||||
h.Linkname = sys.Linkname
|
||||
}
|
||||
h.PAXRecords = maps.Clone(sys.PAXRecords)
|
||||
if sys.PAXRecords != nil {
|
||||
h.PAXRecords = make(map[string]string)
|
||||
for k, v := range sys.PAXRecords {
|
||||
h.PAXRecords[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
var doNameLookups = true
|
||||
if iface, ok := fi.(FileInfoNames); ok {
|
||||
|
@ -7,16 +7,14 @@ package tar
|
||||
import (
|
||||
"bytes"
|
||||
"compress/bzip2"
|
||||
"crypto/md5"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"maps"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
@ -27,7 +25,7 @@ func TestReader(t *testing.T) {
|
||||
vectors := []struct {
|
||||
file string // Test input file
|
||||
headers []*Header // Expected output headers
|
||||
chksums []string // CRC32 checksum of files, leave as nil if not checked
|
||||
chksums []string // MD5 checksum of files, leave as nil if not checked
|
||||
err error // Expected error to occur
|
||||
}{{
|
||||
file: "testdata/gnu.tar",
|
||||
@ -55,8 +53,8 @@ func TestReader(t *testing.T) {
|
||||
Format: FormatGNU,
|
||||
}},
|
||||
chksums: []string{
|
||||
"6cbd88fc",
|
||||
"ddac04b3",
|
||||
"e38b27eaccb4391bdec553a7f3ae6b2f",
|
||||
"c65bd2e50a56a2138bf1716f2fd56fe9",
|
||||
},
|
||||
}, {
|
||||
file: "testdata/sparse-formats.tar",
|
||||
@ -149,11 +147,11 @@ func TestReader(t *testing.T) {
|
||||
Format: FormatGNU,
|
||||
}},
|
||||
chksums: []string{
|
||||
"5375e1d2",
|
||||
"5375e1d2",
|
||||
"5375e1d2",
|
||||
"5375e1d2",
|
||||
"8eb179ba",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"b0061974914468de549a2af8ced10316",
|
||||
},
|
||||
}, {
|
||||
file: "testdata/star.tar",
|
||||
@ -270,7 +268,7 @@ func TestReader(t *testing.T) {
|
||||
Format: FormatPAX,
|
||||
}},
|
||||
chksums: []string{
|
||||
"5fd7e86a",
|
||||
"0afb597b283fe61b5d4879669a350556",
|
||||
},
|
||||
}, {
|
||||
file: "testdata/pax-records.tar",
|
||||
@ -657,7 +655,7 @@ func TestReader(t *testing.T) {
|
||||
if v.chksums == nil {
|
||||
continue
|
||||
}
|
||||
h := crc32.NewIEEE()
|
||||
h := md5.New()
|
||||
_, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read
|
||||
if err != nil {
|
||||
break
|
||||
@ -1019,7 +1017,7 @@ func TestParsePAX(t *testing.T) {
|
||||
for i, v := range vectors {
|
||||
r := strings.NewReader(v.in)
|
||||
got, err := parsePAX(r)
|
||||
if !maps.Equal(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
|
||||
if !reflect.DeepEqual(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
|
||||
t.Errorf("test %d, parsePAX():\ngot %v\nwant %v", i, got, v.want)
|
||||
}
|
||||
if ok := err == nil; ok != v.ok {
|
||||
@ -1136,7 +1134,7 @@ func TestReadOldGNUSparseMap(t *testing.T) {
|
||||
v.input = v.input[copy(blk[:], v.input):]
|
||||
tr := Reader{r: bytes.NewReader(v.input)}
|
||||
got, err := tr.readOldGNUSparseMap(&hdr, &blk)
|
||||
if !slices.Equal(got, v.wantMap) {
|
||||
if !equalSparseEntries(got, v.wantMap) {
|
||||
t.Errorf("test %d, readOldGNUSparseMap(): got %v, want %v", i, got, v.wantMap)
|
||||
}
|
||||
if err != v.wantErr {
|
||||
@ -1327,7 +1325,7 @@ func TestReadGNUSparsePAXHeaders(t *testing.T) {
|
||||
r := strings.NewReader(v.inputData + "#") // Add canary byte
|
||||
tr := Reader{curr: ®FileReader{r, int64(r.Len())}}
|
||||
got, err := tr.readGNUSparsePAXHeaders(&hdr)
|
||||
if !slices.Equal(got, v.wantMap) {
|
||||
if !equalSparseEntries(got, v.wantMap) {
|
||||
t.Errorf("test %d, readGNUSparsePAXHeaders(): got %v, want %v", i, got, v.wantMap)
|
||||
}
|
||||
if err != v.wantErr {
|
||||
|
@ -11,13 +11,11 @@ import (
|
||||
"internal/testenv"
|
||||
"io"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@ -100,6 +98,10 @@ func (f *testFile) Seek(pos int64, whence int) (int64, error) {
|
||||
return f.pos, nil
|
||||
}
|
||||
|
||||
func equalSparseEntries(x, y []sparseEntry) bool {
|
||||
return (len(x) == 0 && len(y) == 0) || reflect.DeepEqual(x, y)
|
||||
}
|
||||
|
||||
func TestSparseEntries(t *testing.T) {
|
||||
vectors := []struct {
|
||||
in []sparseEntry
|
||||
@ -196,11 +198,11 @@ func TestSparseEntries(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
gotAligned := alignSparseEntries(append([]sparseEntry{}, v.in...), v.size)
|
||||
if !slices.Equal(gotAligned, v.wantAligned) {
|
||||
if !equalSparseEntries(gotAligned, v.wantAligned) {
|
||||
t.Errorf("test %d, alignSparseEntries():\ngot %v\nwant %v", i, gotAligned, v.wantAligned)
|
||||
}
|
||||
gotInverted := invertSparseEntries(append([]sparseEntry{}, v.in...), v.size)
|
||||
if !slices.Equal(gotInverted, v.wantInverted) {
|
||||
if !equalSparseEntries(gotInverted, v.wantInverted) {
|
||||
t.Errorf("test %d, inverseSparseEntries():\ngot %v\nwant %v", i, gotInverted, v.wantInverted)
|
||||
}
|
||||
}
|
||||
@ -742,7 +744,7 @@ func TestHeaderAllowedFormats(t *testing.T) {
|
||||
if formats != v.formats {
|
||||
t.Errorf("test %d, allowedFormats(): got %v, want %v", i, formats, v.formats)
|
||||
}
|
||||
if formats&FormatPAX > 0 && !maps.Equal(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
|
||||
if formats&FormatPAX > 0 && !reflect.DeepEqual(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
|
||||
t.Errorf("test %d, allowedFormats():\ngot %v\nwant %s", i, paxHdrs, v.paxHdrs)
|
||||
}
|
||||
if (formats != FormatUnknown) && (err != nil) {
|
||||
|
@ -9,7 +9,6 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
@ -170,10 +169,16 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
|
||||
// Write PAX records to the output.
|
||||
isGlobal := hdr.Typeflag == TypeXGlobalHeader
|
||||
if len(paxHdrs) > 0 || isGlobal {
|
||||
// Sort keys for deterministic ordering.
|
||||
var keys []string
|
||||
for k := range paxHdrs {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
slices.Sort(keys)
|
||||
|
||||
// Write each record to a buffer.
|
||||
var buf strings.Builder
|
||||
// Sort keys for deterministic ordering.
|
||||
for _, k := range slices.Sorted(maps.Keys(paxHdrs)) {
|
||||
for _, k := range keys {
|
||||
rec, err := formatPAXRecord(k, paxHdrs[k])
|
||||
if err != nil {
|
||||
return err
|
||||
@ -408,37 +413,25 @@ func (tw *Writer) AddFS(fsys fs.FS) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if name == "." {
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
info, err := d.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
linkTarget := ""
|
||||
if typ := d.Type(); typ == fs.ModeSymlink {
|
||||
var err error
|
||||
linkTarget, err = fs.ReadLink(fsys, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if !typ.IsRegular() && typ != fs.ModeDir {
|
||||
// TODO(#49580): Handle symlinks when fs.ReadLinkFS is available.
|
||||
if !info.Mode().IsRegular() {
|
||||
return errors.New("tar: cannot add non-regular file")
|
||||
}
|
||||
h, err := FileInfoHeader(info, linkTarget)
|
||||
h, err := FileInfoHeader(info, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.Name = name
|
||||
if d.IsDir() {
|
||||
h.Name += "/"
|
||||
}
|
||||
if err := tw.WriteHeader(h); err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.Type().IsRegular() {
|
||||
return nil
|
||||
}
|
||||
f, err := fsys.Open(name)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -675,7 +668,6 @@ func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
func (sw sparseFileWriter) logicalRemaining() int64 {
|
||||
return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
|
||||
}
|
||||
|
||||
func (sw sparseFileWriter) physicalRemaining() int64 {
|
||||
return sw.fw.physicalRemaining()
|
||||
}
|
||||
|
@ -10,11 +10,10 @@ import (
|
||||
"errors"
|
||||
"io"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
@ -703,7 +702,7 @@ func TestPaxXattrs(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !maps.Equal(hdr.Xattrs, xattrs) {
|
||||
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
|
||||
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
|
||||
hdr.Xattrs, xattrs)
|
||||
}
|
||||
@ -1339,41 +1338,29 @@ func TestFileWriter(t *testing.T) {
|
||||
|
||||
func TestWriterAddFS(t *testing.T) {
|
||||
fsys := fstest.MapFS{
|
||||
"emptyfolder": {Mode: 0o755 | os.ModeDir},
|
||||
"file.go": {Data: []byte("hello")},
|
||||
"subfolder/another.go": {Data: []byte("world")},
|
||||
"symlink.go": {Mode: 0o777 | os.ModeSymlink, Data: []byte("file.go")},
|
||||
// Notably missing here is the "subfolder" directory. This makes sure even
|
||||
// if we don't have a subfolder directory listed.
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
tw := NewWriter(&buf)
|
||||
if err := tw.AddFS(fsys); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add subfolder into fsys to match what we'll read from the tar.
|
||||
fsys["subfolder"] = &fstest.MapFile{Mode: 0o555 | os.ModeDir}
|
||||
|
||||
// Test that we can get the files back from the archive
|
||||
tr := NewReader(&buf)
|
||||
|
||||
names := make([]string, 0, len(fsys))
|
||||
for name := range fsys {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
entriesLeft := len(fsys)
|
||||
for _, name := range names {
|
||||
entriesLeft--
|
||||
|
||||
entryInfo, err := fsys.Lstat(name)
|
||||
entries, err := fsys.ReadDir(".")
|
||||
if err != nil {
|
||||
t.Fatalf("getting entry info error: %v", err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var curfname string
|
||||
for _, entry := range entries {
|
||||
curfname = entry.Name()
|
||||
if entry.IsDir() {
|
||||
curfname += "/"
|
||||
continue
|
||||
}
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
@ -1383,43 +1370,23 @@ func TestWriterAddFS(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tmpName := name
|
||||
if entryInfo.IsDir() {
|
||||
tmpName += "/"
|
||||
}
|
||||
if hdr.Name != tmpName {
|
||||
t.Errorf("test fs has filename %v; archive header has %v",
|
||||
name, hdr.Name)
|
||||
}
|
||||
|
||||
if entryInfo.Mode() != hdr.FileInfo().Mode() {
|
||||
t.Errorf("%s: test fs has mode %v; archive header has %v",
|
||||
name, entryInfo.Mode(), hdr.FileInfo().Mode())
|
||||
}
|
||||
|
||||
switch entryInfo.Mode().Type() {
|
||||
case fs.ModeDir:
|
||||
// No additional checks necessary.
|
||||
case fs.ModeSymlink:
|
||||
origtarget := string(fsys[name].Data)
|
||||
if hdr.Linkname != origtarget {
|
||||
t.Fatalf("test fs has link content %s; archive header %v", origtarget, hdr.Linkname)
|
||||
}
|
||||
default:
|
||||
data, err := io.ReadAll(tr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
origdata := fsys[name].Data
|
||||
|
||||
if hdr.Name != curfname {
|
||||
t.Fatalf("got filename %v, want %v",
|
||||
curfname, hdr.Name)
|
||||
}
|
||||
|
||||
origdata := fsys[curfname].Data
|
||||
if string(data) != string(origdata) {
|
||||
t.Fatalf("test fs has file content %v; archive header has %v", origdata, data)
|
||||
t.Fatalf("got file content %v, want %v",
|
||||
data, origdata)
|
||||
}
|
||||
}
|
||||
}
|
||||
if entriesLeft > 0 {
|
||||
t.Fatalf("not all entries are in the archive")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriterAddFSNonRegularFiles(t *testing.T) {
|
||||
fsys := fstest.MapFS{
|
||||
|
@ -8,7 +8,6 @@ import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"internal/godebug"
|
||||
@ -805,9 +804,6 @@ func toValidName(name string) string {
|
||||
|
||||
func (r *Reader) initFileList() {
|
||||
r.fileListOnce.Do(func() {
|
||||
// Preallocate the minimum size of the index.
|
||||
// We may also synthesize additional directory entries.
|
||||
r.fileList = make([]fileListEntry, 0, len(r.File))
|
||||
// files and knownDirs map from a file/directory name
|
||||
// to an index into the r.fileList entry that we are
|
||||
// building. They are used to mark duplicate entries.
|
||||
@ -906,8 +902,14 @@ func (r *Reader) Open(name string) (fs.File, error) {
|
||||
}
|
||||
|
||||
func split(name string) (dir, elem string, isDir bool) {
|
||||
name, isDir = strings.CutSuffix(name, "/")
|
||||
i := strings.LastIndexByte(name, '/')
|
||||
if len(name) > 0 && name[len(name)-1] == '/' {
|
||||
isDir = true
|
||||
name = name[:len(name)-1]
|
||||
}
|
||||
i := len(name) - 1
|
||||
for i >= 0 && name[i] != '/' {
|
||||
i--
|
||||
}
|
||||
if i < 0 {
|
||||
return ".", name, isDir
|
||||
}
|
||||
@ -989,12 +991,6 @@ func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
|
||||
s, err := d.files[d.offset+i].stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if s.Name() == "." || !fs.ValidPath(s.Name()) {
|
||||
return nil, &fs.PathError{
|
||||
Op: "readdir",
|
||||
Path: d.e.name,
|
||||
Err: fmt.Errorf("invalid file name: %v", d.files[d.offset+i].name),
|
||||
}
|
||||
}
|
||||
list[i] = s
|
||||
}
|
||||
|
@ -8,14 +8,13 @@ import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"internal/obscuretestdata"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
@ -1275,56 +1274,13 @@ func TestFSWalk(t *testing.T) {
|
||||
} else if !test.wantErr && sawErr {
|
||||
t.Error("unexpected error")
|
||||
}
|
||||
if test.want != nil && !slices.Equal(files, test.want) {
|
||||
if test.want != nil && !reflect.DeepEqual(files, test.want) {
|
||||
t.Errorf("got %v want %v", files, test.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFSWalkBadFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var buf bytes.Buffer
|
||||
zw := NewWriter(&buf)
|
||||
hdr := &FileHeader{Name: "."}
|
||||
hdr.SetMode(fs.ModeDir | 0o755)
|
||||
w, err := zw.CreateHeader(hdr)
|
||||
if err != nil {
|
||||
t.Fatalf("create zip header: %v", err)
|
||||
}
|
||||
_, err = w.Write([]byte("some data"))
|
||||
if err != nil {
|
||||
t.Fatalf("write zip contents: %v", err)
|
||||
|
||||
}
|
||||
err = zw.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("close zip writer: %v", err)
|
||||
|
||||
}
|
||||
|
||||
zr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
|
||||
if err != nil {
|
||||
t.Fatalf("create zip reader: %v", err)
|
||||
|
||||
}
|
||||
var count int
|
||||
var errRepeat = errors.New("repeated call to path")
|
||||
err = fs.WalkDir(zr, ".", func(p string, d fs.DirEntry, err error) error {
|
||||
count++
|
||||
if count > 2 { // once for directory read, once for the error
|
||||
return errRepeat
|
||||
}
|
||||
return err
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("expected error from invalid file name")
|
||||
} else if errors.Is(err, errRepeat) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFSModTime(t *testing.T) {
|
||||
t.Parallel()
|
||||
z, err := OpenReader("testdata/subdir.zip")
|
||||
@ -1624,7 +1580,7 @@ func TestCVE202141772(t *testing.T) {
|
||||
t.Errorf("Opening %q with fs.FS API succeeded", f.Name)
|
||||
}
|
||||
}
|
||||
if !slices.Equal(names, entryNames) {
|
||||
if !reflect.DeepEqual(names, entryNames) {
|
||||
t.Errorf("Unexpected file entries: %q", names)
|
||||
}
|
||||
if _, err := r.Open(""); err == nil {
|
||||
@ -1737,7 +1693,7 @@ func TestInsecurePaths(t *testing.T) {
|
||||
for _, f := range zr.File {
|
||||
gotPaths = append(gotPaths, f.Name)
|
||||
}
|
||||
if !slices.Equal(gotPaths, []string{path}) {
|
||||
if !reflect.DeepEqual(gotPaths, []string{path}) {
|
||||
t.Errorf("NewReader for archive with file %q: got files %q", path, gotPaths)
|
||||
continue
|
||||
}
|
||||
@ -1762,7 +1718,7 @@ func TestDisableInsecurePathCheck(t *testing.T) {
|
||||
for _, f := range zr.File {
|
||||
gotPaths = append(gotPaths, f.Name)
|
||||
}
|
||||
if want := []string{name}; !slices.Equal(gotPaths, want) {
|
||||
if want := []string{name}; !reflect.DeepEqual(gotPaths, want) {
|
||||
t.Errorf("NewReader with zipinsecurepath=1: got files %q, want %q", gotPaths, want)
|
||||
}
|
||||
}
|
||||
|
@ -505,14 +505,14 @@ func (w *Writer) AddFS(fsys fs.FS) error {
		if err != nil {
			return err
		}
		if name == "." {
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		if !d.IsDir() && !info.Mode().IsRegular() {
		if !info.Mode().IsRegular() {
			return errors.New("zip: cannot add non-regular file")
		}
		h, err := FileInfoHeader(info)
@ -520,17 +520,11 @@ func (w *Writer) AddFS(fsys fs.FS) error {
			return err
		}
		h.Name = name
		if d.IsDir() {
			h.Name += "/"
		}
		h.Method = Deflate
		fw, err := w.CreateHeader(h)
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		f, err := fsys.Open(name)
		if err != nil {
			return err
|
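Both versions of this hunk revolve around zip Writer.AddFS, which walks a fs.FS and copies its files into the archive. A short usage sketch may help; the file names and contents below are made up for illustration, and the in-memory file system comes from testing/fstest:

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"log"
	"testing/fstest"
)

func main() {
	// A tiny in-memory file system to archive.
	fsys := fstest.MapFS{
		"hello.txt":      {Data: []byte("hello")},
		"dir/nested.txt": {Data: []byte("nested")},
	}

	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	if err := zw.AddFS(fsys); err != nil { // walks fsys and copies each regular file
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("archive size:", buf.Len())
}
```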
@ -108,7 +108,7 @@ func TestWriter(t *testing.T) {
|
||||
|
||||
// TestWriterComment is test for EOCD comment read/write.
|
||||
func TestWriterComment(t *testing.T) {
|
||||
tests := []struct {
|
||||
var tests = []struct {
|
||||
comment string
|
||||
ok bool
|
||||
}{
|
||||
@ -158,7 +158,7 @@ func TestWriterComment(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestWriterUTF8(t *testing.T) {
|
||||
utf8Tests := []struct {
|
||||
var utf8Tests = []struct {
|
||||
name string
|
||||
comment string
|
||||
nonUTF8 bool
|
||||
@ -619,32 +619,32 @@ func TestWriterAddFS(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
w := NewWriter(buf)
|
||||
tests := []WriteTest{
|
||||
{Name: "emptyfolder", Mode: 0o755 | os.ModeDir},
|
||||
{Name: "file.go", Data: []byte("hello"), Mode: 0644},
|
||||
{Name: "subfolder/another.go", Data: []byte("world"), Mode: 0644},
|
||||
// Notably missing here is the "subfolder" directory. This makes sure even
|
||||
// if we don't have a subfolder directory listed.
|
||||
{
|
||||
Name: "file.go",
|
||||
Data: []byte("hello"),
|
||||
Mode: 0644,
|
||||
},
|
||||
{
|
||||
Name: "subfolder/another.go",
|
||||
Data: []byte("world"),
|
||||
Mode: 0644,
|
||||
},
|
||||
}
|
||||
err := w.AddFS(writeTestsToFS(tests))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add subfolder into fsys to match what we'll read from the zip.
|
||||
tests = append(tests[:2:2], WriteTest{Name: "subfolder", Mode: 0o555 | os.ModeDir}, tests[2])
|
||||
|
||||
// read it back
|
||||
r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, wt := range tests {
|
||||
if wt.Mode.IsDir() {
|
||||
wt.Name += "/"
|
||||
}
|
||||
testReadFile(t, r.File[i], &wt)
|
||||
}
|
||||
}
|
||||
|
@ -29,9 +29,6 @@ var (
|
||||
// Buffered input.
|
||||
|
||||
// Reader implements buffering for an io.Reader object.
|
||||
// A new Reader is created by calling [NewReader] or [NewReaderSize];
|
||||
// alternatively the zero value of a Reader may be used after calling [Reset]
|
||||
// on it.
|
||||
type Reader struct {
|
||||
buf []byte
|
||||
rd io.Reader // reader provided by the client
|
||||
@ -133,10 +130,9 @@ func (b *Reader) readErr() error {
|
||||
}
|
||||
|
||||
// Peek returns the next n bytes without advancing the reader. The bytes stop
|
||||
// being valid at the next read call. If necessary, Peek will read more bytes
|
||||
// into the buffer in order to make n bytes available. If Peek returns fewer
|
||||
// than n bytes, it also returns an error explaining why the read is short.
|
||||
// The error is [ErrBufferFull] if n is larger than b's buffer size.
|
||||
// being valid at the next read call. If Peek returns fewer than n bytes, it
|
||||
// also returns an error explaining why the read is short. The error is
|
||||
// [ErrBufferFull] if n is larger than b's buffer size.
|
||||
//
|
||||
// Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding
|
||||
// until the next read operation.
|
||||
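Since the comment above spells out Peek's contract, a tiny illustration may help; the buffer size and input string here are arbitrary:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	r := bufio.NewReaderSize(strings.NewReader("golang"), 16)

	// Peek does not advance the reader; the next Read still sees "go".
	b, err := r.Peek(2)
	fmt.Printf("%q %v\n", b, err) // "go" <nil>

	// Asking for more than the buffer can hold reports ErrBufferFull.
	_, err = r.Peek(32)
	fmt.Println(err == bufio.ErrBufferFull) // true
}
```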
@ -519,12 +515,10 @@ func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
b.lastByte = -1
|
||||
b.lastRuneSize = -1
|
||||
|
||||
if b.r < b.w {
|
||||
n, err = b.writeBuf(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if r, ok := b.rd.(io.WriterTo); ok {
|
||||
m, err := r.WriteTo(w)
|
||||
|
@ -9,7 +9,6 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"internal/asan"
|
||||
"io"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
@ -586,9 +585,6 @@ func TestWriteInvalidRune(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReadStringAllocs(t *testing.T) {
|
||||
if asan.Enabled {
|
||||
t.Skip("test allocates more with -asan; see #70079")
|
||||
}
|
||||
r := strings.NewReader(" foo foo 42 42 42 42 42 42 42 42 4.2 4.2 4.2 4.2\n")
|
||||
buf := NewReader(r)
|
||||
allocs := testing.AllocsPerRun(100, func() {
|
||||
@ -640,7 +636,7 @@ func TestWriter(t *testing.T) {
|
||||
for l := 0; l < len(written); l++ {
|
||||
if written[l] != data[l] {
|
||||
t.Errorf("wrong bytes written")
|
||||
t.Errorf("want=%q", data[:len(written)])
|
||||
t.Errorf("want=%q", data[0:len(written)])
|
||||
t.Errorf("have=%q", written)
|
||||
}
|
||||
}
|
||||
@ -939,6 +935,7 @@ func (t *testReader) Read(buf []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
func testReadLine(t *testing.T, input []byte) {
|
||||
//for stride := 1; stride < len(input); stride++ {
|
||||
for stride := 1; stride < 2; stride++ {
|
||||
done := 0
|
||||
reader := testReader{input, stride}
|
||||
@ -1149,7 +1146,7 @@ func (w errorWriterToTest) Write(p []byte) (int, error) {
|
||||
var errorWriterToTests = []errorWriterToTest{
|
||||
{1, 0, nil, io.ErrClosedPipe, io.ErrClosedPipe},
|
||||
{0, 1, io.ErrClosedPipe, nil, io.ErrClosedPipe},
|
||||
{0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrUnexpectedEOF},
|
||||
{0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrClosedPipe},
|
||||
{0, 1, io.EOF, nil, nil},
|
||||
}
|
||||
|
||||
|
@ -33,33 +33,6 @@ func ExampleWriter_AvailableBuffer() {
|
||||
// Output: 1 2 3 4
|
||||
}
|
||||
|
||||
// ExampleWriter_ReadFrom demonstrates how to use the ReadFrom method of Writer.
|
||||
func ExampleWriter_ReadFrom() {
|
||||
var buf bytes.Buffer
|
||||
writer := bufio.NewWriter(&buf)
|
||||
|
||||
data := "Hello, world!\nThis is a ReadFrom example."
|
||||
reader := strings.NewReader(data)
|
||||
|
||||
n, err := writer.ReadFrom(reader)
|
||||
if err != nil {
|
||||
fmt.Println("ReadFrom Error:", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = writer.Flush(); err != nil {
|
||||
fmt.Println("Flush Error:", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("Bytes written:", n)
|
||||
fmt.Println("Buffer contents:", buf.String())
|
||||
// Output:
|
||||
// Bytes written: 41
|
||||
// Buffer contents: Hello, world!
|
||||
// This is a ReadFrom example.
|
||||
}
|
||||
|
||||
// The simplest use of a Scanner, to read standard input as a set of lines.
|
||||
func ExampleScanner_lines() {
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
|
@ -1,96 +0,0 @@
|
||||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build unix
|
||||
|
||||
package bufio_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestCopyUnixpacket tests that we can use bufio when copying
|
||||
// across a unixpacket socket. This used to fail due to an unnecessary
|
||||
// empty Write call that was interpreted as an EOF.
|
||||
func TestCopyUnixpacket(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
socket := filepath.Join(tmpDir, "unixsock")
|
||||
|
||||
// Start a unixpacket server.
|
||||
addr := &net.UnixAddr{
|
||||
Name: socket,
|
||||
Net: "unixpacket",
|
||||
}
|
||||
server, err := net.ListenUnix("unixpacket", addr)
|
||||
if err != nil {
|
||||
t.Skipf("skipping test because opening a unixpacket socket failed: %v", err)
|
||||
}
|
||||
|
||||
// Start a goroutine for the server to accept one connection
|
||||
// and read all the data sent on the connection,
|
||||
// reporting the number of bytes read on ch.
|
||||
ch := make(chan int, 1)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
tot := 0
|
||||
defer func() {
|
||||
ch <- tot
|
||||
}()
|
||||
|
||||
serverConn, err := server.Accept()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
n, err := serverConn.Read(buf)
|
||||
tot += n
|
||||
if err == io.EOF {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
clientConn, err := net.DialUnix("unixpacket", nil, addr)
|
||||
if err != nil {
|
||||
// Leaves the server goroutine hanging. Oh well.
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer wg.Wait()
|
||||
defer clientConn.Close()
|
||||
|
||||
const data = "data"
|
||||
r := bufio.NewReader(strings.NewReader(data))
|
||||
n, err := io.Copy(clientConn, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if n != int64(len(data)) {
|
||||
t.Errorf("io.Copy returned %d, want %d", n, len(data))
|
||||
}
|
||||
|
||||
clientConn.Close()
|
||||
tot := <-ch
|
||||
|
||||
if tot != len(data) {
|
||||
t.Errorf("server read %d, want %d", tot, len(data))
|
||||
}
|
||||
}
|
@ -162,11 +162,11 @@ func delete(m map[Type]Type1, key Type)
|
||||
|
||||
// The len built-in function returns the length of v, according to its type:
|
||||
//
|
||||
// - Array: the number of elements in v.
|
||||
// - Pointer to array: the number of elements in *v (even if v is nil).
|
||||
// - Slice, or map: the number of elements in v; if v is nil, len(v) is zero.
|
||||
// - String: the number of bytes in v.
|
||||
// - Channel: the number of elements queued (unread) in the channel buffer;
|
||||
// Array: the number of elements in v.
|
||||
// Pointer to array: the number of elements in *v (even if v is nil).
|
||||
// Slice, or map: the number of elements in v; if v is nil, len(v) is zero.
|
||||
// String: the number of bytes in v.
|
||||
// Channel: the number of elements queued (unread) in the channel buffer;
|
||||
// if v is nil, len(v) is zero.
|
||||
//
|
||||
// For some arguments, such as a string literal or a simple array expression, the
|
||||
@ -176,11 +176,11 @@ func len(v Type) int
|
||||
|
||||
// The cap built-in function returns the capacity of v, according to its type:
|
||||
//
|
||||
// - Array: the number of elements in v (same as len(v)).
|
||||
// - Pointer to array: the number of elements in *v (same as len(v)).
|
||||
// - Slice: the maximum length the slice can reach when resliced;
|
||||
// Array: the number of elements in v (same as len(v)).
|
||||
// Pointer to array: the number of elements in *v (same as len(v)).
|
||||
// Slice: the maximum length the slice can reach when resliced;
|
||||
// if v is nil, cap(v) is zero.
|
||||
// - Channel: the channel buffer capacity, in units of elements;
|
||||
// Channel: the channel buffer capacity, in units of elements;
|
||||
// if v is nil, cap(v) is zero.
|
||||
//
|
||||
// For some arguments, such as a simple array expression, the result can be a
|
||||
@ -194,16 +194,16 @@ func cap(v Type) int
|
||||
// argument, not a pointer to it. The specification of the result depends on
|
||||
// the type:
|
||||
//
|
||||
// - Slice: The size specifies the length. The capacity of the slice is
|
||||
// Slice: The size specifies the length. The capacity of the slice is
|
||||
// equal to its length. A second integer argument may be provided to
|
||||
// specify a different capacity; it must be no smaller than the
|
||||
// length. For example, make([]int, 0, 10) allocates an underlying array
|
||||
// of size 10 and returns a slice of length 0 and capacity 10 that is
|
||||
// backed by this underlying array.
|
||||
// - Map: An empty map is allocated with enough space to hold the
|
||||
// Map: An empty map is allocated with enough space to hold the
|
||||
// specified number of elements. The size may be omitted, in which case
|
||||
// a small starting size is allocated.
|
||||
// - Channel: The channel's buffer is initialized with the specified
|
||||
// Channel: The channel's buffer is initialized with the specified
|
||||
// buffer capacity. If zero, or the size is omitted, the channel is
|
||||
// unbuffered.
|
||||
func make(t Type, size ...IntegerType) Type
|
||||
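The documentation above describes make for slices, maps, and channels; a compact demonstration of the three cases:

```go
package main

import "fmt"

func main() {
	s := make([]int, 0, 10)      // length 0, capacity 10, backed by one array
	m := make(map[string]int, 4) // room for about 4 entries up front
	ch := make(chan int, 2)      // buffered channel with capacity 2

	fmt.Println(len(s), cap(s)) // 0 10
	fmt.Println(len(m))         // 0
	fmt.Println(cap(ch))        // 2
}
```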
@ -247,7 +247,7 @@ func imag(c ComplexType) FloatType
// to the zero value of the respective element type. If the argument
// type is a type parameter, the type parameter's type set must
// contain only map or slice types, and clear performs the operation
// implied by the type argument. If t is nil, clear is a no-op.
// implied by the type argument.
func clear[T ~[]Type | ~map[Type]Type1](t T)

// The close built-in function closes a channel, which must be either
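Both wordings of the clear documentation appear above; the observable behavior, including the nil no-op called out in the longer wording, looks like this:

```go
package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}
	clear(m)            // removes all entries
	fmt.Println(len(m)) // 0

	s := []int{1, 2, 3}
	clear(s)       // sets every element to the zero value
	fmt.Println(s) // [0 0 0]

	var nilMap map[string]int
	clear(nilMap)              // no-op on a nil map
	fmt.Println(nilMap == nil) // true
}
```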
|
@ -247,8 +247,8 @@ func growSlice(b []byte, n int) []byte {
		c = 2 * cap(b)
	}
	b2 := append([]byte(nil), make([]byte, c)...)
	i := copy(b2, b)
	return b2[:i]
	copy(b2, b)
	return b2[:len(b)]
}

// WriteTo writes data to w until the buffer is drained or an error occurs.
|
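The two return forms in the hunk above are equivalent: copy returns len(b) whenever the destination is at least as long as b. A minimal sketch of the grow-and-copy idiom, under the assumption that grow is just an illustrative helper name:

```go
package main

import "fmt"

// grow returns a slice with the same contents as b but capacity at least c.
func grow(b []byte, c int) []byte {
	if cap(b) >= c {
		return b
	}
	b2 := append([]byte(nil), make([]byte, c)...) // allocate length c, all zeros
	i := copy(b2, b)                              // i == len(b)
	return b2[:i]                                 // same as b2[:len(b)]
}

func main() {
	b := grow([]byte("abc"), 64)
	fmt.Println(len(b), cap(b) >= 64) // 3 true
}
```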
@ -213,7 +213,7 @@ func TestLargeByteWrites(t *testing.T) {
|
||||
func TestLargeStringReads(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[:len(testString)/i])
|
||||
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[0:len(testString)/i])
|
||||
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
|
||||
}
|
||||
check(t, "TestLargeStringReads (3)", &buf, "")
|
||||
@ -222,7 +222,7 @@ func TestLargeStringReads(t *testing.T) {
|
||||
func TestLargeByteReads(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
|
||||
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
|
||||
}
|
||||
check(t, "TestLargeByteReads (3)", &buf, "")
|
||||
@ -274,7 +274,7 @@ func TestNil(t *testing.T) {
|
||||
func TestReadFrom(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
|
||||
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
var b Buffer
|
||||
b.ReadFrom(&buf)
|
||||
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(testString)))
|
||||
@ -337,7 +337,7 @@ func TestReadFromNegativeReader(t *testing.T) {
|
||||
func TestWriteTo(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
|
||||
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
var b Buffer
|
||||
buf.WriteTo(&b)
|
||||
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(testString)))
|
||||
|
@ -8,7 +8,6 @@ package bytes
|
||||
|
||||
import (
|
||||
"internal/bytealg"
|
||||
"math/bits"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
_ "unsafe" // for linkname
|
||||
@ -137,7 +136,6 @@ func LastIndexByte(s []byte, c byte) int {
|
||||
// If r is [utf8.RuneError], it returns the first instance of any
|
||||
// invalid UTF-8 byte sequence.
|
||||
func IndexRune(s []byte, r rune) int {
|
||||
const haveFastIndex = bytealg.MaxBruteForce > 0
|
||||
switch {
|
||||
case 0 <= r && r < utf8.RuneSelf:
|
||||
return IndexByte(s, byte(r))
|
||||
@ -153,64 +151,9 @@ func IndexRune(s []byte, r rune) int {
|
||||
case !utf8.ValidRune(r):
|
||||
return -1
|
||||
default:
|
||||
// Search for rune r using the last byte of its UTF-8 encoded form.
|
||||
// The distribution of the last byte is more uniform compared to the
|
||||
// first byte which has a 78% chance of being [240, 243, 244].
|
||||
var b [utf8.UTFMax]byte
|
||||
n := utf8.EncodeRune(b[:], r)
|
||||
last := n - 1
|
||||
i := last
|
||||
fails := 0
|
||||
for i < len(s) {
|
||||
if s[i] != b[last] {
|
||||
o := IndexByte(s[i+1:], b[last])
|
||||
if o < 0 {
|
||||
return -1
|
||||
}
|
||||
i += o + 1
|
||||
}
|
||||
// Step backwards comparing bytes.
|
||||
for j := 1; j < n; j++ {
|
||||
if s[i-j] != b[last-j] {
|
||||
goto next
|
||||
}
|
||||
}
|
||||
return i - last
|
||||
next:
|
||||
fails++
|
||||
i++
|
||||
if (haveFastIndex && fails > bytealg.Cutover(i)) && i < len(s) ||
|
||||
(!haveFastIndex && fails >= 4+i>>4 && i < len(s)) {
|
||||
goto fallback
|
||||
}
|
||||
}
|
||||
return -1
|
||||
|
||||
fallback:
|
||||
// Switch to bytealg.Index, if available, or a brute force search when
|
||||
// IndexByte returns too many false positives.
|
||||
if haveFastIndex {
|
||||
if j := bytealg.Index(s[i-last:], b[:n]); j >= 0 {
|
||||
return i + j - last
|
||||
}
|
||||
} else {
|
||||
// If bytealg.Index is not available a brute force search is
|
||||
// ~1.5-3x faster than Rabin-Karp since n is small.
|
||||
c0 := b[last]
|
||||
c1 := b[last-1] // There are at least 2 chars to match
|
||||
loop:
|
||||
for ; i < len(s); i++ {
|
||||
if s[i] == c0 && s[i-1] == c1 {
|
||||
for k := 2; k < n; k++ {
|
||||
if s[i-k] != b[last-k] {
|
||||
continue loop
|
||||
}
|
||||
}
|
||||
return i - last
|
||||
}
|
||||
}
|
||||
}
|
||||
return -1
|
||||
return Index(s, b[:n])
|
||||
}
|
||||
}
|
||||
|
||||
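The long branch above searches for a multi-byte rune by the last byte of its UTF-8 encoding, because last bytes are distributed more evenly than first bytes. A simplified sketch of that idea (indexRuneByLastByte is our name; the real code adds fallback and cutover heuristics that are omitted here):

```go
package main

import (
	"bytes"
	"fmt"
	"unicode/utf8"
)

// indexRuneByLastByte finds r in s by scanning for the final byte of its
// UTF-8 encoding and then comparing the preceding bytes.
func indexRuneByLastByte(s []byte, r rune) int {
	var b [utf8.UTFMax]byte
	n := utf8.EncodeRune(b[:], r)
	last := n - 1
	for i := last; i < len(s); {
		if s[i] != b[last] {
			o := bytes.IndexByte(s[i+1:], b[last])
			if o < 0 {
				return -1
			}
			i += o + 1
		}
		if bytes.Equal(s[i-last:i+1], b[:n]) {
			return i - last
		}
		i++
	}
	return -1
}

func main() {
	s := []byte("héllo, wörld")
	fmt.Println(indexRuneByLastByte(s, 'ö')) // 9
	fmt.Println(bytes.IndexRune(s, 'ö'))     // 9
}
```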
@ -451,9 +394,7 @@ var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
|
||||
// Fields interprets s as a sequence of UTF-8-encoded code points.
|
||||
// It splits the slice s around each instance of one or more consecutive white space
|
||||
// characters, as defined by [unicode.IsSpace], returning a slice of subslices of s or an
|
||||
// empty slice if s contains only white space. Every element of the returned slice is
|
||||
// non-empty. Unlike [Split], leading and trailing runs of white space characters
|
||||
// are discarded.
|
||||
// empty slice if s contains only white space.
|
||||
func Fields(s []byte) [][]byte {
|
||||
// First count the fields.
|
||||
// This is an exact count if s is ASCII, otherwise it is an approximation.
|
||||
@ -507,9 +448,7 @@ func Fields(s []byte) [][]byte {
|
||||
// FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
|
||||
// It splits the slice s at each run of code points c satisfying f(c) and
|
||||
// returns a slice of subslices of s. If all code points in s satisfy f(c), or
|
||||
// len(s) == 0, an empty slice is returned. Every element of the returned slice is
|
||||
// non-empty. Unlike [SplitFunc], leading and trailing runs of code points
|
||||
// satisfying f(c) are discarded.
|
||||
// len(s) == 0, an empty slice is returned.
|
||||
//
|
||||
// FieldsFunc makes no guarantees about the order in which it calls f(c)
|
||||
// and assumes that f always returns the same value for a given c.
|
||||
@ -596,7 +535,7 @@ func Join(s [][]byte, sep []byte) []byte {
|
||||
|
||||
// HasPrefix reports whether the byte slice s begins with prefix.
|
||||
func HasPrefix(s, prefix []byte) bool {
|
||||
return len(s) >= len(prefix) && Equal(s[:len(prefix)], prefix)
|
||||
return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
|
||||
}
|
||||
|
||||
// HasSuffix reports whether the byte slice s ends with suffix.
|
||||
@ -655,11 +594,10 @@ func Repeat(b []byte, count int) []byte {
	if count < 0 {
		panic("bytes: negative Repeat count")
	}
	hi, lo := bits.Mul(uint(len(b)), uint(count))
	if hi > 0 || lo > uint(maxInt) {
	if len(b) > maxInt/count {
		panic("bytes: Repeat output length overflow")
	}
	n := int(lo) // lo = len(b) * count
	n := len(b) * count

	if len(b) == 0 {
		return []byte{}
@ -686,7 +624,10 @@ func Repeat(b []byte, count int) []byte {
	nb := bytealg.MakeNoZero(n)[:n:n]
	bp := copy(nb, b)
	for bp < n {
		chunk := min(bp, chunkMax)
		chunk := bp
		if chunk > chunkMax {
			chunk = chunkMax
		}
		bp += copy(nb[bp:], nb[:chunk])
	}
	return nb
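One side of the first hunk detects overflow of len(b)*count with math/bits.Mul instead of a division check. A self-contained sketch of that pattern (mulFits and maxInt are our names for this example):

```go
package main

import (
	"fmt"
	"math/bits"
)

const maxInt = int(^uint(0) >> 1)

// mulFits reports whether a*b fits in an int, mirroring the hi/lo check above:
// any non-zero high word, or a low word above maxInt, means overflow.
func mulFits(a, b int) (int, bool) {
	hi, lo := bits.Mul(uint(a), uint(b))
	if hi > 0 || lo > uint(maxInt) {
		return 0, false
	}
	return int(lo), true
}

func main() {
	fmt.Println(mulFits(3, 1<<20)) // 3145728 true
	fmt.Println(mulFits(maxInt, 2)) // 0 false
}
```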
@ -1192,23 +1133,20 @@ func Replace(s, old, new []byte, n int) []byte {
|
||||
t := make([]byte, len(s)+n*(len(new)-len(old)))
|
||||
w := 0
|
||||
start := 0
|
||||
if len(old) > 0 {
|
||||
for range n {
|
||||
j := start + Index(s[start:], old)
|
||||
for i := 0; i < n; i++ {
|
||||
j := start
|
||||
if len(old) == 0 {
|
||||
if i > 0 {
|
||||
_, wid := utf8.DecodeRune(s[start:])
|
||||
j += wid
|
||||
}
|
||||
} else {
|
||||
j += Index(s[start:], old)
|
||||
}
|
||||
w += copy(t[w:], s[start:j])
|
||||
w += copy(t[w:], new)
|
||||
start = j + len(old)
|
||||
}
|
||||
} else { // len(old) == 0
|
||||
w += copy(t[w:], new)
|
||||
for range n - 1 {
|
||||
_, wid := utf8.DecodeRune(s[start:])
|
||||
j := start + wid
|
||||
w += copy(t[w:], s[start:j])
|
||||
w += copy(t[w:], new)
|
||||
start = j
|
||||
}
|
||||
}
|
||||
w += copy(t[w:], s[start:])
|
||||
return t[0:w]
|
||||
}
|
||||
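The hunk above only reshapes the body of bytes.Replace; its user-visible behavior, including the empty-separator case handled by the len(old) == 0 branch, is unchanged. For reference:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	s := []byte("banana")

	fmt.Printf("%s\n", bytes.Replace(s, []byte("an"), []byte("AN"), 1))  // bANana
	fmt.Printf("%s\n", bytes.Replace(s, []byte("an"), []byte("AN"), -1)) // bANANa

	// With an empty old, the replacement is inserted before every rune
	// and once at the end (n = -1 means no limit).
	fmt.Printf("%s\n", bytes.Replace([]byte("abc"), nil, []byte("-"), -1)) // -a-b-c-
}
```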
|
@ -7,12 +7,10 @@ package bytes_test
|
||||
import (
|
||||
. "bytes"
|
||||
"fmt"
|
||||
"internal/asan"
|
||||
"internal/testenv"
|
||||
"iter"
|
||||
"math"
|
||||
"math/rand"
|
||||
"slices"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"unicode"
|
||||
@ -20,6 +18,18 @@ import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func eq(a, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
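The diff swaps between this hand-rolled eq helper and slices.Equal; for string slices the two are interchangeable, e.g.:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	a := []string{"x", "y"}
	b := []string{"x", "y"}
	fmt.Println(slices.Equal(a, b)) // true
}
```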
|
||||
func sliceOfString(s [][]byte) []string {
|
||||
result := make([]string, len(s))
|
||||
for i, v := range s {
|
||||
@ -28,37 +38,6 @@ func sliceOfString(s [][]byte) []string {
|
||||
return result
|
||||
}
|
||||
|
||||
func collect(t *testing.T, seq iter.Seq[[]byte]) [][]byte {
|
||||
out := slices.Collect(seq)
|
||||
out1 := slices.Collect(seq)
|
||||
if !slices.Equal(sliceOfString(out), sliceOfString(out1)) {
|
||||
t.Fatalf("inconsistent seq:\n%s\n%s", out, out1)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
type LinesTest struct {
|
||||
a string
|
||||
b []string
|
||||
}
|
||||
|
||||
var linesTests = []LinesTest{
|
||||
{a: "abc\nabc\n", b: []string{"abc\n", "abc\n"}},
|
||||
{a: "abc\r\nabc", b: []string{"abc\r\n", "abc"}},
|
||||
{a: "abc\r\n", b: []string{"abc\r\n"}},
|
||||
{a: "\nabc", b: []string{"\n", "abc"}},
|
||||
{a: "\nabc\n\n", b: []string{"\n", "abc\n", "\n"}},
|
||||
}
|
||||
|
||||
func TestLines(t *testing.T) {
|
||||
for _, s := range linesTests {
|
||||
result := sliceOfString(slices.Collect(Lines([]byte(s.a))))
|
||||
if !slices.Equal(result, s.b) {
|
||||
t.Errorf(`slices.Collect(Lines(%q)) = %q; want %q`, s.a, result, s.b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For ease of reading, the test cases use strings that are converted to byte
|
||||
// slices before invoking the functions.
|
||||
|
||||
@ -198,11 +177,6 @@ var indexTests = []BinOpTest{
|
||||
{"oxoxoxoxoxoxoxoxoxoxoxox", "oy", -1},
|
||||
// test fallback to Rabin-Karp.
|
||||
{"000000000000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000001", 5},
|
||||
// test fallback to IndexRune
|
||||
{"oxoxoxoxoxoxoxoxoxoxox☺", "☺", 22},
|
||||
// invalid UTF-8 byte sequence (must be longer than bytealg.MaxBruteForce to
|
||||
// test that we don't use IndexRune)
|
||||
{"xx0123456789012345678901234567890123456789012345678901234567890120123456789012345678901234567890123456xxx\xed\x9f\xc0", "\xed\x9f\xc0", 105},
|
||||
}
|
||||
|
||||
var lastIndexTests = []BinOpTest{
|
||||
@ -451,31 +425,6 @@ func TestIndexRune(t *testing.T) {
|
||||
{"some_text=some_value", '=', 9},
|
||||
{"☺a", 'a', 3},
|
||||
{"a☻☺b", '☺', 4},
|
||||
{"𠀳𠀗𠀾𠁄𠀧𠁆𠁂𠀫𠀖𠀪𠀲𠀴𠁀𠀨𠀿", '𠀿', 56},
|
||||
|
||||
// 2 bytes
|
||||
{"ӆ", 'ӆ', 0},
|
||||
{"a", 'ӆ', -1},
|
||||
{" ӆ", 'ӆ', 2},
|
||||
{" a", 'ӆ', -1},
|
||||
{strings.Repeat("ц", 64) + "ӆ", 'ӆ', 128}, // test cutover
|
||||
{strings.Repeat("ц", 64), 'ӆ', -1},
|
||||
|
||||
// 3 bytes
|
||||
{"Ꚁ", 'Ꚁ', 0},
|
||||
{"a", 'Ꚁ', -1},
|
||||
{" Ꚁ", 'Ꚁ', 2},
|
||||
{" a", 'Ꚁ', -1},
|
||||
{strings.Repeat("Ꙁ", 64) + "Ꚁ", 'Ꚁ', 192}, // test cutover
|
||||
{strings.Repeat("Ꙁ", 64) + "Ꚁ", '䚀', -1}, // 'Ꚁ' and '䚀' share the same last two bytes
|
||||
|
||||
// 4 bytes
|
||||
{"𡌀", '𡌀', 0},
|
||||
{"a", '𡌀', -1},
|
||||
{" 𡌀", '𡌀', 2},
|
||||
{" a", '𡌀', -1},
|
||||
{strings.Repeat("𡋀", 64) + "𡌀", '𡌀', 256}, // test cutover
|
||||
{strings.Repeat("𡋀", 64) + "𡌀", '𣌀', -1}, // '𡌀' and '𣌀' share the same last two bytes
|
||||
|
||||
// RuneError should match any invalid UTF-8 byte sequence.
|
||||
{"<22>", '<27>', 0},
|
||||
@ -489,13 +438,6 @@ func TestIndexRune(t *testing.T) {
|
||||
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", -1, -1},
|
||||
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", 0xD800, -1}, // Surrogate pair
|
||||
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", utf8.MaxRune + 1, -1},
|
||||
|
||||
// Test the cutover to bytealg.Index when it is triggered in
|
||||
// the middle of rune that contains consecutive runs of equal bytes.
|
||||
{"aaaaaKKKK\U000bc104", '\U000bc104', 17}, // cutover: (n + 16) / 8
|
||||
{"aaaaaKKKK鄄", '鄄', 17},
|
||||
{"aaKKKKKa\U000bc104", '\U000bc104', 18}, // cutover: 4 + n>>4
|
||||
{"aaKKKKKa鄄", '鄄', 18},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
if got := IndexRune([]byte(tt.in), tt.rune); got != tt.want {
|
||||
@ -643,21 +585,6 @@ func BenchmarkIndexRuneASCII(b *testing.B) {
|
||||
benchBytes(b, indexSizes, bmIndexRuneASCII(IndexRune))
|
||||
}
|
||||
|
||||
func BenchmarkIndexRuneUnicode(b *testing.B) {
|
||||
b.Run("Latin", func(b *testing.B) {
|
||||
// Latin is mostly 1, 2, 3 byte runes.
|
||||
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Latin, 'é'))
|
||||
})
|
||||
b.Run("Cyrillic", func(b *testing.B) {
|
||||
// Cyrillic is mostly 2 and 3 byte runes.
|
||||
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Cyrillic, 'Ꙁ'))
|
||||
})
|
||||
b.Run("Han", func(b *testing.B) {
|
||||
// Han consists only of 3 and 4 byte runes.
|
||||
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Han, '𠀿'))
|
||||
})
|
||||
}
|
||||
|
||||
func bmIndexRuneASCII(index func([]byte, rune) int) func(b *testing.B, n int) {
|
||||
return func(b *testing.B, n int) {
|
||||
buf := bmbuf[0:n]
|
||||
@ -688,61 +615,6 @@ func bmIndexRune(index func([]byte, rune) int) func(b *testing.B, n int) {
|
||||
}
|
||||
}
|
||||
|
||||
func bmIndexRuneUnicode(rt *unicode.RangeTable, needle rune) func(b *testing.B, n int) {
|
||||
var rs []rune
|
||||
for _, r16 := range rt.R16 {
|
||||
for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) {
|
||||
if r != needle {
|
||||
rs = append(rs, rune(r))
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, r32 := range rt.R32 {
|
||||
for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) {
|
||||
if r != needle {
|
||||
rs = append(rs, rune(r))
|
||||
}
|
||||
}
|
||||
}
|
||||
// Shuffle the runes so that they are not in descending order.
|
||||
// The sort is deterministic since this is used for benchmarks,
|
||||
// which need to be repeatable.
|
||||
rr := rand.New(rand.NewSource(1))
|
||||
rr.Shuffle(len(rs), func(i, j int) {
|
||||
rs[i], rs[j] = rs[j], rs[i]
|
||||
})
|
||||
uchars := string(rs)
|
||||
|
||||
return func(b *testing.B, n int) {
|
||||
buf := bmbuf[0:n]
|
||||
o := copy(buf, uchars)
|
||||
for o < len(buf) {
|
||||
o += copy(buf[o:], uchars)
|
||||
}
|
||||
|
||||
// Make space for the needle rune at the end of buf.
|
||||
m := utf8.RuneLen(needle)
|
||||
for o := m; o > 0; {
|
||||
_, sz := utf8.DecodeLastRune(buf)
|
||||
copy(buf[len(buf)-sz:], "\x00\x00\x00\x00")
|
||||
buf = buf[:len(buf)-sz]
|
||||
o -= sz
|
||||
}
|
||||
buf = utf8.AppendRune(buf[:n-m], needle)
|
||||
|
||||
n -= m // adjust for rune len
|
||||
for i := 0; i < b.N; i++ {
|
||||
j := IndexRune(buf, needle)
|
||||
if j != n {
|
||||
b.Fatal("bad index", j)
|
||||
}
|
||||
}
|
||||
for i := range buf {
|
||||
buf[i] = '\x00'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEqual(b *testing.B) {
|
||||
b.Run("0", func(b *testing.B) {
|
||||
var buf [4]byte
|
||||
@ -936,18 +808,10 @@ func TestSplit(t *testing.T) {
|
||||
}
|
||||
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
|
||||
continue
|
||||
}
|
||||
|
||||
if tt.n < 0 {
|
||||
b := sliceOfString(slices.Collect(SplitSeq([]byte(tt.s), []byte(tt.sep))))
|
||||
if !slices.Equal(b, tt.a) {
|
||||
t.Errorf(`collect(SplitSeq(%q, %q)) = %v; want %v`, tt.s, tt.sep, b, tt.a)
|
||||
}
|
||||
}
|
||||
|
||||
if tt.n == 0 || len(a) == 0 {
|
||||
continue
|
||||
}
|
||||
@ -961,8 +825,8 @@ func TestSplit(t *testing.T) {
|
||||
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
|
||||
}
|
||||
if tt.n < 0 {
|
||||
b := sliceOfString(Split([]byte(tt.s), []byte(tt.sep)))
|
||||
if !slices.Equal(result, b) {
|
||||
b := Split([]byte(tt.s), []byte(tt.sep))
|
||||
if !reflect.DeepEqual(a, b) {
|
||||
t.Errorf("Split disagrees withSplitN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
|
||||
}
|
||||
}
|
||||
@ -1002,18 +866,11 @@ func TestSplitAfter(t *testing.T) {
|
||||
}
|
||||
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
|
||||
continue
|
||||
}
|
||||
|
||||
if tt.n < 0 {
|
||||
b := sliceOfString(slices.Collect(SplitAfterSeq([]byte(tt.s), []byte(tt.sep))))
|
||||
if !slices.Equal(b, tt.a) {
|
||||
t.Errorf(`collect(SplitAfterSeq(%q, %q)) = %v; want %v`, tt.s, tt.sep, b, tt.a)
|
||||
}
|
||||
}
|
||||
|
||||
if want := tt.a[len(tt.a)-1] + "z"; string(x) != want {
|
||||
t.Errorf("last appended result was %s; want %s", x, want)
|
||||
}
|
||||
@ -1023,8 +880,8 @@ func TestSplitAfter(t *testing.T) {
|
||||
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
|
||||
}
|
||||
if tt.n < 0 {
|
||||
b := sliceOfString(SplitAfter([]byte(tt.s), []byte(tt.sep)))
|
||||
if !slices.Equal(result, b) {
|
||||
b := SplitAfter([]byte(tt.s), []byte(tt.sep))
|
||||
if !reflect.DeepEqual(a, b) {
|
||||
t.Errorf("SplitAfter disagrees withSplitAfterN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
|
||||
}
|
||||
}
|
||||
@ -1062,16 +919,11 @@ func TestFields(t *testing.T) {
|
||||
}
|
||||
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf("Fields(%q) = %v; want %v", tt.s, a, tt.a)
|
||||
continue
|
||||
}
|
||||
|
||||
result2 := sliceOfString(collect(t, FieldsSeq([]byte(tt.s))))
|
||||
if !slices.Equal(result2, tt.a) {
|
||||
t.Errorf(`collect(FieldsSeq(%q)) = %v; want %v`, tt.s, result2, tt.a)
|
||||
}
|
||||
|
||||
if string(b) != tt.s {
|
||||
t.Errorf("slice changed to %s; want %s", string(b), tt.s)
|
||||
}
|
||||
@ -1087,7 +939,7 @@ func TestFieldsFunc(t *testing.T) {
|
||||
for _, tt := range fieldstests {
|
||||
a := FieldsFunc([]byte(tt.s), unicode.IsSpace)
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf("FieldsFunc(%q, unicode.IsSpace) = %v; want %v", tt.s, a, tt.a)
|
||||
continue
|
||||
}
|
||||
@ -1110,15 +962,10 @@ func TestFieldsFunc(t *testing.T) {
|
||||
}
|
||||
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf("FieldsFunc(%q) = %v, want %v", tt.s, a, tt.a)
|
||||
}
|
||||
|
||||
result2 := sliceOfString(collect(t, FieldsFuncSeq([]byte(tt.s), pred)))
|
||||
if !slices.Equal(result2, tt.a) {
|
||||
t.Errorf(`collect(FieldsFuncSeq(%q)) = %v; want %v`, tt.s, result2, tt.a)
|
||||
}
|
||||
|
||||
if string(b) != tt.s {
|
||||
t.Errorf("slice changed to %s; want %s", b, tt.s)
|
||||
}
|
||||
@ -1439,6 +1286,18 @@ func TestRepeatCatchesOverflow(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func runesEqual(a, b []rune) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i, r := range a {
|
||||
if r != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type RunesTest struct {
|
||||
in string
|
||||
out []rune
|
||||
@ -1459,7 +1318,7 @@ func TestRunes(t *testing.T) {
|
||||
for _, tt := range RunesTests {
|
||||
tin := []byte(tt.in)
|
||||
a := Runes(tin)
|
||||
if !slices.Equal(a, tt.out) {
|
||||
if !runesEqual(a, tt.out) {
|
||||
t.Errorf("Runes(%q) = %v; want %v", tin, a, tt.out)
|
||||
continue
|
||||
}
|
||||
@ -1787,20 +1646,9 @@ var ReplaceTests = []ReplaceTest{
|
||||
|
||||
func TestReplace(t *testing.T) {
|
||||
for _, tt := range ReplaceTests {
|
||||
var (
|
||||
in = []byte(tt.in)
|
||||
old = []byte(tt.old)
|
||||
new = []byte(tt.new)
|
||||
)
|
||||
if !asan.Enabled {
|
||||
allocs := testing.AllocsPerRun(10, func() { Replace(in, old, new, tt.n) })
|
||||
if allocs > 1 {
|
||||
t.Errorf("Replace(%q, %q, %q, %d) allocates %.2f objects", tt.in, tt.old, tt.new, tt.n, allocs)
|
||||
}
|
||||
}
|
||||
in = append(in, "<spare>"...)
|
||||
in := append([]byte(tt.in), "<spare>"...)
|
||||
in = in[:len(tt.in)]
|
||||
out := Replace(in, old, new, tt.n)
|
||||
out := Replace(in, []byte(tt.old), []byte(tt.new), tt.n)
|
||||
if s := string(out); s != tt.out {
|
||||
t.Errorf("Replace(%q, %q, %q, %d) = %q, want %q", tt.in, tt.old, tt.new, tt.n, s, tt.out)
|
||||
}
|
||||
@ -1808,7 +1656,7 @@ func TestReplace(t *testing.T) {
|
||||
t.Errorf("Replace(%q, %q, %q, %d) didn't copy", tt.in, tt.old, tt.new, tt.n)
|
||||
}
|
||||
if tt.n == -1 {
|
||||
out := ReplaceAll(in, old, new)
|
||||
out := ReplaceAll(in, []byte(tt.old), []byte(tt.new))
|
||||
if s := string(out); s != tt.out {
|
||||
t.Errorf("ReplaceAll(%q, %q, %q) = %q, want %q", tt.in, tt.old, tt.new, s, tt.out)
|
||||
}
|
||||
@ -1816,69 +1664,6 @@ func TestReplace(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func FuzzReplace(f *testing.F) {
|
||||
for _, tt := range ReplaceTests {
|
||||
f.Add([]byte(tt.in), []byte(tt.old), []byte(tt.new), tt.n)
|
||||
}
|
||||
f.Fuzz(func(t *testing.T, in, old, new []byte, n int) {
|
||||
differentImpl := func(in, old, new []byte, n int) []byte {
|
||||
var out Buffer
|
||||
if n < 0 {
|
||||
n = math.MaxInt
|
||||
}
|
||||
for i := 0; i < len(in); {
|
||||
if n == 0 {
|
||||
out.Write(in[i:])
|
||||
break
|
||||
}
|
||||
if HasPrefix(in[i:], old) {
|
||||
out.Write(new)
|
||||
i += len(old)
|
||||
n--
|
||||
if len(old) != 0 {
|
||||
continue
|
||||
}
|
||||
if i == len(in) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(old) == 0 {
|
||||
_, length := utf8.DecodeRune(in[i:])
|
||||
out.Write(in[i : i+length])
|
||||
i += length
|
||||
} else {
|
||||
out.WriteByte(in[i])
|
||||
i++
|
||||
}
|
||||
}
|
||||
if len(old) == 0 && n != 0 {
|
||||
out.Write(new)
|
||||
}
|
||||
return out.Bytes()
|
||||
}
|
||||
if simple, replace := differentImpl(in, old, new, n), Replace(in, old, new, n); !slices.Equal(simple, replace) {
|
||||
t.Errorf("The two implementations do not match %q != %q for Replace(%q, %q, %q, %d)", simple, replace, in, old, new, n)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkReplace(b *testing.B) {
|
||||
for _, tt := range ReplaceTests {
|
||||
desc := fmt.Sprintf("%q %q %q %d", tt.in, tt.old, tt.new, tt.n)
|
||||
var (
|
||||
in = []byte(tt.in)
|
||||
old = []byte(tt.old)
|
||||
new = []byte(tt.new)
|
||||
)
|
||||
b.Run(desc, func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for b.Loop() {
|
||||
Replace(in, old, new, tt.n)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type TitleTest struct {
|
||||
in, out string
|
||||
}
|
||||
@ -2259,11 +2044,6 @@ func makeBenchInputHard() []byte {
|
||||
var benchInputHard = makeBenchInputHard()
|
||||
|
||||
func benchmarkIndexHard(b *testing.B, sep []byte) {
|
||||
n := Index(benchInputHard, sep)
|
||||
if n < 0 {
|
||||
n = len(benchInputHard)
|
||||
}
|
||||
b.SetBytes(int64(n))
|
||||
for i := 0; i < b.N; i++ {
|
||||
Index(benchInputHard, sep)
|
||||
}
|
||||
|
@ -502,10 +502,10 @@ func ExampleTitle() {
|
||||
|
||||
func ExampleToTitle() {
|
||||
fmt.Printf("%s\n", bytes.ToTitle([]byte("loud noises")))
|
||||
fmt.Printf("%s\n", bytes.ToTitle([]byte("брат")))
|
||||
fmt.Printf("%s\n", bytes.ToTitle([]byte("хлеб")))
|
||||
// Output:
|
||||
// LOUD NOISES
|
||||
// БРАТ
|
||||
// ХЛЕБ
|
||||
}
|
||||
|
||||
func ExampleToTitleSpecial() {
|
||||
@ -628,93 +628,3 @@ func ExampleToUpperSpecial() {
|
||||
// Original : ahoj vývojári golang
|
||||
// ToUpper : AHOJ VÝVOJÁRİ GOLANG
|
||||
}
|
||||
|
||||
func ExampleLines() {
|
||||
text := []byte("Hello\nWorld\nGo Programming\n")
|
||||
for line := range bytes.Lines(text) {
|
||||
fmt.Printf("%q\n", line)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// "Hello\n"
|
||||
// "World\n"
|
||||
// "Go Programming\n"
|
||||
}
|
||||
|
||||
func ExampleSplitSeq() {
|
||||
s := []byte("a,b,c,d")
|
||||
for part := range bytes.SplitSeq(s, []byte(",")) {
|
||||
fmt.Printf("%q\n", part)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// "a"
|
||||
// "b"
|
||||
// "c"
|
||||
// "d"
|
||||
}
|
||||
|
||||
func ExampleSplitAfterSeq() {
|
||||
s := []byte("a,b,c,d")
|
||||
for part := range bytes.SplitAfterSeq(s, []byte(",")) {
|
||||
fmt.Printf("%q\n", part)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// "a,"
|
||||
// "b,"
|
||||
// "c,"
|
||||
// "d"
|
||||
}
|
||||
|
||||
func ExampleFieldsSeq() {
|
||||
text := []byte("The quick brown fox")
|
||||
fmt.Println("Split byte slice into fields:")
|
||||
for word := range bytes.FieldsSeq(text) {
|
||||
fmt.Printf("%q\n", word)
|
||||
}
|
||||
|
||||
textWithSpaces := []byte(" lots of spaces ")
|
||||
fmt.Println("\nSplit byte slice with multiple spaces:")
|
||||
for word := range bytes.FieldsSeq(textWithSpaces) {
|
||||
fmt.Printf("%q\n", word)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// Split byte slice into fields:
|
||||
// "The"
|
||||
// "quick"
|
||||
// "brown"
|
||||
// "fox"
|
||||
//
|
||||
// Split byte slice with multiple spaces:
|
||||
// "lots"
|
||||
// "of"
|
||||
// "spaces"
|
||||
}
|
||||
|
||||
func ExampleFieldsFuncSeq() {
|
||||
text := []byte("The quick brown fox")
|
||||
fmt.Println("Split on whitespace(similar to FieldsSeq):")
|
||||
for word := range bytes.FieldsFuncSeq(text, unicode.IsSpace) {
|
||||
fmt.Printf("%q\n", word)
|
||||
}
|
||||
|
||||
mixedText := []byte("abc123def456ghi")
|
||||
fmt.Println("\nSplit on digits:")
|
||||
for word := range bytes.FieldsFuncSeq(mixedText, unicode.IsDigit) {
|
||||
fmt.Printf("%q\n", word)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// Split on whitespace(similar to FieldsSeq):
|
||||
// "The"
|
||||
// "quick"
|
||||
// "brown"
|
||||
// "fox"
|
||||
//
|
||||
// Split on digits:
|
||||
// "abc"
|
||||
// "def"
|
||||
// "ghi"
|
||||
}
|
||||
|
@ -1,146 +0,0 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bytes
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Lines returns an iterator over the newline-terminated lines in the byte slice s.
|
||||
// The lines yielded by the iterator include their terminating newlines.
|
||||
// If s is empty, the iterator yields no lines at all.
|
||||
// If s does not end in a newline, the final yielded line will not end in a newline.
|
||||
// It returns a single-use iterator.
|
||||
func Lines(s []byte) iter.Seq[[]byte] {
|
||||
return func(yield func([]byte) bool) {
|
||||
for len(s) > 0 {
|
||||
var line []byte
|
||||
if i := IndexByte(s, '\n'); i >= 0 {
|
||||
line, s = s[:i+1], s[i+1:]
|
||||
} else {
|
||||
line, s = s, nil
|
||||
}
|
||||
if !yield(line[:len(line):len(line)]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// explodeSeq returns an iterator over the runes in s.
|
||||
func explodeSeq(s []byte, yield func([]byte) bool) {
|
||||
for len(s) > 0 {
|
||||
_, size := utf8.DecodeRune(s)
|
||||
if !yield(s[:size:size]) {
|
||||
return
|
||||
}
|
||||
s = s[size:]
|
||||
}
|
||||
}
|
||||
|
||||
// splitSeq is SplitSeq or SplitAfterSeq, configured by how many
|
||||
// bytes of sep to include in the results (none or all).
|
||||
func splitSeq(s, sep []byte, sepSave int) iter.Seq[[]byte] {
|
||||
return func(yield func([]byte) bool) {
|
||||
if len(sep) == 0 {
|
||||
explodeSeq(s, yield)
|
||||
return
|
||||
}
|
||||
for {
|
||||
i := Index(s, sep)
|
||||
if i < 0 {
|
||||
break
|
||||
}
|
||||
frag := s[:i+sepSave]
|
||||
if !yield(frag[:len(frag):len(frag)]) {
|
||||
return
|
||||
}
|
||||
s = s[i+len(sep):]
|
||||
}
|
||||
yield(s[:len(s):len(s)])
|
||||
}
|
||||
}
|
||||
|
||||
// SplitSeq returns an iterator over all subslices of s separated by sep.
|
||||
// The iterator yields the same subslices that would be returned by [Split](s, sep),
|
||||
// but without constructing a new slice containing the subslices.
|
||||
// It returns a single-use iterator.
|
||||
func SplitSeq(s, sep []byte) iter.Seq[[]byte] {
|
||||
return splitSeq(s, sep, 0)
|
||||
}
|
||||
|
||||
// SplitAfterSeq returns an iterator over subslices of s split after each instance of sep.
|
||||
// The iterator yields the same subslices that would be returned by [SplitAfter](s, sep),
|
||||
// but without constructing a new slice containing the subslices.
|
||||
// It returns a single-use iterator.
|
||||
func SplitAfterSeq(s, sep []byte) iter.Seq[[]byte] {
|
||||
return splitSeq(s, sep, len(sep))
|
||||
}
|
||||
|
||||
// FieldsSeq returns an iterator over subslices of s split around runs of
|
||||
// whitespace characters, as defined by [unicode.IsSpace].
|
||||
// The iterator yields the same subslices that would be returned by [Fields](s),
|
||||
// but without constructing a new slice containing the subslices.
|
||||
func FieldsSeq(s []byte) iter.Seq[[]byte] {
|
||||
return func(yield func([]byte) bool) {
|
||||
start := -1
|
||||
for i := 0; i < len(s); {
|
||||
size := 1
|
||||
r := rune(s[i])
|
||||
isSpace := asciiSpace[s[i]] != 0
|
||||
if r >= utf8.RuneSelf {
|
||||
r, size = utf8.DecodeRune(s[i:])
|
||||
isSpace = unicode.IsSpace(r)
|
||||
}
|
||||
if isSpace {
|
||||
if start >= 0 {
|
||||
if !yield(s[start:i:i]) {
|
||||
return
|
||||
}
|
||||
start = -1
|
||||
}
|
||||
} else if start < 0 {
|
||||
start = i
|
||||
}
|
||||
i += size
|
||||
}
|
||||
if start >= 0 {
|
||||
yield(s[start:len(s):len(s)])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FieldsFuncSeq returns an iterator over subslices of s split around runs of
|
||||
// Unicode code points satisfying f(c).
|
||||
// The iterator yields the same subslices that would be returned by [FieldsFunc](s),
|
||||
// but without constructing a new slice containing the subslices.
|
||||
func FieldsFuncSeq(s []byte, f func(rune) bool) iter.Seq[[]byte] {
|
||||
return func(yield func([]byte) bool) {
|
||||
start := -1
|
||||
for i := 0; i < len(s); {
|
||||
size := 1
|
||||
r := rune(s[i])
|
||||
if r >= utf8.RuneSelf {
|
||||
r, size = utf8.DecodeRune(s[i:])
|
||||
}
|
||||
if f(r) {
|
||||
if start >= 0 {
|
||||
if !yield(s[start:i:i]) {
|
||||
return
|
||||
}
|
||||
start = -1
|
||||
}
|
||||
} else if start < 0 {
|
||||
start = i
|
||||
}
|
||||
i += size
|
||||
}
|
||||
if start >= 0 {
|
||||
yield(s[start:len(s):len(s)])
|
||||
}
|
||||
}
|
||||
}
|
@ -1,56 +0,0 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bytes_test
|
||||
|
||||
import (
|
||||
. "bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkSplitSeqEmptySeparator(b *testing.B) {
|
||||
for range b.N {
|
||||
for range SplitSeq(benchInputHard, nil) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSplitSeqSingleByteSeparator(b *testing.B) {
|
||||
sep := []byte("/")
|
||||
for range b.N {
|
||||
for range SplitSeq(benchInputHard, sep) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSplitSeqMultiByteSeparator(b *testing.B) {
|
||||
sep := []byte("hello")
|
||||
for range b.N {
|
||||
for range SplitSeq(benchInputHard, sep) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSplitAfterSeqEmptySeparator(b *testing.B) {
|
||||
for range b.N {
|
||||
for range SplitAfterSeq(benchInputHard, nil) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSplitAfterSeqSingleByteSeparator(b *testing.B) {
|
||||
sep := []byte("/")
|
||||
for range b.N {
|
||||
for range SplitAfterSeq(benchInputHard, sep) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSplitAfterSeqMultiByteSeparator(b *testing.B) {
|
||||
sep := []byte("hello")
|
||||
for range b.N {
|
||||
for range SplitAfterSeq(benchInputHard, sep) {
|
||||
}
|
||||
}
|
||||
}
|
@ -6,16 +6,27 @@

setlocal

go tool dist env -w -p >env.bat || exit /b 1
set GOBUILDFAIL=0

go tool dist env -w -p >env.bat
if errorlevel 1 goto fail
call .\env.bat
del env.bat
echo.

if not exist %GOTOOLDIR%\dist.exe (
	echo cannot find %GOTOOLDIR%\dist.exe; nothing to clean
	exit /b 1
)
if exist %GOTOOLDIR%\dist.exe goto distok
echo cannot find %GOTOOLDIR%\dist; nothing to clean
goto fail
:distok

"%GOBIN%\go" clean -i std
"%GOBIN%\go" tool dist clean
"%GOBIN%\go" clean -i cmd

goto end

:fail
set GOBUILDFAIL=1

:end
if x%GOBUILDEXIT%==x1 exit %GOBUILDFAIL%
|
@ -12,6 +12,7 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@ -27,6 +28,26 @@ func TestMain(m *testing.M) {
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
// addr2linePath returns the path to the "addr2line" binary to run.
|
||||
func addr2linePath(t testing.TB) string {
|
||||
t.Helper()
|
||||
testenv.MustHaveExec(t)
|
||||
|
||||
addr2linePathOnce.Do(func() {
|
||||
addr2lineExePath, addr2linePathErr = os.Executable()
|
||||
})
|
||||
if addr2linePathErr != nil {
|
||||
t.Fatal(addr2linePathErr)
|
||||
}
|
||||
return addr2lineExePath
|
||||
}
|
||||
|
||||
var (
|
||||
addr2linePathOnce sync.Once
|
||||
addr2lineExePath string
|
||||
addr2linePathErr error
|
||||
)
|
||||
|
||||
func loadSyms(t *testing.T, dbgExePath string) map[string]string {
|
||||
cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", dbgExePath)
|
||||
out, err := cmd.CombinedOutput()
|
||||
@ -49,7 +70,7 @@ func loadSyms(t *testing.T, dbgExePath string) map[string]string {
|
||||
}
|
||||
|
||||
func runAddr2Line(t *testing.T, dbgExePath, addr string) (funcname, path, lineno string) {
|
||||
cmd := testenv.Command(t, testenv.Executable(t), dbgExePath)
|
||||
cmd := testenv.Command(t, addr2linePath(t), dbgExePath)
|
||||
cmd.Stdin = strings.NewReader(addr)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
@ -87,22 +108,27 @@ func testAddr2Line(t *testing.T, dbgExePath, addr string) {
|
||||
// Debug paths are stored slash-separated, so convert to system-native.
|
||||
srcPath = filepath.FromSlash(srcPath)
|
||||
fi2, err := os.Stat(srcPath)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Stat failed: %v", err)
|
||||
}
|
||||
if !os.SameFile(fi1, fi2) {
|
||||
t.Fatalf("addr2line_test.go and %s are not same file", srcPath)
|
||||
}
|
||||
if want := "102"; srcLineNo != want {
|
||||
if want := "124"; srcLineNo != want {
|
||||
t.Fatalf("line number = %v; want %s", srcLineNo, want)
|
||||
}
|
||||
}
|
||||
|
||||
// This is line 101. The test depends on that.
|
||||
// This is line 123. The test depends on that.
|
||||
func TestAddr2Line(t *testing.T) {
|
||||
testenv.MustHaveGoBuild(t)
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
tmpDir, err := os.MkdirTemp("", "TestAddr2Line")
|
||||
if err != nil {
|
||||
t.Fatal("TempDir failed: ", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// Build copy of test binary with debug symbols,
|
||||
// since the one running now may not have them.
|
||||
|
@ -11,7 +11,7 @@ import (
|
||||
"internal/testenv"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
@ -57,10 +57,7 @@ func TestGolden(t *testing.T) {
|
||||
// TODO(gri) remove extra pkg directory eventually
|
||||
goldenFile := filepath.Join("testdata", "src", "pkg", fi.Name(), "golden.txt")
|
||||
w := NewWalker(nil, "testdata/src/pkg")
|
||||
pkg, err := w.import_(fi.Name())
|
||||
if err != nil {
|
||||
t.Fatalf("import %s: %v", fi.Name(), err)
|
||||
}
|
||||
pkg, _ := w.import_(fi.Name())
|
||||
w.export(pkg)
|
||||
|
||||
if *updateGolden {
|
||||
@ -80,7 +77,7 @@ func TestGolden(t *testing.T) {
|
||||
t.Fatalf("opening golden.txt for package %q: %v", fi.Name(), err)
|
||||
}
|
||||
wanted := strings.Split(string(bs), "\n")
|
||||
slices.Sort(wanted)
|
||||
sort.Strings(wanted)
|
||||
for _, feature := range wanted {
|
||||
if feature == "" {
|
||||
continue
|
||||
@ -99,11 +96,6 @@ func TestGolden(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCompareAPI(t *testing.T) {
|
||||
if *flagCheck {
|
||||
// not worth repeating in -check
|
||||
t.Skip("skipping with -check set")
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
features, required, exception []string
|
||||
@ -185,11 +177,6 @@ func TestCompareAPI(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSkipInternal(t *testing.T) {
|
||||
if *flagCheck {
|
||||
// not worth repeating in -check
|
||||
t.Skip("skipping with -check set")
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
pkg string
|
||||
want bool
|
||||
@ -214,13 +201,7 @@ func BenchmarkAll(b *testing.B) {
|
||||
for _, context := range contexts {
|
||||
w := NewWalker(context, filepath.Join(testenv.GOROOT(b), "src"))
|
||||
for _, name := range w.stdPackages {
|
||||
pkg, err := w.import_(name)
|
||||
if _, nogo := err.(*build.NoGoError); nogo {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
b.Fatalf("import %s (%s-%s): %v", name, context.GOOS, context.GOARCH, err)
|
||||
}
|
||||
pkg, _ := w.import_(name)
|
||||
w.export(pkg)
|
||||
}
|
||||
w.Features()
|
||||
@ -258,7 +239,8 @@ func TestIssue21181(t *testing.T) {
|
||||
w := NewWalker(context, "testdata/src/issue21181")
|
||||
pkg, err := w.import_("p")
|
||||
if err != nil {
|
||||
t.Fatalf("import %s (%s-%s): %v", "p", context.GOOS, context.GOARCH, err)
|
||||
t.Fatalf("%s: (%s-%s) %s %v", err, context.GOOS, context.GOARCH,
|
||||
pkg.Name(), w.imported)
|
||||
}
|
||||
w.export(pkg)
|
||||
}
|
||||
@ -304,20 +286,14 @@ func TestIssue41358(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIssue64958(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping with -short")
|
||||
}
|
||||
if *flagCheck {
|
||||
// slow, not worth repeating in -check
|
||||
t.Skip("skipping with -check set")
|
||||
}
|
||||
testenv.MustHaveGoBuild(t)
|
||||
|
||||
defer func() {
|
||||
if x := recover(); x != nil {
|
||||
t.Errorf("expected no panic; recovered %v", x)
|
||||
}
|
||||
}()
|
||||
|
||||
testenv.MustHaveGoBuild(t)
|
||||
|
||||
for _, context := range contexts {
|
||||
w := NewWalker(context, "testdata/src/issue64958")
|
||||
pkg, err := w.importFrom("p", "", 0)
|
||||
|
@ -25,7 +25,7 @@ import (
"path/filepath"
"regexp"
"runtime"
"slices"
"sort"
"strconv"
"strings"
"sync"
@ -232,8 +232,8 @@ func compareAPI(w io.Writer, features, required, exception []string) (ok bool) {
featureSet := set(features)
exceptionSet := set(exception)

slices.Sort(features)
slices.Sort(required)
sort.Strings(features)
sort.Strings(required)

take := func(sl *[]string) string {
s := (*sl)[0]
@ -378,7 +378,7 @@ func (w *Walker) Features() (fs []string) {
for f := range w.features {
fs = append(fs, f)
}
slices.Sort(fs)
sort.Strings(fs)
return
}

@ -431,7 +431,7 @@ func tagKey(dir string, context *build.Context, tags []string) string {
// an indirect imported package. See https://github.com/golang/go/issues/21181
// for more detail.
tags = append(tags, context.GOOS, context.GOARCH)
slices.Sort(tags)
sort.Strings(tags)

for _, tag := range tags {
if ctags[tag] {
@ -535,7 +535,7 @@ func (w *Walker) loadImports() {
}
}

slices.Sort(stdPackages)
sort.Strings(stdPackages)
imports = listImports{
stdPackages: stdPackages,
importMap: importMap,
@ -717,7 +717,7 @@ func sortedMethodNames(typ *types.Interface) []string {
for i := range list {
list[i] = typ.Method(i).Name()
}
slices.Sort(list)
sort.Strings(list)
return list
}

@ -747,7 +747,7 @@ func (w *Walker) sortedEmbeddeds(typ *types.Interface) []string {
list = append(list, buf.String())
}
}
slices.Sort(list)
sort.Strings(list)
return list
}

@ -1019,7 +1019,7 @@ func (w *Walker) emitType(obj *types.TypeName) {

func (w *Walker) emitStructType(name string, typ *types.Struct) {
typeStruct := fmt.Sprintf("type %s struct", name)
w.emitf("%s", typeStruct)
w.emitf(typeStruct)
defer w.pushScope(typeStruct)()

for i := 0; i < typ.NumFields(); i++ {
@ -1058,7 +1058,7 @@ func (w *Walker) emitIfaceType(name string, typ *types.Interface) {
if w.isDeprecated(m) {
w.emitf("%s //deprecated", m.Name())
}
w.emitf("%s%s", m.Name(), w.signatureString(m.Signature()))
w.emitf("%s%s", m.Name(), w.signatureString(m.Type().(*types.Signature)))
}

if !complete {
@ -1083,12 +1083,12 @@ func (w *Walker) emitIfaceType(name string, typ *types.Interface) {
return
}

slices.Sort(methodNames)
sort.Strings(methodNames)
w.emitf("type %s interface { %s }", name, strings.Join(methodNames, ", "))
}

func (w *Walker) emitFunc(f *types.Func) {
sig := f.Signature()
sig := f.Type().(*types.Signature)
if sig.Recv() != nil {
panic("method considered a regular function: " + f.String())
}

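Several hunks in this file swap sort.Strings for slices.Sort (and f.Type().(*types.Signature) for the newer (*types.Func).Signature accessor). For a []string the two sort calls produce the same ascending order; a minimal sketch of the equivalence, with made-up sample data:

    package main

    import (
        "fmt"
        "slices"
        "sort"
    )

    func main() {
        a := []string{"unsafe", "bytes", "crypto/hkdf"}
        b := slices.Clone(a)

        sort.Strings(a) // pre-generics API, []string only
        slices.Sort(b)  // generic API (Go 1.21+), any ordered element type

        fmt.Println(slices.Equal(a, b)) // true
    }

slices.Sort works for any ordered element type without the reflection-based sort.Sort machinery, which is why the newer side of this comparison prefers it.
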
@ -520,27 +520,15 @@ func archLoong64(linkArch *obj.LinkArch) *Arch {
for i := loong64.REG_R0; i <= loong64.REG_R31; i++ {
register[obj.Rconv(i)] = int16(i)
}

for i := loong64.REG_F0; i <= loong64.REG_F31; i++ {
register[obj.Rconv(i)] = int16(i)
}

for i := loong64.REG_FCSR0; i <= loong64.REG_FCSR31; i++ {
register[obj.Rconv(i)] = int16(i)
}

for i := loong64.REG_FCC0; i <= loong64.REG_FCC31; i++ {
register[obj.Rconv(i)] = int16(i)
}

for i := loong64.REG_V0; i <= loong64.REG_V31; i++ {
register[obj.Rconv(i)] = int16(i)
}

for i := loong64.REG_X0; i <= loong64.REG_X31; i++ {
register[obj.Rconv(i)] = int16(i)
}

// Pseudo-registers.
register["SB"] = RSB
register["FP"] = RFP
@ -553,8 +541,6 @@ func archLoong64(linkArch *obj.LinkArch) *Arch {
"FCSR": true,
"FCC": true,
"R": true,
"V": true,
"X": true,
}

instructions := make(map[string]obj.As)
@ -600,10 +586,6 @@ func archRISCV64(shared bool) *Arch {
name := fmt.Sprintf("F%d", i-riscv.REG_F0)
register[name] = int16(i)
}
for i := riscv.REG_V0; i <= riscv.REG_V31; i++ {
name := fmt.Sprintf("V%d", i-riscv.REG_V0)
register[name] = int16(i)
}

// General registers with ABI names.
register["ZERO"] = riscv.REG_ZERO

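The archLoong64 and archRISCV64 hunks above populate the assembler's register table by looping over contiguous register constants and naming each one. A stand-alone sketch of that pattern with invented constants (cmd/internal/obj and obj.Rconv are internal to the toolchain, so plain Sprintf naming stands in for them here):

    package main

    import "fmt"

    // Hypothetical contiguous register numbers, standing in for
    // loong64.REG_R0 .. loong64.REG_R31.
    const (
        regR0 int16 = 100 + iota
        regR1
        regR2
        // the real table runs through R31; the gap is implied
        regR31 = regR0 + 31
    )

    func main() {
        register := make(map[string]int16)
        for i := regR0; i <= regR31; i++ {
            // obj.Rconv produces the textual name in the real code.
            register[fmt.Sprintf("R%d", i-regR0)] = i
        }
        register["SB"] = -1 // pseudo-registers get fixed slots, as in the diff
        fmt.Println(register["R5"], register["SB"])
    }
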
@ -101,7 +101,7 @@ func IsARMCMP(op obj.As) bool {
// one of the STREX-like instructions that require special handling.
func IsARMSTREX(op obj.As) bool {
switch op {
case arm.ASTREX, arm.ASTREXD, arm.ASTREXB, arm.ASWPW, arm.ASWPBU:
case arm.ASTREX, arm.ASTREXD, arm.ASWPW, arm.ASWPBU:
return true
}
return false

@ -59,10 +59,10 @@ func jumpArm64(word string) bool {

var arm64SpecialOperand map[string]arm64.SpecialOperand

// ARM64SpecialOperand returns the internal representation of a special operand.
func ARM64SpecialOperand(name string) arm64.SpecialOperand {
// GetARM64SpecialOperand returns the internal representation of a special operand.
func GetARM64SpecialOperand(name string) arm64.SpecialOperand {
if arm64SpecialOperand == nil {
// Generate mapping when function is first called.
// Generate the mapping automatically when the first time the function is called.
arm64SpecialOperand = map[string]arm64.SpecialOperand{}
for opd := arm64.SPOP_BEGIN; opd < arm64.SPOP_END; opd++ {
arm64SpecialOperand[opd.String()] = opd

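GetARM64SpecialOperand above (and RISCV64SpecialOperand further down) lazily builds a name-to-operand map the first time it is called and returns the END sentinel for unknown names. A self-contained sketch of that pattern with a toy operand type; all names and values here are invented:

    package main

    import "fmt"

    type specialOperand int

    const (
        spopBegin specialOperand = iota // first valid operand
        spopDAIFSet
        spopDAIFClr
        spopEnd // sentinel: also used as the "not found" result
    )

    func (s specialOperand) String() string {
        return [...]string{"BEGIN", "DAIFSet", "DAIFClr", "END"}[s]
    }

    var specialOperands map[string]specialOperand

    // lookupSpecialOperand mirrors the lazy-init pattern in the diff:
    // build the map on first use, then do a plain lookup.
    func lookupSpecialOperand(name string) specialOperand {
        if specialOperands == nil {
            specialOperands = map[string]specialOperand{}
            for opd := spopBegin; opd < spopEnd; opd++ {
                specialOperands[opd.String()] = opd
            }
        }
        if opd, ok := specialOperands[name]; ok {
            return opd
        }
        return spopEnd
    }

    func main() {
        fmt.Println(lookupSpecialOperand("DAIFSet")) // DAIFSet
        fmt.Println(lookupSpecialOperand("bogus"))   // END (sentinel)
    }

Like the original, the map is built without locking, so the sketch assumes single-goroutine use.
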
@ -11,8 +11,6 @@ package arch
import (
"cmd/internal/obj"
"cmd/internal/obj/loong64"
"errors"
"fmt"
)

func jumpLoong64(word string) bool {
@ -23,6 +21,17 @@ func jumpLoong64(word string) bool {
return false
}

// IsLoong64CMP reports whether the op (as defined by an loong64.A* constant) is
// one of the CMP instructions that require special handling.
func IsLoong64CMP(op obj.As) bool {
switch op {
case loong64.ACMPEQF, loong64.ACMPEQD, loong64.ACMPGEF, loong64.ACMPGED,
loong64.ACMPGTF, loong64.ACMPGTD:
return true
}
return false
}

// IsLoong64MUL reports whether the op (as defined by an loong64.A* constant) is
// one of the MUL/DIV/REM instructions that require special handling.
func IsLoong64MUL(op obj.As) bool {
@ -50,82 +59,6 @@ func IsLoong64AMO(op obj.As) bool {
return loong64.IsAtomicInst(op)
}

var loong64ElemExtMap = map[string]int16{
"B": loong64.ARNG_B,
"H": loong64.ARNG_H,
"W": loong64.ARNG_W,
"V": loong64.ARNG_V,
"BU": loong64.ARNG_BU,
"HU": loong64.ARNG_HU,
"WU": loong64.ARNG_WU,
"VU": loong64.ARNG_VU,
}

var loong64LsxArngExtMap = map[string]int16{
"B16": loong64.ARNG_16B,
"H8": loong64.ARNG_8H,
"W4": loong64.ARNG_4W,
"V2": loong64.ARNG_2V,
}

var loong64LasxArngExtMap = map[string]int16{
"B32": loong64.ARNG_32B,
"H16": loong64.ARNG_16H,
"W8": loong64.ARNG_8W,
"V4": loong64.ARNG_4V,
"Q2": loong64.ARNG_2Q,
}

// Loong64RegisterExtension constructs an Loong64 register with extension or arrangement.
func Loong64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error {
var ok bool
var arng_type int16
var simd_type int16

switch {
case reg >= loong64.REG_V0 && reg <= loong64.REG_V31:
simd_type = loong64.LSX
case reg >= loong64.REG_X0 && reg <= loong64.REG_X31:
simd_type = loong64.LASX
default:
return errors.New("Loong64 extension: invalid LSX/LASX register: " + fmt.Sprintf("%d", reg))
}

if isIndex {
arng_type, ok = loong64ElemExtMap[ext]
if !ok {
return errors.New("Loong64 extension: invalid LSX/LASX arrangement type: " + ext)
}

a.Reg = loong64.REG_ELEM
a.Reg += ((reg & loong64.EXT_REG_MASK) << loong64.EXT_REG_SHIFT)
a.Reg += ((arng_type & loong64.EXT_TYPE_MASK) << loong64.EXT_TYPE_SHIFT)
a.Reg += ((simd_type & loong64.EXT_SIMDTYPE_MASK) << loong64.EXT_SIMDTYPE_SHIFT)
a.Index = num
} else {
switch simd_type {
case loong64.LSX:
arng_type, ok = loong64LsxArngExtMap[ext]
if !ok {
return errors.New("Loong64 extension: invalid LSX arrangement type: " + ext)
}

case loong64.LASX:
arng_type, ok = loong64LasxArngExtMap[ext]
if !ok {
return errors.New("Loong64 extension: invalid LASX arrangement type: " + ext)
}
}

a.Reg = loong64.REG_ARNG
a.Reg += ((reg & loong64.EXT_REG_MASK) << loong64.EXT_REG_SHIFT)
a.Reg += ((arng_type & loong64.EXT_TYPE_MASK) << loong64.EXT_TYPE_SHIFT)
a.Reg += ((simd_type & loong64.EXT_SIMDTYPE_MASK) << loong64.EXT_SIMDTYPE_SHIFT)
}

return nil
}

func loong64RegisterNumber(name string, n int16) (int16, bool) {
switch name {
case "F":
@ -144,14 +77,6 @@ func loong64RegisterNumber(name string, n int16) (int16, bool) {
if 0 <= n && n <= 31 {
return loong64.REG_R0 + n, true
}
case "V":
if 0 <= n && n <= 31 {
return loong64.REG_V0 + n, true
}
case "X":
if 0 <= n && n <= 31 {
return loong64.REG_X0 + n, true
}
}
return 0, false
}

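Loong64RegisterExtension above packs the base register, the arrangement type, and the SIMD class into a single operand register number using the EXT_*_MASK/SHIFT constants from cmd/internal/obj/loong64. A stand-alone illustration of that mask-and-shift packing with invented field widths (the real constants and the REG_ELEM/REG_ARNG bases differ; only the technique is the same):

    package main

    import "fmt"

    // Invented layout: low 5 bits register number, next 4 bits arrangement,
    // next 2 bits SIMD class.
    const (
        extRegMask   = 0x1f
        extRegShift  = 0
        extTypeMask  = 0xf
        extTypeShift = 5
        extSIMDMask  = 0x3
        extSIMDShift = 9
    )

    func packExtReg(reg, arng, simd int16) int16 {
        r := int16(0)
        r |= (reg & extRegMask) << extRegShift
        r |= (arng & extTypeMask) << extTypeShift
        r |= (simd & extSIMDMask) << extSIMDShift
        return r
    }

    func main() {
        packed := packExtReg(7, 3, 1) // register 7, arrangement 3, SIMD class 1
        fmt.Printf("packed=%#x reg=%d arng=%d simd=%d\n",
            packed,
            (packed>>extRegShift)&extRegMask,
            (packed>>extTypeShift)&extTypeMask,
            (packed>>extSIMDShift)&extSIMDMask)
    }
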
@ -25,7 +25,7 @@ func jumpPPC64(word string) bool {
// one of the CMP instructions that require special handling.
func IsPPC64CMP(op obj.As) bool {
switch op {
case ppc64.ACMP, ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU, ppc64.AFCMPO, ppc64.AFCMPU, ppc64.ADCMPO, ppc64.ADCMPU, ppc64.ADCMPOQ, ppc64.ADCMPUQ:
case ppc64.ACMP, ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU, ppc64.AFCMPO, ppc64.AFCMPU:
return true
}
return false

@ -13,8 +13,9 @@ import (
"cmd/internal/obj/riscv"
)

// IsRISCV64AMO reports whether op is an AMO instruction that requires
// special handling.
// IsRISCV64AMO reports whether the op (as defined by a riscv.A*
// constant) is one of the AMO instructions that requires special
// handling.
func IsRISCV64AMO(op obj.As) bool {
switch op {
case riscv.ASCW, riscv.ASCD, riscv.AAMOSWAPW, riscv.AAMOSWAPD, riscv.AAMOADDW, riscv.AAMOADDD,
@ -25,33 +26,3 @@ func IsRISCV64AMO(op obj.As) bool {
}
return false
}

// IsRISCV64VTypeI reports whether op is a vtype immediate instruction that
// requires special handling.
func IsRISCV64VTypeI(op obj.As) bool {
return op == riscv.AVSETVLI || op == riscv.AVSETIVLI
}

var riscv64SpecialOperand map[string]riscv.SpecialOperand

// RISCV64SpecialOperand returns the internal representation of a special operand.
func RISCV64SpecialOperand(name string) riscv.SpecialOperand {
if riscv64SpecialOperand == nil {
// Generate mapping when function is first called.
riscv64SpecialOperand = map[string]riscv.SpecialOperand{}
for opd := riscv.SPOP_BEGIN; opd < riscv.SPOP_END; opd++ {
riscv64SpecialOperand[opd.String()] = opd
}
}
if opd, ok := riscv64SpecialOperand[name]; ok {
return opd
}
return riscv.SPOP_END
}

// RISCV64ValidateVectorType reports whether the given configuration is a
// valid vector type.
func RISCV64ValidateVectorType(vsew, vlmul, vtail, vmask int64) error {
_, err := riscv.EncodeVectorType(vsew, vlmul, vtail, vmask)
return err
}

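RISCV64ValidateVectorType above delegates to riscv.EncodeVectorType to check a vsew/vlmul/vtail/vmask combination. As a rough, spec-level sketch of what such an encoder produces — based on the vtype layout in the RISC-V "V" extension (bits [2:0] vlmul, [5:3] vsew, [6] vta, [7] vma), not on the toolchain's internal API — and assuming the field values have already been reduced to their bit encodings:

    package main

    import "fmt"

    // encodeVType packs a vtype immediate as laid out in the RISC-V "V" spec.
    // The real riscv.EncodeVectorType additionally validates the individual
    // field values and takes them in a different form.
    func encodeVType(vlmul, vsew, vta, vma uint64) (uint64, error) {
        if vlmul > 7 || vsew > 7 || vta > 1 || vma > 1 {
            return 0, fmt.Errorf("vtype field out of range")
        }
        return vlmul | vsew<<3 | vta<<6 | vma<<7, nil
    }

    func main() {
        // e32 (vsew=0b010), m1 (vlmul=0b000), tail agnostic, mask agnostic.
        v, err := encodeVType(0, 2, 1, 1)
        fmt.Printf("%#x %v\n", v, err) // 0xd0 <nil>
    }
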
Some files were not shown because too many files have changed in this diff.