mirror of
https://github.com/golang/go.git
synced 2025-05-08 17:13:05 +00:00
Compare commits
54 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
c390a1c22e | ||
|
1207de4f6c | ||
|
a0d15cb9c8 | ||
|
958f3a0309 | ||
|
6ba3a8a6ba | ||
|
5472853843 | ||
|
cfe0ae0b70 | ||
|
58babf6e0b | ||
|
8d79bf799b | ||
|
35c010ad6d | ||
|
6495ce0495 | ||
|
7fc8312673 | ||
|
cc16cdf48f | ||
|
9563300f6e | ||
|
f8080edefd | ||
|
ed07b321ae | ||
|
3b2e846e11 | ||
|
fbddfae62f | ||
|
c8c6f9abfb | ||
|
a74951c5af | ||
|
e6598e7baa | ||
|
82575f76b8 | ||
|
a886959aa2 | ||
|
80ff7cd35a | ||
|
69234ded30 | ||
|
032ac075c2 | ||
|
fa8ff1a46d | ||
|
53487e5477 | ||
|
3d1f1f27cf | ||
|
6de5a7180c | ||
|
9625a7faae | ||
|
9c939a1e60 | ||
|
7afe17bbdb | ||
|
8002845759 | ||
|
9166d2feec | ||
|
76346b3543 | ||
|
3c9340557c | ||
|
dbecb416d1 | ||
|
6885bad7dd | ||
|
ec7d6094e6 | ||
|
63b0f805cd | ||
|
7adb012205 | ||
|
c9940fe2a9 | ||
|
3509415eca | ||
|
559c77592f | ||
|
f5e4e45ef7 | ||
|
30b6fd60a6 | ||
|
7e4d6c2bcb | ||
|
8bd4ed6cbb | ||
|
7dff7439dc | ||
|
62c3a6350b | ||
|
eba9e08766 | ||
|
f3bdcda88a | ||
|
362f22d2d2 |
@ -1,7 +1,7 @@
|
|||||||
name: Language Change Proposals
|
name: Language Change Proposals
|
||||||
description: Changes to the language
|
description: Changes to the language
|
||||||
labels: ["Proposal", "LanguageChange", "LanguageChangeReview"]
|
labels: ["Proposal", "v2", "LanguageChange"]
|
||||||
title: "proposal: spec: proposal title"
|
title: "proposal: Go 2: proposal title"
|
||||||
|
|
||||||
|
|
||||||
body:
|
body:
|
||||||
|
2
.gitignore
vendored
2
.gitignore
vendored
@ -37,7 +37,7 @@ _testmain.go
|
|||||||
/src/go/build/zcgo.go
|
/src/go/build/zcgo.go
|
||||||
/src/go/doc/headscan
|
/src/go/doc/headscan
|
||||||
/src/internal/buildcfg/zbootstrap.go
|
/src/internal/buildcfg/zbootstrap.go
|
||||||
/src/internal/runtime/sys/zversion.go
|
/src/runtime/internal/sys/zversion.go
|
||||||
/src/unicode/maketables
|
/src/unicode/maketables
|
||||||
/src/time/tzdata/zzipdata.go
|
/src/time/tzdata/zzipdata.go
|
||||||
/test.out
|
/test.out
|
||||||
|
4
LICENSE
4
LICENSE
@ -1,4 +1,4 @@
|
|||||||
Copyright 2009 The Go Authors.
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
Redistribution and use in source and binary forms, with or without
|
||||||
modification, are permitted provided that the following conditions are
|
modification, are permitted provided that the following conditions are
|
||||||
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
|
|||||||
copyright notice, this list of conditions and the following disclaimer
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
in the documentation and/or other materials provided with the
|
in the documentation and/or other materials provided with the
|
||||||
distribution.
|
distribution.
|
||||||
* Neither the name of Google LLC nor the names of its
|
* Neither the name of Google Inc. nor the names of its
|
||||||
contributors may be used to endorse or promote products derived from
|
contributors may be used to endorse or promote products derived from
|
||||||
this software without specific prior written permission.
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
223
api/go1.24.txt
223
api/go1.24.txt
@ -1,223 +0,0 @@
|
|||||||
pkg bytes, func FieldsFuncSeq([]uint8, func(int32) bool) iter.Seq[[]uint8] #61901
|
|
||||||
pkg bytes, func FieldsSeq([]uint8) iter.Seq[[]uint8] #61901
|
|
||||||
pkg bytes, func Lines([]uint8) iter.Seq[[]uint8] #61901
|
|
||||||
pkg bytes, func SplitAfterSeq([]uint8, []uint8) iter.Seq[[]uint8] #61901
|
|
||||||
pkg bytes, func SplitSeq([]uint8, []uint8) iter.Seq[[]uint8] #61901
|
|
||||||
pkg crypto/cipher, func NewCFBDecrypter //deprecated #69445
|
|
||||||
pkg crypto/cipher, func NewCFBEncrypter //deprecated #69445
|
|
||||||
pkg crypto/cipher, func NewGCMWithRandomNonce(Block) (AEAD, error) #69981
|
|
||||||
pkg crypto/cipher, func NewOFB //deprecated #69445
|
|
||||||
pkg crypto/fips140, func Enabled() bool #70123
|
|
||||||
pkg crypto/hkdf, func Expand[$0 hash.Hash](func() $0, []uint8, string, int) ([]uint8, error) #61477
|
|
||||||
pkg crypto/hkdf, func Extract[$0 hash.Hash](func() $0, []uint8, []uint8) ([]uint8, error) #61477
|
|
||||||
pkg crypto/hkdf, func Key[$0 hash.Hash](func() $0, []uint8, []uint8, string, int) ([]uint8, error) #61477
|
|
||||||
pkg crypto/mlkem, const CiphertextSize1024 = 1568 #70122
|
|
||||||
pkg crypto/mlkem, const CiphertextSize1024 ideal-int #70122
|
|
||||||
pkg crypto/mlkem, const CiphertextSize768 = 1088 #70122
|
|
||||||
pkg crypto/mlkem, const CiphertextSize768 ideal-int #70122
|
|
||||||
pkg crypto/mlkem, const EncapsulationKeySize1024 = 1568 #70122
|
|
||||||
pkg crypto/mlkem, const EncapsulationKeySize1024 ideal-int #70122
|
|
||||||
pkg crypto/mlkem, const EncapsulationKeySize768 = 1184 #70122
|
|
||||||
pkg crypto/mlkem, const EncapsulationKeySize768 ideal-int #70122
|
|
||||||
pkg crypto/mlkem, const SeedSize = 64 #70122
|
|
||||||
pkg crypto/mlkem, const SeedSize ideal-int #70122
|
|
||||||
pkg crypto/mlkem, const SharedKeySize = 32 #70122
|
|
||||||
pkg crypto/mlkem, const SharedKeySize ideal-int #70122
|
|
||||||
pkg crypto/mlkem, func GenerateKey1024() (*DecapsulationKey1024, error) #70122
|
|
||||||
pkg crypto/mlkem, func GenerateKey768() (*DecapsulationKey768, error) #70122
|
|
||||||
pkg crypto/mlkem, func NewDecapsulationKey1024([]uint8) (*DecapsulationKey1024, error) #70122
|
|
||||||
pkg crypto/mlkem, func NewDecapsulationKey768([]uint8) (*DecapsulationKey768, error) #70122
|
|
||||||
pkg crypto/mlkem, func NewEncapsulationKey1024([]uint8) (*EncapsulationKey1024, error) #70122
|
|
||||||
pkg crypto/mlkem, func NewEncapsulationKey768([]uint8) (*EncapsulationKey768, error) #70122
|
|
||||||
pkg crypto/mlkem, method (*DecapsulationKey1024) Bytes() []uint8 #70122
|
|
||||||
pkg crypto/mlkem, method (*DecapsulationKey1024) Decapsulate([]uint8) ([]uint8, error) #70122
|
|
||||||
pkg crypto/mlkem, method (*DecapsulationKey1024) EncapsulationKey() *EncapsulationKey1024 #70122
|
|
||||||
pkg crypto/mlkem, method (*DecapsulationKey768) Bytes() []uint8 #70122
|
|
||||||
pkg crypto/mlkem, method (*DecapsulationKey768) Decapsulate([]uint8) ([]uint8, error) #70122
|
|
||||||
pkg crypto/mlkem, method (*DecapsulationKey768) EncapsulationKey() *EncapsulationKey768 #70122
|
|
||||||
pkg crypto/mlkem, method (*EncapsulationKey1024) Bytes() []uint8 #70122
|
|
||||||
pkg crypto/mlkem, method (*EncapsulationKey1024) Encapsulate() ([]uint8, []uint8) #70122
|
|
||||||
pkg crypto/mlkem, method (*EncapsulationKey768) Bytes() []uint8 #70122
|
|
||||||
pkg crypto/mlkem, method (*EncapsulationKey768) Encapsulate() ([]uint8, []uint8) #70122
|
|
||||||
pkg crypto/mlkem, type DecapsulationKey1024 struct #70122
|
|
||||||
pkg crypto/mlkem, type DecapsulationKey768 struct #70122
|
|
||||||
pkg crypto/mlkem, type EncapsulationKey1024 struct #70122
|
|
||||||
pkg crypto/mlkem, type EncapsulationKey768 struct #70122
|
|
||||||
pkg crypto/pbkdf2, func Key[$0 hash.Hash](func() $0, string, []uint8, int, int) ([]uint8, error) #69488
|
|
||||||
pkg crypto/rand, func Text() string #67057
|
|
||||||
pkg crypto/sha3, func New224() *SHA3 #69982
|
|
||||||
pkg crypto/sha3, func New256() *SHA3 #69982
|
|
||||||
pkg crypto/sha3, func New384() *SHA3 #69982
|
|
||||||
pkg crypto/sha3, func New512() *SHA3 #69982
|
|
||||||
pkg crypto/sha3, func NewCSHAKE128([]uint8, []uint8) *SHAKE #69982
|
|
||||||
pkg crypto/sha3, func NewCSHAKE256([]uint8, []uint8) *SHAKE #69982
|
|
||||||
pkg crypto/sha3, func NewSHAKE128() *SHAKE #69982
|
|
||||||
pkg crypto/sha3, func NewSHAKE256() *SHAKE #69982
|
|
||||||
pkg crypto/sha3, func Sum224([]uint8) [28]uint8 #69982
|
|
||||||
pkg crypto/sha3, func Sum256([]uint8) [32]uint8 #69982
|
|
||||||
pkg crypto/sha3, func Sum384([]uint8) [48]uint8 #69982
|
|
||||||
pkg crypto/sha3, func Sum512([]uint8) [64]uint8 #69982
|
|
||||||
pkg crypto/sha3, func SumSHAKE128([]uint8, int) []uint8 #69982
|
|
||||||
pkg crypto/sha3, func SumSHAKE256([]uint8, int) []uint8 #69982
|
|
||||||
pkg crypto/sha3, method (*SHA3) AppendBinary([]uint8) ([]uint8, error) #69982
|
|
||||||
pkg crypto/sha3, method (*SHA3) BlockSize() int #69982
|
|
||||||
pkg crypto/sha3, method (*SHA3) MarshalBinary() ([]uint8, error) #69982
|
|
||||||
pkg crypto/sha3, method (*SHA3) Reset() #69982
|
|
||||||
pkg crypto/sha3, method (*SHA3) Size() int #69982
|
|
||||||
pkg crypto/sha3, method (*SHA3) Sum([]uint8) []uint8 #69982
|
|
||||||
pkg crypto/sha3, method (*SHA3) UnmarshalBinary([]uint8) error #69982
|
|
||||||
pkg crypto/sha3, method (*SHA3) Write([]uint8) (int, error) #69982
|
|
||||||
pkg crypto/sha3, method (*SHAKE) AppendBinary([]uint8) ([]uint8, error) #69982
|
|
||||||
pkg crypto/sha3, method (*SHAKE) BlockSize() int #69982
|
|
||||||
pkg crypto/sha3, method (*SHAKE) MarshalBinary() ([]uint8, error) #69982
|
|
||||||
pkg crypto/sha3, method (*SHAKE) Read([]uint8) (int, error) #69982
|
|
||||||
pkg crypto/sha3, method (*SHAKE) Reset() #69982
|
|
||||||
pkg crypto/sha3, method (*SHAKE) UnmarshalBinary([]uint8) error #69982
|
|
||||||
pkg crypto/sha3, method (*SHAKE) Write([]uint8) (int, error) #69982
|
|
||||||
pkg crypto/sha3, type SHA3 struct #69982
|
|
||||||
pkg crypto/sha3, type SHAKE struct #69982
|
|
||||||
pkg crypto/subtle, func WithDataIndependentTiming(func()) #66450
|
|
||||||
pkg crypto/tls, const X25519MLKEM768 = 4588 #69985
|
|
||||||
pkg crypto/tls, const X25519MLKEM768 CurveID #69985
|
|
||||||
pkg crypto/tls, type ClientHelloInfo struct, Extensions []uint16 #32936
|
|
||||||
pkg crypto/tls, type Config struct, EncryptedClientHelloKeys []EncryptedClientHelloKey #68500
|
|
||||||
pkg crypto/tls, type EncryptedClientHelloKey struct #68500
|
|
||||||
pkg crypto/tls, type EncryptedClientHelloKey struct, Config []uint8 #68500
|
|
||||||
pkg crypto/tls, type EncryptedClientHelloKey struct, PrivateKey []uint8 #68500
|
|
||||||
pkg crypto/tls, type EncryptedClientHelloKey struct, SendAsRetry bool #68500
|
|
||||||
pkg crypto/x509, const NoValidChains = 10 #68484
|
|
||||||
pkg crypto/x509, const NoValidChains InvalidReason #68484
|
|
||||||
pkg crypto/x509, method (OID) AppendBinary([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg crypto/x509, method (OID) AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg crypto/x509, type Certificate struct, InhibitAnyPolicy int #68484
|
|
||||||
pkg crypto/x509, type Certificate struct, InhibitAnyPolicyZero bool #68484
|
|
||||||
pkg crypto/x509, type Certificate struct, InhibitPolicyMapping int #68484
|
|
||||||
pkg crypto/x509, type Certificate struct, InhibitPolicyMappingZero bool #68484
|
|
||||||
pkg crypto/x509, type Certificate struct, PolicyMappings []PolicyMapping #68484
|
|
||||||
pkg crypto/x509, type Certificate struct, RequireExplicitPolicy int #68484
|
|
||||||
pkg crypto/x509, type Certificate struct, RequireExplicitPolicyZero bool #68484
|
|
||||||
pkg crypto/x509, type PolicyMapping struct #68484
|
|
||||||
pkg crypto/x509, type PolicyMapping struct, IssuerDomainPolicy OID #68484
|
|
||||||
pkg crypto/x509, type PolicyMapping struct, SubjectDomainPolicy OID #68484
|
|
||||||
pkg crypto/x509, type VerifyOptions struct, CertificatePolicies []OID #68484
|
|
||||||
pkg debug/elf, const VER_FLG_BASE = 1 #63952
|
|
||||||
pkg debug/elf, const VER_FLG_BASE DynamicVersionFlag #63952
|
|
||||||
pkg debug/elf, const VER_FLG_INFO = 4 #63952
|
|
||||||
pkg debug/elf, const VER_FLG_INFO DynamicVersionFlag #63952
|
|
||||||
pkg debug/elf, const VER_FLG_WEAK = 2 #63952
|
|
||||||
pkg debug/elf, const VER_FLG_WEAK DynamicVersionFlag #63952
|
|
||||||
pkg debug/elf, method (*File) DynamicVersionNeeds() ([]DynamicVersionNeed, error) #63952
|
|
||||||
pkg debug/elf, method (*File) DynamicVersions() ([]DynamicVersion, error) #63952
|
|
||||||
pkg debug/elf, type DynamicVersion struct #63952
|
|
||||||
pkg debug/elf, type DynamicVersion struct, Deps []string #63952
|
|
||||||
pkg debug/elf, type DynamicVersion struct, Flags DynamicVersionFlag #63952
|
|
||||||
pkg debug/elf, type DynamicVersion struct, Name string #63952
|
|
||||||
pkg debug/elf, type DynamicVersion struct, Index uint16 #63952
|
|
||||||
pkg debug/elf, type DynamicVersionDep struct #63952
|
|
||||||
pkg debug/elf, type DynamicVersionDep struct, Dep string #63952
|
|
||||||
pkg debug/elf, type DynamicVersionDep struct, Flags DynamicVersionFlag #63952
|
|
||||||
pkg debug/elf, type DynamicVersionDep struct, Index uint16 #63952
|
|
||||||
pkg debug/elf, type DynamicVersionFlag uint16 #63952
|
|
||||||
pkg debug/elf, type DynamicVersionNeed struct #63952
|
|
||||||
pkg debug/elf, type DynamicVersionNeed struct, Name string #63952
|
|
||||||
pkg debug/elf, type DynamicVersionNeed struct, Needs []DynamicVersionDep #63952
|
|
||||||
pkg debug/elf, type Symbol struct, HasVersion bool #63952
|
|
||||||
pkg debug/elf, type Symbol struct, VersionIndex VersionIndex #63952
|
|
||||||
pkg debug/elf, method (VersionIndex) Index() uint16 #63952
|
|
||||||
pkg debug/elf, method (VersionIndex) IsHidden() bool #63952
|
|
||||||
pkg debug/elf, type VersionIndex uint16 #63952
|
|
||||||
pkg encoding, type BinaryAppender interface { AppendBinary } #62384
|
|
||||||
pkg encoding, type BinaryAppender interface, AppendBinary([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg encoding, type TextAppender interface { AppendText } #62384
|
|
||||||
pkg encoding, type TextAppender interface, AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg go/types, method (*Interface) EmbeddedTypes() iter.Seq[Type] #66626
|
|
||||||
pkg go/types, method (*Interface) ExplicitMethods() iter.Seq[*Func] #66626
|
|
||||||
pkg go/types, method (*Interface) Methods() iter.Seq[*Func] #66626
|
|
||||||
pkg go/types, method (*MethodSet) Methods() iter.Seq[*Selection] #66626
|
|
||||||
pkg go/types, method (*Named) Methods() iter.Seq[*Func] #66626
|
|
||||||
pkg go/types, method (*Scope) Children() iter.Seq[*Scope] #66626
|
|
||||||
pkg go/types, method (*Struct) Fields() iter.Seq[*Var] #66626
|
|
||||||
pkg go/types, method (*Tuple) Variables() iter.Seq[*Var] #66626
|
|
||||||
pkg go/types, method (*TypeList) Types() iter.Seq[Type] #66626
|
|
||||||
pkg go/types, method (*TypeParamList) TypeParams() iter.Seq[*TypeParam] #66626
|
|
||||||
pkg go/types, method (*Union) Terms() iter.Seq[*Term] #66626
|
|
||||||
pkg hash/maphash, func Comparable[$0 comparable](Seed, $0) uint64 #54670
|
|
||||||
pkg hash/maphash, func WriteComparable[$0 comparable](*Hash, $0) #54670
|
|
||||||
pkg log/slog, method (*LevelVar) AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg log/slog, method (Level) AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg log/slog, var DiscardHandler Handler #62005
|
|
||||||
pkg math/big, method (*Float) AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg math/big, method (*Int) AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg math/big, method (*Rat) AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg math/rand/v2, method (*ChaCha8) AppendBinary([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg math/rand/v2, method (*PCG) AppendBinary([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg net, method (IP) AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg net/http, method (*Protocols) SetHTTP1(bool) #67814
|
|
||||||
pkg net/http, method (*Protocols) SetHTTP2(bool) #67814
|
|
||||||
pkg net/http, method (*Protocols) SetUnencryptedHTTP2(bool) #67816
|
|
||||||
pkg net/http, method (Protocols) HTTP1() bool #67814
|
|
||||||
pkg net/http, method (Protocols) HTTP2() bool #67814
|
|
||||||
pkg net/http, method (Protocols) String() string #67814
|
|
||||||
pkg net/http, method (Protocols) UnencryptedHTTP2() bool #67816
|
|
||||||
pkg net/http, type HTTP2Config struct #67813
|
|
||||||
pkg net/http, type HTTP2Config struct, CountError func(string) #67813
|
|
||||||
pkg net/http, type HTTP2Config struct, MaxConcurrentStreams int #67813
|
|
||||||
pkg net/http, type HTTP2Config struct, MaxDecoderHeaderTableSize int #67813
|
|
||||||
pkg net/http, type HTTP2Config struct, MaxEncoderHeaderTableSize int #67813
|
|
||||||
pkg net/http, type HTTP2Config struct, MaxReadFrameSize int #67813
|
|
||||||
pkg net/http, type HTTP2Config struct, MaxReceiveBufferPerConnection int #67813
|
|
||||||
pkg net/http, type HTTP2Config struct, MaxReceiveBufferPerStream int #67813
|
|
||||||
pkg net/http, type HTTP2Config struct, PermitProhibitedCipherSuites bool #67813
|
|
||||||
pkg net/http, type HTTP2Config struct, PingTimeout time.Duration #67813
|
|
||||||
pkg net/http, type HTTP2Config struct, SendPingTimeout time.Duration #67813
|
|
||||||
pkg net/http, type HTTP2Config struct, WriteByteTimeout time.Duration #67813
|
|
||||||
pkg net/http, type Protocols struct #67814
|
|
||||||
pkg net/http, type Server struct, HTTP2 *HTTP2Config #67813
|
|
||||||
pkg net/http, type Server struct, Protocols *Protocols #67814
|
|
||||||
pkg net/http, type Transport struct, HTTP2 *HTTP2Config #67813
|
|
||||||
pkg net/http, type Transport struct, Protocols *Protocols #67814
|
|
||||||
pkg net/netip, method (Addr) AppendBinary([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg net/netip, method (Addr) AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg net/netip, method (AddrPort) AppendBinary([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg net/netip, method (AddrPort) AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg net/netip, method (Prefix) AppendBinary([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg net/netip, method (Prefix) AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg net/url, method (*URL) AppendBinary([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg os, func OpenInRoot(string, string) (*File, error) #67002
|
|
||||||
pkg os, func OpenRoot(string) (*Root, error) #67002
|
|
||||||
pkg os, method (*Root) Close() error #67002
|
|
||||||
pkg os, method (*Root) Create(string) (*File, error) #67002
|
|
||||||
pkg os, method (*Root) FS() fs.FS #67002
|
|
||||||
pkg os, method (*Root) Lstat(string) (fs.FileInfo, error) #67002
|
|
||||||
pkg os, method (*Root) Mkdir(string, fs.FileMode) error #67002
|
|
||||||
pkg os, method (*Root) Name() string #67002
|
|
||||||
pkg os, method (*Root) Open(string) (*File, error) #67002
|
|
||||||
pkg os, method (*Root) OpenFile(string, int, fs.FileMode) (*File, error) #67002
|
|
||||||
pkg os, method (*Root) OpenRoot(string) (*Root, error) #67002
|
|
||||||
pkg os, method (*Root) Remove(string) error #67002
|
|
||||||
pkg os, method (*Root) Stat(string) (fs.FileInfo, error) #67002
|
|
||||||
pkg os, type Root struct #67002
|
|
||||||
pkg regexp, method (*Regexp) AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg runtime, func AddCleanup[$0 interface{}, $1 interface{}](*$0, func($1), $1) Cleanup #67535
|
|
||||||
pkg runtime, func GOROOT //deprecated #51473
|
|
||||||
pkg runtime, method (Cleanup) Stop() #67535
|
|
||||||
pkg runtime, type Cleanup struct #67535
|
|
||||||
pkg strings, func FieldsFuncSeq(string, func(int32) bool) iter.Seq[string] #61901
|
|
||||||
pkg strings, func FieldsSeq(string) iter.Seq[string] #61901
|
|
||||||
pkg strings, func Lines(string) iter.Seq[string] #61901
|
|
||||||
pkg strings, func SplitAfterSeq(string, string) iter.Seq[string] #61901
|
|
||||||
pkg strings, func SplitSeq(string, string) iter.Seq[string] #61901
|
|
||||||
pkg testing, method (*B) Chdir(string) #62516
|
|
||||||
pkg testing, method (*B) Context() context.Context #36532
|
|
||||||
pkg testing, method (*B) Loop() bool #61515
|
|
||||||
pkg testing, method (*F) Chdir(string) #62516
|
|
||||||
pkg testing, method (*F) Context() context.Context #36532
|
|
||||||
pkg testing, method (*T) Chdir(string) #62516
|
|
||||||
pkg testing, method (*T) Context() context.Context #36532
|
|
||||||
pkg testing, type TB interface, Chdir(string) #62516
|
|
||||||
pkg testing, type TB interface, Context() context.Context #36532
|
|
||||||
pkg time, method (Time) AppendBinary([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg time, method (Time) AppendText([]uint8) ([]uint8, error) #62384
|
|
||||||
pkg weak, func Make[$0 interface{}](*$0) Pointer[$0] #67552
|
|
||||||
pkg weak, method (Pointer[$0]) Value() *$0 #67552
|
|
||||||
pkg weak, type Pointer[$0 interface{}] struct #67552
|
|
@ -1 +0,0 @@
|
|||||||
pkg mime/multipart, func FileContentDisposition(string, string) string #46771
|
|
@ -1,8 +0,0 @@
|
|||||||
pkg io/fs, func Lstat(FS, string) (FileInfo, error) #49580
|
|
||||||
pkg io/fs, func ReadLink(FS, string) (string, error) #49580
|
|
||||||
pkg io/fs, type ReadLinkFS interface { Lstat, Open, ReadLink } #49580
|
|
||||||
pkg io/fs, type ReadLinkFS interface, Lstat(string) (FileInfo, error) #49580
|
|
||||||
pkg io/fs, type ReadLinkFS interface, Open(string) (File, error) #49580
|
|
||||||
pkg io/fs, type ReadLinkFS interface, ReadLink(string) (string, error) #49580
|
|
||||||
pkg testing/fstest, method (MapFS) Lstat(string) (fs.FileInfo, error) #49580
|
|
||||||
pkg testing/fstest, method (MapFS) ReadLink(string) (string, error) #49580
|
|
@ -1,5 +0,0 @@
|
|||||||
pkg crypto, func SignMessage(Signer, io.Reader, []uint8, SignerOpts) ([]uint8, error) #63405
|
|
||||||
pkg crypto, type MessageSigner interface { Public, Sign, SignMessage } #63405
|
|
||||||
pkg crypto, type MessageSigner interface, Public() PublicKey #63405
|
|
||||||
pkg crypto, type MessageSigner interface, Sign(io.Reader, []uint8, SignerOpts) ([]uint8, error) #63405
|
|
||||||
pkg crypto, type MessageSigner interface, SignMessage(io.Reader, []uint8, SignerOpts) ([]uint8, error) #63405
|
|
@ -1 +0,0 @@
|
|||||||
pkg sync, method (*WaitGroup) Go(func()) #63769
|
|
@ -1,8 +0,0 @@
|
|||||||
pkg os, method (*Root) Chmod(string, fs.FileMode) error #67002
|
|
||||||
pkg os, method (*Root) Chown(string, int, int) error #67002
|
|
||||||
pkg os, method (*Root) Chtimes(string, time.Time, time.Time) error #67002
|
|
||||||
pkg os, method (*Root) Lchown(string, int, int) error #67002
|
|
||||||
pkg os, method (*Root) Link(string, string) error #67002
|
|
||||||
pkg os, method (*Root) Readlink(string) (string, error) #67002
|
|
||||||
pkg os, method (*Root) Rename(string, string) error #67002
|
|
||||||
pkg os, method (*Root) Symlink(string, string) error #67002
|
|
@ -1 +0,0 @@
|
|||||||
pkg crypto/tls, type ConnectionState struct, CurveID CurveID #67516
|
|
@ -1,17 +0,0 @@
|
|||||||
pkg go/types, const FieldVar = 6 #70250
|
|
||||||
pkg go/types, const FieldVar VarKind #70250
|
|
||||||
pkg go/types, const LocalVar = 2 #70250
|
|
||||||
pkg go/types, const LocalVar VarKind #70250
|
|
||||||
pkg go/types, const PackageVar = 1 #70250
|
|
||||||
pkg go/types, const PackageVar VarKind #70250
|
|
||||||
pkg go/types, const ParamVar = 4 #70250
|
|
||||||
pkg go/types, const ParamVar VarKind #70250
|
|
||||||
pkg go/types, const RecvVar = 3 #70250
|
|
||||||
pkg go/types, const RecvVar VarKind #70250
|
|
||||||
pkg go/types, const ResultVar = 5 #70250
|
|
||||||
pkg go/types, const ResultVar VarKind #70250
|
|
||||||
pkg go/types, func LookupSelection(Type, bool, *Package, string) (Selection, bool) #70737
|
|
||||||
pkg go/types, method (*Var) Kind() VarKind #70250
|
|
||||||
pkg go/types, method (*Var) SetKind(VarKind) #70250
|
|
||||||
pkg go/types, method (VarKind) String() string #70250
|
|
||||||
pkg go/types, type VarKind uint8 #70250
|
|
@ -1 +0,0 @@
|
|||||||
pkg log/slog, method (Record) Source() *Source #70280
|
|
@ -1,3 +0,0 @@
|
|||||||
pkg unicode, var CategoryAliases map[string]string #70780
|
|
||||||
pkg unicode, var Cn *RangeTable #70780
|
|
||||||
pkg unicode, var LC *RangeTable #70780
|
|
@ -1 +0,0 @@
|
|||||||
pkg go/parser, func ParseDir //deprecated #71122
|
|
@ -1,4 +0,0 @@
|
|||||||
pkg debug/elf, const PT_RISCV_ATTRIBUTES = 1879048195 #72843
|
|
||||||
pkg debug/elf, const PT_RISCV_ATTRIBUTES ProgType #72843
|
|
||||||
pkg debug/elf, const SHT_RISCV_ATTRIBUTES = 1879048195 #72843
|
|
||||||
pkg debug/elf, const SHT_RISCV_ATTRIBUTES SectionType #72843
|
|
@ -1 +1,2 @@
|
|||||||
branch: master
|
branch: release-branch.go1.23
|
||||||
|
parent-branch: master
|
||||||
|
@ -70,6 +70,6 @@ To begin the next release development cycle, populate the contents of `next`
|
|||||||
with those of `initial`. From the repo root:
|
with those of `initial`. From the repo root:
|
||||||
|
|
||||||
> cd doc
|
> cd doc
|
||||||
> cp -R initial/ next
|
> cp -r initial/* next
|
||||||
|
|
||||||
Then edit `next/1-intro.md` to refer to the next version.
|
Then edit `next/1-intro.md` to refer to the next version.
|
||||||
|
6864
doc/go1.17_spec.html
Normal file
6864
doc/go1.17_spec.html
Normal file
File diff suppressed because it is too large
Load Diff
@ -82,7 +82,7 @@ while still insisting that races are errors and that tools can diagnose and repo
|
|||||||
<p>
|
<p>
|
||||||
The following formal definition of Go's memory model closely follows
|
The following formal definition of Go's memory model closely follows
|
||||||
the approach presented by Hans-J. Boehm and Sarita V. Adve in
|
the approach presented by Hans-J. Boehm and Sarita V. Adve in
|
||||||
“<a href="https://dl.acm.org/doi/10.1145/1375581.1375591">Foundations of the C++ Concurrency Memory Model</a>”,
|
“<a href="https://www.hpl.hp.com/techreports/2008/HPL-2008-56.pdf">Foundations of the C++ Concurrency Memory Model</a>”,
|
||||||
published in PLDI 2008.
|
published in PLDI 2008.
|
||||||
The definition of data-race-free programs and the guarantee of sequential consistency
|
The definition of data-race-free programs and the guarantee of sequential consistency
|
||||||
for race-free programs are equivalent to the ones in that work.
|
for race-free programs are equivalent to the ones in that work.
|
||||||
@ -453,7 +453,7 @@ crash, or do something else.)
|
|||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p class="rule">
|
<p class="rule">
|
||||||
The <i>k</i>th receive from a channel with capacity <i>C</i> is synchronized before the completion of the <i>k</i>+<i>C</i>th send on that channel.
|
The <i>k</i>th receive on a channel with capacity <i>C</i> is synchronized before the completion of the <i>k</i>+<i>C</i>th send from that channel completes.
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p>
|
<p>
|
||||||
|
604
doc/go_spec.html
604
doc/go_spec.html
File diff suppressed because it is too large
Load Diff
105
doc/godebug.md
105
doc/godebug.md
@ -34,7 +34,6 @@ For example, if a Go program is running in an environment that contains
|
|||||||
|
|
||||||
then that Go program will disable the use of HTTP/2 by default in both
|
then that Go program will disable the use of HTTP/2 by default in both
|
||||||
the HTTP client and the HTTP server.
|
the HTTP client and the HTTP server.
|
||||||
Unrecognized settings in the `GODEBUG` environment variable are ignored.
|
|
||||||
It is also possible to set the default `GODEBUG` for a given program
|
It is also possible to set the default `GODEBUG` for a given program
|
||||||
(discussed below).
|
(discussed below).
|
||||||
|
|
||||||
@ -109,9 +108,7 @@ Only the work module's `go.mod` is consulted for `godebug` directives.
|
|||||||
Any directives in required dependency modules are ignored.
|
Any directives in required dependency modules are ignored.
|
||||||
It is an error to list a `godebug` with an unrecognized setting.
|
It is an error to list a `godebug` with an unrecognized setting.
|
||||||
(Toolchains older than Go 1.23 reject all `godebug` lines, since they do not
|
(Toolchains older than Go 1.23 reject all `godebug` lines, since they do not
|
||||||
understand `godebug` at all.) When a workspace is in use, `godebug`
|
understand `godebug` at all.)
|
||||||
directives in `go.mod` files are ignored, and `go.work` will be consulted
|
|
||||||
for `godebug` directives instead.
|
|
||||||
|
|
||||||
The defaults from the `go` and `godebug` lines apply to all main
|
The defaults from the `go` and `godebug` lines apply to all main
|
||||||
packages that are built. For more fine-grained control,
|
packages that are built. For more fine-grained control,
|
||||||
@ -153,100 +150,6 @@ for example,
|
|||||||
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
|
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
|
||||||
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
|
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
|
||||||
|
|
||||||
### Go 1.25
|
|
||||||
|
|
||||||
Go 1.25 added a new `decoratemappings` setting that controls whether the Go
|
|
||||||
runtime annotates OS anonymous memory mappings with context about their
|
|
||||||
purpose. These annotations appear in /proc/self/maps and /proc/self/smaps as
|
|
||||||
"[anon: Go: ...]". This setting is only used on Linux. For Go 1.25, it defaults
|
|
||||||
to `decoratemappings=1`, enabling annotations. Using `decoratemappings=0`
|
|
||||||
reverts to the pre-Go 1.25 behavior. This setting is fixed at program startup
|
|
||||||
time, and can't be modified by changing the `GODEBUG` environment variable
|
|
||||||
after the program starts.
|
|
||||||
|
|
||||||
Go 1.25 added a new `embedfollowsymlinks` setting that controls whether the
|
|
||||||
Go command will follow symlinks to regular files embedding files.
|
|
||||||
The default value `embedfollowsymlinks=0` does not allow following
|
|
||||||
symlinks. `embedfollowsymlinks=1` will allow following symlinks.
|
|
||||||
|
|
||||||
Go 1.25 corrected the semantics of contention reports for runtime-internal locks,
|
|
||||||
and so removed the [`runtimecontentionstacks` setting](/pkg/runtime#hdr-Environment_Variable).
|
|
||||||
|
|
||||||
### Go 1.24
|
|
||||||
|
|
||||||
Go 1.24 added a new `fips140` setting that controls whether the Go
|
|
||||||
Cryptographic Module operates in FIPS 140-3 mode.
|
|
||||||
The possible values are:
|
|
||||||
- "off": no special support for FIPS 140-3 mode. This is the default.
|
|
||||||
- "on": the Go Cryptographic Module operates in FIPS 140-3 mode.
|
|
||||||
- "only": like "on", but cryptographic algorithms not approved by
|
|
||||||
FIPS 140-3 return an error or panic.
|
|
||||||
For more information, see [FIPS 140-3 Compliance](/doc/security/fips140).
|
|
||||||
This setting is fixed at program startup time, and can't be modified
|
|
||||||
by changing the `GODEBUG` environment variable after the program starts.
|
|
||||||
|
|
||||||
Go 1.24 changed the global [`math/rand.Seed`](/pkg/math/rand/#Seed) to be a
|
|
||||||
no-op. This behavior is controlled by the `randseednop` setting.
|
|
||||||
For Go 1.24 it defaults to `randseednop=1`.
|
|
||||||
Using `randseednop=0` reverts to the pre-Go 1.24 behavior.
|
|
||||||
|
|
||||||
Go 1.24 added new values for the `multipathtcp` setting.
|
|
||||||
The possible values for `multipathtcp` are now:
|
|
||||||
- "0": disable MPTCP on dialers and listeners by default
|
|
||||||
- "1": enable MPTCP on dialers and listeners by default
|
|
||||||
- "2": enable MPTCP on listeners only by default
|
|
||||||
- "3": enable MPTCP on dialers only by default
|
|
||||||
|
|
||||||
For Go 1.24, it now defaults to multipathtcp="2", thus
|
|
||||||
enabled by default on listeners. Using multipathtcp="0" reverts to the
|
|
||||||
pre-Go 1.24 behavior.
|
|
||||||
|
|
||||||
Go 1.24 changed the behavior of `go test -json` to emit build errors as JSON
|
|
||||||
instead of text.
|
|
||||||
These new JSON events are distinguished by new `Action` values,
|
|
||||||
but can still cause problems with CI systems that aren't robust to these events.
|
|
||||||
This behavior can be controlled with the `gotestjsonbuildtext` setting.
|
|
||||||
Using `gotestjsonbuildtext=1` restores the 1.23 behavior.
|
|
||||||
This setting will be removed in a future release, Go 1.28 at the earliest.
|
|
||||||
|
|
||||||
Go 1.24 changed [`crypto/rsa`](/pkg/crypto/rsa) to require RSA keys to be at
|
|
||||||
least 1024 bits. This behavior can be controlled with the `rsa1024min` setting.
|
|
||||||
Using `rsa1024min=0` restores the Go 1.23 behavior.
|
|
||||||
|
|
||||||
Go 1.24 introduced a mechanism for enabling platform specific Data Independent
|
|
||||||
Timing (DIT) modes in the [`crypto/subtle`](/pkg/crypto/subtle) package. This
|
|
||||||
mode can be enabled for an entire program with the `dataindependenttiming` setting.
|
|
||||||
For Go 1.24 it defaults to `dataindependenttiming=0`. There is no change in default
|
|
||||||
behavior from Go 1.23 when `dataindependenttiming` is unset.
|
|
||||||
Using `dataindependenttiming=1` enables the DIT mode for the entire Go program.
|
|
||||||
When enabled, DIT will be enabled when calling into C from Go. When enabled,
|
|
||||||
calling into Go code from C will enable DIT, and disable it before returning to
|
|
||||||
C if it was not enabled when Go code was entered.
|
|
||||||
This currently only affects arm64 programs. For all other platforms it is a no-op.
|
|
||||||
|
|
||||||
Go 1.24 removed the `x509sha1` setting. `crypto/x509` no longer supports verifying
|
|
||||||
signatures on certificates that use SHA-1 based signature algorithms.
|
|
||||||
|
|
||||||
Go 1.24 changes the default value of the [`x509usepolicies`
|
|
||||||
setting.](/pkg/crypto/x509/#CreateCertificate) from `0` to `1`. When marshalling
|
|
||||||
certificates, policies are now taken from the
|
|
||||||
[`Certificate.Policies`](/pkg/crypto/x509/#Certificate.Policies) field rather
|
|
||||||
than the
|
|
||||||
[`Certificate.PolicyIdentifiers`](/pkg/crypto/x509/#Certificate.PolicyIdentifiers)
|
|
||||||
field by default.
|
|
||||||
|
|
||||||
Go 1.24 enabled the post-quantum key exchange mechanism
|
|
||||||
X25519MLKEM768 by default. The default can be reverted using the
|
|
||||||
[`tlsmlkem` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
|
||||||
This can be useful when dealing with buggy TLS servers that do not handle large records correctly,
|
|
||||||
causing a timeout during the handshake (see [TLS post-quantum TL;DR fail](https://tldr.fail/)).
|
|
||||||
Go 1.24 also removed X25519Kyber768Draft00 and the Go 1.23 `tlskyber` setting.
|
|
||||||
|
|
||||||
Go 1.24 made [`ParsePKCS1PrivateKey`](/pkg/crypto/x509/#ParsePKCS1PrivateKey)
|
|
||||||
use and validate the CRT parameters in the encoded private key. This behavior
|
|
||||||
can be controlled with the `x509rsacrt` setting. Using `x509rsacrt=0` restores
|
|
||||||
the Go 1.23 behavior.
|
|
||||||
|
|
||||||
### Go 1.23
|
### Go 1.23
|
||||||
|
|
||||||
Go 1.23 changed the channels created by package time to be unbuffered
|
Go 1.23 changed the channels created by package time to be unbuffered
|
||||||
@ -276,8 +179,6 @@ Previous versions default to `winreadlinkvolume=0`.
|
|||||||
Go 1.23 enabled the experimental post-quantum key exchange mechanism
|
Go 1.23 enabled the experimental post-quantum key exchange mechanism
|
||||||
X25519Kyber768Draft00 by default. The default can be reverted using the
|
X25519Kyber768Draft00 by default. The default can be reverted using the
|
||||||
[`tlskyber` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
[`tlskyber` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
||||||
This can be useful when dealing with buggy TLS servers that do not handle large records correctly,
|
|
||||||
causing a timeout during the handshake (see [TLS post-quantum TL;DR fail](https://tldr.fail/)).
|
|
||||||
|
|
||||||
Go 1.23 changed the behavior of
|
Go 1.23 changed the behavior of
|
||||||
[crypto/x509.ParseCertificate](/pkg/crypto/x509/#ParseCertificate) to reject
|
[crypto/x509.ParseCertificate](/pkg/crypto/x509/#ParseCertificate) to reject
|
||||||
@ -373,7 +274,7 @@ certificate policy OIDs with components larger than 31 bits. By default this
|
|||||||
field is only used during parsing, when it is populated with policy OIDs, but
|
field is only used during parsing, when it is populated with policy OIDs, but
|
||||||
not used during marshaling. It can be used to marshal these larger OIDs, instead
|
not used during marshaling. It can be used to marshal these larger OIDs, instead
|
||||||
of the existing PolicyIdentifiers field, by using the
|
of the existing PolicyIdentifiers field, by using the
|
||||||
[`x509usepolicies` setting](/pkg/crypto/x509/#CreateCertificate).
|
[`x509usepolicies` setting.](/pkg/crypto/x509/#CreateCertificate).
|
||||||
|
|
||||||
|
|
||||||
### Go 1.21
|
### Go 1.21
|
||||||
@ -441,7 +342,7 @@ There is no plan to remove this setting.
|
|||||||
|
|
||||||
Go 1.18 removed support for SHA1 in most X.509 certificates,
|
Go 1.18 removed support for SHA1 in most X.509 certificates,
|
||||||
controlled by the [`x509sha1` setting](/pkg/crypto/x509#InsecureAlgorithmError).
|
controlled by the [`x509sha1` setting](/pkg/crypto/x509#InsecureAlgorithmError).
|
||||||
This setting was removed in Go 1.24.
|
This setting will be removed in a future release, Go 1.22 at the earliest.
|
||||||
|
|
||||||
### Go 1.10
|
### Go 1.10
|
||||||
|
|
||||||
|
@ -1,3 +1,9 @@
|
|||||||
|
<!--
|
||||||
|
NOTE: In this document and others in this directory, the convention is to
|
||||||
|
set fixed-width phrases with non-fixed-width spaces, as in
|
||||||
|
`hello` `world`.
|
||||||
|
-->
|
||||||
|
|
||||||
<style>
|
<style>
|
||||||
main ul li { margin: 0.5em 0; }
|
main ul li { margin: 0.5em 0; }
|
||||||
</style>
|
</style>
|
||||||
|
@ -1,10 +1,3 @@
|
|||||||
### Minor changes to the library {#minor_library_changes}
|
### Minor changes to the library {#minor_library_changes}
|
||||||
|
|
||||||
#### go/types
|
|
||||||
|
|
||||||
The `Var.Kind` method returns an enumeration of type `VarKind` that
|
|
||||||
classifies the variable (package-level, local, receiver, parameter,
|
|
||||||
result, or struct field). See issue #70250.
|
|
||||||
|
|
||||||
Callers of `NewVar` or `NewParam` are encouraged to call `Var.SetKind`
|
|
||||||
to ensure that this attribute is set correctly in all cases.
|
|
||||||
|
@ -1,8 +0,0 @@
|
|||||||
<style>
|
|
||||||
main ul li { margin: 0.5em 0; }
|
|
||||||
</style>
|
|
||||||
|
|
||||||
## DRAFT RELEASE NOTES — Introduction to Go 1.N {#introduction}
|
|
||||||
|
|
||||||
**Go 1.25 is not yet released. These are work-in-progress release notes.
|
|
||||||
Go 1.25 is expected to be released in August 2025.**
|
|
@ -1,3 +0,0 @@
|
|||||||
## Changes to the language {#language}
|
|
||||||
|
|
||||||
|
|
@ -1,42 +0,0 @@
|
|||||||
## Tools {#tools}
|
|
||||||
|
|
||||||
### Go command {#go-command}
|
|
||||||
|
|
||||||
The `go build` `-asan` option now defaults to doing leak detection at
|
|
||||||
program exit.
|
|
||||||
This will report an error if memory allocated by C is not freed and is
|
|
||||||
not referenced by any other memory allocated by either C or Go.
|
|
||||||
These new error reports may be disabled by setting
|
|
||||||
`ASAN_OPTIONS=detect_leaks=0` in the environment when running the
|
|
||||||
program.
|
|
||||||
|
|
||||||
<!-- go.dev/issue/71294 -->
|
|
||||||
|
|
||||||
The new `work` package pattern matches all packages in the work (formerly called main)
|
|
||||||
modules: either the single work module in module mode or the set of workspace modules
|
|
||||||
in workspace mode.
|
|
||||||
|
|
||||||
<!-- go.dev/issue/65847 -->
|
|
||||||
|
|
||||||
When the go command updates the `go` line in a `go.mod` or `go.work` file,
|
|
||||||
it [no longer](/ref/mod#go-mod-file-toolchain) adds a toolchain line
|
|
||||||
specifying the command's current version.
|
|
||||||
|
|
||||||
### Cgo {#cgo}
|
|
||||||
|
|
||||||
### Vet {#vet}
|
|
||||||
|
|
||||||
The `go vet` command includes new analyzers:
|
|
||||||
|
|
||||||
<!-- go.dev/issue/18022 -->
|
|
||||||
|
|
||||||
- [waitgroup](https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/waitgroup),
|
|
||||||
which reports misplaced calls to [sync.WaitGroup.Add]; and
|
|
||||||
|
|
||||||
<!-- go.dev/issue/28308 -->
|
|
||||||
|
|
||||||
- [hostport](https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/hostport),
|
|
||||||
which reports uses of `fmt.Sprintf("%s:%d", host, port)` to
|
|
||||||
construct addresses for [net.Dial], as these will not work with
|
|
||||||
IPv6; instead it suggests using [net.JoinHostPort].
|
|
||||||
|
|
@ -1,26 +0,0 @@
|
|||||||
## Runtime {#runtime}
|
|
||||||
|
|
||||||
<!-- go.dev/issue/71517 -->
|
|
||||||
|
|
||||||
The message printed when a program exits due to an unhandled panic
|
|
||||||
that was recovered and repanicked no longer repeats the text of
|
|
||||||
the panic value.
|
|
||||||
|
|
||||||
Previously, a program which panicked with `panic("PANIC")`,
|
|
||||||
recovered the panic, and then repanicked with the original
|
|
||||||
value would print:
|
|
||||||
|
|
||||||
panic: PANIC [recovered]
|
|
||||||
panic: PANIC
|
|
||||||
|
|
||||||
This program will now print:
|
|
||||||
|
|
||||||
panic: PANIC [recovered, repanicked]
|
|
||||||
|
|
||||||
<!-- go.dev/issue/71546 -->
|
|
||||||
|
|
||||||
On Linux systems with kernel support for anonymous VMA names
|
|
||||||
(`CONFIG_ANON_VMA_NAME`), the Go runtime will annotate anonymous memory
|
|
||||||
mappings with context about their purpose. e.g., `[anon: Go: heap]` for heap
|
|
||||||
memory. This can be disabled with the [GODEBUG setting](/doc/godebug)
|
|
||||||
`decoratemappings=0`.
|
|
@ -1,44 +0,0 @@
|
|||||||
## Compiler {#compiler}
|
|
||||||
|
|
||||||
<!-- https://go.dev/issue/26379 -->
|
|
||||||
|
|
||||||
The compiler and linker in Go 1.25 now generate debug information
|
|
||||||
using [DWARF version 5](https://dwarfstd.org/dwarf5std.html); the
|
|
||||||
newer DWARF version reduces the space required for debugging
|
|
||||||
information in Go binaries.
|
|
||||||
DWARF 5 generation is gated by the "dwarf5" GOEXPERIMENT; this
|
|
||||||
functionality can be disabled (for now) using GOEXPERIMENT=nodwarf5.
|
|
||||||
|
|
||||||
<!-- https://go.dev/issue/72860, CL 657715 -->
|
|
||||||
|
|
||||||
The compiler [has been fixed](/cl/657715)
|
|
||||||
to ensure that nil pointer checks are performed promptly. Programs like the following,
|
|
||||||
which used to execute successfully, will now panic with a nil-pointer exception:
|
|
||||||
|
|
||||||
```
|
|
||||||
package main
|
|
||||||
|
|
||||||
import "os"
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
f, err := os.Open("nonExistentFile")
|
|
||||||
name := f.Name()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
println(name)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This program is incorrect in that it uses the result of `os.Open` before checking
|
|
||||||
the error. The main result of `os.Open` can be a nil pointer if the error result is non-nil.
|
|
||||||
But because of [a compiler bug](/issue/72860), this program ran successfully under
|
|
||||||
Go versions 1.21 through 1.24 (in violation of the Go spec). It will no longer run
|
|
||||||
successfully in Go 1.25. If this change is affecting your code, the solution is to put
|
|
||||||
the non-nil error check earlier in your code, preferably immediately after
|
|
||||||
the error-generating statement.
|
|
||||||
|
|
||||||
## Assembler {#assembler}
|
|
||||||
|
|
||||||
## Linker {#linker}
|
|
||||||
|
|
@ -1,2 +0,0 @@
|
|||||||
## Standard library {#library}
|
|
||||||
|
|
@ -1,3 +0,0 @@
|
|||||||
### Minor changes to the library {#minor_library_changes}
|
|
||||||
|
|
||||||
|
|
@ -1 +0,0 @@
|
|||||||
API changes and other small changes to the standard library go here.
|
|
@ -1,2 +0,0 @@
|
|||||||
The [*Writer.AddFS] implementation now supports symbolic links
|
|
||||||
for filesystems that implement [io/fs.ReadLinkFS].
|
|
@ -1 +0,0 @@
|
|||||||
[MessageSigner] is a new signing interface that can be implemented by signers that wish to hash the message to be signed themselves. A new function is also introduced, [SignMessage] which attempts to update a [Signer] interface to [MessageSigner], using the [MessageSigner.SignMessage] method if successful, and [Signer.Sign] if not. This can be used when code wishes to support both [Signer] and [MessageSigner].
|
|
@ -1,2 +0,0 @@
|
|||||||
The hidden and undocumented `Inverse` and `CombinedMult` methods on some [Curve]
|
|
||||||
implementations have been removed.
|
|
@ -1,2 +0,0 @@
|
|||||||
The new [ConnectionState.CurveID] field exposes the key exchange mechanism used
|
|
||||||
to establish the connection.
|
|
@ -1,2 +0,0 @@
|
|||||||
When [FIPS 140-3 mode](/doc/security/fips140) is enabled, Extended Master Secret
|
|
||||||
is now required in TLS 1.2, and Ed25519 and X25519MLKEM768 are now allowed.
|
|
@ -1 +0,0 @@
|
|||||||
[CreateCertificate], [CreateCertificateRequest], and [CreateRevocationList] can now accept a [crypto.MessageSigner] signing interface as well as [crypto.Signer]. This allows these functions to use signers which implement "one-shot" signing interfaces, where hashing is done as part of the signing operation, instead of by the caller.
|
|
@ -1,4 +0,0 @@
|
|||||||
The [debug/elf] package adds two new constants:
|
|
||||||
- [PT_RISCV_ATTRIBUTES]
|
|
||||||
- [SHT_RISCV_ATTRIBUTES]
|
|
||||||
for RISC-V ELF parsing.
|
|
@ -1 +0,0 @@
|
|||||||
The [ParseDir] function is deprecated.
|
|
@ -1,3 +0,0 @@
|
|||||||
[Var] now has a [Var.Kind] method that classifies the variable as one
|
|
||||||
of: package-level, receiver, parameter, result, or local variable, or
|
|
||||||
a struct field.
|
|
@ -1,3 +0,0 @@
|
|||||||
The new [LookupSelection] function looks up the field or method of a
|
|
||||||
given name and receiver type, like the existing [LookupFieldOrMethod]
|
|
||||||
function, but returns the result in the form of a [Selection].
|
|
@ -1 +0,0 @@
|
|||||||
A new [ReadLinkFS] interface provides the ability to read symbolic links in a filesystem.
|
|
@ -1 +0,0 @@
|
|||||||
[Record] now has a Source() method, returning its source location or nil if unavailable.
|
|
@ -1,2 +0,0 @@
|
|||||||
The new helper function [FieldContentDisposition] builds multipart
|
|
||||||
Content-Disposition header fields.
|
|
@ -1,3 +0,0 @@
|
|||||||
On Windows, the [TCPConn.File], [UDPConn.File], [UnixConn.File],
|
|
||||||
[IPConn.File], [TCPListener.File], and [UnixListener.File]
|
|
||||||
methods are now supported.
|
|
@ -1,5 +0,0 @@
|
|||||||
[LookupMX] and [*Resolver.LookupMX] now return DNS names that look
|
|
||||||
like valid IP addresses, as well as valid domain names.
|
|
||||||
Previously if a name server returned an IP address as a DNS name,
|
|
||||||
LookupMX would discard it, as required by the RFCs.
|
|
||||||
However, name servers in practice do sometimes return IP addresses.
|
|
@ -1 +0,0 @@
|
|||||||
On Windows, [ListenMulticastUDP] now supports IPv6 addresses.
|
|
@ -1,2 +0,0 @@
|
|||||||
On Windows, the [FileConn], [FilePacketConn], [FileListener]
|
|
||||||
functions are now supported.
|
|
@ -1,14 +0,0 @@
|
|||||||
On Windows, [NewFile] now supports handles opened for asynchronous I/O (that is,
|
|
||||||
[syscall.FILE_FLAG_OVERLAPPED] is specified in the [syscall.CreateFile] call).
|
|
||||||
These handles are associated with the Go runtime's I/O completion port,
|
|
||||||
which provides the following benefits for the resulting [File]:
|
|
||||||
|
|
||||||
- I/O methods ([File.Read], [File.Write], [File.ReadAt], and [File.WriteAt]) do not block an OS thread.
|
|
||||||
- Deadline methods ([File.SetDeadline], [File.SetReadDeadline], and [File.SetWriteDeadline]) are supported.
|
|
||||||
|
|
||||||
This enhancement is especially beneficial for applications that communicate via named pipes on Windows.
|
|
||||||
|
|
||||||
Note that a handle can only be associated with one completion port at a time.
|
|
||||||
If the handle provided to [NewFile] is already associated with a completion port,
|
|
||||||
the returned [File] is downgraded to synchronous I/O mode.
|
|
||||||
In this case, I/O methods will block an OS thread, and the deadline methods have no effect.
|
|
@ -1,2 +0,0 @@
|
|||||||
The filesystem returned by [DirFS] implements the new [io/fs.ReadLinkFS] interface.
|
|
||||||
[CopyFS] supports symlinks when copying filesystems that implement [io/fs.ReadLinkFS].
|
|
@ -1,10 +0,0 @@
|
|||||||
The [os.Root] type supports the following additional methods:
|
|
||||||
|
|
||||||
* [os.Root.Chmod]
|
|
||||||
* [os.Root.Chown]
|
|
||||||
* [os.Root.Chtimes]
|
|
||||||
* [os.Root.Lchown]
|
|
||||||
* [os.Root.Link]
|
|
||||||
* [os.Root.Readlink]
|
|
||||||
* [os.Root.Rename]
|
|
||||||
* [os.Root.Symlink]
|
|
@ -1,4 +0,0 @@
|
|||||||
The `\p{name}` and `\P{name}` character class syntaxes now accept the names
|
|
||||||
Any, ASCII, Assigned, Cn, and LC, as well as Unicode category aliases like `\p{Letter}` for `\pL`.
|
|
||||||
Following [Unicode TR18](https://unicode.org/reports/tr18/), they also now use
|
|
||||||
case-insensitive name lookups, ignoring spaces, underscores, and hyphens.
|
|
@ -1,6 +0,0 @@
|
|||||||
The mutex profile for contention on runtime-internal locks now correctly points
|
|
||||||
to the end of the critical section that caused the delay. This matches the
|
|
||||||
profile's behavior for contention on `sync.Mutex` values. The
|
|
||||||
`runtimecontentionstacks` setting for `GODEBUG`, which allowed opting in to the
|
|
||||||
unusual behavior of Go 1.22 through 1.24 for this part of the profile, is now
|
|
||||||
gone.
|
|
@ -1,2 +0,0 @@
|
|||||||
[WaitGroup] has added a new method [WaitGroup.Go],
|
|
||||||
that makes the common pattern of creating and counting goroutines more convenient.
|
|
@ -1,3 +0,0 @@
|
|||||||
[MapFS] implements the new [io/fs.ReadLinkFS] interface.
|
|
||||||
[TestFS] will verify the functionality of the [io/fs.ReadLinkFS] interface if implemented.
|
|
||||||
[TestFS] will no longer follow symlinks to avoid unbounded recursion.
|
|
@ -1,4 +0,0 @@
|
|||||||
The new [CategoryAliases] map provides access to category alias names, such as “Letter” for “L”.
|
|
||||||
The new categories [Cn] and [LC] define unassigned codepoints and cased letters, respectively.
|
|
||||||
These have always been defined by Unicode but were inadvertently omitted in earlier versions of Go.
|
|
||||||
The [C] category now includes [Cn], meaning it has added all unassigned code points.
|
|
@ -1,11 +0,0 @@
|
|||||||
## Ports {#ports}
|
|
||||||
|
|
||||||
### Darwin
|
|
||||||
|
|
||||||
<!-- go.dev/issue/69839 -->
|
|
||||||
As [announced](/doc/go1.24#darwin) in the Go 1.24 release notes, Go 1.25 requires macOS 12 Monterey or later; support for previous versions has been discontinued.
|
|
||||||
|
|
||||||
### Windows
|
|
||||||
|
|
||||||
<!-- go.dev/issue/71671 -->
|
|
||||||
Go 1.25 is the last release that contains the [broken](/doc/go1.24#windows) 32-bit windows/arm port (`GOOS=windows` `GOARCH=arm`). It will be removed in Go 1.26.
|
|
@ -1,46 +0,0 @@
|
|||||||
# Copyright 2024 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
# Rules for building and testing new FIPS snapshots.
|
|
||||||
# For example:
|
|
||||||
#
|
|
||||||
# make v1.2.3.zip
|
|
||||||
# make v1.2.3.test
|
|
||||||
#
|
|
||||||
# and then if changes are needed, check them into master
|
|
||||||
# and run 'make v1.2.3.rm' and repeat.
|
|
||||||
#
|
|
||||||
# Note that once published a snapshot zip file should never
|
|
||||||
# be modified. We record the sha256 hashes of the zip files
|
|
||||||
# in fips140.sum, and the cmd/go/internal/fips140 test checks
|
|
||||||
# that the zips match.
|
|
||||||
#
|
|
||||||
# When the zip file is finalized, run 'make updatesum' to update
|
|
||||||
# fips140.sum.
|
|
||||||
|
|
||||||
default:
|
|
||||||
@echo nothing to make
|
|
||||||
|
|
||||||
# make v1.2.3.zip builds a v1.2.3.zip file
|
|
||||||
# from the current origin/master.
|
|
||||||
# copy and edit the 'go run' command by hand to use a different branch.
|
|
||||||
v%.zip:
|
|
||||||
git fetch origin master
|
|
||||||
go run ../../src/cmd/go/internal/fips140/mkzip.go v$*
|
|
||||||
|
|
||||||
# normally mkzip refuses to overwrite an existing zip file.
|
|
||||||
# make v1.2.3.rm removes the zip file and unpacked
|
|
||||||
# copy from the module cache.
|
|
||||||
v%.rm:
|
|
||||||
rm -f v$*.zip
|
|
||||||
chmod -R u+w $$(go env GOMODCACHE)/golang.org/fips140@v$* 2>/dev/null || true
|
|
||||||
rm -rf $$(go env GOMODCACHE)/golang.org/fips140@v$*
|
|
||||||
|
|
||||||
# make v1.2.3.test runs the crypto tests using that snapshot.
|
|
||||||
v%.test:
|
|
||||||
GOFIPS140=v$* go test -short crypto...
|
|
||||||
|
|
||||||
# make updatesum updates the fips140.sum file.
|
|
||||||
updatesum:
|
|
||||||
go test cmd/go/internal/fips140 -update
|
|
@ -1,9 +0,0 @@
|
|||||||
This directory holds snapshots of the crypto/internal/fips140 tree
|
|
||||||
that are being validated and certified for FIPS-140 use.
|
|
||||||
The file x.txt (for example, inprocess.txt, certified.txt)
|
|
||||||
defines the meaning of the FIPS version alias x, listing
|
|
||||||
the exact version to use.
|
|
||||||
|
|
||||||
The zip files are created by cmd/go/internal/fips140/mkzip.go.
|
|
||||||
The fips140.sum file lists checksums for the zip files.
|
|
||||||
See the Makefile for recipes.
|
|
@ -1,12 +0,0 @@
|
|||||||
# SHA256 checksums of snapshot zip files in this directory.
|
|
||||||
# These checksums are included in the FIPS security policy
|
|
||||||
# (validation instructions sent to the lab) and MUST NOT CHANGE.
|
|
||||||
# That is, the zip files themselves must not change.
|
|
||||||
#
|
|
||||||
# It is okay to add new zip files to the list, and it is okay to
|
|
||||||
# remove zip files from the list when they are removed from
|
|
||||||
# this directory. To update this file:
|
|
||||||
#
|
|
||||||
# go test cmd/go/internal/fips140 -update
|
|
||||||
#
|
|
||||||
v1.0.0.zip b50508feaeff05d22516b21e1fd210bbf5d6a1e422eaf2cfa23fe379342713b8
|
|
Binary file not shown.
@ -24,8 +24,8 @@
|
|||||||
# in the CL match the update.bash in the CL.
|
# in the CL match the update.bash in the CL.
|
||||||
|
|
||||||
# Versions to use.
|
# Versions to use.
|
||||||
CODE=2025a
|
CODE=2024a
|
||||||
DATA=2025a
|
DATA=2024a
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
@ -40,12 +40,7 @@ curl -sS -L -O https://www.iana.org/time-zones/repository/releases/tzdata$DATA.t
|
|||||||
tar xzf tzcode$CODE.tar.gz
|
tar xzf tzcode$CODE.tar.gz
|
||||||
tar xzf tzdata$DATA.tar.gz
|
tar xzf tzdata$DATA.tar.gz
|
||||||
|
|
||||||
# The PACKRATLIST and PACKRATDATA options are copied from Ubuntu:
|
if ! make CFLAGS=-DSTD_INSPIRED AWK=awk TZDIR=zoneinfo posix_only >make.out 2>&1; then
|
||||||
# https://git.launchpad.net/ubuntu/+source/tzdata/tree/debian/rules?h=debian/sid
|
|
||||||
#
|
|
||||||
# You can see the description of these make variables in the tzdata Makefile:
|
|
||||||
# https://github.com/eggert/tz/blob/main/Makefile
|
|
||||||
if ! make CFLAGS=-DSTD_INSPIRED AWK=awk TZDIR=zoneinfo PACKRATDATA=backzone PACKRATLIST=zone.tab posix_only >make.out 2>&1; then
|
|
||||||
cat make.out
|
cat make.out
|
||||||
exit 2
|
exit 2
|
||||||
fi
|
fi
|
||||||
|
Binary file not shown.
191
misc/linkcheck/linkcheck.go
Normal file
191
misc/linkcheck/linkcheck.go
Normal file
@ -0,0 +1,191 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// The linkcheck command finds missing links in the godoc website.
|
||||||
|
// It crawls a URL recursively and notes URLs and URL fragments
|
||||||
|
// that it's seen and prints a report of missing links at the end.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
root = flag.String("root", "http://localhost:6060", "Root to crawl")
|
||||||
|
verbose = flag.Bool("verbose", false, "verbose")
|
||||||
|
)
|
||||||
|
|
||||||
|
var wg sync.WaitGroup // outstanding fetches
|
||||||
|
var urlq = make(chan string) // URLs to crawl
|
||||||
|
|
||||||
|
// urlFrag is a URL and its optional #fragment (without the #)
|
||||||
|
type urlFrag struct {
|
||||||
|
url, frag string
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
mu sync.Mutex
|
||||||
|
crawled = make(map[string]bool) // URL without fragment -> true
|
||||||
|
neededFrags = make(map[urlFrag][]string) // URL#frag -> who needs it
|
||||||
|
)
|
||||||
|
|
||||||
|
var aRx = regexp.MustCompile(`<a href=['"]?(/[^\s'">]+)`)
|
||||||
|
|
||||||
|
// Owned by crawlLoop goroutine:
|
||||||
|
var (
|
||||||
|
linkSources = make(map[string][]string) // url no fragment -> sources
|
||||||
|
fragExists = make(map[urlFrag]bool)
|
||||||
|
problems []string
|
||||||
|
)
|
||||||
|
|
||||||
|
func localLinks(body string) (links []string) {
|
||||||
|
seen := map[string]bool{}
|
||||||
|
mv := aRx.FindAllStringSubmatch(body, -1)
|
||||||
|
for _, m := range mv {
|
||||||
|
ref := m[1]
|
||||||
|
if strings.HasPrefix(ref, "/src/") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !seen[ref] {
|
||||||
|
seen[ref] = true
|
||||||
|
links = append(links, m[1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var idRx = regexp.MustCompile(`\bid=['"]?([^\s'">]+)`)
|
||||||
|
|
||||||
|
func pageIDs(body string) (ids []string) {
|
||||||
|
mv := idRx.FindAllStringSubmatch(body, -1)
|
||||||
|
for _, m := range mv {
|
||||||
|
ids = append(ids, m[1])
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// url may contain a #fragment, and the fragment is then noted as needing to exist.
|
||||||
|
func crawl(url string, sourceURL string) {
|
||||||
|
if strings.Contains(url, "/devel/release") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
if u, frag, ok := strings.Cut(url, "#"); ok {
|
||||||
|
url = u
|
||||||
|
if frag != "" {
|
||||||
|
uf := urlFrag{url, frag}
|
||||||
|
neededFrags[uf] = append(neededFrags[uf], sourceURL)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if crawled[url] {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
crawled[url] = true
|
||||||
|
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
urlq <- url
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
func addProblem(url, errmsg string) {
|
||||||
|
msg := fmt.Sprintf("Error on %s: %s (from %s)", url, errmsg, linkSources[url])
|
||||||
|
if *verbose {
|
||||||
|
log.Print(msg)
|
||||||
|
}
|
||||||
|
problems = append(problems, msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func crawlLoop() {
|
||||||
|
for url := range urlq {
|
||||||
|
if err := doCrawl(url); err != nil {
|
||||||
|
addProblem(url, err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func doCrawl(url string) error {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
res, err := http.DefaultTransport.RoundTrip(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Handle redirects.
|
||||||
|
if res.StatusCode/100 == 3 {
|
||||||
|
newURL, err := res.Location()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("resolving redirect: %v", err)
|
||||||
|
}
|
||||||
|
if !strings.HasPrefix(newURL.String(), *root) {
|
||||||
|
// Skip off-site redirects.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
crawl(newURL.String(), url)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if res.StatusCode != 200 {
|
||||||
|
return errors.New(res.Status)
|
||||||
|
}
|
||||||
|
slurp, err := io.ReadAll(res.Body)
|
||||||
|
res.Body.Close()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Error reading %s body: %v", url, err)
|
||||||
|
}
|
||||||
|
if *verbose {
|
||||||
|
log.Printf("Len of %s: %d", url, len(slurp))
|
||||||
|
}
|
||||||
|
body := string(slurp)
|
||||||
|
for _, ref := range localLinks(body) {
|
||||||
|
if *verbose {
|
||||||
|
log.Printf(" links to %s", ref)
|
||||||
|
}
|
||||||
|
dest := *root + ref
|
||||||
|
linkSources[dest] = append(linkSources[dest], url)
|
||||||
|
crawl(dest, url)
|
||||||
|
}
|
||||||
|
for _, id := range pageIDs(body) {
|
||||||
|
if *verbose {
|
||||||
|
log.Printf(" url %s has #%s", url, id)
|
||||||
|
}
|
||||||
|
fragExists[urlFrag{url, id}] = true
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
go crawlLoop()
|
||||||
|
crawl(*root, "")
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
close(urlq)
|
||||||
|
for uf, needers := range neededFrags {
|
||||||
|
if !fragExists[uf] {
|
||||||
|
problems = append(problems, fmt.Sprintf("Missing fragment for %+v from %v", uf, needers))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, s := range problems {
|
||||||
|
fmt.Println(s)
|
||||||
|
}
|
||||||
|
if len(problems) > 0 {
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
@ -17,7 +17,7 @@ license that can be found in the LICENSE file.
|
|||||||
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script>
|
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script>
|
||||||
(see https://caniuse.com/#feat=textencoder)
|
(see https://caniuse.com/#feat=textencoder)
|
||||||
-->
|
-->
|
||||||
<script src="../../lib/wasm/wasm_exec.js"></script>
|
<script src="wasm_exec.js"></script>
|
||||||
<script>
|
<script>
|
||||||
if (!WebAssembly.instantiateStreaming) { // polyfill
|
if (!WebAssembly.instantiateStreaming) { // polyfill
|
||||||
WebAssembly.instantiateStreaming = async (resp, importObject) => {
|
WebAssembly.instantiateStreaming = async (resp, importObject) => {
|
||||||
|
@ -14,7 +14,7 @@
|
|||||||
if (!globalThis.fs) {
|
if (!globalThis.fs) {
|
||||||
let outputBuf = "";
|
let outputBuf = "";
|
||||||
globalThis.fs = {
|
globalThis.fs = {
|
||||||
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1, O_DIRECTORY: -1 }, // unused
|
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1 }, // unused
|
||||||
writeSync(fd, buf) {
|
writeSync(fd, buf) {
|
||||||
outputBuf += decoder.decode(buf);
|
outputBuf += decoder.decode(buf);
|
||||||
const nl = outputBuf.lastIndexOf("\n");
|
const nl = outputBuf.lastIndexOf("\n");
|
||||||
@ -73,14 +73,6 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!globalThis.path) {
|
|
||||||
globalThis.path = {
|
|
||||||
resolve(...pathSegments) {
|
|
||||||
return pathSegments.join("/");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!globalThis.crypto) {
|
if (!globalThis.crypto) {
|
||||||
throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");
|
throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");
|
||||||
}
|
}
|
||||||
@ -216,16 +208,10 @@
|
|||||||
return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len));
|
return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len));
|
||||||
}
|
}
|
||||||
|
|
||||||
const testCallExport = (a, b) => {
|
|
||||||
this._inst.exports.testExport0();
|
|
||||||
return this._inst.exports.testExport(a, b);
|
|
||||||
}
|
|
||||||
|
|
||||||
const timeOrigin = Date.now() - performance.now();
|
const timeOrigin = Date.now() - performance.now();
|
||||||
this.importObject = {
|
this.importObject = {
|
||||||
_gotest: {
|
_gotest: {
|
||||||
add: (a, b) => a + b,
|
add: (a, b) => a + b,
|
||||||
callExport: testCallExport,
|
|
||||||
},
|
},
|
||||||
gojs: {
|
gojs: {
|
||||||
// Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters)
|
// Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters)
|
@ -11,7 +11,6 @@ if (process.argv.length < 3) {
|
|||||||
|
|
||||||
globalThis.require = require;
|
globalThis.require = require;
|
||||||
globalThis.fs = require("fs");
|
globalThis.fs = require("fs");
|
||||||
globalThis.path = require("path");
|
|
||||||
globalThis.TextEncoder = require("util").TextEncoder;
|
globalThis.TextEncoder = require("util").TextEncoder;
|
||||||
globalThis.TextDecoder = require("util").TextDecoder;
|
globalThis.TextDecoder = require("util").TextDecoder;
|
||||||
|
|
@ -33,10 +33,6 @@ Before updating vendor directories, ensure that module mode is enabled.
|
|||||||
Make sure that GO111MODULE is not set in the environment, or that it is
|
Make sure that GO111MODULE is not set in the environment, or that it is
|
||||||
set to 'on' or 'auto', and if you use a go.work file, set GOWORK=off.
|
set to 'on' or 'auto', and if you use a go.work file, set GOWORK=off.
|
||||||
|
|
||||||
Also, ensure that 'go env GOROOT' shows the root of this Go source
|
|
||||||
tree. Otherwise, the results are undefined. It's recommended to build
|
|
||||||
Go from source and use that 'go' binary to update its source tree.
|
|
||||||
|
|
||||||
Requirements may be added, updated, and removed with 'go get'.
|
Requirements may be added, updated, and removed with 'go get'.
|
||||||
The vendor directory may be updated with 'go mod vendor'.
|
The vendor directory may be updated with 'go mod vendor'.
|
||||||
A typical sequence might be:
|
A typical sequence might be:
|
||||||
|
20
src/all.bat
20
src/all.bat
@ -6,11 +6,17 @@
|
|||||||
|
|
||||||
setlocal
|
setlocal
|
||||||
|
|
||||||
if not exist make.bat (
|
if exist make.bat goto ok
|
||||||
echo all.bat must be run from go\src
|
echo all.bat must be run from go\src
|
||||||
exit /b 1
|
:: cannot exit: would kill parent command interpreter
|
||||||
)
|
goto end
|
||||||
|
:ok
|
||||||
|
|
||||||
call .\make.bat --no-banner || exit /b 1
|
call .\make.bat --no-banner --no-local
|
||||||
call .\run.bat --no-rebuild || exit /b 1
|
if %GOBUILDFAIL%==1 goto end
|
||||||
..\bin\go tool dist banner
|
call .\run.bat --no-rebuild --no-local
|
||||||
|
if %GOBUILDFAIL%==1 goto end
|
||||||
|
"%GOTOOLDIR%/dist" banner
|
||||||
|
|
||||||
|
:end
|
||||||
|
if x%GOBUILDEXIT%==x1 exit %GOBUILDFAIL%
|
||||||
|
@ -15,7 +15,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"internal/godebug"
|
"internal/godebug"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"maps"
|
|
||||||
"math"
|
"math"
|
||||||
"path"
|
"path"
|
||||||
"reflect"
|
"reflect"
|
||||||
@ -697,14 +696,24 @@ func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
|
|||||||
h.Gname = sys.Gname
|
h.Gname = sys.Gname
|
||||||
h.AccessTime = sys.AccessTime
|
h.AccessTime = sys.AccessTime
|
||||||
h.ChangeTime = sys.ChangeTime
|
h.ChangeTime = sys.ChangeTime
|
||||||
h.Xattrs = maps.Clone(sys.Xattrs)
|
if sys.Xattrs != nil {
|
||||||
|
h.Xattrs = make(map[string]string)
|
||||||
|
for k, v := range sys.Xattrs {
|
||||||
|
h.Xattrs[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
if sys.Typeflag == TypeLink {
|
if sys.Typeflag == TypeLink {
|
||||||
// hard link
|
// hard link
|
||||||
h.Typeflag = TypeLink
|
h.Typeflag = TypeLink
|
||||||
h.Size = 0
|
h.Size = 0
|
||||||
h.Linkname = sys.Linkname
|
h.Linkname = sys.Linkname
|
||||||
}
|
}
|
||||||
h.PAXRecords = maps.Clone(sys.PAXRecords)
|
if sys.PAXRecords != nil {
|
||||||
|
h.PAXRecords = make(map[string]string)
|
||||||
|
for k, v := range sys.PAXRecords {
|
||||||
|
h.PAXRecords[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
var doNameLookups = true
|
var doNameLookups = true
|
||||||
if iface, ok := fi.(FileInfoNames); ok {
|
if iface, ok := fi.(FileInfoNames); ok {
|
||||||
|
@ -7,16 +7,14 @@ package tar
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"compress/bzip2"
|
"compress/bzip2"
|
||||||
|
"crypto/md5"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"hash/crc32"
|
|
||||||
"io"
|
"io"
|
||||||
"maps"
|
|
||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"reflect"
|
"reflect"
|
||||||
"slices"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
@ -27,7 +25,7 @@ func TestReader(t *testing.T) {
|
|||||||
vectors := []struct {
|
vectors := []struct {
|
||||||
file string // Test input file
|
file string // Test input file
|
||||||
headers []*Header // Expected output headers
|
headers []*Header // Expected output headers
|
||||||
chksums []string // CRC32 checksum of files, leave as nil if not checked
|
chksums []string // MD5 checksum of files, leave as nil if not checked
|
||||||
err error // Expected error to occur
|
err error // Expected error to occur
|
||||||
}{{
|
}{{
|
||||||
file: "testdata/gnu.tar",
|
file: "testdata/gnu.tar",
|
||||||
@ -55,8 +53,8 @@ func TestReader(t *testing.T) {
|
|||||||
Format: FormatGNU,
|
Format: FormatGNU,
|
||||||
}},
|
}},
|
||||||
chksums: []string{
|
chksums: []string{
|
||||||
"6cbd88fc",
|
"e38b27eaccb4391bdec553a7f3ae6b2f",
|
||||||
"ddac04b3",
|
"c65bd2e50a56a2138bf1716f2fd56fe9",
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
file: "testdata/sparse-formats.tar",
|
file: "testdata/sparse-formats.tar",
|
||||||
@ -149,11 +147,11 @@ func TestReader(t *testing.T) {
|
|||||||
Format: FormatGNU,
|
Format: FormatGNU,
|
||||||
}},
|
}},
|
||||||
chksums: []string{
|
chksums: []string{
|
||||||
"5375e1d2",
|
"6f53234398c2449fe67c1812d993012f",
|
||||||
"5375e1d2",
|
"6f53234398c2449fe67c1812d993012f",
|
||||||
"5375e1d2",
|
"6f53234398c2449fe67c1812d993012f",
|
||||||
"5375e1d2",
|
"6f53234398c2449fe67c1812d993012f",
|
||||||
"8eb179ba",
|
"b0061974914468de549a2af8ced10316",
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
file: "testdata/star.tar",
|
file: "testdata/star.tar",
|
||||||
@ -270,7 +268,7 @@ func TestReader(t *testing.T) {
|
|||||||
Format: FormatPAX,
|
Format: FormatPAX,
|
||||||
}},
|
}},
|
||||||
chksums: []string{
|
chksums: []string{
|
||||||
"5fd7e86a",
|
"0afb597b283fe61b5d4879669a350556",
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
file: "testdata/pax-records.tar",
|
file: "testdata/pax-records.tar",
|
||||||
@ -657,7 +655,7 @@ func TestReader(t *testing.T) {
|
|||||||
if v.chksums == nil {
|
if v.chksums == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
h := crc32.NewIEEE()
|
h := md5.New()
|
||||||
_, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read
|
_, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read
|
||||||
if err != nil {
|
if err != nil {
|
||||||
break
|
break
|
||||||
@ -1019,7 +1017,7 @@ func TestParsePAX(t *testing.T) {
|
|||||||
for i, v := range vectors {
|
for i, v := range vectors {
|
||||||
r := strings.NewReader(v.in)
|
r := strings.NewReader(v.in)
|
||||||
got, err := parsePAX(r)
|
got, err := parsePAX(r)
|
||||||
if !maps.Equal(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
|
if !reflect.DeepEqual(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
|
||||||
t.Errorf("test %d, parsePAX():\ngot %v\nwant %v", i, got, v.want)
|
t.Errorf("test %d, parsePAX():\ngot %v\nwant %v", i, got, v.want)
|
||||||
}
|
}
|
||||||
if ok := err == nil; ok != v.ok {
|
if ok := err == nil; ok != v.ok {
|
||||||
@ -1136,7 +1134,7 @@ func TestReadOldGNUSparseMap(t *testing.T) {
|
|||||||
v.input = v.input[copy(blk[:], v.input):]
|
v.input = v.input[copy(blk[:], v.input):]
|
||||||
tr := Reader{r: bytes.NewReader(v.input)}
|
tr := Reader{r: bytes.NewReader(v.input)}
|
||||||
got, err := tr.readOldGNUSparseMap(&hdr, &blk)
|
got, err := tr.readOldGNUSparseMap(&hdr, &blk)
|
||||||
if !slices.Equal(got, v.wantMap) {
|
if !equalSparseEntries(got, v.wantMap) {
|
||||||
t.Errorf("test %d, readOldGNUSparseMap(): got %v, want %v", i, got, v.wantMap)
|
t.Errorf("test %d, readOldGNUSparseMap(): got %v, want %v", i, got, v.wantMap)
|
||||||
}
|
}
|
||||||
if err != v.wantErr {
|
if err != v.wantErr {
|
||||||
@ -1327,7 +1325,7 @@ func TestReadGNUSparsePAXHeaders(t *testing.T) {
|
|||||||
r := strings.NewReader(v.inputData + "#") // Add canary byte
|
r := strings.NewReader(v.inputData + "#") // Add canary byte
|
||||||
tr := Reader{curr: ®FileReader{r, int64(r.Len())}}
|
tr := Reader{curr: ®FileReader{r, int64(r.Len())}}
|
||||||
got, err := tr.readGNUSparsePAXHeaders(&hdr)
|
got, err := tr.readGNUSparsePAXHeaders(&hdr)
|
||||||
if !slices.Equal(got, v.wantMap) {
|
if !equalSparseEntries(got, v.wantMap) {
|
||||||
t.Errorf("test %d, readGNUSparsePAXHeaders(): got %v, want %v", i, got, v.wantMap)
|
t.Errorf("test %d, readGNUSparsePAXHeaders(): got %v, want %v", i, got, v.wantMap)
|
||||||
}
|
}
|
||||||
if err != v.wantErr {
|
if err != v.wantErr {
|
||||||
|
@ -11,13 +11,11 @@ import (
|
|||||||
"internal/testenv"
|
"internal/testenv"
|
||||||
"io"
|
"io"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"maps"
|
|
||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"reflect"
|
"reflect"
|
||||||
"slices"
|
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@ -100,6 +98,10 @@ func (f *testFile) Seek(pos int64, whence int) (int64, error) {
|
|||||||
return f.pos, nil
|
return f.pos, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func equalSparseEntries(x, y []sparseEntry) bool {
|
||||||
|
return (len(x) == 0 && len(y) == 0) || reflect.DeepEqual(x, y)
|
||||||
|
}
|
||||||
|
|
||||||
func TestSparseEntries(t *testing.T) {
|
func TestSparseEntries(t *testing.T) {
|
||||||
vectors := []struct {
|
vectors := []struct {
|
||||||
in []sparseEntry
|
in []sparseEntry
|
||||||
@ -196,11 +198,11 @@ func TestSparseEntries(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
gotAligned := alignSparseEntries(append([]sparseEntry{}, v.in...), v.size)
|
gotAligned := alignSparseEntries(append([]sparseEntry{}, v.in...), v.size)
|
||||||
if !slices.Equal(gotAligned, v.wantAligned) {
|
if !equalSparseEntries(gotAligned, v.wantAligned) {
|
||||||
t.Errorf("test %d, alignSparseEntries():\ngot %v\nwant %v", i, gotAligned, v.wantAligned)
|
t.Errorf("test %d, alignSparseEntries():\ngot %v\nwant %v", i, gotAligned, v.wantAligned)
|
||||||
}
|
}
|
||||||
gotInverted := invertSparseEntries(append([]sparseEntry{}, v.in...), v.size)
|
gotInverted := invertSparseEntries(append([]sparseEntry{}, v.in...), v.size)
|
||||||
if !slices.Equal(gotInverted, v.wantInverted) {
|
if !equalSparseEntries(gotInverted, v.wantInverted) {
|
||||||
t.Errorf("test %d, inverseSparseEntries():\ngot %v\nwant %v", i, gotInverted, v.wantInverted)
|
t.Errorf("test %d, inverseSparseEntries():\ngot %v\nwant %v", i, gotInverted, v.wantInverted)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -742,7 +744,7 @@ func TestHeaderAllowedFormats(t *testing.T) {
|
|||||||
if formats != v.formats {
|
if formats != v.formats {
|
||||||
t.Errorf("test %d, allowedFormats(): got %v, want %v", i, formats, v.formats)
|
t.Errorf("test %d, allowedFormats(): got %v, want %v", i, formats, v.formats)
|
||||||
}
|
}
|
||||||
if formats&FormatPAX > 0 && !maps.Equal(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
|
if formats&FormatPAX > 0 && !reflect.DeepEqual(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
|
||||||
t.Errorf("test %d, allowedFormats():\ngot %v\nwant %s", i, paxHdrs, v.paxHdrs)
|
t.Errorf("test %d, allowedFormats():\ngot %v\nwant %s", i, paxHdrs, v.paxHdrs)
|
||||||
}
|
}
|
||||||
if (formats != FormatUnknown) && (err != nil) {
|
if (formats != FormatUnknown) && (err != nil) {
|
||||||
|
@ -9,7 +9,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"maps"
|
|
||||||
"path"
|
"path"
|
||||||
"slices"
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
@ -170,10 +169,16 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
|
|||||||
// Write PAX records to the output.
|
// Write PAX records to the output.
|
||||||
isGlobal := hdr.Typeflag == TypeXGlobalHeader
|
isGlobal := hdr.Typeflag == TypeXGlobalHeader
|
||||||
if len(paxHdrs) > 0 || isGlobal {
|
if len(paxHdrs) > 0 || isGlobal {
|
||||||
|
// Sort keys for deterministic ordering.
|
||||||
|
var keys []string
|
||||||
|
for k := range paxHdrs {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
slices.Sort(keys)
|
||||||
|
|
||||||
// Write each record to a buffer.
|
// Write each record to a buffer.
|
||||||
var buf strings.Builder
|
var buf strings.Builder
|
||||||
// Sort keys for deterministic ordering.
|
for _, k := range keys {
|
||||||
for _, k := range slices.Sorted(maps.Keys(paxHdrs)) {
|
|
||||||
rec, err := formatPAXRecord(k, paxHdrs[k])
|
rec, err := formatPAXRecord(k, paxHdrs[k])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -408,37 +413,25 @@ func (tw *Writer) AddFS(fsys fs.FS) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if name == "." {
|
if d.IsDir() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
info, err := d.Info()
|
info, err := d.Info()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
linkTarget := ""
|
// TODO(#49580): Handle symlinks when fs.ReadLinkFS is available.
|
||||||
if typ := d.Type(); typ == fs.ModeSymlink {
|
if !info.Mode().IsRegular() {
|
||||||
var err error
|
|
||||||
linkTarget, err = fs.ReadLink(fsys, name)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else if !typ.IsRegular() && typ != fs.ModeDir {
|
|
||||||
return errors.New("tar: cannot add non-regular file")
|
return errors.New("tar: cannot add non-regular file")
|
||||||
}
|
}
|
||||||
h, err := FileInfoHeader(info, linkTarget)
|
h, err := FileInfoHeader(info, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
h.Name = name
|
h.Name = name
|
||||||
if d.IsDir() {
|
|
||||||
h.Name += "/"
|
|
||||||
}
|
|
||||||
if err := tw.WriteHeader(h); err != nil {
|
if err := tw.WriteHeader(h); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if !d.Type().IsRegular() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
f, err := fsys.Open(name)
|
f, err := fsys.Open(name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -675,7 +668,6 @@ func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
|
|||||||
func (sw sparseFileWriter) logicalRemaining() int64 {
|
func (sw sparseFileWriter) logicalRemaining() int64 {
|
||||||
return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
|
return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sw sparseFileWriter) physicalRemaining() int64 {
|
func (sw sparseFileWriter) physicalRemaining() int64 {
|
||||||
return sw.fw.physicalRemaining()
|
return sw.fw.physicalRemaining()
|
||||||
}
|
}
|
||||||
|
@ -10,11 +10,10 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"maps"
|
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
"reflect"
|
||||||
"slices"
|
"slices"
|
||||||
"sort"
|
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"testing/fstest"
|
"testing/fstest"
|
||||||
@ -703,7 +702,7 @@ func TestPaxXattrs(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
if !maps.Equal(hdr.Xattrs, xattrs) {
|
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
|
||||||
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
|
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
|
||||||
hdr.Xattrs, xattrs)
|
hdr.Xattrs, xattrs)
|
||||||
}
|
}
|
||||||
@ -1339,41 +1338,29 @@ func TestFileWriter(t *testing.T) {
|
|||||||
|
|
||||||
func TestWriterAddFS(t *testing.T) {
|
func TestWriterAddFS(t *testing.T) {
|
||||||
fsys := fstest.MapFS{
|
fsys := fstest.MapFS{
|
||||||
"emptyfolder": {Mode: 0o755 | os.ModeDir},
|
|
||||||
"file.go": {Data: []byte("hello")},
|
"file.go": {Data: []byte("hello")},
|
||||||
"subfolder/another.go": {Data: []byte("world")},
|
"subfolder/another.go": {Data: []byte("world")},
|
||||||
"symlink.go": {Mode: 0o777 | os.ModeSymlink, Data: []byte("file.go")},
|
|
||||||
// Notably missing here is the "subfolder" directory. This makes sure even
|
|
||||||
// if we don't have a subfolder directory listed.
|
|
||||||
}
|
}
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
tw := NewWriter(&buf)
|
tw := NewWriter(&buf)
|
||||||
if err := tw.AddFS(fsys); err != nil {
|
if err := tw.AddFS(fsys); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
if err := tw.Close(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add subfolder into fsys to match what we'll read from the tar.
|
|
||||||
fsys["subfolder"] = &fstest.MapFile{Mode: 0o555 | os.ModeDir}
|
|
||||||
|
|
||||||
// Test that we can get the files back from the archive
|
// Test that we can get the files back from the archive
|
||||||
tr := NewReader(&buf)
|
tr := NewReader(&buf)
|
||||||
|
|
||||||
names := make([]string, 0, len(fsys))
|
entries, err := fsys.ReadDir(".")
|
||||||
for name := range fsys {
|
if err != nil {
|
||||||
names = append(names, name)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
sort.Strings(names)
|
|
||||||
|
|
||||||
entriesLeft := len(fsys)
|
var curfname string
|
||||||
for _, name := range names {
|
for _, entry := range entries {
|
||||||
entriesLeft--
|
curfname = entry.Name()
|
||||||
|
if entry.IsDir() {
|
||||||
entryInfo, err := fsys.Lstat(name)
|
curfname += "/"
|
||||||
if err != nil {
|
continue
|
||||||
t.Fatalf("getting entry info error: %v", err)
|
|
||||||
}
|
}
|
||||||
hdr, err := tr.Next()
|
hdr, err := tr.Next()
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
@ -1383,42 +1370,22 @@ func TestWriterAddFS(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
tmpName := name
|
data, err := io.ReadAll(tr)
|
||||||
if entryInfo.IsDir() {
|
if err != nil {
|
||||||
tmpName += "/"
|
t.Fatal(err)
|
||||||
}
|
|
||||||
if hdr.Name != tmpName {
|
|
||||||
t.Errorf("test fs has filename %v; archive header has %v",
|
|
||||||
name, hdr.Name)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if entryInfo.Mode() != hdr.FileInfo().Mode() {
|
if hdr.Name != curfname {
|
||||||
t.Errorf("%s: test fs has mode %v; archive header has %v",
|
t.Fatalf("got filename %v, want %v",
|
||||||
name, entryInfo.Mode(), hdr.FileInfo().Mode())
|
curfname, hdr.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
switch entryInfo.Mode().Type() {
|
origdata := fsys[curfname].Data
|
||||||
case fs.ModeDir:
|
if string(data) != string(origdata) {
|
||||||
// No additional checks necessary.
|
t.Fatalf("got file content %v, want %v",
|
||||||
case fs.ModeSymlink:
|
data, origdata)
|
||||||
origtarget := string(fsys[name].Data)
|
|
||||||
if hdr.Linkname != origtarget {
|
|
||||||
t.Fatalf("test fs has link content %s; archive header %v", origtarget, hdr.Linkname)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
data, err := io.ReadAll(tr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
origdata := fsys[name].Data
|
|
||||||
if string(data) != string(origdata) {
|
|
||||||
t.Fatalf("test fs has file content %v; archive header has %v", origdata, data)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if entriesLeft > 0 {
|
|
||||||
t.Fatalf("not all entries are in the archive")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWriterAddFSNonRegularFiles(t *testing.T) {
|
func TestWriterAddFSNonRegularFiles(t *testing.T) {
|
||||||
|
@ -8,7 +8,6 @@ import (
|
|||||||
"bufio"
|
"bufio"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"hash"
|
"hash"
|
||||||
"hash/crc32"
|
"hash/crc32"
|
||||||
"internal/godebug"
|
"internal/godebug"
|
||||||
@ -805,9 +804,6 @@ func toValidName(name string) string {
|
|||||||
|
|
||||||
func (r *Reader) initFileList() {
|
func (r *Reader) initFileList() {
|
||||||
r.fileListOnce.Do(func() {
|
r.fileListOnce.Do(func() {
|
||||||
// Preallocate the minimum size of the index.
|
|
||||||
// We may also synthesize additional directory entries.
|
|
||||||
r.fileList = make([]fileListEntry, 0, len(r.File))
|
|
||||||
// files and knownDirs map from a file/directory name
|
// files and knownDirs map from a file/directory name
|
||||||
// to an index into the r.fileList entry that we are
|
// to an index into the r.fileList entry that we are
|
||||||
// building. They are used to mark duplicate entries.
|
// building. They are used to mark duplicate entries.
|
||||||
@ -906,8 +902,14 @@ func (r *Reader) Open(name string) (fs.File, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func split(name string) (dir, elem string, isDir bool) {
|
func split(name string) (dir, elem string, isDir bool) {
|
||||||
name, isDir = strings.CutSuffix(name, "/")
|
if len(name) > 0 && name[len(name)-1] == '/' {
|
||||||
i := strings.LastIndexByte(name, '/')
|
isDir = true
|
||||||
|
name = name[:len(name)-1]
|
||||||
|
}
|
||||||
|
i := len(name) - 1
|
||||||
|
for i >= 0 && name[i] != '/' {
|
||||||
|
i--
|
||||||
|
}
|
||||||
if i < 0 {
|
if i < 0 {
|
||||||
return ".", name, isDir
|
return ".", name, isDir
|
||||||
}
|
}
|
||||||
@ -989,12 +991,6 @@ func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
|
|||||||
s, err := d.files[d.offset+i].stat()
|
s, err := d.files[d.offset+i].stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
} else if s.Name() == "." || !fs.ValidPath(s.Name()) {
|
|
||||||
return nil, &fs.PathError{
|
|
||||||
Op: "readdir",
|
|
||||||
Path: d.e.name,
|
|
||||||
Err: fmt.Errorf("invalid file name: %v", d.files[d.offset+i].name),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
list[i] = s
|
list[i] = s
|
||||||
}
|
}
|
||||||
|
@ -8,14 +8,13 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"errors"
|
|
||||||
"internal/obscuretestdata"
|
"internal/obscuretestdata"
|
||||||
"io"
|
"io"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
"regexp"
|
"regexp"
|
||||||
"slices"
|
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"testing/fstest"
|
"testing/fstest"
|
||||||
@ -1275,56 +1274,13 @@ func TestFSWalk(t *testing.T) {
|
|||||||
} else if !test.wantErr && sawErr {
|
} else if !test.wantErr && sawErr {
|
||||||
t.Error("unexpected error")
|
t.Error("unexpected error")
|
||||||
}
|
}
|
||||||
if test.want != nil && !slices.Equal(files, test.want) {
|
if test.want != nil && !reflect.DeepEqual(files, test.want) {
|
||||||
t.Errorf("got %v want %v", files, test.want)
|
t.Errorf("got %v want %v", files, test.want)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFSWalkBadFile(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
zw := NewWriter(&buf)
|
|
||||||
hdr := &FileHeader{Name: "."}
|
|
||||||
hdr.SetMode(fs.ModeDir | 0o755)
|
|
||||||
w, err := zw.CreateHeader(hdr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("create zip header: %v", err)
|
|
||||||
}
|
|
||||||
_, err = w.Write([]byte("some data"))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("write zip contents: %v", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
err = zw.Close()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("close zip writer: %v", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
zr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("create zip reader: %v", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
var count int
|
|
||||||
var errRepeat = errors.New("repeated call to path")
|
|
||||||
err = fs.WalkDir(zr, ".", func(p string, d fs.DirEntry, err error) error {
|
|
||||||
count++
|
|
||||||
if count > 2 { // once for directory read, once for the error
|
|
||||||
return errRepeat
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("expected error from invalid file name")
|
|
||||||
} else if errors.Is(err, errRepeat) {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFSModTime(t *testing.T) {
|
func TestFSModTime(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
z, err := OpenReader("testdata/subdir.zip")
|
z, err := OpenReader("testdata/subdir.zip")
|
||||||
@ -1624,7 +1580,7 @@ func TestCVE202141772(t *testing.T) {
|
|||||||
t.Errorf("Opening %q with fs.FS API succeeded", f.Name)
|
t.Errorf("Opening %q with fs.FS API succeeded", f.Name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !slices.Equal(names, entryNames) {
|
if !reflect.DeepEqual(names, entryNames) {
|
||||||
t.Errorf("Unexpected file entries: %q", names)
|
t.Errorf("Unexpected file entries: %q", names)
|
||||||
}
|
}
|
||||||
if _, err := r.Open(""); err == nil {
|
if _, err := r.Open(""); err == nil {
|
||||||
@ -1737,7 +1693,7 @@ func TestInsecurePaths(t *testing.T) {
|
|||||||
for _, f := range zr.File {
|
for _, f := range zr.File {
|
||||||
gotPaths = append(gotPaths, f.Name)
|
gotPaths = append(gotPaths, f.Name)
|
||||||
}
|
}
|
||||||
if !slices.Equal(gotPaths, []string{path}) {
|
if !reflect.DeepEqual(gotPaths, []string{path}) {
|
||||||
t.Errorf("NewReader for archive with file %q: got files %q", path, gotPaths)
|
t.Errorf("NewReader for archive with file %q: got files %q", path, gotPaths)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -1762,7 +1718,7 @@ func TestDisableInsecurePathCheck(t *testing.T) {
|
|||||||
for _, f := range zr.File {
|
for _, f := range zr.File {
|
||||||
gotPaths = append(gotPaths, f.Name)
|
gotPaths = append(gotPaths, f.Name)
|
||||||
}
|
}
|
||||||
if want := []string{name}; !slices.Equal(gotPaths, want) {
|
if want := []string{name}; !reflect.DeepEqual(gotPaths, want) {
|
||||||
t.Errorf("NewReader with zipinsecurepath=1: got files %q, want %q", gotPaths, want)
|
t.Errorf("NewReader with zipinsecurepath=1: got files %q, want %q", gotPaths, want)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -505,14 +505,14 @@ func (w *Writer) AddFS(fsys fs.FS) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if name == "." {
|
if d.IsDir() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
info, err := d.Info()
|
info, err := d.Info()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if !d.IsDir() && !info.Mode().IsRegular() {
|
if !info.Mode().IsRegular() {
|
||||||
return errors.New("zip: cannot add non-regular file")
|
return errors.New("zip: cannot add non-regular file")
|
||||||
}
|
}
|
||||||
h, err := FileInfoHeader(info)
|
h, err := FileInfoHeader(info)
|
||||||
@ -520,17 +520,11 @@ func (w *Writer) AddFS(fsys fs.FS) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
h.Name = name
|
h.Name = name
|
||||||
if d.IsDir() {
|
|
||||||
h.Name += "/"
|
|
||||||
}
|
|
||||||
h.Method = Deflate
|
h.Method = Deflate
|
||||||
fw, err := w.CreateHeader(h)
|
fw, err := w.CreateHeader(h)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if d.IsDir() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
f, err := fsys.Open(name)
|
f, err := fsys.Open(name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -108,7 +108,7 @@ func TestWriter(t *testing.T) {
|
|||||||
|
|
||||||
// TestWriterComment is test for EOCD comment read/write.
|
// TestWriterComment is test for EOCD comment read/write.
|
||||||
func TestWriterComment(t *testing.T) {
|
func TestWriterComment(t *testing.T) {
|
||||||
tests := []struct {
|
var tests = []struct {
|
||||||
comment string
|
comment string
|
||||||
ok bool
|
ok bool
|
||||||
}{
|
}{
|
||||||
@ -158,7 +158,7 @@ func TestWriterComment(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestWriterUTF8(t *testing.T) {
|
func TestWriterUTF8(t *testing.T) {
|
||||||
utf8Tests := []struct {
|
var utf8Tests = []struct {
|
||||||
name string
|
name string
|
||||||
comment string
|
comment string
|
||||||
nonUTF8 bool
|
nonUTF8 bool
|
||||||
@ -619,32 +619,32 @@ func TestWriterAddFS(t *testing.T) {
|
|||||||
buf := new(bytes.Buffer)
|
buf := new(bytes.Buffer)
|
||||||
w := NewWriter(buf)
|
w := NewWriter(buf)
|
||||||
tests := []WriteTest{
|
tests := []WriteTest{
|
||||||
{Name: "emptyfolder", Mode: 0o755 | os.ModeDir},
|
{
|
||||||
{Name: "file.go", Data: []byte("hello"), Mode: 0644},
|
Name: "file.go",
|
||||||
{Name: "subfolder/another.go", Data: []byte("world"), Mode: 0644},
|
Data: []byte("hello"),
|
||||||
// Notably missing here is the "subfolder" directory. This makes sure even
|
Mode: 0644,
|
||||||
// if we don't have a subfolder directory listed.
|
},
|
||||||
|
{
|
||||||
|
Name: "subfolder/another.go",
|
||||||
|
Data: []byte("world"),
|
||||||
|
Mode: 0644,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
err := w.AddFS(writeTestsToFS(tests))
|
err := w.AddFS(writeTestsToFS(tests))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := w.Close(); err != nil {
|
if err := w.Close(); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add subfolder into fsys to match what we'll read from the zip.
|
|
||||||
tests = append(tests[:2:2], WriteTest{Name: "subfolder", Mode: 0o555 | os.ModeDir}, tests[2])
|
|
||||||
|
|
||||||
// read it back
|
// read it back
|
||||||
r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
|
r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
for i, wt := range tests {
|
for i, wt := range tests {
|
||||||
if wt.Mode.IsDir() {
|
|
||||||
wt.Name += "/"
|
|
||||||
}
|
|
||||||
testReadFile(t, r.File[i], &wt)
|
testReadFile(t, r.File[i], &wt)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -29,9 +29,6 @@ var (
|
|||||||
// Buffered input.
|
// Buffered input.
|
||||||
|
|
||||||
// Reader implements buffering for an io.Reader object.
|
// Reader implements buffering for an io.Reader object.
|
||||||
// A new Reader is created by calling [NewReader] or [NewReaderSize];
|
|
||||||
// alternatively the zero value of a Reader may be used after calling [Reset]
|
|
||||||
// on it.
|
|
||||||
type Reader struct {
|
type Reader struct {
|
||||||
buf []byte
|
buf []byte
|
||||||
rd io.Reader // reader provided by the client
|
rd io.Reader // reader provided by the client
|
||||||
@ -133,10 +130,9 @@ func (b *Reader) readErr() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Peek returns the next n bytes without advancing the reader. The bytes stop
|
// Peek returns the next n bytes without advancing the reader. The bytes stop
|
||||||
// being valid at the next read call. If necessary, Peek will read more bytes
|
// being valid at the next read call. If Peek returns fewer than n bytes, it
|
||||||
// into the buffer in order to make n bytes available. If Peek returns fewer
|
// also returns an error explaining why the read is short. The error is
|
||||||
// than n bytes, it also returns an error explaining why the read is short.
|
// [ErrBufferFull] if n is larger than b's buffer size.
|
||||||
// The error is [ErrBufferFull] if n is larger than b's buffer size.
|
|
||||||
//
|
//
|
||||||
// Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding
|
// Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding
|
||||||
// until the next read operation.
|
// until the next read operation.
|
||||||
@ -519,11 +515,9 @@ func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
|
|||||||
b.lastByte = -1
|
b.lastByte = -1
|
||||||
b.lastRuneSize = -1
|
b.lastRuneSize = -1
|
||||||
|
|
||||||
if b.r < b.w {
|
n, err = b.writeBuf(w)
|
||||||
n, err = b.writeBuf(w)
|
if err != nil {
|
||||||
if err != nil {
|
return
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if r, ok := b.rd.(io.WriterTo); ok {
|
if r, ok := b.rd.(io.WriterTo); ok {
|
||||||
|
@ -9,7 +9,6 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"internal/asan"
|
|
||||||
"io"
|
"io"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"strconv"
|
"strconv"
|
||||||
@ -586,9 +585,6 @@ func TestWriteInvalidRune(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestReadStringAllocs(t *testing.T) {
|
func TestReadStringAllocs(t *testing.T) {
|
||||||
if asan.Enabled {
|
|
||||||
t.Skip("test allocates more with -asan; see #70079")
|
|
||||||
}
|
|
||||||
r := strings.NewReader(" foo foo 42 42 42 42 42 42 42 42 4.2 4.2 4.2 4.2\n")
|
r := strings.NewReader(" foo foo 42 42 42 42 42 42 42 42 4.2 4.2 4.2 4.2\n")
|
||||||
buf := NewReader(r)
|
buf := NewReader(r)
|
||||||
allocs := testing.AllocsPerRun(100, func() {
|
allocs := testing.AllocsPerRun(100, func() {
|
||||||
@ -640,7 +636,7 @@ func TestWriter(t *testing.T) {
|
|||||||
for l := 0; l < len(written); l++ {
|
for l := 0; l < len(written); l++ {
|
||||||
if written[l] != data[l] {
|
if written[l] != data[l] {
|
||||||
t.Errorf("wrong bytes written")
|
t.Errorf("wrong bytes written")
|
||||||
t.Errorf("want=%q", data[:len(written)])
|
t.Errorf("want=%q", data[0:len(written)])
|
||||||
t.Errorf("have=%q", written)
|
t.Errorf("have=%q", written)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -939,6 +935,7 @@ func (t *testReader) Read(buf []byte) (n int, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func testReadLine(t *testing.T, input []byte) {
|
func testReadLine(t *testing.T, input []byte) {
|
||||||
|
//for stride := 1; stride < len(input); stride++ {
|
||||||
for stride := 1; stride < 2; stride++ {
|
for stride := 1; stride < 2; stride++ {
|
||||||
done := 0
|
done := 0
|
||||||
reader := testReader{input, stride}
|
reader := testReader{input, stride}
|
||||||
@ -1149,7 +1146,7 @@ func (w errorWriterToTest) Write(p []byte) (int, error) {
|
|||||||
var errorWriterToTests = []errorWriterToTest{
|
var errorWriterToTests = []errorWriterToTest{
|
||||||
{1, 0, nil, io.ErrClosedPipe, io.ErrClosedPipe},
|
{1, 0, nil, io.ErrClosedPipe, io.ErrClosedPipe},
|
||||||
{0, 1, io.ErrClosedPipe, nil, io.ErrClosedPipe},
|
{0, 1, io.ErrClosedPipe, nil, io.ErrClosedPipe},
|
||||||
{0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrUnexpectedEOF},
|
{0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrClosedPipe},
|
||||||
{0, 1, io.EOF, nil, nil},
|
{0, 1, io.EOF, nil, nil},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -33,33 +33,6 @@ func ExampleWriter_AvailableBuffer() {
|
|||||||
// Output: 1 2 3 4
|
// Output: 1 2 3 4
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExampleWriter_ReadFrom demonstrates how to use the ReadFrom method of Writer.
|
|
||||||
func ExampleWriter_ReadFrom() {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
writer := bufio.NewWriter(&buf)
|
|
||||||
|
|
||||||
data := "Hello, world!\nThis is a ReadFrom example."
|
|
||||||
reader := strings.NewReader(data)
|
|
||||||
|
|
||||||
n, err := writer.ReadFrom(reader)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println("ReadFrom Error:", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = writer.Flush(); err != nil {
|
|
||||||
fmt.Println("Flush Error:", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Println("Bytes written:", n)
|
|
||||||
fmt.Println("Buffer contents:", buf.String())
|
|
||||||
// Output:
|
|
||||||
// Bytes written: 41
|
|
||||||
// Buffer contents: Hello, world!
|
|
||||||
// This is a ReadFrom example.
|
|
||||||
}
|
|
||||||
|
|
||||||
// The simplest use of a Scanner, to read standard input as a set of lines.
|
// The simplest use of a Scanner, to read standard input as a set of lines.
|
||||||
func ExampleScanner_lines() {
|
func ExampleScanner_lines() {
|
||||||
scanner := bufio.NewScanner(os.Stdin)
|
scanner := bufio.NewScanner(os.Stdin)
|
||||||
|
@ -1,96 +0,0 @@
|
|||||||
// Copyright 2025 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
//go:build unix
|
|
||||||
|
|
||||||
package bufio_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestCopyUnixpacket tests that we can use bufio when copying
|
|
||||||
// across a unixpacket socket. This used to fail due to an unnecessary
|
|
||||||
// empty Write call that was interpreted as an EOF.
|
|
||||||
func TestCopyUnixpacket(t *testing.T) {
|
|
||||||
tmpDir := t.TempDir()
|
|
||||||
socket := filepath.Join(tmpDir, "unixsock")
|
|
||||||
|
|
||||||
// Start a unixpacket server.
|
|
||||||
addr := &net.UnixAddr{
|
|
||||||
Name: socket,
|
|
||||||
Net: "unixpacket",
|
|
||||||
}
|
|
||||||
server, err := net.ListenUnix("unixpacket", addr)
|
|
||||||
if err != nil {
|
|
||||||
t.Skipf("skipping test because opening a unixpacket socket failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start a goroutine for the server to accept one connection
|
|
||||||
// and read all the data sent on the connection,
|
|
||||||
// reporting the number of bytes read on ch.
|
|
||||||
ch := make(chan int, 1)
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
tot := 0
|
|
||||||
defer func() {
|
|
||||||
ch <- tot
|
|
||||||
}()
|
|
||||||
|
|
||||||
serverConn, err := server.Accept()
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := make([]byte, 1024)
|
|
||||||
for {
|
|
||||||
n, err := serverConn.Read(buf)
|
|
||||||
tot += n
|
|
||||||
if err == io.EOF {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
clientConn, err := net.DialUnix("unixpacket", nil, addr)
|
|
||||||
if err != nil {
|
|
||||||
// Leaves the server goroutine hanging. Oh well.
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
defer wg.Wait()
|
|
||||||
defer clientConn.Close()
|
|
||||||
|
|
||||||
const data = "data"
|
|
||||||
r := bufio.NewReader(strings.NewReader(data))
|
|
||||||
n, err := io.Copy(clientConn, r)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if n != int64(len(data)) {
|
|
||||||
t.Errorf("io.Copy returned %d, want %d", n, len(data))
|
|
||||||
}
|
|
||||||
|
|
||||||
clientConn.Close()
|
|
||||||
tot := <-ch
|
|
||||||
|
|
||||||
if tot != len(data) {
|
|
||||||
t.Errorf("server read %d, want %d", tot, len(data))
|
|
||||||
}
|
|
||||||
}
|
|
@ -162,12 +162,12 @@ func delete(m map[Type]Type1, key Type)
|
|||||||
|
|
||||||
// The len built-in function returns the length of v, according to its type:
|
// The len built-in function returns the length of v, according to its type:
|
||||||
//
|
//
|
||||||
// - Array: the number of elements in v.
|
// Array: the number of elements in v.
|
||||||
// - Pointer to array: the number of elements in *v (even if v is nil).
|
// Pointer to array: the number of elements in *v (even if v is nil).
|
||||||
// - Slice, or map: the number of elements in v; if v is nil, len(v) is zero.
|
// Slice, or map: the number of elements in v; if v is nil, len(v) is zero.
|
||||||
// - String: the number of bytes in v.
|
// String: the number of bytes in v.
|
||||||
// - Channel: the number of elements queued (unread) in the channel buffer;
|
// Channel: the number of elements queued (unread) in the channel buffer;
|
||||||
// if v is nil, len(v) is zero.
|
// if v is nil, len(v) is zero.
|
||||||
//
|
//
|
||||||
// For some arguments, such as a string literal or a simple array expression, the
|
// For some arguments, such as a string literal or a simple array expression, the
|
||||||
// result can be a constant. See the Go language specification's "Length and
|
// result can be a constant. See the Go language specification's "Length and
|
||||||
@ -176,12 +176,12 @@ func len(v Type) int
|
|||||||
|
|
||||||
// The cap built-in function returns the capacity of v, according to its type:
|
// The cap built-in function returns the capacity of v, according to its type:
|
||||||
//
|
//
|
||||||
// - Array: the number of elements in v (same as len(v)).
|
// Array: the number of elements in v (same as len(v)).
|
||||||
// - Pointer to array: the number of elements in *v (same as len(v)).
|
// Pointer to array: the number of elements in *v (same as len(v)).
|
||||||
// - Slice: the maximum length the slice can reach when resliced;
|
// Slice: the maximum length the slice can reach when resliced;
|
||||||
// if v is nil, cap(v) is zero.
|
// if v is nil, cap(v) is zero.
|
||||||
// - Channel: the channel buffer capacity, in units of elements;
|
// Channel: the channel buffer capacity, in units of elements;
|
||||||
// if v is nil, cap(v) is zero.
|
// if v is nil, cap(v) is zero.
|
||||||
//
|
//
|
||||||
// For some arguments, such as a simple array expression, the result can be a
|
// For some arguments, such as a simple array expression, the result can be a
|
||||||
// constant. See the Go language specification's "Length and capacity" section for
|
// constant. See the Go language specification's "Length and capacity" section for
|
||||||
@ -194,18 +194,18 @@ func cap(v Type) int
|
|||||||
// argument, not a pointer to it. The specification of the result depends on
|
// argument, not a pointer to it. The specification of the result depends on
|
||||||
// the type:
|
// the type:
|
||||||
//
|
//
|
||||||
// - Slice: The size specifies the length. The capacity of the slice is
|
// Slice: The size specifies the length. The capacity of the slice is
|
||||||
// equal to its length. A second integer argument may be provided to
|
// equal to its length. A second integer argument may be provided to
|
||||||
// specify a different capacity; it must be no smaller than the
|
// specify a different capacity; it must be no smaller than the
|
||||||
// length. For example, make([]int, 0, 10) allocates an underlying array
|
// length. For example, make([]int, 0, 10) allocates an underlying array
|
||||||
// of size 10 and returns a slice of length 0 and capacity 10 that is
|
// of size 10 and returns a slice of length 0 and capacity 10 that is
|
||||||
// backed by this underlying array.
|
// backed by this underlying array.
|
||||||
// - Map: An empty map is allocated with enough space to hold the
|
// Map: An empty map is allocated with enough space to hold the
|
||||||
// specified number of elements. The size may be omitted, in which case
|
// specified number of elements. The size may be omitted, in which case
|
||||||
// a small starting size is allocated.
|
// a small starting size is allocated.
|
||||||
// - Channel: The channel's buffer is initialized with the specified
|
// Channel: The channel's buffer is initialized with the specified
|
||||||
// buffer capacity. If zero, or the size is omitted, the channel is
|
// buffer capacity. If zero, or the size is omitted, the channel is
|
||||||
// unbuffered.
|
// unbuffered.
|
||||||
func make(t Type, size ...IntegerType) Type
|
func make(t Type, size ...IntegerType) Type
|
||||||
|
|
||||||
// The max built-in function returns the largest value of a fixed number of
|
// The max built-in function returns the largest value of a fixed number of
|
||||||
@ -247,7 +247,7 @@ func imag(c ComplexType) FloatType
|
|||||||
// to the zero value of the respective element type. If the argument
|
// to the zero value of the respective element type. If the argument
|
||||||
// type is a type parameter, the type parameter's type set must
|
// type is a type parameter, the type parameter's type set must
|
||||||
// contain only map or slice types, and clear performs the operation
|
// contain only map or slice types, and clear performs the operation
|
||||||
// implied by the type argument. If t is nil, clear is a no-op.
|
// implied by the type argument.
|
||||||
func clear[T ~[]Type | ~map[Type]Type1](t T)
|
func clear[T ~[]Type | ~map[Type]Type1](t T)
|
||||||
|
|
||||||
// The close built-in function closes a channel, which must be either
|
// The close built-in function closes a channel, which must be either
|
||||||
|
@ -247,8 +247,8 @@ func growSlice(b []byte, n int) []byte {
|
|||||||
c = 2 * cap(b)
|
c = 2 * cap(b)
|
||||||
}
|
}
|
||||||
b2 := append([]byte(nil), make([]byte, c)...)
|
b2 := append([]byte(nil), make([]byte, c)...)
|
||||||
i := copy(b2, b)
|
copy(b2, b)
|
||||||
return b2[:i]
|
return b2[:len(b)]
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteTo writes data to w until the buffer is drained or an error occurs.
|
// WriteTo writes data to w until the buffer is drained or an error occurs.
|
||||||
|
@ -213,7 +213,7 @@ func TestLargeByteWrites(t *testing.T) {
|
|||||||
func TestLargeStringReads(t *testing.T) {
|
func TestLargeStringReads(t *testing.T) {
|
||||||
var buf Buffer
|
var buf Buffer
|
||||||
for i := 3; i < 30; i += 3 {
|
for i := 3; i < 30; i += 3 {
|
||||||
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[:len(testString)/i])
|
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[0:len(testString)/i])
|
||||||
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
|
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
|
||||||
}
|
}
|
||||||
check(t, "TestLargeStringReads (3)", &buf, "")
|
check(t, "TestLargeStringReads (3)", &buf, "")
|
||||||
@ -222,7 +222,7 @@ func TestLargeStringReads(t *testing.T) {
|
|||||||
func TestLargeByteReads(t *testing.T) {
|
func TestLargeByteReads(t *testing.T) {
|
||||||
var buf Buffer
|
var buf Buffer
|
||||||
for i := 3; i < 30; i += 3 {
|
for i := 3; i < 30; i += 3 {
|
||||||
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
|
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||||
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
|
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
|
||||||
}
|
}
|
||||||
check(t, "TestLargeByteReads (3)", &buf, "")
|
check(t, "TestLargeByteReads (3)", &buf, "")
|
||||||
@ -274,7 +274,7 @@ func TestNil(t *testing.T) {
|
|||||||
func TestReadFrom(t *testing.T) {
|
func TestReadFrom(t *testing.T) {
|
||||||
var buf Buffer
|
var buf Buffer
|
||||||
for i := 3; i < 30; i += 3 {
|
for i := 3; i < 30; i += 3 {
|
||||||
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
|
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||||
var b Buffer
|
var b Buffer
|
||||||
b.ReadFrom(&buf)
|
b.ReadFrom(&buf)
|
||||||
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(testString)))
|
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(testString)))
|
||||||
@ -337,7 +337,7 @@ func TestReadFromNegativeReader(t *testing.T) {
|
|||||||
func TestWriteTo(t *testing.T) {
|
func TestWriteTo(t *testing.T) {
|
||||||
var buf Buffer
|
var buf Buffer
|
||||||
for i := 3; i < 30; i += 3 {
|
for i := 3; i < 30; i += 3 {
|
||||||
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
|
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||||
var b Buffer
|
var b Buffer
|
||||||
buf.WriteTo(&b)
|
buf.WriteTo(&b)
|
||||||
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(testString)))
|
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(testString)))
|
||||||
|
@ -8,7 +8,6 @@ package bytes
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"internal/bytealg"
|
"internal/bytealg"
|
||||||
"math/bits"
|
|
||||||
"unicode"
|
"unicode"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
_ "unsafe" // for linkname
|
_ "unsafe" // for linkname
|
||||||
@ -137,7 +136,6 @@ func LastIndexByte(s []byte, c byte) int {
|
|||||||
// If r is [utf8.RuneError], it returns the first instance of any
|
// If r is [utf8.RuneError], it returns the first instance of any
|
||||||
// invalid UTF-8 byte sequence.
|
// invalid UTF-8 byte sequence.
|
||||||
func IndexRune(s []byte, r rune) int {
|
func IndexRune(s []byte, r rune) int {
|
||||||
const haveFastIndex = bytealg.MaxBruteForce > 0
|
|
||||||
switch {
|
switch {
|
||||||
case 0 <= r && r < utf8.RuneSelf:
|
case 0 <= r && r < utf8.RuneSelf:
|
||||||
return IndexByte(s, byte(r))
|
return IndexByte(s, byte(r))
|
||||||
@ -153,64 +151,9 @@ func IndexRune(s []byte, r rune) int {
|
|||||||
case !utf8.ValidRune(r):
|
case !utf8.ValidRune(r):
|
||||||
return -1
|
return -1
|
||||||
default:
|
default:
|
||||||
// Search for rune r using the last byte of its UTF-8 encoded form.
|
|
||||||
// The distribution of the last byte is more uniform compared to the
|
|
||||||
// first byte which has a 78% chance of being [240, 243, 244].
|
|
||||||
var b [utf8.UTFMax]byte
|
var b [utf8.UTFMax]byte
|
||||||
n := utf8.EncodeRune(b[:], r)
|
n := utf8.EncodeRune(b[:], r)
|
||||||
last := n - 1
|
return Index(s, b[:n])
|
||||||
i := last
|
|
||||||
fails := 0
|
|
||||||
for i < len(s) {
|
|
||||||
if s[i] != b[last] {
|
|
||||||
o := IndexByte(s[i+1:], b[last])
|
|
||||||
if o < 0 {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
i += o + 1
|
|
||||||
}
|
|
||||||
// Step backwards comparing bytes.
|
|
||||||
for j := 1; j < n; j++ {
|
|
||||||
if s[i-j] != b[last-j] {
|
|
||||||
goto next
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return i - last
|
|
||||||
next:
|
|
||||||
fails++
|
|
||||||
i++
|
|
||||||
if (haveFastIndex && fails > bytealg.Cutover(i)) && i < len(s) ||
|
|
||||||
(!haveFastIndex && fails >= 4+i>>4 && i < len(s)) {
|
|
||||||
goto fallback
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
|
|
||||||
fallback:
|
|
||||||
// Switch to bytealg.Index, if available, or a brute force search when
|
|
||||||
// IndexByte returns too many false positives.
|
|
||||||
if haveFastIndex {
|
|
||||||
if j := bytealg.Index(s[i-last:], b[:n]); j >= 0 {
|
|
||||||
return i + j - last
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// If bytealg.Index is not available a brute force search is
|
|
||||||
// ~1.5-3x faster than Rabin-Karp since n is small.
|
|
||||||
c0 := b[last]
|
|
||||||
c1 := b[last-1] // There are at least 2 chars to match
|
|
||||||
loop:
|
|
||||||
for ; i < len(s); i++ {
|
|
||||||
if s[i] == c0 && s[i-1] == c1 {
|
|
||||||
for k := 2; k < n; k++ {
|
|
||||||
if s[i-k] != b[last-k] {
|
|
||||||
continue loop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return i - last
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -451,9 +394,7 @@ var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
|
|||||||
// Fields interprets s as a sequence of UTF-8-encoded code points.
|
// Fields interprets s as a sequence of UTF-8-encoded code points.
|
||||||
// It splits the slice s around each instance of one or more consecutive white space
|
// It splits the slice s around each instance of one or more consecutive white space
|
||||||
// characters, as defined by [unicode.IsSpace], returning a slice of subslices of s or an
|
// characters, as defined by [unicode.IsSpace], returning a slice of subslices of s or an
|
||||||
// empty slice if s contains only white space. Every element of the returned slice is
|
// empty slice if s contains only white space.
|
||||||
// non-empty. Unlike [Split], leading and trailing runs of white space characters
|
|
||||||
// are discarded.
|
|
||||||
func Fields(s []byte) [][]byte {
|
func Fields(s []byte) [][]byte {
|
||||||
// First count the fields.
|
// First count the fields.
|
||||||
// This is an exact count if s is ASCII, otherwise it is an approximation.
|
// This is an exact count if s is ASCII, otherwise it is an approximation.
|
||||||
@ -507,9 +448,7 @@ func Fields(s []byte) [][]byte {
|
|||||||
// FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
|
// FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
|
||||||
// It splits the slice s at each run of code points c satisfying f(c) and
|
// It splits the slice s at each run of code points c satisfying f(c) and
|
||||||
// returns a slice of subslices of s. If all code points in s satisfy f(c), or
|
// returns a slice of subslices of s. If all code points in s satisfy f(c), or
|
||||||
// len(s) == 0, an empty slice is returned. Every element of the returned slice is
|
// len(s) == 0, an empty slice is returned.
|
||||||
// non-empty. Unlike [SplitFunc], leading and trailing runs of code points
|
|
||||||
// satisfying f(c) are discarded.
|
|
||||||
//
|
//
|
||||||
// FieldsFunc makes no guarantees about the order in which it calls f(c)
|
// FieldsFunc makes no guarantees about the order in which it calls f(c)
|
||||||
// and assumes that f always returns the same value for a given c.
|
// and assumes that f always returns the same value for a given c.
|
||||||
@ -596,7 +535,7 @@ func Join(s [][]byte, sep []byte) []byte {
|
|||||||
|
|
||||||
// HasPrefix reports whether the byte slice s begins with prefix.
|
// HasPrefix reports whether the byte slice s begins with prefix.
|
||||||
func HasPrefix(s, prefix []byte) bool {
|
func HasPrefix(s, prefix []byte) bool {
|
||||||
return len(s) >= len(prefix) && Equal(s[:len(prefix)], prefix)
|
return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
|
||||||
}
|
}
|
||||||
|
|
||||||
// HasSuffix reports whether the byte slice s ends with suffix.
|
// HasSuffix reports whether the byte slice s ends with suffix.
|
||||||
@ -655,11 +594,10 @@ func Repeat(b []byte, count int) []byte {
|
|||||||
if count < 0 {
|
if count < 0 {
|
||||||
panic("bytes: negative Repeat count")
|
panic("bytes: negative Repeat count")
|
||||||
}
|
}
|
||||||
hi, lo := bits.Mul(uint(len(b)), uint(count))
|
if len(b) > maxInt/count {
|
||||||
if hi > 0 || lo > uint(maxInt) {
|
|
||||||
panic("bytes: Repeat output length overflow")
|
panic("bytes: Repeat output length overflow")
|
||||||
}
|
}
|
||||||
n := int(lo) // lo = len(b) * count
|
n := len(b) * count
|
||||||
|
|
||||||
if len(b) == 0 {
|
if len(b) == 0 {
|
||||||
return []byte{}
|
return []byte{}
|
||||||
@ -686,7 +624,10 @@ func Repeat(b []byte, count int) []byte {
|
|||||||
nb := bytealg.MakeNoZero(n)[:n:n]
|
nb := bytealg.MakeNoZero(n)[:n:n]
|
||||||
bp := copy(nb, b)
|
bp := copy(nb, b)
|
||||||
for bp < n {
|
for bp < n {
|
||||||
chunk := min(bp, chunkMax)
|
chunk := bp
|
||||||
|
if chunk > chunkMax {
|
||||||
|
chunk = chunkMax
|
||||||
|
}
|
||||||
bp += copy(nb[bp:], nb[:chunk])
|
bp += copy(nb[bp:], nb[:chunk])
|
||||||
}
|
}
|
||||||
return nb
|
return nb
|
||||||
@ -1192,22 +1133,19 @@ func Replace(s, old, new []byte, n int) []byte {
|
|||||||
t := make([]byte, len(s)+n*(len(new)-len(old)))
|
t := make([]byte, len(s)+n*(len(new)-len(old)))
|
||||||
w := 0
|
w := 0
|
||||||
start := 0
|
start := 0
|
||||||
if len(old) > 0 {
|
for i := 0; i < n; i++ {
|
||||||
for range n {
|
j := start
|
||||||
j := start + Index(s[start:], old)
|
if len(old) == 0 {
|
||||||
w += copy(t[w:], s[start:j])
|
if i > 0 {
|
||||||
w += copy(t[w:], new)
|
_, wid := utf8.DecodeRune(s[start:])
|
||||||
start = j + len(old)
|
j += wid
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
j += Index(s[start:], old)
|
||||||
}
|
}
|
||||||
} else { // len(old) == 0
|
w += copy(t[w:], s[start:j])
|
||||||
w += copy(t[w:], new)
|
w += copy(t[w:], new)
|
||||||
for range n - 1 {
|
start = j + len(old)
|
||||||
_, wid := utf8.DecodeRune(s[start:])
|
|
||||||
j := start + wid
|
|
||||||
w += copy(t[w:], s[start:j])
|
|
||||||
w += copy(t[w:], new)
|
|
||||||
start = j
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
w += copy(t[w:], s[start:])
|
w += copy(t[w:], s[start:])
|
||||||
return t[0:w]
|
return t[0:w]
|
||||||
|
@ -7,12 +7,10 @@ package bytes_test
|
|||||||
import (
|
import (
|
||||||
. "bytes"
|
. "bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"internal/asan"
|
|
||||||
"internal/testenv"
|
"internal/testenv"
|
||||||
"iter"
|
|
||||||
"math"
|
"math"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"slices"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"unicode"
|
"unicode"
|
||||||
@ -20,6 +18,18 @@ import (
|
|||||||
"unsafe"
|
"unsafe"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func eq(a, b []string) bool {
|
||||||
|
if len(a) != len(b) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i := 0; i < len(a); i++ {
|
||||||
|
if a[i] != b[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
func sliceOfString(s [][]byte) []string {
|
func sliceOfString(s [][]byte) []string {
|
||||||
result := make([]string, len(s))
|
result := make([]string, len(s))
|
||||||
for i, v := range s {
|
for i, v := range s {
|
||||||
@ -28,37 +38,6 @@ func sliceOfString(s [][]byte) []string {
|
|||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func collect(t *testing.T, seq iter.Seq[[]byte]) [][]byte {
|
|
||||||
out := slices.Collect(seq)
|
|
||||||
out1 := slices.Collect(seq)
|
|
||||||
if !slices.Equal(sliceOfString(out), sliceOfString(out1)) {
|
|
||||||
t.Fatalf("inconsistent seq:\n%s\n%s", out, out1)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
type LinesTest struct {
|
|
||||||
a string
|
|
||||||
b []string
|
|
||||||
}
|
|
||||||
|
|
||||||
var linesTests = []LinesTest{
|
|
||||||
{a: "abc\nabc\n", b: []string{"abc\n", "abc\n"}},
|
|
||||||
{a: "abc\r\nabc", b: []string{"abc\r\n", "abc"}},
|
|
||||||
{a: "abc\r\n", b: []string{"abc\r\n"}},
|
|
||||||
{a: "\nabc", b: []string{"\n", "abc"}},
|
|
||||||
{a: "\nabc\n\n", b: []string{"\n", "abc\n", "\n"}},
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLines(t *testing.T) {
|
|
||||||
for _, s := range linesTests {
|
|
||||||
result := sliceOfString(slices.Collect(Lines([]byte(s.a))))
|
|
||||||
if !slices.Equal(result, s.b) {
|
|
||||||
t.Errorf(`slices.Collect(Lines(%q)) = %q; want %q`, s.a, result, s.b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// For ease of reading, the test cases use strings that are converted to byte
|
// For ease of reading, the test cases use strings that are converted to byte
|
||||||
// slices before invoking the functions.
|
// slices before invoking the functions.
|
||||||
|
|
||||||
@ -198,11 +177,6 @@ var indexTests = []BinOpTest{
|
|||||||
{"oxoxoxoxoxoxoxoxoxoxoxox", "oy", -1},
|
{"oxoxoxoxoxoxoxoxoxoxoxox", "oy", -1},
|
||||||
// test fallback to Rabin-Karp.
|
// test fallback to Rabin-Karp.
|
||||||
{"000000000000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000001", 5},
|
{"000000000000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000001", 5},
|
||||||
// test fallback to IndexRune
|
|
||||||
{"oxoxoxoxoxoxoxoxoxoxox☺", "☺", 22},
|
|
||||||
// invalid UTF-8 byte sequence (must be longer than bytealg.MaxBruteForce to
|
|
||||||
// test that we don't use IndexRune)
|
|
||||||
{"xx0123456789012345678901234567890123456789012345678901234567890120123456789012345678901234567890123456xxx\xed\x9f\xc0", "\xed\x9f\xc0", 105},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var lastIndexTests = []BinOpTest{
|
var lastIndexTests = []BinOpTest{
|
||||||
@ -451,31 +425,6 @@ func TestIndexRune(t *testing.T) {
|
|||||||
{"some_text=some_value", '=', 9},
|
{"some_text=some_value", '=', 9},
|
||||||
{"☺a", 'a', 3},
|
{"☺a", 'a', 3},
|
||||||
{"a☻☺b", '☺', 4},
|
{"a☻☺b", '☺', 4},
|
||||||
{"𠀳𠀗𠀾𠁄𠀧𠁆𠁂𠀫𠀖𠀪𠀲𠀴𠁀𠀨𠀿", '𠀿', 56},
|
|
||||||
|
|
||||||
// 2 bytes
|
|
||||||
{"ӆ", 'ӆ', 0},
|
|
||||||
{"a", 'ӆ', -1},
|
|
||||||
{" ӆ", 'ӆ', 2},
|
|
||||||
{" a", 'ӆ', -1},
|
|
||||||
{strings.Repeat("ц", 64) + "ӆ", 'ӆ', 128}, // test cutover
|
|
||||||
{strings.Repeat("ц", 64), 'ӆ', -1},
|
|
||||||
|
|
||||||
// 3 bytes
|
|
||||||
{"Ꚁ", 'Ꚁ', 0},
|
|
||||||
{"a", 'Ꚁ', -1},
|
|
||||||
{" Ꚁ", 'Ꚁ', 2},
|
|
||||||
{" a", 'Ꚁ', -1},
|
|
||||||
{strings.Repeat("Ꙁ", 64) + "Ꚁ", 'Ꚁ', 192}, // test cutover
|
|
||||||
{strings.Repeat("Ꙁ", 64) + "Ꚁ", '䚀', -1}, // 'Ꚁ' and '䚀' share the same last two bytes
|
|
||||||
|
|
||||||
// 4 bytes
|
|
||||||
{"𡌀", '𡌀', 0},
|
|
||||||
{"a", '𡌀', -1},
|
|
||||||
{" 𡌀", '𡌀', 2},
|
|
||||||
{" a", '𡌀', -1},
|
|
||||||
{strings.Repeat("𡋀", 64) + "𡌀", '𡌀', 256}, // test cutover
|
|
||||||
{strings.Repeat("𡋀", 64) + "𡌀", '𣌀', -1}, // '𡌀' and '𣌀' share the same last two bytes
|
|
||||||
|
|
||||||
// RuneError should match any invalid UTF-8 byte sequence.
|
// RuneError should match any invalid UTF-8 byte sequence.
|
||||||
{"<22>", '<27>', 0},
|
{"<22>", '<27>', 0},
|
||||||
@ -489,13 +438,6 @@ func TestIndexRune(t *testing.T) {
|
|||||||
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", -1, -1},
|
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", -1, -1},
|
||||||
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", 0xD800, -1}, // Surrogate pair
|
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", 0xD800, -1}, // Surrogate pair
|
||||||
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", utf8.MaxRune + 1, -1},
|
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", utf8.MaxRune + 1, -1},
|
||||||
|
|
||||||
// Test the cutover to bytealg.Index when it is triggered in
|
|
||||||
// the middle of rune that contains consecutive runs of equal bytes.
|
|
||||||
{"aaaaaKKKK\U000bc104", '\U000bc104', 17}, // cutover: (n + 16) / 8
|
|
||||||
{"aaaaaKKKK鄄", '鄄', 17},
|
|
||||||
{"aaKKKKKa\U000bc104", '\U000bc104', 18}, // cutover: 4 + n>>4
|
|
||||||
{"aaKKKKKa鄄", '鄄', 18},
|
|
||||||
}
|
}
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
if got := IndexRune([]byte(tt.in), tt.rune); got != tt.want {
|
if got := IndexRune([]byte(tt.in), tt.rune); got != tt.want {
|
||||||
@ -643,21 +585,6 @@ func BenchmarkIndexRuneASCII(b *testing.B) {
|
|||||||
benchBytes(b, indexSizes, bmIndexRuneASCII(IndexRune))
|
benchBytes(b, indexSizes, bmIndexRuneASCII(IndexRune))
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkIndexRuneUnicode(b *testing.B) {
|
|
||||||
b.Run("Latin", func(b *testing.B) {
|
|
||||||
// Latin is mostly 1, 2, 3 byte runes.
|
|
||||||
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Latin, 'é'))
|
|
||||||
})
|
|
||||||
b.Run("Cyrillic", func(b *testing.B) {
|
|
||||||
// Cyrillic is mostly 2 and 3 byte runes.
|
|
||||||
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Cyrillic, 'Ꙁ'))
|
|
||||||
})
|
|
||||||
b.Run("Han", func(b *testing.B) {
|
|
||||||
// Han consists only of 3 and 4 byte runes.
|
|
||||||
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Han, '𠀿'))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func bmIndexRuneASCII(index func([]byte, rune) int) func(b *testing.B, n int) {
|
func bmIndexRuneASCII(index func([]byte, rune) int) func(b *testing.B, n int) {
|
||||||
return func(b *testing.B, n int) {
|
return func(b *testing.B, n int) {
|
||||||
buf := bmbuf[0:n]
|
buf := bmbuf[0:n]
|
||||||
@ -688,61 +615,6 @@ func bmIndexRune(index func([]byte, rune) int) func(b *testing.B, n int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func bmIndexRuneUnicode(rt *unicode.RangeTable, needle rune) func(b *testing.B, n int) {
|
|
||||||
var rs []rune
|
|
||||||
for _, r16 := range rt.R16 {
|
|
||||||
for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) {
|
|
||||||
if r != needle {
|
|
||||||
rs = append(rs, rune(r))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, r32 := range rt.R32 {
|
|
||||||
for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) {
|
|
||||||
if r != needle {
|
|
||||||
rs = append(rs, rune(r))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Shuffle the runes so that they are not in descending order.
|
|
||||||
// The sort is deterministic since this is used for benchmarks,
|
|
||||||
// which need to be repeatable.
|
|
||||||
rr := rand.New(rand.NewSource(1))
|
|
||||||
rr.Shuffle(len(rs), func(i, j int) {
|
|
||||||
rs[i], rs[j] = rs[j], rs[i]
|
|
||||||
})
|
|
||||||
uchars := string(rs)
|
|
||||||
|
|
||||||
return func(b *testing.B, n int) {
|
|
||||||
buf := bmbuf[0:n]
|
|
||||||
o := copy(buf, uchars)
|
|
||||||
for o < len(buf) {
|
|
||||||
o += copy(buf[o:], uchars)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make space for the needle rune at the end of buf.
|
|
||||||
m := utf8.RuneLen(needle)
|
|
||||||
for o := m; o > 0; {
|
|
||||||
_, sz := utf8.DecodeLastRune(buf)
|
|
||||||
copy(buf[len(buf)-sz:], "\x00\x00\x00\x00")
|
|
||||||
buf = buf[:len(buf)-sz]
|
|
||||||
o -= sz
|
|
||||||
}
|
|
||||||
buf = utf8.AppendRune(buf[:n-m], needle)
|
|
||||||
|
|
||||||
n -= m // adjust for rune len
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
j := IndexRune(buf, needle)
|
|
||||||
if j != n {
|
|
||||||
b.Fatal("bad index", j)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for i := range buf {
|
|
||||||
buf[i] = '\x00'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkEqual(b *testing.B) {
|
func BenchmarkEqual(b *testing.B) {
|
||||||
b.Run("0", func(b *testing.B) {
|
b.Run("0", func(b *testing.B) {
|
||||||
var buf [4]byte
|
var buf [4]byte
|
||||||
@ -936,18 +808,10 @@ func TestSplit(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
result := sliceOfString(a)
|
result := sliceOfString(a)
|
||||||
if !slices.Equal(result, tt.a) {
|
if !eq(result, tt.a) {
|
||||||
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
|
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if tt.n < 0 {
|
|
||||||
b := sliceOfString(slices.Collect(SplitSeq([]byte(tt.s), []byte(tt.sep))))
|
|
||||||
if !slices.Equal(b, tt.a) {
|
|
||||||
t.Errorf(`collect(SplitSeq(%q, %q)) = %v; want %v`, tt.s, tt.sep, b, tt.a)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if tt.n == 0 || len(a) == 0 {
|
if tt.n == 0 || len(a) == 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -961,8 +825,8 @@ func TestSplit(t *testing.T) {
|
|||||||
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
|
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
|
||||||
}
|
}
|
||||||
if tt.n < 0 {
|
if tt.n < 0 {
|
||||||
b := sliceOfString(Split([]byte(tt.s), []byte(tt.sep)))
|
b := Split([]byte(tt.s), []byte(tt.sep))
|
||||||
if !slices.Equal(result, b) {
|
if !reflect.DeepEqual(a, b) {
|
||||||
t.Errorf("Split disagrees withSplitN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
|
t.Errorf("Split disagrees withSplitN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1002,18 +866,11 @@ func TestSplitAfter(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
result := sliceOfString(a)
|
result := sliceOfString(a)
|
||||||
if !slices.Equal(result, tt.a) {
|
if !eq(result, tt.a) {
|
||||||
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
|
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if tt.n < 0 {
|
|
||||||
b := sliceOfString(slices.Collect(SplitAfterSeq([]byte(tt.s), []byte(tt.sep))))
|
|
||||||
if !slices.Equal(b, tt.a) {
|
|
||||||
t.Errorf(`collect(SplitAfterSeq(%q, %q)) = %v; want %v`, tt.s, tt.sep, b, tt.a)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if want := tt.a[len(tt.a)-1] + "z"; string(x) != want {
|
if want := tt.a[len(tt.a)-1] + "z"; string(x) != want {
|
||||||
t.Errorf("last appended result was %s; want %s", x, want)
|
t.Errorf("last appended result was %s; want %s", x, want)
|
||||||
}
|
}
|
||||||
@ -1023,8 +880,8 @@ func TestSplitAfter(t *testing.T) {
|
|||||||
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
|
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
|
||||||
}
|
}
|
||||||
if tt.n < 0 {
|
if tt.n < 0 {
|
||||||
b := sliceOfString(SplitAfter([]byte(tt.s), []byte(tt.sep)))
|
b := SplitAfter([]byte(tt.s), []byte(tt.sep))
|
||||||
if !slices.Equal(result, b) {
|
if !reflect.DeepEqual(a, b) {
|
||||||
t.Errorf("SplitAfter disagrees withSplitAfterN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
|
t.Errorf("SplitAfter disagrees withSplitAfterN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1062,16 +919,11 @@ func TestFields(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
result := sliceOfString(a)
|
result := sliceOfString(a)
|
||||||
if !slices.Equal(result, tt.a) {
|
if !eq(result, tt.a) {
|
||||||
t.Errorf("Fields(%q) = %v; want %v", tt.s, a, tt.a)
|
t.Errorf("Fields(%q) = %v; want %v", tt.s, a, tt.a)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
result2 := sliceOfString(collect(t, FieldsSeq([]byte(tt.s))))
|
|
||||||
if !slices.Equal(result2, tt.a) {
|
|
||||||
t.Errorf(`collect(FieldsSeq(%q)) = %v; want %v`, tt.s, result2, tt.a)
|
|
||||||
}
|
|
||||||
|
|
||||||
if string(b) != tt.s {
|
if string(b) != tt.s {
|
||||||
t.Errorf("slice changed to %s; want %s", string(b), tt.s)
|
t.Errorf("slice changed to %s; want %s", string(b), tt.s)
|
||||||
}
|
}
|
||||||
@ -1087,7 +939,7 @@ func TestFieldsFunc(t *testing.T) {
|
|||||||
for _, tt := range fieldstests {
|
for _, tt := range fieldstests {
|
||||||
a := FieldsFunc([]byte(tt.s), unicode.IsSpace)
|
a := FieldsFunc([]byte(tt.s), unicode.IsSpace)
|
||||||
result := sliceOfString(a)
|
result := sliceOfString(a)
|
||||||
if !slices.Equal(result, tt.a) {
|
if !eq(result, tt.a) {
|
||||||
t.Errorf("FieldsFunc(%q, unicode.IsSpace) = %v; want %v", tt.s, a, tt.a)
|
t.Errorf("FieldsFunc(%q, unicode.IsSpace) = %v; want %v", tt.s, a, tt.a)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -1110,15 +962,10 @@ func TestFieldsFunc(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
result := sliceOfString(a)
|
result := sliceOfString(a)
|
||||||
if !slices.Equal(result, tt.a) {
|
if !eq(result, tt.a) {
|
||||||
t.Errorf("FieldsFunc(%q) = %v, want %v", tt.s, a, tt.a)
|
t.Errorf("FieldsFunc(%q) = %v, want %v", tt.s, a, tt.a)
|
||||||
}
|
}
|
||||||
|
|
||||||
result2 := sliceOfString(collect(t, FieldsFuncSeq([]byte(tt.s), pred)))
|
|
||||||
if !slices.Equal(result2, tt.a) {
|
|
||||||
t.Errorf(`collect(FieldsFuncSeq(%q)) = %v; want %v`, tt.s, result2, tt.a)
|
|
||||||
}
|
|
||||||
|
|
||||||
if string(b) != tt.s {
|
if string(b) != tt.s {
|
||||||
t.Errorf("slice changed to %s; want %s", b, tt.s)
|
t.Errorf("slice changed to %s; want %s", b, tt.s)
|
||||||
}
|
}
|
||||||
@ -1439,6 +1286,18 @@ func TestRepeatCatchesOverflow(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func runesEqual(a, b []rune) bool {
|
||||||
|
if len(a) != len(b) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i, r := range a {
|
||||||
|
if r != b[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
type RunesTest struct {
|
type RunesTest struct {
|
||||||
in string
|
in string
|
||||||
out []rune
|
out []rune
|
||||||
@ -1459,7 +1318,7 @@ func TestRunes(t *testing.T) {
|
|||||||
for _, tt := range RunesTests {
|
for _, tt := range RunesTests {
|
||||||
tin := []byte(tt.in)
|
tin := []byte(tt.in)
|
||||||
a := Runes(tin)
|
a := Runes(tin)
|
||||||
if !slices.Equal(a, tt.out) {
|
if !runesEqual(a, tt.out) {
|
||||||
t.Errorf("Runes(%q) = %v; want %v", tin, a, tt.out)
|
t.Errorf("Runes(%q) = %v; want %v", tin, a, tt.out)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -1787,20 +1646,9 @@ var ReplaceTests = []ReplaceTest{
|
|||||||
|
|
||||||
func TestReplace(t *testing.T) {
|
func TestReplace(t *testing.T) {
|
||||||
for _, tt := range ReplaceTests {
|
for _, tt := range ReplaceTests {
|
||||||
var (
|
in := append([]byte(tt.in), "<spare>"...)
|
||||||
in = []byte(tt.in)
|
|
||||||
old = []byte(tt.old)
|
|
||||||
new = []byte(tt.new)
|
|
||||||
)
|
|
||||||
if !asan.Enabled {
|
|
||||||
allocs := testing.AllocsPerRun(10, func() { Replace(in, old, new, tt.n) })
|
|
||||||
if allocs > 1 {
|
|
||||||
t.Errorf("Replace(%q, %q, %q, %d) allocates %.2f objects", tt.in, tt.old, tt.new, tt.n, allocs)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
in = append(in, "<spare>"...)
|
|
||||||
in = in[:len(tt.in)]
|
in = in[:len(tt.in)]
|
||||||
out := Replace(in, old, new, tt.n)
|
out := Replace(in, []byte(tt.old), []byte(tt.new), tt.n)
|
||||||
if s := string(out); s != tt.out {
|
if s := string(out); s != tt.out {
|
||||||
t.Errorf("Replace(%q, %q, %q, %d) = %q, want %q", tt.in, tt.old, tt.new, tt.n, s, tt.out)
|
t.Errorf("Replace(%q, %q, %q, %d) = %q, want %q", tt.in, tt.old, tt.new, tt.n, s, tt.out)
|
||||||
}
|
}
|
||||||
@ -1808,7 +1656,7 @@ func TestReplace(t *testing.T) {
|
|||||||
t.Errorf("Replace(%q, %q, %q, %d) didn't copy", tt.in, tt.old, tt.new, tt.n)
|
t.Errorf("Replace(%q, %q, %q, %d) didn't copy", tt.in, tt.old, tt.new, tt.n)
|
||||||
}
|
}
|
||||||
if tt.n == -1 {
|
if tt.n == -1 {
|
||||||
out := ReplaceAll(in, old, new)
|
out := ReplaceAll(in, []byte(tt.old), []byte(tt.new))
|
||||||
if s := string(out); s != tt.out {
|
if s := string(out); s != tt.out {
|
||||||
t.Errorf("ReplaceAll(%q, %q, %q) = %q, want %q", tt.in, tt.old, tt.new, s, tt.out)
|
t.Errorf("ReplaceAll(%q, %q, %q) = %q, want %q", tt.in, tt.old, tt.new, s, tt.out)
|
||||||
}
|
}
|
||||||
@ -1816,69 +1664,6 @@ func TestReplace(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func FuzzReplace(f *testing.F) {
|
|
||||||
for _, tt := range ReplaceTests {
|
|
||||||
f.Add([]byte(tt.in), []byte(tt.old), []byte(tt.new), tt.n)
|
|
||||||
}
|
|
||||||
f.Fuzz(func(t *testing.T, in, old, new []byte, n int) {
|
|
||||||
differentImpl := func(in, old, new []byte, n int) []byte {
|
|
||||||
var out Buffer
|
|
||||||
if n < 0 {
|
|
||||||
n = math.MaxInt
|
|
||||||
}
|
|
||||||
for i := 0; i < len(in); {
|
|
||||||
if n == 0 {
|
|
||||||
out.Write(in[i:])
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if HasPrefix(in[i:], old) {
|
|
||||||
out.Write(new)
|
|
||||||
i += len(old)
|
|
||||||
n--
|
|
||||||
if len(old) != 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if i == len(in) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(old) == 0 {
|
|
||||||
_, length := utf8.DecodeRune(in[i:])
|
|
||||||
out.Write(in[i : i+length])
|
|
||||||
i += length
|
|
||||||
} else {
|
|
||||||
out.WriteByte(in[i])
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(old) == 0 && n != 0 {
|
|
||||||
out.Write(new)
|
|
||||||
}
|
|
||||||
return out.Bytes()
|
|
||||||
}
|
|
||||||
if simple, replace := differentImpl(in, old, new, n), Replace(in, old, new, n); !slices.Equal(simple, replace) {
|
|
||||||
t.Errorf("The two implementations do not match %q != %q for Replace(%q, %q, %q, %d)", simple, replace, in, old, new, n)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkReplace(b *testing.B) {
|
|
||||||
for _, tt := range ReplaceTests {
|
|
||||||
desc := fmt.Sprintf("%q %q %q %d", tt.in, tt.old, tt.new, tt.n)
|
|
||||||
var (
|
|
||||||
in = []byte(tt.in)
|
|
||||||
old = []byte(tt.old)
|
|
||||||
new = []byte(tt.new)
|
|
||||||
)
|
|
||||||
b.Run(desc, func(b *testing.B) {
|
|
||||||
b.ReportAllocs()
|
|
||||||
for b.Loop() {
|
|
||||||
Replace(in, old, new, tt.n)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type TitleTest struct {
|
type TitleTest struct {
|
||||||
in, out string
|
in, out string
|
||||||
}
|
}
|
||||||
@ -2259,11 +2044,6 @@ func makeBenchInputHard() []byte {
|
|||||||
var benchInputHard = makeBenchInputHard()
|
var benchInputHard = makeBenchInputHard()
|
||||||
|
|
||||||
func benchmarkIndexHard(b *testing.B, sep []byte) {
|
func benchmarkIndexHard(b *testing.B, sep []byte) {
|
||||||
n := Index(benchInputHard, sep)
|
|
||||||
if n < 0 {
|
|
||||||
n = len(benchInputHard)
|
|
||||||
}
|
|
||||||
b.SetBytes(int64(n))
|
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
Index(benchInputHard, sep)
|
Index(benchInputHard, sep)
|
||||||
}
|
}
|
||||||
|
@ -502,10 +502,10 @@ func ExampleTitle() {
|
|||||||
|
|
||||||
func ExampleToTitle() {
|
func ExampleToTitle() {
|
||||||
fmt.Printf("%s\n", bytes.ToTitle([]byte("loud noises")))
|
fmt.Printf("%s\n", bytes.ToTitle([]byte("loud noises")))
|
||||||
fmt.Printf("%s\n", bytes.ToTitle([]byte("брат")))
|
fmt.Printf("%s\n", bytes.ToTitle([]byte("хлеб")))
|
||||||
// Output:
|
// Output:
|
||||||
// LOUD NOISES
|
// LOUD NOISES
|
||||||
// БРАТ
|
// ХЛЕБ
|
||||||
}
|
}
|
||||||
|
|
||||||
func ExampleToTitleSpecial() {
|
func ExampleToTitleSpecial() {
|
||||||
@ -628,93 +628,3 @@ func ExampleToUpperSpecial() {
|
|||||||
// Original : ahoj vývojári golang
|
// Original : ahoj vývojári golang
|
||||||
// ToUpper : AHOJ VÝVOJÁRİ GOLANG
|
// ToUpper : AHOJ VÝVOJÁRİ GOLANG
|
||||||
}
|
}
|
||||||
|
|
||||||
func ExampleLines() {
|
|
||||||
text := []byte("Hello\nWorld\nGo Programming\n")
|
|
||||||
for line := range bytes.Lines(text) {
|
|
||||||
fmt.Printf("%q\n", line)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// "Hello\n"
|
|
||||||
// "World\n"
|
|
||||||
// "Go Programming\n"
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleSplitSeq() {
|
|
||||||
s := []byte("a,b,c,d")
|
|
||||||
for part := range bytes.SplitSeq(s, []byte(",")) {
|
|
||||||
fmt.Printf("%q\n", part)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// "a"
|
|
||||||
// "b"
|
|
||||||
// "c"
|
|
||||||
// "d"
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleSplitAfterSeq() {
|
|
||||||
s := []byte("a,b,c,d")
|
|
||||||
for part := range bytes.SplitAfterSeq(s, []byte(",")) {
|
|
||||||
fmt.Printf("%q\n", part)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// "a,"
|
|
||||||
// "b,"
|
|
||||||
// "c,"
|
|
||||||
// "d"
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleFieldsSeq() {
|
|
||||||
text := []byte("The quick brown fox")
|
|
||||||
fmt.Println("Split byte slice into fields:")
|
|
||||||
for word := range bytes.FieldsSeq(text) {
|
|
||||||
fmt.Printf("%q\n", word)
|
|
||||||
}
|
|
||||||
|
|
||||||
textWithSpaces := []byte(" lots of spaces ")
|
|
||||||
fmt.Println("\nSplit byte slice with multiple spaces:")
|
|
||||||
for word := range bytes.FieldsSeq(textWithSpaces) {
|
|
||||||
fmt.Printf("%q\n", word)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// Split byte slice into fields:
|
|
||||||
// "The"
|
|
||||||
// "quick"
|
|
||||||
// "brown"
|
|
||||||
// "fox"
|
|
||||||
//
|
|
||||||
// Split byte slice with multiple spaces:
|
|
||||||
// "lots"
|
|
||||||
// "of"
|
|
||||||
// "spaces"
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleFieldsFuncSeq() {
|
|
||||||
text := []byte("The quick brown fox")
|
|
||||||
fmt.Println("Split on whitespace(similar to FieldsSeq):")
|
|
||||||
for word := range bytes.FieldsFuncSeq(text, unicode.IsSpace) {
|
|
||||||
fmt.Printf("%q\n", word)
|
|
||||||
}
|
|
||||||
|
|
||||||
mixedText := []byte("abc123def456ghi")
|
|
||||||
fmt.Println("\nSplit on digits:")
|
|
||||||
for word := range bytes.FieldsFuncSeq(mixedText, unicode.IsDigit) {
|
|
||||||
fmt.Printf("%q\n", word)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// Split on whitespace(similar to FieldsSeq):
|
|
||||||
// "The"
|
|
||||||
// "quick"
|
|
||||||
// "brown"
|
|
||||||
// "fox"
|
|
||||||
//
|
|
||||||
// Split on digits:
|
|
||||||
// "abc"
|
|
||||||
// "def"
|
|
||||||
// "ghi"
|
|
||||||
}
|
|
||||||
|
@ -1,146 +0,0 @@
|
|||||||
// Copyright 2024 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package bytes
|
|
||||||
|
|
||||||
import (
|
|
||||||
"iter"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Lines returns an iterator over the newline-terminated lines in the byte slice s.
|
|
||||||
// The lines yielded by the iterator include their terminating newlines.
|
|
||||||
// If s is empty, the iterator yields no lines at all.
|
|
||||||
// If s does not end in a newline, the final yielded line will not end in a newline.
|
|
||||||
// It returns a single-use iterator.
|
|
||||||
func Lines(s []byte) iter.Seq[[]byte] {
|
|
||||||
return func(yield func([]byte) bool) {
|
|
||||||
for len(s) > 0 {
|
|
||||||
var line []byte
|
|
||||||
if i := IndexByte(s, '\n'); i >= 0 {
|
|
||||||
line, s = s[:i+1], s[i+1:]
|
|
||||||
} else {
|
|
||||||
line, s = s, nil
|
|
||||||
}
|
|
||||||
if !yield(line[:len(line):len(line)]) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// explodeSeq returns an iterator over the runes in s.
|
|
||||||
func explodeSeq(s []byte, yield func([]byte) bool) {
|
|
||||||
for len(s) > 0 {
|
|
||||||
_, size := utf8.DecodeRune(s)
|
|
||||||
if !yield(s[:size:size]) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s = s[size:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// splitSeq is SplitSeq or SplitAfterSeq, configured by how many
|
|
||||||
// bytes of sep to include in the results (none or all).
|
|
||||||
func splitSeq(s, sep []byte, sepSave int) iter.Seq[[]byte] {
|
|
||||||
return func(yield func([]byte) bool) {
|
|
||||||
if len(sep) == 0 {
|
|
||||||
explodeSeq(s, yield)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
i := Index(s, sep)
|
|
||||||
if i < 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
frag := s[:i+sepSave]
|
|
||||||
if !yield(frag[:len(frag):len(frag)]) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s = s[i+len(sep):]
|
|
||||||
}
|
|
||||||
yield(s[:len(s):len(s)])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SplitSeq returns an iterator over all subslices of s separated by sep.
|
|
||||||
// The iterator yields the same subslices that would be returned by [Split](s, sep),
|
|
||||||
// but without constructing a new slice containing the subslices.
|
|
||||||
// It returns a single-use iterator.
|
|
||||||
func SplitSeq(s, sep []byte) iter.Seq[[]byte] {
|
|
||||||
return splitSeq(s, sep, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SplitAfterSeq returns an iterator over subslices of s split after each instance of sep.
|
|
||||||
// The iterator yields the same subslices that would be returned by [SplitAfter](s, sep),
|
|
||||||
// but without constructing a new slice containing the subslices.
|
|
||||||
// It returns a single-use iterator.
|
|
||||||
func SplitAfterSeq(s, sep []byte) iter.Seq[[]byte] {
|
|
||||||
return splitSeq(s, sep, len(sep))
|
|
||||||
}
|
|
||||||
|
|
||||||
// FieldsSeq returns an iterator over subslices of s split around runs of
|
|
||||||
// whitespace characters, as defined by [unicode.IsSpace].
|
|
||||||
// The iterator yields the same subslices that would be returned by [Fields](s),
|
|
||||||
// but without constructing a new slice containing the subslices.
|
|
||||||
func FieldsSeq(s []byte) iter.Seq[[]byte] {
|
|
||||||
return func(yield func([]byte) bool) {
|
|
||||||
start := -1
|
|
||||||
for i := 0; i < len(s); {
|
|
||||||
size := 1
|
|
||||||
r := rune(s[i])
|
|
||||||
isSpace := asciiSpace[s[i]] != 0
|
|
||||||
if r >= utf8.RuneSelf {
|
|
||||||
r, size = utf8.DecodeRune(s[i:])
|
|
||||||
isSpace = unicode.IsSpace(r)
|
|
||||||
}
|
|
||||||
if isSpace {
|
|
||||||
if start >= 0 {
|
|
||||||
if !yield(s[start:i:i]) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
start = -1
|
|
||||||
}
|
|
||||||
} else if start < 0 {
|
|
||||||
start = i
|
|
||||||
}
|
|
||||||
i += size
|
|
||||||
}
|
|
||||||
if start >= 0 {
|
|
||||||
yield(s[start:len(s):len(s)])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// FieldsFuncSeq returns an iterator over subslices of s split around runs of
|
|
||||||
// Unicode code points satisfying f(c).
|
|
||||||
// The iterator yields the same subslices that would be returned by [FieldsFunc](s),
|
|
||||||
// but without constructing a new slice containing the subslices.
|
|
||||||
func FieldsFuncSeq(s []byte, f func(rune) bool) iter.Seq[[]byte] {
|
|
||||||
return func(yield func([]byte) bool) {
|
|
||||||
start := -1
|
|
||||||
for i := 0; i < len(s); {
|
|
||||||
size := 1
|
|
||||||
r := rune(s[i])
|
|
||||||
if r >= utf8.RuneSelf {
|
|
||||||
r, size = utf8.DecodeRune(s[i:])
|
|
||||||
}
|
|
||||||
if f(r) {
|
|
||||||
if start >= 0 {
|
|
||||||
if !yield(s[start:i:i]) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
start = -1
|
|
||||||
}
|
|
||||||
} else if start < 0 {
|
|
||||||
start = i
|
|
||||||
}
|
|
||||||
i += size
|
|
||||||
}
|
|
||||||
if start >= 0 {
|
|
||||||
yield(s[start:len(s):len(s)])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,56 +0,0 @@
|
|||||||
// Copyright 2024 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package bytes_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
. "bytes"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func BenchmarkSplitSeqEmptySeparator(b *testing.B) {
|
|
||||||
for range b.N {
|
|
||||||
for range SplitSeq(benchInputHard, nil) {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkSplitSeqSingleByteSeparator(b *testing.B) {
|
|
||||||
sep := []byte("/")
|
|
||||||
for range b.N {
|
|
||||||
for range SplitSeq(benchInputHard, sep) {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkSplitSeqMultiByteSeparator(b *testing.B) {
|
|
||||||
sep := []byte("hello")
|
|
||||||
for range b.N {
|
|
||||||
for range SplitSeq(benchInputHard, sep) {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkSplitAfterSeqEmptySeparator(b *testing.B) {
|
|
||||||
for range b.N {
|
|
||||||
for range SplitAfterSeq(benchInputHard, nil) {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkSplitAfterSeqSingleByteSeparator(b *testing.B) {
|
|
||||||
sep := []byte("/")
|
|
||||||
for range b.N {
|
|
||||||
for range SplitAfterSeq(benchInputHard, sep) {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkSplitAfterSeqMultiByteSeparator(b *testing.B) {
|
|
||||||
sep := []byte("hello")
|
|
||||||
for range b.N {
|
|
||||||
for range SplitAfterSeq(benchInputHard, sep) {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -6,16 +6,27 @@
|
|||||||
|
|
||||||
setlocal
|
setlocal
|
||||||
|
|
||||||
go tool dist env -w -p >env.bat || exit /b 1
|
set GOBUILDFAIL=0
|
||||||
|
|
||||||
|
go tool dist env -w -p >env.bat
|
||||||
|
if errorlevel 1 goto fail
|
||||||
call .\env.bat
|
call .\env.bat
|
||||||
del env.bat
|
del env.bat
|
||||||
echo.
|
echo.
|
||||||
|
|
||||||
if not exist %GOTOOLDIR%\dist.exe (
|
if exist %GOTOOLDIR%\dist.exe goto distok
|
||||||
echo cannot find %GOTOOLDIR%\dist.exe; nothing to clean
|
echo cannot find %GOTOOLDIR%\dist; nothing to clean
|
||||||
exit /b 1
|
goto fail
|
||||||
)
|
:distok
|
||||||
|
|
||||||
"%GOBIN%\go" clean -i std
|
"%GOBIN%\go" clean -i std
|
||||||
"%GOBIN%\go" tool dist clean
|
"%GOBIN%\go" tool dist clean
|
||||||
"%GOBIN%\go" clean -i cmd
|
"%GOBIN%\go" clean -i cmd
|
||||||
|
|
||||||
|
goto end
|
||||||
|
|
||||||
|
:fail
|
||||||
|
set GOBUILDFAIL=1
|
||||||
|
|
||||||
|
:end
|
||||||
|
if x%GOBUILDEXIT%==x1 exit %GOBUILDFAIL%
|
||||||
|
@ -12,6 +12,7 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -27,6 +28,26 @@ func TestMain(m *testing.M) {
|
|||||||
os.Exit(m.Run())
|
os.Exit(m.Run())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// addr2linePath returns the path to the "addr2line" binary to run.
|
||||||
|
func addr2linePath(t testing.TB) string {
|
||||||
|
t.Helper()
|
||||||
|
testenv.MustHaveExec(t)
|
||||||
|
|
||||||
|
addr2linePathOnce.Do(func() {
|
||||||
|
addr2lineExePath, addr2linePathErr = os.Executable()
|
||||||
|
})
|
||||||
|
if addr2linePathErr != nil {
|
||||||
|
t.Fatal(addr2linePathErr)
|
||||||
|
}
|
||||||
|
return addr2lineExePath
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
addr2linePathOnce sync.Once
|
||||||
|
addr2lineExePath string
|
||||||
|
addr2linePathErr error
|
||||||
|
)
|
||||||
|
|
||||||
func loadSyms(t *testing.T, dbgExePath string) map[string]string {
|
func loadSyms(t *testing.T, dbgExePath string) map[string]string {
|
||||||
cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", dbgExePath)
|
cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", dbgExePath)
|
||||||
out, err := cmd.CombinedOutput()
|
out, err := cmd.CombinedOutput()
|
||||||
@ -49,7 +70,7 @@ func loadSyms(t *testing.T, dbgExePath string) map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func runAddr2Line(t *testing.T, dbgExePath, addr string) (funcname, path, lineno string) {
|
func runAddr2Line(t *testing.T, dbgExePath, addr string) (funcname, path, lineno string) {
|
||||||
cmd := testenv.Command(t, testenv.Executable(t), dbgExePath)
|
cmd := testenv.Command(t, addr2linePath(t), dbgExePath)
|
||||||
cmd.Stdin = strings.NewReader(addr)
|
cmd.Stdin = strings.NewReader(addr)
|
||||||
out, err := cmd.CombinedOutput()
|
out, err := cmd.CombinedOutput()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -87,22 +108,27 @@ func testAddr2Line(t *testing.T, dbgExePath, addr string) {
|
|||||||
// Debug paths are stored slash-separated, so convert to system-native.
|
// Debug paths are stored slash-separated, so convert to system-native.
|
||||||
srcPath = filepath.FromSlash(srcPath)
|
srcPath = filepath.FromSlash(srcPath)
|
||||||
fi2, err := os.Stat(srcPath)
|
fi2, err := os.Stat(srcPath)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Stat failed: %v", err)
|
t.Fatalf("Stat failed: %v", err)
|
||||||
}
|
}
|
||||||
if !os.SameFile(fi1, fi2) {
|
if !os.SameFile(fi1, fi2) {
|
||||||
t.Fatalf("addr2line_test.go and %s are not same file", srcPath)
|
t.Fatalf("addr2line_test.go and %s are not same file", srcPath)
|
||||||
}
|
}
|
||||||
if want := "102"; srcLineNo != want {
|
if want := "124"; srcLineNo != want {
|
||||||
t.Fatalf("line number = %v; want %s", srcLineNo, want)
|
t.Fatalf("line number = %v; want %s", srcLineNo, want)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// This is line 101. The test depends on that.
|
// This is line 123. The test depends on that.
|
||||||
func TestAddr2Line(t *testing.T) {
|
func TestAddr2Line(t *testing.T) {
|
||||||
testenv.MustHaveGoBuild(t)
|
testenv.MustHaveGoBuild(t)
|
||||||
|
|
||||||
tmpDir := t.TempDir()
|
tmpDir, err := os.MkdirTemp("", "TestAddr2Line")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("TempDir failed: ", err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
// Build copy of test binary with debug symbols,
|
// Build copy of test binary with debug symbols,
|
||||||
// since the one running now may not have them.
|
// since the one running now may not have them.
|
||||||
|
@ -11,7 +11,7 @@ import (
|
|||||||
"internal/testenv"
|
"internal/testenv"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"slices"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
@ -57,10 +57,7 @@ func TestGolden(t *testing.T) {
|
|||||||
// TODO(gri) remove extra pkg directory eventually
|
// TODO(gri) remove extra pkg directory eventually
|
||||||
goldenFile := filepath.Join("testdata", "src", "pkg", fi.Name(), "golden.txt")
|
goldenFile := filepath.Join("testdata", "src", "pkg", fi.Name(), "golden.txt")
|
||||||
w := NewWalker(nil, "testdata/src/pkg")
|
w := NewWalker(nil, "testdata/src/pkg")
|
||||||
pkg, err := w.import_(fi.Name())
|
pkg, _ := w.import_(fi.Name())
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("import %s: %v", fi.Name(), err)
|
|
||||||
}
|
|
||||||
w.export(pkg)
|
w.export(pkg)
|
||||||
|
|
||||||
if *updateGolden {
|
if *updateGolden {
|
||||||
@ -80,7 +77,7 @@ func TestGolden(t *testing.T) {
|
|||||||
t.Fatalf("opening golden.txt for package %q: %v", fi.Name(), err)
|
t.Fatalf("opening golden.txt for package %q: %v", fi.Name(), err)
|
||||||
}
|
}
|
||||||
wanted := strings.Split(string(bs), "\n")
|
wanted := strings.Split(string(bs), "\n")
|
||||||
slices.Sort(wanted)
|
sort.Strings(wanted)
|
||||||
for _, feature := range wanted {
|
for _, feature := range wanted {
|
||||||
if feature == "" {
|
if feature == "" {
|
||||||
continue
|
continue
|
||||||
@ -99,11 +96,6 @@ func TestGolden(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCompareAPI(t *testing.T) {
|
func TestCompareAPI(t *testing.T) {
|
||||||
if *flagCheck {
|
|
||||||
// not worth repeating in -check
|
|
||||||
t.Skip("skipping with -check set")
|
|
||||||
}
|
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
features, required, exception []string
|
features, required, exception []string
|
||||||
@ -185,11 +177,6 @@ func TestCompareAPI(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSkipInternal(t *testing.T) {
|
func TestSkipInternal(t *testing.T) {
|
||||||
if *flagCheck {
|
|
||||||
// not worth repeating in -check
|
|
||||||
t.Skip("skipping with -check set")
|
|
||||||
}
|
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pkg string
|
pkg string
|
||||||
want bool
|
want bool
|
||||||
@ -214,13 +201,7 @@ func BenchmarkAll(b *testing.B) {
|
|||||||
for _, context := range contexts {
|
for _, context := range contexts {
|
||||||
w := NewWalker(context, filepath.Join(testenv.GOROOT(b), "src"))
|
w := NewWalker(context, filepath.Join(testenv.GOROOT(b), "src"))
|
||||||
for _, name := range w.stdPackages {
|
for _, name := range w.stdPackages {
|
||||||
pkg, err := w.import_(name)
|
pkg, _ := w.import_(name)
|
||||||
if _, nogo := err.(*build.NoGoError); nogo {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
b.Fatalf("import %s (%s-%s): %v", name, context.GOOS, context.GOARCH, err)
|
|
||||||
}
|
|
||||||
w.export(pkg)
|
w.export(pkg)
|
||||||
}
|
}
|
||||||
w.Features()
|
w.Features()
|
||||||
@ -258,7 +239,8 @@ func TestIssue21181(t *testing.T) {
|
|||||||
w := NewWalker(context, "testdata/src/issue21181")
|
w := NewWalker(context, "testdata/src/issue21181")
|
||||||
pkg, err := w.import_("p")
|
pkg, err := w.import_("p")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("import %s (%s-%s): %v", "p", context.GOOS, context.GOARCH, err)
|
t.Fatalf("%s: (%s-%s) %s %v", err, context.GOOS, context.GOARCH,
|
||||||
|
pkg.Name(), w.imported)
|
||||||
}
|
}
|
||||||
w.export(pkg)
|
w.export(pkg)
|
||||||
}
|
}
|
||||||
@ -304,20 +286,14 @@ func TestIssue41358(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestIssue64958(t *testing.T) {
|
func TestIssue64958(t *testing.T) {
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("skipping with -short")
|
|
||||||
}
|
|
||||||
if *flagCheck {
|
|
||||||
// slow, not worth repeating in -check
|
|
||||||
t.Skip("skipping with -check set")
|
|
||||||
}
|
|
||||||
testenv.MustHaveGoBuild(t)
|
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
if x := recover(); x != nil {
|
if x := recover(); x != nil {
|
||||||
t.Errorf("expected no panic; recovered %v", x)
|
t.Errorf("expected no panic; recovered %v", x)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
testenv.MustHaveGoBuild(t)
|
||||||
|
|
||||||
for _, context := range contexts {
|
for _, context := range contexts {
|
||||||
w := NewWalker(context, "testdata/src/issue64958")
|
w := NewWalker(context, "testdata/src/issue64958")
|
||||||
pkg, err := w.importFrom("p", "", 0)
|
pkg, err := w.importFrom("p", "", 0)
|
||||||
|
@ -25,7 +25,7 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
"runtime"
|
"runtime"
|
||||||
"slices"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@ -232,8 +232,8 @@ func compareAPI(w io.Writer, features, required, exception []string) (ok bool) {
|
|||||||
featureSet := set(features)
|
featureSet := set(features)
|
||||||
exceptionSet := set(exception)
|
exceptionSet := set(exception)
|
||||||
|
|
||||||
slices.Sort(features)
|
sort.Strings(features)
|
||||||
slices.Sort(required)
|
sort.Strings(required)
|
||||||
|
|
||||||
take := func(sl *[]string) string {
|
take := func(sl *[]string) string {
|
||||||
s := (*sl)[0]
|
s := (*sl)[0]
|
||||||
@ -378,7 +378,7 @@ func (w *Walker) Features() (fs []string) {
|
|||||||
for f := range w.features {
|
for f := range w.features {
|
||||||
fs = append(fs, f)
|
fs = append(fs, f)
|
||||||
}
|
}
|
||||||
slices.Sort(fs)
|
sort.Strings(fs)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -431,7 +431,7 @@ func tagKey(dir string, context *build.Context, tags []string) string {
|
|||||||
// an indirect imported package. See https://github.com/golang/go/issues/21181
|
// an indirect imported package. See https://github.com/golang/go/issues/21181
|
||||||
// for more detail.
|
// for more detail.
|
||||||
tags = append(tags, context.GOOS, context.GOARCH)
|
tags = append(tags, context.GOOS, context.GOARCH)
|
||||||
slices.Sort(tags)
|
sort.Strings(tags)
|
||||||
|
|
||||||
for _, tag := range tags {
|
for _, tag := range tags {
|
||||||
if ctags[tag] {
|
if ctags[tag] {
|
||||||
@ -535,7 +535,7 @@ func (w *Walker) loadImports() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
slices.Sort(stdPackages)
|
sort.Strings(stdPackages)
|
||||||
imports = listImports{
|
imports = listImports{
|
||||||
stdPackages: stdPackages,
|
stdPackages: stdPackages,
|
||||||
importMap: importMap,
|
importMap: importMap,
|
||||||
@ -717,7 +717,7 @@ func sortedMethodNames(typ *types.Interface) []string {
|
|||||||
for i := range list {
|
for i := range list {
|
||||||
list[i] = typ.Method(i).Name()
|
list[i] = typ.Method(i).Name()
|
||||||
}
|
}
|
||||||
slices.Sort(list)
|
sort.Strings(list)
|
||||||
return list
|
return list
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -747,7 +747,7 @@ func (w *Walker) sortedEmbeddeds(typ *types.Interface) []string {
|
|||||||
list = append(list, buf.String())
|
list = append(list, buf.String())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
slices.Sort(list)
|
sort.Strings(list)
|
||||||
return list
|
return list
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1019,7 +1019,7 @@ func (w *Walker) emitType(obj *types.TypeName) {
|
|||||||
|
|
||||||
func (w *Walker) emitStructType(name string, typ *types.Struct) {
|
func (w *Walker) emitStructType(name string, typ *types.Struct) {
|
||||||
typeStruct := fmt.Sprintf("type %s struct", name)
|
typeStruct := fmt.Sprintf("type %s struct", name)
|
||||||
w.emitf("%s", typeStruct)
|
w.emitf(typeStruct)
|
||||||
defer w.pushScope(typeStruct)()
|
defer w.pushScope(typeStruct)()
|
||||||
|
|
||||||
for i := 0; i < typ.NumFields(); i++ {
|
for i := 0; i < typ.NumFields(); i++ {
|
||||||
@ -1058,7 +1058,7 @@ func (w *Walker) emitIfaceType(name string, typ *types.Interface) {
|
|||||||
if w.isDeprecated(m) {
|
if w.isDeprecated(m) {
|
||||||
w.emitf("%s //deprecated", m.Name())
|
w.emitf("%s //deprecated", m.Name())
|
||||||
}
|
}
|
||||||
w.emitf("%s%s", m.Name(), w.signatureString(m.Signature()))
|
w.emitf("%s%s", m.Name(), w.signatureString(m.Type().(*types.Signature)))
|
||||||
}
|
}
|
||||||
|
|
||||||
if !complete {
|
if !complete {
|
||||||
@ -1083,12 +1083,12 @@ func (w *Walker) emitIfaceType(name string, typ *types.Interface) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
slices.Sort(methodNames)
|
sort.Strings(methodNames)
|
||||||
w.emitf("type %s interface { %s }", name, strings.Join(methodNames, ", "))
|
w.emitf("type %s interface { %s }", name, strings.Join(methodNames, ", "))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *Walker) emitFunc(f *types.Func) {
|
func (w *Walker) emitFunc(f *types.Func) {
|
||||||
sig := f.Signature()
|
sig := f.Type().(*types.Signature)
|
||||||
if sig.Recv() != nil {
|
if sig.Recv() != nil {
|
||||||
panic("method considered a regular function: " + f.String())
|
panic("method considered a regular function: " + f.String())
|
||||||
}
|
}
|
||||||
|
@ -520,27 +520,15 @@ func archLoong64(linkArch *obj.LinkArch) *Arch {
|
|||||||
for i := loong64.REG_R0; i <= loong64.REG_R31; i++ {
|
for i := loong64.REG_R0; i <= loong64.REG_R31; i++ {
|
||||||
register[obj.Rconv(i)] = int16(i)
|
register[obj.Rconv(i)] = int16(i)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := loong64.REG_F0; i <= loong64.REG_F31; i++ {
|
for i := loong64.REG_F0; i <= loong64.REG_F31; i++ {
|
||||||
register[obj.Rconv(i)] = int16(i)
|
register[obj.Rconv(i)] = int16(i)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := loong64.REG_FCSR0; i <= loong64.REG_FCSR31; i++ {
|
for i := loong64.REG_FCSR0; i <= loong64.REG_FCSR31; i++ {
|
||||||
register[obj.Rconv(i)] = int16(i)
|
register[obj.Rconv(i)] = int16(i)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := loong64.REG_FCC0; i <= loong64.REG_FCC31; i++ {
|
for i := loong64.REG_FCC0; i <= loong64.REG_FCC31; i++ {
|
||||||
register[obj.Rconv(i)] = int16(i)
|
register[obj.Rconv(i)] = int16(i)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := loong64.REG_V0; i <= loong64.REG_V31; i++ {
|
|
||||||
register[obj.Rconv(i)] = int16(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := loong64.REG_X0; i <= loong64.REG_X31; i++ {
|
|
||||||
register[obj.Rconv(i)] = int16(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pseudo-registers.
|
// Pseudo-registers.
|
||||||
register["SB"] = RSB
|
register["SB"] = RSB
|
||||||
register["FP"] = RFP
|
register["FP"] = RFP
|
||||||
@ -553,8 +541,6 @@ func archLoong64(linkArch *obj.LinkArch) *Arch {
|
|||||||
"FCSR": true,
|
"FCSR": true,
|
||||||
"FCC": true,
|
"FCC": true,
|
||||||
"R": true,
|
"R": true,
|
||||||
"V": true,
|
|
||||||
"X": true,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
instructions := make(map[string]obj.As)
|
instructions := make(map[string]obj.As)
|
||||||
@ -600,10 +586,6 @@ func archRISCV64(shared bool) *Arch {
|
|||||||
name := fmt.Sprintf("F%d", i-riscv.REG_F0)
|
name := fmt.Sprintf("F%d", i-riscv.REG_F0)
|
||||||
register[name] = int16(i)
|
register[name] = int16(i)
|
||||||
}
|
}
|
||||||
for i := riscv.REG_V0; i <= riscv.REG_V31; i++ {
|
|
||||||
name := fmt.Sprintf("V%d", i-riscv.REG_V0)
|
|
||||||
register[name] = int16(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
// General registers with ABI names.
|
// General registers with ABI names.
|
||||||
register["ZERO"] = riscv.REG_ZERO
|
register["ZERO"] = riscv.REG_ZERO
|
||||||
|
@ -101,7 +101,7 @@ func IsARMCMP(op obj.As) bool {
|
|||||||
// one of the STREX-like instructions that require special handling.
|
// one of the STREX-like instructions that require special handling.
|
||||||
func IsARMSTREX(op obj.As) bool {
|
func IsARMSTREX(op obj.As) bool {
|
||||||
switch op {
|
switch op {
|
||||||
case arm.ASTREX, arm.ASTREXD, arm.ASTREXB, arm.ASWPW, arm.ASWPBU:
|
case arm.ASTREX, arm.ASTREXD, arm.ASWPW, arm.ASWPBU:
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
|
@ -59,10 +59,10 @@ func jumpArm64(word string) bool {
|
|||||||
|
|
||||||
var arm64SpecialOperand map[string]arm64.SpecialOperand
|
var arm64SpecialOperand map[string]arm64.SpecialOperand
|
||||||
|
|
||||||
// ARM64SpecialOperand returns the internal representation of a special operand.
|
// GetARM64SpecialOperand returns the internal representation of a special operand.
|
||||||
func ARM64SpecialOperand(name string) arm64.SpecialOperand {
|
func GetARM64SpecialOperand(name string) arm64.SpecialOperand {
|
||||||
if arm64SpecialOperand == nil {
|
if arm64SpecialOperand == nil {
|
||||||
// Generate mapping when function is first called.
|
// Generate the mapping automatically when the first time the function is called.
|
||||||
arm64SpecialOperand = map[string]arm64.SpecialOperand{}
|
arm64SpecialOperand = map[string]arm64.SpecialOperand{}
|
||||||
for opd := arm64.SPOP_BEGIN; opd < arm64.SPOP_END; opd++ {
|
for opd := arm64.SPOP_BEGIN; opd < arm64.SPOP_END; opd++ {
|
||||||
arm64SpecialOperand[opd.String()] = opd
|
arm64SpecialOperand[opd.String()] = opd
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user