Revert "cmd/compile: enhance tighten pass for memory values"
This reverts CL 458755.

Reason for revert: broke make.bash on GOAMD64=v3:

/workdir/go/src/crypto/sha1/sha1.go:54:35: internal compiler error: '(*digest).MarshalBinary': func (*digest).MarshalBinary, startMem[b13] has different values, old v206, new v338

goroutine 34 [running]:
runtime/debug.Stack()
    /workdir/go/src/runtime/debug/stack.go:24 +0x9f
bootstrap/cmd/compile/internal/base.FatalfAt({0x13, 0xaa0f1}, {0xc000db4440, 0x40}, {0xc0013b0000, 0x5, 0x5})
    /workdir/go/src/cmd/compile/internal/base/print.go:234 +0x2d1
bootstrap/cmd/compile/internal/base.Fatalf(...)
    /workdir/go/src/cmd/compile/internal/base/print.go:203
bootstrap/cmd/compile/internal/ssagen.(*ssafn).Fatalf(0xc000d90000, {0x13, 0xaa0f1}, {0xcb7b91, 0x3a}, {0xc000d99bc0, 0x4, 0x4})
    /workdir/go/src/cmd/compile/internal/ssagen/ssa.go:7896 +0x1f8
bootstrap/cmd/compile/internal/ssa.(*Func).Fatalf(0xc000d82340, {0xcb7b91, 0x3a}, {0xc000d99bc0, 0x4, 0x4})
    /workdir/go/src/cmd/compile/internal/ssa/func.go:716 +0x342
bootstrap/cmd/compile/internal/ssa.memState(0xc000d82340, {0xc000ec6200, 0x22, 0x40}, {0xc001046000, 0x22, 0x40})
    /workdir/go/src/cmd/compile/internal/ssa/tighten.go:240 +0x6c5
bootstrap/cmd/compile/internal/ssa.tighten(0xc000d82340)
[...]

Change-Id: Ic445fb48fe0f2c60ac67abe259b66594f1419152
Reviewed-on: https://go-review.googlesource.com/c/go/+/492335
Run-TryBot: Daniel Martí <mvdan@mvdan.cc>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: David Chase <drchase@google.com>
Parent: 129b597de3
Commit: aa6e168480
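The check that fires is in the memState helper removed by this revert (the full function appears in the tighten.go hunks below): memState derives a single starting memory value per basic block and calls Fatalf when it derives two different ones for the same block. The following is a minimal, self-contained sketch of that invariant only; the block/value types here are simplified stand-ins for the SSA types, not the real compiler API.

package main

import "fmt"

// Hypothetical stand-ins for the SSA Block/Value types, for illustration only.
type value struct{ id int }
type block struct {
	id   int
	mems []*value // candidate starting-memory values derived for this block
}

// checkStartMem mirrors the invariant the reverted memState enforces:
// a block must resolve to exactly one starting memory value.
func checkStartMem(blocks []*block) error {
	startMem := map[int]*value{}
	for _, b := range blocks {
		for _, mem := range b.mems {
			if old := startMem[b.id]; old != nil && old != mem {
				// The condition behind the reported ICE:
				// "startMem[b13] has different values, old v206, new v338".
				return fmt.Errorf("startMem[b%d] has different values, old v%d, new v%d",
					b.id, old.id, mem.id)
			}
			startMem[b.id] = mem
		}
	}
	return nil
}

func main() {
	v206, v338 := &value{id: 206}, &value{id: 338}
	fmt.Println(checkStartMem([]*block{{id: 13, mems: []*value{v206, v338}}}))
}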
src/cmd/compile/internal/ssa/regalloc.go

@@ -602,11 +602,6 @@ func isLeaf(f *Func) bool {
     return true
 }
 
-// needRegister reports whether v needs a register.
-func (v *Value) needRegister() bool {
-    return !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple()
-}
-
 func (s *regAllocState) init(f *Func) {
     s.f = f
     s.f.RegAlloc = s.f.Cache.locs[:0]
@@ -707,7 +702,7 @@ func (s *regAllocState) init(f *Func) {
     s.copies = make(map[*Value]bool)
     for _, b := range s.visitOrder {
         for _, v := range b.Values {
-            if v.needRegister() {
+            if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple() {
                 s.values[v.ID].needReg = true
                 s.values[v.ID].rematerializeable = v.rematerializeable()
                 s.orig[v.ID] = v
src/cmd/compile/internal/ssa/tighten.go

@@ -21,14 +21,6 @@ func tighten(f *Func) {
 
     canMove := f.Cache.allocBoolSlice(f.NumValues())
     defer f.Cache.freeBoolSlice(canMove)
 
-    // Compute the memory states of each block.
-    startMem := f.Cache.allocValueSlice(f.NumBlocks())
-    defer f.Cache.freeValueSlice(startMem)
-    endMem := f.Cache.allocValueSlice(f.NumBlocks())
-    defer f.Cache.freeValueSlice(endMem)
-    memState(f, startMem, endMem)
-
     for _, b := range f.Blocks {
         for _, v := range b.Values {
             if v.Op.isLoweredGetClosurePtr() {
@@ -43,12 +35,15 @@ func tighten(f *Func) {
                 // SelectN is typically, ultimately, a register.
                 continue
             }
+            if v.MemoryArg() != nil {
+                // We can't move values which have a memory arg - it might
+                // make two memory values live across a block boundary.
+                continue
+            }
             // Count arguments which will need a register.
             narg := 0
             for _, a := range v.Args {
-                // SP and SB are special registers and have no effect on
-                // the allocation of general-purpose registers.
-                if a.needRegister() && a.Op != OpSB && a.Op != OpSP {
+                if !a.rematerializeable() {
                     narg++
                 }
             }
@@ -143,16 +138,6 @@ func tighten(f *Func) {
                 // v is not moveable, or is already in correct place.
                 continue
             }
-            if mem := v.MemoryArg(); mem != nil {
-                if startMem[t.ID] != mem {
-                    // We can't move a value with a memory arg unless the target block
-                    // has that memory arg as its starting memory.
-                    continue
-                }
-            }
-            if f.pass.debug > 0 {
-                b.Func.Warnl(v.Pos, "%v is moved", v.Op)
-            }
             // Move v to the block which dominates its uses.
             t.Values = append(t.Values, v)
             v.Block = t
@@ -189,81 +174,3 @@ func phiTighten(f *Func) {
         }
     }
 }
-
-// memState computes the memory state at the beginning and end of each block of
-// the function. The memory state is represented by a value of mem type.
-// The returned result is stored in startMem and endMem, and endMem is nil for
-// blocks with no successors (Exit,Ret,RetJmp blocks). This algorithm is not
-// suitable for infinite loop blocks that do not contain any mem operations.
-// For example:
-// b1:
-//
-//    (some values)
-//
-// plain -> b2
-// b2: <- b1 b2
-// Plain -> b2
-//
-// Algorithm introduction:
-//  1. The start memory state of a block is InitMem, a Phi node of type mem or
-//     an incoming memory value.
-//  2. The start memory state of a block is consistent with the end memory state
-//     of its parent nodes. If the start memory state of a block is a Phi value,
-//     then the end memory state of its parent nodes is consistent with the
-//     corresponding argument value of the Phi node.
-//  3. The algorithm first obtains the memory state of some blocks in the tree
-//     in the first step. Then floods the known memory state to other nodes in
-//     the second step.
-func memState(f *Func, startMem, endMem []*Value) {
-    // This slice contains the set of blocks that have had their startMem set but this
-    // startMem value has not yet been propagated to the endMem of its predecessors
-    changed := make([]*Block, 0)
-    // First step, init the memory state of some blocks.
-    for _, b := range f.Blocks {
-        for _, v := range b.Values {
-            var mem *Value
-            if v.Op == OpPhi {
-                if v.Type.IsMemory() {
-                    mem = v
-                }
-            } else if v.Op == OpInitMem {
-                mem = v // This is actually not needed.
-            } else if a := v.MemoryArg(); a != nil && a.Block != b {
-                // The only incoming memory value doesn't belong to this block.
-                mem = a
-            }
-            if mem != nil {
-                if old := startMem[b.ID]; old != nil {
-                    if old == mem {
-                        continue
-                    }
-                    f.Fatalf("func %s, startMem[%v] has different values, old %v, new %v", f.Name, b, old, mem)
-                }
-                startMem[b.ID] = mem
-                changed = append(changed, b)
-            }
-        }
-    }
-
-    // Second step, floods the known memory state of some blocks to others.
-    for len(changed) != 0 {
-        top := changed[0]
-        changed = changed[1:]
-        mem := startMem[top.ID]
-        for i, p := range top.Preds {
-            pb := p.b
-            if endMem[pb.ID] != nil {
-                continue
-            }
-            if mem.Op == OpPhi && mem.Block == top {
-                endMem[pb.ID] = mem.Args[i]
-            } else {
-                endMem[pb.ID] = mem
-            }
-            if startMem[pb.ID] == nil {
-                startMem[pb.ID] = endMem[pb.ID]
-                changed = append(changed, pb)
-            }
-        }
-    }
-}
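The removed memState helper above is a two-phase worklist flood: seed every block whose starting memory value can be read off directly (a mem-typed Phi, OpInitMem, or a memory argument defined in another block), then propagate that knowledge backwards, setting each predecessor's end state and seeding it in turn until nothing changes. Below is a self-contained sketch of the same flood on a toy graph; the block type and string labels are hypothetical stand-ins for the SSA types, and per-edge Phi-argument refinement is omitted.

package main

import "fmt"

// Toy stand-in for an SSA block: only what the flood needs.
type toyBlock struct {
	id    int
	preds []*toyBlock
}

// floodStartMem propagates known start-memory labels backwards:
// a predecessor's end state equals its successor's start state, and a
// predecessor whose start state is still unknown gets seeded in turn.
func floodStartMem(blocks []*toyBlock, seeds map[int]string) (start, end map[int]string) {
	start = map[int]string{}
	end = map[int]string{}
	changed := []*toyBlock{}
	for _, b := range blocks {
		if m, ok := seeds[b.id]; ok {
			start[b.id] = m
			changed = append(changed, b)
		}
	}
	for len(changed) != 0 {
		top := changed[0]
		changed = changed[1:]
		mem := start[top.id]
		for _, pb := range top.preds {
			if _, ok := end[pb.id]; ok {
				continue
			}
			end[pb.id] = mem
			if _, ok := start[pb.id]; !ok {
				start[pb.id] = mem
				changed = append(changed, pb)
			}
		}
	}
	return start, end
}

func main() {
	// b1 -> b2 -> b3; only b3 mentions memory explicitly.
	b1 := &toyBlock{id: 1}
	b2 := &toyBlock{id: 2, preds: []*toyBlock{b1}}
	b3 := &toyBlock{id: 3, preds: []*toyBlock{b2}}
	start, end := floodStartMem([]*toyBlock{b1, b2, b3}, map[int]string{3: "v9"})
	fmt.Println(start, end) // map[1:v9 2:v9 3:v9] map[1:v9 2:v9]
}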
@@ -1,22 +0,0 @@
-// errorcheck -0 -d=ssa/tighten/debug=1
-
-//go:build arm64
-
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-var (
-    e  any
-    ts uint16
-)
-
-func moveValuesWithMemoryArg(len int) {
-    for n := 0; n < len; n++ {
-        // Load of e.data is lowed as a MOVDload op, which has a memory
-        // argument. It's moved near where it's used.
-        _ = e != ts // ERROR "MOVDload is moved$" "MOVDaddr is moved$"
-    }
-}
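The deleted file above was an errorcheck test: the compiler runs with the tighten debug flag and its "%v is moved" diagnostics (emitted by the Warnl call also removed in the tighten.go hunk) must match the ERROR comments. Roughly the same diagnostics can be inspected by hand with an invocation along these lines; the exact command and the package path are assumptions, not part of this commit:

    GOARCH=arm64 go build -gcflags=-d=ssa/tighten/debug=1 ./somepkg

With the enhancement reverted, values that carry a memory argument are no longer moved, so the test is deleted rather than relaxed.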