mirror of
https://github.com/golang/go.git
synced 2025-05-25 01:11:23 +00:00
The x86 backend automatically rewrites MOV $0, AX to XOR AX, AX. That rewrite
isn't OK when the flags register is live across the MOV, because XOR (unlike
MOV) clobbers the flags. Keep track of which moves care about preserving
flags, then disable this rewrite for them.

On x86, Prog.Mark was being used to hold the length of the instruction. We
already store that in Prog.Isize, so there is no need to store it in
Prog.Mark as well. This frees up Prog.Mark to hold a bitmask on x86, just
like on all the other architectures.

Update #12405

Change-Id: Ibad8a8f41fc6222bec1e4904221887d3cc3ca029
Reviewed-on: https://go-review.googlesource.com/18861
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Russ Cox <rsc@golang.org>
129 lines · 3.7 KiB · Go
// Copyright 2015 The Go Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style
|
|
// license that can be found in the LICENSE file.
|
|
|
|
package ssa
|
|
|
|
// flagRegMask is the bit in a regMask that represents the flags
// (pseudo-)register; it is tested against opcodeTable[op].reg.clobbers
// to detect instructions that destroy the current flags value.
// NOTE(review): bit 33 presumably matches the flags register's number
// in this architecture's register numbering — see the TODO below.
const flagRegMask = regMask(1) << 33 // TODO: arch-specific
|
|
|
|
// flagalloc allocates the flag register among all the flag-generating
// instructions. Flag values are recomputed if they need to be
// spilled/restored.
//
// The pass proceeds in three steps:
//  1. Compute, per block, which flag value (if any) we want live in the
//     flags register at the end of that block (the end[] array).
//  2. Rewrite each block so every flag use sees its value in the flags
//     register, inserting recomputations (copyInto) where needed.
//  3. Record which blocks end with live flags in b.FlagsLiveAtEnd.
func flagalloc(f *Func) {
	// Compute the in-register flag value we want at the end of
	// each block. This is basically a best-effort live variable
	// analysis, so it can be much simpler than a full analysis.
	// TODO: do we really need to keep flag values live across blocks?
	// Could we force the flags register to be unused at basic block
	// boundaries? Then we wouldn't need this computation.
	//
	// end[b.ID] == nil means no flag value needs to be live at the
	// end of block b.
	end := make([]*Value, f.NumBlocks())
	// Two reverse sweeps over the block list. Since this analysis is
	// only best-effort (see TODO above), a fixed iteration count is
	// used rather than iterating to a dataflow fixed point.
	for n := 0; n < 2; n++ {
		// Walk blocks backwards. Poor-man's postorder traversal.
		for i := len(f.Blocks) - 1; i >= 0; i-- {
			b := f.Blocks[i]
			// Walk values backwards to figure out what flag
			// value we want in the flag register at the start
			// of the block.
			// Start from what this block's successors want at its end...
			flag := end[b.ID]
			// ...but a flags control value must be live at the end
			// of the block, so it takes precedence.
			if b.Control != nil && b.Control.Type.IsFlags() {
				flag = b.Control
			}
			for j := len(b.Values) - 1; j >= 0; j-- {
				v := b.Values[j]
				if v == flag {
					// v generates the wanted flag value itself,
					// so nothing needs to be live before v.
					flag = nil
				}
				if opcodeTable[v.Op].reg.clobbers&flagRegMask != 0 {
					// v destroys the flags register, so no flag
					// value survives across it.
					flag = nil
				}
				for _, a := range v.Args {
					if a.Type.IsFlags() {
						// v consumes a flag value, so that value must
						// be live in the flags register just before v.
						flag = a
					}
				}
			}
			// Ask every predecessor to end with this flag value live.
			// (Best effort: a block with several successors may have
			// this overwritten by a later successor in the sweep.)
			for _, p := range b.Preds {
				end[p.ID] = flag
			}
		}
	}
	// For blocks which have a flags control value, that's the only value
	// we can leave in the flags register at the end of the block. (There
	// is no place to put a flag regeneration instruction.)
	for _, b := range f.Blocks {
		v := b.Control
		if v != nil && v.Type.IsFlags() && end[b.ID] != v {
			end[b.ID] = nil
		}
	}

	// Add flag recomputations where they are needed.
	// TODO: Remove original instructions if they are never used.
	// oldSched is reused across blocks to avoid reallocating.
	var oldSched []*Value
	for _, b := range f.Blocks {
		// Save the existing schedule and rebuild b.Values in place.
		oldSched = append(oldSched[:0], b.Values...)
		b.Values = b.Values[:0]
		// The current live flag value.
		var flag *Value
		if len(b.Preds) > 0 {
			// All predecessors must agree on the incoming flag value.
			flag = end[b.Preds[0].ID]
			// Note: the following condition depends on the lack of critical edges.
			for _, p := range b.Preds[1:] {
				if end[p.ID] != flag {
					f.Fatalf("live flag in %s's predecessors not consistent", b)
				}
			}
		}
		for _, v := range oldSched {
			if v.Op == OpPhi && v.Type.IsFlags() {
				f.Fatalf("phi of flags not supported: %s", v.LongString())
			}
			// Make sure any flag arg of v is in the flags register.
			// If not, recompute it.
			for i, a := range v.Args {
				if !a.Type.IsFlags() {
					continue
				}
				if a == flag {
					continue
				}
				// Recalculate a: emit a fresh copy of the flag
				// generator into this block, just before v.
				c := a.copyInto(b)
				// Update v.
				v.SetArg(i, c)
				// Remember the most-recently computed flag value.
				flag = c
			}
			// Issue v.
			b.Values = append(b.Values, v)
			if opcodeTable[v.Op].reg.clobbers&flagRegMask != 0 {
				// v destroys the flags register.
				flag = nil
			}
			if v.Type.IsFlags() {
				// v produces a new flag value.
				flag = v
			}
		}
		if v := b.Control; v != nil && v != flag && v.Type.IsFlags() {
			// Recalculate control value so it is live in flags
			// at the end of the block.
			c := v.copyInto(b)
			b.Control = c
			flag = c
		}
		if v := end[b.ID]; v != nil && v != flag {
			// Need to reissue flag generator for use by
			// subsequent blocks.
			_ = v.copyInto(b)
			// Note: this flag generator is not properly linked up
			// with the flag users. This breaks the SSA representation.
			// We could fix up the users with another pass, but for now
			// we'll just leave it. (Regalloc has the same issue for
			// standard regs, and it runs next.)
		}
	}

	// Save live flag state for later (read downstream via
	// b.FlagsLiveAtEnd; see the commit description above).
	for _, b := range f.Blocks {
		b.FlagsLiveAtEnd = end[b.ID] != nil
	}
}
|