cmd/compile: clean up SSA-building code
Now that the write barrier insertion is moved to SSA, the SSA building
code can be simplified.

Updates #17583.

Change-Id: I5cacc034b11aa90b0abe6f8dd97e4e3994e2bc25
Reviewed-on: https://go-review.googlesource.com/36840
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
This commit is contained in:
parent 9ebf3d5100
commit 1b85300602
src/cmd/compile/internal/gc/ssa.go

@@ -50,14 +50,6 @@ func buildssa(fn *Node) *ssa.Func {
 	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
 		s.cgoUnsafeArgs = true
 	}
-	if fn.Func.Pragma&Nowritebarrier != 0 {
-		s.noWB = true
-	}
-	defer func() {
-		if s.WBPos.IsKnown() {
-			fn.Func.WBPos = s.WBPos
-		}
-	}()
 	// TODO(khr): build config just once at the start of the compiler binary

 	ssaExp.log = printssa
@@ -68,6 +60,14 @@ func buildssa(fn *Node) *ssa.Func {
 	if fn.Func.Pragma&Nosplit != 0 {
 		s.f.NoSplit = true
 	}
+	if fn.Func.Pragma&Nowritebarrier != 0 {
+		s.f.NoWB = true
+	}
+	defer func() {
+		if s.f.WBPos.IsKnown() {
+			fn.Func.WBPos = s.f.WBPos
+		}
+	}()
 	s.exitCode = fn.Func.Exit
 	s.panics = map[funcLine]*ssa.Block{}
 	s.config.DebugTest = s.config.DebugHashMatch("GOSSAHASH", name)
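Background for the two hunks above: the Nowritebarrier state moves from the gc-side builder (s.noWB, s.WBPos) onto ssa.Func, and the deferred closure copies the recorded position back onto fn.Func once compilation has run. A minimal standalone sketch of that defer-and-propagate pattern (toy types, not compiler code):

package main

import "fmt"

type fun struct{ wbLine int } // stand-in for ssa.Func; 0 means "no barrier seen"

func compileInto(f *fun) { f.wbLine = 42 } // a later phase records the position

func build() (line int) {
	f := &fun{}
	defer func() {
		if f.wbLine != 0 { // analogous to s.f.WBPos.IsKnown()
			line = f.wbLine // like fn.Func.WBPos = s.f.WBPos
		}
	}()
	compileInto(f)
	return
}

func main() { fmt.Println(build()) } // prints 42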
@@ -150,6 +150,10 @@ func buildssa(fn *Node) *ssa.Func {

 	// Main call to ssa package to compile function
 	ssa.Compile(s.f)
+	if nerrors > 0 {
+		s.f.Free()
+		return nil
+	}

 	return s.f
 }
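Since the write-barrier pass can now report "write barrier prohibited" through the frontend instead of aborting, buildssa checks the error count after ssa.Compile and discards the half-built function. A sketch of that report-then-check shape; errorCount and free are stand-ins for the compiler's nerrors and s.f.Free:

package main

import "fmt"

var errorCount int // plays the role of the compiler's global nerrors

type fn struct{}

func (f *fn) free() {} // reclaim the half-built function

func report(msg string) { errorCount++; fmt.Println("error:", msg) }

func build(bad bool) *fn {
	f := &fn{}
	if bad {
		report("write barrier prohibited") // reported, but compilation continues
	}
	if errorCount > 0 { // the nerrors check after ssa.Compile
		f.free()
		return nil
	}
	return f
}

func main() { fmt.Println(build(true)) } // <nil>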
@@ -214,8 +218,6 @@ type state struct {
 	placeholder *ssa.Value

 	cgoUnsafeArgs bool
-	noWB          bool
-	WBPos         src.XPos // line number of first write barrier. 0=no write barriers
 }

 type funcLine struct {
@@ -517,8 +519,8 @@ func (s *state) stmt(n *Node) {
 			deref = true
 			res = res.Args[0]
 		}
-		s.assign(n.List.First(), res, needwritebarrier(n.List.First()), deref, 0)
-		s.assign(n.List.Second(), resok, false, false, 0)
+		s.assign(n.List.First(), res, deref, 0)
+		s.assign(n.List.Second(), resok, false, 0)
 		return

 	case OAS2FUNC:
@@ -529,8 +531,8 @@ func (s *state) stmt(n *Node) {
 		v := s.intrinsicCall(n.Rlist.First())
 		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
 		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
-		s.assign(n.List.First(), v1, needwritebarrier(n.List.First()), false, 0)
-		s.assign(n.List.Second(), v2, needwritebarrier(n.List.Second()), false, 0)
+		s.assign(n.List.First(), v1, false, 0)
+		s.assign(n.List.Second(), v2, false, 0)
 		return

 	case ODCL:
@@ -624,7 +626,6 @@ func (s *state) stmt(n *Node) {
 			}
 		}
 		var r *ssa.Value
-		needwb := n.Right != nil && needwritebarrier(n.Left)
 		deref := !canSSAType(t)
 		if deref {
 			if rhs == nil {
@@ -639,18 +640,6 @@ func (s *state) stmt(n *Node) {
 				r = s.expr(rhs)
 			}
 		}
-		if rhs != nil && rhs.Op == OAPPEND && needwritebarrier(n.Left) {
-			// The frontend gets rid of the write barrier to enable the special OAPPEND
-			// handling above, but since this is not a special case, we need it.
-			// TODO: just add a ptr graying to the end of growslice?
-			// TODO: check whether we need to provide special handling and a write barrier
-			// for ODOTTYPE and ORECV also.
-			// They get similar wb-removal treatment in walk.go:OAS.
-			needwb = true
-		}
-		if needwb && Debug_wb > 1 {
-			Warnl(n.Pos, "marking %v for barrier", n.Left)
-		}

 		var skip skipMask
 		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
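The surviving skip computation covers self-slicing assignments, where parts of the slice header are unchanged and need not be stored back. A source-level illustration (ordinary Go, not compiler internals):

package main

import "fmt"

func main() {
	s := make([]byte, 8, 16)
	// In s = s[:4] the data pointer and capacity are unchanged, so the
	// compiler can skip storing those parts of the header (the skip mask);
	// only len is written back.
	s = s[:4]
	fmt.Println(len(s), cap(s)) // 4 16
}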
@@ -682,7 +671,7 @@ func (s *state) stmt(n *Node) {
 			}
 		}

-		s.assign(n.Left, r, needwb, deref, skip)
+		s.assign(n.Left, r, deref, skip)

 	case OIF:
 		bThen := s.f.NewBlock(ssa.BlockPlain)
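assign drops its wb parameter because the SSA writebarrier pass now decides per store: only a pointer-containing value stored outside the stack needs a barrier. In source terms (illustration only; the compiler makes this choice on SSA values, not on syntax):

package main

type node struct {
	next *node // pointer field
	n    int   // scalar field
}

var global *node

func f(v *node) {
	var local node
	local.next = v // store to a stack slot: no barrier (isStackAddr)
	global = v     // pointer store outside the stack: barrier emitted
	v.n = 1        // scalar store: type has no pointers, no barrier
	_ = local
}

func main() { f(&node{}) }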
@@ -2134,13 +2123,9 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
 		store := s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem())
 		store.Aux = Types[TINT]
 		s.vars[&memVar] = store
-		if ssa.IsStackAddr(addr) {
-			store := s.newValue3I(ssa.OpStore, ssa.TypeMem, pt.Size(), addr, r[0], s.mem())
-			store.Aux = pt
-			s.vars[&memVar] = store
-		} else {
-			s.insertWBstore(pt, addr, r[0], 0)
-		}
+		store = s.newValue3I(ssa.OpStore, ssa.TypeMem, pt.Size(), addr, r[0], s.mem())
+		store.Aux = pt
+		s.vars[&memVar] = store
 		// load the value we just stored to avoid having to spill it
 		s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
 		s.vars[&lenVar] = r[1] // avoid a spill in the fast path
@@ -2188,28 +2173,16 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
 		c = s.variable(&capVar, Types[TINT]) // generates phi for cap
 	}
 	p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
-	// TODO: just one write barrier call for all of these writes?
-	// TODO: maybe just one writeBarrier.enabled check?
 	for i, arg := range args {
 		addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i)))
 		if arg.store {
-			if haspointers(et) {
-				s.insertWBstore(et, addr, arg.v, 0)
-			} else {
-				store := s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
-				store.Aux = et
-				s.vars[&memVar] = store
-			}
+			s.storeType(et, addr, arg.v, 0)
 		} else {
-			if haspointers(et) {
-				s.insertWBmove(et, addr, arg.v)
-			} else {
-				store := s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(et), addr, arg.v, s.mem())
-				store.Aux = et
-				s.vars[&memVar] = store
-			}
+			store := s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(et), addr, arg.v, s.mem())
+			store.Aux = et
+			s.vars[&memVar] = store
 		}
 	}

 	delete(s.vars, &ptrVar)
 	if inplace {
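The loop above stores each appended argument individually, with storeType choosing a plain or barriered store per element. Roughly what the lowering of a two-argument append does, written as ordinary Go with a hypothetical grow helper in place of runtime.growslice:

package main

import "fmt"

// grow is a stand-in for runtime.growslice.
func grow(s []*int, need int) []*int {
	t := make([]*int, len(s), 2*need)
	copy(t, s)
	return t
}

func appendTwo(s []*int, a, b *int) []*int {
	l := len(s)
	if cap(s)-l < 2 {
		s = grow(s, l+2)
	}
	s = s[:l+2]
	s[l] = a   // one store per argument; pointer element types
	s[l+1] = b // get write barriers, scalar elements do not
	return s
}

func main() {
	x, y := 1, 2
	fmt.Println(len(appendTwo(nil, &x, &y))) // 2
}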
@@ -2278,9 +2251,8 @@ const (
 // Right has already been evaluated to ssa, left has not.
 // If deref is true, then we do left = *right instead (and right has already been nil-checked).
 // If deref is true and right == nil, just do left = 0.
-// Include a write barrier if wb is true.
 // skip indicates assignments (at the top level) that can be avoided.
-func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, skip skipMask) {
+func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
 	if left.Op == ONAME && isblank(left) {
 		return
 	}
@@ -2321,7 +2293,7 @@ func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, skip skipMa
 		}

 		// Recursively assign the new value we've made to the base of the dot op.
-		s.assign(left.Left, new, false, false, 0)
+		s.assign(left.Left, new, false, 0)
 		// TODO: do we need to update named values here?
 		return
 	}
@@ -2346,7 +2318,7 @@ func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, skip skipMa
 			i = s.extendIndex(i, panicindex)
 			s.boundsCheck(i, s.constInt(Types[TINT], 1))
 			v := s.newValue1(ssa.OpArrayMake1, t, right)
-			s.assign(left.Left, v, false, false, 0)
+			s.assign(left.Left, v, false, 0)
 			return
 		}
 		// Update variable assignment.
@@ -2369,42 +2341,18 @@ func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, skip skipMa
 	}
 	if deref {
 		// Treat as a mem->mem move.
-		if wb && !ssa.IsStackAddr(addr) {
-			s.insertWBmove(t, addr, right)
-			return
-		}
+		var store *ssa.Value
 		if right == nil {
-			store := s.newValue2I(ssa.OpZero, ssa.TypeMem, sizeAlignAuxInt(t), addr, s.mem())
-			store.Aux = t
-			s.vars[&memVar] = store
-			return
+			store = s.newValue2I(ssa.OpZero, ssa.TypeMem, sizeAlignAuxInt(t), addr, s.mem())
+		} else {
+			store = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(t), addr, right, s.mem())
 		}
-		store := s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(t), addr, right, s.mem())
 		store.Aux = t
 		s.vars[&memVar] = store
 		return
 	}
 	// Treat as a store.
-	if wb && !ssa.IsStackAddr(addr) {
-		if skip&skipPtr != 0 {
-			// Special case: if we don't write back the pointers, don't bother
-			// doing the write barrier check.
-			s.storeTypeScalars(t, addr, right, skip)
-			return
-		}
-		s.insertWBstore(t, addr, right, skip)
-		return
-	}
-	if skip != 0 {
-		if skip&skipPtr == 0 {
-			s.storeTypePtrs(t, addr, right)
-		}
-		s.storeTypeScalars(t, addr, right, skip)
-		return
-	}
-	store := s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem())
-	store.Aux = t
-	s.vars[&memVar] = store
+	s.storeType(t, addr, right, skip)
 }

 // zeroVal returns the zero value for type t.
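The two shapes assign still distinguishes, in source terms: deref=true is a memory-to-memory move (or a zeroing when the right side is nil), while deref=false stores an SSA value directly. A small illustration; the array type is just a stand-in for any non-SSA-able type:

package main

import "fmt"

type big struct{ a [8]int64 } // too large to SSA: assigned via OpMove

func f(dst, src *big, p **int64, x *int64) {
	*dst = *src // deref=true: mem->mem move; *dst = big{} would be OpZero
	*p = x      // deref=false: direct store of an SSA value (OpStore)
}

func main() {
	var d, s big
	var q *int64
	v := int64(3)
	f(&d, &s, &q, &v)
	fmt.Println(*q) // 3
}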
@@ -3403,65 +3351,15 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*Type, args ...*ssa
 	return res
 }

-// insertWBmove inserts the assignment *left = *right including a write barrier.
-// t is the type being assigned.
-// If right == nil, then we're zeroing *left.
-func (s *state) insertWBmove(t *Type, left, right *ssa.Value) {
-	// if writeBarrier.enabled {
-	//   typedmemmove(&t, left, right)
-	// } else {
-	//   *left = *right
-	// }
-	//
-	// or
-	//
-	// if writeBarrier.enabled {
-	//   typedmemclr(&t, left)
-	// } else {
-	//   *left = zeroValue
-	// }
-
-	if s.noWB {
-		s.Error("write barrier prohibited")
-	}
-	if !s.WBPos.IsKnown() {
-		s.WBPos = left.Pos
-	}
-
-	var val *ssa.Value
-	if right == nil {
-		val = s.newValue2I(ssa.OpZero, ssa.TypeMem, sizeAlignAuxInt(t), left, s.mem())
-	} else {
-		val = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(t), left, right, s.mem())
-	}
-	//val.Aux = &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: Linksym(typenamesym(t))}
-	val.Aux = t
-	s.vars[&memVar] = val
-}
-
-// insertWBstore inserts the assignment *left = right including a write barrier.
-// t is the type being assigned.
-func (s *state) insertWBstore(t *Type, left, right *ssa.Value, skip skipMask) {
-	// store scalar fields
-	// if writeBarrier.enabled {
-	//   writebarrierptr for pointer fields
-	// } else {
-	//   store pointer fields
-	// }
-
-	if s.noWB {
-		s.Error("write barrier prohibited")
-	}
-	if !s.WBPos.IsKnown() {
-		s.WBPos = left.Pos
-	}
-	if t == Types[TUINTPTR] {
-		// Stores to reflect.{Slice,String}Header.Data.
-		s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, right, s.mem())
-		return
-	}
+// do *left = right for type t.
+func (s *state) storeType(t *Type, left, right *ssa.Value, skip skipMask) {
+	// store scalar fields first, so write barrier stores for
+	// pointer fields can be grouped together, and scalar values
+	// don't need to be live across the write barrier call.
 	s.storeTypeScalars(t, left, right, skip)
-	s.storeTypePtrsWB(t, left, right)
+	if skip&skipPtr == 0 && haspointers(t) {
+		s.storeTypePtrs(t, left, right)
+	}
 }

 // do *left = right for all scalar (non-pointer) parts of t.
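storeType's ordering, scalars before pointers, lets the later pass group barrier stores and keeps scalar values from being live across the barrier call. A toy decomposition of storing a mixed struct (not compiler code; the compiler does this on SSA values, field by field):

package main

import "fmt"

type T struct {
	P *int // pointer part: a write-barrier candidate
	N int  // scalar part: never barriered
}

// storeT mirrors storeType's split: scalar fields first
// (storeTypeScalars), then pointer fields (storeTypePtrs).
func storeT(dst *T, src T) {
	dst.N = src.N // scalar store
	dst.P = src.P // pointer store; the compiler may wrap it in a barrier
}

func main() {
	x := 1
	var d T
	storeT(&d, T{P: &x, N: 7})
	fmt.Println(*d.P, d.N) // 1 7
}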
@@ -3564,50 +3462,6 @@ func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) {
 	}
 }

-// do *left = right for all pointer parts of t, with write barriers if necessary.
-func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) {
-	switch {
-	case t.IsPtrShaped():
-		store := s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem())
-		store.Aux = t
-		s.vars[&memVar] = store
-	case t.IsString():
-		ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), right)
-		store := s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
-		store.Aux = ptrto(Types[TUINT8])
-		s.vars[&memVar] = store
-	case t.IsSlice():
-		ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), right)
-		store := s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
-		store.Aux = ptrto(Types[TUINT8])
-		s.vars[&memVar] = store
-	case t.IsInterface():
-		// itab field is treated as a scalar.
-		idata := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), right)
-		idataAddr := s.newValue1I(ssa.OpOffPtr, ptrto(ptrto(Types[TUINT8])), s.config.PtrSize, left)
-		store := s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem())
-		store.Aux = ptrto(Types[TUINT8])
-		s.vars[&memVar] = store
-	case t.IsStruct():
-		n := t.NumFields()
-		for i := 0; i < n; i++ {
-			ft := t.FieldType(i)
-			if !haspointers(ft.(*Type)) {
-				continue
-			}
-			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
-			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
-			s.storeTypePtrsWB(ft.(*Type), addr, val)
-		}
-	case t.IsArray() && t.NumElem() == 0:
-		// nothing
-	case t.IsArray() && t.NumElem() == 1:
-		s.storeTypePtrsWB(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
-	default:
-		s.Fatalf("bad write barrier type %v", t)
-	}
-}
-
 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
 // i,j,k may be nil, in which case they are set to their default value.
 // t is a slice, ptr to array, or string type.
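The deleted storeTypePtrsWB visited exactly the pointer words of each shape: the data pointer of a string or slice, the data word of an interface, and pointer-bearing struct fields. A hedged sketch of where those words sit, relying on the documented Go header layouts (unsafe, illustration only):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	s := "hi"
	b := []byte{1, 2, 3}
	// The first word of a string or slice header is the data pointer --
	// the only part storeTypePtrsWB treated as a pointer; len and cap
	// are scalars handled by storeTypeScalars.
	sp := *(*unsafe.Pointer)(unsafe.Pointer(&s))
	bp := *(*unsafe.Pointer)(unsafe.Pointer(&b))
	var i interface{} = &s
	// An interface is (itab/type, data); the itab word is treated as a
	// scalar, the second (data) word as the pointer.
	words := (*[2]unsafe.Pointer)(unsafe.Pointer(&i))
	fmt.Println(sp, bp, words[1])
}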
@@ -4956,6 +4810,11 @@ func (e *ssaExport) Fatalf(pos src.XPos, msg string, args ...interface{}) {
 	Fatalf(msg, args...)
 }

+// Error reports a compiler error but keep going.
+func (e *ssaExport) Error(pos src.XPos, msg string, args ...interface{}) {
+	yyerrorl(pos, msg, args...)
+}
+
 // Warnl reports a "warning", which is usually flag-triggered
 // logging output for the benefit of tests.
 func (e *ssaExport) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
src/cmd/compile/internal/ssa/config.go

@@ -95,6 +95,9 @@ type Logger interface {
 	// Fatal reports a compiler error and exits.
 	Fatalf(pos src.XPos, msg string, args ...interface{})

+	// Error reports a compiler error but keep going.
+	Error(pos src.XPos, msg string, args ...interface{})
+
 	// Warnl writes compiler messages in the form expected by "errorcheck" tests
 	Warnl(pos src.XPos, fmt_ string, args ...interface{})

@@ -354,6 +357,7 @@ func (c *Config) NewFunc() *Func {
 func (c *Config) Logf(msg string, args ...interface{}) { c.fe.Logf(msg, args...) }
 func (c *Config) Log() bool { return c.fe.Log() }
 func (c *Config) Fatalf(pos src.XPos, msg string, args ...interface{}) { c.fe.Fatalf(pos, msg, args...) }
+func (c *Config) Error(pos src.XPos, msg string, args ...interface{}) { c.fe.Error(pos, msg, args...) }
 func (c *Config) Warnl(pos src.XPos, msg string, args ...interface{}) { c.fe.Warnl(pos, msg, args...) }
 func (c *Config) Debug_checknil() bool { return c.fe.Debug_checknil() }
 func (c *Config) Debug_wb() bool { return c.fe.Debug_wb() }
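Error joins the Logger interface so the ssa package can surface a diagnostic without importing the frontend: the compiler wires it to yyerrorl, the test harness (next file) to t.Errorf. The shape of that seam, as a self-contained sketch with invented analogues of the real types:

package main

import "fmt"

// Logger is a trimmed-down analogue of ssa's frontend interface.
type Logger interface {
	Error(pos int, msg string, args ...interface{})
}

type printFE struct{} // plays the role of gc's ssaExport

func (printFE) Error(pos int, msg string, args ...interface{}) {
	fmt.Printf("%d: %s\n", pos, fmt.Sprintf(msg, args...))
}

// writebarrierPass reports through the frontend and keeps going,
// as the real pass now does when f.NoWB is set.
func writebarrierPass(fe Logger, noWB bool, pos int) {
	if noWB {
		fe.Error(pos, "write barrier prohibited")
	}
}

func main() { writebarrierPass(printFE{}, true, 207) }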
src/cmd/compile/internal/ssa/export_test.go

@@ -96,6 +96,7 @@ func (d DummyFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, arg
 func (d DummyFrontend) Log() bool { return true }

 func (d DummyFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
+func (d DummyFrontend) Error(_ src.XPos, msg string, args ...interface{}) { d.t.Errorf(msg, args...) }
 func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
 func (d DummyFrontend) Debug_checknil() bool { return false }
 func (d DummyFrontend) Debug_wb() bool { return false }
src/cmd/compile/internal/ssa/func.go

@@ -27,6 +27,9 @@ type Func struct {
 	scheduled bool // Values in Blocks are in final order
 	NoSplit   bool // true if function is marked as nosplit. Used by schedule check pass.

+	NoWB  bool     // write barrier is not allowed
+	WBPos src.XPos // line number of first write barrier
+
 	// when register allocation is done, maps value ids to locations
 	RegAlloc []Location
src/cmd/compile/internal/ssa/writebarrier.go

@@ -19,7 +19,7 @@ func needwb(v *Value) bool {
 	if !t.HasPointer() {
 		return false
 	}
-	if IsStackAddr(v.Args[0]) {
+	if isStackAddr(v.Args[0]) {
 		return false // write on stack doesn't need write barrier
 	}
 	return true
@@ -207,6 +207,12 @@ func writebarrier(f *Func) {
 		memElse = bElse.NewValue3I(pos, op, TypeMem, siz, ptr, val, memElse)
 	}

+	if f.NoWB {
+		f.Config.fe.Error(pos, "write barrier prohibited")
+	}
+	if !f.WBPos.IsKnown() {
+		f.WBPos = pos
+	}
 	if f.Config.fe.Debug_wb() {
 		f.Config.Warnl(pos, "write barrier")
 	}
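The bThen/bElse blocks built just above this hunk implement, per pointer store, the guarded shape the old insertWB* comments described. A sketch with a plain bool standing in for runtime.writeBarrier.enabled and a hypothetical writebarrierptr:

package main

import "fmt"

var writeBarrierEnabled bool // stand-in for runtime.writeBarrier.enabled

func writebarrierptr(dst **int, src *int) { // hypothetical barrier routine
	fmt.Println("barrier")
	*dst = src
}

// storePtr is the guarded-store shape the writebarrier pass emits
// around every pointer store it keeps.
func storePtr(dst **int, src *int) {
	if writeBarrierEnabled {
		writebarrierptr(dst, src)
	} else {
		*dst = src
	}
}

func main() {
	x := 1
	var p *int
	writeBarrierEnabled = true
	storePtr(&p, &x)
	fmt.Println(*p) // 1
}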
@@ -309,8 +315,8 @@ func round(o int64, r int64) int64 {
 	return (o + r - 1) &^ (r - 1)
 }

-// IsStackAddr returns whether v is known to be an address of a stack slot
-func IsStackAddr(v *Value) bool {
+// isStackAddr returns whether v is known to be an address of a stack slot
+func isStackAddr(v *Value) bool {
 	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
 		v = v.Args[0]
 	}
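isStackAddr peels address arithmetic (OpOffPtr, OpAddPtr, OpPtrIndex, OpCopy) and then asks whether the base is the stack pointer. A toy model of that walk, with an invented op set and value type rather than the real ssa.Value:

package main

import "fmt"

type op int

const (
	opSP op = iota // the stack pointer: a stack address
	opOffPtr
	opAddPtr
	opPtrIndex
	opCopy
	opOther // unknown provenance, e.g. a loaded pointer
)

type value struct {
	op   op
	args []*value
}

// isStackAddr mirrors the pass's loop: strip pointer arithmetic and
// copies, then check the base.
func isStackAddr(v *value) bool {
	for v.op == opOffPtr || v.op == opAddPtr || v.op == opPtrIndex || v.op == opCopy {
		v = v.args[0]
	}
	return v.op == opSP
}

func main() {
	sp := &value{op: opSP}
	onStack := &value{op: opOffPtr, args: []*value{sp}}
	heap := &value{op: opAddPtr, args: []*value{{op: opOther}}}
	fmt.Println(isStackAddr(onStack), isStackAddr(heap)) // true false
}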