cmd/compile: simplify needwritebarrier

Currently, whether we need a write barrier is simply a property of the
pointer slot being written to.

The only optimization we currently apply using the value being written
is that pointers to stack variables can omit write barriers because
they're only written to stack slots... but we already omit write
barriers for all writes to the stack anyway.

Passes toolstash -cmp.

Change-Id: I7f16b71ff473899ed96706232d371d5b2b7ae789
Reviewed-on: https://go-review.googlesource.com/37109
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
This commit is contained in:
Matthew Dempsky 2017-02-15 18:43:34 -08:00
parent 211102c85f
commit 794f1ebff7
3 changed files with 9 additions and 35 deletions

View File

@@ -553,7 +553,7 @@ func (s *state) stmt(n *Node) {
 			deref = true
 			res = res.Args[0]
 		}
-		s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), deref, 0, false)
+		s.assign(n.List.First(), res, needwritebarrier(n.List.First()), deref, 0, false)
 		s.assign(n.List.Second(), resok, false, false, 0, false)
 		return
@@ -565,12 +565,8 @@ func (s *state) stmt(n *Node) {
 			v := s.intrinsicCall(n.Rlist.First())
 			v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
 			v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
-			// Make a fake node to mimic loading return value, ONLY for write barrier test.
-			// This is future-proofing against non-scalar 2-result intrinsics.
-			// Currently we only have scalar ones, which result in no write barrier.
-			fakeret := &Node{Op: OINDREGSP}
-			s.assign(n.List.First(), v1, needwritebarrier(n.List.First(), fakeret), false, 0, false)
-			s.assign(n.List.Second(), v2, needwritebarrier(n.List.Second(), fakeret), false, 0, false)
+			s.assign(n.List.First(), v1, needwritebarrier(n.List.First()), false, 0, false)
+			s.assign(n.List.Second(), v2, needwritebarrier(n.List.Second()), false, 0, false)
 			return

 		case ODCL:
@@ -696,7 +692,7 @@ func (s *state) stmt(n *Node) {
 		}
 		var r *ssa.Value
 		var isVolatile bool
-		needwb := n.Right != nil && needwritebarrier(n.Left, n.Right)
+		needwb := n.Right != nil && needwritebarrier(n.Left)
 		deref := !canSSAType(t)
 		if deref {
 			if rhs == nil {
@@ -711,7 +707,7 @@ func (s *state) stmt(n *Node) {
 				r = s.expr(rhs)
 			}
 		}
-		if rhs != nil && rhs.Op == OAPPEND && needwritebarrier(n.Left, rhs) {
+		if rhs != nil && rhs.Op == OAPPEND && needwritebarrier(n.Left) {
 			// The frontend gets rid of the write barrier to enable the special OAPPEND
 			// handling above, but since this is not a special case, we need it.
 			// TODO: just add a ptr graying to the end of growslice?

View File

@@ -1170,7 +1170,7 @@ func ullmancalc(n *Node) {
 		goto out

 	case OAS:
-		if !needwritebarrier(n.Left, n.Right) {
+		if !needwritebarrier(n.Left) {
 			break
 		}
 		fallthrough

View File

@@ -1664,8 +1664,7 @@ func fncall(l *Node, rt *Type) bool {
 	if l.Ullman >= UINF || l.Op == OINDEXMAP {
 		return true
 	}
-	var r Node
-	if needwritebarrier(l, &r) {
+	if needwritebarrier(l) {
 		return true
 	}
 	if eqtype(l.Type, rt) {
@@ -2049,8 +2048,8 @@ func isstack(n *Node) bool {
 	return false
 }

-// Do we need a write barrier for the assignment l = r?
-func needwritebarrier(l *Node, r *Node) bool {
+// Do we need a write barrier for assigning to l?
+func needwritebarrier(l *Node) bool {
 	if !use_writebarrier {
 		return false
 	}
@@ -2077,21 +2076,6 @@ func needwritebarrier(l *Node) bool {
 		return false
 	}

-	// Implicit zeroing is still zeroing, so it needs write
-	// barriers. In practice, these are all to stack variables
-	// (even if isstack isn't smart enough to figure that out), so
-	// they'll be eliminated by the backend.
-	if r == nil {
-		return true
-	}
-
-	// Ignore no-op conversions when making decision.
-	// Ensures that xp = unsafe.Pointer(&x) is treated
-	// the same as xp = &x.
-	for r.Op == OCONVNOP {
-		r = r.Left
-	}
-
 	// TODO: We can eliminate write barriers if we know *both* the
 	// current and new content of the slot must already be shaded.
 	// We know a pointer is shaded if it's nil, or points to
@@ -2100,12 +2084,6 @@ func needwritebarrier(l *Node) bool {
 	// writes to just-allocated objects. Unfortunately, knowing
 	// the "current" value of the slot requires flow analysis.

-	// No write barrier for storing address of stack values,
-	// which are guaranteed only to be written to the stack.
-	if r.Op == OADDR && isstack(r.Left) {
-		return false
-	}
-
 	// Otherwise, be conservative and use write barrier.
 	return true
 }