diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go
index 9e26f66a1c..5d1b952627 100644
--- a/src/cmd/compile/internal/devirtualize/devirtualize.go
+++ b/src/cmd/compile/internal/devirtualize/devirtualize.go
@@ -18,22 +18,9 @@ import (
 	"cmd/compile/internal/types"
 )
 
-// Static devirtualizes calls within fn where possible when the concrete callee
+// StaticCall devirtualizes the given call if possible when the concrete callee
 // is available statically.
-func Static(fn *ir.Func) {
-	ir.CurFunc = fn
-
-	ir.VisitList(fn.Body, func(n ir.Node) {
-		switch n := n.(type) {
-		case *ir.CallExpr:
-			staticCall(n)
-		}
-	})
-}
-
-// staticCall devirtualizes the given call if possible when the concrete callee
-// is available statically.
-func staticCall(call *ir.CallExpr) {
+func StaticCall(call *ir.CallExpr) {
 	// For promoted methods (including value-receiver methods promoted
 	// to pointer-receivers), the interface method wrapper may contain
 	// expressions that can panic (e.g., ODEREF, ODOTPTR,
@@ -51,6 +38,7 @@ func staticCall(call *ir.CallExpr) {
 	if call.Op() != ir.OCALLINTER {
 		return
 	}
+
 	sel := call.Fun.(*ir.SelectorExpr)
 	r := ir.StaticValue(sel.X)
 	if r.Op() != ir.OCONVIFACE {
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index a19962dabb..7e5069fced 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -9,10 +9,10 @@ import (
 	"bytes"
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/coverage"
-	"cmd/compile/internal/devirtualize"
 	"cmd/compile/internal/dwarfgen"
 	"cmd/compile/internal/escape"
 	"cmd/compile/internal/inline"
+	"cmd/compile/internal/inline/interleaved"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/loopvar"
@@ -224,30 +224,15 @@ func Main(archInit func(*ssagen.ArchInfo)) {
 		}
 	}
 
-	base.Timer.Start("fe", "pgo-devirtualization")
-	if profile != nil && base.Debug.PGODevirtualize > 0 {
-		// TODO(prattmic): No need to use bottom-up visit order. This
-		// is mirroring the PGO IRGraph visit order, which also need
-		// not be bottom-up.
-		ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
-			for _, fn := range list {
-				devirtualize.ProfileGuided(fn, profile)
-			}
-		})
-		ir.CurFunc = nil
-	}
+	// Interleaved devirtualization and inlining.
+	base.Timer.Start("fe", "devirtualize-and-inline")
+	interleaved.DevirtualizeAndInlinePackage(typecheck.Target, profile)
 
-	// Inlining
-	base.Timer.Start("fe", "inlining")
-	if base.Flag.LowerL != 0 {
-		inline.InlinePackage(profile)
-	}
 	noder.MakeWrappers(typecheck.Target) // must happen after inlining
 
-	// Devirtualize and get variable capture right in for loops
+	// Get variable capture right in for loops.
 	var transformed []loopvar.VarAndLoop
 	for _, fn := range typecheck.Target.Funcs {
-		devirtualize.Static(fn)
 		transformed = append(transformed, loopvar.ForCapture(fn)...)
 	}
 	ir.CurFunc = nil
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index 7aed532c99..74f0d341c7 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -76,8 +76,12 @@ var (
 	inlineHotMaxBudget int32 = 2000
 )
 
-// pgoInlinePrologue records the hot callsites from ir-graph.
-func pgoInlinePrologue(p *pgo.Profile, funcs []*ir.Func) {
+// PGOInlinePrologue records the hot callsites from the IR graph.
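+//
+// The hot-callsite threshold is derived from the profile's call-edge
+// weight CDF (see hotNodesFromCDF); the target CDF percentile can be
+// overridden with the -d=PGOInlineCDFThreshold debug flag.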
+func PGOInlinePrologue(p *pgo.Profile, funcs []*ir.Func) {
 	if base.Debug.PGOInlineCDFThreshold != "" {
 		if s, err := strconv.ParseFloat(base.Debug.PGOInlineCDFThreshold, 64); err == nil && s >= 0 && s <= 100 {
 			inlineCDFHotCallSiteThresholdPercent = s
@@ -134,79 +134,52 @@ func hotNodesFromCDF(p *pgo.Profile) (float64, []pgo.NamedCallEdge) {
 	return 0, p.NamedEdgeMap.ByWeight
 }
 
-// InlinePackage finds functions that can be inlined and clones them before walk expands them.
-func InlinePackage(p *pgo.Profile) {
-	if base.Debug.PGOInline == 0 {
-		p = nil
+// CanInlineFuncs computes inlinability for a batch of functions.
+func CanInlineFuncs(funcs []*ir.Func, profile *pgo.Profile) {
+	if profile != nil {
+		PGOInlinePrologue(profile, funcs)
 	}
-	inlheur.SetupScoreAdjustments()
-
-	InlineDecls(p, typecheck.Target.Funcs, true)
-
-	// Perform a garbage collection of hidden closures functions that
-	// are no longer reachable from top-level functions following
-	// inlining. See #59404 and #59638 for more context.
-	garbageCollectUnreferencedHiddenClosures()
-
-	if base.Debug.DumpInlFuncProps != "" {
-		inlheur.DumpFuncProps(nil, base.Debug.DumpInlFuncProps)
-	}
-	if inlheur.Enabled() {
-		postProcessCallSites(p)
-		inlheur.TearDown()
-	}
+	ir.VisitFuncsBottomUp(funcs, func(list []*ir.Func, recursive bool) {
+		CanInlineSCC(list, recursive, profile)
+	})
 }
 
-// InlineDecls applies inlining to the given batch of declarations.
-func InlineDecls(p *pgo.Profile, funcs []*ir.Func, doInline bool) {
-	if p != nil {
-		pgoInlinePrologue(p, funcs)
+// CanInlineSCC computes the inlinability of functions within an SCC
+// (strongly connected component).
+//
+// CanInlineSCC is designed to be used by ir.VisitFuncsBottomUp
+// callbacks.
+func CanInlineSCC(funcs []*ir.Func, recursive bool, profile *pgo.Profile) {
+	if base.Flag.LowerL == 0 {
+		return
 	}
 
-	doCanInline := func(n *ir.Func, recursive bool, numfns int) {
+	numfns := numNonClosures(funcs)
+
+	for _, fn := range funcs {
 		if !recursive || numfns > 1 {
 			// We allow inlining if there is no
 			// recursion, or the recursion cycle is
 			// across more than one function.
-			CanInline(n, p)
+			CanInline(fn, profile)
 		} else {
-			if base.Flag.LowerM > 1 && n.OClosure == nil {
-				fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname)
+			if base.Flag.LowerM > 1 && fn.OClosure == nil {
+				fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(fn), fn.Nname)
 			}
 		}
 		if inlheur.Enabled() {
-			analyzeFuncProps(n, p)
+			analyzeFuncProps(fn, profile)
 		}
 	}
-
-	ir.VisitFuncsBottomUp(funcs, func(list []*ir.Func, recursive bool) {
-		numfns := numNonClosures(list)
-		// We visit functions within an SCC in fairly arbitrary order,
-		// so by computing inlinability for all functions in the SCC
-		// before performing any inlining, the results are less
-		// sensitive to the order within the SCC (see #58905 for an
-		// example).
-
-		// First compute inlinability for all functions in the SCC ...
-		for _, n := range list {
-			doCanInline(n, recursive, numfns)
-		}
-		// ... then make a second pass to do inlining of calls.
-		if doInline {
-			for _, n := range list {
-				InlineCalls(n, p)
-			}
-		}
-	})
 }
 
-// garbageCollectUnreferencedHiddenClosures makes a pass over all the
+// GarbageCollectUnreferencedHiddenClosures makes a pass over all the
 // top-level (non-hidden-closure) functions looking for nested closure
 // functions that are reachable, then sweeps through the Target.Decls
 // list and marks any non-reachable hidden closure function as dead.
 // See issues #59404 and #59638 for more context.
-func garbageCollectUnreferencedHiddenClosures() {
+func GarbageCollectUnreferencedHiddenClosures() {
 	liveFuncs := make(map[*ir.Func]bool)
 
@@ -336,7 +309,7 @@ func CanInline(fn *ir.Func, profile *pgo.Profile) {
 
 	visitor := hairyVisitor{
 		curFunc:       fn,
-		isBigFunc:     isBigFunc(fn),
+		isBigFunc:     IsBigFunc(fn),
 		budget:        budget,
 		maxBudget:     budget,
 		extraCallCost: cc,
@@ -732,14 +705,16 @@ opSwitch:
 	// particular, to avoid breaking the existing inlinability regress
 	// tests), we need to compensate for this here.
 	//
-	// See also identical logic in isBigFunc.
-	if init := n.Rhs[0].Init(); len(init) == 1 {
-		if _, ok := init[0].(*ir.AssignListStmt); ok {
-			// 4 for each value, because each temporary variable now
-			// appears 3 times (DCL, LHS, RHS), plus an extra DCL node.
-			//
-			// 1 for the extra "tmp1, tmp2 = f()" assignment statement.
-			v.budget += 4*int32(len(n.Lhs)) + 1
+	// See also identical logic in IsBigFunc.
+	if len(n.Rhs) > 0 {
+		if init := n.Rhs[0].Init(); len(init) == 1 {
+			if _, ok := init[0].(*ir.AssignListStmt); ok {
+				// 4 for each value, because each temporary variable now
+				// appears 3 times (DCL, LHS, RHS), plus an extra DCL node.
+				//
+				// 1 for the extra "tmp1, tmp2 = f()" assignment statement.
+				v.budget += 4*int32(len(n.Lhs)) + 1
+			}
 		}
 	}
 
@@ -771,12 +746,15 @@ opSwitch:
 	return ir.DoChildren(n, v.do)
 }
 
-func isBigFunc(fn *ir.Func) bool {
+// IsBigFunc reports whether fn is a "big" function.
+//
+// Note: The criteria for "big" are heuristic and subject to change.
+func IsBigFunc(fn *ir.Func) bool {
 	budget := inlineBigFunctionNodes
 	return ir.Any(fn, func(n ir.Node) bool {
 		// See logic in hairyVisitor.doNode, explaining unified IR's
 		// handling of "a, b = f()" assignments.
-		if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 {
+		if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 && len(n.Rhs) > 0 {
 			if init := n.Rhs[0].Init(); len(init) == 1 {
 				if _, ok := init[0].(*ir.AssignListStmt); ok {
 					budget += 4*len(n.Lhs) + 1
@@ -789,109 +767,40 @@ func isBigFunc(fn *ir.Func) bool {
 	})
 }
 
-// InlineCalls/inlnode walks fn's statements and expressions and substitutes any
-// calls made to inlineable functions. This is the external entry point.
-func InlineCalls(fn *ir.Func, profile *pgo.Profile) {
-	if inlheur.Enabled() && !fn.Wrapper() {
-		inlheur.ScoreCalls(fn)
-		defer inlheur.ScoreCallsCleanup()
+// TryInlineCall returns an inlined call expression for call, or nil
+// if inlining is not possible.
+func TryInlineCall(callerfn *ir.Func, call *ir.CallExpr, bigCaller bool, profile *pgo.Profile) *ir.InlinedCallExpr {
+	if base.Flag.LowerL == 0 {
+		return nil
 	}
-	if base.Debug.DumpInlFuncProps != "" && !fn.Wrapper() {
-		inlheur.DumpFuncProps(fn, base.Debug.DumpInlFuncProps)
+	if call.Op() != ir.OCALLFUNC {
+		return nil
 	}
-	savefn := ir.CurFunc
-	ir.CurFunc = fn
-	bigCaller := isBigFunc(fn)
-	if bigCaller && base.Flag.LowerM > 1 {
-		fmt.Printf("%v: function %v considered 'big'; reducing max cost of inlinees\n", ir.Line(fn), fn)
-	}
-	var inlCalls []*ir.InlinedCallExpr
-	var edit func(ir.Node) ir.Node
-	edit = func(n ir.Node) ir.Node {
-		return inlnode(fn, n, bigCaller, &inlCalls, edit, profile)
-	}
-	ir.EditChildren(fn, edit)
-
-	// If we inlined any calls, we want to recursively visit their
-	// bodies for further inlining. However, we need to wait until
-	// *after* the original function body has been expanded, or else
-	// inlCallee can have false positives (e.g., #54632).
-	for len(inlCalls) > 0 {
-		call := inlCalls[0]
-		inlCalls = inlCalls[1:]
-		ir.EditChildren(call, edit)
+	if call.GoDefer || call.NoInline {
+		return nil
 	}
-
-	ir.CurFunc = savefn
-}
-
-// inlnode recurses over the tree to find inlineable calls, which will
-// be turned into OINLCALLs by mkinlcall. When the recursion comes
-// back up will examine left, right, list, rlist, ninit, ntest, nincr,
-// nbody and nelse and use one of the 4 inlconv/glue functions above
-// to turn the OINLCALL into an expression, a statement, or patch it
-// in to this nodes list or rlist as appropriate.
-// NOTE it makes no sense to pass the glue functions down the
-// recursion to the level where the OINLCALL gets created because they
-// have to edit /this/ n, so you'd have to push that one down as well,
-// but then you may as well do it here. so this is cleaner and
-// shorter and less complicated.
-// The result of inlnode MUST be assigned back to n, e.g.
-//
-//	n.Left = inlnode(n.Left)
-func inlnode(callerfn *ir.Func, n ir.Node, bigCaller bool, inlCalls *[]*ir.InlinedCallExpr, edit func(ir.Node) ir.Node, profile *pgo.Profile) ir.Node {
-	if n == nil {
-		return n
-	}
-
-	switch n.Op() {
-	case ir.OTAILCALL:
-		n := n.(*ir.TailCallStmt)
-		n.Call.NoInline = true // Not inline a tail call for now. Maybe we could inline it just like RETURN fn(arg)?
-	case ir.OCALLFUNC:
-		n := n.(*ir.CallExpr)
-		if n.Fun.Op() == ir.OMETHEXPR {
-			// Prevent inlining some reflect.Value methods when using checkptr,
-			// even when package reflect was compiled without it (#35073).
-			if meth := ir.MethodExprName(n.Fun); meth != nil {
-				s := meth.Sym()
-				if base.Debug.Checkptr != 0 {
-					switch types.ReflectSymName(s) {
-					case "Value.UnsafeAddr", "Value.Pointer":
-						n.NoInline = true
-					}
-				}
+	// Prevent inlining some reflect.Value methods when using checkptr,
+	// even when package reflect was compiled without it (#35073).
+	if base.Debug.Checkptr != 0 && call.Fun.Op() == ir.OMETHEXPR {
+		if method := ir.MethodExprName(call.Fun); method != nil {
+			switch types.ReflectSymName(method.Sym()) {
+			case "Value.UnsafeAddr", "Value.Pointer":
+				return nil
 			}
 		}
 	}
 
-	lno := ir.SetPos(n)
-
-	ir.EditChildren(n, edit)
-
-	// with all the branches out of the way, it is now time to
-	// transmogrify this node itself unless inhibited by the
-	// switch at the top of this function.
-	switch n.Op() {
-	case ir.OCALLFUNC:
-		call := n.(*ir.CallExpr)
-		if call.GoDefer || call.NoInline {
-			break
-		}
-		if base.Flag.LowerM > 3 {
-			fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.Fun)
-		}
-		if ir.IsIntrinsicCall(call) {
-			break
-		}
-		if fn := inlCallee(callerfn, call.Fun, profile); fn != nil && typecheck.HaveInlineBody(fn) {
-			n = mkinlcall(callerfn, call, fn, bigCaller, inlCalls)
-		}
+	if base.Flag.LowerM > 3 {
+		fmt.Printf("%v:call to func %+v\n", ir.Line(call), call.Fun)
 	}
-
-	base.Pos = lno
-
-	return n
+	if ir.IsIntrinsicCall(call) {
+		return nil
+	}
+	if fn := inlCallee(callerfn, call.Fun, profile); fn != nil && typecheck.HaveInlineBody(fn) {
+		return mkinlcall(callerfn, call, fn, bigCaller)
+	}
+	return nil
 }
 
 // inlCallee takes a function-typed expression and returns the underlying function ONAME
@@ -1082,17 +991,16 @@ func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCa
 	return true
 }
 
-// If n is a OCALLFUNC node, and fn is an ONAME node for a
-// function with an inlinable body, return an OINLCALL node that can replace n.
-// The returned node's Ninit has the parameter assignments, the Nbody is the
-// inlined function body, and (List, Rlist) contain the (input, output)
-// parameters.
+// mkinlcall returns an OINLCALL node that can replace OCALLFUNC n, or
+// nil if it cannot be inlined. callerfn is the function that contains
+// n, and fn is the function being called.
+//
 // The result of mkinlcall MUST be assigned back to n, e.g.
 //
 //	n.Left = mkinlcall(n.Left, fn, isddd)
-func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller bool, inlCalls *[]*ir.InlinedCallExpr) ir.Node {
+func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller bool) *ir.InlinedCallExpr {
 	if !canInlineCallExpr(callerfn, n, fn, bigCaller, true) {
-		return n
+		return nil
 	}
 
 	typecheck.AssertFixedCall(n)
@@ -1170,8 +1078,6 @@ func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller bool, i
 		inlheur.UpdateCallsiteTable(callerfn, n, res)
 	}
 
-	*inlCalls = append(*inlCalls, res)
-
 	return res
 }
 
@@ -1275,7 +1181,7 @@ func isAtomicCoverageCounterUpdate(cn *ir.CallExpr) bool {
 	return v
 }
 
-func postProcessCallSites(profile *pgo.Profile) {
+func PostProcessCallSites(profile *pgo.Profile) {
 	if base.Debug.DumpInlCallSiteScores != 0 {
 		budgetCallback := func(fn *ir.Func, prof *pgo.Profile) (int32, bool) {
 			v := inlineBudget(fn, prof, false, false)
diff --git a/src/cmd/compile/internal/inline/interleaved/interleaved.go b/src/cmd/compile/internal/inline/interleaved/interleaved.go
new file mode 100644
index 0000000000..a6f19d470d
--- /dev/null
+++ b/src/cmd/compile/internal/inline/interleaved/interleaved.go
@@ -0,0 +1,135 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package interleaved implements the interleaved devirtualization and
+// inlining pass.
+package interleaved
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/devirtualize"
+	"cmd/compile/internal/inline"
+	"cmd/compile/internal/inline/inlheur"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/pgo"
+	"cmd/compile/internal/typecheck"
+	"fmt"
+)
+
+// DevirtualizeAndInlinePackage interleaves devirtualization and inlining on
+// all functions within pkg.
+func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgo.Profile) {
+	if profile != nil && base.Debug.PGODevirtualize > 0 {
+		// TODO(mdempsky): Integrate into DevirtualizeAndInlineFunc below.
+		ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
+			for _, fn := range list {
+				devirtualize.ProfileGuided(fn, profile)
+			}
+		})
+		ir.CurFunc = nil
+	}
+
+	if base.Flag.LowerL != 0 {
+		inlheur.SetupScoreAdjustments()
+	}
+
+	var inlProfile *pgo.Profile // copy of profile for inlining
+	if base.Debug.PGOInline != 0 {
+		inlProfile = profile
+	}
+	if inlProfile != nil {
+		inline.PGOInlinePrologue(inlProfile, pkg.Funcs)
+	}
+
+	ir.VisitFuncsBottomUp(pkg.Funcs, func(funcs []*ir.Func, recursive bool) {
+		// We visit functions within an SCC in fairly arbitrary order,
+		// so by computing inlinability for all functions in the SCC
+		// before performing any inlining, the results are less
+		// sensitive to the order within the SCC (see #58905 for an
+		// example).
+
+		// First compute inlinability for all functions in the SCC ...
+		inline.CanInlineSCC(funcs, recursive, inlProfile)
+
+		// ... then make a second pass to do devirtualization and inlining
+		// of calls.
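+		// Devirtualization can expose new inlinable callees (see
+		// test/fixedbugs/issue52193.go), which is why the two are
+		// interleaved per function rather than run as separate passes.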
+		for _, fn := range funcs {
+			DevirtualizeAndInlineFunc(fn, inlProfile)
+		}
+	})
+
+	if base.Flag.LowerL != 0 {
+		// Perform a garbage collection of hidden closure functions that
+		// are no longer reachable from top-level functions following
+		// inlining. See #59404 and #59638 for more context.
+		inline.GarbageCollectUnreferencedHiddenClosures()
+
+		if base.Debug.DumpInlFuncProps != "" {
+			inlheur.DumpFuncProps(nil, base.Debug.DumpInlFuncProps)
+		}
+		if inlheur.Enabled() {
+			inline.PostProcessCallSites(inlProfile)
+			inlheur.TearDown()
+		}
+	}
+}
+
+// DevirtualizeAndInlineFunc interleaves devirtualization and inlining
+// on a single function.
+func DevirtualizeAndInlineFunc(fn *ir.Func, profile *pgo.Profile) {
+	ir.WithFunc(fn, func() {
+		if base.Flag.LowerL != 0 {
+			if inlheur.Enabled() && !fn.Wrapper() {
+				inlheur.ScoreCalls(fn)
+				defer inlheur.ScoreCallsCleanup()
+			}
+			if base.Debug.DumpInlFuncProps != "" && !fn.Wrapper() {
+				inlheur.DumpFuncProps(fn, base.Debug.DumpInlFuncProps)
+			}
+		}
+
+		bigCaller := base.Flag.LowerL != 0 && inline.IsBigFunc(fn)
+		if bigCaller && base.Flag.LowerM > 1 {
+			fmt.Printf("%v: function %v considered 'big'; reducing max cost of inlinees\n", ir.Line(fn), fn)
+		}
+
+		// Walk fn's body and apply devirtualization and inlining.
+		var inlCalls []*ir.InlinedCallExpr
+		var edit func(ir.Node) ir.Node
+		edit = func(n ir.Node) ir.Node {
+			switch n := n.(type) {
+			case *ir.TailCallStmt:
+				n.Call.NoInline = true // can't inline yet
+			}
+
+			ir.EditChildren(n, edit)
+
+			if call, ok := n.(*ir.CallExpr); ok {
+				devirtualize.StaticCall(call)
+
+				if inlCall := inline.TryInlineCall(fn, call, bigCaller, profile); inlCall != nil {
+					inlCalls = append(inlCalls, inlCall)
+					n = inlCall
+				}
+			}
+
+			return n
+		}
+		ir.EditChildren(fn, edit)
+
+		// If we inlined any calls, we want to recursively visit their
+		// bodies for further devirtualization and inlining. However, we
+		// need to wait until *after* the original function body has been
+		// expanded, or else inlCallee can have false positives (e.g.,
+		// #54632).
+		for len(inlCalls) > 0 {
+			call := inlCalls[0]
+			inlCalls = inlCalls[1:]
+			ir.EditChildren(call, edit)
+		}
+	})
+}
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index c1145f980e..99e778fd70 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -15,6 +15,7 @@ import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/dwarfgen"
 	"cmd/compile/internal/inline"
+	"cmd/compile/internal/inline/interleaved"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/objw"
 	"cmd/compile/internal/reflectdata"
@@ -3794,7 +3795,9 @@ func finishWrapperFunc(fn *ir.Func, target *ir.Package) {
 	// We generate wrappers after the global inlining pass,
 	// so we're responsible for applying inlining ourselves here.
 	// TODO(prattmic): plumb PGO.
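+	// DevirtualizeAndInlineFunc applies static devirtualization as
+	// well, which can expose additional inlinable calls in the wrapper.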
-	inline.InlineCalls(fn, nil)
+	interleaved.DevirtualizeAndInlineFunc(fn, nil)
 
 	// The body of wrapper function after inlining may reveal new ir.OMETHVALUE node,
 	// we don't know whether wrapper function has been generated for it or not, so
diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go
index a803e53502..d2ca1f37a9 100644
--- a/src/cmd/compile/internal/noder/unified.go
+++ b/src/cmd/compile/internal/noder/unified.go
@@ -280,7 +280,7 @@ func readBodies(target *ir.Package, duringInlining bool) {
 
 		oldLowerM := base.Flag.LowerM
 		base.Flag.LowerM = 0
-		inline.InlineDecls(nil, inlDecls, false)
+		inline.CanInlineFuncs(inlDecls, nil)
 		base.Flag.LowerM = oldLowerM
 
 		for _, fn := range inlDecls {
diff --git a/test/fixedbugs/issue42284.dir/a.go b/test/fixedbugs/issue42284.dir/a.go
index f7fd80bd20..ccf54fad54 100644
--- a/test/fixedbugs/issue42284.dir/a.go
+++ b/test/fixedbugs/issue42284.dir/a.go
@@ -20,7 +20,7 @@ func F(i I) I { // ERROR "can inline F" "leaking param: i to result ~r0 level=0"
 
 func g() {
 	h := E() // ERROR "inlining call to E" "T\(0\) does not escape"
-	h.M() // ERROR "devirtualizing h.M to T"
+	h.M() // ERROR "devirtualizing h.M to T" "inlining call to T.M"
 
 	// BAD: T(0) could be stack allocated.
 	i := F(T(0)) // ERROR "inlining call to F" "T\(0\) escapes to heap"
diff --git a/test/fixedbugs/issue42284.dir/b.go b/test/fixedbugs/issue42284.dir/b.go
index 8cd93b8db4..559de59184 100644
--- a/test/fixedbugs/issue42284.dir/b.go
+++ b/test/fixedbugs/issue42284.dir/b.go
@@ -8,7 +8,7 @@ import "./a"
 
 func g() {
 	h := a.E() // ERROR "inlining call to a.E" "T\(0\) does not escape"
-	h.M() // ERROR "devirtualizing h.M to a.T"
+	h.M() // ERROR "devirtualizing h.M to a.T" "inlining call to a.T.M"
 
 	// BAD: T(0) could be stack allocated.
 	i := a.F(a.T(0)) // ERROR "inlining call to a.F" "a.T\(0\) escapes to heap"
diff --git a/test/fixedbugs/issue52193.go b/test/fixedbugs/issue52193.go
new file mode 100644
index 0000000000..40e6dcb33b
--- /dev/null
+++ b/test/fixedbugs/issue52193.go
@@ -0,0 +1,46 @@
+// errorcheck -0 -m
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import (
+	"crypto/ecdh"
+	"crypto/rand"
+)
+
+func F(peerShare []byte) ([]byte, error) { // ERROR "leaking param: peerShare"
+	p256 := ecdh.P256() // ERROR "inlining call to ecdh.P256"
+
+	ourKey, err := p256.GenerateKey(rand.Reader) // ERROR "devirtualizing p256.GenerateKey" "inlining call to ecdh.*GenerateKey"
+	if err != nil {
+		return nil, err
+	}
+
+	peerPublic, err := p256.NewPublicKey(peerShare) // ERROR "devirtualizing p256.NewPublicKey" "inlining call to ecdh.*NewPublicKey"
+	if err != nil {
+		return nil, err
+	}
+
+	return ourKey.ECDH(peerPublic)
+}
+
+// Test that inlining doesn't break if devirtualization exposes a new
+// inlinable callee.
+
+func f() { // ERROR "can inline f"
+	var i interface{ m() } = T(0) // ERROR "T\(0\) does not escape"
+	i.m() // ERROR "devirtualizing i.m"
+}
+
+type T int
+
+func (T) m() { // ERROR "can inline T.m"
+	if never {
+		f() // ERROR "inlining call to f" "devirtualizing i.m" "T\(0\) does not escape"
+	}
+}
+
+var never bool