diff --git a/cmd/bundle/main.go b/cmd/bundle/main.go index 87ef208dbc..5da7aa1295 100644 --- a/cmd/bundle/main.go +++ b/cmd/bundle/main.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.5 +// +build !go1.8 // Bundle creates a single-source-file version of a source package // suitable for inclusion in a particular target package. diff --git a/cmd/bundle/main18.go b/cmd/bundle/main18.go new file mode 100644 index 0000000000..9b44559aa2 --- /dev/null +++ b/cmd/bundle/main18.go @@ -0,0 +1,388 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +// Bundle creates a single-source-file version of a source package +// suitable for inclusion in a particular target package. +// +// Usage: +// +// bundle [-o file] [-dst path] [-pkg name] [-prefix p] [-import old=new] +// +// The src argument specifies the import path of the package to bundle. +// The bundling of a directory of source files into a single source file +// necessarily imposes a number of constraints. +// The package being bundled must not use cgo; must not use conditional +// file compilation, whether with build tags or system-specific file names +// like code_amd64.go; must not depend on any special comments, which +// may not be preserved; must not use any assembly sources; +// must not use renaming imports; and must not use reflection-based APIs +// that depend on the specific names of types or struct fields. +// +// By default, bundle writes the bundled code to standard output. +// If the -o argument is given, bundle writes to the named file +// and also includes a ``//go:generate'' comment giving the exact +// command line used, for regenerating the file with ``go generate.'' +// +// Bundle customizes its output for inclusion in a particular package, the destination package. 
+// By default bundle assumes the destination is the package in the current directory, +// but the destination package can be specified explicitly using the -dst option, +// which takes an import path as its argument. +// If the source package imports the destination package, bundle will remove +// those imports and rewrite any references to use direct references to the +// corresponding symbols. +// Bundle also must write a package declaration in the output and must +// choose a name to use in that declaration. +// If the -package option is given, bundle uses that name. +// Otherwise, if the -dst option is given, bundle uses the last +// element of the destination import path. +// Otherwise, by default bundle uses the package name found in the +// package sources in the current directory. +// +// To avoid collisions, bundle inserts a prefix at the beginning of +// every package-level const, func, type, and var identifier in src's code, +// updating references accordingly. The default prefix is the package name +// of the source package followed by an underscore. The -prefix option +// specifies an alternate prefix. +// +// Occasionally it is necessary to rewrite imports during the bundling +// process. The -import option, which may be repeated, specifies that +// an import of "old" should be rewritten to import "new" instead. 
+// +// Example +// +// Bundle archive/zip for inclusion in cmd/dist: +// +// cd $GOROOT/src/cmd/dist +// bundle -o zip.go archive/zip +// +// Bundle golang.org/x/net/http2 for inclusion in net/http, +// prefixing all identifiers by "http2" instead of "http2_", +// and rewriting the import "golang.org/x/net/http2/hpack" +// to "internal/golang.org/x/net/http2/hpack": +// +// cd $GOROOT/src/net/http +// bundle -o h2_bundle.go \ +// -prefix http2 \ +// -import golang.org/x/net/http2/hpack=internal/golang.org/x/net/http2/hpack \ +// golang.org/x/net/http2 +// +// Two ways to update the http2 bundle: +// +// go generate net/http +// +// cd $GOROOT/src/net/http +// go generate +// +// Update both bundles, restricting ``go generate'' to running bundle commands: +// +// go generate -run bundle cmd/dist net/http +// +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/build" + "go/format" + "go/parser" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "path" + "strconv" + "strings" + + "golang.org/x/tools/go/loader" +) + +var ( + outputFile = flag.String("o", "", "write output to `file` (default standard output)") + dstPath = flag.String("dst", "", "set destination import `path` (default taken from current directory)") + pkgName = flag.String("pkg", "", "set destination package `name` (default taken from current directory)") + prefix = flag.String("prefix", "", "set bundled identifier prefix to `p` (default source package name + \"_\")") + underscore = flag.Bool("underscore", false, "rewrite golang.org to golang_org in imports; temporary workaround for golang.org/issue/16333") + + importMap = map[string]string{} +) + +func init() { + flag.Var(flagFunc(addImportMap), "import", "rewrite import using `map`, of form old=new (can be repeated)") +} + +func addImportMap(s string) { + if strings.Count(s, "=") != 1 { + log.Fatal("-import argument must be of the form old=new") + } + i := strings.Index(s, "=") + old, new := s[:i], s[i+1:] + if old == "" || new 
== "" { + log.Fatal("-import argument must be of the form old=new; old and new must be non-empty") + } + importMap[old] = new +} + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: bundle [options] \n") + flag.PrintDefaults() +} + +func main() { + log.SetPrefix("bundle: ") + log.SetFlags(0) + + flag.Usage = usage + flag.Parse() + args := flag.Args() + if len(args) != 1 { + usage() + os.Exit(2) + } + + if *dstPath != "" { + if *pkgName == "" { + *pkgName = path.Base(*dstPath) + } + } else { + wd, _ := os.Getwd() + pkg, err := build.ImportDir(wd, 0) + if err != nil { + log.Fatalf("cannot find package in current directory: %v", err) + } + *dstPath = pkg.ImportPath + if *pkgName == "" { + *pkgName = pkg.Name + } + } + + code, err := bundle(args[0], *dstPath, *pkgName, *prefix) + if err != nil { + log.Fatal(err) + } + if *outputFile != "" { + err := ioutil.WriteFile(*outputFile, code, 0666) + if err != nil { + log.Fatal(err) + } + } else { + _, err := os.Stdout.Write(code) + if err != nil { + log.Fatal(err) + } + } +} + +// isStandardImportPath is copied from cmd/go in the standard library. +func isStandardImportPath(path string) bool { + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + elem := path[:i] + return !strings.Contains(elem, ".") +} + +var ctxt = &build.Default + +func bundle(src, dst, dstpkg, prefix string) ([]byte, error) { + // Load the initial package. 
+ conf := loader.Config{ParserMode: parser.ParseComments, Build: ctxt} + conf.TypeCheckFuncBodies = func(p string) bool { return p == src } + conf.Import(src) + + lprog, err := conf.Load() + if err != nil { + return nil, err + } + + info := lprog.Package(src) + if prefix == "" { + pkgName := info.Files[0].Name.Name + prefix = pkgName + "_" + } + + objsToUpdate := make(map[types.Object]bool) + var rename func(from types.Object) + rename = func(from types.Object) { + if !objsToUpdate[from] { + objsToUpdate[from] = true + + // Renaming a type that is used as an embedded field + // requires renaming the field too. e.g. + // type T int // if we rename this to U.. + // var s struct {T} + // print(s.T) // ...this must change too + if _, ok := from.(*types.TypeName); ok { + for id, obj := range info.Uses { + if obj == from { + if field := info.Defs[id]; field != nil { + rename(field) + } + } + } + } + } + } + + // Rename each package-level object. + scope := info.Pkg.Scope() + for _, name := range scope.Names() { + rename(scope.Lookup(name)) + } + + var out bytes.Buffer + + fmt.Fprintf(&out, "// Code generated by golang.org/x/tools/cmd/bundle.\n") + if *outputFile != "" { + fmt.Fprintf(&out, "//go:generate bundle %s\n", strings.Join(os.Args[1:], " ")) + } else { + fmt.Fprintf(&out, "// $ bundle %s\n", strings.Join(os.Args[1:], " ")) + } + fmt.Fprintf(&out, "\n") + + // Concatenate package comments from all files... + for _, f := range info.Files { + if doc := f.Doc.Text(); strings.TrimSpace(doc) != "" { + for _, line := range strings.Split(doc, "\n") { + fmt.Fprintf(&out, "// %s\n", line) + } + } + } + // ...but don't let them become the actual package comment. + fmt.Fprintln(&out) + + fmt.Fprintf(&out, "package %s\n\n", dstpkg) + + // BUG(adonovan,shurcooL): bundle may generate incorrect code + // due to shadowing between identifiers and imported package names. 
+ // + // The generated code will either fail to compile or + // (unlikely) compile successfully but have different behavior + // than the original package. The risk of this happening is higher + // when the original package has renamed imports (they're typically + // renamed in order to resolve a shadow inside that particular .go file). + + // TODO(adonovan,shurcooL): + // - detect shadowing issues, and either return error or resolve them + // - preserve comments from the original import declarations. + + // pkgStd and pkgExt are sets of printed import specs. This is done + // to deduplicate instances of the same import name and path. + var pkgStd = make(map[string]bool) + var pkgExt = make(map[string]bool) + for _, f := range info.Files { + for _, imp := range f.Imports { + path, err := strconv.Unquote(imp.Path.Value) + if err != nil { + log.Fatalf("invalid import path string: %v", err) // Shouldn't happen here since conf.Load succeeded. + } + if path == dst { + continue + } + if newPath, ok := importMap[path]; ok { + path = newPath + } + + var name string + if imp.Name != nil { + name = imp.Name.Name + } + spec := fmt.Sprintf("%s %q", name, path) + if isStandardImportPath(path) { + pkgStd[spec] = true + } else { + if *underscore { + spec = strings.Replace(spec, "golang.org/", "golang_org/", 1) + } + pkgExt[spec] = true + } + } + } + + // Print a single declaration that imports all necessary packages. + fmt.Fprintln(&out, "import (") + for p := range pkgStd { + fmt.Fprintf(&out, "\t%s\n", p) + } + if len(pkgExt) > 0 { + fmt.Fprintln(&out) + } + for p := range pkgExt { + fmt.Fprintf(&out, "\t%s\n", p) + } + fmt.Fprint(&out, ")\n\n") + + // Modify and print each file. + for _, f := range info.Files { + // Update renamed identifiers. 
+ for id, obj := range info.Defs { + if objsToUpdate[obj] { + id.Name = prefix + obj.Name() + } + } + for id, obj := range info.Uses { + if objsToUpdate[obj] { + id.Name = prefix + obj.Name() + } + } + + // For each qualified identifier that refers to the + // destination package, remove the qualifier. + // The "@@@." strings are removed in postprocessing. + ast.Inspect(f, func(n ast.Node) bool { + if sel, ok := n.(*ast.SelectorExpr); ok { + if id, ok := sel.X.(*ast.Ident); ok { + if obj, ok := info.Uses[id].(*types.PkgName); ok { + if obj.Imported().Path() == dst { + id.Name = "@@@" + } + } + } + } + return true + }) + + // Pretty-print package-level declarations. + // but no package or import declarations. + // + // TODO(adonovan): this may cause loss of comments + // preceding or associated with the package or import + // declarations or not associated with any declaration. + // Check. + var buf bytes.Buffer + for _, decl := range f.Decls { + if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT { + continue + } + buf.Reset() + format.Node(&buf, lprog.Fset, decl) + // Remove each "@@@." in the output. + // TODO(adonovan): not hygienic. + out.Write(bytes.Replace(buf.Bytes(), []byte("@@@."), nil, -1)) + out.WriteString("\n\n") + } + } + + // Now format the entire thing. + result, err := format.Source(out.Bytes()) + if err != nil { + log.Fatalf("formatting failed: %v", err) + } + + return result, nil +} + +type flagFunc func(string) + +func (f flagFunc) Set(s string) error { + f(s) + return nil +} + +func (f flagFunc) String() string { return "" } diff --git a/cmd/guru/callees.go b/cmd/guru/callees.go index 6c6f70e17f..e1e432c628 100644 --- a/cmd/guru/callees.go +++ b/cmd/guru/callees.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+// +build !go1.8 + package main import ( diff --git a/cmd/guru/callees18.go b/cmd/guru/callees18.go new file mode 100644 index 0000000000..6e26964083 --- /dev/null +++ b/cmd/guru/callees18.go @@ -0,0 +1,259 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package main + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "sort" + + "golang.org/x/tools/cmd/guru/serial" + "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/pointer" + "golang.org/x/tools/go/ssa" + "golang.org/x/tools/go/ssa/ssautil" +) + +// Callees reports the possible callees of the function call site +// identified by the specified source location. +func callees(q *Query) error { + lconf := loader.Config{Build: q.Build} + + if err := setPTAScope(&lconf, q.Scope); err != nil { + return err + } + + // Load/parse/type-check the program. + lprog, err := lconf.Load() + if err != nil { + return err + } + + qpos, err := parseQueryPos(lprog, q.Pos, true) // needs exact pos + if err != nil { + return err + } + + // Determine the enclosing call for the specified position. + var e *ast.CallExpr + for _, n := range qpos.path { + if e, _ = n.(*ast.CallExpr); e != nil { + break + } + } + if e == nil { + return fmt.Errorf("there is no function call here") + } + // TODO(adonovan): issue an error if the call is "too far + // away" from the current selection, as this most likely is + // not what the user intended. + + // Reject type conversions. + if qpos.info.Types[e.Fun].IsType() { + return fmt.Errorf("this is a type conversion, not a function call") + } + + // Deal with obviously static calls before constructing SSA form. + // Some static calls may yet require SSA construction, + // e.g. f := func(){}; f(). 
+ switch funexpr := unparen(e.Fun).(type) { + case *ast.Ident: + switch obj := qpos.info.Uses[funexpr].(type) { + case *types.Builtin: + // Reject calls to built-ins. + return fmt.Errorf("this is a call to the built-in '%s' operator", obj.Name()) + case *types.Func: + // This is a static function call + q.Output(lprog.Fset, &calleesTypesResult{ + site: e, + callee: obj, + }) + return nil + } + case *ast.SelectorExpr: + sel := qpos.info.Selections[funexpr] + if sel == nil { + // qualified identifier. + // May refer to top level function variable + // or to top level function. + callee := qpos.info.Uses[funexpr.Sel] + if obj, ok := callee.(*types.Func); ok { + q.Output(lprog.Fset, &calleesTypesResult{ + site: e, + callee: obj, + }) + return nil + } + } else if sel.Kind() == types.MethodVal { + // Inspect the receiver type of the selected method. + // If it is concrete, the call is statically dispatched. + // (Due to implicit field selections, it is not enough to look + // at sel.Recv(), the type of the actual receiver expression.) + method := sel.Obj().(*types.Func) + recvtype := method.Type().(*types.Signature).Recv().Type() + if !types.IsInterface(recvtype) { + // static method call + q.Output(lprog.Fset, &calleesTypesResult{ + site: e, + callee: method, + }) + return nil + } + } + } + + prog := ssautil.CreateProgram(lprog, ssa.GlobalDebug) + + ptaConfig, err := setupPTA(prog, lprog, q.PTALog, q.Reflection) + if err != nil { + return err + } + + pkg := prog.Package(qpos.info.Pkg) + if pkg == nil { + return fmt.Errorf("no SSA package") + } + + // Defer SSA construction till after errors are reported. + prog.Build() + + // Ascertain calling function and call site. + callerFn := ssa.EnclosingFunction(pkg, qpos.path) + if callerFn == nil { + return fmt.Errorf("no SSA function built for this location (dead code?)") + } + + // Find the call site. 
+ site, err := findCallSite(callerFn, e) + if err != nil { + return err + } + + funcs, err := findCallees(ptaConfig, site) + if err != nil { + return err + } + + q.Output(lprog.Fset, &calleesSSAResult{ + site: site, + funcs: funcs, + }) + return nil +} + +func findCallSite(fn *ssa.Function, call *ast.CallExpr) (ssa.CallInstruction, error) { + instr, _ := fn.ValueForExpr(call) + callInstr, _ := instr.(ssa.CallInstruction) + if instr == nil { + return nil, fmt.Errorf("this call site is unreachable in this analysis") + } + return callInstr, nil +} + +func findCallees(conf *pointer.Config, site ssa.CallInstruction) ([]*ssa.Function, error) { + // Avoid running the pointer analysis for static calls. + if callee := site.Common().StaticCallee(); callee != nil { + switch callee.String() { + case "runtime.SetFinalizer", "(reflect.Value).Call": + // The PTA treats calls to these intrinsics as dynamic. + // TODO(adonovan): avoid reliance on PTA internals. + + default: + return []*ssa.Function{callee}, nil // singleton + } + } + + // Dynamic call: use pointer analysis. + conf.BuildCallGraph = true + cg := ptrAnalysis(conf).CallGraph + cg.DeleteSyntheticNodes() + + // Find all call edges from the site. + n := cg.Nodes[site.Parent()] + if n == nil { + return nil, fmt.Errorf("this call site is unreachable in this analysis") + } + calleesMap := make(map[*ssa.Function]bool) + for _, edge := range n.Out { + if edge.Site == site { + calleesMap[edge.Callee.Func] = true + } + } + + // De-duplicate and sort. 
+ funcs := make([]*ssa.Function, 0, len(calleesMap)) + for f := range calleesMap { + funcs = append(funcs, f) + } + sort.Sort(byFuncPos(funcs)) + return funcs, nil +} + +type calleesSSAResult struct { + site ssa.CallInstruction + funcs []*ssa.Function +} + +type calleesTypesResult struct { + site *ast.CallExpr + callee *types.Func +} + +func (r *calleesSSAResult) PrintPlain(printf printfFunc) { + if len(r.funcs) == 0 { + // dynamic call on a provably nil func/interface + printf(r.site, "%s on nil value", r.site.Common().Description()) + } else { + printf(r.site, "this %s dispatches to:", r.site.Common().Description()) + for _, callee := range r.funcs { + printf(callee, "\t%s", callee) + } + } +} + +func (r *calleesSSAResult) JSON(fset *token.FileSet) []byte { + j := &serial.Callees{ + Pos: fset.Position(r.site.Pos()).String(), + Desc: r.site.Common().Description(), + } + for _, callee := range r.funcs { + j.Callees = append(j.Callees, &serial.Callee{ + Name: callee.String(), + Pos: fset.Position(callee.Pos()).String(), + }) + } + return toJSON(j) +} + +func (r *calleesTypesResult) PrintPlain(printf printfFunc) { + printf(r.site, "this static function call dispatches to:") + printf(r.callee, "\t%s", r.callee.FullName()) +} + +func (r *calleesTypesResult) JSON(fset *token.FileSet) []byte { + j := &serial.Callees{ + Pos: fset.Position(r.site.Pos()).String(), + Desc: "static function call", + } + j.Callees = []*serial.Callee{ + &serial.Callee{ + Name: r.callee.FullName(), + Pos: fset.Position(r.callee.Pos()).String(), + }, + } + return toJSON(j) +} + +// NB: byFuncPos is not deterministic across packages since it depends on load order. +// Use lessPos if the tests need it. 
+type byFuncPos []*ssa.Function + +func (a byFuncPos) Len() int { return len(a) } +func (a byFuncPos) Less(i, j int) bool { return a[i].Pos() < a[j].Pos() } +func (a byFuncPos) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/cmd/guru/definition.go b/cmd/guru/definition.go index 46d48060b1..d71bb4b04b 100644 --- a/cmd/guru/definition.go +++ b/cmd/guru/definition.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.8 + package main import ( diff --git a/cmd/guru/definition18.go b/cmd/guru/definition18.go new file mode 100644 index 0000000000..c267f53168 --- /dev/null +++ b/cmd/guru/definition18.go @@ -0,0 +1,207 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package main + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + pathpkg "path" + "path/filepath" + "strconv" + + "golang.org/x/tools/cmd/guru/serial" + "golang.org/x/tools/go/buildutil" + "golang.org/x/tools/go/loader" +) + +// definition reports the location of the definition of an identifier. +func definition(q *Query) error { + // First try the simple resolution done by parser. + // It only works for intra-file references but it is very fast. + // (Extending this approach to all the files of the package, + // resolved using ast.NewPackage, was not worth the effort.) + { + qpos, err := fastQueryPos(q.Build, q.Pos) + if err != nil { + return err + } + + id, _ := qpos.path[0].(*ast.Ident) + if id == nil { + return fmt.Errorf("no identifier here") + } + + // Did the parser resolve it to a local object? + if obj := id.Obj; obj != nil && obj.Pos().IsValid() { + q.Output(qpos.fset, &definitionResult{ + pos: obj.Pos(), + descr: fmt.Sprintf("%s %s", obj.Kind, obj.Name), + }) + return nil // success + } + + // Qualified identifier? 
+ if pkg := packageForQualIdent(qpos.path, id); pkg != "" { + srcdir := filepath.Dir(qpos.fset.File(qpos.start).Name()) + tok, pos, err := findPackageMember(q.Build, qpos.fset, srcdir, pkg, id.Name) + if err != nil { + return err + } + q.Output(qpos.fset, &definitionResult{ + pos: pos, + descr: fmt.Sprintf("%s %s.%s", tok, pkg, id.Name), + }) + return nil // success + } + + // Fall back on the type checker. + } + + // Run the type checker. + lconf := loader.Config{Build: q.Build} + allowErrors(&lconf) + + if _, err := importQueryPackage(q.Pos, &lconf); err != nil { + return err + } + + // Load/parse/type-check the program. + lprog, err := lconf.Load() + if err != nil { + return err + } + + qpos, err := parseQueryPos(lprog, q.Pos, false) + if err != nil { + return err + } + + id, _ := qpos.path[0].(*ast.Ident) + if id == nil { + return fmt.Errorf("no identifier here") + } + + // Look up the declaration of this identifier. + // If id is an anonymous field declaration, + // it is both a use of a type and a def of a field; + // prefer the use in that case. + obj := qpos.info.Uses[id] + if obj == nil { + obj = qpos.info.Defs[id] + if obj == nil { + // Happens for y in "switch y := x.(type)", + // and the package declaration, + // but I think that's all. + return fmt.Errorf("no object for identifier") + } + } + + if !obj.Pos().IsValid() { + return fmt.Errorf("%s is built in", obj.Name()) + } + + q.Output(lprog.Fset, &definitionResult{ + pos: obj.Pos(), + descr: qpos.objectString(obj), + }) + return nil +} + +// packageForQualIdent returns the package p if id is X in a qualified +// identifier p.X; it returns "" otherwise. +// +// Precondition: id is path[0], and the parser did not resolve id to a +// local object. For speed, packageForQualIdent assumes that p is a +// package iff it is the basename of an import path (and not, say, a +// package-level decl in another file or a predeclared identifier). 
+func packageForQualIdent(path []ast.Node, id *ast.Ident) string { + if sel, ok := path[1].(*ast.SelectorExpr); ok && sel.Sel == id && ast.IsExported(id.Name) { + if pkgid, ok := sel.X.(*ast.Ident); ok && pkgid.Obj == nil { + f := path[len(path)-1].(*ast.File) + for _, imp := range f.Imports { + path, _ := strconv.Unquote(imp.Path.Value) + if imp.Name != nil { + if imp.Name.Name == pkgid.Name { + return path // renaming import + } + } else if pathpkg.Base(path) == pkgid.Name { + return path // ordinary import + } + } + } + } + return "" +} + +// findPackageMember returns the type and position of the declaration of +// pkg.member by loading and parsing the files of that package. +// srcdir is the directory in which the import appears. +func findPackageMember(ctxt *build.Context, fset *token.FileSet, srcdir, pkg, member string) (token.Token, token.Pos, error) { + bp, err := ctxt.Import(pkg, srcdir, 0) + if err != nil { + return 0, token.NoPos, err // no files for package + } + + // TODO(adonovan): opt: parallelize. + for _, fname := range bp.GoFiles { + filename := filepath.Join(bp.Dir, fname) + + // Parse the file, opening it the file via the build.Context + // so that we observe the effects of the -modified flag. + f, _ := buildutil.ParseFile(fset, ctxt, nil, ".", filename, parser.Mode(0)) + if f == nil { + continue + } + + // Find a package-level decl called 'member'. 
+ for _, decl := range f.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.ValueSpec: + // const or var + for _, id := range spec.Names { + if id.Name == member { + return decl.Tok, id.Pos(), nil + } + } + case *ast.TypeSpec: + if spec.Name.Name == member { + return token.TYPE, spec.Name.Pos(), nil + } + } + } + case *ast.FuncDecl: + if decl.Recv == nil && decl.Name.Name == member { + return token.FUNC, decl.Name.Pos(), nil + } + } + } + } + + return 0, token.NoPos, fmt.Errorf("couldn't find declaration of %s in %q", member, pkg) +} + +type definitionResult struct { + pos token.Pos // (nonzero) location of definition + descr string // description of object it denotes +} + +func (r *definitionResult) PrintPlain(printf printfFunc) { + printf(r.pos, "defined here as %s", r.descr) +} + +func (r *definitionResult) JSON(fset *token.FileSet) []byte { + return toJSON(&serial.Definition{ + Desc: r.descr, + ObjPos: fset.Position(r.pos).String(), + }) +} diff --git a/cmd/guru/describe.go b/cmd/guru/describe.go index 451f9ef6e8..8e06e4c65e 100644 --- a/cmd/guru/describe.go +++ b/cmd/guru/describe.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.8 + package main import ( diff --git a/cmd/guru/describe18.go b/cmd/guru/describe18.go new file mode 100644 index 0000000000..6ec38fd69f --- /dev/null +++ b/cmd/guru/describe18.go @@ -0,0 +1,900 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.8 + +package main + +import ( + "bytes" + "fmt" + "go/ast" + exact "go/constant" + "go/token" + "go/types" + "os" + "strings" + "unicode/utf8" + + "golang.org/x/tools/cmd/guru/serial" + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/types/typeutil" +) + +// describe describes the syntax node denoted by the query position, +// including: +// - its syntactic category +// - the definition of its referent (for identifiers) [now redundant] +// - its type, fields, and methods (for an expression or type expression) +// +func describe(q *Query) error { + lconf := loader.Config{Build: q.Build} + allowErrors(&lconf) + + if _, err := importQueryPackage(q.Pos, &lconf); err != nil { + return err + } + + // Load/parse/type-check the program. + lprog, err := lconf.Load() + if err != nil { + return err + } + + qpos, err := parseQueryPos(lprog, q.Pos, true) // (need exact pos) + if err != nil { + return err + } + + if false { // debugging + fprintf(os.Stderr, lprog.Fset, qpos.path[0], "you selected: %s %s", + astutil.NodeDescription(qpos.path[0]), pathToString(qpos.path)) + } + + var qr QueryResult + path, action := findInterestingNode(qpos.info, qpos.path) + switch action { + case actionExpr: + qr, err = describeValue(qpos, path) + + case actionType: + qr, err = describeType(qpos, path) + + case actionPackage: + qr, err = describePackage(qpos, path) + + case actionStmt: + qr, err = describeStmt(qpos, path) + + case actionUnknown: + qr = &describeUnknownResult{path[0]} + + default: + panic(action) // unreachable + } + if err != nil { + return err + } + q.Output(lprog.Fset, qr) + return nil +} + +type describeUnknownResult struct { + node ast.Node +} + +func (r *describeUnknownResult) PrintPlain(printf printfFunc) { + // Nothing much to say about misc syntax. 
+ printf(r.node, "%s", astutil.NodeDescription(r.node)) +} + +func (r *describeUnknownResult) JSON(fset *token.FileSet) []byte { + return toJSON(&serial.Describe{ + Desc: astutil.NodeDescription(r.node), + Pos: fset.Position(r.node.Pos()).String(), + }) +} + +type action int + +const ( + actionUnknown action = iota // None of the below + actionExpr // FuncDecl, true Expr or Ident(types.{Const,Var}) + actionType // type Expr or Ident(types.TypeName). + actionStmt // Stmt or Ident(types.Label) + actionPackage // Ident(types.Package) or ImportSpec +) + +// findInterestingNode classifies the syntax node denoted by path as one of: +// - an expression, part of an expression or a reference to a constant +// or variable; +// - a type, part of a type, or a reference to a named type; +// - a statement, part of a statement, or a label referring to a statement; +// - part of a package declaration or import spec. +// - none of the above. +// and returns the most "interesting" associated node, which may be +// the same node, an ancestor or a descendent. +// +func findInterestingNode(pkginfo *loader.PackageInfo, path []ast.Node) ([]ast.Node, action) { + // TODO(adonovan): integrate with go/types/stdlib_test.go and + // apply this to every AST node we can find to make sure it + // doesn't crash. + + // TODO(adonovan): audit for ParenExpr safety, esp. since we + // traverse up and down. + + // TODO(adonovan): if the users selects the "." in + // "fmt.Fprintf()", they'll get an ambiguous selection error; + // we won't even reach here. Can we do better? + + // TODO(adonovan): describing a field within 'type T struct {...}' + // describes the (anonymous) struct type and concludes "no methods". + // We should ascend to the enclosing type decl, if any. + + for len(path) > 0 { + switch n := path[0].(type) { + case *ast.GenDecl: + if len(n.Specs) == 1 { + // Descend to sole {Import,Type,Value}Spec child. + path = append([]ast.Node{n.Specs[0]}, path...) 
+ continue + } + return path, actionUnknown // uninteresting + + case *ast.FuncDecl: + // Descend to function name. + path = append([]ast.Node{n.Name}, path...) + continue + + case *ast.ImportSpec: + return path, actionPackage + + case *ast.ValueSpec: + if len(n.Names) == 1 { + // Descend to sole Ident child. + path = append([]ast.Node{n.Names[0]}, path...) + continue + } + return path, actionUnknown // uninteresting + + case *ast.TypeSpec: + // Descend to type name. + path = append([]ast.Node{n.Name}, path...) + continue + + case ast.Stmt: + return path, actionStmt + + case *ast.ArrayType, + *ast.StructType, + *ast.FuncType, + *ast.InterfaceType, + *ast.MapType, + *ast.ChanType: + return path, actionType + + case *ast.Comment, *ast.CommentGroup, *ast.File, *ast.KeyValueExpr, *ast.CommClause: + return path, actionUnknown // uninteresting + + case *ast.Ellipsis: + // Continue to enclosing node. + // e.g. [...]T in ArrayType + // f(x...) in CallExpr + // f(x...T) in FuncType + + case *ast.Field: + // TODO(adonovan): this needs more thought, + // since fields can be so many things. + if len(n.Names) == 1 { + // Descend to sole Ident child. + path = append([]ast.Node{n.Names[0]}, path...) + continue + } + // Zero names (e.g. anon field in struct) + // or multiple field or param names: + // continue to enclosing field list. + + case *ast.FieldList: + // Continue to enclosing node: + // {Struct,Func,Interface}Type or FuncDecl. + + case *ast.BasicLit: + if _, ok := path[1].(*ast.ImportSpec); ok { + return path[1:], actionPackage + } + return path, actionExpr + + case *ast.SelectorExpr: + // TODO(adonovan): use Selections info directly. + if pkginfo.Uses[n.Sel] == nil { + // TODO(adonovan): is this reachable? + return path, actionUnknown + } + // Descend to .Sel child. + path = append([]ast.Node{n.Sel}, path...) 
+ continue + + case *ast.Ident: + switch pkginfo.ObjectOf(n).(type) { + case *types.PkgName: + return path, actionPackage + + case *types.Const: + return path, actionExpr + + case *types.Label: + return path, actionStmt + + case *types.TypeName: + return path, actionType + + case *types.Var: + // For x in 'struct {x T}', return struct type, for now. + if _, ok := path[1].(*ast.Field); ok { + _ = path[2].(*ast.FieldList) // assertion + if _, ok := path[3].(*ast.StructType); ok { + return path[3:], actionType + } + } + return path, actionExpr + + case *types.Func: + return path, actionExpr + + case *types.Builtin: + // For reference to built-in function, return enclosing call. + path = path[1:] // ascend to enclosing function call + continue + + case *types.Nil: + return path, actionExpr + } + + // No object. + switch path[1].(type) { + case *ast.SelectorExpr: + // Return enclosing selector expression. + return path[1:], actionExpr + + case *ast.Field: + // TODO(adonovan): test this. + // e.g. all f in: + // struct { f, g int } + // interface { f() } + // func (f T) method(f, g int) (f, g bool) + // + // switch path[3].(type) { + // case *ast.FuncDecl: + // case *ast.StructType: + // case *ast.InterfaceType: + // } + // + // return path[1:], actionExpr + // + // Unclear what to do with these. + // Struct.Fields -- field + // Interface.Methods -- field + // FuncType.{Params.Results} -- actionExpr + // FuncDecl.Recv -- actionExpr + + case *ast.File: + // 'package foo' + return path, actionPackage + + case *ast.ImportSpec: + return path[1:], actionPackage + + default: + // e.g. blank identifier + // or y in "switch y := x.(type)" + // or code in a _test.go file that's not part of the package. + return path, actionUnknown + } + + case *ast.StarExpr: + if pkginfo.Types[n].IsType() { + return path, actionType + } + return path, actionExpr + + case ast.Expr: + // All Expr but {BasicLit,Ident,StarExpr} are + // "true" expressions that evaluate to a value. 
+ return path, actionExpr + } + + // Ascend to parent. + path = path[1:] + } + + return nil, actionUnknown // unreachable +} + +func describeValue(qpos *queryPos, path []ast.Node) (*describeValueResult, error) { + var expr ast.Expr + var obj types.Object + switch n := path[0].(type) { + case *ast.ValueSpec: + // ambiguous ValueSpec containing multiple names + return nil, fmt.Errorf("multiple value specification") + case *ast.Ident: + obj = qpos.info.ObjectOf(n) + expr = n + case ast.Expr: + expr = n + default: + // TODO(adonovan): is this reachable? + return nil, fmt.Errorf("unexpected AST for expr: %T", n) + } + + t := qpos.info.TypeOf(expr) + if t == nil { + t = types.Typ[types.Invalid] + } + constVal := qpos.info.Types[expr].Value + + return &describeValueResult{ + qpos: qpos, + expr: expr, + typ: t, + constVal: constVal, + obj: obj, + methods: accessibleMethods(t, qpos.info.Pkg), + fields: accessibleFields(t, qpos.info.Pkg), + }, nil +} + +type describeValueResult struct { + qpos *queryPos + expr ast.Expr // query node + typ types.Type // type of expression + constVal exact.Value // value of expression, if constant + obj types.Object // var/func/const object, if expr was Ident + methods []*types.Selection + fields []describeField +} + +func (r *describeValueResult) PrintPlain(printf printfFunc) { + var prefix, suffix string + if r.constVal != nil { + suffix = fmt.Sprintf(" of constant value %s", constValString(r.constVal)) + } + switch obj := r.obj.(type) { + case *types.Func: + if recv := obj.Type().(*types.Signature).Recv(); recv != nil { + if _, ok := recv.Type().Underlying().(*types.Interface); ok { + prefix = "interface method " + } else { + prefix = "method " + } + } + } + + // Describe the expression. 
+ if r.obj != nil { + if r.obj.Pos() == r.expr.Pos() { + // defining ident + printf(r.expr, "definition of %s%s%s", prefix, r.qpos.objectString(r.obj), suffix) + } else { + // referring ident + printf(r.expr, "reference to %s%s%s", prefix, r.qpos.objectString(r.obj), suffix) + if def := r.obj.Pos(); def != token.NoPos { + printf(def, "defined here") + } + } + } else { + desc := astutil.NodeDescription(r.expr) + if suffix != "" { + // constant expression + printf(r.expr, "%s%s", desc, suffix) + } else { + // non-constant expression + printf(r.expr, "%s of type %s", desc, r.qpos.typeString(r.typ)) + } + } + + printMethods(printf, r.expr, r.methods) + printFields(printf, r.expr, r.fields) +} + +func (r *describeValueResult) JSON(fset *token.FileSet) []byte { + var value, objpos string + if r.constVal != nil { + value = r.constVal.String() + } + if r.obj != nil { + objpos = fset.Position(r.obj.Pos()).String() + } + + return toJSON(&serial.Describe{ + Desc: astutil.NodeDescription(r.expr), + Pos: fset.Position(r.expr.Pos()).String(), + Detail: "value", + Value: &serial.DescribeValue{ + Type: r.qpos.typeString(r.typ), + Value: value, + ObjPos: objpos, + }, + }) +} + +// ---- TYPE ------------------------------------------------------------ + +func describeType(qpos *queryPos, path []ast.Node) (*describeTypeResult, error) { + var description string + var t types.Type + switch n := path[0].(type) { + case *ast.Ident: + t = qpos.info.TypeOf(n) + switch t := t.(type) { + case *types.Basic: + description = "reference to built-in " + + case *types.Named: + isDef := t.Obj().Pos() == n.Pos() // see caveats at isDef above + if isDef { + description = "definition of " + } else { + description = "reference to " + } + } + + case ast.Expr: + t = qpos.info.TypeOf(n) + + default: + // Unreachable? 
+ return nil, fmt.Errorf("unexpected AST for type: %T", n) + } + + description = description + "type " + qpos.typeString(t) + + // Show sizes for structs and named types (it's fairly obvious for others). + switch t.(type) { + case *types.Named, *types.Struct: + szs := types.StdSizes{WordSize: 8, MaxAlign: 8} // assume amd64 + description = fmt.Sprintf("%s (size %d, align %d)", description, + szs.Sizeof(t), szs.Alignof(t)) + } + + return &describeTypeResult{ + qpos: qpos, + node: path[0], + description: description, + typ: t, + methods: accessibleMethods(t, qpos.info.Pkg), + fields: accessibleFields(t, qpos.info.Pkg), + }, nil +} + +type describeTypeResult struct { + qpos *queryPos + node ast.Node + description string + typ types.Type + methods []*types.Selection + fields []describeField +} + +type describeField struct { + implicits []*types.Named + field *types.Var +} + +func printMethods(printf printfFunc, node ast.Node, methods []*types.Selection) { + if len(methods) > 0 { + printf(node, "Methods:") + } + for _, meth := range methods { + // Print the method type relative to the package + // in which it was defined, not the query package, + printf(meth.Obj(), "\t%s", + types.SelectionString(meth, types.RelativeTo(meth.Obj().Pkg()))) + } +} + +func printFields(printf printfFunc, node ast.Node, fields []describeField) { + if len(fields) > 0 { + printf(node, "Fields:") + } + + // Align the names and the types (requires two passes). 
+ var width int + var names []string + for _, f := range fields { + var buf bytes.Buffer + for _, fld := range f.implicits { + buf.WriteString(fld.Obj().Name()) + buf.WriteByte('.') + } + buf.WriteString(f.field.Name()) + name := buf.String() + if n := utf8.RuneCountInString(name); n > width { + width = n + } + names = append(names, name) + } + + for i, f := range fields { + // Print the field type relative to the package + // in which it was defined, not the query package, + printf(f.field, "\t%*s %s", -width, names[i], + types.TypeString(f.field.Type(), types.RelativeTo(f.field.Pkg()))) + } +} + +func (r *describeTypeResult) PrintPlain(printf printfFunc) { + printf(r.node, "%s", r.description) + + // Show the underlying type for a reference to a named type. + if nt, ok := r.typ.(*types.Named); ok && r.node.Pos() != nt.Obj().Pos() { + // TODO(adonovan): improve display of complex struct/interface types. + printf(nt.Obj(), "defined as %s", r.qpos.typeString(nt.Underlying())) + } + + printMethods(printf, r.node, r.methods) + if len(r.methods) == 0 { + // Only report null result for type kinds + // capable of bearing methods. 
+ switch r.typ.(type) { + case *types.Interface, *types.Struct, *types.Named: + printf(r.node, "No methods.") + } + } + + printFields(printf, r.node, r.fields) +} + +func (r *describeTypeResult) JSON(fset *token.FileSet) []byte { + var namePos, nameDef string + if nt, ok := r.typ.(*types.Named); ok { + namePos = fset.Position(nt.Obj().Pos()).String() + nameDef = nt.Underlying().String() + } + return toJSON(&serial.Describe{ + Desc: r.description, + Pos: fset.Position(r.node.Pos()).String(), + Detail: "type", + Type: &serial.DescribeType{ + Type: r.qpos.typeString(r.typ), + NamePos: namePos, + NameDef: nameDef, + Methods: methodsToSerial(r.qpos.info.Pkg, r.methods, fset), + }, + }) +} + +// ---- PACKAGE ------------------------------------------------------------ + +func describePackage(qpos *queryPos, path []ast.Node) (*describePackageResult, error) { + var description string + var pkg *types.Package + switch n := path[0].(type) { + case *ast.ImportSpec: + var obj types.Object + if n.Name != nil { + obj = qpos.info.Defs[n.Name] + } else { + obj = qpos.info.Implicits[n] + } + pkgname, _ := obj.(*types.PkgName) + if pkgname == nil { + return nil, fmt.Errorf("can't import package %s", n.Path.Value) + } + pkg = pkgname.Imported() + description = fmt.Sprintf("import of package %q", pkg.Path()) + + case *ast.Ident: + if _, isDef := path[1].(*ast.File); isDef { + // e.g. package id + pkg = qpos.info.Pkg + description = fmt.Sprintf("definition of package %q", pkg.Path()) + } else { + // e.g. import id "..." + // or id.F() + pkg = qpos.info.ObjectOf(n).(*types.PkgName).Imported() + description = fmt.Sprintf("reference to package %q", pkg.Path()) + } + + default: + // Unreachable? + return nil, fmt.Errorf("unexpected AST for package: %T", n) + } + + var members []*describeMember + // NB: "unsafe" has no types.Package + if pkg != nil { + // Enumerate the accessible package members + // in lexicographic order. 
+ for _, name := range pkg.Scope().Names() { + if pkg == qpos.info.Pkg || ast.IsExported(name) { + mem := pkg.Scope().Lookup(name) + var methods []*types.Selection + if mem, ok := mem.(*types.TypeName); ok { + methods = accessibleMethods(mem.Type(), qpos.info.Pkg) + } + members = append(members, &describeMember{ + mem, + methods, + }) + + } + } + } + + return &describePackageResult{qpos.fset, path[0], description, pkg, members}, nil +} + +type describePackageResult struct { + fset *token.FileSet + node ast.Node + description string + pkg *types.Package + members []*describeMember // in lexicographic name order +} + +type describeMember struct { + obj types.Object + methods []*types.Selection // in types.MethodSet order +} + +func (r *describePackageResult) PrintPlain(printf printfFunc) { + printf(r.node, "%s", r.description) + + // Compute max width of name "column". + maxname := 0 + for _, mem := range r.members { + if l := len(mem.obj.Name()); l > maxname { + maxname = l + } + } + + for _, mem := range r.members { + printf(mem.obj, "\t%s", formatMember(mem.obj, maxname)) + for _, meth := range mem.methods { + printf(meth.Obj(), "\t\t%s", types.SelectionString(meth, types.RelativeTo(r.pkg))) + } + } +} + +// Helper function to adjust go1.5 numeric go/constant formatting. +// Can be removed once we give up compatibility with go1.5. +func constValString(v exact.Value) string { + if v.Kind() == exact.Float { + // In go1.5, go/constant floating-point values are printed + // as fractions. Make them appear as floating-point numbers. 
+ f, _ := exact.Float64Val(v) + return fmt.Sprintf("%g", f) + } + return v.String() +} + +func formatMember(obj types.Object, maxname int) string { + qualifier := types.RelativeTo(obj.Pkg()) + var buf bytes.Buffer + fmt.Fprintf(&buf, "%-5s %-*s", tokenOf(obj), maxname, obj.Name()) + switch obj := obj.(type) { + case *types.Const: + fmt.Fprintf(&buf, " %s = %s", types.TypeString(obj.Type(), qualifier), constValString(obj.Val())) + + case *types.Func: + fmt.Fprintf(&buf, " %s", types.TypeString(obj.Type(), qualifier)) + + case *types.TypeName: + // Abbreviate long aggregate type names. + var abbrev string + switch t := obj.Type().Underlying().(type) { + case *types.Interface: + if t.NumMethods() > 1 { + abbrev = "interface{...}" + } + case *types.Struct: + if t.NumFields() > 1 { + abbrev = "struct{...}" + } + } + if abbrev == "" { + fmt.Fprintf(&buf, " %s", types.TypeString(obj.Type().Underlying(), qualifier)) + } else { + fmt.Fprintf(&buf, " %s", abbrev) + } + + case *types.Var: + fmt.Fprintf(&buf, " %s", types.TypeString(obj.Type(), qualifier)) + } + return buf.String() +} + +func (r *describePackageResult) JSON(fset *token.FileSet) []byte { + var members []*serial.DescribeMember + for _, mem := range r.members { + typ := mem.obj.Type() + var val string + switch mem := mem.obj.(type) { + case *types.Const: + val = constValString(mem.Val()) + case *types.TypeName: + typ = typ.Underlying() + } + members = append(members, &serial.DescribeMember{ + Name: mem.obj.Name(), + Type: typ.String(), + Value: val, + Pos: fset.Position(mem.obj.Pos()).String(), + Kind: tokenOf(mem.obj), + Methods: methodsToSerial(r.pkg, mem.methods, fset), + }) + } + return toJSON(&serial.Describe{ + Desc: r.description, + Pos: fset.Position(r.node.Pos()).String(), + Detail: "package", + Package: &serial.DescribePackage{ + Path: r.pkg.Path(), + Members: members, + }, + }) +} + +func tokenOf(o types.Object) string { + switch o.(type) { + case *types.Func: + return "func" + case *types.Var: + 
return "var" + case *types.TypeName: + return "type" + case *types.Const: + return "const" + case *types.PkgName: + return "package" + case *types.Builtin: + return "builtin" // e.g. when describing package "unsafe" + case *types.Nil: + return "nil" + case *types.Label: + return "label" + } + panic(o) +} + +// ---- STATEMENT ------------------------------------------------------------ + +func describeStmt(qpos *queryPos, path []ast.Node) (*describeStmtResult, error) { + var description string + switch n := path[0].(type) { + case *ast.Ident: + if qpos.info.Defs[n] != nil { + description = "labelled statement" + } else { + description = "reference to labelled statement" + } + + default: + // Nothing much to say about statements. + description = astutil.NodeDescription(n) + } + return &describeStmtResult{qpos.fset, path[0], description}, nil +} + +type describeStmtResult struct { + fset *token.FileSet + node ast.Node + description string +} + +func (r *describeStmtResult) PrintPlain(printf printfFunc) { + printf(r.node, "%s", r.description) +} + +func (r *describeStmtResult) JSON(fset *token.FileSet) []byte { + return toJSON(&serial.Describe{ + Desc: r.description, + Pos: fset.Position(r.node.Pos()).String(), + Detail: "unknown", + }) +} + +// ------------------- Utilities ------------------- + +// pathToString returns a string containing the concrete types of the +// nodes in path. 
+func pathToString(path []ast.Node) string { + var buf bytes.Buffer + fmt.Fprint(&buf, "[") + for i, n := range path { + if i > 0 { + fmt.Fprint(&buf, " ") + } + fmt.Fprint(&buf, strings.TrimPrefix(fmt.Sprintf("%T", n), "*ast.")) + } + fmt.Fprint(&buf, "]") + return buf.String() +} + +func accessibleMethods(t types.Type, from *types.Package) []*types.Selection { + var methods []*types.Selection + for _, meth := range typeutil.IntuitiveMethodSet(t, nil) { + if isAccessibleFrom(meth.Obj(), from) { + methods = append(methods, meth) + } + } + return methods +} + +// accessibleFields returns the set of accessible +// field selections on a value of type recv. +func accessibleFields(recv types.Type, from *types.Package) []describeField { + wantField := func(f *types.Var) bool { + if !isAccessibleFrom(f, from) { + return false + } + // Check that the field is not shadowed. + obj, _, _ := types.LookupFieldOrMethod(recv, true, f.Pkg(), f.Name()) + return obj == f + } + + var fields []describeField + var visit func(t types.Type, stack []*types.Named) + visit = func(t types.Type, stack []*types.Named) { + tStruct, ok := deref(t).Underlying().(*types.Struct) + if !ok { + return + } + fieldloop: + for i := 0; i < tStruct.NumFields(); i++ { + f := tStruct.Field(i) + + // Handle recursion through anonymous fields. + if f.Anonymous() { + tf := f.Type() + if ptr, ok := tf.(*types.Pointer); ok { + tf = ptr.Elem() + } + if named, ok := tf.(*types.Named); ok { // (be defensive) + // If we've already visited this named type + // on this path, break the cycle. + for _, x := range stack { + if x == named { + continue fieldloop + } + } + visit(f.Type(), append(stack, named)) + } + } + + // Save accessible fields. 
+ if wantField(f) { + fields = append(fields, describeField{ + implicits: append([]*types.Named(nil), stack...), + field: f, + }) + } + } + } + visit(recv, nil) + + return fields +} + +func isAccessibleFrom(obj types.Object, pkg *types.Package) bool { + return ast.IsExported(obj.Name()) || obj.Pkg() == pkg +} + +func methodsToSerial(this *types.Package, methods []*types.Selection, fset *token.FileSet) []serial.DescribeMethod { + qualifier := types.RelativeTo(this) + var jmethods []serial.DescribeMethod + for _, meth := range methods { + var ser serial.DescribeMethod + if meth != nil { // may contain nils when called by implements (on a method) + ser = serial.DescribeMethod{ + Name: types.SelectionString(meth, qualifier), + Pos: fset.Position(meth.Obj().Pos()).String(), + } + } + jmethods = append(jmethods, ser) + } + return jmethods +} diff --git a/cmd/guru/referrers.go b/cmd/guru/referrers.go index d6b9df90ee..e80b0a6774 100644 --- a/cmd/guru/referrers.go +++ b/cmd/guru/referrers.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.8 + package main import ( diff --git a/cmd/guru/referrers18.go b/cmd/guru/referrers18.go new file mode 100644 index 0000000000..277f57a5b0 --- /dev/null +++ b/cmd/guru/referrers18.go @@ -0,0 +1,524 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/build" + "go/token" + "go/types" + "io" + "log" + "sort" + "strings" + "sync" + + "golang.org/x/tools/cmd/guru/serial" + "golang.org/x/tools/go/buildutil" + "golang.org/x/tools/go/loader" + "golang.org/x/tools/refactor/importgraph" +) + +// Referrers reports all identifiers that resolve to the same object +// as the queried identifier, within any package in the workspace. 
+func referrers(q *Query) error {
+	fset := token.NewFileSet()
+	lconf := loader.Config{Fset: fset, Build: q.Build}
+	allowErrors(&lconf)
+
+	if _, err := importQueryPackage(q.Pos, &lconf); err != nil {
+		return err
+	}
+
+	// Load/parse/type-check the query package.
+	lprog, err := lconf.Load()
+	if err != nil {
+		return err
+	}
+
+	qpos, err := parseQueryPos(lprog, q.Pos, false)
+	if err != nil {
+		return err
+	}
+
+	id, _ := qpos.path[0].(*ast.Ident)
+	if id == nil {
+		return fmt.Errorf("no identifier here")
+	}
+
+	obj := qpos.info.ObjectOf(id)
+	if obj == nil {
+		// Happens for y in "switch y := x.(type)",
+		// the package declaration,
+		// and unresolved identifiers.
+		if _, ok := qpos.path[1].(*ast.File); ok { // package decl?
+			return packageReferrers(q, qpos.info.Pkg.Path())
+		}
+		return fmt.Errorf("no object for identifier: %T", qpos.path[1])
+	}
+
+	// Imported package name?
+	if pkgname, ok := obj.(*types.PkgName); ok {
+		return packageReferrers(q, pkgname.Imported().Path())
+	}
+
+	if obj.Pkg() == nil {
+		return fmt.Errorf("references to predeclared %q are everywhere!", obj.Name())
+	}
+
+	// For a globally accessible object defined in package P, we
+	// must load packages that depend on P. Specifically, for a
+	// package-level object, we need to load only direct importers
+	// of P, but for a field or interface method, we must load
+	// any package that transitively imports P.
+	if global, pkglevel := classify(obj); global {
+		// We'll use the object's position to identify it in the larger program.
+		objposn := fset.Position(obj.Pos())
+		defpkg := obj.Pkg().Path() // defining package
+		return globalReferrers(q, qpos.info.Pkg.Path(), defpkg, objposn, pkglevel)
+	}
+
+	q.Output(fset, &referrersInitialResult{
+		qinfo: qpos.info,
+		obj:   obj,
+	})
+
+	outputUses(q, fset, usesOf(obj, qpos.info), obj.Pkg())
+
+	return nil // success
+}
+
+// classify classifies objects by how far
+// we have to look to find references to them.
+func classify(obj types.Object) (global, pkglevel bool) {
+	if obj.Exported() {
+		if obj.Parent() == nil {
+			// selectable object (field or method)
+			return true, false
+		}
+		if obj.Parent() == obj.Pkg().Scope() {
+			// lexical object (package-level var/const/func/type)
+			return true, true
+		}
+	}
+	// object with unexported name or defined in local scope
+	return false, false
+}
+
+// packageReferrers reports all references to the specified package
+// throughout the workspace.
+func packageReferrers(q *Query, path string) error {
+	// Scan the workspace and build the import graph.
+	// Ignore broken packages.
+	_, rev, _ := importgraph.Build(q.Build)
+
+	// Find the set of packages that directly import the query package.
+	// Only those packages need typechecking of function bodies.
+	users := rev[path]
+
+	// Load the larger program.
+	fset := token.NewFileSet()
+	lconf := loader.Config{
+		Fset:  fset,
+		Build: q.Build,
+		TypeCheckFuncBodies: func(p string) bool {
+			return users[strings.TrimSuffix(p, "_test")]
+		},
+	}
+	allowErrors(&lconf)
+
+	// The importgraph doesn't treat external test packages
+	// as separate nodes, so we must use ImportWithTests.
+	for path := range users {
+		lconf.ImportWithTests(path)
+	}
+
+	// Subtle! AfterTypeCheck needs no mutex for qpkg because the
+	// topological import order gives us the necessary happens-before edges.
+	// TODO(adonovan): what about import cycles?
+	var qpkg *types.Package
+
+	// For efficiency, we scan each package for references
+	// just after it has been type-checked. The loader calls
+	// AfterTypeCheck (concurrently), providing us with a stream of
+	// packages.
+	lconf.AfterTypeCheck = func(info *loader.PackageInfo, files []*ast.File) {
+		// AfterTypeCheck may be called twice for the same package due to augmentation.
+
+		if info.Pkg.Path() == path && qpkg == nil {
+			// Found the package of interest.
+			qpkg = info.Pkg
+			fakepkgname := types.NewPkgName(token.NoPos, qpkg, qpkg.Name(), qpkg)
+			q.Output(fset, &referrersInitialResult{
+				qinfo: info,
+				obj:   fakepkgname, // bogus
+			})
+		}
+
+		// Only inspect packages that directly import the
+		// declaring package (and thus were type-checked).
+		if lconf.TypeCheckFuncBodies(info.Pkg.Path()) {
+			// Find PkgNames that refer to qpkg.
+			// TODO(adonovan): perhaps more useful would be to show imports
+			// of the package instead of qualified identifiers.
+			var refs []*ast.Ident
+			for id, obj := range info.Uses {
+				if obj, ok := obj.(*types.PkgName); ok && obj.Imported() == qpkg {
+					refs = append(refs, id)
+				}
+			}
+			outputUses(q, fset, refs, info.Pkg)
+		}
+
+		clearInfoFields(info) // save memory
+	}
+
+	lconf.Load() // ignore error
+
+	if qpkg == nil {
+		log.Fatalf("query package %q not found during reloading", path)
+	}
+
+	return nil
+}
+
+func usesOf(queryObj types.Object, info *loader.PackageInfo) []*ast.Ident {
+	var refs []*ast.Ident
+	for id, obj := range info.Uses {
+		if sameObj(queryObj, obj) {
+			refs = append(refs, id)
+		}
+	}
+	return refs
+}
+
+// outputUses outputs a result describing refs, which appear in the package denoted by pkg.
+func outputUses(q *Query, fset *token.FileSet, refs []*ast.Ident, pkg *types.Package) {
+	if len(refs) > 0 {
+		sort.Sort(byNamePos{fset, refs})
+		q.Output(fset, &referrersPackageResult{
+			pkg:   pkg,
+			build: q.Build,
+			fset:  fset,
+			refs:  refs,
+		})
+	}
+}
+
+// globalReferrers reports references throughout the entire workspace to the
+// object at the specified source position. Its defining package is defpkg,
+// and the query package is qpkg. isPkgLevel indicates whether the object
+// is defined at package-level.
+func globalReferrers(q *Query, qpkg, defpkg string, objposn token.Position, isPkgLevel bool) error {
+	// Scan the workspace and build the import graph.
+	// Ignore broken packages.
+	_, rev, _ := importgraph.Build(q.Build)
+
+	// Find the set of packages that depend on defpkg.
+	// Only function bodies in those packages need type-checking.
+	var users map[string]bool
+	if isPkgLevel {
+		users = rev[defpkg] // direct importers
+		if users == nil {
+			users = make(map[string]bool)
+		}
+		users[defpkg] = true // plus the defining package itself
+	} else {
+		users = rev.Search(defpkg) // transitive importers
+	}
+
+	// Prepare to load the larger program.
+	fset := token.NewFileSet()
+	lconf := loader.Config{
+		Fset:  fset,
+		Build: q.Build,
+		TypeCheckFuncBodies: func(p string) bool {
+			return users[strings.TrimSuffix(p, "_test")]
+		},
+	}
+	allowErrors(&lconf)
+
+	// The importgraph doesn't treat external test packages
+	// as separate nodes, so we must use ImportWithTests.
+	for path := range users {
+		lconf.ImportWithTests(path)
+	}
+
+	// The remainder of this function is somewhat tricky because it
+	// operates on the concurrent stream of packages observed by the
+	// loader's AfterTypeCheck hook. Most of guru's helper
+	// functions assume the entire program has already been loaded,
+	// so we can't use them here.
+	// TODO(adonovan): smooth things out once the other changes have landed.
+
+	// Results are reported concurrently from within the
+	// AfterTypeCheck hook. The program may provide a useful stream
+	// of information even if the user doesn't let the program run
+	// to completion.
+
+	var (
+		mu    sync.Mutex
+		qobj  types.Object
+		qinfo *loader.PackageInfo // info for qpkg
+	)
+
+	// For efficiency, we scan each package for references
+	// just after it has been type-checked. The loader calls
+	// AfterTypeCheck (concurrently), providing us with a stream of
+	// packages.
+	lconf.AfterTypeCheck = func(info *loader.PackageInfo, files []*ast.File) {
+		// AfterTypeCheck may be called twice for the same package due to augmentation.
+
+		// Only inspect packages that depend on the declaring package
+		// (and thus were type-checked).
+		if lconf.TypeCheckFuncBodies(info.Pkg.Path()) {
+			// Record the query object and its package when we see it.
+			mu.Lock()
+			if qobj == nil && info.Pkg.Path() == defpkg {
+				// Find the object by its position (slightly ugly).
+				qobj = findObject(fset, &info.Info, objposn)
+				if qobj == nil {
+					// It really ought to be there;
+					// we found it once already.
+					log.Fatalf("object at %s not found in package %s",
+						objposn, defpkg)
+				}
+
+				// Object found.
+				qinfo = info
+				q.Output(fset, &referrersInitialResult{
+					qinfo: qinfo,
+					obj:   qobj,
+				})
+			}
+			obj := qobj
+			mu.Unlock()
+
+			// Look for references to the query object.
+			if obj != nil {
+				outputUses(q, fset, usesOf(obj, info), info.Pkg)
+			}
+		}
+
+		clearInfoFields(info) // save memory
+	}
+
+	lconf.Load() // ignore error
+
+	if qobj == nil {
+		log.Fatal("query object not found during reloading")
+	}
+
+	return nil // success
+}
+
+// findObject returns the object defined at the specified position.
+func findObject(fset *token.FileSet, info *types.Info, objposn token.Position) types.Object {
+	good := func(obj types.Object) bool {
+		if obj == nil {
+			return false
+		}
+		posn := fset.Position(obj.Pos())
+		return posn.Filename == objposn.Filename && posn.Offset == objposn.Offset
+	}
+	for _, obj := range info.Defs {
+		if good(obj) {
+			return obj
+		}
+	}
+	for _, obj := range info.Implicits {
+		if good(obj) {
+			return obj
+		}
+	}
+	return nil
+}
+
+// sameObj reports whether x and y are identical, or both are PkgNames
+// that import the same Package.
+//
+func sameObj(x, y types.Object) bool {
+	if x == y {
+		return true
+	}
+	if x, ok := x.(*types.PkgName); ok {
+		if y, ok := y.(*types.PkgName); ok {
+			return x.Imported() == y.Imported()
+		}
+	}
+	return false
+}
+
+func clearInfoFields(info *loader.PackageInfo) {
+	// TODO(adonovan): opt: save memory by eliminating unneeded scopes/objects.
+	// (Requires go/types change for Go 1.7.)
+	// info.Pkg.Scope().ClearChildren()
+
+	// Discard the file ASTs and their accumulated type
+	// information to save memory.
+	info.Files = nil
+	info.Defs = make(map[*ast.Ident]types.Object)
+	info.Uses = make(map[*ast.Ident]types.Object)
+	info.Implicits = make(map[ast.Node]types.Object)
+
+	// Also, disable future collection of wholly unneeded
+	// type information for the package in case there is
+	// more type-checking to do (augmentation).
+	info.Types = nil
+	info.Scopes = nil
+	info.Selections = nil
+}
+
+// -------- utils --------
+
+// A deterministic ordering for token.Pos that doesn't
+// depend on the order in which packages were loaded.
+func lessPos(fset *token.FileSet, x, y token.Pos) bool {
+	fx := fset.File(x)
+	fy := fset.File(y)
+	if fx != fy {
+		return fx.Name() < fy.Name()
+	}
+	return x < y
+}
+
+type byNamePos struct {
+	fset *token.FileSet
+	ids  []*ast.Ident
+}
+
+func (p byNamePos) Len() int      { return len(p.ids) }
+func (p byNamePos) Swap(i, j int) { p.ids[i], p.ids[j] = p.ids[j], p.ids[i] }
+func (p byNamePos) Less(i, j int) bool {
+	return lessPos(p.fset, p.ids[i].NamePos, p.ids[j].NamePos)
+}
+
+// referrersInitialResult is the initial result of a "referrers" query.
+type referrersInitialResult struct {
+	qinfo *loader.PackageInfo
+	obj   types.Object // object it denotes
+}
+
+func (r *referrersInitialResult) PrintPlain(printf printfFunc) {
+	printf(r.obj, "references to %s",
+		types.ObjectString(r.obj, types.RelativeTo(r.qinfo.Pkg)))
+}
+
+func (r *referrersInitialResult) JSON(fset *token.FileSet) []byte {
+	var objpos string
+	if pos := r.obj.Pos(); pos.IsValid() {
+		objpos = fset.Position(pos).String()
+	}
+	return toJSON(&serial.ReferrersInitial{
+		Desc:   r.obj.String(),
+		ObjPos: objpos,
+	})
+}
+
+// referrersPackageResult is the streaming result for one package of a "referrers" query.
+type referrersPackageResult struct {
+	pkg   *types.Package
+	build *build.Context
+	fset  *token.FileSet
+	refs  []*ast.Ident // set of all other references to it
+}
+
+// foreachRef calls f(id, text) for id in r.refs, in order.
+// Text is the text of the line on which id appears.
+func (r *referrersPackageResult) foreachRef(f func(id *ast.Ident, text string)) {
+	// Show referring lines, like grep.
+	type fileinfo struct {
+		refs     []*ast.Ident
+		linenums []int            // line number of refs[i]
+		data     chan interface{} // file contents or error
+	}
+	var fileinfos []*fileinfo
+	fileinfosByName := make(map[string]*fileinfo)
+
+	// First pass: start the file reads concurrently.
+	sema := make(chan struct{}, 20) // counting semaphore to limit I/O concurrency
+	for _, ref := range r.refs {
+		posn := r.fset.Position(ref.Pos())
+		fi := fileinfosByName[posn.Filename]
+		if fi == nil {
+			fi = &fileinfo{data: make(chan interface{})}
+			fileinfosByName[posn.Filename] = fi
+			fileinfos = append(fileinfos, fi)
+
+			// First request for this file:
+			// start asynchronous read.
+			go func() {
+				sema <- struct{}{} // acquire token
+				content, err := readFile(r.build, posn.Filename)
+				<-sema // release token
+				if err != nil {
+					fi.data <- err
+				} else {
+					fi.data <- content
+				}
+			}()
+		}
+		fi.refs = append(fi.refs, ref)
+		fi.linenums = append(fi.linenums, posn.Line)
+	}
+
+	// Second pass: print refs in original order.
+	// One line may have several refs at different columns.
+	for _, fi := range fileinfos {
+		v := <-fi.data // wait for I/O completion
+
+		// Print one item for all refs in a file that could not
+		// be loaded (perhaps due to //line directives).
+		if err, ok := v.(error); ok {
+			var suffix string
+			if more := len(fi.refs) - 1; more > 0 {
+				suffix = fmt.Sprintf(" (+ %d more refs in this file)", more)
+			}
+			f(fi.refs[0], err.Error()+suffix)
+			continue
+		}
+
+		lines := bytes.Split(v.([]byte), []byte("\n"))
+		for i, ref := range fi.refs {
+			f(ref, string(lines[fi.linenums[i]-1]))
+		}
+	}
+}
+
+// readFile is like ioutil.ReadFile, but
+// it goes through the virtualized build.Context.
+func readFile(ctxt *build.Context, filename string) ([]byte, error) {
+	rc, err := buildutil.OpenFile(ctxt, filename)
+	if err != nil {
+		return nil, err
+	}
+	defer rc.Close()
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, rc); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (r *referrersPackageResult) PrintPlain(printf printfFunc) {
+	r.foreachRef(func(id *ast.Ident, text string) {
+		printf(id, "%s", text)
+	})
+}
+
+func (r *referrersPackageResult) JSON(fset *token.FileSet) []byte {
+	refs := serial.ReferrersPackage{Package: r.pkg.Path()}
+	r.foreachRef(func(id *ast.Ident, text string) {
+		refs.Refs = append(refs.Refs, serial.Ref{
+			Pos:  fset.Position(id.NamePos).String(),
+			Text: text,
+		})
+	})
+	return toJSON(refs)
+}
diff --git a/cmd/stringer/stringer.go b/cmd/stringer/stringer.go
index 1d8fa9ae63..c581e16eb4 100644
--- a/cmd/stringer/stringer.go
+++ b/cmd/stringer/stringer.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build go1.5
+// +build !go1.8
 
 // Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer
 // interface. Given the name of a (signed or unsigned) integer type T that has constants
diff --git a/cmd/stringer/stringer18.go b/cmd/stringer/stringer18.go
new file mode 100644
index 0000000000..1b0dfbbaa9
--- /dev/null
+++ b/cmd/stringer/stringer18.go
@@ -0,0 +1,638 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer +// interface. Given the name of a (signed or unsigned) integer type T that has constants +// defined, stringer will create a new self-contained Go source file implementing +// func (t T) String() string +// The file is created in the same package and directory as the package that defines T. +// It has helpful defaults designed for use with go generate. +// +// Stringer works best with constants that are consecutive values such as created using iota, +// but creates good code regardless. In the future it might also provide custom support for +// constant sets that are bit patterns. +// +// For example, given this snippet, +// +// package painkiller +// +// type Pill int +// +// const ( +// Placebo Pill = iota +// Aspirin +// Ibuprofen +// Paracetamol +// Acetaminophen = Paracetamol +// ) +// +// running this command +// +// stringer -type=Pill +// +// in the same directory will create the file pill_string.go, in package painkiller, +// containing a definition of +// +// func (Pill) String() string +// +// That method will translate the value of a Pill constant to the string representation +// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will +// print the string "Aspirin". +// +// Typically this process would be run using go generate, like this: +// +// //go:generate stringer -type=Pill +// +// If multiple constants have the same value, the lexically first matching name will +// be used (in the example, Acetaminophen will print as "Paracetamol"). +// +// With no arguments, it processes the package in the current directory. +// Otherwise, the arguments must name a single directory holding a Go package +// or a set of Go source files that represent a single Go package. 
+// +// The -type flag accepts a comma-separated list of types so a single run can +// generate methods for multiple types. The default output file is t_string.go, +// where t is the lower-cased name of the first type listed. It can be overridden +// with the -output flag. +// +package main // import "golang.org/x/tools/cmd/stringer" + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/build" + exact "go/constant" + "go/format" + "go/importer" + "go/parser" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" +) + +var ( + typeNames = flag.String("type", "", "comma-separated list of type names; must be set") + output = flag.String("output", "", "output file name; default srcdir/_string.go") +) + +// Usage is a replacement usage function for the flags package. +func Usage() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T [directory]\n") + fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T files... # Must be a single package\n") + fmt.Fprintf(os.Stderr, "For more information, see:\n") + fmt.Fprintf(os.Stderr, "\thttp://godoc.org/golang.org/x/tools/cmd/stringer\n") + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() +} + +func main() { + log.SetFlags(0) + log.SetPrefix("stringer: ") + flag.Usage = Usage + flag.Parse() + if len(*typeNames) == 0 { + flag.Usage() + os.Exit(2) + } + types := strings.Split(*typeNames, ",") + + // We accept either one directory or a list of files. Which do we have? + args := flag.Args() + if len(args) == 0 { + // Default: process whole package in current directory. + args = []string{"."} + } + + // Parse the package once. + var ( + dir string + g Generator + ) + if len(args) == 1 && isDirectory(args[0]) { + dir = args[0] + g.parsePackageDir(args[0]) + } else { + dir = filepath.Dir(args[0]) + g.parsePackageFiles(args) + } + + // Print the header and package clause. 
+ g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT\n", strings.Join(os.Args[1:], " ")) + g.Printf("\n") + g.Printf("package %s", g.pkg.name) + g.Printf("\n") + g.Printf("import \"fmt\"\n") // Used by all methods. + + // Run generate for each type. + for _, typeName := range types { + g.generate(typeName) + } + + // Format the output. + src := g.format() + + // Write to file. + outputName := *output + if outputName == "" { + baseName := fmt.Sprintf("%s_string.go", types[0]) + outputName = filepath.Join(dir, strings.ToLower(baseName)) + } + err := ioutil.WriteFile(outputName, src, 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } +} + +// isDirectory reports whether the named file is a directory. +func isDirectory(name string) bool { + info, err := os.Stat(name) + if err != nil { + log.Fatal(err) + } + return info.IsDir() +} + +// Generator holds the state of the analysis. Primarily used to buffer +// the output for format.Source. +type Generator struct { + buf bytes.Buffer // Accumulated output. + pkg *Package // Package we are scanning. +} + +func (g *Generator) Printf(format string, args ...interface{}) { + fmt.Fprintf(&g.buf, format, args...) +} + +// File holds a single parsed file and associated data. +type File struct { + pkg *Package // Package to which this file belongs. + file *ast.File // Parsed AST. + // These fields are reset for each type being generated. + typeName string // Name of the constant type. + values []Value // Accumulator for constant values of that type. +} + +type Package struct { + dir string + name string + defs map[*ast.Ident]types.Object + files []*File + typesPkg *types.Package +} + +// parsePackageDir parses the package residing in the directory. +func (g *Generator) parsePackageDir(directory string) { + pkg, err := build.Default.ImportDir(directory, 0) + if err != nil { + log.Fatalf("cannot process directory %s: %s", directory, err) + } + var names []string + names = append(names, pkg.GoFiles...) 
+ names = append(names, pkg.CgoFiles...) + // TODO: Need to think about constants in test files. Maybe write type_string_test.go + // in a separate pass? For later. + // names = append(names, pkg.TestGoFiles...) // These are also in the "foo" package. + names = append(names, pkg.SFiles...) + names = prefixDirectory(directory, names) + g.parsePackage(directory, names, nil) +} + +// parsePackageFiles parses the package occupying the named files. +func (g *Generator) parsePackageFiles(names []string) { + g.parsePackage(".", names, nil) +} + +// prefixDirectory places the directory name on the beginning of each name in the list. +func prefixDirectory(directory string, names []string) []string { + if directory == "." { + return names + } + ret := make([]string, len(names)) + for i, name := range names { + ret[i] = filepath.Join(directory, name) + } + return ret +} + +// parsePackage analyzes the single package constructed from the named files. +// If text is non-nil, it is a string to be used instead of the content of the file, +// to be used for testing. parsePackage exits if there is an error. +func (g *Generator) parsePackage(directory string, names []string, text interface{}) { + var files []*File + var astFiles []*ast.File + g.pkg = new(Package) + fs := token.NewFileSet() + for _, name := range names { + if !strings.HasSuffix(name, ".go") { + continue + } + parsedFile, err := parser.ParseFile(fs, name, text, 0) + if err != nil { + log.Fatalf("parsing package: %s: %s", name, err) + } + astFiles = append(astFiles, parsedFile) + files = append(files, &File{ + file: parsedFile, + pkg: g.pkg, + }) + } + if len(astFiles) == 0 { + log.Fatalf("%s: no buildable Go files", directory) + } + g.pkg.name = astFiles[0].Name.Name + g.pkg.files = files + g.pkg.dir = directory + // Type check the package. + g.pkg.check(fs, astFiles) +} + +// check type-checks the package. The package must be OK to proceed. 
func (pkg *Package) check(fs *token.FileSet, astFiles []*ast.File) {
	// Record every defining identifier up front so genDecl can later map a
	// constant's *ast.Ident back to its types.Object (and hence its value).
	pkg.defs = make(map[*ast.Ident]types.Object)
	// FakeImportC lets packages that import "C" still type-check; all other
	// imports are resolved by the default (compiled-package) importer.
	config := types.Config{Importer: importer.Default(), FakeImportC: true}
	info := &types.Info{
		Defs: pkg.defs,
	}
	typesPkg, err := config.Check(pkg.dir, fs, astFiles, info)
	if err != nil {
		// Fatal: a package that fails to type-check cannot be processed.
		log.Fatalf("checking package: %s", err)
	}
	pkg.typesPkg = typesPkg
}

// generate produces the String method for the named type.
func (g *Generator) generate(typeName string) {
	values := make([]Value, 0, 100) // 100 is an arbitrary starting capacity.
	for _, file := range g.pkg.files {
		// Set the state for this run of the walker.
		// genDecl accumulates into file.values as ast.Inspect visits decls.
		file.typeName = typeName
		file.values = nil
		if file.file != nil {
			ast.Inspect(file.file, file.genDecl)
			values = append(values, file.values...)
		}
	}

	if len(values) == 0 {
		log.Fatalf("no values defined for type %s", typeName)
	}
	runs := splitIntoRuns(values)
	// The decision of which pattern to use depends on the number of
	// runs in the numbers. If there's only one, it's easy. For more than
	// one, there's a tradeoff between complexity and size of the data
	// and code vs. the simplicity of a map. A map takes more space,
	// but so does the code. The decision here (crossover at 10) is
	// arbitrary, but considers that for large numbers of runs the cost
	// of the linear scan in the switch might become important, and
	// rather than use yet another algorithm such as binary search,
	// we punt and use a map. In any case, the likelihood of a map
	// being necessary for any realistic example other than bitmasks
	// is very low. And bitmasks probably deserve their own analysis,
	// to be done some other day.
	switch {
	case len(runs) == 1:
		g.buildOneRun(runs, typeName)
	case len(runs) <= 10:
		g.buildMultipleRuns(runs, typeName)
	default:
		g.buildMap(runs, typeName)
	}
}

// splitIntoRuns breaks the values into runs of contiguous sequences.
// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}.
// The input slice is known to be non-empty.
// Note: sorts and deduplicates values in place, so the caller's slice is mutated.
func splitIntoRuns(values []Value) [][]Value {
	// We use stable sort so the lexically first name is chosen for equal elements.
	sort.Stable(byValue(values))
	// Remove duplicates. Stable sort has put the one we want to print first,
	// so use that one. The String method won't care about which named constant
	// was the argument, so the first name for the given value is the only one to keep.
	// We need to do this because identical values would cause the switch or map
	// to fail to compile.
	j := 1
	for i := 1; i < len(values); i++ {
		if values[i].value != values[i-1].value {
			values[j] = values[i]
			j++
		}
	}
	values = values[:j]
	runs := make([][]Value, 0, 10)
	for len(values) > 0 {
		// One contiguous sequence per outer loop.
		// Each run is a sub-slice aliasing the (sorted, deduped) backing array.
		i := 1
		for i < len(values) && values[i].value == values[i-1].value+1 {
			i++
		}
		runs = append(runs, values[:i])
		values = values[i:]
	}
	return runs
}

// format returns the gofmt-ed contents of the Generator's buffer.
func (g *Generator) format() []byte {
	src, err := format.Source(g.buf.Bytes())
	if err != nil {
		// Should never happen, but can arise when developing this code.
		// The user can compile the output to see the error.
		// Fall back to the unformatted source so the problem is inspectable.
		log.Printf("warning: internal error: invalid Go generated: %s", err)
		log.Printf("warning: compile the package to analyze the error")
		return g.buf.Bytes()
	}
	return src
}

// Value represents a declared constant.
type Value struct {
	name string // The name of the constant.
	// The value is stored as a bit pattern alone. The boolean tells us
	// whether to interpret it as an int64 or a uint64; the only place
	// this matters is when sorting.
	// Much of the time the str field is all we need; it is printed
	// by Value.String.
	value  uint64 // Will be converted to int64 when needed.
	signed bool   // Whether the constant is a signed type.
	str    string // The string representation given by the "go/exact" package.
}

// String returns the constant's exact source-level representation.
func (v *Value) String() string {
	return v.str
}

// byValue lets us sort the constants into increasing order.
// We take care in the Less method to sort in signed or unsigned order,
// as appropriate.
type byValue []Value

func (b byValue) Len() int      { return len(b) }
func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byValue) Less(i, j int) bool {
	if b[i].signed {
		// Reinterpret the stored bit pattern as int64 for signed ordering.
		return int64(b[i].value) < int64(b[j].value)
	}
	return b[i].value < b[j].value
}

// genDecl processes one declaration clause.
// It is the callback for ast.Inspect: returns true to descend, false to stop.
func (f *File) genDecl(node ast.Node) bool {
	decl, ok := node.(*ast.GenDecl)
	if !ok || decl.Tok != token.CONST {
		// We only care about const declarations.
		return true
	}
	// The name of the type of the constants we are declaring.
	// Can change if this is a multi-element declaration.
	typ := ""
	// Loop over the elements of the declaration. Each element is a ValueSpec:
	// a list of names possibly followed by a type, possibly followed by values.
	// If the type and value are both missing, we carry down the type (and value,
	// but the "go/types" package takes care of that).
	for _, spec := range decl.Specs {
		vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST.
		if vspec.Type == nil && len(vspec.Values) > 0 {
			// "X = 1". With no type but a value, the constant is untyped.
			// Skip this vspec and reset the remembered type.
			typ = ""
			continue
		}
		if vspec.Type != nil {
			// "X T". We have a type. Remember it.
			ident, ok := vspec.Type.(*ast.Ident)
			if !ok {
				continue
			}
			typ = ident.Name
		}
		if typ != f.typeName {
			// This is not the type we're looking for.
			continue
		}
		// We now have a list of names (from one line of source code) all being
		// declared with the desired type.
		// Grab their names and actual values and store them in f.values.
		for _, name := range vspec.Names {
			if name.Name == "_" {
				continue
			}
			// This dance lets the type checker find the values for us. It's a
			// bit tricky: look up the object declared by the name, find its
			// types.Const, and extract its value.
			obj, ok := f.pkg.defs[name]
			if !ok {
				log.Fatalf("no value for constant %s", name)
			}
			info := obj.Type().Underlying().(*types.Basic).Info()
			if info&types.IsInteger == 0 {
				log.Fatalf("can't handle non-integer constant type %s", typ)
			}
			value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST.
			if value.Kind() != exact.Int {
				log.Fatalf("can't happen: constant is not an integer %s", name)
			}
			i64, isInt := exact.Int64Val(value)
			u64, isUint := exact.Uint64Val(value)
			if !isInt && !isUint {
				log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String())
			}
			// NOTE(review): this relies on go/constant's inexact results still
			// carrying the low-64-bit pattern (Uint64Val of a negative small
			// int returns its bit pattern); presumably intentional upstream —
			// do not "simplify" without checking go/constant behavior.
			if !isInt {
				u64 = uint64(i64)
			}
			v := Value{
				name:   name.Name,
				value:  u64,
				signed: info&types.IsUnsigned == 0,
				str:    value.String(),
			}
			f.values = append(f.values, v)
		}
	}
	return false
}

// Helpers

// usize returns the number of bits of the smallest unsigned integer
// type that will hold n. Used to create the smallest possible slice of
// integers to use as indexes into the concatenated strings.
func usize(n int) int {
	switch {
	case n < 1<<8:
		return 8
	case n < 1<<16:
		return 16
	default:
		// 2^32 is enough constants for anyone.
		return 32
	}
}

// declareIndexAndNameVars declares the index slices and concatenated names
// strings representing the runs of values.
+func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) { + var indexes, names []string + for i, run := range runs { + index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i)) + indexes = append(indexes, index) + names = append(names, name) + } + g.Printf("const (\n") + for _, name := range names { + g.Printf("\t%s\n", name) + } + g.Printf(")\n\n") + g.Printf("var (") + for _, index := range indexes { + g.Printf("\t%s\n", index) + } + g.Printf(")\n\n") +} + +// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars +func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) { + index, name := g.createIndexAndNameDecl(run, typeName, "") + g.Printf("const %s\n", name) + g.Printf("var %s\n", index) +} + +// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var". +func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) { + b := new(bytes.Buffer) + indexes := make([]int, len(run)) + for i := range run { + b.WriteString(run[i].name) + indexes[i] = b.Len() + } + nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String()) + nameLen := b.Len() + b.Reset() + fmt.Fprintf(b, "_%s_index%s = [...]uint%d{0, ", typeName, suffix, usize(nameLen)) + for i, v := range indexes { + if i > 0 { + fmt.Fprintf(b, ", ") + } + fmt.Fprintf(b, "%d", v) + } + fmt.Fprintf(b, "}") + return b.String(), nameConst +} + +// declareNameVars declares the concatenated names string representing all the values in the runs. +func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) { + g.Printf("const _%s_name%s = \"", typeName, suffix) + for _, run := range runs { + for i := range run { + g.Printf("%s", run[i].name) + } + } + g.Printf("\"\n") +} + +// buildOneRun generates the variables and String method for a single run of contiguous values. 
+func (g *Generator) buildOneRun(runs [][]Value, typeName string) { + values := runs[0] + g.Printf("\n") + g.declareIndexAndNameVar(values, typeName) + // The generated code is simple enough to write as a Printf format. + lessThanZero := "" + if values[0].signed { + lessThanZero = "i < 0 || " + } + if values[0].value == 0 { // Signed or unsigned, 0 is still 0. + g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero) + } else { + g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero) + } +} + +// Arguments to format are: +// [1]: type name +// [2]: size of index element (8 for uint8 etc.) +// [3]: less than zero check (for signed types) +const stringOneRun = `func (i %[1]s) String() string { + if %[3]si >= %[1]s(len(_%[1]s_index)-1) { + return fmt.Sprintf("%[1]s(%%d)", i) + } + return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]] +} +` + +// Arguments to format are: +// [1]: type name +// [2]: lowest defined value for type, as a string +// [3]: size of index element (8 for uint8 etc.) +// [4]: less than zero check (for signed types) +/* + */ +const stringOneRunWithOffset = `func (i %[1]s) String() string { + i -= %[2]s + if %[4]si >= %[1]s(len(_%[1]s_index)-1) { + return fmt.Sprintf("%[1]s(%%d)", i + %[2]s) + } + return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]] +} +` + +// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values. +// For this pattern, a single Printf format won't do. 
+func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) { + g.Printf("\n") + g.declareIndexAndNameVars(runs, typeName) + g.Printf("func (i %s) String() string {\n", typeName) + g.Printf("\tswitch {\n") + for i, values := range runs { + if len(values) == 1 { + g.Printf("\tcase i == %s:\n", &values[0]) + g.Printf("\t\treturn _%s_name_%d\n", typeName, i) + continue + } + g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1]) + if values[0].value != 0 { + g.Printf("\t\ti -= %s\n", &values[0]) + } + g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n", + typeName, i, typeName, i, typeName, i) + } + g.Printf("\tdefault:\n") + g.Printf("\t\treturn fmt.Sprintf(\"%s(%%d)\", i)\n", typeName) + g.Printf("\t}\n") + g.Printf("}\n") +} + +// buildMap handles the case where the space is so sparse a map is a reasonable fallback. +// It's a rare situation but has simple code. +func (g *Generator) buildMap(runs [][]Value, typeName string) { + g.Printf("\n") + g.declareNameVars(runs, typeName, "") + g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName) + n := 0 + for _, values := range runs { + for _, value := range values { + g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name)) + n += len(value.name) + } + } + g.Printf("}\n\n") + g.Printf(stringMap, typeName) +} + +// Argument to format is the type name. +const stringMap = `func (i %[1]s) String() string { + if str, ok := _%[1]s_map[i]; ok { + return str + } + return fmt.Sprintf("%[1]s(%%d)", i) +} +` diff --git a/go/ast/astutil/enclosing.go b/go/ast/astutil/enclosing.go index 6b7052b892..94dff19cda 100644 --- a/go/ast/astutil/enclosing.go +++ b/go/ast/astutil/enclosing.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.8 + package astutil // This file defines utilities for working with source positions. 
diff --git a/go/ast/astutil/enclosing18.go b/go/ast/astutil/enclosing18.go new file mode 100644 index 0000000000..af897cb0b8 --- /dev/null +++ b/go/ast/astutil/enclosing18.go @@ -0,0 +1,629 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. 
+// +// Precondition: [start, end) both lie within the same file as root. +// TODO(adonovan): return (nil, false) in this case and remove precond. +// Requires FileSet; see loader.tokenFileContainsPos. +// +// Postcondition: path is never nil; it always contains at least 'root'. +// +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + _, isToken := child.(tokenNode) + return isToken || visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. 
left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) + if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +// +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +// +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. 
+ ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. + switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case 
*ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case *ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), + tok(n.Closing, len(")"))) + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. + // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // 
TODO(adonovan): ImportSpec.{Doc,EndPos}? + + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("{")), + tok(n.Rbrack, len("}"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? 
+ + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. + sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. +// +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: 
+ return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case 
*ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} diff --git a/go/ssa/builder.go b/go/ssa/builder.go index e19da8f161..6dc554b9e6 100644 --- a/go/ssa/builder.go +++ b/go/ssa/builder.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.5 +// +build !go1.8 package ssa diff --git a/go/ssa/builder18.go b/go/ssa/builder18.go new file mode 100644 index 0000000000..db89eb434a --- /dev/null +++ b/go/ssa/builder18.go @@ -0,0 +1,2385 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package ssa + +// This file implements the BUILD phase of SSA construction. +// +// SSA construction has two phases, CREATE and BUILD. In the CREATE phase +// (create.go), all packages are constructed and type-checked and +// definitions of all package members are created, method-sets are +// computed, and wrapper methods are synthesized. +// ssa.Packages are created in arbitrary order. +// +// In the BUILD phase (builder.go), the builder traverses the AST of +// each Go source function and generates SSA instructions for the +// function body. 
Initializer expressions for package-level variables +// are emitted to the package's init() function in the order specified +// by go/types.Info.InitOrder, then code for each function in the +// package is generated in lexical order. +// The BUILD phases for distinct packages are independent and are +// executed in parallel. +// +// TODO(adonovan): indeed, building functions is now embarrassingly parallel. +// Audit for concurrency then benchmark using more goroutines. +// +// The builder's and Program's indices (maps) are populated and +// mutated during the CREATE phase, but during the BUILD phase they +// remain constant. The sole exception is Prog.methodSets and its +// related maps, which are protected by a dedicated mutex. + +import ( + "fmt" + "go/ast" + exact "go/constant" + "go/token" + "go/types" + "os" + "sync" +) + +type opaqueType struct { + types.Type + name string +} + +func (t *opaqueType) String() string { return t.name } + +var ( + varOk = newVar("ok", tBool) + varIndex = newVar("index", tInt) + + // Type constants. + tBool = types.Typ[types.Bool] + tByte = types.Typ[types.Byte] + tInt = types.Typ[types.Int] + tInvalid = types.Typ[types.Invalid] + tString = types.Typ[types.String] + tUntypedNil = types.Typ[types.UntypedNil] + tRangeIter = &opaqueType{nil, "iter"} // the type of all "range" iterators + tEface = new(types.Interface) + + // SSA Value constants. + vZero = intConst(0) + vOne = intConst(1) + vTrue = NewConst(exact.MakeBool(true), tBool) +) + +// builder holds state associated with the package currently being built. +// Its methods contain all the logic for AST-to-SSA conversion. +type builder struct{} + +// cond emits to fn code to evaluate boolean condition e and jump +// to t or f depending on its value, performing various simplifications. +// +// Postcondition: fn.currentBlock is nil. 
+//
+func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) {
+	switch e := e.(type) {
+	case *ast.ParenExpr:
+		b.cond(fn, e.X, t, f)
+		return
+
+	case *ast.BinaryExpr:
+		switch e.Op {
+		case token.LAND:
+			// e.X && e.Y: e.Y is evaluated only on the true edge of e.X.
+			ltrue := fn.newBasicBlock("cond.true")
+			b.cond(fn, e.X, ltrue, f)
+			fn.currentBlock = ltrue
+			b.cond(fn, e.Y, t, f)
+			return
+
+		case token.LOR:
+			// e.X || e.Y: e.Y is evaluated only on the false edge of e.X.
+			lfalse := fn.newBasicBlock("cond.false")
+			b.cond(fn, e.X, t, lfalse)
+			fn.currentBlock = lfalse
+			b.cond(fn, e.Y, t, f)
+			return
+		}
+
+	case *ast.UnaryExpr:
+		if e.Op == token.NOT {
+			// !e.X: swap the true and false targets.
+			b.cond(fn, e.X, f, t)
+			return
+		}
+	}
+
+	// A traditional compiler would simplify "if false" (etc) here
+	// but we do not, for better fidelity to the source code.
+	//
+	// The value of a constant condition may be platform-specific,
+	// and may cause blocks that are reachable in some configuration
+	// to be hidden from subsequent analyses such as bug-finding tools.
+	emitIf(fn, b.expr(fn, e), t, f)
+}
+
+// logicalBinop emits code to fn to evaluate e, a &&- or
+// ||-expression whose reified boolean value is wanted.
+// The value is returned.
+//
+func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value {
+	rhs := fn.newBasicBlock("binop.rhs")
+	done := fn.newBasicBlock("binop.done")
+
+	// T(e) = T(e.X) = T(e.Y) after untyped constants have been
+	// eliminated.
+	// TODO(adonovan): not true; MyBool==MyBool yields UntypedBool.
+	t := fn.Pkg.typeOf(e)
+
+	var short Value // value of the short-circuit path
+	switch e.Op {
+	case token.LAND:
+		b.cond(fn, e.X, rhs, done)
+		short = NewConst(exact.MakeBool(false), t)
+
+	case token.LOR:
+		b.cond(fn, e.X, done, rhs)
+		short = NewConst(exact.MakeBool(true), t)
+	}
+
+	// Is rhs unreachable?
+	if rhs.Preds == nil {
+		// Simplify false&&y to false, true||y to true.
+		fn.currentBlock = done
+		return short
+	}
+
+	// Is done unreachable?
+	if done.Preds == nil {
+		// Simplify true&&y (or false||y) to y.
+ fn.currentBlock = rhs + return b.expr(fn, e.Y) + } + + // All edges from e.X to done carry the short-circuit value. + var edges []Value + for _ = range done.Preds { + edges = append(edges, short) + } + + // The edge from e.Y to done carries the value of e.Y. + fn.currentBlock = rhs + edges = append(edges, b.expr(fn, e.Y)) + emitJump(fn, done) + fn.currentBlock = done + + phi := &Phi{Edges: edges, Comment: e.Op.String()} + phi.pos = e.OpPos + phi.typ = t + return done.emit(phi) +} + +// exprN lowers a multi-result expression e to SSA form, emitting code +// to fn and returning a single Value whose type is a *types.Tuple. +// The caller must access the components via Extract. +// +// Multi-result expressions include CallExprs in a multi-value +// assignment or return statement, and "value,ok" uses of +// TypeAssertExpr, IndexExpr (when X is a map), and UnaryExpr (when Op +// is token.ARROW). +// +func (b *builder) exprN(fn *Function, e ast.Expr) Value { + typ := fn.Pkg.typeOf(e).(*types.Tuple) + switch e := e.(type) { + case *ast.ParenExpr: + return b.exprN(fn, e.X) + + case *ast.CallExpr: + // Currently, no built-in function nor type conversion + // has multiple results, so we can avoid some of the + // cases for single-valued CallExpr. 
+ var c Call + b.setCall(fn, e, &c.Call) + c.typ = typ + return fn.emit(&c) + + case *ast.IndexExpr: + mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) + lookup := &Lookup{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()), + CommaOk: true, + } + lookup.setType(typ) + lookup.setPos(e.Lbrack) + return fn.emit(lookup) + + case *ast.TypeAssertExpr: + return emitTypeTest(fn, b.expr(fn, e.X), typ.At(0).Type(), e.Lparen) + + case *ast.UnaryExpr: // must be receive <- + unop := &UnOp{ + Op: token.ARROW, + X: b.expr(fn, e.X), + CommaOk: true, + } + unop.setType(typ) + unop.setPos(e.OpPos) + return fn.emit(unop) + } + panic(fmt.Sprintf("exprN(%T) in %s", e, fn)) +} + +// builtin emits to fn SSA instructions to implement a call to the +// built-in function obj with the specified arguments +// and return type. It returns the value defined by the result. +// +// The result is nil if no special handling was required; in this case +// the caller should treat this like an ordinary library function +// call. 
+// +func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, pos token.Pos) Value { + switch obj.Name() { + case "make": + switch typ.Underlying().(type) { + case *types.Slice: + n := b.expr(fn, args[1]) + m := n + if len(args) == 3 { + m = b.expr(fn, args[2]) + } + if m, ok := m.(*Const); ok { + // treat make([]T, n, m) as new([m]T)[:n] + cap := m.Int64() + at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap) + alloc := emitNew(fn, at, pos) + alloc.Comment = "makeslice" + v := &Slice{ + X: alloc, + High: n, + } + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + } + v := &MakeSlice{ + Len: n, + Cap: m, + } + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + + case *types.Map: + var res Value + if len(args) == 2 { + res = b.expr(fn, args[1]) + } + v := &MakeMap{Reserve: res} + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + + case *types.Chan: + var sz Value = vZero + if len(args) == 2 { + sz = b.expr(fn, args[1]) + } + v := &MakeChan{Size: sz} + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + } + + case "new": + alloc := emitNew(fn, deref(typ), pos) + alloc.Comment = "new" + return alloc + + case "len", "cap": + // Special case: len or cap of an array or *array is + // based on the type, not the value which may be nil. + // We must still evaluate the value, though. (If it + // was side-effect free, the whole call would have + // been constant-folded.) + t := deref(fn.Pkg.typeOf(args[0])).Underlying() + if at, ok := t.(*types.Array); ok { + b.expr(fn, args[0]) // for effects only + return intConst(at.Len()) + } + // Otherwise treat as normal. 
+ + case "panic": + fn.emit(&Panic{ + X: emitConv(fn, b.expr(fn, args[0]), tEface), + pos: pos, + }) + fn.currentBlock = fn.newBasicBlock("unreachable") + return vTrue // any non-nil Value will do + } + return nil // treat all others as a regular function call +} + +// addr lowers a single-result addressable expression e to SSA form, +// emitting code to fn and returning the location (an lvalue) defined +// by the expression. +// +// If escaping is true, addr marks the base variable of the +// addressable expression e as being a potentially escaping pointer +// value. For example, in this code: +// +// a := A{ +// b: [1]B{B{c: 1}} +// } +// return &a.b[0].c +// +// the application of & causes a.b[0].c to have its address taken, +// which means that ultimately the local variable a must be +// heap-allocated. This is a simple but very conservative escape +// analysis. +// +// Operations forming potentially escaping pointers include: +// - &x, including when implicit in method call or composite literals. +// - a[:] iff a is an array (not *array) +// - references to variables in lexically enclosing functions. 
+// +func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { + switch e := e.(type) { + case *ast.Ident: + if isBlankIdent(e) { + return blank{} + } + obj := fn.Pkg.objectOf(e) + v := fn.Prog.packageLevelValue(obj) // var (address) + if v == nil { + v = fn.lookup(obj, escaping) + } + return &address{addr: v, pos: e.Pos(), expr: e} + + case *ast.CompositeLit: + t := deref(fn.Pkg.typeOf(e)) + var v *Alloc + if escaping { + v = emitNew(fn, t, e.Lbrace) + } else { + v = fn.addLocal(t, e.Lbrace) + } + v.Comment = "complit" + var sb storebuf + b.compLit(fn, v, e, true, &sb) + sb.emit(fn) + return &address{addr: v, pos: e.Lbrace, expr: e} + + case *ast.ParenExpr: + return b.addr(fn, e.X, escaping) + + case *ast.SelectorExpr: + sel, ok := fn.Pkg.info.Selections[e] + if !ok { + // qualified identifier + return b.addr(fn, e.Sel, escaping) + } + if sel.Kind() != types.FieldVal { + panic(sel) + } + wantAddr := true + v := b.receiver(fn, e.X, wantAddr, escaping, sel) + last := len(sel.Index()) - 1 + return &address{ + addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel), + pos: e.Sel.Pos(), + expr: e.Sel, + } + + case *ast.IndexExpr: + var x Value + var et types.Type + switch t := fn.Pkg.typeOf(e.X).Underlying().(type) { + case *types.Array: + x = b.addr(fn, e.X, escaping).address(fn) + et = types.NewPointer(t.Elem()) + case *types.Pointer: // *array + x = b.expr(fn, e.X) + et = types.NewPointer(t.Elem().Underlying().(*types.Array).Elem()) + case *types.Slice: + x = b.expr(fn, e.X) + et = types.NewPointer(t.Elem()) + case *types.Map: + return &element{ + m: b.expr(fn, e.X), + k: emitConv(fn, b.expr(fn, e.Index), t.Key()), + t: t.Elem(), + pos: e.Lbrack, + } + default: + panic("unexpected container type in IndexExpr: " + t.String()) + } + v := &IndexAddr{ + X: x, + Index: emitConv(fn, b.expr(fn, e.Index), tInt), + } + v.setPos(e.Lbrack) + v.setType(et) + return &address{addr: fn.emit(v), pos: e.Lbrack, expr: e} + + case *ast.StarExpr: + return 
&address{addr: b.expr(fn, e.X), pos: e.Star, expr: e} + } + + panic(fmt.Sprintf("unexpected address expression: %T", e)) +} + +type store struct { + lhs lvalue + rhs Value +} + +type storebuf struct{ stores []store } + +func (sb *storebuf) store(lhs lvalue, rhs Value) { + sb.stores = append(sb.stores, store{lhs, rhs}) +} + +func (sb *storebuf) emit(fn *Function) { + for _, s := range sb.stores { + s.lhs.store(fn, s.rhs) + } +} + +// assign emits to fn code to initialize the lvalue loc with the value +// of expression e. If isZero is true, assign assumes that loc holds +// the zero value for its type. +// +// This is equivalent to loc.store(fn, b.expr(fn, e)), but may generate +// better code in some cases, e.g., for composite literals in an +// addressable location. +// +// If sb is not nil, assign generates code to evaluate expression e, but +// not to update loc. Instead, the necessary stores are appended to the +// storebuf sb so that they can be executed later. This allows correct +// in-place update of existing variables when the RHS is a composite +// literal that may reference parts of the LHS. +// +func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf) { + // Can we initialize it in place? + if e, ok := unparen(e).(*ast.CompositeLit); ok { + // A CompositeLit never evaluates to a pointer, + // so if the type of the location is a pointer, + // an &-operation is implied. + if _, ok := loc.(blank); !ok { // avoid calling blank.typ() + if isPointer(loc.typ()) { + ptr := b.addr(fn, e, true).address(fn) + // copy address + if sb != nil { + sb.store(loc, ptr) + } else { + loc.store(fn, ptr) + } + return + } + } + + if _, ok := loc.(*address); ok { + if isInterface(loc.typ()) { + // e.g. var x interface{} = T{...} + // Can't in-place initialize an interface value. + // Fall back to copying. 
+ } else { + // x = T{...} or x := T{...} + addr := loc.address(fn) + if sb != nil { + b.compLit(fn, addr, e, isZero, sb) + } else { + var sb storebuf + b.compLit(fn, addr, e, isZero, &sb) + sb.emit(fn) + } + + // Subtle: emit debug ref for aggregate types only; + // slice and map are handled by store ops in compLit. + switch loc.typ().Underlying().(type) { + case *types.Struct, *types.Array: + emitDebugRef(fn, e, addr, true) + } + + return + } + } + } + + // simple case: just copy + rhs := b.expr(fn, e) + if sb != nil { + sb.store(loc, rhs) + } else { + loc.store(fn, rhs) + } +} + +// expr lowers a single-result expression e to SSA form, emitting code +// to fn and returning the Value defined by the expression. +// +func (b *builder) expr(fn *Function, e ast.Expr) Value { + e = unparen(e) + + tv := fn.Pkg.info.Types[e] + + // Is expression a constant? + if tv.Value != nil { + return NewConst(tv.Value, tv.Type) + } + + var v Value + if tv.Addressable() { + // Prefer pointer arithmetic ({Index,Field}Addr) followed + // by Load over subelement extraction (e.g. Index, Field), + // to avoid large copies. 
+ v = b.addr(fn, e, false).load(fn) + } else { + v = b.expr0(fn, e, tv) + } + if fn.debugInfo() { + emitDebugRef(fn, e, v, false) + } + return v +} + +func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { + switch e := e.(type) { + case *ast.BasicLit: + panic("non-constant BasicLit") // unreachable + + case *ast.FuncLit: + fn2 := &Function{ + name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)), + Signature: fn.Pkg.typeOf(e.Type).Underlying().(*types.Signature), + pos: e.Type.Func, + parent: fn, + Pkg: fn.Pkg, + Prog: fn.Prog, + syntax: e, + } + fn.AnonFuncs = append(fn.AnonFuncs, fn2) + b.buildFunction(fn2) + if fn2.FreeVars == nil { + return fn2 + } + v := &MakeClosure{Fn: fn2} + v.setType(tv.Type) + for _, fv := range fn2.FreeVars { + v.Bindings = append(v.Bindings, fv.outer) + fv.outer = nil + } + return fn.emit(v) + + case *ast.TypeAssertExpr: // single-result form only + return emitTypeAssert(fn, b.expr(fn, e.X), tv.Type, e.Lparen) + + case *ast.CallExpr: + if fn.Pkg.info.Types[e.Fun].IsType() { + // Explicit type conversion, e.g. string(x) or big.Int(x) + x := b.expr(fn, e.Args[0]) + y := emitConv(fn, x, tv.Type) + if y != x { + switch y := y.(type) { + case *Convert: + y.pos = e.Lparen + case *ChangeType: + y.pos = e.Lparen + case *MakeInterface: + y.pos = e.Lparen + } + } + return y + } + // Call to "intrinsic" built-ins, e.g. new, make, panic. + if id, ok := unparen(e.Fun).(*ast.Ident); ok { + if obj, ok := fn.Pkg.info.Uses[id].(*types.Builtin); ok { + if v := b.builtin(fn, obj, e.Args, tv.Type, e.Lparen); v != nil { + return v + } + } + } + // Regular function call. + var v Call + b.setCall(fn, e, &v.Call) + v.setType(tv.Type) + return fn.emit(&v) + + case *ast.UnaryExpr: + switch e.Op { + case token.AND: // &X --- potentially escaping. + addr := b.addr(fn, e.X, true) + if _, ok := unparen(e.X).(*ast.StarExpr); ok { + // &*p must panic if p is nil (http://golang.org/s/go12nil). 
+ // For simplicity, we'll just (suboptimally) rely + // on the side effects of a load. + // TODO(adonovan): emit dedicated nilcheck. + addr.load(fn) + } + return addr.address(fn) + case token.ADD: + return b.expr(fn, e.X) + case token.NOT, token.ARROW, token.SUB, token.XOR: // ! <- - ^ + v := &UnOp{ + Op: e.Op, + X: b.expr(fn, e.X), + } + v.setPos(e.OpPos) + v.setType(tv.Type) + return fn.emit(v) + default: + panic(e.Op) + } + + case *ast.BinaryExpr: + switch e.Op { + case token.LAND, token.LOR: + return b.logicalBinop(fn, e) + case token.SHL, token.SHR: + fallthrough + case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: + return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), tv.Type, e.OpPos) + + case token.EQL, token.NEQ, token.GTR, token.LSS, token.LEQ, token.GEQ: + cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e.OpPos) + // The type of x==y may be UntypedBool. + return emitConv(fn, cmp, DefaultType(tv.Type)) + default: + panic("illegal op in BinaryExpr: " + e.Op.String()) + } + + case *ast.SliceExpr: + var low, high, max Value + var x Value + switch fn.Pkg.typeOf(e.X).Underlying().(type) { + case *types.Array: + // Potentially escaping. + x = b.addr(fn, e.X, true).address(fn) + case *types.Basic, *types.Slice, *types.Pointer: // *array + x = b.expr(fn, e.X) + default: + panic("unreachable") + } + if e.High != nil { + high = b.expr(fn, e.High) + } + if e.Low != nil { + low = b.expr(fn, e.Low) + } + if e.Slice3 { + max = b.expr(fn, e.Max) + } + v := &Slice{ + X: x, + Low: low, + High: high, + Max: max, + } + v.setPos(e.Lbrack) + v.setType(tv.Type) + return fn.emit(v) + + case *ast.Ident: + obj := fn.Pkg.info.Uses[e] + // Universal built-in or nil? + switch obj := obj.(type) { + case *types.Builtin: + return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)} + case *types.Nil: + return nilConst(tv.Type) + } + // Package-level func or var? 
+ if v := fn.Prog.packageLevelValue(obj); v != nil { + if _, ok := obj.(*types.Var); ok { + return emitLoad(fn, v) // var (address) + } + return v // (func) + } + // Local var. + return emitLoad(fn, fn.lookup(obj, false)) // var (address) + + case *ast.SelectorExpr: + sel, ok := fn.Pkg.info.Selections[e] + if !ok { + // qualified identifier + return b.expr(fn, e.Sel) + } + switch sel.Kind() { + case types.MethodExpr: + // (*T).f or T.f, the method f from the method-set of type T. + // The result is a "thunk". + return emitConv(fn, makeThunk(fn.Prog, sel), tv.Type) + + case types.MethodVal: + // e.f where e is an expression and f is a method. + // The result is a "bound". + obj := sel.Obj().(*types.Func) + rt := recvType(obj) + wantAddr := isPointer(rt) + escaping := true + v := b.receiver(fn, e.X, wantAddr, escaping, sel) + if isInterface(rt) { + // If v has interface type I, + // we must emit a check that v is non-nil. + // We use: typeassert v.(I). + emitTypeAssert(fn, v, rt, token.NoPos) + } + c := &MakeClosure{ + Fn: makeBound(fn.Prog, obj), + Bindings: []Value{v}, + } + c.setPos(e.Sel.Pos()) + c.setType(tv.Type) + return fn.emit(c) + + case types.FieldVal: + indices := sel.Index() + last := len(indices) - 1 + v := b.expr(fn, e.X) + v = emitImplicitSelections(fn, v, indices[:last]) + v = emitFieldSelection(fn, v, indices[last], false, e.Sel) + return v + } + + panic("unexpected expression-relative selector") + + case *ast.IndexExpr: + switch t := fn.Pkg.typeOf(e.X).Underlying().(type) { + case *types.Array: + // Non-addressable array (in a register). + v := &Index{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), tInt), + } + v.setPos(e.Lbrack) + v.setType(t.Elem()) + return fn.emit(v) + + case *types.Map: + // Maps are not addressable. 
+		mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map)
+		v := &Lookup{
+			X:     b.expr(fn, e.X),
+			Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()),
+		}
+		v.setPos(e.Lbrack)
+		v.setType(mapt.Elem())
+		return fn.emit(v)
+
+	case *types.Basic: // => string
+		// Strings are not addressable.
+		v := &Lookup{
+			X:     b.expr(fn, e.X),
+			Index: b.expr(fn, e.Index),
+		}
+		v.setPos(e.Lbrack)
+		v.setType(tByte)
+		return fn.emit(v)
+
+	case *types.Slice, *types.Pointer: // *array
+		// Addressable slice/array; use IndexAddr and Load.
+		return b.addr(fn, e, false).load(fn)
+
+	default:
+		panic("unexpected container type in IndexExpr: " + t.String())
+	}
+
+	case *ast.CompositeLit, *ast.StarExpr:
+		// Addressable types (lvalues)
+		return b.addr(fn, e, false).load(fn)
+	}
+
+	panic(fmt.Sprintf("unexpected expr: %T", e))
+}
+
+// stmtList emits to fn code for all statements in list, in order.
+func (b *builder) stmtList(fn *Function, list []ast.Stmt) {
+	for _, s := range list {
+		b.stmt(fn, s)
+	}
+}
+
+// receiver emits to fn code for expression e in the "receiver"
+// position of selection e.f (where f may be a field or a method) and
+// returns the effective receiver after applying the implicit field
+// selections of sel.
+//
+// wantAddr requests that the result is an address.  If
+// !sel.Indirect(), this may require that e be built in addr() mode; it
+// must thus be addressable.
+//
+// escaping is defined as per builder.addr().
+// +func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection) Value { + var v Value + if wantAddr && !sel.Indirect() && !isPointer(fn.Pkg.typeOf(e)) { + v = b.addr(fn, e, escaping).address(fn) + } else { + v = b.expr(fn, e) + } + + last := len(sel.Index()) - 1 + v = emitImplicitSelections(fn, v, sel.Index()[:last]) + if !wantAddr && isPointer(v.Type()) { + v = emitLoad(fn, v) + } + return v +} + +// setCallFunc populates the function parts of a CallCommon structure +// (Func, Method, Recv, Args[0]) based on the kind of invocation +// occurring in e. +// +func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { + c.pos = e.Lparen + + // Is this a method call? + if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok { + sel, ok := fn.Pkg.info.Selections[selector] + if ok && sel.Kind() == types.MethodVal { + obj := sel.Obj().(*types.Func) + recv := recvType(obj) + wantAddr := isPointer(recv) + escaping := true + v := b.receiver(fn, selector.X, wantAddr, escaping, sel) + if isInterface(recv) { + // Invoke-mode call. + c.Value = v + c.Method = obj + } else { + // "Call"-mode call. + c.Value = fn.Prog.declaredFunc(obj) + c.Args = append(c.Args, v) + } + return + } + + // sel.Kind()==MethodExpr indicates T.f() or (*T).f(): + // a statically dispatched call to the method f in the + // method-set of T or *T. T may be an interface. + // + // e.Fun would evaluate to a concrete method, interface + // wrapper function, or promotion wrapper. + // + // For now, we evaluate it in the usual way. + // + // TODO(adonovan): opt: inline expr() here, to make the + // call static and to avoid generation of wrappers. + // It's somewhat tricky as it may consume the first + // actual parameter if the call is "invoke" mode. 
+ // + // Examples: + // type T struct{}; func (T) f() {} // "call" mode + // type T interface { f() } // "invoke" mode + // + // type S struct{ T } + // + // var s S + // S.f(s) + // (*S).f(&s) + // + // Suggested approach: + // - consume the first actual parameter expression + // and build it with b.expr(). + // - apply implicit field selections. + // - use MethodVal logic to populate fields of c. + } + + // Evaluate the function operand in the usual way. + c.Value = b.expr(fn, e.Fun) +} + +// emitCallArgs emits to f code for the actual parameters of call e to +// a (possibly built-in) function of effective type sig. +// The argument values are appended to args, which is then returned. +// +func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value { + // f(x, y, z...): pass slice z straight through. + if e.Ellipsis != 0 { + for i, arg := range e.Args { + v := emitConv(fn, b.expr(fn, arg), sig.Params().At(i).Type()) + args = append(args, v) + } + return args + } + + offset := len(args) // 1 if call has receiver, 0 otherwise + + // Evaluate actual parameter expressions. + // + // If this is a chained call of the form f(g()) where g has + // multiple return values (MRV), they are flattened out into + // args; a suffix of them may end up in a varargs slice. + for _, arg := range e.Args { + v := b.expr(fn, arg) + if ttuple, ok := v.Type().(*types.Tuple); ok { // MRV chain + for i, n := 0, ttuple.Len(); i < n; i++ { + args = append(args, emitExtract(fn, v, i)) + } + } else { + args = append(args, v) + } + } + + // Actual->formal assignability conversions for normal parameters. + np := sig.Params().Len() // number of normal parameters + if sig.Variadic() { + np-- + } + for i := 0; i < np; i++ { + args[offset+i] = emitConv(fn, args[offset+i], sig.Params().At(i).Type()) + } + + // Actual->formal assignability conversions for variadic parameter, + // and construction of slice. 
+ if sig.Variadic() { + varargs := args[offset+np:] + st := sig.Params().At(np).Type().(*types.Slice) + vt := st.Elem() + if len(varargs) == 0 { + args = append(args, nilConst(st)) + } else { + // Replace a suffix of args with a slice containing it. + at := types.NewArray(vt, int64(len(varargs))) + a := emitNew(fn, at, token.NoPos) + a.setPos(e.Rparen) + a.Comment = "varargs" + for i, arg := range varargs { + iaddr := &IndexAddr{ + X: a, + Index: intConst(int64(i)), + } + iaddr.setType(types.NewPointer(vt)) + fn.emit(iaddr) + emitStore(fn, iaddr, arg, arg.Pos()) + } + s := &Slice{X: a} + s.setType(st) + args[offset+np] = fn.emit(s) + args = args[:offset+np+1] + } + } + return args +} + +// setCall emits to fn code to evaluate all the parameters of a function +// call e, and populates *c with those values. +// +func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) { + // First deal with the f(...) part and optional receiver. + b.setCallFunc(fn, e, c) + + // Then append the other actual parameters. + sig, _ := fn.Pkg.typeOf(e.Fun).Underlying().(*types.Signature) + if sig == nil { + panic(fmt.Sprintf("no signature for call of %s", e.Fun)) + } + c.Args = b.emitCallArgs(fn, sig, e, c.Args) +} + +// assignOp emits to fn code to perform loc += incr or loc -= incr. +func (b *builder) assignOp(fn *Function, loc lvalue, incr Value, op token.Token) { + oldv := loc.load(fn) + loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, incr, oldv.Type()), loc.typ(), token.NoPos)) +} + +// localValueSpec emits to fn code to define all of the vars in the +// function-local ValueSpec, spec. +// +func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { + switch { + case len(spec.Values) == len(spec.Names): + // e.g. 
var x, y = 0, 1 + // 1:1 assignment + for i, id := range spec.Names { + if !isBlankIdent(id) { + fn.addLocalForIdent(id) + } + lval := b.addr(fn, id, false) // non-escaping + b.assign(fn, lval, spec.Values[i], true, nil) + } + + case len(spec.Values) == 0: + // e.g. var x, y int + // Locals are implicitly zero-initialized. + for _, id := range spec.Names { + if !isBlankIdent(id) { + lhs := fn.addLocalForIdent(id) + if fn.debugInfo() { + emitDebugRef(fn, id, lhs, true) + } + } + } + + default: + // e.g. var x, y = pos() + tuple := b.exprN(fn, spec.Values[0]) + for i, id := range spec.Names { + if !isBlankIdent(id) { + fn.addLocalForIdent(id) + lhs := b.addr(fn, id, false) // non-escaping + lhs.store(fn, emitExtract(fn, tuple, i)) + } + } + } +} + +// assignStmt emits code to fn for a parallel assignment of rhss to lhss. +// isDef is true if this is a short variable declaration (:=). +// +// Note the similarity with localValueSpec. +// +func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) { + // Side effects of all LHSs and RHSs must occur in left-to-right order. + lvals := make([]lvalue, len(lhss)) + isZero := make([]bool, len(lhss)) + for i, lhs := range lhss { + var lval lvalue = blank{} + if !isBlankIdent(lhs) { + if isDef { + if obj := fn.Pkg.info.Defs[lhs.(*ast.Ident)]; obj != nil { + fn.addNamedLocal(obj) + isZero[i] = true + } + } + lval = b.addr(fn, lhs, false) // non-escaping + } + lvals[i] = lval + } + if len(lhss) == len(rhss) { + // Simple assignment: x = f() (!isDef) + // Parallel assignment: x, y = f(), g() (!isDef) + // or short var decl: x, y := f(), g() (isDef) + // + // In all cases, the RHSs may refer to the LHSs, + // so we need a storebuf. + var sb storebuf + for i := range rhss { + b.assign(fn, lvals[i], rhss[i], isZero[i], &sb) + } + sb.emit(fn) + } else { + // e.g. 
x, y = pos() + tuple := b.exprN(fn, rhss[0]) + emitDebugRef(fn, rhss[0], tuple, false) + for i, lval := range lvals { + lval.store(fn, emitExtract(fn, tuple, i)) + } + } +} + +// arrayLen returns the length of the array whose composite literal elements are elts. +func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { + var max int64 = -1 + var i int64 = -1 + for _, e := range elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + i = b.expr(fn, kv.Key).(*Const).Int64() + } else { + i++ + } + if i > max { + max = i + } + } + return max + 1 +} + +// compLit emits to fn code to initialize a composite literal e at +// address addr with type typ. +// +// Nested composite literals are recursively initialized in place +// where possible. If isZero is true, compLit assumes that addr +// holds the zero value for typ. +// +// Because the elements of a composite literal may refer to the +// variables being updated, as in the second line below, +// x := T{a: 1} +// x = T{a: x.a} +// all the reads must occur before all the writes. Thus all stores to +// loc are emitted to the storebuf sb for later execution. +// +// A CompositeLit may have pointer type only in the recursive (nested) +// case when the type name is implicit. e.g. in []*T{{}}, the inner +// literal has type *T behaves like &T{}. +// In that case, addr must hold a T, not a *T. 
+// +func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { + typ := deref(fn.Pkg.typeOf(e)) + switch t := typ.Underlying().(type) { + case *types.Struct: + if !isZero && len(e.Elts) != t.NumFields() { + // memclear + sb.store(&address{addr, e.Lbrace, nil}, + zeroValue(fn, deref(addr.Type()))) + isZero = true + } + for i, e := range e.Elts { + fieldIndex := i + pos := e.Pos() + if kv, ok := e.(*ast.KeyValueExpr); ok { + fname := kv.Key.(*ast.Ident).Name + for i, n := 0, t.NumFields(); i < n; i++ { + sf := t.Field(i) + if sf.Name() == fname { + fieldIndex = i + pos = kv.Colon + e = kv.Value + break + } + } + } + sf := t.Field(fieldIndex) + faddr := &FieldAddr{ + X: addr, + Field: fieldIndex, + } + faddr.setType(types.NewPointer(sf.Type())) + fn.emit(faddr) + b.assign(fn, &address{addr: faddr, pos: pos, expr: e}, e, isZero, sb) + } + + case *types.Array, *types.Slice: + var at *types.Array + var array Value + switch t := t.(type) { + case *types.Slice: + at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts)) + alloc := emitNew(fn, at, e.Lbrace) + alloc.Comment = "slicelit" + array = alloc + case *types.Array: + at = t + array = addr + + if !isZero && int64(len(e.Elts)) != at.Len() { + // memclear + sb.store(&address{array, e.Lbrace, nil}, + zeroValue(fn, deref(array.Type()))) + } + } + + var idx *Const + for _, e := range e.Elts { + pos := e.Pos() + if kv, ok := e.(*ast.KeyValueExpr); ok { + idx = b.expr(fn, kv.Key).(*Const) + pos = kv.Colon + e = kv.Value + } else { + var idxval int64 + if idx != nil { + idxval = idx.Int64() + 1 + } + idx = intConst(idxval) + } + iaddr := &IndexAddr{ + X: array, + Index: idx, + } + iaddr.setType(types.NewPointer(at.Elem())) + fn.emit(iaddr) + if t != at { // slice + // backing array is unaliased => storebuf not needed. 
+ b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, nil) + } else { + b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, sb) + } + } + + if t != at { // slice + s := &Slice{X: array} + s.setPos(e.Lbrace) + s.setType(typ) + sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, fn.emit(s)) + } + + case *types.Map: + m := &MakeMap{Reserve: intConst(int64(len(e.Elts)))} + m.setPos(e.Lbrace) + m.setType(typ) + fn.emit(m) + for _, e := range e.Elts { + e := e.(*ast.KeyValueExpr) + + // If a key expression in a map literal is itself a + // composite literal, the type may be omitted. + // For example: + // map[*struct{}]bool{{}: true} + // An &-operation may be implied: + // map[*struct{}]bool{&struct{}{}: true} + var key Value + if _, ok := unparen(e.Key).(*ast.CompositeLit); ok && isPointer(t.Key()) { + // A CompositeLit never evaluates to a pointer, + // so if the type of the location is a pointer, + // an &-operation is implied. + key = b.addr(fn, e.Key, true).address(fn) + } else { + key = b.expr(fn, e.Key) + } + + loc := element{ + m: m, + k: emitConv(fn, key, t.Key()), + t: t.Elem(), + pos: e.Colon, + } + + // We call assign() only because it takes care + // of any &-operation required in the recursive + // case, e.g., + // map[int]*struct{}{0: {}} implies &struct{}{}. + // In-place update is of course impossible, + // and no storebuf is needed. + b.assign(fn, &loc, e.Value, true, nil) + } + sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, m) + + default: + panic("unexpected CompositeLit type: " + t.String()) + } +} + +// switchStmt emits to fn code for the switch statement s, optionally +// labelled by label. +// +func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { + // We treat SwitchStmt like a sequential if-else chain. + // Multiway dispatch can be recovered later by ssautil.Switches() + // to those cases that are free of side effects. 
+ if s.Init != nil { + b.stmt(fn, s.Init) + } + var tag Value = vTrue + if s.Tag != nil { + tag = b.expr(fn, s.Tag) + } + done := fn.newBasicBlock("switch.done") + if label != nil { + label._break = done + } + // We pull the default case (if present) down to the end. + // But each fallthrough label must point to the next + // body block in source order, so we preallocate a + // body block (fallthru) for the next case. + // Unfortunately this makes for a confusing block order. + var dfltBody *[]ast.Stmt + var dfltFallthrough *BasicBlock + var fallthru, dfltBlock *BasicBlock + ncases := len(s.Body.List) + for i, clause := range s.Body.List { + body := fallthru + if body == nil { + body = fn.newBasicBlock("switch.body") // first case only + } + + // Preallocate body block for the next case. + fallthru = done + if i+1 < ncases { + fallthru = fn.newBasicBlock("switch.body") + } + + cc := clause.(*ast.CaseClause) + if cc.List == nil { + // Default case. + dfltBody = &cc.Body + dfltFallthrough = fallthru + dfltBlock = body + continue + } + + var nextCond *BasicBlock + for _, cond := range cc.List { + nextCond = fn.newBasicBlock("switch.next") + // TODO(adonovan): opt: when tag==vTrue, we'd + // get better code if we use b.cond(cond) + // instead of BinOp(EQL, tag, b.expr(cond)) + // followed by If. Don't forget conversions + // though. 
+ cond := emitCompare(fn, token.EQL, tag, b.expr(fn, cond), token.NoPos) + emitIf(fn, cond, body, nextCond) + fn.currentBlock = nextCond + } + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _fallthrough: fallthru, + } + b.stmtList(fn, cc.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) + fn.currentBlock = nextCond + } + if dfltBlock != nil { + emitJump(fn, dfltBlock) + fn.currentBlock = dfltBlock + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _fallthrough: dfltFallthrough, + } + b.stmtList(fn, *dfltBody) + fn.targets = fn.targets.tail + } + emitJump(fn, done) + fn.currentBlock = done +} + +// typeSwitchStmt emits to fn code for the type switch statement s, optionally +// labelled by label. +// +func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lblock) { + // We treat TypeSwitchStmt like a sequential if-else chain. + // Multiway dispatch can be recovered later by ssautil.Switches(). + + // Typeswitch lowering: + // + // var x X + // switch y := x.(type) { + // case T1, T2: S1 // >1 (y := x) + // case nil: SN // nil (y := x) + // default: SD // 0 types (y := x) + // case T3: S3 // 1 type (y := x.(T3)) + // } + // + // ...s.Init... + // x := eval x + // .caseT1: + // t1, ok1 := typeswitch,ok x + // if ok1 then goto S1 else goto .caseT2 + // .caseT2: + // t2, ok2 := typeswitch,ok x + // if ok2 then goto S1 else goto .caseNil + // .S1: + // y := x + // ...S1... + // goto done + // .caseNil: + // if t2, ok2 := typeswitch,ok x + // if x == nil then goto SN else goto .caseT3 + // .SN: + // y := x + // ...SN... + // goto done + // .caseT3: + // t3, ok3 := typeswitch,ok x + // if ok3 then goto S3 else goto default + // .S3: + // y := t3 + // ...S3... + // goto done + // .default: + // y := x + // ...SD... 
+ // goto done + // .done: + + if s.Init != nil { + b.stmt(fn, s.Init) + } + + var x Value + switch ass := s.Assign.(type) { + case *ast.ExprStmt: // x.(type) + x = b.expr(fn, unparen(ass.X).(*ast.TypeAssertExpr).X) + case *ast.AssignStmt: // y := x.(type) + x = b.expr(fn, unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X) + } + + done := fn.newBasicBlock("typeswitch.done") + if label != nil { + label._break = done + } + var default_ *ast.CaseClause + for _, clause := range s.Body.List { + cc := clause.(*ast.CaseClause) + if cc.List == nil { + default_ = cc + continue + } + body := fn.newBasicBlock("typeswitch.body") + var next *BasicBlock + var casetype types.Type + var ti Value // ti, ok := typeassert,ok x + for _, cond := range cc.List { + next = fn.newBasicBlock("typeswitch.next") + casetype = fn.Pkg.typeOf(cond) + var condv Value + if casetype == tUntypedNil { + condv = emitCompare(fn, token.EQL, x, nilConst(x.Type()), token.NoPos) + ti = x + } else { + yok := emitTypeTest(fn, x, casetype, cc.Case) + ti = emitExtract(fn, yok, 0) + condv = emitExtract(fn, yok, 1) + } + emitIf(fn, condv, body, next) + fn.currentBlock = next + } + if len(cc.List) != 1 { + ti = x + } + fn.currentBlock = body + b.typeCaseBody(fn, cc, ti, done) + fn.currentBlock = next + } + if default_ != nil { + b.typeCaseBody(fn, default_, x, done) + } else { + emitJump(fn, done) + } + fn.currentBlock = done +} + +func (b *builder) typeCaseBody(fn *Function, cc *ast.CaseClause, x Value, done *BasicBlock) { + if obj := fn.Pkg.info.Implicits[cc]; obj != nil { + // In a switch y := x.(type), each case clause + // implicitly declares a distinct object y. + // In a single-type case, y has that type. + // In multi-type cases, 'case nil' and default, + // y has the same type as the interface operand. 
+ emitStore(fn, fn.addNamedLocal(obj), x, obj.Pos()) + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, cc.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) +} + +// selectStmt emits to fn code for the select statement s, optionally +// labelled by label. +// +func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { + // A blocking select of a single case degenerates to a + // simple send or receive. + // TODO(adonovan): opt: is this optimization worth its weight? + if len(s.Body.List) == 1 { + clause := s.Body.List[0].(*ast.CommClause) + if clause.Comm != nil { + b.stmt(fn, clause.Comm) + done := fn.newBasicBlock("select.done") + if label != nil { + label._break = done + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, clause.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) + fn.currentBlock = done + return + } + } + + // First evaluate all channels in all cases, and find + // the directions of each state. 
+ var states []*SelectState + blocking := true + debugInfo := fn.debugInfo() + for _, clause := range s.Body.List { + var st *SelectState + switch comm := clause.(*ast.CommClause).Comm.(type) { + case nil: // default case + blocking = false + continue + + case *ast.SendStmt: // ch<- i + ch := b.expr(fn, comm.Chan) + st = &SelectState{ + Dir: types.SendOnly, + Chan: ch, + Send: emitConv(fn, b.expr(fn, comm.Value), + ch.Type().Underlying().(*types.Chan).Elem()), + Pos: comm.Arrow, + } + if debugInfo { + st.DebugNode = comm + } + + case *ast.AssignStmt: // x := <-ch + recv := unparen(comm.Rhs[0]).(*ast.UnaryExpr) + st = &SelectState{ + Dir: types.RecvOnly, + Chan: b.expr(fn, recv.X), + Pos: recv.OpPos, + } + if debugInfo { + st.DebugNode = recv + } + + case *ast.ExprStmt: // <-ch + recv := unparen(comm.X).(*ast.UnaryExpr) + st = &SelectState{ + Dir: types.RecvOnly, + Chan: b.expr(fn, recv.X), + Pos: recv.OpPos, + } + if debugInfo { + st.DebugNode = recv + } + } + states = append(states, st) + } + + // We dispatch on the (fair) result of Select using a + // sequential if-else chain, in effect: + // + // idx, recvOk, r0...r_n-1 := select(...) + // if idx == 0 { // receive on channel 0 (first receive => r0) + // x, ok := r0, recvOk + // ...state0... + // } else if v == 1 { // send on channel 1 + // ...state1... + // } else { + // ...default... 
+ // } + sel := &Select{ + States: states, + Blocking: blocking, + } + sel.setPos(s.Select) + var vars []*types.Var + vars = append(vars, varIndex, varOk) + for _, st := range states { + if st.Dir == types.RecvOnly { + tElem := st.Chan.Type().Underlying().(*types.Chan).Elem() + vars = append(vars, anonVar(tElem)) + } + } + sel.setType(types.NewTuple(vars...)) + + fn.emit(sel) + idx := emitExtract(fn, sel, 0) + + done := fn.newBasicBlock("select.done") + if label != nil { + label._break = done + } + + var defaultBody *[]ast.Stmt + state := 0 + r := 2 // index in 'sel' tuple of value; increments if st.Dir==RECV + for _, cc := range s.Body.List { + clause := cc.(*ast.CommClause) + if clause.Comm == nil { + defaultBody = &clause.Body + continue + } + body := fn.newBasicBlock("select.body") + next := fn.newBasicBlock("select.next") + emitIf(fn, emitCompare(fn, token.EQL, idx, intConst(int64(state)), token.NoPos), body, next) + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + switch comm := clause.Comm.(type) { + case *ast.ExprStmt: // <-ch + if debugInfo { + v := emitExtract(fn, sel, r) + emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) + } + r++ + + case *ast.AssignStmt: // x := <-states[state].Chan + if comm.Tok == token.DEFINE { + fn.addLocalForIdent(comm.Lhs[0].(*ast.Ident)) + } + x := b.addr(fn, comm.Lhs[0], false) // non-escaping + v := emitExtract(fn, sel, r) + if debugInfo { + emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) + } + x.store(fn, v) + + if len(comm.Lhs) == 2 { // x, ok := ... 
+ if comm.Tok == token.DEFINE { + fn.addLocalForIdent(comm.Lhs[1].(*ast.Ident)) + } + ok := b.addr(fn, comm.Lhs[1], false) // non-escaping + ok.store(fn, emitExtract(fn, sel, 1)) + } + r++ + } + b.stmtList(fn, clause.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) + fn.currentBlock = next + state++ + } + if defaultBody != nil { + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, *defaultBody) + fn.targets = fn.targets.tail + } else { + // A blocking select must match some case. + // (This should really be a runtime.errorString, not a string.) + fn.emit(&Panic{ + X: emitConv(fn, stringConst("blocking select matched no case"), tEface), + }) + fn.currentBlock = fn.newBasicBlock("unreachable") + } + emitJump(fn, done) + fn.currentBlock = done +} + +// forStmt emits to fn code for the for statement s, optionally +// labelled by label. +// +func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { + // ...init... + // jump loop + // loop: + // if cond goto body else done + // body: + // ...body... + // jump post + // post: (target of continue) + // ...post... 
+ // jump loop + // done: (target of break) + if s.Init != nil { + b.stmt(fn, s.Init) + } + body := fn.newBasicBlock("for.body") + done := fn.newBasicBlock("for.done") // target of 'break' + loop := body // target of back-edge + if s.Cond != nil { + loop = fn.newBasicBlock("for.loop") + } + cont := loop // target of 'continue' + if s.Post != nil { + cont = fn.newBasicBlock("for.post") + } + if label != nil { + label._break = done + label._continue = cont + } + emitJump(fn, loop) + fn.currentBlock = loop + if loop != body { + b.cond(fn, s.Cond, body, done) + fn.currentBlock = body + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: cont, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, cont) + + if s.Post != nil { + fn.currentBlock = cont + b.stmt(fn, s.Post) + emitJump(fn, loop) // back-edge + } + fn.currentBlock = done +} + +// rangeIndexed emits to fn the header for an integer-indexed loop +// over array, *array or slice value x. +// The v result is defined only if tv is non-nil. +// forPos is the position of the "for" token. +// +func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { + // + // length = len(x) + // index = -1 + // loop: (target of continue) + // index++ + // if index < length goto body else done + // body: + // k = index + // v = x[index] + // ...body... + // jump loop + // done: (target of break) + + // Determine number of iterations. + var length Value + if arr, ok := deref(x.Type()).Underlying().(*types.Array); ok { + // For array or *array, the number of iterations is + // known statically thanks to the type. We avoid a + // data dependence upon x, permitting later dead-code + // elimination if x is pure, static unrolling, etc. + // Ranging over a nil *array may have >0 iterations. + // We still generate code for x, in case it has effects. + length = intConst(arr.Len()) + } else { + // length = len(x). 
+ var c Call + c.Call.Value = makeLen(x.Type()) + c.Call.Args = []Value{x} + c.setType(tInt) + length = fn.emit(&c) + } + + index := fn.addLocal(tInt, token.NoPos) + emitStore(fn, index, intConst(-1), pos) + + loop = fn.newBasicBlock("rangeindex.loop") + emitJump(fn, loop) + fn.currentBlock = loop + + incr := &BinOp{ + Op: token.ADD, + X: emitLoad(fn, index), + Y: vOne, + } + incr.setType(tInt) + emitStore(fn, index, fn.emit(incr), pos) + + body := fn.newBasicBlock("rangeindex.body") + done = fn.newBasicBlock("rangeindex.done") + emitIf(fn, emitCompare(fn, token.LSS, incr, length, token.NoPos), body, done) + fn.currentBlock = body + + k = emitLoad(fn, index) + if tv != nil { + switch t := x.Type().Underlying().(type) { + case *types.Array: + instr := &Index{ + X: x, + Index: k, + } + instr.setType(t.Elem()) + v = fn.emit(instr) + + case *types.Pointer: // *array + instr := &IndexAddr{ + X: x, + Index: k, + } + instr.setType(types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())) + v = emitLoad(fn, fn.emit(instr)) + + case *types.Slice: + instr := &IndexAddr{ + X: x, + Index: k, + } + instr.setType(types.NewPointer(t.Elem())) + v = emitLoad(fn, fn.emit(instr)) + + default: + panic("rangeIndexed x:" + t.String()) + } + } + return +} + +// rangeIter emits to fn the header for a loop using +// Range/Next/Extract to iterate over map or string value x. +// tk and tv are the types of the key/value results k and v, or nil +// if the respective component is not wanted. +// +func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { + // + // it = range x + // loop: (target of continue) + // okv = next it (ok, key, value) + // ok = extract okv #0 + // if ok goto body else done + // body: + // k = extract okv #1 + // v = extract okv #2 + // ...body... 
+ // jump loop + // done: (target of break) + // + + if tk == nil { + tk = tInvalid + } + if tv == nil { + tv = tInvalid + } + + rng := &Range{X: x} + rng.setPos(pos) + rng.setType(tRangeIter) + it := fn.emit(rng) + + loop = fn.newBasicBlock("rangeiter.loop") + emitJump(fn, loop) + fn.currentBlock = loop + + _, isString := x.Type().Underlying().(*types.Basic) + + okv := &Next{ + Iter: it, + IsString: isString, + } + okv.setType(types.NewTuple( + varOk, + newVar("k", tk), + newVar("v", tv), + )) + fn.emit(okv) + + body := fn.newBasicBlock("rangeiter.body") + done = fn.newBasicBlock("rangeiter.done") + emitIf(fn, emitExtract(fn, okv, 0), body, done) + fn.currentBlock = body + + if tk != tInvalid { + k = emitExtract(fn, okv, 1) + } + if tv != tInvalid { + v = emitExtract(fn, okv, 2) + } + return +} + +// rangeChan emits to fn the header for a loop that receives from +// channel x until it fails. +// tk is the channel's element type, or nil if the k result is +// not wanted +// pos is the position of the '=' or ':=' token. +// +func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) { + // + // loop: (target of continue) + // ko = <-x (key, ok) + // ok = extract ko #1 + // if ok goto body else done + // body: + // k = extract ko #0 + // ... + // goto loop + // done: (target of break) + + loop = fn.newBasicBlock("rangechan.loop") + emitJump(fn, loop) + fn.currentBlock = loop + recv := &UnOp{ + Op: token.ARROW, + X: x, + CommaOk: true, + } + recv.setPos(pos) + recv.setType(types.NewTuple( + newVar("k", x.Type().Underlying().(*types.Chan).Elem()), + varOk, + )) + ko := fn.emit(recv) + body := fn.newBasicBlock("rangechan.body") + done = fn.newBasicBlock("rangechan.done") + emitIf(fn, emitExtract(fn, ko, 1), body, done) + fn.currentBlock = body + if tk != nil { + k = emitExtract(fn, ko, 0) + } + return +} + +// rangeStmt emits to fn code for the range statement s, optionally +// labelled by label. 
+// +func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { + var tk, tv types.Type + if s.Key != nil && !isBlankIdent(s.Key) { + tk = fn.Pkg.typeOf(s.Key) + } + if s.Value != nil && !isBlankIdent(s.Value) { + tv = fn.Pkg.typeOf(s.Value) + } + + // If iteration variables are defined (:=), this + // occurs once outside the loop. + // + // Unlike a short variable declaration, a RangeStmt + // using := never redeclares an existing variable; it + // always creates a new one. + if s.Tok == token.DEFINE { + if tk != nil { + fn.addLocalForIdent(s.Key.(*ast.Ident)) + } + if tv != nil { + fn.addLocalForIdent(s.Value.(*ast.Ident)) + } + } + + x := b.expr(fn, s.X) + + var k, v Value + var loop, done *BasicBlock + switch rt := x.Type().Underlying().(type) { + case *types.Slice, *types.Array, *types.Pointer: // *array + k, v, loop, done = b.rangeIndexed(fn, x, tv, s.For) + + case *types.Chan: + k, loop, done = b.rangeChan(fn, x, tk, s.For) + + case *types.Map, *types.Basic: // string + k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For) + + default: + panic("Cannot range over: " + rt.String()) + } + + // Evaluate both LHS expressions before we update either. + var kl, vl lvalue + if tk != nil { + kl = b.addr(fn, s.Key, false) // non-escaping + } + if tv != nil { + vl = b.addr(fn, s.Value, false) // non-escaping + } + if tk != nil { + kl.store(fn, k) + } + if tv != nil { + vl.store(fn, v) + } + + if label != nil { + label._break = done + label._continue = loop + } + + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: loop, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, loop) // back-edge + fn.currentBlock = done +} + +// stmt lowers statement s to SSA form, emitting code to fn. +func (b *builder) stmt(fn *Function, _s ast.Stmt) { + // The label of the current statement. 
If non-nil, its _goto + // target is always set; its _break and _continue are set only + // within the body of switch/typeswitch/select/for/range. + // It is effectively an additional default-nil parameter of stmt(). + var label *lblock +start: + switch s := _s.(type) { + case *ast.EmptyStmt: + // ignore. (Usually removed by gofmt.) + + case *ast.DeclStmt: // Con, Var or Typ + d := s.Decl.(*ast.GenDecl) + if d.Tok == token.VAR { + for _, spec := range d.Specs { + if vs, ok := spec.(*ast.ValueSpec); ok { + b.localValueSpec(fn, vs) + } + } + } + + case *ast.LabeledStmt: + label = fn.labelledBlock(s.Label) + emitJump(fn, label._goto) + fn.currentBlock = label._goto + _s = s.Stmt + goto start // effectively: tailcall stmt(fn, s.Stmt, label) + + case *ast.ExprStmt: + b.expr(fn, s.X) + + case *ast.SendStmt: + fn.emit(&Send{ + Chan: b.expr(fn, s.Chan), + X: emitConv(fn, b.expr(fn, s.Value), + fn.Pkg.typeOf(s.Chan).Underlying().(*types.Chan).Elem()), + pos: s.Arrow, + }) + + case *ast.IncDecStmt: + op := token.ADD + if s.Tok == token.DEC { + op = token.SUB + } + loc := b.addr(fn, s.X, false) + b.assignOp(fn, loc, NewConst(exact.MakeInt64(1), loc.typ()), op) + + case *ast.AssignStmt: + switch s.Tok { + case token.ASSIGN, token.DEFINE: + b.assignStmt(fn, s.Lhs, s.Rhs, s.Tok == token.DEFINE) + + default: // +=, etc. + op := s.Tok + token.ADD - token.ADD_ASSIGN + b.assignOp(fn, b.addr(fn, s.Lhs[0], false), b.expr(fn, s.Rhs[0]), op) + } + + case *ast.GoStmt: + // The "intrinsics" new/make/len/cap are forbidden here. + // panic is treated like an ordinary function call. + v := Go{pos: s.Go} + b.setCall(fn, s.Call, &v.Call) + fn.emit(&v) + + case *ast.DeferStmt: + // The "intrinsics" new/make/len/cap are forbidden here. + // panic is treated like an ordinary function call. + v := Defer{pos: s.Defer} + b.setCall(fn, s.Call, &v.Call) + fn.emit(&v) + + // A deferred call can cause recovery from panic, + // and control resumes at the Recover block. 
+ createRecoverBlock(fn) + + case *ast.ReturnStmt: + var results []Value + if len(s.Results) == 1 && fn.Signature.Results().Len() > 1 { + // Return of one expression in a multi-valued function. + tuple := b.exprN(fn, s.Results[0]) + ttuple := tuple.Type().(*types.Tuple) + for i, n := 0, ttuple.Len(); i < n; i++ { + results = append(results, + emitConv(fn, emitExtract(fn, tuple, i), + fn.Signature.Results().At(i).Type())) + } + } else { + // 1:1 return, or no-arg return in non-void function. + for i, r := range s.Results { + v := emitConv(fn, b.expr(fn, r), fn.Signature.Results().At(i).Type()) + results = append(results, v) + } + } + if fn.namedResults != nil { + // Function has named result parameters (NRPs). + // Perform parallel assignment of return operands to NRPs. + for i, r := range results { + emitStore(fn, fn.namedResults[i], r, s.Return) + } + } + // Run function calls deferred in this + // function when explicitly returning from it. + fn.emit(new(RunDefers)) + if fn.namedResults != nil { + // Reload NRPs to form the result tuple. 
+ results = results[:0] + for _, r := range fn.namedResults { + results = append(results, emitLoad(fn, r)) + } + } + fn.emit(&Return{Results: results, pos: s.Return}) + fn.currentBlock = fn.newBasicBlock("unreachable") + + case *ast.BranchStmt: + var block *BasicBlock + switch s.Tok { + case token.BREAK: + if s.Label != nil { + block = fn.labelledBlock(s.Label)._break + } else { + for t := fn.targets; t != nil && block == nil; t = t.tail { + block = t._break + } + } + + case token.CONTINUE: + if s.Label != nil { + block = fn.labelledBlock(s.Label)._continue + } else { + for t := fn.targets; t != nil && block == nil; t = t.tail { + block = t._continue + } + } + + case token.FALLTHROUGH: + for t := fn.targets; t != nil && block == nil; t = t.tail { + block = t._fallthrough + } + + case token.GOTO: + block = fn.labelledBlock(s.Label)._goto + } + emitJump(fn, block) + fn.currentBlock = fn.newBasicBlock("unreachable") + + case *ast.BlockStmt: + b.stmtList(fn, s.List) + + case *ast.IfStmt: + if s.Init != nil { + b.stmt(fn, s.Init) + } + then := fn.newBasicBlock("if.then") + done := fn.newBasicBlock("if.done") + els := done + if s.Else != nil { + els = fn.newBasicBlock("if.else") + } + b.cond(fn, s.Cond, then, els) + fn.currentBlock = then + b.stmt(fn, s.Body) + emitJump(fn, done) + + if s.Else != nil { + fn.currentBlock = els + b.stmt(fn, s.Else) + emitJump(fn, done) + } + + fn.currentBlock = done + + case *ast.SwitchStmt: + b.switchStmt(fn, s, label) + + case *ast.TypeSwitchStmt: + b.typeSwitchStmt(fn, s, label) + + case *ast.SelectStmt: + b.selectStmt(fn, s, label) + + case *ast.ForStmt: + b.forStmt(fn, s, label) + + case *ast.RangeStmt: + b.rangeStmt(fn, s, label) + + default: + panic(fmt.Sprintf("unexpected statement kind: %T", s)) + } +} + +// buildFunction builds SSA code for the body of function fn. Idempotent. 
+func (b *builder) buildFunction(fn *Function) { + if fn.Blocks != nil { + return // building already started + } + + var recvField *ast.FieldList + var body *ast.BlockStmt + var functype *ast.FuncType + switch n := fn.syntax.(type) { + case nil: + return // not a Go source function. (Synthetic, or from object file.) + case *ast.FuncDecl: + functype = n.Type + recvField = n.Recv + body = n.Body + case *ast.FuncLit: + functype = n.Type + body = n.Body + default: + panic(n) + } + + if body == nil { + // External function. + if fn.Params == nil { + // This condition ensures we add a non-empty + // params list once only, but we may attempt + // the degenerate empty case repeatedly. + // TODO(adonovan): opt: don't do that. + + // We set Function.Params even though there is no body + // code to reference them. This simplifies clients. + if recv := fn.Signature.Recv(); recv != nil { + fn.addParamObj(recv) + } + params := fn.Signature.Params() + for i, n := 0, params.Len(); i < n; i++ { + fn.addParamObj(params.At(i)) + } + } + return + } + if fn.Prog.mode&LogSource != 0 { + defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))() + } + fn.startBody() + fn.createSyntacticParams(recvField, functype) + b.stmt(fn, body) + if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb == fn.Recover || cb.Preds != nil) { + // Control fell off the end of the function's body block. + // + // Block optimizations eliminate the current block, if + // unreachable. It is a builder invariant that + // if this no-arg return is ill-typed for + // fn.Signature.Results, this block must be + // unreachable. The sanity checker checks this. + fn.emit(new(RunDefers)) + fn.emit(new(Return)) + } + fn.finishBody() +} + +// buildFuncDecl builds SSA code for the function or method declared +// by decl in package pkg. 
+// +func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { + id := decl.Name + if isBlankIdent(id) { + return // discard + } + fn := pkg.values[pkg.info.Defs[id]].(*Function) + if decl.Recv == nil && id.Name == "init" { + var v Call + v.Call.Value = fn + v.setType(types.NewTuple()) + pkg.init.emit(&v) + } + b.buildFunction(fn) +} + +// Build calls Package.Build for each package in prog. +// Building occurs in parallel unless the BuildSerially mode flag was set. +// +// Build is intended for whole-program analysis; a typical compiler +// need only build a single package. +// +// Build is idempotent and thread-safe. +// +func (prog *Program) Build() { + var wg sync.WaitGroup + for _, p := range prog.packages { + if prog.mode&BuildSerially != 0 { + p.Build() + } else { + wg.Add(1) + go func(p *Package) { + p.Build() + wg.Done() + }(p) + } + } + wg.Wait() +} + +// Build builds SSA code for all functions and vars in package p. +// +// Precondition: CreatePackage must have been called for all of p's +// direct imports (and hence its direct imports must have been +// error-free). +// +// Build is idempotent and thread-safe. +// +func (p *Package) Build() { p.buildOnce.Do(p.build) } + +func (p *Package) build() { + if p.info == nil { + return // synthetic package, e.g. "testmain" + } + if p.files == nil { + p.info = nil + return // package loaded from export data + } + + // Ensure we have runtime type info for all exported members. + // TODO(adonovan): ideally belongs in memberFromObject, but + // that would require package creation in topological order. + for name, mem := range p.Members { + if ast.IsExported(name) { + p.Prog.needMethodsOf(mem.Type()) + } + } + if p.Prog.mode&LogSource != 0 { + defer logStack("build %s", p)() + } + init := p.init + init.startBody() + + var done *BasicBlock + + if p.Prog.mode&BareInits == 0 { + // Make init() skip if package is already initialized. 
+ initguard := p.Var("init$guard") + doinit := init.newBasicBlock("init.start") + done = init.newBasicBlock("init.done") + emitIf(init, emitLoad(init, initguard), done, doinit) + init.currentBlock = doinit + emitStore(init, initguard, vTrue, token.NoPos) + + // Call the init() function of each package we import. + for _, pkg := range p.Pkg.Imports() { + prereq := p.Prog.packages[pkg] + if prereq == nil { + panic(fmt.Sprintf("Package(%q).Build(): unsatisfied import: Program.CreatePackage(%q) was not called", p.Pkg.Path(), pkg.Path())) + } + var v Call + v.Call.Value = prereq.init + v.Call.pos = init.pos + v.setType(types.NewTuple()) + init.emit(&v) + } + } + + var b builder + + // Initialize package-level vars in correct order. + for _, varinit := range p.info.InitOrder { + if init.Prog.mode&LogSource != 0 { + fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n", + varinit.Lhs, p.Prog.Fset.Position(varinit.Rhs.Pos())) + } + if len(varinit.Lhs) == 1 { + // 1:1 initialization: var x, y = a(), b() + var lval lvalue + if v := varinit.Lhs[0]; v.Name() != "_" { + lval = &address{addr: p.values[v].(*Global), pos: v.Pos()} + } else { + lval = blank{} + } + b.assign(init, lval, varinit.Rhs, true, nil) + } else { + // n:1 initialization: var x, y := f() + tuple := b.exprN(init, varinit.Rhs) + for i, v := range varinit.Lhs { + if v.Name() == "_" { + continue + } + emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i), v.Pos()) + } + } + } + + // Build all package-level functions, init functions + // and methods, including unreachable/blank ones. + // We build them in source order, but it's not significant. + for _, file := range p.files { + for _, decl := range file.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok { + b.buildFuncDecl(p, decl) + } + } + } + + // Finish up init(). 
+ if p.Prog.mode&BareInits == 0 { + emitJump(init, done) + init.currentBlock = done + } + init.emit(new(Return)) + init.finishBody() + + p.info = nil // We no longer need ASTs or go/types deductions. + + if p.Prog.mode&SanityCheckFunctions != 0 { + sanityCheckPackage(p) + } +} + +// Like ObjectOf, but panics instead of returning nil. +// Only valid during p's create and build phases. +func (p *Package) objectOf(id *ast.Ident) types.Object { + if o := p.info.ObjectOf(id); o != nil { + return o + } + panic(fmt.Sprintf("no types.Object for ast.Ident %s @ %s", + id.Name, p.Prog.Fset.Position(id.Pos()))) +} + +// Like TypeOf, but panics instead of returning nil. +// Only valid during p's create and build phases. +func (p *Package) typeOf(e ast.Expr) types.Type { + if T := p.info.TypeOf(e); T != nil { + return T + } + panic(fmt.Sprintf("no type for %T @ %s", + e, p.Prog.Fset.Position(e.Pos()))) +} diff --git a/go/ssa/create.go b/go/ssa/create.go index 372d1c7288..3bef1d64f6 100644 --- a/go/ssa/create.go +++ b/go/ssa/create.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.5 +// +build !go1.8 package ssa diff --git a/go/ssa/create18.go b/go/ssa/create18.go new file mode 100644 index 0000000000..432e4a4a6b --- /dev/null +++ b/go/ssa/create18.go @@ -0,0 +1,259 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package ssa + +// This file implements the CREATE phase of SSA construction. +// See builder.go for explanation. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "os" + "sync" + + "golang.org/x/tools/go/types/typeutil" +) + +// NewProgram returns a new SSA Program. +// +// mode controls diagnostics and checking during SSA construction. 
+// +func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { + prog := &Program{ + Fset: fset, + imported: make(map[string]*Package), + packages: make(map[*types.Package]*Package), + thunks: make(map[selectionKey]*Function), + bounds: make(map[*types.Func]*Function), + mode: mode, + } + + h := typeutil.MakeHasher() // protected by methodsMu, in effect + prog.methodSets.SetHasher(h) + prog.canon.SetHasher(h) + + return prog +} + +// memberFromObject populates package pkg with a member for the +// typechecker object obj. +// +// For objects from Go source code, syntax is the associated syntax +// tree (for funcs and vars only); it will be used during the build +// phase. +// +func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { + name := obj.Name() + switch obj := obj.(type) { + case *types.TypeName: + pkg.Members[name] = &Type{ + object: obj, + pkg: pkg, + } + + case *types.Const: + c := &NamedConst{ + object: obj, + Value: NewConst(obj.Val(), obj.Type()), + pkg: pkg, + } + pkg.values[obj] = c.Value + pkg.Members[name] = c + + case *types.Var: + g := &Global{ + Pkg: pkg, + name: name, + object: obj, + typ: types.NewPointer(obj.Type()), // address + pos: obj.Pos(), + } + pkg.values[obj] = g + pkg.Members[name] = g + + case *types.Func: + sig := obj.Type().(*types.Signature) + if sig.Recv() == nil && name == "init" { + pkg.ninit++ + name = fmt.Sprintf("init#%d", pkg.ninit) + } + fn := &Function{ + name: name, + object: obj, + Signature: sig, + syntax: syntax, + pos: obj.Pos(), + Pkg: pkg, + Prog: pkg.Prog, + } + if syntax == nil { + fn.Synthetic = "loaded from gc object file" + } + + pkg.values[obj] = fn + if sig.Recv() == nil { + pkg.Members[name] = fn // package-level function + } + + default: // (incl. *types.Package) + panic("unexpected Object type: " + obj.String()) + } +} + +// membersFromDecl populates package pkg with members for each +// typechecker object (var, func, const or type) associated with the +// specified decl. 
+// +func membersFromDecl(pkg *Package, decl ast.Decl) { + switch decl := decl.(type) { + case *ast.GenDecl: // import, const, type or var + switch decl.Tok { + case token.CONST: + for _, spec := range decl.Specs { + for _, id := range spec.(*ast.ValueSpec).Names { + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], nil) + } + } + } + + case token.VAR: + for _, spec := range decl.Specs { + for _, id := range spec.(*ast.ValueSpec).Names { + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], spec) + } + } + } + + case token.TYPE: + for _, spec := range decl.Specs { + id := spec.(*ast.TypeSpec).Name + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], nil) + } + } + } + + case *ast.FuncDecl: + id := decl.Name + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], decl) + } + } +} + +// CreatePackage constructs and returns an SSA Package from the +// specified type-checked, error-free file ASTs, and populates its +// Members mapping. +// +// importable determines whether this package should be returned by a +// subsequent call to ImportedPackage(pkg.Path()). +// +// The real work of building SSA form for each function is not done +// until a subsequent call to Package.Build(). +// +func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { + p := &Package{ + Prog: prog, + Members: make(map[string]Member), + values: make(map[types.Object]Value), + Pkg: pkg, + info: info, // transient (CREATE and BUILD phases) + files: files, // transient (CREATE and BUILD phases) + } + + // Add init() function. + p.init = &Function{ + name: "init", + Signature: new(types.Signature), + Synthetic: "package initializer", + Pkg: p, + Prog: prog, + } + p.Members[p.init.name] = p.init + + // CREATE phase. + // Allocate all package members: vars, funcs, consts and types. + if len(files) > 0 { + // Go source package. 
+ for _, file := range files { + for _, decl := range file.Decls { + membersFromDecl(p, decl) + } + } + } else { + // GC-compiled binary package. + // No code. + // No position information. + scope := p.Pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + memberFromObject(p, obj, nil) + if obj, ok := obj.(*types.TypeName); ok { + named := obj.Type().(*types.Named) + for i, n := 0, named.NumMethods(); i < n; i++ { + memberFromObject(p, named.Method(i), nil) + } + } + } + } + + if prog.mode&BareInits == 0 { + // Add initializer guard variable. + initguard := &Global{ + Pkg: p, + name: "init$guard", + typ: types.NewPointer(tBool), + } + p.Members[initguard.Name()] = initguard + } + + if prog.mode&GlobalDebug != 0 { + p.SetDebugMode(true) + } + + if prog.mode&PrintPackages != 0 { + printMu.Lock() + p.WriteTo(os.Stdout) + printMu.Unlock() + } + + if importable { + prog.imported[p.Pkg.Path()] = p + } + prog.packages[p.Pkg] = p + + return p +} + +// printMu serializes printing of Packages/Functions to stdout. +var printMu sync.Mutex + +// AllPackages returns a new slice containing all packages in the +// program prog in unspecified order. +// +func (prog *Program) AllPackages() []*Package { + pkgs := make([]*Package, 0, len(prog.packages)) + for _, pkg := range prog.packages { + pkgs = append(pkgs, pkg) + } + return pkgs +} + +// ImportedPackage returns the importable SSA Package whose import +// path is path, or nil if no such SSA package has been created. +// +// Not all packages are importable. For example, no import +// declaration can resolve to the x_test package created by 'go test' +// or the ad-hoc main package created by 'go build foo.go'. 
+// +func (prog *Program) ImportedPackage(path string) *Package { + return prog.imported[path] +} diff --git a/godoc/index.go b/godoc/index.go index 725121a56b..fbf80d6b95 100644 --- a/godoc/index.go +++ b/godoc/index.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.8 + // This file contains the infrastructure to create an // identifier and full-text index for a set of Go files. // diff --git a/godoc/index18.go b/godoc/index18.go new file mode 100644 index 0000000000..49d5e95b92 --- /dev/null +++ b/godoc/index18.go @@ -0,0 +1,1592 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +// This file contains the infrastructure to create an +// identifier and full-text index for a set of Go files. +// +// Algorithm for identifier index: +// - traverse all .go files of the file tree specified by root +// - for each identifier (word) encountered, collect all occurrences (spots) +// into a list; this produces a list of spots for each word +// - reduce the lists: from a list of spots to a list of FileRuns, +// and from a list of FileRuns into a list of PakRuns +// - make a HitList from the PakRuns +// +// Details: +// - keep two lists per word: one containing package-level declarations +// that have snippets, and one containing all other spots +// - keep the snippets in a separate table indexed by snippet index +// and store the snippet index in place of the line number in a SpotInfo +// (the line number for spots with snippets is stored in the snippet) +// - at the end, create lists of alternative spellings for a given +// word +// +// Algorithm for full text index: +// - concatenate all source code in a byte buffer (in memory) +// - add the files to a file set in lockstep as they are added to the byte +// buffer such that a byte buffer offset 
corresponds to the Pos value for +// that file location +// - create a suffix array from the concatenated sources +// +// String lookup in full text index: +// - use the suffix array to lookup a string's offsets - the offsets +// correspond to the Pos values relative to the file set +// - translate the Pos values back into file and line information and +// sort the result + +package godoc + +import ( + "bufio" + "bytes" + "encoding/gob" + "errors" + "fmt" + "go/ast" + "go/doc" + "go/parser" + "go/token" + "index/suffixarray" + "io" + "log" + "os" + pathpkg "path" + "path/filepath" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "golang.org/x/tools/godoc/util" + "golang.org/x/tools/godoc/vfs" +) + +// ---------------------------------------------------------------------------- +// InterfaceSlice is a helper type for sorting interface +// slices according to some slice-specific sort criteria. + +type comparer func(x, y interface{}) bool + +type interfaceSlice struct { + slice []interface{} + less comparer +} + +// ---------------------------------------------------------------------------- +// RunList + +// A RunList is a list of entries that can be sorted according to some +// criteria. A RunList may be compressed by grouping "runs" of entries +// which are equal (according to the sort criteria) into a new RunList of +// runs. For instance, a RunList containing pairs (x, y) may be compressed +// into a RunList containing pair runs (x, {y}) where each run consists of +// a list of y's with the same x. 
+type RunList []interface{} + +func (h RunList) sort(less comparer) { + sort.Sort(&interfaceSlice{h, less}) +} + +func (p *interfaceSlice) Len() int { return len(p.slice) } +func (p *interfaceSlice) Less(i, j int) bool { return p.less(p.slice[i], p.slice[j]) } +func (p *interfaceSlice) Swap(i, j int) { p.slice[i], p.slice[j] = p.slice[j], p.slice[i] } + +// Compress entries which are the same according to a sort criteria +// (specified by less) into "runs". +func (h RunList) reduce(less comparer, newRun func(h RunList) interface{}) RunList { + if len(h) == 0 { + return nil + } + // len(h) > 0 + + // create runs of entries with equal values + h.sort(less) + + // for each run, make a new run object and collect them in a new RunList + var hh RunList + i, x := 0, h[0] + for j, y := range h { + if less(x, y) { + hh = append(hh, newRun(h[i:j])) + i, x = j, h[j] // start a new run + } + } + // add final run, if any + if i < len(h) { + hh = append(hh, newRun(h[i:])) + } + + return hh +} + +// ---------------------------------------------------------------------------- +// KindRun + +// Debugging support. Disable to see multiple entries per line. +const removeDuplicates = true + +// A KindRun is a run of SpotInfos of the same kind in a given file. +// The kind (3 bits) is stored in each SpotInfo element; to find the +// kind of a KindRun, look at any of its elements. +type KindRun []SpotInfo + +// KindRuns are sorted by line number or index. Since the isIndex bit +// is always the same for all infos in one list we can compare lori's. +func (k KindRun) Len() int { return len(k) } +func (k KindRun) Less(i, j int) bool { return k[i].Lori() < k[j].Lori() } +func (k KindRun) Swap(i, j int) { k[i], k[j] = k[j], k[i] } + +// FileRun contents are sorted by Kind for the reduction into KindRuns. +func lessKind(x, y interface{}) bool { return x.(SpotInfo).Kind() < y.(SpotInfo).Kind() } + +// newKindRun allocates a new KindRun from the SpotInfo run h. 
+func newKindRun(h RunList) interface{} { + run := make(KindRun, len(h)) + for i, x := range h { + run[i] = x.(SpotInfo) + } + + // Spots were sorted by file and kind to create this run. + // Within this run, sort them by line number or index. + sort.Sort(run) + + if removeDuplicates { + // Since both the lori and kind field must be + // same for duplicates, and since the isIndex + // bit is always the same for all infos in one + // list we can simply compare the entire info. + k := 0 + prev := SpotInfo(1<<32 - 1) // an unlikely value + for _, x := range run { + if x != prev { + run[k] = x + k++ + prev = x + } + } + run = run[0:k] + } + + return run +} + +// ---------------------------------------------------------------------------- +// FileRun + +// A Pak describes a Go package. +type Pak struct { + Path string // path of directory containing the package + Name string // package name as declared by package clause +} + +// Paks are sorted by name (primary key) and by import path (secondary key). +func (p *Pak) less(q *Pak) bool { + return p.Name < q.Name || p.Name == q.Name && p.Path < q.Path +} + +// A File describes a Go file. +type File struct { + Name string // directory-local file name + Pak *Pak // the package to which the file belongs +} + +// Path returns the file path of f. +func (f *File) Path() string { + return pathpkg.Join(f.Pak.Path, f.Name) +} + +// A Spot describes a single occurrence of a word. +type Spot struct { + File *File + Info SpotInfo +} + +// A FileRun is a list of KindRuns belonging to the same file. +type FileRun struct { + File *File + Groups []KindRun +} + +// Spots are sorted by file path for the reduction into FileRuns. 
+func lessSpot(x, y interface{}) bool { + fx := x.(Spot).File + fy := y.(Spot).File + // same as "return fx.Path() < fy.Path()" but w/o computing the file path first + px := fx.Pak.Path + py := fy.Pak.Path + return px < py || px == py && fx.Name < fy.Name +} + +// newFileRun allocates a new FileRun from the Spot run h. +func newFileRun(h RunList) interface{} { + file := h[0].(Spot).File + + // reduce the list of Spots into a list of KindRuns + h1 := make(RunList, len(h)) + for i, x := range h { + h1[i] = x.(Spot).Info + } + h2 := h1.reduce(lessKind, newKindRun) + + // create the FileRun + groups := make([]KindRun, len(h2)) + for i, x := range h2 { + groups[i] = x.(KindRun) + } + return &FileRun{file, groups} +} + +// ---------------------------------------------------------------------------- +// PakRun + +// A PakRun describes a run of *FileRuns of a package. +type PakRun struct { + Pak *Pak + Files []*FileRun +} + +// Sorting support for files within a PakRun. +func (p *PakRun) Len() int { return len(p.Files) } +func (p *PakRun) Less(i, j int) bool { return p.Files[i].File.Name < p.Files[j].File.Name } +func (p *PakRun) Swap(i, j int) { p.Files[i], p.Files[j] = p.Files[j], p.Files[i] } + +// FileRuns are sorted by package for the reduction into PakRuns. +func lessFileRun(x, y interface{}) bool { + return x.(*FileRun).File.Pak.less(y.(*FileRun).File.Pak) +} + +// newPakRun allocates a new PakRun from the *FileRun run h. +func newPakRun(h RunList) interface{} { + pak := h[0].(*FileRun).File.Pak + files := make([]*FileRun, len(h)) + for i, x := range h { + files[i] = x.(*FileRun) + } + run := &PakRun{pak, files} + sort.Sort(run) // files were sorted by package; sort them by file now + return run +} + +// ---------------------------------------------------------------------------- +// HitList + +// A HitList describes a list of PakRuns. +type HitList []*PakRun + +// PakRuns are sorted by package. 
+func lessPakRun(x, y interface{}) bool { return x.(*PakRun).Pak.less(y.(*PakRun).Pak) } + +func reduce(h0 RunList) HitList { + // reduce a list of Spots into a list of FileRuns + h1 := h0.reduce(lessSpot, newFileRun) + // reduce a list of FileRuns into a list of PakRuns + h2 := h1.reduce(lessFileRun, newPakRun) + // sort the list of PakRuns by package + h2.sort(lessPakRun) + // create a HitList + h := make(HitList, len(h2)) + for i, p := range h2 { + h[i] = p.(*PakRun) + } + return h +} + +// filter returns a new HitList created by filtering +// all PakRuns from h that have a matching pakname. +func (h HitList) filter(pakname string) HitList { + var hh HitList + for _, p := range h { + if p.Pak.Name == pakname { + hh = append(hh, p) + } + } + return hh +} + +// ---------------------------------------------------------------------------- +// AltWords + +type wordPair struct { + canon string // canonical word spelling (all lowercase) + alt string // alternative spelling +} + +// An AltWords describes a list of alternative spellings for a +// canonical (all lowercase) spelling of a word. +type AltWords struct { + Canon string // canonical word spelling (all lowercase) + Alts []string // alternative spelling for the same word +} + +// wordPairs are sorted by their canonical spelling. +func lessWordPair(x, y interface{}) bool { return x.(*wordPair).canon < y.(*wordPair).canon } + +// newAltWords allocates a new AltWords from the *wordPair run h. 
+func newAltWords(h RunList) interface{} { + canon := h[0].(*wordPair).canon + alts := make([]string, len(h)) + for i, x := range h { + alts[i] = x.(*wordPair).alt + } + return &AltWords{canon, alts} +} + +func (a *AltWords) filter(s string) *AltWords { + var alts []string + for _, w := range a.Alts { + if w != s { + alts = append(alts, w) + } + } + if len(alts) > 0 { + return &AltWords{a.Canon, alts} + } + return nil +} + +// Ident stores information about external identifiers in order to create +// links to package documentation. +type Ident struct { + Path string // e.g. "net/http" + Package string // e.g. "http" + Name string // e.g. "NewRequest" + Doc string // e.g. "NewRequest returns a new Request..." +} + +// byImportCount sorts the given slice of Idents by the import +// counts of the packages to which they belong. +type byImportCount struct { + Idents []Ident + ImportCount map[string]int +} + +func (ic byImportCount) Len() int { + return len(ic.Idents) +} + +func (ic byImportCount) Less(i, j int) bool { + ri := ic.ImportCount[ic.Idents[i].Path] + rj := ic.ImportCount[ic.Idents[j].Path] + if ri == rj { + return ic.Idents[i].Path < ic.Idents[j].Path + } + return ri > rj +} + +func (ic byImportCount) Swap(i, j int) { + ic.Idents[i], ic.Idents[j] = ic.Idents[j], ic.Idents[i] +} + +func (ic byImportCount) String() string { + buf := bytes.NewBuffer([]byte("[")) + for _, v := range ic.Idents { + buf.WriteString(fmt.Sprintf("\n\t%s, %s (%d)", v.Path, v.Name, ic.ImportCount[v.Path])) + } + buf.WriteString("\n]") + return buf.String() +} + +// filter creates a new Ident list where the results match the given +// package name. +func (ic byImportCount) filter(pakname string) []Ident { + if ic.Idents == nil { + return nil + } + var res []Ident + for _, i := range ic.Idents { + if i.Package == pakname { + res = append(res, i) + } + } + return res +} + +// top returns the top n identifiers. 
+func (ic byImportCount) top(n int) []Ident { + if len(ic.Idents) > n { + return ic.Idents[:n] + } + return ic.Idents +} + +// ---------------------------------------------------------------------------- +// Indexer + +type IndexResult struct { + Decls RunList // package-level declarations (with snippets) + Others RunList // all other occurrences +} + +// Statistics provides statistics information for an index. +type Statistics struct { + Bytes int // total size of indexed source files + Files int // number of indexed source files + Lines int // number of lines (all files) + Words int // number of different identifiers + Spots int // number of identifier occurrences +} + +// An Indexer maintains the data structures and provides the machinery +// for indexing .go files under a file tree. It implements the path.Visitor +// interface for walking file trees, and the ast.Visitor interface for +// walking Go ASTs. +type Indexer struct { + c *Corpus + fset *token.FileSet // file set for all indexed files + fsOpenGate chan bool // send pre fs.Open; receive on close + + mu sync.Mutex // guards all the following + sources bytes.Buffer // concatenated sources + strings map[string]string // interned string + packages map[Pak]*Pak // interned *Paks + words map[string]*IndexResult // RunLists of Spots + snippets []*Snippet // indices are stored in SpotInfos + current *token.File // last file added to file set + file *File // AST for current file + decl ast.Decl // AST for current decl + stats Statistics + throttle *util.Throttle + importCount map[string]int // package path ("net/http") => count + packagePath map[string]map[string]bool // "template" => "text/template" => true + exports map[string]map[string]SpotKind // "net/http" => "ListenAndServe" => FuncDecl + curPkgExports map[string]SpotKind + idents map[SpotKind]map[string][]Ident // kind => name => list of Idents +} + +func (x *Indexer) intern(s string) string { + if s, ok := x.strings[s]; ok { + return s + } + 
x.strings[s] = s + return s +} + +func (x *Indexer) lookupPackage(path, name string) *Pak { + // In the source directory tree, more than one package may + // live in the same directory. For the packages map, construct + // a key that includes both the directory path and the package + // name. + key := Pak{Path: x.intern(path), Name: x.intern(name)} + pak := x.packages[key] + if pak == nil { + pak = &key + x.packages[key] = pak + } + return pak +} + +func (x *Indexer) addSnippet(s *Snippet) int { + index := len(x.snippets) + x.snippets = append(x.snippets, s) + return index +} + +func (x *Indexer) visitIdent(kind SpotKind, id *ast.Ident) { + if id == nil { + return + } + name := x.intern(id.Name) + + switch kind { + case TypeDecl, FuncDecl, ConstDecl, VarDecl: + x.curPkgExports[name] = kind + } + + lists, found := x.words[name] + if !found { + lists = new(IndexResult) + x.words[name] = lists + } + + if kind == Use || x.decl == nil { + if x.c.IndexGoCode { + // not a declaration or no snippet required + info := makeSpotInfo(kind, x.current.Line(id.Pos()), false) + lists.Others = append(lists.Others, Spot{x.file, info}) + } + } else { + // a declaration with snippet + index := x.addSnippet(NewSnippet(x.fset, x.decl, id)) + info := makeSpotInfo(kind, index, true) + lists.Decls = append(lists.Decls, Spot{x.file, info}) + } + + x.stats.Spots++ +} + +func (x *Indexer) visitFieldList(kind SpotKind, flist *ast.FieldList) { + for _, f := range flist.List { + x.decl = nil // no snippets for fields + for _, name := range f.Names { + x.visitIdent(kind, name) + } + ast.Walk(x, f.Type) + // ignore tag - not indexed at the moment + } +} + +func (x *Indexer) visitSpec(kind SpotKind, spec ast.Spec) { + switch n := spec.(type) { + case *ast.ImportSpec: + x.visitIdent(ImportDecl, n.Name) + if n.Path != nil { + if imp, err := strconv.Unquote(n.Path.Value); err == nil { + x.importCount[x.intern(imp)]++ + } + } + + case *ast.ValueSpec: + for _, n := range n.Names { + x.visitIdent(kind, 
n) + } + ast.Walk(x, n.Type) + for _, v := range n.Values { + ast.Walk(x, v) + } + + case *ast.TypeSpec: + x.visitIdent(TypeDecl, n.Name) + ast.Walk(x, n.Type) + } +} + +func (x *Indexer) visitGenDecl(decl *ast.GenDecl) { + kind := VarDecl + if decl.Tok == token.CONST { + kind = ConstDecl + } + x.decl = decl + for _, s := range decl.Specs { + x.visitSpec(kind, s) + } +} + +func (x *Indexer) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case nil: + // nothing to do + + case *ast.Ident: + x.visitIdent(Use, n) + + case *ast.FieldList: + x.visitFieldList(VarDecl, n) + + case *ast.InterfaceType: + x.visitFieldList(MethodDecl, n.Methods) + + case *ast.DeclStmt: + // local declarations should only be *ast.GenDecls; + // ignore incorrect ASTs + if decl, ok := n.Decl.(*ast.GenDecl); ok { + x.decl = nil // no snippets for local declarations + x.visitGenDecl(decl) + } + + case *ast.GenDecl: + x.decl = n + x.visitGenDecl(n) + + case *ast.FuncDecl: + kind := FuncDecl + if n.Recv != nil { + kind = MethodDecl + ast.Walk(x, n.Recv) + } + x.decl = n + x.visitIdent(kind, n.Name) + ast.Walk(x, n.Type) + if n.Body != nil { + ast.Walk(x, n.Body) + } + + case *ast.File: + x.decl = nil + x.visitIdent(PackageClause, n.Name) + for _, d := range n.Decls { + ast.Walk(x, d) + } + + default: + return x + } + + return nil +} + +// addFile adds a file to the index if possible and returns the file set file +// and the file's AST if it was successfully parsed as a Go file. If addFile +// failed (that is, if the file was not added), it returns file == nil. +func (x *Indexer) addFile(f vfs.ReadSeekCloser, filename string, goFile bool) (file *token.File, ast *ast.File) { + defer f.Close() + + // The file set's base offset and x.sources size must be in lock-step; + // this permits the direct mapping of suffix array lookup results to + // to corresponding Pos values. 
+ // + // When a file is added to the file set, its offset base increases by + // the size of the file + 1; and the initial base offset is 1. Add an + // extra byte to the sources here. + x.sources.WriteByte(0) + + // If the sources length doesn't match the file set base at this point + // the file set implementation changed or we have another error. + base := x.fset.Base() + if x.sources.Len() != base { + panic("internal error: file base incorrect") + } + + // append file contents (src) to x.sources + if _, err := x.sources.ReadFrom(f); err == nil { + src := x.sources.Bytes()[base:] + + if goFile { + // parse the file and in the process add it to the file set + if ast, err = parser.ParseFile(x.fset, filename, src, parser.ParseComments); err == nil { + file = x.fset.File(ast.Pos()) // ast.Pos() is inside the file + return + } + // file has parse errors, and the AST may be incorrect - + // set lines information explicitly and index as ordinary + // text file (cannot fall through to the text case below + // because the file has already been added to the file set + // by the parser) + file = x.fset.File(token.Pos(base)) // token.Pos(base) is inside the file + file.SetLinesForContent(src) + ast = nil + return + } + + if util.IsText(src) { + // only add the file to the file set (for the full text index) + file = x.fset.AddFile(filename, x.fset.Base(), len(src)) + file.SetLinesForContent(src) + return + } + } + + // discard possibly added data + x.sources.Truncate(base - 1) // -1 to remove added byte 0 since no file was added + return +} + +// Design note: Using an explicit white list of permitted files for indexing +// makes sure that the important files are included and massively reduces the +// number of files to index. The advantage over a blacklist is that unexpected +// (non-blacklisted) files won't suddenly explode the index. + +// Files are whitelisted if they have a file name or extension +// present as key in whitelisted. 
+var whitelisted = map[string]bool{ + ".bash": true, + ".c": true, + ".cc": true, + ".cpp": true, + ".cxx": true, + ".css": true, + ".go": true, + ".goc": true, + ".h": true, + ".hh": true, + ".hpp": true, + ".hxx": true, + ".html": true, + ".js": true, + ".out": true, + ".py": true, + ".s": true, + ".sh": true, + ".txt": true, + ".xml": true, + "AUTHORS": true, + "CONTRIBUTORS": true, + "LICENSE": true, + "Makefile": true, + "PATENTS": true, + "README": true, +} + +// isWhitelisted returns true if a file is on the list +// of "permitted" files for indexing. The filename must +// be the directory-local name of the file. +func isWhitelisted(filename string) bool { + key := pathpkg.Ext(filename) + if key == "" { + // file has no extension - use entire filename + key = filename + } + return whitelisted[key] +} + +func (x *Indexer) indexDocs(dirname string, filename string, astFile *ast.File) { + pkgName := x.intern(astFile.Name.Name) + if pkgName == "main" { + return + } + pkgPath := x.intern(strings.TrimPrefix(strings.TrimPrefix(dirname, "/src/"), "pkg/")) + astPkg := ast.Package{ + Name: pkgName, + Files: map[string]*ast.File{ + filename: astFile, + }, + } + var m doc.Mode + docPkg := doc.New(&astPkg, dirname, m) + addIdent := func(sk SpotKind, name string, docstr string) { + if x.idents[sk] == nil { + x.idents[sk] = make(map[string][]Ident) + } + name = x.intern(name) + x.idents[sk][name] = append(x.idents[sk][name], Ident{ + Path: pkgPath, + Package: pkgName, + Name: name, + Doc: doc.Synopsis(docstr), + }) + } + + if x.idents[PackageClause] == nil { + x.idents[PackageClause] = make(map[string][]Ident) + } + // List of words under which the package identifier will be stored. + // This includes the package name and the components of the directory + // in which it resides. + words := strings.Split(pathpkg.Dir(pkgPath), "/") + if words[0] == "." 
{ + words = []string{} + } + name := x.intern(docPkg.Name) + synopsis := doc.Synopsis(docPkg.Doc) + words = append(words, name) + pkgIdent := Ident{ + Path: pkgPath, + Package: pkgName, + Name: name, + Doc: synopsis, + } + for _, word := range words { + word = x.intern(word) + found := false + pkgs := x.idents[PackageClause][word] + for i, p := range pkgs { + if p.Path == pkgPath { + if docPkg.Doc != "" { + p.Doc = synopsis + pkgs[i] = p + } + found = true + break + } + } + if !found { + x.idents[PackageClause][word] = append(x.idents[PackageClause][word], pkgIdent) + } + } + + for _, c := range docPkg.Consts { + for _, name := range c.Names { + addIdent(ConstDecl, name, c.Doc) + } + } + for _, t := range docPkg.Types { + addIdent(TypeDecl, t.Name, t.Doc) + for _, c := range t.Consts { + for _, name := range c.Names { + addIdent(ConstDecl, name, c.Doc) + } + } + for _, v := range t.Vars { + for _, name := range v.Names { + addIdent(VarDecl, name, v.Doc) + } + } + for _, f := range t.Funcs { + addIdent(FuncDecl, f.Name, f.Doc) + } + for _, f := range t.Methods { + addIdent(MethodDecl, f.Name, f.Doc) + // Change the name of methods to be ".". + // They will still be indexed as . + idents := x.idents[MethodDecl][f.Name] + idents[len(idents)-1].Name = x.intern(t.Name + "." + f.Name) + } + } + for _, v := range docPkg.Vars { + for _, name := range v.Names { + addIdent(VarDecl, name, v.Doc) + } + } + for _, f := range docPkg.Funcs { + addIdent(FuncDecl, f.Name, f.Doc) + } +} + +func (x *Indexer) indexGoFile(dirname string, filename string, file *token.File, astFile *ast.File) { + pkgName := astFile.Name.Name + + if x.c.IndexGoCode { + x.current = file + pak := x.lookupPackage(dirname, pkgName) + x.file = &File{filename, pak} + ast.Walk(x, astFile) + } + + if x.c.IndexDocs { + // Test files are already filtered out in visitFile if IndexGoCode and + // IndexFullText are false. Otherwise, check here. 
+ isTestFile := (x.c.IndexGoCode || x.c.IndexFullText) && + (strings.HasSuffix(filename, "_test.go") || strings.HasPrefix(dirname, "/test/")) + if !isTestFile { + x.indexDocs(dirname, filename, astFile) + } + } + + ppKey := x.intern(pkgName) + if _, ok := x.packagePath[ppKey]; !ok { + x.packagePath[ppKey] = make(map[string]bool) + } + pkgPath := x.intern(strings.TrimPrefix(strings.TrimPrefix(dirname, "/src/"), "pkg/")) + x.packagePath[ppKey][pkgPath] = true + + // Merge in exported symbols found walking this file into + // the map for that package. + if len(x.curPkgExports) > 0 { + dest, ok := x.exports[pkgPath] + if !ok { + dest = make(map[string]SpotKind) + x.exports[pkgPath] = dest + } + for k, v := range x.curPkgExports { + dest[k] = v + } + } +} + +func (x *Indexer) visitFile(dirname string, fi os.FileInfo) { + if fi.IsDir() || !x.c.IndexEnabled { + return + } + + filename := pathpkg.Join(dirname, fi.Name()) + goFile := isGoFile(fi) + + switch { + case x.c.IndexFullText: + if !isWhitelisted(fi.Name()) { + return + } + case x.c.IndexGoCode: + if !goFile { + return + } + case x.c.IndexDocs: + if !goFile || + strings.HasSuffix(fi.Name(), "_test.go") || + strings.HasPrefix(dirname, "/test/") { + return + } + default: + // No indexing turned on. + return + } + + x.fsOpenGate <- true + defer func() { <-x.fsOpenGate }() + + // open file + f, err := x.c.fs.Open(filename) + if err != nil { + return + } + + x.mu.Lock() + defer x.mu.Unlock() + + x.throttle.Throttle() + + x.curPkgExports = make(map[string]SpotKind) + file, fast := x.addFile(f, filename, goFile) + if file == nil { + return // addFile failed + } + + if fast != nil { + x.indexGoFile(dirname, fi.Name(), file, fast) + } + + // update statistics + x.stats.Bytes += file.Size() + x.stats.Files++ + x.stats.Lines += file.LineCount() +} + +// indexOptions contains information that affects the contents of an index. +type indexOptions struct { + // Docs provides documentation search results. 
+ // It is only consulted if IndexEnabled is true.
+ // The default value is true.
+ Docs bool
+
+ // GoCode provides Go source code search results.
+ // It is only consulted if IndexEnabled is true.
+ // The default value is true.
+ GoCode bool
+
+ // FullText provides search results from all files.
+ // It is only consulted if IndexEnabled is true.
+ // The default value is true.
+ FullText bool
+
+ // MaxResults optionally specifies the maximum results for indexing.
+ // The default is 1000.
+ MaxResults int
+}
+
+// ----------------------------------------------------------------------------
+// Index
+
+type LookupResult struct {
+ Decls HitList // package-level declarations (with snippets)
+ Others HitList // all other occurrences
+}
+
+type Index struct {
+ fset *token.FileSet // file set used during indexing; nil if no textindex
+ suffixes *suffixarray.Index // suffixes for concatenated sources; nil if no textindex
+ words map[string]*LookupResult // maps words to hit lists
+ alts map[string]*AltWords // maps canonical(words) to lists of alternative spellings
+ snippets []*Snippet // all snippets, indexed by snippet index
+ stats Statistics
+ importCount map[string]int // package path ("net/http") => count
+ packagePath map[string]map[string]bool // "template" => "text/template" => true
+ exports map[string]map[string]SpotKind // "net/http" => "ListenAndServe" => FuncDecl
+ idents map[SpotKind]map[string][]Ident
+ opts indexOptions
+}
+
+func canonical(w string) string { return strings.ToLower(w) }
+
+// Somewhat arbitrary, but I figure low enough to not hurt disk-based filesystems
+// consuming file descriptors, where some systems have low 256 or 512 limits.
+// Go should have a built-in way to cap fd usage under the ulimit.
+const ( + maxOpenFiles = 200 + maxOpenDirs = 50 +) + +func (c *Corpus) throttle() float64 { + if c.IndexThrottle <= 0 { + return 0.9 + } + if c.IndexThrottle > 1.0 { + return 1.0 + } + return c.IndexThrottle +} + +// NewIndex creates a new index for the .go files provided by the corpus. +func (c *Corpus) NewIndex() *Index { + // initialize Indexer + // (use some reasonably sized maps to start) + x := &Indexer{ + c: c, + fset: token.NewFileSet(), + fsOpenGate: make(chan bool, maxOpenFiles), + strings: make(map[string]string), + packages: make(map[Pak]*Pak, 256), + words: make(map[string]*IndexResult, 8192), + throttle: util.NewThrottle(c.throttle(), 100*time.Millisecond), // run at least 0.1s at a time + importCount: make(map[string]int), + packagePath: make(map[string]map[string]bool), + exports: make(map[string]map[string]SpotKind), + idents: make(map[SpotKind]map[string][]Ident, 4), + } + + // index all files in the directories given by dirnames + var wg sync.WaitGroup // outstanding ReadDir + visitFile + dirGate := make(chan bool, maxOpenDirs) + for dirname := range c.fsDirnames() { + if c.IndexDirectory != nil && !c.IndexDirectory(dirname) { + continue + } + dirGate <- true + wg.Add(1) + go func(dirname string) { + defer func() { <-dirGate }() + defer wg.Done() + + list, err := c.fs.ReadDir(dirname) + if err != nil { + log.Printf("ReadDir(%q): %v; skipping directory", dirname, err) + return // ignore this directory + } + for _, fi := range list { + wg.Add(1) + go func(fi os.FileInfo) { + defer wg.Done() + x.visitFile(dirname, fi) + }(fi) + } + }(dirname) + } + wg.Wait() + + if !c.IndexFullText { + // the file set, the current file, and the sources are + // not needed after indexing if no text index is built - + // help GC and clear them + x.fset = nil + x.sources.Reset() + x.current = nil // contains reference to fset! 
+ } + + // for each word, reduce the RunLists into a LookupResult; + // also collect the word with its canonical spelling in a + // word list for later computation of alternative spellings + words := make(map[string]*LookupResult) + var wlist RunList + for w, h := range x.words { + decls := reduce(h.Decls) + others := reduce(h.Others) + words[w] = &LookupResult{ + Decls: decls, + Others: others, + } + wlist = append(wlist, &wordPair{canonical(w), w}) + x.throttle.Throttle() + } + x.stats.Words = len(words) + + // reduce the word list {canonical(w), w} into + // a list of AltWords runs {canonical(w), {w}} + alist := wlist.reduce(lessWordPair, newAltWords) + + // convert alist into a map of alternative spellings + alts := make(map[string]*AltWords) + for i := 0; i < len(alist); i++ { + a := alist[i].(*AltWords) + alts[a.Canon] = a + } + + // create text index + var suffixes *suffixarray.Index + if c.IndexFullText { + suffixes = suffixarray.New(x.sources.Bytes()) + } + + // sort idents by the number of imports of their respective packages + for _, idMap := range x.idents { + for _, ir := range idMap { + sort.Sort(byImportCount{ir, x.importCount}) + } + } + + return &Index{ + fset: x.fset, + suffixes: suffixes, + words: words, + alts: alts, + snippets: x.snippets, + stats: x.stats, + importCount: x.importCount, + packagePath: x.packagePath, + exports: x.exports, + idents: x.idents, + opts: indexOptions{ + Docs: x.c.IndexDocs, + GoCode: x.c.IndexGoCode, + FullText: x.c.IndexFullText, + MaxResults: x.c.MaxResults, + }, + } +} + +var ErrFileIndexVersion = errors.New("file index version out of date") + +const fileIndexVersion = 3 + +// fileIndex is the subset of Index that's gob-encoded for use by +// Index.Write and Index.Read. 
+type fileIndex struct { + Version int + Words map[string]*LookupResult + Alts map[string]*AltWords + Snippets []*Snippet + Fulltext bool + Stats Statistics + ImportCount map[string]int + PackagePath map[string]map[string]bool + Exports map[string]map[string]SpotKind + Idents map[SpotKind]map[string][]Ident + Opts indexOptions +} + +func (x *fileIndex) Write(w io.Writer) error { + return gob.NewEncoder(w).Encode(x) +} + +func (x *fileIndex) Read(r io.Reader) error { + return gob.NewDecoder(r).Decode(x) +} + +// WriteTo writes the index x to w. +func (x *Index) WriteTo(w io.Writer) (n int64, err error) { + w = countingWriter{&n, w} + fulltext := false + if x.suffixes != nil { + fulltext = true + } + fx := fileIndex{ + Version: fileIndexVersion, + Words: x.words, + Alts: x.alts, + Snippets: x.snippets, + Fulltext: fulltext, + Stats: x.stats, + ImportCount: x.importCount, + PackagePath: x.packagePath, + Exports: x.exports, + Idents: x.idents, + Opts: x.opts, + } + if err := fx.Write(w); err != nil { + return 0, err + } + if fulltext { + encode := func(x interface{}) error { + return gob.NewEncoder(w).Encode(x) + } + if err := x.fset.Write(encode); err != nil { + return 0, err + } + if err := x.suffixes.Write(w); err != nil { + return 0, err + } + } + return n, nil +} + +// ReadFrom reads the index from r into x; x must not be nil. +// If r does not also implement io.ByteReader, it will be wrapped in a bufio.Reader. +// If the index is from an old version, the error is ErrFileIndexVersion. +func (x *Index) ReadFrom(r io.Reader) (n int64, err error) { + // We use the ability to read bytes as a plausible surrogate for buffering. 
+ if _, ok := r.(io.ByteReader); !ok { + r = bufio.NewReader(r) + } + r = countingReader{&n, r.(byteReader)} + var fx fileIndex + if err := fx.Read(r); err != nil { + return n, err + } + if fx.Version != fileIndexVersion { + return 0, ErrFileIndexVersion + } + x.words = fx.Words + x.alts = fx.Alts + x.snippets = fx.Snippets + x.stats = fx.Stats + x.importCount = fx.ImportCount + x.packagePath = fx.PackagePath + x.exports = fx.Exports + x.idents = fx.Idents + x.opts = fx.Opts + if fx.Fulltext { + x.fset = token.NewFileSet() + decode := func(x interface{}) error { + return gob.NewDecoder(r).Decode(x) + } + if err := x.fset.Read(decode); err != nil { + return n, err + } + x.suffixes = new(suffixarray.Index) + if err := x.suffixes.Read(r); err != nil { + return n, err + } + } + return n, nil +} + +// Stats returns index statistics. +func (x *Index) Stats() Statistics { + return x.stats +} + +// ImportCount returns a map from import paths to how many times they were seen. +func (x *Index) ImportCount() map[string]int { + return x.importCount +} + +// PackagePath returns a map from short package name to a set +// of full package path names that use that short package name. +func (x *Index) PackagePath() map[string]map[string]bool { + return x.packagePath +} + +// Exports returns a map from full package path to exported +// symbol name to its type. +func (x *Index) Exports() map[string]map[string]SpotKind { + return x.exports +} + +// Idents returns a map from identifier type to exported +// symbol name to the list of identifiers matching that name. 
+func (x *Index) Idents() map[SpotKind]map[string][]Ident { + return x.idents +} + +func (x *Index) lookupWord(w string) (match *LookupResult, alt *AltWords) { + match = x.words[w] + alt = x.alts[canonical(w)] + // remove current spelling from alternatives + // (if there is no match, the alternatives do + // not contain the current spelling) + if match != nil && alt != nil { + alt = alt.filter(w) + } + return +} + +// isIdentifier reports whether s is a Go identifier. +func isIdentifier(s string) bool { + for i, ch := range s { + if unicode.IsLetter(ch) || ch == '_' || i > 0 && unicode.IsDigit(ch) { + continue + } + return false + } + return len(s) > 0 +} + +// For a given query, which is either a single identifier or a qualified +// identifier, Lookup returns a SearchResult containing packages, a LookupResult, a +// list of alternative spellings, and identifiers, if any. Any and all results +// may be nil. If the query syntax is wrong, an error is reported. +func (x *Index) Lookup(query string) (*SearchResult, error) { + ss := strings.Split(query, ".") + + // check query syntax + for _, s := range ss { + if !isIdentifier(s) { + return nil, errors.New("all query parts must be identifiers") + } + } + rslt := &SearchResult{ + Query: query, + Idents: make(map[SpotKind][]Ident, 5), + } + // handle simple and qualified identifiers + switch len(ss) { + case 1: + ident := ss[0] + rslt.Hit, rslt.Alt = x.lookupWord(ident) + if rslt.Hit != nil { + // found a match - filter packages with same name + // for the list of packages called ident, if any + rslt.Pak = rslt.Hit.Others.filter(ident) + } + for k, v := range x.idents { + const rsltLimit = 50 + ids := byImportCount{v[ident], x.importCount} + rslt.Idents[k] = ids.top(rsltLimit) + } + + case 2: + pakname, ident := ss[0], ss[1] + rslt.Hit, rslt.Alt = x.lookupWord(ident) + if rslt.Hit != nil { + // found a match - filter by package name + // (no paks - package names are not qualified) + decls := rslt.Hit.Decls.filter(pakname) 
+ others := rslt.Hit.Others.filter(pakname)
+ rslt.Hit = &LookupResult{decls, others}
+ }
+ for k, v := range x.idents {
+ ids := byImportCount{v[ident], x.importCount}
+ rslt.Idents[k] = ids.filter(pakname)
+ }
+
+ default:
+ return nil, errors.New("query is not a (qualified) identifier")
+ }
+
+ return rslt, nil
+}
+
+func (x *Index) Snippet(i int) *Snippet {
+ // handle illegal snippet indices gracefully
+ if 0 <= i && i < len(x.snippets) {
+ return x.snippets[i]
+ }
+ return nil
+}
+
+type positionList []struct {
+ filename string
+ line int
+}
+
+func (list positionList) Len() int { return len(list) }
+func (list positionList) Less(i, j int) bool { return list[i].filename < list[j].filename }
+func (list positionList) Swap(i, j int) { list[i], list[j] = list[j], list[i] }
+
+// unique returns the list sorted and with duplicate entries removed
+func unique(list []int) []int {
+ sort.Ints(list)
+ var last int
+ i := 0
+ for _, x := range list {
+ if i == 0 || x != last {
+ last = x
+ list[i] = x
+ i++
+ }
+ }
+ return list[0:i]
+}
+
+// A FileLines value specifies a file and line numbers within that file.
+type FileLines struct {
+ Filename string
+ Lines []int
+}
+
+// LookupRegexp returns the number of matches and the matches where a regular
+// expression r is found in the full text index. At most n matches are
+// returned (thus found <= n).
+//
+func (x *Index) LookupRegexp(r *regexp.Regexp, n int) (found int, result []FileLines) {
+ if x.suffixes == nil || n <= 0 {
+ return
+ }
+ // n > 0
+
+ var list positionList
+ // FindAllIndex may return matches that span across file boundaries.
+ // Such matches are unlikely, but after eliminating them we may end up
+ // with fewer than n matches. If we don't have enough at the end, redo
+ // the search with an increased value n1, but only if FindAllIndex
+ // returned all the requested matches in the first place (if it
+ // returned fewer than that there cannot be more).
+ for n1 := n; found < n; n1 += n - found { + found = 0 + matches := x.suffixes.FindAllIndex(r, n1) + // compute files, exclude matches that span file boundaries, + // and map offsets to file-local offsets + list = make(positionList, len(matches)) + for _, m := range matches { + // by construction, an offset corresponds to the Pos value + // for the file set - use it to get the file and line + p := token.Pos(m[0]) + if file := x.fset.File(p); file != nil { + if base := file.Base(); base <= m[1] && m[1] <= base+file.Size() { + // match [m[0], m[1]) is within the file boundaries + list[found].filename = file.Name() + list[found].line = file.Line(p) + found++ + } + } + } + if found == n || len(matches) < n1 { + // found all matches or there's no chance to find more + break + } + } + list = list[0:found] + sort.Sort(list) // sort by filename + + // collect matches belonging to the same file + var last string + var lines []int + addLines := func() { + if len(lines) > 0 { + // remove duplicate lines + result = append(result, FileLines{last, unique(lines)}) + lines = nil + } + } + for _, m := range list { + if m.filename != last { + addLines() + last = m.filename + } + lines = append(lines, m.line) + } + addLines() + + return +} + +// InvalidateIndex should be called whenever any of the file systems +// under godoc's observation change so that the indexer is kicked on. +func (c *Corpus) invalidateIndex() { + c.fsModified.Set(nil) + c.refreshMetadata() +} + +// indexUpToDate() returns true if the search index is not older +// than any of the file systems under godoc's observation. +// +func (c *Corpus) indexUpToDate() bool { + _, fsTime := c.fsModified.Get() + _, siTime := c.searchIndex.Get() + return !fsTime.After(siTime) +} + +// feedDirnames feeds the directory names of all directories +// under the file system given by root to channel c. 
+// +func (c *Corpus) feedDirnames(ch chan<- string) { + if dir, _ := c.fsTree.Get(); dir != nil { + for d := range dir.(*Directory).iter(false) { + ch <- d.Path + } + } +} + +// fsDirnames() returns a channel sending all directory names +// of all the file systems under godoc's observation. +// +func (c *Corpus) fsDirnames() <-chan string { + ch := make(chan string, 256) // buffered for fewer context switches + go func() { + c.feedDirnames(ch) + close(ch) + }() + return ch +} + +// CompatibleWith reports whether the Index x is compatible with the corpus +// indexing options set in c. +func (x *Index) CompatibleWith(c *Corpus) bool { + return x.opts.Docs == c.IndexDocs && + x.opts.GoCode == c.IndexGoCode && + x.opts.FullText == c.IndexFullText && + x.opts.MaxResults == c.MaxResults +} + +func (c *Corpus) readIndex(filenames string) error { + matches, err := filepath.Glob(filenames) + if err != nil { + return err + } else if matches == nil { + return fmt.Errorf("no index files match %q", filenames) + } + sort.Strings(matches) // make sure files are in the right order + files := make([]io.Reader, 0, len(matches)) + for _, filename := range matches { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + files = append(files, f) + } + return c.ReadIndexFrom(io.MultiReader(files...)) +} + +// ReadIndexFrom sets the current index from the serialized version found in r. 
+func (c *Corpus) ReadIndexFrom(r io.Reader) error { + x := new(Index) + if _, err := x.ReadFrom(r); err != nil { + return err + } + if !x.CompatibleWith(c) { + return fmt.Errorf("index file options are incompatible: %v", x.opts) + } + c.searchIndex.Set(x) + return nil +} + +func (c *Corpus) UpdateIndex() { + if c.Verbose { + log.Printf("updating index...") + } + start := time.Now() + index := c.NewIndex() + stop := time.Now() + c.searchIndex.Set(index) + if c.Verbose { + secs := stop.Sub(start).Seconds() + stats := index.Stats() + log.Printf("index updated (%gs, %d bytes of source, %d files, %d lines, %d unique words, %d spots)", + secs, stats.Bytes, stats.Files, stats.Lines, stats.Words, stats.Spots) + } + memstats := new(runtime.MemStats) + runtime.ReadMemStats(memstats) + if c.Verbose { + log.Printf("before GC: bytes = %d footprint = %d", memstats.HeapAlloc, memstats.Sys) + } + runtime.GC() + runtime.ReadMemStats(memstats) + if c.Verbose { + log.Printf("after GC: bytes = %d footprint = %d", memstats.HeapAlloc, memstats.Sys) + } +} + +// RunIndexer runs forever, indexing. +func (c *Corpus) RunIndexer() { + // initialize the index from disk if possible + if c.IndexFiles != "" { + c.initFSTree() + if err := c.readIndex(c.IndexFiles); err != nil { + log.Printf("error reading index from file %s: %v", c.IndexFiles, err) + } + return + } + + // Repeatedly update the package directory tree and index. + // TODO(bgarcia): Use fsnotify to only update when notified of a filesystem change. 
+ for { + c.initFSTree() + c.UpdateIndex() + if c.IndexInterval < 0 { + return + } + delay := 5 * time.Minute // by default, reindex every 5 minutes + if c.IndexInterval > 0 { + delay = c.IndexInterval + } + time.Sleep(delay) + } +} + +type countingWriter struct { + n *int64 + w io.Writer +} + +func (c countingWriter) Write(p []byte) (n int, err error) { + n, err = c.w.Write(p) + *c.n += int64(n) + return +} + +type byteReader interface { + io.Reader + io.ByteReader +} + +type countingReader struct { + n *int64 + r byteReader +} + +func (c countingReader) Read(p []byte) (n int, err error) { + n, err = c.r.Read(p) + *c.n += int64(n) + return +} + +func (c countingReader) ReadByte() (b byte, err error) { + b, err = c.r.ReadByte() + *c.n += 1 + return +} diff --git a/godoc/linkify.go b/godoc/linkify.go index 0a8fb474ec..db30b12d10 100644 --- a/godoc/linkify.go +++ b/godoc/linkify.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.8 + // This file implements LinkifyText which introduces // links for identifiers pointing to their declarations. // The approach does not cover all cases because godoc diff --git a/godoc/linkify18.go b/godoc/linkify18.go new file mode 100644 index 0000000000..7ac0d7b513 --- /dev/null +++ b/godoc/linkify18.go @@ -0,0 +1,236 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +// This file implements LinkifyText which introduces +// links for identifiers pointing to their declarations. +// The approach does not cover all cases because godoc +// doesn't have complete type information, but it's +// reasonably good for browsing. + +package godoc + +import ( + "fmt" + "go/ast" + "go/token" + "io" + "strconv" +) + +// LinkifyText HTML-escapes source text and writes it to w. 
+// Identifiers that are in a "use" position (i.e., that are +// not being declared), are wrapped with HTML links pointing +// to the respective declaration, if possible. Comments are +// formatted the same way as with FormatText. +// +func LinkifyText(w io.Writer, text []byte, n ast.Node) { + links := linksFor(n) + + i := 0 // links index + prev := "" // prev HTML tag + linkWriter := func(w io.Writer, _ int, start bool) { + // end tag + if !start { + if prev != "" { + fmt.Fprintf(w, ``, prev) + prev = "" + } + return + } + + // start tag + prev = "" + if i < len(links) { + switch info := links[i]; { + case info.path != "" && info.name == "": + // package path + fmt.Fprintf(w, ``, info.path) + prev = "a" + case info.path != "" && info.name != "": + // qualified identifier + fmt.Fprintf(w, ``, info.path, info.name) + prev = "a" + case info.path == "" && info.name != "": + // local identifier + if info.mode == identVal { + fmt.Fprintf(w, ``, info.name) + prev = "span" + } else if ast.IsExported(info.name) { + fmt.Fprintf(w, ``, info.name) + prev = "a" + } + } + i++ + } + } + + idents := tokenSelection(text, token.IDENT) + comments := tokenSelection(text, token.COMMENT) + FormatSelections(w, text, linkWriter, idents, selectionTag, comments) +} + +// A link describes the (HTML) link information for an identifier. +// The zero value of a link represents "no link". +// +type link struct { + mode identMode + path, name string // package path, identifier name +} + +// linksFor returns the list of links for the identifiers used +// by node in the same order as they appear in the source. +// +func linksFor(node ast.Node) (list []link) { + modes := identModesFor(node) + + // NOTE: We are expecting ast.Inspect to call the + // callback function in source text order. 
+ ast.Inspect(node, func(node ast.Node) bool {
+ switch n := node.(type) {
+ case *ast.Ident:
+ m := modes[n]
+ info := link{mode: m}
+ switch m {
+ case identUse:
+ if n.Obj == nil && predeclared[n.Name] {
+ info.path = builtinPkgPath
+ }
+ info.name = n.Name
+ case identDef:
+ // any declaration except const or var - empty link
+ case identVal:
+ // const or var declaration
+ info.name = n.Name
+ }
+ list = append(list, info)
+ return false
+ case *ast.SelectorExpr:
+ // Detect qualified identifiers of the form pkg.ident.
+ // If anything fails we return true and collect individual
+ // identifiers instead.
+ if x, _ := n.X.(*ast.Ident); x != nil {
+ // x must be a package for a qualified identifier
+ if obj := x.Obj; obj != nil && obj.Kind == ast.Pkg {
+ if spec, _ := obj.Decl.(*ast.ImportSpec); spec != nil {
+ // spec.Path.Value is the import path
+ if path, err := strconv.Unquote(spec.Path.Value); err == nil {
+ // Register two links, one for the package
+ // and one for the qualified identifier.
+ info := link{path: path}
+ list = append(list, info)
+ info.name = n.Sel.Name
+ list = append(list, info)
+ return false
+ }
+ }
+ }
+ }
+ }
+ return true
+ })
+
+ return
+}
+
+// The identMode describes how an identifier is "used" at its source location.
+type identMode int
+
+const (
+ identUse identMode = iota // identifier is used (must be zero value for identMode)
+ identDef // identifier is defined
+ identVal // identifier is defined in a const or var declaration
+)
+
+// identModesFor returns a map providing the identMode for each identifier used by node.
+func identModesFor(node ast.Node) map[*ast.Ident]identMode { + m := make(map[*ast.Ident]identMode) + + ast.Inspect(node, func(node ast.Node) bool { + switch n := node.(type) { + case *ast.Field: + for _, n := range n.Names { + m[n] = identDef + } + case *ast.ImportSpec: + if name := n.Name; name != nil { + m[name] = identDef + } + case *ast.ValueSpec: + for _, n := range n.Names { + m[n] = identVal + } + case *ast.TypeSpec: + m[n.Name] = identDef + case *ast.FuncDecl: + m[n.Name] = identDef + case *ast.AssignStmt: + // Short variable declarations only show up if we apply + // this code to all source code (as opposed to exported + // declarations only). + if n.Tok == token.DEFINE { + // Some of the lhs variables may be re-declared, + // so technically they are not defs. We don't + // care for now. + for _, x := range n.Lhs { + // Each lhs expression should be an + // ident, but we are conservative and check. + if n, _ := x.(*ast.Ident); n != nil { + m[n] = identVal + } + } + } + } + return true + }) + + return m +} + +// The predeclared map represents the set of all predeclared identifiers. +// TODO(gri) This information is also encoded in similar maps in go/doc, +// but not exported. Consider exporting an accessor and using +// it instead. 
+var predeclared = map[string]bool{ + "bool": true, + "byte": true, + "complex64": true, + "complex128": true, + "error": true, + "float32": true, + "float64": true, + "int": true, + "int8": true, + "int16": true, + "int32": true, + "int64": true, + "rune": true, + "string": true, + "uint": true, + "uint8": true, + "uint16": true, + "uint32": true, + "uint64": true, + "uintptr": true, + "true": true, + "false": true, + "iota": true, + "nil": true, + "append": true, + "cap": true, + "close": true, + "complex": true, + "copy": true, + "delete": true, + "imag": true, + "len": true, + "make": true, + "new": true, + "panic": true, + "print": true, + "println": true, + "real": true, + "recover": true, +} diff --git a/godoc/server.go b/godoc/server.go index 18f110a308..30d008d42a 100644 --- a/godoc/server.go +++ b/godoc/server.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.8 + package godoc import ( diff --git a/godoc/server18.go b/godoc/server18.go new file mode 100644 index 0000000000..247c9d4df3 --- /dev/null +++ b/godoc/server18.go @@ -0,0 +1,766 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package godoc + +import ( + "bytes" + "encoding/json" + "fmt" + "go/ast" + "go/build" + "go/doc" + "go/token" + htmlpkg "html" + htmltemplate "html/template" + "io" + "io/ioutil" + "log" + "net/http" + "os" + pathpkg "path" + "path/filepath" + "sort" + "strings" + "text/template" + "time" + + "golang.org/x/tools/godoc/analysis" + "golang.org/x/tools/godoc/util" + "golang.org/x/tools/godoc/vfs" +) + +// handlerServer is a migration from an old godoc http Handler type. +// This should probably merge into something else. +type handlerServer struct { + p *Presentation + c *Corpus // copy of p.Corpus + pattern string // url pattern; e.g. 
"/pkg/" + stripPrefix string // prefix to strip from import path; e.g. "pkg/" + fsRoot string // file system root to which the pattern is mapped; e.g. "/src" + exclude []string // file system paths to exclude; e.g. "/src/cmd" +} + +func (s *handlerServer) registerWithMux(mux *http.ServeMux) { + mux.Handle(s.pattern, s) +} + +// getPageInfo returns the PageInfo for a package directory abspath. If the +// parameter genAST is set, an AST containing only the package exports is +// computed (PageInfo.PAst), otherwise package documentation (PageInfo.Doc) +// is extracted from the AST. If there is no corresponding package in the +// directory, PageInfo.PAst and PageInfo.PDoc are nil. If there are no sub- +// directories, PageInfo.Dirs is nil. If an error occurred, PageInfo.Err is +// set to the respective error but the error is not logged. +// +func (h *handlerServer) GetPageInfo(abspath, relpath string, mode PageInfoMode, goos, goarch string) *PageInfo { + info := &PageInfo{Dirname: abspath} + + // Restrict to the package files that would be used when building + // the package on this system. This makes sure that if there are + // separate implementations for, say, Windows vs Unix, we don't + // jumble them all together. + // Note: If goos/goarch aren't set, the current binary's GOOS/GOARCH + // are used. 
+ ctxt := build.Default + ctxt.IsAbsPath = pathpkg.IsAbs + ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) { + f, err := h.c.fs.ReadDir(filepath.ToSlash(dir)) + filtered := make([]os.FileInfo, 0, len(f)) + for _, i := range f { + if mode&NoFiltering != 0 || i.Name() != "internal" { + filtered = append(filtered, i) + } + } + return filtered, err + } + ctxt.OpenFile = func(name string) (r io.ReadCloser, err error) { + data, err := vfs.ReadFile(h.c.fs, filepath.ToSlash(name)) + if err != nil { + return nil, err + } + return ioutil.NopCloser(bytes.NewReader(data)), nil + } + + if goos != "" { + ctxt.GOOS = goos + } + if goarch != "" { + ctxt.GOARCH = goarch + } + + pkginfo, err := ctxt.ImportDir(abspath, 0) + // continue if there are no Go source files; we still want the directory info + if _, nogo := err.(*build.NoGoError); err != nil && !nogo { + info.Err = err + return info + } + + // collect package files + pkgname := pkginfo.Name + pkgfiles := append(pkginfo.GoFiles, pkginfo.CgoFiles...) + if len(pkgfiles) == 0 { + // Commands written in C have no .go files in the build. + // Instead, documentation may be found in an ignored file. + // The file may be ignored via an explicit +build ignore + // constraint (recommended), or by defining the package + // documentation (historic). 
+ pkgname = "main" // assume package main since pkginfo.Name == "" + pkgfiles = pkginfo.IgnoredGoFiles + } + + // get package information, if any + if len(pkgfiles) > 0 { + // build package AST + fset := token.NewFileSet() + files, err := h.c.parseFiles(fset, relpath, abspath, pkgfiles) + if err != nil { + info.Err = err + return info + } + + // ignore any errors - they are due to unresolved identifiers + pkg, _ := ast.NewPackage(fset, files, poorMansImporter, nil) + + // extract package documentation + info.FSet = fset + if mode&ShowSource == 0 { + // show extracted documentation + var m doc.Mode + if mode&NoFiltering != 0 { + m |= doc.AllDecls + } + if mode&AllMethods != 0 { + m |= doc.AllMethods + } + info.PDoc = doc.New(pkg, pathpkg.Clean(relpath), m) // no trailing '/' in importpath + if mode&NoTypeAssoc != 0 { + for _, t := range info.PDoc.Types { + info.PDoc.Consts = append(info.PDoc.Consts, t.Consts...) + info.PDoc.Vars = append(info.PDoc.Vars, t.Vars...) + info.PDoc.Funcs = append(info.PDoc.Funcs, t.Funcs...) + t.Consts = nil + t.Vars = nil + t.Funcs = nil + } + // for now we cannot easily sort consts and vars since + // go/doc.Value doesn't export the order information + sort.Sort(funcsByName(info.PDoc.Funcs)) + } + + // collect examples + testfiles := append(pkginfo.TestGoFiles, pkginfo.XTestGoFiles...) 
+ files, err = h.c.parseFiles(fset, relpath, abspath, testfiles) + if err != nil { + log.Println("parsing examples:", err) + } + info.Examples = collectExamples(h.c, pkg, files) + + // collect any notes that we want to show + if info.PDoc.Notes != nil { + // could regexp.Compile only once per godoc, but probably not worth it + if rx := h.p.NotesRx; rx != nil { + for m, n := range info.PDoc.Notes { + if rx.MatchString(m) { + if info.Notes == nil { + info.Notes = make(map[string][]*doc.Note) + } + info.Notes[m] = n + } + } + } + } + + } else { + // show source code + // TODO(gri) Consider eliminating export filtering in this mode, + // or perhaps eliminating the mode altogether. + if mode&NoFiltering == 0 { + packageExports(fset, pkg) + } + info.PAst = files + } + info.IsMain = pkgname == "main" + } + + // get directory information, if any + var dir *Directory + var timestamp time.Time + if tree, ts := h.c.fsTree.Get(); tree != nil && tree.(*Directory) != nil { + // directory tree is present; lookup respective directory + // (may still fail if the file system was updated and the + // new directory tree has not yet been computed) + dir = tree.(*Directory).lookup(abspath) + timestamp = ts + } + if dir == nil { + // no directory tree present (too early after startup or + // command-line mode); compute one level for this page + // note: cannot use path filter here because in general + // it doesn't contain the FSTree path + dir = h.c.newDirectory(abspath, 1) + timestamp = time.Now() + } + info.Dirs = dir.listing(true, func(path string) bool { return h.includePath(path, mode) }) + info.DirTime = timestamp + info.DirFlat = mode&FlatDir != 0 + + return info +} + +func (h *handlerServer) includePath(path string, mode PageInfoMode) (r bool) { + // if the path is under one of the exclusion paths, don't list. 
+ for _, e := range h.exclude { + if strings.HasPrefix(path, e) { + return false + } + } + + // if the path includes 'internal', don't list unless we are in the NoFiltering mode. + if mode&NoFiltering != 0 { + return true + } + if strings.Contains(path, "internal") || strings.Contains(path, "vendor") { + for _, c := range strings.Split(filepath.Clean(path), string(os.PathSeparator)) { + if c == "internal" || c == "vendor" { + return false + } + } + } + return true +} + +type funcsByName []*doc.Func + +func (s funcsByName) Len() int { return len(s) } +func (s funcsByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s funcsByName) Less(i, j int) bool { return s[i].Name < s[j].Name } + +func (h *handlerServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if redirect(w, r) { + return + } + + relpath := pathpkg.Clean(r.URL.Path[len(h.stripPrefix)+1:]) + abspath := pathpkg.Join(h.fsRoot, relpath) + mode := h.p.GetPageInfoMode(r) + if relpath == builtinPkgPath { + mode = NoFiltering | NoTypeAssoc + } + info := h.GetPageInfo(abspath, relpath, mode, r.FormValue("GOOS"), r.FormValue("GOARCH")) + if info.Err != nil { + log.Print(info.Err) + h.p.ServeError(w, r, relpath, info.Err) + return + } + + if mode&NoHTML != 0 { + h.p.ServeText(w, applyTemplate(h.p.PackageText, "packageText", info)) + return + } + + var tabtitle, title, subtitle string + switch { + case info.PAst != nil: + for _, ast := range info.PAst { + tabtitle = ast.Name.Name + break + } + case info.PDoc != nil: + tabtitle = info.PDoc.Name + default: + tabtitle = info.Dirname + title = "Directory " + if h.p.ShowTimestamps { + subtitle = "Last update: " + info.DirTime.String() + } + } + if title == "" { + if info.IsMain { + // assume that the directory name is the command name + _, tabtitle = pathpkg.Split(relpath) + title = "Command " + } else { + title = "Package " + } + } + title += tabtitle + + // special cases for top-level package/command directories + switch tabtitle { + case "/src": + title = 
"Packages" + tabtitle = "Packages" + case "/src/cmd": + title = "Commands" + tabtitle = "Commands" + } + + // Emit JSON array for type information. + pi := h.c.Analysis.PackageInfo(relpath) + info.CallGraphIndex = pi.CallGraphIndex + info.CallGraph = htmltemplate.JS(marshalJSON(pi.CallGraph)) + info.AnalysisData = htmltemplate.JS(marshalJSON(pi.Types)) + info.TypeInfoIndex = make(map[string]int) + for i, ti := range pi.Types { + info.TypeInfoIndex[ti.Name] = i + } + + info.Share = allowShare(r) + h.p.ServePage(w, Page{ + Title: title, + Tabtitle: tabtitle, + Subtitle: subtitle, + Body: applyTemplate(h.p.PackageHTML, "packageHTML", info), + Share: info.Share, + }) +} + +type PageInfoMode uint + +const ( + NoFiltering PageInfoMode = 1 << iota // do not filter exports + AllMethods // show all embedded methods + ShowSource // show source code, do not extract documentation + NoHTML // show result in textual form, do not generate HTML + FlatDir // show directory in a flat (non-indented) manner + NoTypeAssoc // don't associate consts, vars, and factory functions with types +) + +// modeNames defines names for each PageInfoMode flag. +var modeNames = map[string]PageInfoMode{ + "all": NoFiltering, + "methods": AllMethods, + "src": ShowSource, + "text": NoHTML, + "flat": FlatDir, +} + +// GetPageInfoMode computes the PageInfoMode flags by analyzing the request +// URL form value "m". It is value is a comma-separated list of mode names +// as defined by modeNames (e.g.: m=src,text). +func (p *Presentation) GetPageInfoMode(r *http.Request) PageInfoMode { + var mode PageInfoMode + for _, k := range strings.Split(r.FormValue("m"), ",") { + if m, found := modeNames[strings.TrimSpace(k)]; found { + mode |= m + } + } + if p.AdjustPageInfoMode != nil { + mode = p.AdjustPageInfoMode(r, mode) + } + return mode +} + +// poorMansImporter returns a (dummy) package object named +// by the last path component of the provided package path +// (as is the convention for packages). 
This is sufficient +// to resolve package identifiers without doing an actual +// import. It never returns an error. +// +func poorMansImporter(imports map[string]*ast.Object, path string) (*ast.Object, error) { + pkg := imports[path] + if pkg == nil { + // note that strings.LastIndex returns -1 if there is no "/" + pkg = ast.NewObj(ast.Pkg, path[strings.LastIndex(path, "/")+1:]) + pkg.Data = ast.NewScope(nil) // required by ast.NewPackage for dot-import + imports[path] = pkg + } + return pkg, nil +} + +// globalNames returns a set of the names declared by all package-level +// declarations. Method names are returned in the form Receiver_Method. +func globalNames(pkg *ast.Package) map[string]bool { + names := make(map[string]bool) + for _, file := range pkg.Files { + for _, decl := range file.Decls { + addNames(names, decl) + } + } + return names +} + +// collectExamples collects examples for pkg from testfiles. +func collectExamples(c *Corpus, pkg *ast.Package, testfiles map[string]*ast.File) []*doc.Example { + var files []*ast.File + for _, f := range testfiles { + files = append(files, f) + } + + var examples []*doc.Example + globals := globalNames(pkg) + for _, e := range doc.Examples(files...) { + name := stripExampleSuffix(e.Name) + if name == "" || globals[name] { + examples = append(examples, e) + } else if c.Verbose { + log.Printf("skipping example 'Example%s' because '%s' is not a known function or type", e.Name, e.Name) + } + } + + return examples +} + +// addNames adds the names declared by decl to the names set. +// Method names are added in the form ReceiverTypeName_Method. 
// addNames adds the names declared by decl to the names set.
// Method names are added in the form ReceiverTypeName_Method.
func addNames(names map[string]bool, decl ast.Decl) {
	switch d := decl.(type) {
	case *ast.FuncDecl:
		name := d.Name.Name
		if d.Recv != nil {
			// Qualify methods with the receiver's type name so that
			// (T).Foo and (U).Foo do not collide in the set.
			var typeName string
			switch r := d.Recv.List[0].Type.(type) {
			case *ast.StarExpr:
				typeName = r.X.(*ast.Ident).Name
			case *ast.Ident:
				typeName = r.Name
			}
			name = typeName + "_" + name
		}
		names[name] = true
	case *ast.GenDecl:
		for _, spec := range d.Specs {
			switch s := spec.(type) {
			case *ast.TypeSpec:
				names[s.Name.Name] = true
			case *ast.ValueSpec:
				for _, id := range s.Names {
					names[id.Name] = true
				}
			}
		}
	}
}

// packageExports is a local implementation of ast.PackageExports
// which correctly updates each package file's comment list.
// (The ast.PackageExports signature is frozen, hence the local
// implementation).
//
func packageExports(fset *token.FileSet, pkg *ast.Package) {
	for _, src := range pkg.Files {
		cmap := ast.NewCommentMap(fset, src, src.Comments)
		ast.FileExports(src)
		// Re-filter the comment map against the pruned file so comments
		// attached to removed (unexported) declarations are dropped too.
		src.Comments = cmap.Filter(src).Comments()
	}
}

// applyTemplate executes template t on data and returns the rendered
// bytes. Execution errors are logged under the given name; on error the
// (possibly partial) output produced so far is still returned.
func applyTemplate(t *template.Template, name string, data interface{}) []byte {
	var buf bytes.Buffer
	if err := t.Execute(&buf, data); err != nil {
		log.Printf("%s.Execute: %s", name, err)
	}
	return buf.Bytes()
}

// writerCapturesErr wraps an io.Writer and records any write error (the
// most recent one), letting callers distinguish template-processing
// errors from errors of the underlying writer.
type writerCapturesErr struct {
	w   io.Writer
	err error
}

func (w *writerCapturesErr) Write(p []byte) (int, error) {
	n, err := w.w.Write(p)
	if err != nil {
		w.err = err
	}
	return n, err
}

// applyTemplateToResponseWriter uses an http.ResponseWriter as the io.Writer
// for the call to template.Execute. It uses an io.Writer wrapper to capture
// errors from the underlying http.ResponseWriter. Errors are logged only when
// they come from the template processing and not the Writer; this avoids
// polluting log files with error messages due to networking issues, such as
// client disconnects and http HEAD protocol violations.
+func applyTemplateToResponseWriter(rw http.ResponseWriter, t *template.Template, data interface{}) { + w := &writerCapturesErr{w: rw} + err := t.Execute(w, data) + // There are some cases where template.Execute does not return an error when + // rw returns an error, and some where it does. So check w.err first. + if w.err == nil && err != nil { + // Log template errors. + log.Printf("%s.Execute: %s", t.Name(), err) + } +} + +func redirect(w http.ResponseWriter, r *http.Request) (redirected bool) { + canonical := pathpkg.Clean(r.URL.Path) + if !strings.HasSuffix(canonical, "/") { + canonical += "/" + } + if r.URL.Path != canonical { + url := *r.URL + url.Path = canonical + http.Redirect(w, r, url.String(), http.StatusMovedPermanently) + redirected = true + } + return +} + +func redirectFile(w http.ResponseWriter, r *http.Request) (redirected bool) { + c := pathpkg.Clean(r.URL.Path) + c = strings.TrimRight(c, "/") + if r.URL.Path != c { + url := *r.URL + url.Path = c + http.Redirect(w, r, url.String(), http.StatusMovedPermanently) + redirected = true + } + return +} + +func (p *Presentation) serveTextFile(w http.ResponseWriter, r *http.Request, abspath, relpath, title string) { + src, err := vfs.ReadFile(p.Corpus.fs, abspath) + if err != nil { + log.Printf("ReadFile: %s", err) + p.ServeError(w, r, relpath, err) + return + } + + if r.FormValue("m") == "text" { + p.ServeText(w, src) + return + } + + h := r.FormValue("h") + s := RangeSelection(r.FormValue("s")) + + var buf bytes.Buffer + if pathpkg.Ext(abspath) == ".go" { + // Find markup links for this file (e.g. "/src/fmt/print.go"). + fi := p.Corpus.Analysis.FileInfo(abspath) + buf.WriteString("\n") + + if status := p.Corpus.Analysis.Status(); status != "" { + buf.WriteString("Static analysis features ") + // TODO(adonovan): show analysis status at per-file granularity. + fmt.Fprintf(&buf, "[%s]
", htmlpkg.EscapeString(status)) + } + + buf.WriteString("
")
+		formatGoSource(&buf, src, fi.Links, h, s)
+		buf.WriteString("
") + } else { + buf.WriteString("
")
+		FormatText(&buf, src, 1, false, h, s)
+		buf.WriteString("
") + } + fmt.Fprintf(&buf, `

View as plain text

`, htmlpkg.EscapeString(relpath)) + + p.ServePage(w, Page{ + Title: title + " " + relpath, + Tabtitle: relpath, + Body: buf.Bytes(), + Share: allowShare(r), + }) +} + +// formatGoSource HTML-escapes Go source text and writes it to w, +// decorating it with the specified analysis links. +// +func formatGoSource(buf *bytes.Buffer, text []byte, links []analysis.Link, pattern string, selection Selection) { + // Emit to a temp buffer so that we can add line anchors at the end. + saved, buf := buf, new(bytes.Buffer) + + var i int + var link analysis.Link // shared state of the two funcs below + segmentIter := func() (seg Segment) { + if i < len(links) { + link = links[i] + i++ + seg = Segment{link.Start(), link.End()} + } + return + } + linkWriter := func(w io.Writer, offs int, start bool) { + link.Write(w, offs, start) + } + + comments := tokenSelection(text, token.COMMENT) + var highlights Selection + if pattern != "" { + highlights = regexpSelection(text, pattern) + } + + FormatSelections(buf, text, linkWriter, segmentIter, selectionTag, comments, highlights, selection) + + // Now copy buf to saved, adding line anchors. + + // The lineSelection mechanism can't be composed with our + // linkWriter, so we have to add line spans as another pass. 
+ n := 1 + for _, line := range bytes.Split(buf.Bytes(), []byte("\n")) { + fmt.Fprintf(saved, "%6d\t", n, n) + n++ + saved.Write(line) + saved.WriteByte('\n') + } +} + +func (p *Presentation) serveDirectory(w http.ResponseWriter, r *http.Request, abspath, relpath string) { + if redirect(w, r) { + return + } + + list, err := p.Corpus.fs.ReadDir(abspath) + if err != nil { + p.ServeError(w, r, relpath, err) + return + } + + p.ServePage(w, Page{ + Title: "Directory " + relpath, + Tabtitle: relpath, + Body: applyTemplate(p.DirlistHTML, "dirlistHTML", list), + Share: allowShare(r), + }) +} + +func (p *Presentation) ServeHTMLDoc(w http.ResponseWriter, r *http.Request, abspath, relpath string) { + // get HTML body contents + src, err := vfs.ReadFile(p.Corpus.fs, abspath) + if err != nil { + log.Printf("ReadFile: %s", err) + p.ServeError(w, r, relpath, err) + return + } + + // if it begins with " tag + var buf2 bytes.Buffer + buf2.WriteString("
")
+	FormatText(&buf2, buf1.Bytes(), -1, true, id.Name, nil)
+	buf2.WriteString("
") + return &Snippet{fset.Position(id.Pos()).Line, buf2.String()} +} + +func findSpec(list []ast.Spec, id *ast.Ident) ast.Spec { + for _, spec := range list { + switch s := spec.(type) { + case *ast.ImportSpec: + if s.Name == id { + return s + } + case *ast.ValueSpec: + for _, n := range s.Names { + if n == id { + return s + } + } + case *ast.TypeSpec: + if s.Name == id { + return s + } + } + } + return nil +} + +func (p *Presentation) genSnippet(fset *token.FileSet, d *ast.GenDecl, id *ast.Ident) *Snippet { + s := findSpec(d.Specs, id) + if s == nil { + return nil // declaration doesn't contain id - exit gracefully + } + + // only use the spec containing the id for the snippet + dd := &ast.GenDecl{ + Doc: d.Doc, + TokPos: d.Pos(), + Tok: d.Tok, + Lparen: d.Lparen, + Specs: []ast.Spec{s}, + Rparen: d.Rparen, + } + + return p.newSnippet(fset, dd, id) +} + +func (p *Presentation) funcSnippet(fset *token.FileSet, d *ast.FuncDecl, id *ast.Ident) *Snippet { + if d.Name != id { + return nil // declaration doesn't contain id - exit gracefully + } + + // only use the function signature for the snippet + dd := &ast.FuncDecl{ + Doc: d.Doc, + Recv: d.Recv, + Name: d.Name, + Type: d.Type, + } + + return p.newSnippet(fset, dd, id) +} + +// NewSnippet creates a text snippet from a declaration decl containing an +// identifier id. Parts of the declaration not containing the identifier +// may be removed for a more compact snippet. +func NewSnippet(fset *token.FileSet, decl ast.Decl, id *ast.Ident) *Snippet { + // TODO(bradfitz, adg): remove this function. But it's used by indexer, which + // doesn't have a *Presentation, and NewSnippet needs a TabWidth. + var p Presentation + p.TabWidth = 4 + return p.NewSnippet(fset, decl, id) +} + +// NewSnippet creates a text snippet from a declaration decl containing an +// identifier id. Parts of the declaration not containing the identifier +// may be removed for a more compact snippet. 
+func (p *Presentation) NewSnippet(fset *token.FileSet, decl ast.Decl, id *ast.Ident) *Snippet { + var s *Snippet + switch d := decl.(type) { + case *ast.GenDecl: + s = p.genSnippet(fset, d, id) + case *ast.FuncDecl: + s = p.funcSnippet(fset, d, id) + } + + // handle failure gracefully + if s == nil { + var buf bytes.Buffer + fmt.Fprintf(&buf, `could not generate a snippet for %s`, id.Name) + s = &Snippet{fset.Position(id.Pos()).Line, buf.String()} + } + return s +} diff --git a/imports/fix.go b/imports/fix.go index a241c29763..feb6656a71 100644 --- a/imports/fix.go +++ b/imports/fix.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.8 + package imports import ( diff --git a/imports/fix18.go b/imports/fix18.go new file mode 100644 index 0000000000..9a857481ea --- /dev/null +++ b/imports/fix18.go @@ -0,0 +1,977 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package imports + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "sort" + "strings" + "sync" + + "golang.org/x/tools/go/ast/astutil" +) + +// Debug controls verbose logging. +var Debug = false + +var ( + inTests = false // set true by fix_test.go; if false, no need to use testMu + testMu sync.RWMutex // guards globals reset by tests; used only if inTests +) + +// If set, LocalPrefix instructs Process to sort import paths with the given +// prefix into another group after 3rd-party packages. +var LocalPrefix string + +// importToGroup is a list of functions which map from an import path to +// a group number. 
+var importToGroup = []func(importPath string) (num int, ok bool){ + func(importPath string) (num int, ok bool) { + if LocalPrefix != "" && strings.HasPrefix(importPath, LocalPrefix) { + return 3, true + } + return + }, + func(importPath string) (num int, ok bool) { + if strings.HasPrefix(importPath, "appengine") { + return 2, true + } + return + }, + func(importPath string) (num int, ok bool) { + if strings.Contains(importPath, ".") { + return 1, true + } + return + }, +} + +func importGroup(importPath string) int { + for _, fn := range importToGroup { + if n, ok := fn(importPath); ok { + return n + } + } + return 0 +} + +// packageInfo is a summary of features found in a package. +type packageInfo struct { + Globals map[string]bool // symbol => true +} + +// dirPackageInfo gets information from other files in the package. +func dirPackageInfo(srcDir, filename string) (*packageInfo, error) { + considerTests := strings.HasSuffix(filename, "_test.go") + + // Handle file from stdin + if _, err := os.Stat(filename); err != nil { + if os.IsNotExist(err) { + return &packageInfo{}, nil + } + return nil, err + } + + fileBase := filepath.Base(filename) + packageFileInfos, err := ioutil.ReadDir(srcDir) + if err != nil { + return nil, err + } + + info := &packageInfo{Globals: make(map[string]bool)} + for _, fi := range packageFileInfos { + if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") { + continue + } + if !considerTests && strings.HasSuffix(fi.Name(), "_test.go") { + continue + } + + fileSet := token.NewFileSet() + root, err := parser.ParseFile(fileSet, filepath.Join(srcDir, fi.Name()), nil, 0) + if err != nil { + continue + } + + for _, decl := range root.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + + for _, spec := range genDecl.Specs { + valueSpec, ok := spec.(*ast.ValueSpec) + if !ok { + continue + } + info.Globals[valueSpec.Names[0].Name] = true + } + } + } + return info, nil +} + +func fixImports(fset *token.FileSet, f 
*ast.File, filename string) (added []string, err error) { + // refs are a set of possible package references currently unsatisfied by imports. + // first key: either base package (e.g. "fmt") or renamed package + // second key: referenced package symbol (e.g. "Println") + refs := make(map[string]map[string]bool) + + // decls are the current package imports. key is base package or renamed package. + decls := make(map[string]*ast.ImportSpec) + + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + if Debug { + log.Printf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) + } + + var packageInfo *packageInfo + var loadedPackageInfo bool + + // collect potential uses of packages. + var visitor visitFn + visitor = visitFn(func(node ast.Node) ast.Visitor { + if node == nil { + return visitor + } + switch v := node.(type) { + case *ast.ImportSpec: + if v.Name != nil { + decls[v.Name.Name] = v + break + } + ipath := strings.Trim(v.Path.Value, `"`) + if ipath == "C" { + break + } + local := importPathToName(ipath, srcDir) + decls[local] = v + case *ast.SelectorExpr: + xident, ok := v.X.(*ast.Ident) + if !ok { + break + } + if xident.Obj != nil { + // if the parser can resolve it, it's not a package ref + break + } + pkgName := xident.Name + if refs[pkgName] == nil { + refs[pkgName] = make(map[string]bool) + } + if !loadedPackageInfo { + loadedPackageInfo = true + packageInfo, _ = dirPackageInfo(srcDir, filename) + } + if decls[pkgName] == nil && (packageInfo == nil || !packageInfo.Globals[pkgName]) { + refs[pkgName][v.Sel.Name] = true + } + } + return visitor + }) + ast.Walk(visitor, f) + + // Nil out any unused ImportSpecs, to be removed in following passes + unusedImport := map[string]string{} + for pkg, is := range decls { + if refs[pkg] == nil && pkg != "_" && pkg != "." 
{ + name := "" + if is.Name != nil { + name = is.Name.Name + } + unusedImport[strings.Trim(is.Path.Value, `"`)] = name + } + } + for ipath, name := range unusedImport { + if ipath == "C" { + // Don't remove cgo stuff. + continue + } + astutil.DeleteNamedImport(fset, f, name, ipath) + } + + for pkgName, symbols := range refs { + if len(symbols) == 0 { + // skip over packages already imported + delete(refs, pkgName) + } + } + + // Search for imports matching potential package references. + searches := 0 + type result struct { + ipath string // import path (if err == nil) + name string // optional name to rename import as + err error + } + results := make(chan result) + for pkgName, symbols := range refs { + go func(pkgName string, symbols map[string]bool) { + ipath, rename, err := findImport(pkgName, symbols, filename) + r := result{ipath: ipath, err: err} + if rename { + r.name = pkgName + } + results <- r + }(pkgName, symbols) + searches++ + } + for i := 0; i < searches; i++ { + result := <-results + if result.err != nil { + return nil, result.err + } + if result.ipath != "" { + if result.name != "" { + astutil.AddNamedImport(fset, f, result.name, result.ipath) + } else { + astutil.AddImport(fset, f, result.ipath) + } + added = append(added, result.ipath) + } + } + + return added, nil +} + +// importPathToName returns the package name for the given import path. +var importPathToName func(importPath, srcDir string) (packageName string) = importPathToNameGoPath + +// importPathToNameBasic assumes the package name is the base of import path. +func importPathToNameBasic(importPath, srcDir string) (packageName string) { + return path.Base(importPath) +} + +// importPathToNameGoPath finds out the actual package name, as declared in its .go files. +// If there's a problem, it falls back to using importPathToNameBasic. +func importPathToNameGoPath(importPath, srcDir string) (packageName string) { + // Fast path for standard library without going to disk. 
+ if pkg, ok := stdImportPackage[importPath]; ok { + return pkg + } + + pkgName, err := importPathToNameGoPathParse(importPath, srcDir) + if Debug { + log.Printf("importPathToNameGoPathParse(%q, srcDir=%q) = %q, %v", importPath, srcDir, pkgName, err) + } + if err == nil { + return pkgName + } + return importPathToNameBasic(importPath, srcDir) +} + +// importPathToNameGoPathParse is a faster version of build.Import if +// the only thing desired is the package name. It uses build.FindOnly +// to find the directory and then only parses one file in the package, +// trusting that the files in the directory are consistent. +func importPathToNameGoPathParse(importPath, srcDir string) (packageName string, err error) { + buildPkg, err := build.Import(importPath, srcDir, build.FindOnly) + if err != nil { + return "", err + } + d, err := os.Open(buildPkg.Dir) + if err != nil { + return "", err + } + names, err := d.Readdirnames(-1) + d.Close() + if err != nil { + return "", err + } + sort.Strings(names) // to have predictable behavior + var lastErr error + var nfile int + for _, name := range names { + if !strings.HasSuffix(name, ".go") { + continue + } + if strings.HasSuffix(name, "_test.go") { + continue + } + nfile++ + fullFile := filepath.Join(buildPkg.Dir, name) + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, fullFile, nil, parser.PackageClauseOnly) + if err != nil { + lastErr = err + continue + } + pkgName := f.Name.Name + if pkgName == "documentation" { + // Special case from go/build.ImportDir, not + // handled by ctx.MatchFile. + continue + } + if pkgName == "main" { + // Also skip package main, assuming it's a +build ignore generator or example. + // Since you can't import a package main anyway, there's no harm here. 
+ continue + } + return pkgName, nil + } + if lastErr != nil { + return "", lastErr + } + return "", fmt.Errorf("no importable package found in %d Go files", nfile) +} + +var stdImportPackage = map[string]string{} // "net/http" => "http" + +func init() { + // Nothing in the standard library has a package name not + // matching its import base name. + for _, pkg := range stdlib { + if _, ok := stdImportPackage[pkg]; !ok { + stdImportPackage[pkg] = path.Base(pkg) + } + } +} + +// Directory-scanning state. +var ( + // scanGoRootOnce guards calling scanGoRoot (for $GOROOT) + scanGoRootOnce sync.Once + // scanGoPathOnce guards calling scanGoPath (for $GOPATH) + scanGoPathOnce sync.Once + + // populateIgnoreOnce guards calling populateIgnore + populateIgnoreOnce sync.Once + ignoredDirs []os.FileInfo + + dirScanMu sync.RWMutex + dirScan map[string]*pkg // abs dir path => *pkg +) + +type pkg struct { + dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") + importPath string // full pkg import path ("net/http", "foo/bar/vendor/a/b") + importPathShort string // vendorless import path ("net/http", "a/b") +} + +// byImportPathShortLength sorts by the short import path length, breaking ties on the +// import string itself. +type byImportPathShortLength []*pkg + +func (s byImportPathShortLength) Len() int { return len(s) } +func (s byImportPathShortLength) Less(i, j int) bool { + vi, vj := s[i].importPathShort, s[j].importPathShort + return len(vi) < len(vj) || (len(vi) == len(vj) && vi < vj) + +} +func (s byImportPathShortLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// gate is a semaphore for limiting concurrency. +type gate chan struct{} + +func (g gate) enter() { g <- struct{}{} } +func (g gate) leave() { <-g } + +var visitedSymlinks struct { + sync.Mutex + m map[string]struct{} +} + +// guarded by populateIgnoreOnce; populates ignoredDirs. 
func populateIgnore() {
	for _, srcDir := range build.Default.SrcDirs() {
		// $GOROOT/src is never subject to user ignore rules.
		if srcDir == filepath.Join(build.Default.GOROOT, "src") {
			continue
		}
		populateIgnoredDirs(srcDir)
	}
}

// populateIgnoredDirs reads an optional config file at <path>/.goimportsignore
// of relative directories to ignore when scanning for go files.
// The provided path is one of the $GOPATH entries with "src" appended.
func populateIgnoredDirs(path string) {
	file := filepath.Join(path, ".goimportsignore")
	slurp, err := ioutil.ReadFile(file)
	if Debug {
		if err != nil {
			log.Print(err)
		} else {
			log.Printf("Read %s", file)
		}
	}
	if err != nil {
		// Missing/unreadable ignore file is not an error; just no ignores.
		return
	}
	bs := bufio.NewScanner(bytes.NewReader(slurp))
	for bs.Scan() {
		line := strings.TrimSpace(bs.Text())
		// Blank lines and "#" comment lines are skipped.
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		full := filepath.Join(path, line)
		// Stat each entry so skipDir can later use os.SameFile, which is
		// robust against symlinks pointing at the same directory.
		if fi, err := os.Stat(full); err == nil {
			ignoredDirs = append(ignoredDirs, fi)
			if Debug {
				log.Printf("Directory added to ignore list: %s", full)
			}
		} else if Debug {
			log.Printf("Error statting entry in .goimportsignore: %v", err)
		}
	}
}

// skipDir reports whether fi matches one of the directories recorded in
// ignoredDirs (populated from .goimportsignore files).
func skipDir(fi os.FileInfo) bool {
	for _, ignoredDir := range ignoredDirs {
		if os.SameFile(fi, ignoredDir) {
			return true
		}
	}
	return false
}

// shouldTraverse reports whether the symlink fi, found in dir,
// should be followed. It makes sure symlinks were never visited
// before to avoid symlink loops.
+func shouldTraverse(dir string, fi os.FileInfo) bool { + path := filepath.Join(dir, fi.Name()) + target, err := filepath.EvalSymlinks(path) + if err != nil { + if !os.IsNotExist(err) { + fmt.Fprintln(os.Stderr, err) + } + return false + } + ts, err := os.Stat(target) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return false + } + if !ts.IsDir() { + return false + } + + realParent, err := filepath.EvalSymlinks(dir) + if err != nil { + fmt.Fprint(os.Stderr, err) + return false + } + realPath := filepath.Join(realParent, fi.Name()) + visitedSymlinks.Lock() + defer visitedSymlinks.Unlock() + if visitedSymlinks.m == nil { + visitedSymlinks.m = make(map[string]struct{}) + } + if _, ok := visitedSymlinks.m[realPath]; ok { + return false + } + visitedSymlinks.m[realPath] = struct{}{} + return true +} + +var testHookScanDir = func(dir string) {} + +var scanGoRootDone = make(chan struct{}) // closed when scanGoRoot is done + +func scanGoRoot() { + go func() { + scanGoDirs(true) + close(scanGoRootDone) + }() +} + +func scanGoPath() { scanGoDirs(false) } + +func scanGoDirs(goRoot bool) { + if Debug { + which := "$GOROOT" + if !goRoot { + which = "$GOPATH" + } + log.Printf("scanning " + which) + defer log.Printf("scanned " + which) + } + dirScanMu.Lock() + if dirScan == nil { + dirScan = make(map[string]*pkg) + } + dirScanMu.Unlock() + + for _, srcDir := range build.Default.SrcDirs() { + isGoroot := srcDir == filepath.Join(build.Default.GOROOT, "src") + if isGoroot != goRoot { + continue + } + testHookScanDir(srcDir) + walkFn := func(path string, typ os.FileMode) error { + dir := filepath.Dir(path) + if typ.IsRegular() { + if dir == srcDir { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. 
+ return nil + } + if !strings.HasSuffix(path, ".go") { + return nil + } + dirScanMu.Lock() + if _, dup := dirScan[dir]; !dup { + importpath := filepath.ToSlash(dir[len(srcDir)+len("/"):]) + dirScan[dir] = &pkg{ + importPath: importpath, + importPathShort: vendorlessImportPath(importpath), + dir: dir, + } + } + dirScanMu.Unlock() + return nil + } + if typ == os.ModeDir { + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || base == "node_modules" { + return filepath.SkipDir + } + fi, err := os.Lstat(path) + if err == nil && skipDir(fi) { + if Debug { + log.Printf("skipping directory %q under %s", fi.Name(), dir) + } + return filepath.SkipDir + } + return nil + } + if typ == os.ModeSymlink { + base := filepath.Base(path) + if strings.HasPrefix(base, ".#") { + // Emacs noise. + return nil + } + fi, err := os.Lstat(path) + if err != nil { + // Just ignore it. + return nil + } + if shouldTraverse(dir, fi) { + return traverseLink + } + } + return nil + } + if err := fastWalk(srcDir, walkFn); err != nil { + log.Printf("goimports: scanning directory %v: %v", srcDir, err) + } + } +} + +// vendorlessImportPath returns the devendorized version of the provided import path. +// e.g. "foo/bar/vendor/a/b" => "a/b" +func vendorlessImportPath(ipath string) string { + // Devendorize for use in import statement. + if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { + return ipath[i+len("/vendor/"):] + } + if strings.HasPrefix(ipath, "vendor/") { + return ipath[len("vendor/"):] + } + return ipath +} + +// loadExports returns the set of exported symbols in the package at dir. +// It returns nil on error or if the package name in dir does not match expectPackage. 
var loadExports func(expectPackage, dir string) map[string]bool = loadExportsGoPath

// loadExportsGoPath parses the non-test .go files in dir and collects their
// exported identifiers. It returns nil on any parse error or when the
// package name found does not match expectPackage.
func loadExportsGoPath(expectPackage, dir string) map[string]bool {
	if Debug {
		log.Printf("loading exports in dir %s (seeking package %s)", dir, expectPackage)
	}
	exports := make(map[string]bool)

	ctx := build.Default

	// ReadDir is like ioutil.ReadDir, but only returns *.go files
	// and filters out _test.go files since they're not relevant
	// and only slow things down.
	ctx.ReadDir = func(dir string) (notTests []os.FileInfo, err error) {
		all, err := ioutil.ReadDir(dir)
		if err != nil {
			return nil, err
		}
		notTests = all[:0]
		for _, fi := range all {
			name := fi.Name()
			if strings.HasSuffix(name, ".go") && !strings.HasSuffix(name, "_test.go") {
				notTests = append(notTests, fi)
			}
		}
		return notTests, nil
	}

	files, err := ctx.ReadDir(dir)
	if err != nil {
		log.Print(err)
		return nil
	}

	fset := token.NewFileSet()

	for _, fi := range files {
		// MatchFile applies build-tag and GOOS/GOARCH file-name filtering.
		match, err := ctx.MatchFile(dir, fi.Name())
		if err != nil || !match {
			continue
		}
		fullFile := filepath.Join(dir, fi.Name())
		f, err := parser.ParseFile(fset, fullFile, nil, 0)
		if err != nil {
			if Debug {
				log.Printf("Parsing %s: %v", fullFile, err)
			}
			return nil
		}
		pkgName := f.Name.Name
		if pkgName == "documentation" {
			// Special case from go/build.ImportDir, not
			// handled by ctx.MatchFile.
			continue
		}
		if pkgName != expectPackage {
			if Debug {
				log.Printf("scan of dir %v is not expected package %v (actually %v)", dir, expectPackage, pkgName)
			}
			return nil
		}
		for name := range f.Scope.Objects {
			if ast.IsExported(name) {
				exports[name] = true
			}
		}
	}

	if Debug {
		exportList := make([]string, 0, len(exports))
		for k := range exports {
			exportList = append(exportList, k)
		}
		sort.Strings(exportList)
		log.Printf("loaded exports in dir %v (package %v): %v", dir, expectPackage, strings.Join(exportList, ", "))
	}
	return exports
}

// findImport searches for a package with the given symbols.
// If no package is found, findImport returns ("", false, nil)
//
// This is declared as a variable rather than a function so goimports
// can be easily extended by adding a file with an init function.
//
// The rename value tells goimports whether to use the package name as
// a local qualifier in an import. For example, if findImports("pkg",
// "X") returns ("foo/bar", rename=true), then goimports adds the
// import line:
// 	import pkg "foo/bar"
// to satisfy uses of pkg.X in the file.
var findImport func(pkgName string, symbols map[string]bool, filename string) (foundPkg string, rename bool, err error) = findImportGoPath

// findImportGoPath is the normal implementation of findImport.
// (Some companies have their own internally.)
func findImportGoPath(pkgName string, symbols map[string]bool, filename string) (foundPkg string, rename bool, err error) {
	if inTests {
		testMu.RLock()
		defer testMu.RUnlock()
	}

	// Fast path for the standard library.
	// In the common case we hopefully never have to scan the GOPATH, which can
	// be slow with moving disks.
	if pkg, rename, ok := findImportStdlib(pkgName, symbols); ok {
		return pkg, rename, nil
	}
	if pkgName == "rand" && symbols["Read"] {
		// Special-case rand.Read.
		//
		// If findImportStdlib didn't find it above, don't go
		// searching for it, lest it find and pick math/rand
		// in GOROOT (new as of Go 1.6)
		//
		// crypto/rand is the safer choice.
		return "", false, nil
	}

	// TODO(sameer): look at the import lines for other Go files in the
	// local directory, since the user is likely to import the same packages
	// in the current Go file. Return rename=true when the other Go files
	// use a renamed package that's also used in the current file.

	// Read all the $GOPATH/src/.goimportsignore files before scanning directories.
	populateIgnoreOnce.Do(populateIgnore)

	// Start scanning the $GOROOT asynchronously, then run the
	// GOPATH scan synchronously if needed, and then wait for the
	// $GOROOT to finish.
	//
	// TODO(bradfitz): run each $GOPATH entry async. But nobody
	// really has more than one anyway, so low priority.
	scanGoRootOnce.Do(scanGoRoot) // async
	if !fileInDir(filename, build.Default.GOROOT) {
		scanGoPathOnce.Do(scanGoPath) // blocking
	}
	<-scanGoRootDone

	// Find candidate packages, looking only at their directory names first.
	// NOTE(review): dirScan is read here without dirScanMu; presumably safe
	// because both scans have completed above — confirm against fastWalk.
	var candidates []*pkg
	for _, pkg := range dirScan {
		if pkgIsCandidate(filename, pkgName, pkg) {
			candidates = append(candidates, pkg)
		}
	}

	// Sort the candidates by their import package length,
	// assuming that shorter package names are better than long
	// ones. Note that this sorts by the de-vendored name, so
	// there's no "penalty" for vendoring.
	sort.Sort(byImportPathShortLength(candidates))
	if Debug {
		for i, pkg := range candidates {
			log.Printf("%s candidate %d/%d: %v", pkgName, i+1, len(candidates), pkg.importPathShort)
		}
	}

	// Collect exports for packages with matching names.

	done := make(chan struct{}) // closed when we find the answer
	defer close(done)

	rescv := make([]chan *pkg, len(candidates))
	for i := range candidates {
		rescv[i] = make(chan *pkg)
	}
	const maxConcurrentPackageImport = 4
	loadExportsSem := make(chan struct{}, maxConcurrentPackageImport)

	// Producer: launch at most maxConcurrentPackageImport loaders at once;
	// each candidate reports on its own channel so answers are consumed in
	// candidate (priority) order below.
	go func() {
		for i, pkg := range candidates {
			select {
			case loadExportsSem <- struct{}{}:
				select {
				case <-done:
				default:
				}
			case <-done:
				return
			}
			pkg := pkg
			resc := rescv[i]
			go func() {
				if inTests {
					testMu.RLock()
					defer testMu.RUnlock()
				}
				defer func() { <-loadExportsSem }()
				exports := loadExports(pkgName, pkg.dir)

				// If it doesn't have the right
				// symbols, send nil to mean no match.
				for symbol := range symbols {
					if !exports[symbol] {
						pkg = nil
						break
					}
				}
				select {
				case resc <- pkg:
				case <-done:
				}
			}()
		}
	}()
	// Consume results in priority order; the first non-nil answer wins and
	// the deferred close(done) cancels the remaining loaders.
	for _, resc := range rescv {
		pkg := <-resc
		if pkg == nil {
			continue
		}
		// If the package name in the source doesn't match the import path's base,
		// return true so the rewriter adds a name (import foo "github.com/bar/go-foo")
		needsRename := path.Base(pkg.importPath) != pkgName
		return pkg.importPathShort, needsRename, nil
	}
	return "", false, nil
}

// pkgIsCandidate reports whether pkg is a candidate for satisfying a
// reference to pkgIdent in the file named by filename.
//
// This check is purely lexical and is meant to be as fast as possible
// because it's run over all $GOPATH directories to filter out poor
// candidates in order to limit the CPU and I/O later parsing the
// exports in candidate packages.
//
// filename is the file being formatted.
+// pkgIdent is the package being searched for, like "client" (if
+// searching for "client.New")
+func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool {
+	// Check "internal" and "vendor" visibility:
+	if !canUse(filename, pkg.dir) {
+		return false
+	}
+
+	// Speed optimization to minimize disk I/O:
+	// the last two components on disk must contain the
+	// package name somewhere.
+	//
+	// This permits mismatch naming like directory
+	// "go-foo" being package "foo", or "pkg.v3" being "pkg",
+	// or directory "google.golang.org/api/cloudbilling/v1"
+	// being package "cloudbilling", but doesn't
+	// permit a directory "foo" to be package
+	// "bar", which is strongly discouraged
+	// anyway. There's no reason goimports needs
+	// to be slow just to accommodate that.
+	lastTwo := lastTwoComponents(pkg.importPathShort)
+	if strings.Contains(lastTwo, pkgIdent) {
+		return true
+	}
+	if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
+		lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
+		if strings.Contains(lastTwo, pkgIdent) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func hasHyphenOrUpperASCII(s string) bool {
+	for i := 0; i < len(s); i++ {
+		b := s[i]
+		if b == '-' || ('A' <= b && b <= 'Z') {
+			return true
+		}
+	}
+	return false
+}
+
+func lowerASCIIAndRemoveHyphen(s string) (ret string) {
+	buf := make([]byte, 0, len(s))
+	for i := 0; i < len(s); i++ {
+		b := s[i]
+		switch {
+		case b == '-':
+			continue
+		case 'A' <= b && b <= 'Z':
+			buf = append(buf, b+('a'-'A'))
+		default:
+			buf = append(buf, b)
+		}
+	}
+	return string(buf)
+}
+
+// canUse reports whether the package in dir is usable from filename,
+// respecting the Go "internal" and "vendor" visibility rules.
+func canUse(filename, dir string) bool {
+	// Fast path check, before any allocations. If it doesn't contain vendor
+	// or internal, it's not tricky:
+	// Note that this can false-negative on directories like "notinternal",
+	// but we check it correctly below.
This is just a fast path. + if !strings.Contains(dir, "vendor") && !strings.Contains(dir, "internal") { + return true + } + + dirSlash := filepath.ToSlash(dir) + if !strings.Contains(dirSlash, "/vendor/") && !strings.Contains(dirSlash, "/internal/") && !strings.HasSuffix(dirSlash, "/internal") { + return true + } + // Vendor or internal directory only visible from children of parent. + // That means the path from the current directory to the target directory + // can contain ../vendor or ../internal but not ../foo/vendor or ../foo/internal + // or bar/vendor or bar/internal. + // After stripping all the leading ../, the only okay place to see vendor or internal + // is at the very beginning of the path. + absfile, err := filepath.Abs(filename) + if err != nil { + return false + } + absdir, err := filepath.Abs(dir) + if err != nil { + return false + } + rel, err := filepath.Rel(absfile, absdir) + if err != nil { + return false + } + relSlash := filepath.ToSlash(rel) + if i := strings.LastIndex(relSlash, "../"); i >= 0 { + relSlash = relSlash[i+len("../"):] + } + return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. +func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +type visitFn func(node ast.Node) ast.Visitor + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + return fn(node) +} + +func findImportStdlib(shortPkg string, symbols map[string]bool) (importPath string, rename, ok bool) { + for symbol := range symbols { + key := shortPkg + "." 
+ symbol + path := stdlib[key] + if path == "" { + if key == "rand.Read" { + continue + } + return "", false, false + } + if importPath != "" && importPath != path { + // Ambiguous. Symbols pointed to different things. + return "", false, false + } + importPath = path + } + if importPath == "" && shortPkg == "rand" && symbols["Read"] { + return "crypto/rand", false, true + } + return importPath, false, importPath != "" +} + +// fileInDir reports whether the provided file path looks like +// it's in dir. (without hitting the filesystem) +func fileInDir(file, dir string) bool { + rest := strings.TrimPrefix(file, dir) + if len(rest) == len(file) { + // dir is not a prefix of file. + return false + } + // Check for boundary: either nothing (file == dir), or a slash. + return len(rest) == 0 || rest[0] == '/' || rest[0] == '\\' +} diff --git a/refactor/eg/eg.go b/refactor/eg/eg.go index 4d56824bab..b935db4281 100644 --- a/refactor/eg/eg.go +++ b/refactor/eg/eg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.5 +// +build !go1.8 // Package eg implements the example-based refactoring tool whose // command-line is defined in golang.org/x/tools/cmd/eg. diff --git a/refactor/eg/eg18.go b/refactor/eg/eg18.go new file mode 100644 index 0000000000..177e652fdf --- /dev/null +++ b/refactor/eg/eg18.go @@ -0,0 +1,346 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +// Package eg implements the example-based refactoring tool whose +// command-line is defined in golang.org/x/tools/cmd/eg. +package eg // import "golang.org/x/tools/refactor/eg" + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/printer" + "go/token" + "go/types" + "os" +) + +const Help = ` +This tool implements example-based refactoring of expressions. 
+ +The transformation is specified as a Go file defining two functions, +'before' and 'after', of identical types. Each function body consists +of a single statement: either a return statement with a single +(possibly multi-valued) expression, or an expression statement. The +'before' expression specifies a pattern and the 'after' expression its +replacement. + + package P + import ( "errors"; "fmt" ) + func before(s string) error { return fmt.Errorf("%s", s) } + func after(s string) error { return errors.New(s) } + +The expression statement form is useful when the expression has no +result, for example: + + func before(msg string) { log.Fatalf("%s", msg) } + func after(msg string) { log.Fatal(msg) } + +The parameters of both functions are wildcards that may match any +expression assignable to that type. If the pattern contains multiple +occurrences of the same parameter, each must match the same expression +in the input for the pattern to match. If the replacement contains +multiple occurrences of the same parameter, the expression will be +duplicated, possibly changing the side-effects. + +The tool analyses all Go code in the packages specified by the +arguments, replacing all occurrences of the pattern with the +substitution. + +So, the transform above would change this input: + err := fmt.Errorf("%s", "error: " + msg) +to this output: + err := errors.New("error: " + msg) + +Identifiers, including qualified identifiers (p.X) are considered to +match only if they denote the same object. This allows correct +matching even in the presence of dot imports, named imports and +locally shadowed package names in the input program. + +Matching of type syntax is semantic, not syntactic: type syntax in the +pattern matches type syntax in the input if the types are identical. +Thus, func(x int) matches func(y int). + +This tool was inspired by other example-based refactoring tools, +'gofmt -r' for Go and Refaster for Java. 
+ + +LIMITATIONS +=========== + +EXPRESSIVENESS + +Only refactorings that replace one expression with another, regardless +of the expression's context, may be expressed. Refactoring arbitrary +statements (or sequences of statements) is a less well-defined problem +and is less amenable to this approach. + +A pattern that contains a function literal (and hence statements) +never matches. + +There is no way to generalize over related types, e.g. to express that +a wildcard may have any integer type, for example. + +It is not possible to replace an expression by one of a different +type, even in contexts where this is legal, such as x in fmt.Print(x). + +The struct literals T{x} and T{K: x} cannot both be matched by a single +template. + + +SAFETY + +Verifying that a transformation does not introduce type errors is very +complex in the general case. An innocuous-looking replacement of one +constant by another (e.g. 1 to 2) may cause type errors relating to +array types and indices, for example. The tool performs only very +superficial checks of type preservation. + + +IMPORTS + +Although the matching algorithm is fully aware of scoping rules, the +replacement algorithm is not, so the replacement code may contain +incorrect identifier syntax for imported objects if there are dot +imports, named imports or locally shadowed package names in the input +program. + +Imports are added as needed, but they are not removed as needed. +Run 'goimports' on the modified file for now. + +Dot imports are forbidden in the template. + + +TIPS +==== + +Sometimes a little creativity is required to implement the desired +migration. This section lists a few tips and tricks. + +To remove the final parameter from a function, temporarily change the +function signature so that the final parameter is variadic, as this +allows legal calls both with and without the argument. Then use eg to +remove the final argument from all callers, and remove the variadic +parameter by hand. 
The reverse process can be used to add a final +parameter. + +To add or remove parameters other than the final one, you must do it in +stages: (1) declare a variant function f' with a different name and the +desired parameters; (2) use eg to transform calls to f into calls to f', +changing the arguments as needed; (3) change the declaration of f to +match f'; (4) use eg to rename f' to f in all calls; (5) delete f'. +` + +// TODO(adonovan): expand upon the above documentation as an HTML page. + +// A Transformer represents a single example-based transformation. +type Transformer struct { + fset *token.FileSet + verbose bool + info *types.Info // combined type info for template/input/output ASTs + seenInfos map[*types.Info]bool + wildcards map[*types.Var]bool // set of parameters in func before() + env map[string]ast.Expr // maps parameter name to wildcard binding + importedObjs map[types.Object]*ast.SelectorExpr // objects imported by after(). + before, after ast.Expr + allowWildcards bool + + // Working state of Transform(): + nsubsts int // number of substitutions made + currentPkg *types.Package // package of current call +} + +// NewTransformer returns a transformer based on the specified template, +// a single-file package containing "before" and "after" functions as +// described in the package documentation. +// tmplInfo is the type information for tmplFile. +// +func NewTransformer(fset *token.FileSet, tmplPkg *types.Package, tmplFile *ast.File, tmplInfo *types.Info, verbose bool) (*Transformer, error) { + // Check the template. + beforeSig := funcSig(tmplPkg, "before") + if beforeSig == nil { + return nil, fmt.Errorf("no 'before' func found in template") + } + afterSig := funcSig(tmplPkg, "after") + if afterSig == nil { + return nil, fmt.Errorf("no 'after' func found in template") + } + + // TODO(adonovan): should we also check the names of the params match? 
+ if !types.Identical(afterSig, beforeSig) { + return nil, fmt.Errorf("before %s and after %s functions have different signatures", + beforeSig, afterSig) + } + + for _, imp := range tmplFile.Imports { + if imp.Name != nil && imp.Name.Name == "." { + // Dot imports are currently forbidden. We + // make the simplifying assumption that all + // imports are regular, without local renames. + // TODO(adonovan): document + return nil, fmt.Errorf("dot-import (of %s) in template", imp.Path.Value) + } + } + var beforeDecl, afterDecl *ast.FuncDecl + for _, decl := range tmplFile.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok { + switch decl.Name.Name { + case "before": + beforeDecl = decl + case "after": + afterDecl = decl + } + } + } + + before, err := soleExpr(beforeDecl) + if err != nil { + return nil, fmt.Errorf("before: %s", err) + } + after, err := soleExpr(afterDecl) + if err != nil { + return nil, fmt.Errorf("after: %s", err) + } + + wildcards := make(map[*types.Var]bool) + for i := 0; i < beforeSig.Params().Len(); i++ { + wildcards[beforeSig.Params().At(i)] = true + } + + // checkExprTypes returns an error if Tb (type of before()) is not + // safe to replace with Ta (type of after()). + // + // Only superficial checks are performed, and they may result in both + // false positives and negatives. + // + // Ideally, we would only require that the replacement be assignable + // to the context of a specific pattern occurrence, but the type + // checker doesn't record that information and it's complex to deduce. + // A Go type cannot capture all the constraints of a given expression + // context, which may include the size, constness, signedness, + // namedness or constructor of its type, and even the specific value + // of the replacement. (Consider the rule that array literal keys + // must be unique.) So we cannot hope to prove the safety of a + // transformation in general. 
+ Tb := tmplInfo.TypeOf(before) + Ta := tmplInfo.TypeOf(after) + if types.AssignableTo(Tb, Ta) { + // safe: replacement is assignable to pattern. + } else if tuple, ok := Tb.(*types.Tuple); ok && tuple.Len() == 0 { + // safe: pattern has void type (must appear in an ExprStmt). + } else { + return nil, fmt.Errorf("%s is not a safe replacement for %s", Ta, Tb) + } + + tr := &Transformer{ + fset: fset, + verbose: verbose, + wildcards: wildcards, + allowWildcards: true, + seenInfos: make(map[*types.Info]bool), + importedObjs: make(map[types.Object]*ast.SelectorExpr), + before: before, + after: after, + } + + // Combine type info from the template and input packages, and + // type info for the synthesized ASTs too. This saves us + // having to book-keep where each ast.Node originated as we + // construct the resulting hybrid AST. + tr.info = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + mergeTypeInfo(tr.info, tmplInfo) + + // Compute set of imported objects required by after(). + // TODO(adonovan): reject dot-imports in pattern + ast.Inspect(after, func(n ast.Node) bool { + if n, ok := n.(*ast.SelectorExpr); ok { + if _, ok := tr.info.Selections[n]; !ok { + // qualified ident + obj := tr.info.Uses[n.Sel] + tr.importedObjs[obj] = n + return false // prune + } + } + return true // recur + }) + + return tr, nil +} + +// WriteAST is a convenience function that writes AST f to the specified file. 
+func WriteAST(fset *token.FileSet, filename string, f *ast.File) (err error) {
+	fh, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err2 := fh.Close(); err != nil {
+			err = err2 // NOTE(review): intent is "prefer earlier error", but this branch runs only when err != nil, so it overwrites the earlier error and drops Close errors when err == nil — should the condition be err == nil?
+		}
+	}()
+	return format.Node(fh, fset, f)
+}
+
+// -- utilities --------------------------------------------------------
+
+// funcSig returns the signature of the specified package-level function.
+func funcSig(pkg *types.Package, name string) *types.Signature {
+	if f, ok := pkg.Scope().Lookup(name).(*types.Func); ok {
+		return f.Type().(*types.Signature)
+	}
+	return nil
+}
+
+// soleExpr returns the sole expression in the before/after template function.
+func soleExpr(fn *ast.FuncDecl) (ast.Expr, error) {
+	if fn.Body == nil {
+		return nil, fmt.Errorf("no body")
+	}
+	if len(fn.Body.List) != 1 {
+		return nil, fmt.Errorf("must contain a single statement")
+	}
+	switch stmt := fn.Body.List[0].(type) {
+	case *ast.ReturnStmt:
+		if len(stmt.Results) != 1 {
+			return nil, fmt.Errorf("return statement must have a single operand")
+		}
+		return stmt.Results[0], nil
+
+	case *ast.ExprStmt:
+		return stmt.X, nil
+	}
+
+	return nil, fmt.Errorf("must contain a single return or expression statement")
+}
+
+// mergeTypeInfo adds type info from src to dst.
+func mergeTypeInfo(dst, src *types.Info) { + for k, v := range src.Types { + dst.Types[k] = v + } + for k, v := range src.Defs { + dst.Defs[k] = v + } + for k, v := range src.Uses { + dst.Uses[k] = v + } + for k, v := range src.Selections { + dst.Selections[k] = v + } +} + +// (debugging only) +func astString(fset *token.FileSet, n ast.Node) string { + var buf bytes.Buffer + printer.Fprint(&buf, fset, n) + return buf.String() +} diff --git a/refactor/eg/match.go b/refactor/eg/match.go index 457f5e1f94..1152e4dcff 100644 --- a/refactor/eg/match.go +++ b/refactor/eg/match.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.5 +// +build !go1.8 package eg diff --git a/refactor/eg/match18.go b/refactor/eg/match18.go new file mode 100644 index 0000000000..01c96ab9ff --- /dev/null +++ b/refactor/eg/match18.go @@ -0,0 +1,251 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package eg + +import ( + "fmt" + "go/ast" + exact "go/constant" + "go/token" + "go/types" + "log" + "os" + "reflect" + + "golang.org/x/tools/go/ast/astutil" +) + +// matchExpr reports whether pattern x matches y. +// +// If tr.allowWildcards, Idents in x that refer to parameters are +// treated as wildcards, and match any y that is assignable to the +// parameter type; matchExpr records this correspondence in tr.env. +// Otherwise, matchExpr simply reports whether the two trees are +// equivalent. +// +// A wildcard appearing more than once in the pattern must +// consistently match the same tree. +// +func (tr *Transformer) matchExpr(x, y ast.Expr) bool { + if x == nil && y == nil { + return true + } + if x == nil || y == nil { + return false + } + x = unparen(x) + y = unparen(y) + + // Is x a wildcard? 
(a reference to a 'before' parameter) + if xobj, ok := tr.wildcardObj(x); ok { + return tr.matchWildcard(xobj, y) + } + + // Object identifiers (including pkg-qualified ones) + // are handled semantically, not syntactically. + xobj := isRef(x, tr.info) + yobj := isRef(y, tr.info) + if xobj != nil { + return xobj == yobj + } + if yobj != nil { + return false + } + + // TODO(adonovan): audit: we cannot assume these ast.Exprs + // contain non-nil pointers. e.g. ImportSpec.Name may be a + // nil *ast.Ident. + + if reflect.TypeOf(x) != reflect.TypeOf(y) { + return false + } + switch x := x.(type) { + case *ast.Ident: + log.Fatalf("unexpected Ident: %s", astString(tr.fset, x)) + + case *ast.BasicLit: + y := y.(*ast.BasicLit) + xval := exact.MakeFromLiteral(x.Value, x.Kind, 0) + yval := exact.MakeFromLiteral(y.Value, y.Kind, 0) + return exact.Compare(xval, token.EQL, yval) + + case *ast.FuncLit: + // func literals (and thus statement syntax) never match. + return false + + case *ast.CompositeLit: + y := y.(*ast.CompositeLit) + return (x.Type == nil) == (y.Type == nil) && + (x.Type == nil || tr.matchType(x.Type, y.Type)) && + tr.matchExprs(x.Elts, y.Elts) + + case *ast.SelectorExpr: + y := y.(*ast.SelectorExpr) + return tr.matchSelectorExpr(x, y) && + tr.info.Selections[x].Obj() == tr.info.Selections[y].Obj() + + case *ast.IndexExpr: + y := y.(*ast.IndexExpr) + return tr.matchExpr(x.X, y.X) && + tr.matchExpr(x.Index, y.Index) + + case *ast.SliceExpr: + y := y.(*ast.SliceExpr) + return tr.matchExpr(x.X, y.X) && + tr.matchExpr(x.Low, y.Low) && + tr.matchExpr(x.High, y.High) && + tr.matchExpr(x.Max, y.Max) && + x.Slice3 == y.Slice3 + + case *ast.TypeAssertExpr: + y := y.(*ast.TypeAssertExpr) + return tr.matchExpr(x.X, y.X) && + tr.matchType(x.Type, y.Type) + + case *ast.CallExpr: + y := y.(*ast.CallExpr) + match := tr.matchExpr // function call + if tr.info.Types[x.Fun].IsType() { + match = tr.matchType // type conversion + } + return x.Ellipsis.IsValid() == 
y.Ellipsis.IsValid() && + match(x.Fun, y.Fun) && + tr.matchExprs(x.Args, y.Args) + + case *ast.StarExpr: + y := y.(*ast.StarExpr) + return tr.matchExpr(x.X, y.X) + + case *ast.UnaryExpr: + y := y.(*ast.UnaryExpr) + return x.Op == y.Op && + tr.matchExpr(x.X, y.X) + + case *ast.BinaryExpr: + y := y.(*ast.BinaryExpr) + return x.Op == y.Op && + tr.matchExpr(x.X, y.X) && + tr.matchExpr(x.Y, y.Y) + + case *ast.KeyValueExpr: + y := y.(*ast.KeyValueExpr) + return tr.matchExpr(x.Key, y.Key) && + tr.matchExpr(x.Value, y.Value) + } + + panic(fmt.Sprintf("unhandled AST node type: %T", x)) +} + +func (tr *Transformer) matchExprs(xx, yy []ast.Expr) bool { + if len(xx) != len(yy) { + return false + } + for i := range xx { + if !tr.matchExpr(xx[i], yy[i]) { + return false + } + } + return true +} + +// matchType reports whether the two type ASTs denote identical types. +func (tr *Transformer) matchType(x, y ast.Expr) bool { + tx := tr.info.Types[x].Type + ty := tr.info.Types[y].Type + return types.Identical(tx, ty) +} + +func (tr *Transformer) wildcardObj(x ast.Expr) (*types.Var, bool) { + if x, ok := x.(*ast.Ident); ok && x != nil && tr.allowWildcards { + if xobj, ok := tr.info.Uses[x].(*types.Var); ok && tr.wildcards[xobj] { + return xobj, true + } + } + return nil, false +} + +func (tr *Transformer) matchSelectorExpr(x, y *ast.SelectorExpr) bool { + if xobj, ok := tr.wildcardObj(x.X); ok { + field := x.Sel.Name + yt := tr.info.TypeOf(y.X) + o, _, _ := types.LookupFieldOrMethod(yt, true, tr.currentPkg, field) + if o != nil { + tr.env[xobj.Name()] = y.X // record binding + return true + } + } + return tr.matchExpr(x.X, y.X) +} + +func (tr *Transformer) matchWildcard(xobj *types.Var, y ast.Expr) bool { + name := xobj.Name() + + if tr.verbose { + fmt.Fprintf(os.Stderr, "%s: wildcard %s -> %s?: ", + tr.fset.Position(y.Pos()), name, astString(tr.fset, y)) + } + + // Check that y is assignable to the declared type of the param. 
+	yt := tr.info.TypeOf(y)
+	if yt == nil {
+		// y has no type.
+		// Perhaps it is an *ast.Ellipsis in [...]T{}, or
+		// an *ast.KeyValueExpr in T{k: v}.
+		// Clearly these pseudo-expressions cannot match a
+		// wildcard, but it would be nice if we had a way to ignore
+		// the difference between T{v} and T{k:v} for structs.
+		return false
+	}
+	if !types.AssignableTo(yt, xobj.Type()) {
+		if tr.verbose {
+			fmt.Fprintf(os.Stderr, "%s not assignable to %s\n", yt, xobj.Type())
+		}
+		return false
+	}
+
+	// A wildcard matches any expression.
+	// If it appears multiple times in the pattern, it must match
+	// the same expression each time.
+	if old, ok := tr.env[name]; ok {
+		// found existing binding
+		tr.allowWildcards = false
+		r := tr.matchExpr(old, y)
+		if tr.verbose {
+			fmt.Fprintf(os.Stderr, "%t secondary match, primary was %s\n",
+				r, astString(tr.fset, old))
+		}
+		tr.allowWildcards = true
+		return r
+	}
+
+	if tr.verbose {
+		fmt.Fprintf(os.Stderr, "primary match\n")
+	}
+
+	tr.env[name] = y // record binding
+	return true
+}
+
+// -- utilities --------------------------------------------------------
+
+func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }
+
+// isRef returns the object referred to by this (possibly qualified)
+// identifier, or nil if the node is not a referring identifier.
+func isRef(n ast.Node, info *types.Info) types.Object {
+	switch n := n.(type) {
+	case *ast.Ident:
+		return info.Uses[n]
+
+	case *ast.SelectorExpr:
+		if _, ok := info.Selections[n]; !ok {
+			// qualified ident
+			return info.Uses[n.Sel]
+		}
+	}
+	return nil
+}
diff --git a/refactor/rename/check.go b/refactor/rename/check.go
index cf1999e89c..365d756148 100644
--- a/refactor/rename/check.go
+++ b/refactor/rename/check.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build go1.5 +// +build !go1.8 package rename diff --git a/refactor/rename/check18.go b/refactor/rename/check18.go new file mode 100644 index 0000000000..8bbfce139e --- /dev/null +++ b/refactor/rename/check18.go @@ -0,0 +1,860 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package rename + +// This file defines the safety checks for each kind of renaming. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/loader" + "golang.org/x/tools/refactor/satisfy" +) + +// errorf reports an error (e.g. conflict) and prevents file modification. +func (r *renamer) errorf(pos token.Pos, format string, args ...interface{}) { + r.hadConflicts = true + reportError(r.iprog.Fset.Position(pos), fmt.Sprintf(format, args...)) +} + +// check performs safety checks of the renaming of the 'from' object to r.to. +func (r *renamer) check(from types.Object) { + if r.objsToUpdate[from] { + return + } + r.objsToUpdate[from] = true + + // NB: order of conditions is important. + if from_, ok := from.(*types.PkgName); ok { + r.checkInFileBlock(from_) + } else if from_, ok := from.(*types.Label); ok { + r.checkLabel(from_) + } else if isPackageLevel(from) { + r.checkInPackageBlock(from) + } else if v, ok := from.(*types.Var); ok && v.IsField() { + r.checkStructField(v) + } else if f, ok := from.(*types.Func); ok && recv(f) != nil { + r.checkMethod(f) + } else if isLocal(from) { + r.checkInLocalScope(from) + } else { + r.errorf(from.Pos(), "unexpected %s object %q (please report a bug)\n", + objectKind(from), from) + } +} + +// checkInFileBlock performs safety checks for renames of objects in the file block, +// i.e. imported package names. +func (r *renamer) checkInFileBlock(from *types.PkgName) { + // Check import name is not "init". 
+ if r.to == "init" { + r.errorf(from.Pos(), "%q is not a valid imported package name", r.to) + } + + // Check for conflicts between file and package block. + if prev := from.Pkg().Scope().Lookup(r.to); prev != nil { + r.errorf(from.Pos(), "renaming this %s %q to %q would conflict", + objectKind(from), from.Name(), r.to) + r.errorf(prev.Pos(), "\twith this package member %s", + objectKind(prev)) + return // since checkInPackageBlock would report redundant errors + } + + // Check for conflicts in lexical scope. + r.checkInLexicalScope(from, r.packages[from.Pkg()]) + + // Finally, modify ImportSpec syntax to add or remove the Name as needed. + info, path, _ := r.iprog.PathEnclosingInterval(from.Pos(), from.Pos()) + if from.Imported().Name() == r.to { + // ImportSpec.Name not needed + path[1].(*ast.ImportSpec).Name = nil + } else { + // ImportSpec.Name needed + if spec := path[1].(*ast.ImportSpec); spec.Name == nil { + spec.Name = &ast.Ident{NamePos: spec.Path.Pos(), Name: r.to} + info.Defs[spec.Name] = from + } + } +} + +// checkInPackageBlock performs safety checks for renames of +// func/var/const/type objects in the package block. +func (r *renamer) checkInPackageBlock(from types.Object) { + // Check that there are no references to the name from another + // package if the renaming would make it unexported. + if ast.IsExported(from.Name()) && !ast.IsExported(r.to) { + for pkg, info := range r.packages { + if pkg == from.Pkg() { + continue + } + if id := someUse(info, from); id != nil && + !r.checkExport(id, pkg, from) { + break + } + } + } + + info := r.packages[from.Pkg()] + + // Check that in the package block, "init" is a function, and never referenced. + if r.to == "init" { + kind := objectKind(from) + if kind == "func" { + // Reject if intra-package references to it exist. 
+ for id, obj := range info.Uses { + if obj == from { + r.errorf(from.Pos(), + "renaming this func %q to %q would make it a package initializer", + from.Name(), r.to) + r.errorf(id.Pos(), "\tbut references to it exist") + break + } + } + } else { + r.errorf(from.Pos(), "you cannot have a %s at package level named %q", + kind, r.to) + } + } + + // Check for conflicts between package block and all file blocks. + for _, f := range info.Files { + fileScope := info.Info.Scopes[f] + b, prev := fileScope.LookupParent(r.to, token.NoPos) + if b == fileScope { + r.errorf(from.Pos(), "renaming this %s %q to %q would conflict", + objectKind(from), from.Name(), r.to) + r.errorf(prev.Pos(), "\twith this %s", + objectKind(prev)) + return // since checkInPackageBlock would report redundant errors + } + } + + // Check for conflicts in lexical scope. + if from.Exported() { + for _, info := range r.packages { + r.checkInLexicalScope(from, info) + } + } else { + r.checkInLexicalScope(from, info) + } +} + +func (r *renamer) checkInLocalScope(from types.Object) { + info := r.packages[from.Pkg()] + + // Is this object an implicit local var for a type switch? + // Each case has its own var, whose position is the decl of y, + // but Ident in that decl does not appear in the Uses map. + // + // switch y := x.(type) { // Defs[Ident(y)] is undefined + // case int: print(y) // Implicits[CaseClause(int)] = Var(y_int) + // case string: print(y) // Implicits[CaseClause(string)] = Var(y_string) + // } + // + var isCaseVar bool + for syntax, obj := range info.Implicits { + if _, ok := syntax.(*ast.CaseClause); ok && obj.Pos() == from.Pos() { + isCaseVar = true + r.check(obj) + } + } + + r.checkInLexicalScope(from, info) + + // Finally, if this was a type switch, change the variable y. + if isCaseVar { + _, path, _ := r.iprog.PathEnclosingInterval(from.Pos(), from.Pos()) + path[0].(*ast.Ident).Name = r.to // path is [Ident AssignStmt TypeSwitchStmt...] 
+ } +} + +// checkInLexicalScope performs safety checks that a renaming does not +// change the lexical reference structure of the specified package. +// +// For objects in lexical scope, there are three kinds of conflicts: +// same-, sub-, and super-block conflicts. We will illustrate all three +// using this example: +// +// var x int +// var z int +// +// func f(y int) { +// print(x) +// print(y) +// } +// +// Renaming x to z encounters a SAME-BLOCK CONFLICT, because an object +// with the new name already exists, defined in the same lexical block +// as the old object. +// +// Renaming x to y encounters a SUB-BLOCK CONFLICT, because there exists +// a reference to x from within (what would become) a hole in its scope. +// The definition of y in an (inner) sub-block would cast a shadow in +// the scope of the renamed variable. +// +// Renaming y to x encounters a SUPER-BLOCK CONFLICT. This is the +// converse situation: there is an existing definition of the new name +// (x) in an (enclosing) super-block, and the renaming would create a +// hole in its scope, within which there exist references to it. The +// new name casts a shadow in scope of the existing definition of x in +// the super-block. +// +// Removing the old name (and all references to it) is always safe, and +// requires no checks. +// +func (r *renamer) checkInLexicalScope(from types.Object, info *loader.PackageInfo) { + b := from.Parent() // the block defining the 'from' object + if b != nil { + toBlock, to := b.LookupParent(r.to, from.Parent().End()) + if toBlock == b { + // same-block conflict + r.errorf(from.Pos(), "renaming this %s %q to %q", + objectKind(from), from.Name(), r.to) + r.errorf(to.Pos(), "\tconflicts with %s in same block", + objectKind(to)) + return + } else if toBlock != nil { + // Check for super-block conflict. + // The name r.to is defined in a superblock. + // Is that name referenced from within this block? 
+			forEachLexicalRef(info, to, func(id *ast.Ident, block *types.Scope) bool {
+				_, obj := lexicalLookup(block, from.Name(), id.Pos())
+				if obj == from {
+					// super-block conflict
+					r.errorf(from.Pos(), "renaming this %s %q to %q",
+						objectKind(from), from.Name(), r.to)
+					r.errorf(id.Pos(), "\twould shadow this reference")
+					r.errorf(to.Pos(), "\tto the %s declared here",
+						objectKind(to))
+					return false // stop
+				}
+				return true
+			})
+		}
+	}
+
+	// Check for sub-block conflict.
+	// Is there an intervening definition of r.to between
+	// the block defining 'from' and some reference to it?
+	forEachLexicalRef(info, from, func(id *ast.Ident, block *types.Scope) bool {
+		// Find the block that defines the found reference.
+		// It may be an ancestor.
+		fromBlock, _ := lexicalLookup(block, from.Name(), id.Pos())
+
+		// See what r.to would resolve to in the same scope.
+		toBlock, to := lexicalLookup(block, r.to, id.Pos())
+		if to != nil {
+			// sub-block conflict
+			if deeper(toBlock, fromBlock) {
+				r.errorf(from.Pos(), "renaming this %s %q to %q",
+					objectKind(from), from.Name(), r.to)
+				r.errorf(id.Pos(), "\twould cause this reference to become shadowed")
+				r.errorf(to.Pos(), "\tby this intervening %s definition",
+					objectKind(to))
+				return false // stop
+			}
+		}
+		return true
+	})
+
+	// Renaming a type that is used as an embedded field
+	// requires renaming the field too. e.g.
+	// 	type T int // if we rename this to U..
+	// 	var s struct {T}
+	// 	print(s.T) // ...this must change too
+	if _, ok := from.(*types.TypeName); ok {
+		for id, obj := range info.Uses {
+			if obj == from {
+				if field := info.Defs[id]; field != nil {
+					r.check(field)
+				}
+			}
+		}
+	}
+}
+
+// lexicalLookup is like (*types.Scope).LookupParent but respects the
+// environment visible at pos. It assumes the relative position
+// information is correct within each file.
+func lexicalLookup(block *types.Scope, name string, pos token.Pos) (*types.Scope, types.Object) {
+	for b := block; b != nil; b = b.Parent() {
+		obj := b.Lookup(name)
+		// The scope of a package-level object is the entire package,
+		// so ignore pos in that case.
+		// No analogous clause is needed for file-level objects
+		// since no reference can appear before an import decl.
+		if obj != nil && (b == obj.Pkg().Scope() || obj.Pos() < pos) {
+			return b, obj
+		}
+	}
+	return nil, nil
+}
+
+// deeper reports whether block x is lexically deeper than y.
+func deeper(x, y *types.Scope) bool {
+	if x == y || x == nil {
+		return false
+	} else if y == nil {
+		return true
+	} else {
+		return deeper(x.Parent(), y.Parent())
+	}
+}
+
+// forEachLexicalRef calls fn(id, block) for each identifier id in package
+// info that is a reference to obj in lexical scope. block is the
+// lexical block enclosing the reference. If fn returns false the
+// iteration is terminated and forEachLexicalRef returns false.
+func forEachLexicalRef(info *loader.PackageInfo, obj types.Object, fn func(id *ast.Ident, block *types.Scope) bool) bool {
+	ok := true
+	var stack []ast.Node
+
+	var visit func(n ast.Node) bool
+	visit = func(n ast.Node) bool {
+		if n == nil {
+			stack = stack[:len(stack)-1] // pop
+			return false
+		}
+		if !ok {
+			return false // bail out
+		}
+
+		stack = append(stack, n) // push
+		switch n := n.(type) {
+		case *ast.Ident:
+			if info.Uses[n] == obj {
+				block := enclosingBlock(&info.Info, stack)
+				if !fn(n, block) {
+					ok = false
+				}
+			}
+			return visit(nil) // pop stack
+
+		case *ast.SelectorExpr:
+			// don't visit n.Sel
+			ast.Inspect(n.X, visit)
+			return visit(nil) // pop stack, don't descend
+
+		case *ast.CompositeLit:
+			// Handle recursion ourselves for struct literals
+			// so we don't visit field identifiers.
+ tv := info.Types[n] + if _, ok := deref(tv.Type).Underlying().(*types.Struct); ok { + if n.Type != nil { + ast.Inspect(n.Type, visit) + } + for _, elt := range n.Elts { + if kv, ok := elt.(*ast.KeyValueExpr); ok { + ast.Inspect(kv.Value, visit) + } else { + ast.Inspect(elt, visit) + } + } + return visit(nil) // pop stack, don't descend + } + } + return true + } + + for _, f := range info.Files { + ast.Inspect(f, visit) + if len(stack) != 0 { + panic(stack) + } + if !ok { + break + } + } + return ok +} + +// enclosingBlock returns the innermost block enclosing the specified +// AST node, specified in the form of a path from the root of the file, +// [file...n]. +func enclosingBlock(info *types.Info, stack []ast.Node) *types.Scope { + for i := range stack { + n := stack[len(stack)-1-i] + // For some reason, go/types always associates a + // function's scope with its FuncType. + // TODO(adonovan): feature or a bug? + switch f := n.(type) { + case *ast.FuncDecl: + n = f.Type + case *ast.FuncLit: + n = f.Type + } + if b := info.Scopes[n]; b != nil { + return b + } + } + panic("no Scope for *ast.File") +} + +func (r *renamer) checkLabel(label *types.Label) { + // Check there are no identical labels in the function's label block. + // (Label blocks don't nest, so this is easy.) + if prev := label.Parent().Lookup(r.to); prev != nil { + r.errorf(label.Pos(), "renaming this label %q to %q", label.Name(), prev.Name()) + r.errorf(prev.Pos(), "\twould conflict with this one") + } +} + +// checkStructField checks that the field renaming will not cause +// conflicts at its declaration, or ambiguity or changes to any selection. +func (r *renamer) checkStructField(from *types.Var) { + // Check that the struct declaration is free of field conflicts, + // and field/method conflicts. + + // go/types offers no easy way to get from a field (or interface + // method) to its declaring struct (or interface), so we must + // ascend the AST. 
+ info, path, _ := r.iprog.PathEnclosingInterval(from.Pos(), from.Pos()) + // path matches this pattern: + // [Ident SelectorExpr? StarExpr? Field FieldList StructType ParenExpr* ... File] + + // Ascend to FieldList. + var i int + for { + if _, ok := path[i].(*ast.FieldList); ok { + break + } + i++ + } + i++ + tStruct := path[i].(*ast.StructType) + i++ + // Ascend past parens (unlikely). + for { + _, ok := path[i].(*ast.ParenExpr) + if !ok { + break + } + i++ + } + if spec, ok := path[i].(*ast.TypeSpec); ok { + // This struct is also a named type. + // We must check for direct (non-promoted) field/field + // and method/field conflicts. + named := info.Defs[spec.Name].Type() + prev, indices, _ := types.LookupFieldOrMethod(named, true, info.Pkg, r.to) + if len(indices) == 1 { + r.errorf(from.Pos(), "renaming this field %q to %q", + from.Name(), r.to) + r.errorf(prev.Pos(), "\twould conflict with this %s", + objectKind(prev)) + return // skip checkSelections to avoid redundant errors + } + } else { + // This struct is not a named type. + // We need only check for direct (non-promoted) field/field conflicts. + T := info.Types[tStruct].Type.Underlying().(*types.Struct) + for i := 0; i < T.NumFields(); i++ { + if prev := T.Field(i); prev.Name() == r.to { + r.errorf(from.Pos(), "renaming this field %q to %q", + from.Name(), r.to) + r.errorf(prev.Pos(), "\twould conflict with this field") + return // skip checkSelections to avoid redundant errors + } + } + } + + // Renaming an anonymous field requires renaming the type too. e.g. + // print(s.T) // if we rename T to U, + // type T int // this and + // var s struct {T} // this must change too. + if from.Anonymous() { + if named, ok := from.Type().(*types.Named); ok { + r.check(named.Obj()) + } else if named, ok := deref(from.Type()).(*types.Named); ok { + r.check(named.Obj()) + } + } + + // Check integrity of existing (field and method) selections. 
+	r.checkSelections(from)
+}
+
+// checkSelections checks that all uses and selections that resolve to
+// the specified object would continue to do so after the renaming.
+func (r *renamer) checkSelections(from types.Object) {
+	for pkg, info := range r.packages {
+		if id := someUse(info, from); id != nil {
+			if !r.checkExport(id, pkg, from) {
+				return
+			}
+		}
+
+		for syntax, sel := range info.Selections {
+			// There may be extant selections of only the old
+			// name or only the new name, so we must check both.
+			// (If neither, the renaming is sound.)
+			//
+			// In both cases, we wish to compare the lengths
+			// of the implicit field path (Selection.Index)
+			// to see if the renaming would change it.
+			//
+			// If a selection that resolves to 'from', when renamed,
+			// would yield a path of the same or shorter length,
+			// this indicates ambiguity or a changed referent,
+			// analogous to same- or sub-block lexical conflict.
+			//
+			// If a selection using the name 'to' would
+			// yield a path of the same or shorter length,
+			// this indicates ambiguity or shadowing,
+			// analogous to same- or super-block lexical conflict.
+
+			// TODO(adonovan): fix: derive from Types[syntax.X].Mode
+			// TODO(adonovan): test with pointer, value, addressable value.
+			isAddressable := true
+
+			if sel.Obj() == from {
+				if obj, indices, _ := types.LookupFieldOrMethod(sel.Recv(), isAddressable, from.Pkg(), r.to); obj != nil {
+					// Renaming this existing selection of
+					// 'from' may block access to an existing
+					// type member named 'to'.
+					delta := len(indices) - len(sel.Index())
+					if delta > 0 {
+						continue // no ambiguity
+					}
+					r.selectionConflict(from, delta, syntax, obj)
+					return
+				}
+
+			} else if sel.Obj().Name() == r.to {
+				if obj, indices, _ := types.LookupFieldOrMethod(sel.Recv(), isAddressable, from.Pkg(), from.Name()); obj == from {
+					// Renaming 'from' may cause this existing
+					// selection of the name 'to' to change
+					// its meaning.
+ delta := len(indices) - len(sel.Index()) + if delta > 0 { + continue // no ambiguity + } + r.selectionConflict(from, -delta, syntax, sel.Obj()) + return + } + } + } + } +} + +func (r *renamer) selectionConflict(from types.Object, delta int, syntax *ast.SelectorExpr, obj types.Object) { + r.errorf(from.Pos(), "renaming this %s %q to %q", + objectKind(from), from.Name(), r.to) + + switch { + case delta < 0: + // analogous to sub-block conflict + r.errorf(syntax.Sel.Pos(), + "\twould change the referent of this selection") + r.errorf(obj.Pos(), "\tof this %s", objectKind(obj)) + case delta == 0: + // analogous to same-block conflict + r.errorf(syntax.Sel.Pos(), + "\twould make this reference ambiguous") + r.errorf(obj.Pos(), "\twith this %s", objectKind(obj)) + case delta > 0: + // analogous to super-block conflict + r.errorf(syntax.Sel.Pos(), + "\twould shadow this selection") + r.errorf(obj.Pos(), "\tof the %s declared here", + objectKind(obj)) + } +} + +// checkMethod performs safety checks for renaming a method. +// There are three hazards: +// - declaration conflicts +// - selection ambiguity/changes +// - entailed renamings of assignable concrete/interface types. +// We reject renamings initiated at concrete methods if it would +// change the assignability relation. For renamings of abstract +// methods, we rename all methods transitively coupled to it via +// assignability. +func (r *renamer) checkMethod(from *types.Func) { + // e.g. error.Error + if from.Pkg() == nil { + r.errorf(from.Pos(), "you cannot rename built-in method %s", from) + return + } + + // ASSIGNABILITY: We reject renamings of concrete methods that + // would break a 'satisfy' constraint; but renamings of abstract + // methods are allowed to proceed, and we rename affected + // concrete and abstract methods as necessary. It is the + // initial method that determines the policy. + + // Check for conflict at point of declaration. + // Check to ensure preservation of assignability requirements. 
+ R := recv(from).Type() + if isInterface(R) { + // Abstract method + + // declaration + prev, _, _ := types.LookupFieldOrMethod(R, false, from.Pkg(), r.to) + if prev != nil { + r.errorf(from.Pos(), "renaming this interface method %q to %q", + from.Name(), r.to) + r.errorf(prev.Pos(), "\twould conflict with this method") + return + } + + // Check all interfaces that embed this one for + // declaration conflicts too. + for _, info := range r.packages { + // Start with named interface types (better errors) + for _, obj := range info.Defs { + if obj, ok := obj.(*types.TypeName); ok && isInterface(obj.Type()) { + f, _, _ := types.LookupFieldOrMethod( + obj.Type(), false, from.Pkg(), from.Name()) + if f == nil { + continue + } + t, _, _ := types.LookupFieldOrMethod( + obj.Type(), false, from.Pkg(), r.to) + if t == nil { + continue + } + r.errorf(from.Pos(), "renaming this interface method %q to %q", + from.Name(), r.to) + r.errorf(t.Pos(), "\twould conflict with this method") + r.errorf(obj.Pos(), "\tin named interface type %q", obj.Name()) + } + } + + // Now look at all literal interface types (includes named ones again). + for e, tv := range info.Types { + if e, ok := e.(*ast.InterfaceType); ok { + _ = e + _ = tv.Type.(*types.Interface) + // TODO(adonovan): implement same check as above. + } + } + } + + // assignability + // + // Find the set of concrete or abstract methods directly + // coupled to abstract method 'from' by some + // satisfy.Constraint, and rename them too. + for key := range r.satisfy() { + // key = (lhs, rhs) where lhs is always an interface. + + lsel := r.msets.MethodSet(key.LHS).Lookup(from.Pkg(), from.Name()) + if lsel == nil { + continue + } + rmethods := r.msets.MethodSet(key.RHS) + rsel := rmethods.Lookup(from.Pkg(), from.Name()) + if rsel == nil { + continue + } + + // If both sides have a method of this name, + // and one of them is m, the other must be coupled. 
+ var coupled *types.Func + switch from { + case lsel.Obj(): + coupled = rsel.Obj().(*types.Func) + case rsel.Obj(): + coupled = lsel.Obj().(*types.Func) + default: + continue + } + + // We must treat concrete-to-interface + // constraints like an implicit selection C.f of + // each interface method I.f, and check that the + // renaming leaves the selection unchanged and + // unambiguous. + // + // Fun fact: the implicit selection of C.f + // type I interface{f()} + // type C struct{I} + // func (C) g() + // var _ I = C{} // here + // yields abstract method I.f. This can make error + // messages less than obvious. + // + if !isInterface(key.RHS) { + // The logic below was derived from checkSelections. + + rtosel := rmethods.Lookup(from.Pkg(), r.to) + if rtosel != nil { + rto := rtosel.Obj().(*types.Func) + delta := len(rsel.Index()) - len(rtosel.Index()) + if delta < 0 { + continue // no ambiguity + } + + // TODO(adonovan): record the constraint's position. + keyPos := token.NoPos + + r.errorf(from.Pos(), "renaming this method %q to %q", + from.Name(), r.to) + if delta == 0 { + // analogous to same-block conflict + r.errorf(keyPos, "\twould make the %s method of %s invoked via interface %s ambiguous", + r.to, key.RHS, key.LHS) + r.errorf(rto.Pos(), "\twith (%s).%s", + recv(rto).Type(), r.to) + } else { + // analogous to super-block conflict + r.errorf(keyPos, "\twould change the %s method of %s invoked via interface %s", + r.to, key.RHS, key.LHS) + r.errorf(coupled.Pos(), "\tfrom (%s).%s", + recv(coupled).Type(), r.to) + r.errorf(rto.Pos(), "\tto (%s).%s", + recv(rto).Type(), r.to) + } + return // one error is enough + } + } + + if !r.changeMethods { + // This should be unreachable. 
+			r.errorf(from.Pos(), "internal error: during renaming of abstract method %s", from)
+			r.errorf(coupled.Pos(), "\tchangedMethods=false, coupled method=%s", coupled)
+			r.errorf(from.Pos(), "\tPlease file a bug report")
+			return
+		}
+
+		// Rename the coupled method to preserve assignability.
+		r.check(coupled)
+	}
+	} else {
+		// Concrete method
+
+		// declaration
+		prev, indices, _ := types.LookupFieldOrMethod(R, true, from.Pkg(), r.to)
+		if prev != nil && len(indices) == 1 {
+			r.errorf(from.Pos(), "renaming this method %q to %q",
+				from.Name(), r.to)
+			r.errorf(prev.Pos(), "\twould conflict with this %s",
+				objectKind(prev))
+			return
+		}
+
+		// assignability
+		//
+		// Find the set of abstract methods coupled to concrete
+		// method 'from' by some satisfy.Constraint, and rename
+		// them too.
+		//
+		// Coupling may be indirect, e.g. I.f <-> C.f via type D.
+		//
+		// type I interface {f()}
+		// type C int
+		// func (C) f()
+		// type D struct{C}
+		// var _ I = D{}
+		//
+		for key := range r.satisfy() {
+			// key = (lhs, rhs) where lhs is always an interface.
+			if isInterface(key.RHS) {
+				continue
+			}
+			rsel := r.msets.MethodSet(key.RHS).Lookup(from.Pkg(), from.Name())
+			if rsel == nil || rsel.Obj() != from {
+				continue // rhs does not have the method
+			}
+			lsel := r.msets.MethodSet(key.LHS).Lookup(from.Pkg(), from.Name())
+			if lsel == nil {
+				continue
+			}
+			imeth := lsel.Obj().(*types.Func)
+
+			// imeth is the abstract method (e.g. I.f)
+			// and key.RHS is the concrete coupling type (e.g. D).
+ if !r.changeMethods { + r.errorf(from.Pos(), "renaming this method %q to %q", + from.Name(), r.to) + var pos token.Pos + var iface string + + I := recv(imeth).Type() + if named, ok := I.(*types.Named); ok { + pos = named.Obj().Pos() + iface = "interface " + named.Obj().Name() + } else { + pos = from.Pos() + iface = I.String() + } + r.errorf(pos, "\twould make %s no longer assignable to %s", + key.RHS, iface) + r.errorf(imeth.Pos(), "\t(rename %s.%s if you intend to change both types)", + I, from.Name()) + return // one error is enough + } + + // Rename the coupled interface method to preserve assignability. + r.check(imeth) + } + } + + // Check integrity of existing (field and method) selections. + // We skip this if there were errors above, to avoid redundant errors. + r.checkSelections(from) +} + +func (r *renamer) checkExport(id *ast.Ident, pkg *types.Package, from types.Object) bool { + // Reject cross-package references if r.to is unexported. + // (Such references may be qualified identifiers or field/method + // selections.) + if !ast.IsExported(r.to) && pkg != from.Pkg() { + r.errorf(from.Pos(), + "renaming this %s %q to %q would make it unexported", + objectKind(from), from.Name(), r.to) + r.errorf(id.Pos(), "\tbreaking references from packages such as %q", + pkg.Path()) + return false + } + return true +} + +// satisfy returns the set of interface satisfaction constraints. +func (r *renamer) satisfy() map[satisfy.Constraint]bool { + if r.satisfyConstraints == nil { + // Compute on demand: it's expensive. + var f satisfy.Finder + for _, info := range r.packages { + f.Find(&info.Info, info.Files) + } + r.satisfyConstraints = f.Result + } + return r.satisfyConstraints +} + +// -- helpers ---------------------------------------------------------- + +// recv returns the method's receiver. +func recv(meth *types.Func) *types.Var { + return meth.Type().(*types.Signature).Recv() +} + +// someUse returns an arbitrary use of obj within info. 
+func someUse(info *loader.PackageInfo, obj types.Object) *ast.Ident { + for id, o := range info.Uses { + if o == obj { + return id + } + } + return nil +} + +// -- Plundered from golang.org/x/tools/go/ssa ----------------- + +func isInterface(T types.Type) bool { return types.IsInterface(T) } + +func deref(typ types.Type) types.Type { + if p, _ := typ.(*types.Pointer); p != nil { + return p.Elem() + } + return typ +} diff --git a/refactor/rename/util.go b/refactor/rename/util.go index bcf684d74a..a400be7d11 100644 --- a/refactor/rename/util.go +++ b/refactor/rename/util.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.5 +// +build !go1.8 package rename diff --git a/refactor/rename/util18.go b/refactor/rename/util18.go new file mode 100644 index 0000000000..2594fc834b --- /dev/null +++ b/refactor/rename/util18.go @@ -0,0 +1,107 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package rename + +import ( + "go/ast" + "go/token" + "go/types" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "unicode" + + "golang.org/x/tools/go/ast/astutil" +) + +func objectKind(obj types.Object) string { + switch obj := obj.(type) { + case *types.PkgName: + return "imported package name" + case *types.TypeName: + return "type" + case *types.Var: + if obj.IsField() { + return "field" + } + case *types.Func: + if obj.Type().(*types.Signature).Recv() != nil { + return "method" + } + } + // label, func, var, const + return strings.ToLower(strings.TrimPrefix(reflect.TypeOf(obj).String(), "*types.")) +} + +func typeKind(T types.Type) string { + return strings.ToLower(strings.TrimPrefix(reflect.TypeOf(T.Underlying()).String(), "*types.")) +} + +// NB: for renamings, blank is not considered valid. 
+func isValidIdentifier(id string) bool { + if id == "" || id == "_" { + return false + } + for i, r := range id { + if !isLetter(r) && (i == 0 || !isDigit(r)) { + return false + } + } + return token.Lookup(id) == token.IDENT +} + +// isLocal reports whether obj is local to some function. +// Precondition: not a struct field or interface method. +func isLocal(obj types.Object) bool { + // [... 5=stmt 4=func 3=file 2=pkg 1=universe] + var depth int + for scope := obj.Parent(); scope != nil; scope = scope.Parent() { + depth++ + } + return depth >= 4 +} + +func isPackageLevel(obj types.Object) bool { + return obj.Pkg().Scope().Lookup(obj.Name()) == obj +} + +// -- Plundered from go/scanner: --------------------------------------- + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// -- Plundered from golang.org/x/tools/cmd/guru ----------------- + +// sameFile returns true if x and y have the same basename and denote +// the same file. +// +func sameFile(x, y string) bool { + if runtime.GOOS == "windows" { + x = filepath.ToSlash(x) + y = filepath.ToSlash(y) + } + if x == y { + return true + } + if filepath.Base(x) == filepath.Base(y) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} + +func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } diff --git a/refactor/satisfy/find.go b/refactor/satisfy/find.go index a346c1ac72..1bc43cf8d5 100644 --- a/refactor/satisfy/find.go +++ b/refactor/satisfy/find.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build go1.5 +// +build !go1.8 // Package satisfy inspects the type-checked ASTs of Go packages and // reports the set of discovered type constraints of the form (lhs, rhs diff --git a/refactor/satisfy/find18.go b/refactor/satisfy/find18.go new file mode 100644 index 0000000000..abb15d9179 --- /dev/null +++ b/refactor/satisfy/find18.go @@ -0,0 +1,707 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +// Package satisfy inspects the type-checked ASTs of Go packages and +// reports the set of discovered type constraints of the form (lhs, rhs +// Type) where lhs is a non-trivial interface, rhs satisfies this +// interface, and this fact is necessary for the package to be +// well-typed. +// +// THIS PACKAGE IS EXPERIMENTAL AND MAY CHANGE AT ANY TIME. +// +// It is provided only for the gorename tool. Ideally this +// functionality will become part of the type-checker in due course, +// since it is computing it anyway, and it is robust for ill-typed +// inputs, which this package is not. +// +package satisfy // import "golang.org/x/tools/refactor/satisfy" + +// NOTES: +// +// We don't care about numeric conversions, so we don't descend into +// types or constant expressions. This is unsound because +// constant expressions can contain arbitrary statements, e.g. +// const x = len([1]func(){func() { +// ... +// }}) +// +// TODO(adonovan): make this robust against ill-typed input. +// Or move it into the type-checker. +// +// Assignability conversions are possible in the following places: +// - in assignments y = x, y := x, var y = x. 
+// - from call argument types to formal parameter types
+// - in append and delete calls
+// - from return operands to result parameter types
+// - in composite literal T{k:v}, from k and v to T's field/element/key type
+// - in map[key] from key to the map's key type
+// - in comparisons x==y and switch x { case y: }.
+// - in explicit conversions T(x)
+// - in sends ch <- x, from x to the channel element type
+// - in type assertions x.(T) and switch x.(type) { case T: }
+//
+// The results of this pass provide information equivalent to the
+// ssa.MakeInterface and ssa.ChangeInterface instructions.
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+
+	"golang.org/x/tools/go/ast/astutil"
+	"golang.org/x/tools/go/types/typeutil"
+)
+
+// A Constraint records the fact that the RHS type does and must
+// satisfy the LHS type, which is an interface.
+// The names are suggestive of an assignment statement LHS = RHS.
+type Constraint struct {
+	LHS, RHS types.Type
+}
+
+// A Finder inspects the type-checked ASTs of Go packages and
+// accumulates the set of type constraints (x, y) such that x is
+// assignable to y, y is an interface, and both x and y have methods.
+//
+// In other words, it returns the subset of the "implements" relation
+// that is checked during compilation of a package. Refactoring tools
+// will need to preserve at least this part of the relation to ensure
+// continued compilation.
+//
+type Finder struct {
+	Result    map[Constraint]bool
+	msetcache typeutil.MethodSetCache
+
+	// per-Find state
+	info *types.Info
+	sig  *types.Signature
+}
+
+// Find inspects a single package, populating Result with its pairs of
+// constrained types.
+//
+// The result is non-canonical and thus may contain duplicates (but this
+// tends to preserve names of interface types better).
+//
+// The package must be free of type errors, and
+// info.{Defs,Uses,Selections,Types} must have been populated by the
+// type-checker.
//
// Find populates f.Result with the pairs of types related by
// assignment, conversion, comparison, or type assertion in the given
// files, which must belong to the package described by info.
// (See the assign method below for exactly which pairs are recorded.)
func (f *Finder) Find(info *types.Info, files []*ast.File) {
	// Lazily allocate the result set so the zero Finder is usable.
	if f.Result == nil {
		f.Result = make(map[Constraint]bool)
	}

	f.info = info
	for _, file := range files {
		for _, d := range file.Decls {
			switch d := d.(type) {
			case *ast.GenDecl:
				if d.Tok == token.VAR { // ignore consts
					for _, spec := range d.Specs {
						f.valueSpec(spec.(*ast.ValueSpec))
					}
				}

			case *ast.FuncDecl:
				if d.Body != nil {
					// Remember the enclosing signature so that
					// return statements in the body can be
					// checked against the declared results.
					f.sig = f.info.Defs[d.Name].Type().(*types.Signature)
					f.stmt(d.Body)
					f.sig = nil
				}
			}
		}
	}
	f.info = nil // don't retain the type information beyond this call
}

// Frequently needed singleton types.
var (
	tInvalid     = types.Typ[types.Invalid]
	tUntypedBool = types.Typ[types.UntypedBool]
	tUntypedNil  = types.Typ[types.UntypedNil]
)

// exprN visits an expression in a multi-value context and returns its
// (tuple) type.  Only the expression forms handled below can occur in
// such a context; anything else indicates an ill-typed tree and panics.
func (f *Finder) exprN(e ast.Expr) types.Type {
	typ := f.info.Types[e].Type.(*types.Tuple)
	switch e := e.(type) {
	case *ast.ParenExpr:
		return f.exprN(e.X)

	case *ast.CallExpr:
		// x, err := f(args)
		sig := f.expr(e.Fun).Underlying().(*types.Signature)
		f.call(sig, e.Args)

	case *ast.IndexExpr:
		// y, ok := x[i]
		// The two-result form is only legal for map indexing,
		// hence the unchecked *types.Map assertion.
		x := f.expr(e.X)
		f.assign(f.expr(e.Index), x.Underlying().(*types.Map).Key())

	case *ast.TypeAssertExpr:
		// y, ok := x.(T)
		f.typeAssert(f.expr(e.X), typ.At(0).Type())

	case *ast.UnaryExpr: // must be receive <-
		// y, ok := <-x
		f.expr(e.X)

	default:
		panic(e)
	}
	return typ
}

// call records the constraints arising from passing args to a function
// of the given signature, handling ellipsis calls, f(g()) tuple
// spreading, and variadic parameters.
func (f *Finder) call(sig *types.Signature, args []ast.Expr) {
	if len(args) == 0 {
		return
	}

	// Ellipsis call?  e.g. f(x, y, z...)
	if _, ok := args[len(args)-1].(*ast.Ellipsis); ok {
		for i, arg := range args {
			// The final arg is a slice, and so is the final param.
			f.assign(sig.Params().At(i).Type(), f.expr(arg))
		}
		return
	}

	var argtypes []types.Type

	// Gather the effective actual parameter types.
	if tuple, ok := f.info.Types[args[0]].Type.(*types.Tuple); ok {
		// f(g()) call where g has multiple results?
		f.expr(args[0])
		// unpack the tuple
		for i := 0; i < tuple.Len(); i++ {
			argtypes = append(argtypes, tuple.At(i).Type())
		}
	} else {
		for _, arg := range args {
			argtypes = append(argtypes, f.expr(arg))
		}
	}

	// Assign the actuals to the formals.
	if !sig.Variadic() {
		for i, argtype := range argtypes {
			f.assign(sig.Params().At(i).Type(), argtype)
		}
	} else {
		// The first n-1 parameters are assigned normally.
		nnormals := sig.Params().Len() - 1
		for i, argtype := range argtypes[:nnormals] {
			f.assign(sig.Params().At(i).Type(), argtype)
		}
		// Remaining args are assigned to elements of varargs slice.
		tElem := sig.Params().At(nnormals).Type().(*types.Slice).Elem()
		for i := nnormals; i < len(argtypes); i++ {
			f.assign(tElem, argtypes[i])
		}
	}
}

// builtin records the constraints arising from a call to the built-in
// function obj.  Built-ins whose arguments are not ordinary values
// (make, new) or whose parameters have no useful signature types
// (append, delete) need special handling; everything else is treated
// as an ordinary call.  T is the type of the call expression and is
// returned unchanged.
func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Expr, T types.Type) types.Type {
	switch obj.Name() {
	case "make", "new":
		// skip the type operand
		for _, arg := range args[1:] {
			f.expr(arg)
		}

	case "append":
		s := f.expr(args[0])
		if _, ok := args[len(args)-1].(*ast.Ellipsis); ok && len(args) == 2 {
			// append(x, y...) including append([]byte, "foo"...)
			f.expr(args[1])
		} else {
			// append(x, y, z)
			tElem := s.Underlying().(*types.Slice).Elem()
			for _, arg := range args[1:] {
				f.assign(tElem, f.expr(arg))
			}
		}

	case "delete":
		m := f.expr(args[0])
		k := f.expr(args[1])
		f.assign(m.Underlying().(*types.Map).Key(), k)

	default:
		// ordinary call
		f.call(sig, args)
	}

	return T
}

// extract returns the type of the i'th component of the tuple, or
// tInvalid if tuple is not a tuple type or has too few components
// (e.g. a call in an ill-typed program).
func (f *Finder) extract(tuple types.Type, i int) types.Type {
	if tuple, ok := tuple.(*types.Tuple); ok && i < tuple.Len() {
		return tuple.At(i).Type()
	}
	return tInvalid
}

// valueSpec records the constraints arising from a var declaration,
// covering both the 1:1 form (var x, y = f(), g()) and the n:1 tuple
// form (var x, y = f()).  Initializers are only checked against T when
// an explicit type was declared.
func (f *Finder) valueSpec(spec *ast.ValueSpec) {
	var T types.Type
	if spec.Type != nil {
		T = f.info.Types[spec.Type].Type
	}
	switch len(spec.Values) {
	case len(spec.Names): // e.g. var x, y = f(), g()
		for _, value := range spec.Values {
			v := f.expr(value)
			if T != nil {
				f.assign(T, v)
			}
		}

	case 1: // e.g. var x, y = f()
		tuple := f.exprN(spec.Values[0])
		for i := range spec.Names {
			if T != nil {
				f.assign(T, f.extract(tuple, i))
			}
		}
	}
}

// assign records pairs of distinct types that are related by
// assignability, where the left-hand side is an interface and both
// sides have methods.
//
// It should be called for all assignability checks, type assertions,
// explicit conversions and comparisons between two types, unless the
// types are uninteresting (e.g. lhs is a concrete type, or the empty
// interface; rhs has no methods).
//
func (f *Finder) assign(lhs, rhs types.Type) {
	if types.Identical(lhs, rhs) {
		return
	}
	if !isInterface(lhs) {
		return
	}

	// Skip uninteresting pairs: an empty interface on the left, or a
	// methodless type on the right, can never be invalidated by a
	// method renaming.  (msetcache memoizes the method-set queries.)
	if f.msetcache.MethodSet(lhs).Len() == 0 {
		return
	}
	if f.msetcache.MethodSet(rhs).Len() == 0 {
		return
	}
	// record the pair
	f.Result[Constraint{lhs, rhs}] = true
}

// typeAssert must be called for each type assertion x.(T) where x has
// interface type I.
func (f *Finder) typeAssert(I, T types.Type) {
	// Type assertions are slightly subtle, because they are allowed
	// to be "impossible", e.g.
	//
	//	var x interface{f()}
	//	_ = x.(interface{f()int}) // legal
	//
	// (In hindsight, the language spec should probably not have
	// allowed this, but it's too late to fix now.)
	//
	// This means that a type assert from I to T isn't exactly a
	// constraint that T is assignable to I, but for a refactoring
	// tool it is a conditional constraint that, if T is assignable
	// to I before a refactoring, it should remain so after.

	if types.AssignableTo(T, I) {
		f.assign(I, T)
	}
}

// compare must be called for each comparison x==y.
func (f *Finder) compare(x, y types.Type) {
	// Comparability requires assignability in at least one direction;
	// record the constraint with the assignable-to side as lhs.
	if types.AssignableTo(x, y) {
		f.assign(y, x)
	} else if types.AssignableTo(y, x) {
		f.assign(x, y)
	}
}

// expr visits a true expression (not a type or defining ident)
// and returns its type.
func (f *Finder) expr(e ast.Expr) types.Type {
	tv := f.info.Types[e]
	if tv.Value != nil {
		return tv.Type // prune the descent for constants
	}

	// tv.Type may be nil for an ast.Ident.

	switch e := e.(type) {
	case *ast.BadExpr, *ast.BasicLit:
		// no-op

	case *ast.Ident:
		// (referring idents only)
		if obj, ok := f.info.Uses[e]; ok {
			return obj.Type()
		}
		if e.Name == "_" { // e.g. "for _ = range x"
			return tInvalid
		}
		panic("undefined ident: " + e.Name)

	case *ast.Ellipsis:
		if e.Elt != nil {
			f.expr(e.Elt)
		}

	case *ast.FuncLit:
		// Save/restore the enclosing signature around the literal's
		// body so its return statements see the right results.
		saved := f.sig
		f.sig = tv.Type.(*types.Signature)
		f.stmt(e.Body)
		f.sig = saved

	case *ast.CompositeLit:
		switch T := deref(tv.Type).Underlying().(type) {
		case *types.Struct:
			for i, elem := range e.Elts {
				if kv, ok := elem.(*ast.KeyValueExpr); ok {
					// Field: value — the key ident resolves to the field.
					f.assign(f.info.Uses[kv.Key.(*ast.Ident)].Type(), f.expr(kv.Value))
				} else {
					// Positional element: use the i'th field's type.
					f.assign(T.Field(i).Type(), f.expr(elem))
				}
			}

		case *types.Map:
			for _, elem := range e.Elts {
				elem := elem.(*ast.KeyValueExpr)
				f.assign(T.Key(), f.expr(elem.Key))
				f.assign(T.Elem(), f.expr(elem.Value))
			}

		case *types.Array, *types.Slice:
			// Both cases share an Elem method; the anonymous
			// interface assertion avoids duplicating the loop.
			tElem := T.(interface {
				Elem() types.Type
			}).Elem()
			for _, elem := range e.Elts {
				if kv, ok := elem.(*ast.KeyValueExpr); ok {
					// ignore the key
					f.assign(tElem, f.expr(kv.Value))
				} else {
					f.assign(tElem, f.expr(elem))
				}
			}

		default:
			panic("unexpected composite literal type: " + tv.Type.String())
		}

	case *ast.ParenExpr:
		f.expr(e.X)

	case *ast.SelectorExpr:
		if _, ok := f.info.Selections[e]; ok {
			f.expr(e.X) // selection
		} else {
			return f.info.Uses[e.Sel].Type() // qualified identifier
		}

	case *ast.IndexExpr:
		x := f.expr(e.X)
		i := f.expr(e.Index)
		// Only map indexing relates the index to a declared key type.
		if ux, ok := x.Underlying().(*types.Map); ok {
			f.assign(ux.Key(), i)
		}

	case *ast.SliceExpr:
		f.expr(e.X)
		if e.Low != nil {
			f.expr(e.Low)
		}
		if e.High != nil {
			f.expr(e.High)
		}
		if e.Max != nil {
			f.expr(e.Max)
		}

	case *ast.TypeAssertExpr:
		x := f.expr(e.X)
		f.typeAssert(x, f.info.Types[e.Type].Type)

	case *ast.CallExpr:
		if tvFun := f.info.Types[e.Fun]; tvFun.IsType() {
			// conversion
			arg0 := f.expr(e.Args[0])
			f.assign(tvFun.Type, arg0)
		} else {
			// function call
			if id, ok := unparen(e.Fun).(*ast.Ident); ok {
				if obj, ok := f.info.Uses[id].(*types.Builtin); ok {
					sig := f.info.Types[id].Type.(*types.Signature)
					return f.builtin(obj, sig, e.Args, tv.Type)
				}
			}
			// ordinary call
			f.call(f.expr(e.Fun).Underlying().(*types.Signature), e.Args)
		}

	case *ast.StarExpr:
		f.expr(e.X)

	case *ast.UnaryExpr:
		f.expr(e.X)

	case *ast.BinaryExpr:
		x := f.expr(e.X)
		y := f.expr(e.Y)
		// Only (in)equality comparisons relate the operand types.
		if e.Op == token.EQL || e.Op == token.NEQ {
			f.compare(x, y)
		}

	case *ast.KeyValueExpr:
		f.expr(e.Key)
		f.expr(e.Value)

	case *ast.ArrayType,
		*ast.StructType,
		*ast.FuncType,
		*ast.InterfaceType,
		*ast.MapType,
		*ast.ChanType:
		// Type expressions must not reach expr; see the doc comment.
		panic(e)
	}

	if tv.Type == nil {
		panic(fmt.Sprintf("no type for %T", e))
	}

	return tv.Type
}

// stmt visits a statement, recording the constraints implied by every
// implicit assignment, conversion, comparison, or type assertion it
// contains, and recursing into nested statements and expressions.
func (f *Finder) stmt(s ast.Stmt) {
	switch s := s.(type) {
	case *ast.BadStmt,
		*ast.EmptyStmt,
		*ast.BranchStmt:
		// no-op

	case *ast.DeclStmt:
		d := s.Decl.(*ast.GenDecl)
		if d.Tok == token.VAR { // ignore consts
			for _, spec := range d.Specs {
				f.valueSpec(spec.(*ast.ValueSpec))
			}
		}

	case *ast.LabeledStmt:
		f.stmt(s.Stmt)

	case *ast.ExprStmt:
		f.expr(s.X)

	case *ast.SendStmt:
		ch := f.expr(s.Chan)
		val := f.expr(s.Value)
		f.assign(ch.Underlying().(*types.Chan).Elem(), val)

	case *ast.IncDecStmt:
		f.expr(s.X)

	case *ast.AssignStmt:
		switch s.Tok {
		case token.ASSIGN, token.DEFINE:
			// y := x   or   y = x
			// An unbalanced statement (len(Lhs) != len(Rhs)) must be
			// the n:1 tuple form; evaluate the RHS once as a tuple.
			var rhsTuple types.Type
			if len(s.Lhs) != len(s.Rhs) {
				rhsTuple = f.exprN(s.Rhs[0])
			}
			for i := range s.Lhs {
				var lhs, rhs types.Type
				if rhsTuple == nil {
					rhs = f.expr(s.Rhs[i]) // 1:1 assignment
				} else {
					rhs = f.extract(rhsTuple, i) // n:1 assignment
				}

				if id, ok := s.Lhs[i].(*ast.Ident); ok {
					if id.Name != "_" {
						if obj, ok := f.info.Defs[id]; ok {
							lhs = obj.Type() // definition
						}
					}
				}
				if lhs == nil {
					lhs = f.expr(s.Lhs[i]) // assignment
				}
				f.assign(lhs, rhs)
			}

		default:
			// y op= x
			f.expr(s.Lhs[0])
			f.expr(s.Rhs[0])
		}

	case *ast.GoStmt:
		f.expr(s.Call)

	case *ast.DeferStmt:
		f.expr(s.Call)

	case *ast.ReturnStmt:
		// f.sig was set by Find (or a FuncLit) for the enclosing function.
		formals := f.sig.Results()
		switch len(s.Results) {
		case formals.Len(): // 1:1
			for i, result := range s.Results {
				f.assign(formals.At(i).Type(), f.expr(result))
			}

		case 1: // n:1
			tuple := f.exprN(s.Results[0])
			for i := 0; i < formals.Len(); i++ {
				f.assign(formals.At(i).Type(), f.extract(tuple, i))
			}
		}

	case *ast.SelectStmt:
		f.stmt(s.Body)

	case *ast.BlockStmt:
		for _, s := range s.List {
			f.stmt(s)
		}

	case *ast.IfStmt:
		if s.Init != nil {
			f.stmt(s.Init)
		}
		f.expr(s.Cond)
		f.stmt(s.Body)
		if s.Else != nil {
			f.stmt(s.Else)
		}

	case *ast.SwitchStmt:
		if s.Init != nil {
			f.stmt(s.Init)
		}
		// A tagless switch compares each case against untyped true.
		var tag types.Type = tUntypedBool
		if s.Tag != nil {
			tag = f.expr(s.Tag)
		}
		for _, cc := range s.Body.List {
			cc := cc.(*ast.CaseClause)
			for _, cond := range cc.List {
				f.compare(tag, f.info.Types[cond].Type)
			}
			for _, s := range cc.Body {
				f.stmt(s)
			}
		}

	case *ast.TypeSwitchStmt:
		if s.Init != nil {
			f.stmt(s.Init)
		}
		var I types.Type
		switch ass := s.Assign.(type) {
		case *ast.ExprStmt: // x.(type)
			I = f.expr(unparen(ass.X).(*ast.TypeAssertExpr).X)
		case *ast.AssignStmt: // y := x.(type)
			I = f.expr(unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X)
		}
		for _, cc := range s.Body.List {
			cc := cc.(*ast.CaseClause)
			for _, cond := range cc.List {
				tCase := f.info.Types[cond].Type
				// "case nil:" asserts nothing about the interface.
				if tCase != tUntypedNil {
					f.typeAssert(I, tCase)
				}
			}
			for _, s := range cc.Body {
				f.stmt(s)
			}
		}

	case *ast.CommClause:
		if s.Comm != nil {
			f.stmt(s.Comm)
		}
		for _, s := range s.Body {
			f.stmt(s)
		}

	case *ast.ForStmt:
		if s.Init != nil {
			f.stmt(s.Init)
		}
		if s.Cond != nil {
			f.expr(s.Cond)
		}
		if s.Post != nil {
			f.stmt(s.Post)
		}
		f.stmt(s.Body)

	case *ast.RangeStmt:
		x := f.expr(s.X)
		// No conversions are involved when Tok==DEFINE.
		if s.Tok == token.ASSIGN {
			if s.Key != nil {
				k := f.expr(s.Key)
				var xelem types.Type
				// keys of array, *array, slice, string aren't interesting
				switch ux := x.Underlying().(type) {
				case *types.Chan:
					xelem = ux.Elem()
				case *types.Map:
					xelem = ux.Key()
				}
				if xelem != nil {
					f.assign(xelem, k)
				}
			}
			if s.Value != nil {
				val := f.expr(s.Value)
				var xelem types.Type
				// values of strings aren't interesting
				switch ux := x.Underlying().(type) {
				case *types.Array:
					xelem = ux.Elem()
				case *types.Chan:
					xelem = ux.Elem()
				case *types.Map:
					xelem = ux.Elem()
				case *types.Pointer: // *array
					xelem = deref(ux).(*types.Array).Elem()
				case *types.Slice:
					xelem = ux.Elem()
				}
				if xelem != nil {
					f.assign(xelem, val)
				}
			}
		}
		f.stmt(s.Body)

	default:
		panic(s)
	}
}

// -- Plundered from golang.org/x/tools/go/ssa -----------------

// deref returns a pointer's element type; otherwise it returns typ.
func deref(typ types.Type) types.Type {
	if p, ok := typ.Underlying().(*types.Pointer); ok {
		return p.Elem()
	}
	return typ
}

// unparen strips any enclosing parentheses from e.
func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }

// isInterface reports whether T's underlying type is an interface.
func isInterface(T types.Type) bool { return types.IsInterface(T) }