diff --git a/go/analysis/analysistest/analysistest.go b/go/analysis/analysistest/analysistest.go new file mode 100644 index 0000000000..f73a5042b1 --- /dev/null +++ b/go/analysis/analysistest/analysistest.go @@ -0,0 +1,184 @@ +// Package analysistest provides utilities for testing analyzers. +package analysistest + +import ( + "fmt" + "go/token" + "io/ioutil" + "log" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/checker" + "golang.org/x/tools/go/packages" +) + +// WriteFiles is a helper function that creates a temporary directory +// and populates it with a GOPATH-style project using filemap (which +// maps file names to contents). On success it returns the name of the +// directory and a cleanup function to delete it. +func WriteFiles(filemap map[string]string) (dir string, cleanup func(), err error) { + gopath, err := ioutil.TempDir("", "analysistest") + if err != nil { + return "", nil, err + } + cleanup = func() { os.RemoveAll(gopath) } + + for name, content := range filemap { + filename := filepath.Join(gopath, "src", name) + os.MkdirAll(filepath.Dir(filename), 0777) // ignore error + if err := ioutil.WriteFile(filename, []byte(content), 0666); err != nil { + cleanup() + return "", nil, err + } + } + return gopath, cleanup, nil +} + +// TestData returns the effective filename of +// the program's "testdata" directory. +// This function may be overridden by projects using +// an alternative build system (such as Blaze) that +// does not run a test in its package directory. +var TestData = func() string { + testdata, err := filepath.Abs("testdata") + if err != nil { + log.Fatal(err) + } + return testdata +} + +// Testing is an abstraction of a *testing.T. +type Testing interface { + Errorf(format string, args ...interface{}) +} + +// Run applies an analysis to each named package. +// It loads each package from the specified GOPATH-style project +// directory using golang.org/x/tools/go/packages, runs the analysis on +// it, and checks that the analysis generates the diagnostics +// specified by 'want "..."' comments in the package's source files. +// +// You may wish to call this function from within a (*testing.T).Run +// subtest to ensure that errors have adequate contextual description. +func Run(t Testing, dir string, a *analysis.Analyzer, pkgnames ...string) { + for _, pkgname := range pkgnames { + pkg, err := loadPackage(dir, pkgname) + if err != nil { + t.Errorf("loading %s: %v", pkgname, err) + continue + } + + pass, diagnostics, err := checker.Analyze(pkg, a) + if err != nil { + t.Errorf("analyzing %s: %v", pkgname, err) + continue + } + + checkDiagnostics(t, pass, diagnostics) + } +} + +// loadPackage loads the specified package (from source, with +// dependencies) from dir, which is the root of a GOPATH-style project tree. +func loadPackage(dir, pkgpath string) (*packages.Package, error) { + // packages.Load loads the real standard library, not a minimal + // fake version, which would be more efficient, especially if we + // have many small tests that import, say, net/http. + // However, there is no easy way to make go/packages consume + // a list of packages we generate and then do the parsing and + // typechecking, though this feature seems to be a recurring need. + // + // It is possible to write a custom driver, but it's fairly + // involved and requires setting a global (environment) variable. + // + // Also, using the "go list" driver will probably not work in google3.
+ // + // TODO(adonovan): extend go/packages to allow bypassing the driver. + + cfg := &packages.Config{ + Mode: packages.LoadAllSyntax, + Dir: dir, + Tests: true, + Env: append(os.Environ(), "GOPATH="+dir), + } + pkgs, err := packages.Load(cfg, pkgpath) + if err != nil { + return nil, err + } + if len(pkgs) != 1 { + return nil, fmt.Errorf("pattern %q expanded to %d packages, want 1", + pkgpath, len(pkgs)) + } + + return pkgs[0], nil +} + +// checkDiagnostics inspects an analysis pass on which the analysis has +// already been run, and verifies that all reported diagnostics match those +// specified by 'want "..."' comments in the package's source files, +// which must have been parsed with comments enabled. Surplus diagnostics +// and unmatched expectations are reported as errors to the Testing. +func checkDiagnostics(t Testing, pass *analysis.Pass, diagnostics []analysis.Diagnostic) { + // Read expectations out of comments. + type key struct { + file string + line int + } + wantErrs := make(map[key]*regexp.Regexp) + for _, f := range pass.Files { + for _, c := range f.Comments { + posn := pass.Fset.Position(c.Pos()) + sanitize(&posn) + text := strings.TrimSpace(c.Text()) + if !strings.HasPrefix(text, "want") { + continue + } + text = strings.TrimSpace(text[len("want"):]) + pattern, err := strconv.Unquote(text) + if err != nil { + t.Errorf("%s: in 'want' comment: %v", posn, err) + continue + } + rx, err := regexp.Compile(pattern) + if err != nil { + t.Errorf("%s: %v", posn, err) + continue + } + wantErrs[key{posn.Filename, posn.Line}] = rx + } + } + + // Check the diagnostics match expectations. + for _, f := range diagnostics { + posn := pass.Fset.Position(f.Pos) + sanitize(&posn) + rx, ok := wantErrs[key{posn.Filename, posn.Line}] + if !ok { + t.Errorf("%v: unexpected diagnostic: %v", posn, f.Message) + continue + } + delete(wantErrs, key{posn.Filename, posn.Line}) + if !rx.MatchString(f.Message) { + t.Errorf("%v: diagnostic %q does not match pattern %q", posn, f.Message, rx) + } + } + for key, rx := range wantErrs { + t.Errorf("%s:%d: expected diagnostic matching %q", key.file, key.line, rx) + } +} + +// sanitize removes the GOPATH portion of the filename, +// typically a gnarly /tmp directory. +func sanitize(posn *token.Position) { + // TODO(adonovan): port to windows. + if strings.HasPrefix(posn.Filename, "/tmp/") { + if i := strings.Index(posn.Filename, "/src/"); i > 0 { + posn.Filename = posn.Filename[i+len("/src/"):] + } + } +} diff --git a/go/analysis/analysistest/analysistest_test.go b/go/analysis/analysistest/analysistest_test.go new file mode 100644 index 0000000000..7217220be2 --- /dev/null +++ b/go/analysis/analysistest/analysistest_test.go @@ -0,0 +1,54 @@ +package analysistest_test + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "golang.org/x/tools/go/analysis/analysistest" + "golang.org/x/tools/go/analysis/passes/findcall" +) + +// TestTheTest tests the analysistest testing infrastructure. +func TestTheTest(t *testing.T) { + // We'll simulate a partly failing test of the findcall analysis, + // which (by default) reports calls to functions named 'println'. 
+ filemap := map[string]string{"a/b.go": `package main + +func main() { + println("hello, world") // want "call of println" + println("hello, world") // want "wrong expectation text" + println() // trigger an unexpected diagnostic + print() // want "unsatisfied expectation" + print() // want: "ill-formed 'want' comment" +} +`} + dir, cleanup, err := analysistest.WriteFiles(filemap) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + var got []string + t2 := errorfunc(func(s string) { got = append(got, s) }) // a fake *testing.T + analysistest.Run(t2, dir, findcall.Analyzer, "a") + + want := []string{ + `a/b.go:8:10: in 'want' comment: invalid syntax`, + `a/b.go:5:9: diagnostic "call of println(...)" does not match pattern "wrong expectation text"`, + `a/b.go:6:9: unexpected diagnostic: call of println(...)`, + `a/b.go:7: expected diagnostic matching "unsatisfied expectation"`, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got:\n%s\nwant:\n%s", + strings.Join(got, "\n"), + strings.Join(want, "\n")) + } +} + +type errorfunc func(string) + +func (f errorfunc) Errorf(format string, args ...interface{}) { + f(fmt.Sprintf(format, args...)) +} diff --git a/go/analysis/cmd/analyze/analyze.go b/go/analysis/cmd/analyze/analyze.go new file mode 100644 index 0000000000..50fd8bfe0a --- /dev/null +++ b/go/analysis/cmd/analyze/analyze.go @@ -0,0 +1,26 @@ +// The analyze command is a static checker for Go programs, similar to +// vet, but with pluggable analyzers defined using the analysis +// interface, and using the go/packages API to load packages in any +// build system. +// +// Each analysis flag name is preceded by the analysis name: --analysis.flag. +// In addition, the --analysis.enabled flag controls whether the +// diagnostics of that analysis are displayed. (A disabled analysis may yet +// be run if it is required by some other analysis that is enabled.) +package main + +import ( + "log" + + "golang.org/x/tools/go/analysis/multichecker" + + // analysis plug-ins + "golang.org/x/tools/go/analysis/passes/findcall" +) + +func main() { + log.SetFlags(0) + log.SetPrefix("analyze: ") + + multichecker.Main(findcall.Analyzer) +} diff --git a/go/analysis/internal/checker/checker.go b/go/analysis/internal/checker/checker.go new file mode 100644 index 0000000000..290ad5a39e --- /dev/null +++ b/go/analysis/internal/checker/checker.go @@ -0,0 +1,703 @@ +// Package checker defines the implementation of the checker commands. +// The same code drives the multi-analysis driver, the single-analysis +// driver that is conventionally provided for convenience along with +// each analysis package, and the test driver. +package checker + +import ( + "bytes" + "context" + "encoding/gob" + "encoding/json" + "flag" + "fmt" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "reflect" + "runtime" + "runtime/pprof" + "runtime/trace" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" +) + +var ( + JSON = false + + // Debug is a set of single-letter flags: + // + // f show [f]acts as they are created + // p disable [p]arallel execution of analyzers + // s do additional [s]anity checks on fact types and serialization + // t show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise) + // v show [v]erbose logging + // + Debug = "" + + Context = -1 // if >=0, display offending line plus this many lines of context + + // Log files for optional performance tracing. 
+ CPUProfile, MemProfile, Trace string +) + +// RegisterFlags registers command-line flags used by the analysis driver. +func RegisterFlags() { + flag.BoolVar(&JSON, "json", JSON, "emit JSON output") + flag.StringVar(&Debug, "debug", Debug, `debug flags, any subset of "fpstv"`) + flag.IntVar(&Context, "c", Context, `display offending line with this many lines of context`) + + flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file") + flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file") + flag.StringVar(&Trace, "trace", "", "write trace log to this file") +} + +// Run loads the packages specified by args using go/packages, +// then applies the specified analyzers to them. +// Analysis flags must already have been set. +// It provides most of the logic for the main functions of both the +// singlechecker and the multi-analysis commands. +func Run(args []string, analyzers []*analysis.Analyzer) error { + if CPUProfile != "" { + f, err := os.Create(CPUProfile) + if err != nil { + log.Fatal(err) + } + if err := pprof.StartCPUProfile(f); err != nil { + log.Fatal(err) + } + // NB: profile won't be written in case of error. + defer pprof.StopCPUProfile() + } + + if Trace != "" { + f, err := os.Create(Trace) + if err != nil { + log.Fatal(err) + } + if err := trace.Start(f); err != nil { + log.Fatal(err) + } + // NB: trace log won't be written in case of error. + defer func() { + trace.Stop() + log.Printf("To view the trace, run:\n$ go tool trace view %s", Trace) + }() + } + + if MemProfile != "" { + f, err := os.Create(MemProfile) + if err != nil { + log.Fatal(err) + } + // NB: memprofile won't be written in case of error. + defer func() { + runtime.GC() // get up-to-date statistics + if err := pprof.WriteHeapProfile(f); err != nil { + log.Fatalf("Writing memory profile: %v", err) + } + f.Close() + }() + } + + // Load the packages. + if dbg('v') { + log.SetPrefix("") + log.SetFlags(log.Lmicroseconds) // display timing + log.Printf("load %s", args) + } + + // Optimization: if the selected analyzers don't produce/consume + // facts, we need source only for the initial packages. + allSyntax := needFacts(analyzers) + initial, err := load(args, allSyntax) + if err != nil { + return err + } + + roots := analyze(initial, analyzers) + + // Print the results. + printDiagnostics(roots) + + return nil +} + +// load loads the initial packages. +func load(patterns []string, allSyntax bool) ([]*packages.Package, error) { + mode := packages.LoadSyntax + if allSyntax { + mode = packages.LoadAllSyntax + } + conf := packages.Config{ + Mode: mode, + Tests: true, + } + initial, err := packages.Load(&conf, patterns...) + if err == nil { + if n := packages.PrintErrors(initial); n > 1 { + err = fmt.Errorf("%d errors during loading", n) + } else if n == 1 { + err = fmt.Errorf("error during loading") + } + } + return initial, err +} + +// Analyze applies an analysis to a package (and its dependencies if +// necessary) and returns the graph of results. +// +// It is exposed for use in testing. +func Analyze(pkg *packages.Package, a *analysis.Analyzer) (*analysis.Pass, []analysis.Diagnostic, error) { + act := analyze([]*packages.Package{pkg}, []*analysis.Analyzer{a})[0] + return act.pass, act.diagnostics, act.err +} + +func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action { + // Construct the action graph. + if dbg('v') { + log.Printf("building graph of analysis passes") + } + + // Each graph node (action) is one unit of analysis.
+ // Edges express package-to-package (vertical) dependencies, + // and analysis-to-analysis (horizontal) dependencies. + type key struct { + *analysis.Analyzer + *packages.Package + } + actions := make(map[key]*action) + + var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action + mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action { + k := key{a, pkg} + act, ok := actions[k] + if !ok { + act = &action{a: a, pkg: pkg} + + // Add a dependency on each required analyzers. + for _, req := range a.Requires { + act.deps = append(act.deps, mkAction(req, pkg)) + } + + // An analysis that consumes/produces facts + // must run on the package's dependencies too. + if a.UsesFacts { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // for determinism + for _, path := range paths { + dep := mkAction(a, pkg.Imports[path]) + act.deps = append(act.deps, dep) + } + } + + actions[k] = act + } + return act + } + + // Build nodes for initial packages. + var roots []*action + for _, a := range analyzers { + for _, pkg := range pkgs { + root := mkAction(a, pkg) + root.isroot = true + roots = append(roots, root) + } + } + + // Execute the graph in parallel. + execAll(roots) + + return roots +} + +// printDiagnostics prints the diagnostics for the root packages in either +// plain text or JSON format. JSON format also includes errors for any +// dependencies. +func printDiagnostics(roots []*action) { + // Print the output. + // + // Print diagnostics only for root packages, + // but errors for all packages. + printed := make(map[*action]bool) + var print func(*action) + var visitAll func(actions []*action) + visitAll = func(actions []*action) { + for _, act := range actions { + if !printed[act] { + printed[act] = true + visitAll(act.deps) + print(act) + } + } + } + + if JSON { + tree := make(map[string]map[string]interface{}) // ID -> analysis -> result + + print = func(act *action) { + m, existing := tree[act.pkg.ID] + if !existing { + m = make(map[string]interface{}) + // Insert m into tree later iff non-empty. + } + if act.err != nil { + type jsonError struct { + Err string `json:"error"` + } + m[act.a.Name] = jsonError{act.err.Error()} + } else if act.isroot { + type jsonDiagnostic struct { + Category string `json:"category,omitempty"` + Posn string `json:"posn"` + Message string `json:"message"` + } + var diagnostics []jsonDiagnostic + for _, f := range act.diagnostics { + diagnostics = append(diagnostics, jsonDiagnostic{ + Category: f.Category, + Posn: act.pkg.Fset.Position(f.Pos).String(), + Message: f.Message, + }) + } + if diagnostics != nil { + m[act.a.Name] = diagnostics + } + } + if !existing && len(m) > 0 { + tree[act.pkg.ID] = m + } + } + visitAll(roots) + + data, err := json.MarshalIndent(tree, "", "\t") + if err != nil { + log.Panicf("internal error: JSON marshalling failed: %v", err) + } + os.Stdout.Write(data) + fmt.Println() + } else { + // plain text output + + // De-duplicate diagnostics by position (not token.Pos) to + // avoid double-reporting in source files that belong to + // multiple packages, such as foo and foo.test. + type key struct { + token.Position + *analysis.Analyzer + message, class string + } + seen := make(map[key]bool) + + print = func(act *action) { + if act.err != nil { + fmt.Fprintf(os.Stderr, "%s: %v\n", act.a.Name, act.err) + return + } + if act.isroot { + for _, f := range act.diagnostics { + class := act.a.Name + if f.Category != "" { + class += "." 
+ f.Category + } + posn := act.pkg.Fset.Position(f.Pos) + + k := key{posn, act.a, class, f.Message} + if seen[k] { + continue // duplicate + } + seen[k] = true + + fmt.Printf("%s: [%s] %s\n", posn, class, f.Message) + + // -c=0: show offending line of code in context. + if Context >= 0 { + data, _ := ioutil.ReadFile(posn.Filename) + lines := strings.Split(string(data), "\n") + for i := posn.Line - Context; i <= posn.Line+Context; i++ { + if 1 <= i && i <= len(lines) { + fmt.Printf("%d\t%s\n", i, lines[i-1]) + } + } + } + } + } + } + visitAll(roots) + } + + // Print timing info. + if dbg('t') { + if !dbg('p') { + log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism") + } + var all []*action + var total time.Duration + for act := range printed { + all = append(all, act) + total += act.duration + } + sort.Slice(all, func(i, j int) bool { + return all[i].duration > all[j].duration + }) + + // Print actions accounting for 90% of the total. + var sum time.Duration + for _, act := range all { + fmt.Fprintf(os.Stderr, "%s\t%s\n", act.duration, act) + sum += act.duration + if sum >= total*9/10 { + break + } + } + } +} + +// needFacts reports whether any analysis required by the specified set +// needs facts. If so, we must load the entire program from source. +func needFacts(analyzers []*analysis.Analyzer) bool { + seen := make(map[*analysis.Analyzer]bool) + var q []*analysis.Analyzer // for BFS + q = append(q, analyzers...) + for len(q) > 0 { + a := q[0] + q = q[1:] + if !seen[a] { + seen[a] = true + if a.UsesFacts { + return true + } + q = append(q, a.Requires...) + } + } + return false +} + +// An action represents one unit of analysis work: the application of +// one analysis to one package. Actions form a DAG, both within a +// package (as different analyzers are applied, either in sequence or +// parallel), and across packages (as dependencies are analyzed). +type action struct { + once sync.Once + a *analysis.Analyzer + pkg *packages.Package + pass *analysis.Pass + isroot bool + deps []*action + objectFacts map[objectFactKey]analysis.Fact + packageFacts map[packageFactKey]analysis.Fact + inputs map[*analysis.Analyzer]interface{} + result interface{} + diagnostics []analysis.Diagnostic + err error + duration time.Duration +} + +type objectFactKey struct { + types.Object + reflect.Type +} + +type packageFactKey struct { + *types.Package + reflect.Type +} + +func (act *action) String() string { + return fmt.Sprintf("%s@%s", act.a, act.pkg) +} + +func execAll(actions []*action) { + sequential := dbg('p') + var wg sync.WaitGroup + for _, act := range actions { + wg.Add(1) + work := func(act *action) { + act.exec() + wg.Done() + } + if sequential { + work(act) + } else { + go work(act) + } + } + wg.Wait() +} + +func (act *action) exec() { act.once.Do(act.execOnce) } + +func (act *action) execOnce() { + // Analyze dependencies. + execAll(act.deps) + + ctx, task := trace.NewTask(context.Background(), "exec") + defer task.End() + trace.Log(ctx, "pass", act.String()) + + // Record time spent in this node but not its dependencies. + // In parallel mode, due to GC/scheduler contention, the + // time is 5x higher than in sequential mode, even with a + // semaphore limiting the number of threads here. + // So use -debug=tp. + if dbg('t') { + t0 := time.Now() + defer func() { act.duration = time.Since(t0) }() + } + + // Report an error if any dependency failed. 
+ var failed []string + for _, dep := range act.deps { + if dep.err != nil { + failed = append(failed, dep.String()) + } + } + if failed != nil { + sort.Strings(failed) + act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", ")) + return + } + + // Plumb the output values of the dependencies + // into the inputs of this action. Also facts. + inputs := make(map[*analysis.Analyzer]interface{}) + act.objectFacts = make(map[objectFactKey]analysis.Fact) + act.packageFacts = make(map[packageFactKey]analysis.Fact) + for _, dep := range act.deps { + if dep.pkg == act.pkg { + // Same package, different analysis (horizontal edge): + // in-memory outputs of prerequisite analyzers + // become inputs to this analysis pass. + inputs[dep.a] = dep.result + + } else if dep.a == act.a { // (always true) + // Same analysis, different package (vertical edge): + // serialized facts produced by prerequisite analysis + // become available to this analysis pass. + inheritFacts(act, dep) + } + } + + // Run the analysis. + pass := &analysis.Pass{ + Analyzer: act.a, + Fset: act.pkg.Fset, + Files: act.pkg.Syntax, + Pkg: act.pkg.Types, + TypesInfo: act.pkg.TypesInfo, + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, + ImportObjectFact: act.importObjectFact, + ExportObjectFact: act.exportObjectFact, + ImportPackageFact: act.importPackageFact, + ExportPackageFact: act.exportPackageFact, + } + act.pass = pass + + var err error + if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors { + err = fmt.Errorf("analysis skipped due to errors in package") + } else { + act.result, err = pass.Analyzer.Run(pass) + if err == nil { + if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want { + err = fmt.Errorf( + "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v", + pass.Pkg.Path(), pass.Analyzer, got, want) + } + } + } + act.err = err + + // disallow calls after Run + pass.ExportObjectFact = nil + pass.ExportPackageFact = nil +} + +// inheritFacts populates act.facts with +// those it obtains from its dependency, dep. +func inheritFacts(act, dep *action) { + serialize := dbg('s') + + for key, fact := range dep.objectFacts { + // Filter out facts related to objects + // that are irrelevant downstream + // (equivalently: not in the compiler export data). + if !exportedFrom(key.Object, dep.pkg.Types) { + if false { + log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.Object, fact) + } + continue + } + + // Optionally serialize/deserialize fact + // to verify that it works across address spaces. + if serialize { + var err error + fact, err = codeFact(fact) + if err != nil { + log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + } + } + + if false { + log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.Object, fact) + } + act.objectFacts[key] = fact + } + + for key, fact := range dep.packageFacts { + // TODO: filter out facts that belong to + // packages not mentioned in the export data + // to prevent side channels. + + // Optionally serialize/deserialize fact + // to verify that it works across address spaces + // and is deterministic. 
+ if serialize { + var err error + fact, err = codeFact(fact) + if err != nil { + log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + } + } + + if false { + log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.Package.Path(), fact) + } + act.packageFacts[key] = fact + } +} + +// codeFact encodes then decodes a fact, +// just to exercise that logic. +func codeFact(fact analysis.Fact) (analysis.Fact, error) { + // We encode facts one at a time. + // A real modular driver would emit all facts + // into one encoder to improve gob efficiency. + var buf bytes.Buffer + if err := gob.NewEncoder(&buf).Encode(fact); err != nil { + return nil, err + } + + // Encode it twice and assert that we get the same bits. + // This helps detect nondeterministic Gob encoding (e.g. of maps). + var buf2 bytes.Buffer + if err := gob.NewEncoder(&buf2).Encode(fact); err != nil { + return nil, err + } + if !bytes.Equal(buf.Bytes(), buf2.Bytes()) { + return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact) + } + + new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact) + if err := gob.NewDecoder(&buf).Decode(new); err != nil { + return nil, err + } + return new, nil +} + +// exportedFrom reports whether obj may be visible to a package that imports pkg. +// This includes not just the exported members of pkg, but also unexported +// constants, types, fields, and methods, perhaps belonging to other packages, +// that find their way into the API. +// This is an overapproximation of the more accurate approach used by +// gc export data, which walks the type graph, but it's much simpler. +// +// TODO(adonovan): do more accurate filtering by walking the type graph. +func exportedFrom(obj types.Object, pkg *types.Package) bool { + switch obj := obj.(type) { + case *types.Func: + return obj.Exported() && obj.Pkg() == pkg || + obj.Type().(*types.Signature).Recv() != nil + case *types.Var: + return obj.Exported() && obj.Pkg() == pkg || + obj.IsField() + case *types.TypeName, *types.Const: + return true + } + return false // Nil, Builtin, Label, or PkgName +} + +// importObjectFact implements Pass.ImportObjectFact. +// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, +// importObjectFact copies the fact value to *ptr. +func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool { + if obj == nil { + panic("nil object") + } + key := objectFactKey{obj, factType(ptr)} + if v, ok := act.objectFacts[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// exportObjectFact implements Pass.ExportObjectFact. +func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { + if act.pass.ExportObjectFact == nil { + log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact) + } + + if obj.Pkg() != act.pkg.Types { + log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging to another package", + act.a, act.pkg, obj, fact) + } + + key := objectFactKey{obj, factType(fact)} + act.objectFacts[key] = fact // clobber any existing entry + if dbg('f') { + objstr := types.ObjectString(obj, (*types.Package).Name) + log.Printf("fact %#v on %s", fact, objstr) + } +} + +// importPackageFact implements Pass.ImportPackageFact. +// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, +// importPackageFact copies the fact value to *ptr.
+func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool { + if pkg == nil { + panic("nil package") + } + key := packageFactKey{pkg, factType(ptr)} + if v, ok := act.packageFacts[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// exportPackageFact implements Pass.ExportPackageFact. +func (act *action) exportPackageFact(fact analysis.Fact) { + if act.pass.ExportPackageFact == nil { + log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact) + } + + key := packageFactKey{act.pass.Pkg, factType(fact)} + act.packageFacts[key] = fact // clobber any existing entry + if dbg('f') { + log.Printf("fact %#v on %s", fact, act.pass.Pkg) + } +} + +func factType(fact analysis.Fact) reflect.Type { + t := reflect.TypeOf(fact) + if t.Kind() != reflect.Ptr { + log.Fatalf("invalid Fact type: got %T, want pointer", t) + } + return t +} + +func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 } diff --git a/go/analysis/multichecker/multichecker.go b/go/analysis/multichecker/multichecker.go new file mode 100644 index 0000000000..562839d726 --- /dev/null +++ b/go/analysis/multichecker/multichecker.go @@ -0,0 +1,53 @@ +// Package multichecker defines the main function for an analysis driver +// with several analyzers. This package makes it easy for anyone to build +// an analysis tool containing just the analyzers they need. +package multichecker + +import ( + "flag" + "log" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/checker" +) + +func Main(analyzers ...*analysis.Analyzer) { + if err := analysis.Validate(analyzers); err != nil { + log.Fatal(err) + } + + checker.RegisterFlags() + + // Connect each analysis flag to the command line as --analysis.flag. + enabled := make(map[*analysis.Analyzer]*bool) + for _, a := range analyzers { + prefix := a.Name + "." + + // Add --foo.enable flag. + enable := new(bool) + flag.BoolVar(enable, prefix+"enable", false, "enable only "+a.Name+" analysis") + enabled[a] = enable + + a.Flags.VisitAll(func(f *flag.Flag) { + flag.Var(f.Value, prefix+f.Name, f.Usage) + }) + } + + flag.Parse() // (ExitOnError) + + // If any --foo.enable flag is set, + // run only those analyzers. + var keep []*analysis.Analyzer + for _, a := range analyzers { + if *enabled[a] { + keep = append(keep, a) + } + } + if keep != nil { + analyzers = keep + } + + if err := checker.Run(flag.Args(), analyzers); err != nil { + log.Fatal(err) + } +} diff --git a/go/analysis/passes/README b/go/analysis/passes/README new file mode 100644 index 0000000000..add86bdba5 --- /dev/null +++ b/go/analysis/passes/README @@ -0,0 +1,8 @@ + +This directory does not contain a Go package, +but acts as a container for various analyses +that implement the golang.org/x/tools/go/analysis +API and may be imported into an analysis tool. + +By convention, each package foo provides the analysis, +and each command foo/cmd/foo provides a standalone driver. diff --git a/go/analysis/passes/findcall/cmd/findcall/main.go b/go/analysis/passes/findcall/cmd/findcall/main.go new file mode 100644 index 0000000000..c933b2406f --- /dev/null +++ b/go/analysis/passes/findcall/cmd/findcall/main.go @@ -0,0 +1,9 @@ +// The findcall command runs the findcall analyzer. 
+package main + +import ( + "golang.org/x/tools/go/analysis/passes/findcall" + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { singlechecker.Main(findcall.Analyzer) } diff --git a/go/analysis/passes/findcall/findcall_test.go b/go/analysis/passes/findcall/findcall_test.go new file mode 100644 index 0000000000..c521885c29 --- /dev/null +++ b/go/analysis/passes/findcall/findcall_test.go @@ -0,0 +1,57 @@ +package findcall_test + +import ( + "testing" + + "golang.org/x/tools/go/analysis/analysistest" + "golang.org/x/tools/go/analysis/passes/findcall" +) + +// TestFromStringLiterals demonstrates how to test an analysis using +// a table of string literals for each test case. +// +// Such tests are typically quite compact. +func TestFromStringLiterals(t *testing.T) { + for _, test := range [...]struct { + desc string + pkgpath string + files map[string]string + }{ + { + desc: "SimpleTest", + pkgpath: "main", + files: map[string]string{"main/main.go": `package main + +func main() { + println("hello") // want "call of println" + print("goodbye") // not a call of println +}`, + }, + }, + } { + t.Run(test.desc, func(t *testing.T) { + dir, cleanup, err := analysistest.WriteFiles(test.files) + if err != nil { + t.Fatal(err) + } + defer cleanup() + analysistest.Run(t, dir, findcall.Analyzer, test.pkgpath) + }) + } +} + +// TestFromFileSystem demonstrates how to test an analysis using input +// files stored in the file system. +// +// These tests have the advantages that test data can be edited +// directly, and that files named in error messages can be opened. +// However, they tend to spread a small number of lines of text across a +// rather deep directory hierarchy, and obscure similarities among +// related tests, especially when tests involve multiple packages, or +// multiple variants of a single scenario. +func TestFromFileSystem(t *testing.T) { + testdata := analysistest.TestData() + analysistest.Run(t, testdata, findcall.Analyzer, + "a", // loads testdata/src/a/a.go. + ) +} diff --git a/go/analysis/passes/findcall/testdata/src/a/a.go b/go/analysis/passes/findcall/testdata/src/a/a.go new file mode 100644 index 0000000000..f2c205330e --- /dev/null +++ b/go/analysis/passes/findcall/testdata/src/a/a.go @@ -0,0 +1,6 @@ +package main + +func main() { + println("hi") // want "call of println" + print("hi") // not a call of println +} diff --git a/go/analysis/passes/pkgfact/pkgfact_test.go b/go/analysis/passes/pkgfact/pkgfact_test.go new file mode 100644 index 0000000000..11e6f893a3 --- /dev/null +++ b/go/analysis/passes/pkgfact/pkgfact_test.go @@ -0,0 +1,15 @@ +package pkgfact_test + +import ( + "testing" + + "golang.org/x/tools/go/analysis/analysistest" + "golang.org/x/tools/go/analysis/passes/pkgfact" +) + +func Test(t *testing.T) { + testdata := analysistest.TestData() + analysistest.Run(t, testdata, pkgfact.Analyzer, + "c", // loads testdata/src/c/c.go. 
+ ) +} diff --git a/go/analysis/passes/pkgfact/testdata/src/a/a.go b/go/analysis/passes/pkgfact/testdata/src/a/a.go new file mode 100644 index 0000000000..2dc5eb819e --- /dev/null +++ b/go/analysis/passes/pkgfact/testdata/src/a/a.go @@ -0,0 +1,4 @@ +package a + +const _greeting = "hello" +const _audience = "world" diff --git a/go/analysis/passes/pkgfact/testdata/src/b/b.go b/go/analysis/passes/pkgfact/testdata/src/b/b.go new file mode 100644 index 0000000000..3ff65b6d33 --- /dev/null +++ b/go/analysis/passes/pkgfact/testdata/src/b/b.go @@ -0,0 +1,5 @@ +package b + +import _ "a" + +const _pi = 3.14159 diff --git a/go/analysis/passes/pkgfact/testdata/src/c/c.go b/go/analysis/passes/pkgfact/testdata/src/c/c.go new file mode 100644 index 0000000000..83dac7efbe --- /dev/null +++ b/go/analysis/passes/pkgfact/testdata/src/c/c.go @@ -0,0 +1,3 @@ +package c + +import _ "b" // want `audience="world" greeting="hello" pi=3.14159` diff --git a/go/analysis/singlechecker/singlechecker.go b/go/analysis/singlechecker/singlechecker.go new file mode 100644 index 0000000000..d740b49665 --- /dev/null +++ b/go/analysis/singlechecker/singlechecker.go @@ -0,0 +1,50 @@ +// Package singlechecker defines the main function for an analysis +// driver with only a single analysis. +// This package makes it easy for a provider of an analysis package to +// also provide a standalone tool that runs just that analysis. +// +// For example, if example.org/findbadness is an analysis package, +// all that is needed to define a standalone tool is a file, +// example.org/findbadness/cmd/findbadness/main.go, containing: +// +// // The findbadness command runs an analysis. +// package main +// +// import ( +// "example.org/findbadness" +// "golang.org/x/tools/go/analysis/singlechecker" +// ) +// +// func main() { singlechecker.Main(findbadness.Analyzer) } +// +package singlechecker + +import ( + "flag" + "log" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/checker" +) + +// Main is the main function for a checker command for a single analysis. +func Main(a *analysis.Analyzer) { + log.SetFlags(0) + log.SetPrefix(a.Name + ": ") + + checker.RegisterFlags() + + a.Flags.VisitAll(func(f *flag.Flag) { + if flag.Lookup(f.Name) != nil { + log.Printf("%s flag -%s would conflict with driver; skipping", a.Name, f.Name) + return + } + flag.Var(f.Value, f.Name, f.Usage) + }) + + flag.Parse() + + if err := checker.Run(flag.Args(), []*analysis.Analyzer{a}); err != nil { + log.Fatal(err) + } +}
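Example (not part of the patch): the expectations that checkDiagnostics reads from 'want "..."' comments are Go-quoted strings compiled as regular expressions, so one expectation can match a family of diagnostic messages. A minimal sketch of a test that relies on that, reusing the findcall analyzer added above; the package path "example" and file name are invented for the illustration.

```go
package findcall_test

import (
	"testing"

	"golang.org/x/tools/go/analysis/analysistest"
	"golang.org/x/tools/go/analysis/passes/findcall"
)

// TestWantPattern shows that a 'want' expectation is a regular expression:
// "call of print(ln)?" matches findcall's "call of println(...)" diagnostic.
func TestWantPattern(t *testing.T) {
	files := map[string]string{"example/example.go": `package main

func main() {
	println("hi") // want "call of print(ln)?"
}
`}
	dir, cleanup, err := analysistest.WriteFiles(files)
	if err != nil {
		t.Fatal(err)
	}
	defer cleanup()
	analysistest.Run(t, dir, findcall.Analyzer, "example")
}
```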
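WriteFiles keys are file names relative to the src/ directory of the synthesized GOPATH, so a map with several keys yields a multi-package project whose import paths are the directory names. A sketch under that assumption (the names "lib" and "app" are invented), again exercised with the findcall analyzer:

```go
package findcall_test

import (
	"testing"

	"golang.org/x/tools/go/analysis/analysistest"
	"golang.org/x/tools/go/analysis/passes/findcall"
)

// TestMultiPackage lays out two packages; "app" imports "lib" via the
// GOPATH created by WriteFiles, and only "app" is analyzed.
func TestMultiPackage(t *testing.T) {
	files := map[string]string{
		"lib/lib.go": `package lib

func Greeting() string { return "hello" }
`,
		"app/app.go": `package main

import "lib"

func main() {
	println(lib.Greeting()) // want "call of println"
}
`,
	}
	dir, cleanup, err := analysistest.WriteFiles(files)
	if err != nil {
		t.Fatal(err)
	}
	defer cleanup()
	analysistest.Run(t, dir, findcall.Analyzer, "app")
}
```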
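Both drivers re-register each analyzer's own flags: singlechecker exposes them unprefixed, multichecker (and cmd/analyze) as --<analyzer>.<flag>. A minimal sketch of an analyzer that would plug into these drivers, using only the Analyzer and Pass fields this patch's checker reads (Name, Flags, Run, Files, Report); the name "maxargs", its flag, and its heuristic are invented:

```go
// Package maxargs is an illustrative analyzer: it reports functions whose
// parameter count exceeds the -max flag (-maxargs.max under multichecker).
package maxargs

import (
	"fmt"
	"go/ast"

	"golang.org/x/tools/go/analysis"
)

var max int

var Analyzer = &analysis.Analyzer{
	Name: "maxargs",
	Run:  run,
}

func init() {
	// Flags declared on the analyzer's own FlagSet are wired to the
	// command line by singlechecker/multichecker.
	Analyzer.Flags.IntVar(&max, "max", 6, "maximum permitted number of parameters")
}

func run(pass *analysis.Pass) (interface{}, error) {
	for _, f := range pass.Files {
		ast.Inspect(f, func(n ast.Node) bool {
			decl, ok := n.(*ast.FuncDecl)
			if !ok {
				return true
			}
			if np := decl.Type.Params.NumFields(); np > max {
				pass.Report(analysis.Diagnostic{
					Pos:     decl.Pos(),
					Message: fmt.Sprintf("%s has %d parameters, more than %d", decl.Name.Name, np, max),
				})
			}
			return true
		})
	}
	return nil, nil
}
```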
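execOnce plumbs each prerequisite's in-memory result into Pass.ResultOf, and a declared ResultType lets the driver check the dynamic type of whatever Run returns. An invented pair of analyzers sketching that horizontal (Requires) dependency; names and logic are illustrative only:

```go
// Package deps sketches a Requires/ResultOf dependency between two analyzers.
package deps

import (
	"go/ast"
	"reflect"

	"golang.org/x/tools/go/analysis"
)

// FuncCounter publishes the number of function declarations in the package
// as its result; the driver checks the value against ResultType.
var FuncCounter = &analysis.Analyzer{
	Name:       "funccounter",
	Run:        countFuncs,
	ResultType: reflect.TypeOf(int(0)),
}

func countFuncs(pass *analysis.Pass) (interface{}, error) {
	n := 0
	for _, f := range pass.Files {
		for _, decl := range f.Decls {
			if _, ok := decl.(*ast.FuncDecl); ok {
				n++
			}
		}
	}
	return n, nil
}

// BigPackage declares a horizontal edge on FuncCounter and reads its result
// from Pass.ResultOf, the map populated by the checker's execOnce.
var BigPackage = &analysis.Analyzer{
	Name:     "bigpackage",
	Requires: []*analysis.Analyzer{FuncCounter},
	Run: func(pass *analysis.Pass) (interface{}, error) {
		if n := pass.ResultOf[FuncCounter].(int); n > 100 {
			pass.Report(analysis.Diagnostic{
				Pos:     pass.Files[0].Package,
				Message: "package declares more than 100 functions",
			})
		}
		return nil, nil
	},
}
```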
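Following the pattern of cmd/analyze, a custom driver that bundles the two analyzers present in this patch might look like the sketch below (the command name is invented). The --findcall.enable and --pkgfact.enable flags then restrict the run to the selected analyzers, alongside the checker-wide -json, -c, -debug, and profiling flags registered by RegisterFlags.

```go
// Command multi is an illustrative driver bundling the analyzers from this patch.
package main

import (
	"log"

	"golang.org/x/tools/go/analysis/multichecker"
	"golang.org/x/tools/go/analysis/passes/findcall"
	"golang.org/x/tools/go/analysis/passes/pkgfact"
)

func main() {
	log.SetFlags(0)
	log.SetPrefix("multi: ")

	// multichecker.Main registers --findcall.* and --pkgfact.* flags,
	// parses the command line, and runs the checker on the named packages.
	multichecker.Main(findcall.Analyzer, pkgfact.Analyzer)
}
```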
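Because TestData is a variable, a test running under a build system that does not execute it in the package directory (the Blaze case mentioned in its doc comment) can substitute its own lookup. A sketch of that override; the package name, environment variable, and path are assumptions, not part of this patch:

```go
package myanalyzer_test // invented package name

import (
	"os"
	"path/filepath"

	"golang.org/x/tools/go/analysis/analysistest"
)

func init() {
	// If the build system exposes the source root through an environment
	// variable (hypothetical name below), point TestData at the testdata
	// directory underneath it instead of the process's working directory.
	if root := os.Getenv("TEST_SRCDIR"); root != "" {
		analysistest.TestData = func() string {
			return filepath.Join(root, "myanalyzer", "testdata")
		}
	}
}
```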