mirror of
https://github.com/golang/go.git
synced 2025-05-05 23:53:05 +00:00
dashboard: delete after move to the golang.org/x/build repository
Change-Id: I04335eb643d342fa7de1f39fc3fc26b1543a60e1 Reviewed-on: https://go-review.googlesource.com/3078 Reviewed-by: Andrew Gerrand <adg@golang.org>
This commit is contained in:
parent
b34c44b0e2
commit
cb6b359e69
@ -1,40 +0,0 @@
|
|||||||
// Copyright 2009 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
The files in these directories constitute the continuous builder:
|
|
||||||
|
|
||||||
app/: a.k.a. the "dashboard"; the App Engine code that runs http://build.golang.org/
|
|
||||||
|
|
||||||
cmd/:
|
|
||||||
|
|
||||||
buildlet/: HTTP server that runs on a VM and is told what to write to disk
|
|
||||||
and what command to run. This is cross-compiled to different architectures
|
|
||||||
and is the first program run when a builder VM comes up. It then
|
|
||||||
is contacted by the coordinator to do a build. Not all builders use
|
|
||||||
the buildlet (at least not yet).
|
|
||||||
|
|
||||||
builder/: gobuilder, a Go continuous build client. The original Go builder program.
|
|
||||||
|
|
||||||
coordinator/: daemon that runs on CoreOS on Google Compute Engine and manages
|
|
||||||
builds using Docker containers and/or VMs as needed.
|
|
||||||
|
|
||||||
retrybuilds/: a Go client program to delete build results from the dashboard (app)
|
|
||||||
|
|
||||||
upload/: a Go program to upload to Google Cloud Storage. used by Makefiles elsewhere.
|
|
||||||
|
|
||||||
watcher/: a daemon that watches for new commits to the Go repository and
|
|
||||||
its sub-repositories, and notifies the dashboard of those commits.
|
|
||||||
|
|
||||||
env/: configuration files describing the environment of builders and related
|
|
||||||
binaries. Many builders are still configured ad-hoc, without a hermetic
|
|
||||||
environment.
|
|
||||||
|
|
||||||
types/: a Go package containing common types used by other pieces.
|
|
||||||
|
|
||||||
|
|
||||||
If you wish to run a Go builder, please email golang-dev@googlegroups.com first.
|
|
||||||
There is documentation at https://golang.org/wiki/DashboardBuilders but
|
|
||||||
depending on the type of builder, we may want to run it ourselves, after you
|
|
||||||
prepare an environment description (resulting in a VM image) of it. See the env
|
|
||||||
directory.
|
|
@ -1,21 +0,0 @@
|
|||||||
# Update with
|
|
||||||
# google_appengine/appcfg.py [-V build-test] update .
|
|
||||||
#
|
|
||||||
# Using -V build-test will run as build-test.golang.org.
|
|
||||||
|
|
||||||
application: golang-org
|
|
||||||
version: build
|
|
||||||
runtime: go
|
|
||||||
api_version: go1
|
|
||||||
|
|
||||||
handlers:
|
|
||||||
- url: /static
|
|
||||||
static_dir: static
|
|
||||||
- url: /(|gccgo/|hg/)log/.+
|
|
||||||
script: _go_app
|
|
||||||
- url: /(|gccgo/|hg/)(|building|clear-results|commit|packages|result|perf-result|tag|todo|perf|perfdetail|perfgraph|updatebenchmark)
|
|
||||||
script: _go_app
|
|
||||||
- url: /(|gccgo/|hg/)(init|buildtest|key|perflearn|_ah/queue/go/delay)
|
|
||||||
script: _go_app
|
|
||||||
login: admin
|
|
||||||
|
|
@ -1,946 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"crypto/sha1"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
|
|
||||||
"cache"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// maxDatastoreStringLen is the maximum length of an indexed
	// string property in the App Engine datastore.
	maxDatastoreStringLen = 500
	// PerfRunLength is the number of consecutive commits packed into
	// a single CommitRun / PerfMetricRun entity.
	PerfRunLength = 1024
)
|
|
||||||
|
|
||||||
// A Package describes a package that is listed on the dashboard.
type Package struct {
	Kind string // "subrepo", "external", or empty for the main Go tree
	Name string
	Path string // (empty for the main Go tree)
	NextNum int // Num of the next head Commit
}
|
|
||||||
|
|
||||||
// String implements fmt.Stringer, rendering the package as `path: "name"`.
func (p *Package) String() string {
	return fmt.Sprintf("%s: %q", p.Path, p.Name)
}
|
|
||||||
|
|
||||||
// Key returns the datastore key for this Package.
// The main Go tree (empty Path) is stored under the fixed key name "go".
func (p *Package) Key(c appengine.Context) *datastore.Key {
	key := p.Path
	if key == "" {
		key = "go"
	}
	return datastore.NewKey(c, "Package", key, 0, nil)
}
|
|
||||||
|
|
||||||
// LastCommit returns the most recent Commit for this Package.
// It returns datastore.ErrNoSuchEntity when the package has no commits.
func (p *Package) LastCommit(c appengine.Context) (*Commit, error) {
	var commits []*Commit
	_, err := datastore.NewQuery("Commit").
		Ancestor(p.Key(c)).
		Order("-Time").
		Limit(1).
		GetAll(c, &commits)
	if err != nil {
		return nil, err
	}
	if len(commits) != 1 {
		return nil, datastore.ErrNoSuchEntity
	}
	return commits[0], nil
}
|
|
||||||
|
|
||||||
// GetPackage fetches a Package by path from the datastore.
// A missing entity is reported as a descriptive error rather than the
// raw datastore.ErrNoSuchEntity.
func GetPackage(c appengine.Context, path string) (*Package, error) {
	p := &Package{Path: path}
	err := datastore.Get(c, p.Key(c), p)
	if err == datastore.ErrNoSuchEntity {
		return nil, fmt.Errorf("package %q not found", path)
	}
	return p, err
}
|
|
||||||
|
|
||||||
// builderAndGoHash identifies one build of a commit: which builder ran
// it, and (for sub-repos) against which Go tree commit.
type builderAndGoHash struct {
	builder, goHash string
}
|
|
||||||
|
|
||||||
// A Commit describes an individual commit in a package.
//
// Each Commit entity is a descendant of its associated Package entity.
// In other words, all Commits with the same PackagePath belong to the same
// datastore entity group.
type Commit struct {
	PackagePath string // (empty for main repo commits)
	Hash string
	ParentHash string
	Num int // Internal monotonic counter unique to this package.

	User string
	Desc string `datastore:",noindex"`
	Time time.Time
	NeedsBenchmarking bool
	TryPatch bool
	Branch string

	// ResultData is the Data string of each build Result for this Commit.
	// For non-Go commits, only the Results for the current Go tip, weekly,
	// and release Tags are stored here. This is purely de-normalized data.
	// The complete data set is stored in Result entities.
	ResultData []string `datastore:",noindex"`

	// PerfResults holds a set of “builder|benchmark” tuples denoting
	// what benchmarks have been executed on the commit.
	PerfResults []string `datastore:",noindex"`

	FailNotificationSent bool

	// buildingURLs maps builder+goHash to the status URL of an
	// in-progress build (unexported, so not persisted by the datastore).
	buildingURLs map[builderAndGoHash]string
}
|
|
||||||
|
|
||||||
// Key returns the datastore key for this Commit: a child of its
// Package entity, named "PackagePath|Hash".
// It panics if Hash is unset, since that would produce a bogus key.
func (com *Commit) Key(c appengine.Context) *datastore.Key {
	if com.Hash == "" {
		panic("tried Key on Commit with empty Hash")
	}
	p := Package{Path: com.PackagePath}
	key := com.PackagePath + "|" + com.Hash
	return datastore.NewKey(c, "Commit", key, 0, p.Key(c))
}
|
|
||||||
|
|
||||||
func (c *Commit) Valid() error {
|
|
||||||
if !validHash(c.Hash) {
|
|
||||||
return errors.New("invalid Hash")
|
|
||||||
}
|
|
||||||
if c.ParentHash != "" && !validHash(c.ParentHash) { // empty is OK
|
|
||||||
return errors.New("invalid ParentHash")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// putCommit validates com and stores it in the datastore.
// It rejects commits with an unset Num, except for the sentinel
// ParentHash "0000" used by tests.
func putCommit(c appengine.Context, com *Commit) error {
	if err := com.Valid(); err != nil {
		return fmt.Errorf("putting Commit: %v", err)
	}
	if com.Num == 0 && com.ParentHash != "0000" { // 0000 is used in tests
		return fmt.Errorf("putting Commit: invalid Num (must be > 0)")
	}
	if _, err := datastore.Put(c, com.Key(c), com); err != nil {
		return fmt.Errorf("putting Commit: %v", err)
	}
	return nil
}
|
|
||||||
|
|
||||||
// each result line is approx 105 bytes. This constant is a tradeoff between
// build history and the AppEngine datastore limit of 1mb.
// Used by AddResult to cap the length of Commit.ResultData.
const maxResults = 1000
|
|
||||||
|
|
||||||
// AddResult adds the denormalized Result data to the Commit's ResultData field.
// It must be called from inside a datastore transaction.
// It re-reads the Commit first so concurrent updates are not lost.
func (com *Commit) AddResult(c appengine.Context, r *Result) error {
	if err := datastore.Get(c, com.Key(c), com); err != nil {
		return fmt.Errorf("getting Commit: %v", err)
	}

	var resultExists bool
	for i, s := range com.ResultData {
		// if there already exists result data for this builder at com, overwrite it.
		if strings.HasPrefix(s, r.Builder+"|") && strings.HasSuffix(s, "|"+r.GoHash) {
			resultExists = true
			com.ResultData[i] = r.Data()
		}
	}
	if !resultExists {
		// otherwise, add the new result data for this builder.
		// trim drops the oldest entries beyond maxResults to stay
		// under the 1 MB entity size limit.
		com.ResultData = trim(append(com.ResultData, r.Data()), maxResults)
	}
	return putCommit(c, com)
}
|
|
||||||
|
|
||||||
// removeResult removes the denormalized Result data from the ResultData field
|
|
||||||
// for the given builder and go hash.
|
|
||||||
// It must be called from within the datastore transaction that gets and puts
|
|
||||||
// the Commit. Note this is slightly different to AddResult, above.
|
|
||||||
func (com *Commit) RemoveResult(r *Result) {
|
|
||||||
var rd []string
|
|
||||||
for _, s := range com.ResultData {
|
|
||||||
if strings.HasPrefix(s, r.Builder+"|") && strings.HasSuffix(s, "|"+r.GoHash) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
rd = append(rd, s)
|
|
||||||
}
|
|
||||||
com.ResultData = rd
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddPerfResult remembers that the builder has run the benchmark on the commit.
// It must be called from inside a datastore transaction.
// Adding the same builder|benchmark pair twice is a no-op.
func (com *Commit) AddPerfResult(c appengine.Context, builder, benchmark string) error {
	if err := datastore.Get(c, com.Key(c), com); err != nil {
		return fmt.Errorf("getting Commit: %v", err)
	}
	if !com.NeedsBenchmarking {
		return fmt.Errorf("trying to add perf result to Commit(%v) that does not require benchmarking", com.Hash)
	}
	s := builder + "|" + benchmark
	for _, v := range com.PerfResults {
		if v == s {
			return nil
		}
	}
	com.PerfResults = append(com.PerfResults, s)
	return putCommit(c, com)
}
|
|
||||||
|
|
||||||
// trim returns the trailing n elements of s; when s is shorter than n,
// all of s is returned.
func trim(s []string, n int) []string {
	keep := min(len(s), n)
	return s[len(s)-keep:]
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
|
||||||
|
|
||||||
// Result returns the build Result for this Commit for the given builder/goHash.
// If no stored result matches but a build is in progress (recorded in
// buildingURLs), a synthetic Result carrying only the status URL is
// returned. Returns nil when nothing matches.
func (c *Commit) Result(builder, goHash string) *Result {
	for _, r := range c.ResultData {
		if !strings.HasPrefix(r, builder) {
			// Avoid strings.SplitN alloc in the common case.
			continue
		}
		// r is "builder|ok|loghash|gohash" (see Result.Data).
		p := strings.SplitN(r, "|", 4)
		if len(p) != 4 || p[0] != builder || p[3] != goHash {
			continue
		}
		return partsToResult(c, p)
	}
	if u, ok := c.buildingURLs[builderAndGoHash{builder, goHash}]; ok {
		return &Result{
			Builder: builder,
			BuildingURL: u,
			Hash: c.Hash,
			GoHash: goHash,
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
// Results returns the build Results for this Commit.
|
|
||||||
func (c *Commit) Results() (results []*Result) {
|
|
||||||
for _, r := range c.ResultData {
|
|
||||||
p := strings.SplitN(r, "|", 4)
|
|
||||||
if len(p) != 4 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
results = append(results, partsToResult(c, p))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResultGoHashes returns the distinct Go tree hashes for which this
// Commit has build results, newest first.
func (c *Commit) ResultGoHashes() []string {
	// For the main repo, just return the empty string
	// (there's no corresponding main repo hash for a main repo Commit).
	// This function is only really useful for sub-repos.
	if c.PackagePath == "" {
		return []string{""}
	}
	var hashes []string
	for _, r := range c.ResultData {
		p := strings.SplitN(r, "|", 4)
		if len(p) != 4 {
			continue
		}
		// Append only new results (use linear scan to preserve order).
		if !contains(hashes, p[3]) {
			hashes = append(hashes, p[3])
		}
	}
	// Return results in reverse order (newest first).
	reverse(hashes)
	return hashes
}
|
|
||||||
|
|
||||||
// contains reports whether s occurs in t.
func contains(t []string, s string) bool {
	for i := range t {
		if t[i] == s {
			return true
		}
	}
	return false
}
|
|
||||||
|
|
||||||
// reverse reverses s in place.
func reverse(s []string) {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
}
|
|
||||||
|
|
||||||
// A CommitRun provides summary information for commits [StartCommitNum, StartCommitNum + PerfRunLength).
// Descendant of Package.
// Each column slice is indexed by com.Num - StartCommitNum (see AddCommit).
type CommitRun struct {
	PackagePath string // (empty for main repo commits)
	StartCommitNum int
	Hash []string `datastore:",noindex"`
	User []string `datastore:",noindex"`
	Desc []string `datastore:",noindex"` // Only first line.
	Time []time.Time `datastore:",noindex"`
	NeedsBenchmarking []bool `datastore:",noindex"`
}
|
|
||||||
|
|
||||||
// Key returns the datastore key for cr: a child of its Package entity,
// named by the decimal StartCommitNum.
func (cr *CommitRun) Key(c appengine.Context) *datastore.Key {
	p := Package{Path: cr.PackagePath}
	key := strconv.Itoa(cr.StartCommitNum)
	return datastore.NewKey(c, "CommitRun", key, 0, p.Key(c))
}
|
|
||||||
|
|
||||||
// GetCommitRun loads and returns CommitRun that contains information
// for commit commitNum.
// A missing entity is not an error: a freshly allocated, zeroed run is
// returned instead.
func GetCommitRun(c appengine.Context, commitNum int) (*CommitRun, error) {
	cr := &CommitRun{StartCommitNum: commitNum / PerfRunLength * PerfRunLength}
	err := datastore.Get(c, cr.Key(c), cr)
	if err != nil && err != datastore.ErrNoSuchEntity {
		return nil, fmt.Errorf("getting CommitRun: %v", err)
	}
	if len(cr.Hash) != PerfRunLength {
		// New (or malformed) entity: allocate all column slices at
		// their full fixed length.
		cr.Hash = make([]string, PerfRunLength)
		cr.User = make([]string, PerfRunLength)
		cr.Desc = make([]string, PerfRunLength)
		cr.Time = make([]time.Time, PerfRunLength)
		cr.NeedsBenchmarking = make([]bool, PerfRunLength)
	}
	return cr, nil
}
|
|
||||||
|
|
||||||
// AddCommit records com's summary columns at its slot in cr and
// persists cr. com.Num must fall inside cr's commit range.
func (cr *CommitRun) AddCommit(c appengine.Context, com *Commit) error {
	if com.Num < cr.StartCommitNum || com.Num >= cr.StartCommitNum+PerfRunLength {
		return fmt.Errorf("AddCommit: commit num %v out of range [%v, %v)",
			com.Num, cr.StartCommitNum, cr.StartCommitNum+PerfRunLength)
	}
	i := com.Num - cr.StartCommitNum
	// Be careful with string lengths,
	// we need to fit 1024 commits into 1 MB.
	cr.Hash[i] = com.Hash
	cr.User[i] = shortDesc(com.User)
	cr.Desc[i] = shortDesc(com.Desc)
	cr.Time[i] = com.Time
	cr.NeedsBenchmarking[i] = com.NeedsBenchmarking
	if _, err := datastore.Put(c, cr.Key(c), cr); err != nil {
		return fmt.Errorf("putting CommitRun: %v", err)
	}
	return nil
}
|
|
||||||
|
|
||||||
// GetCommits returns [startCommitNum, startCommitNum+n) commits.
// Commits information is partial (obtained from CommitRun),
// do not store them back into datastore.
// Slots not covered by any stored CommitRun remain nil in the result.
func GetCommits(c appengine.Context, startCommitNum, n int) ([]*Commit, error) {
	if startCommitNum < 0 || n <= 0 {
		return nil, fmt.Errorf("GetCommits: invalid args (%v, %v)", startCommitNum, n)
	}

	p := &Package{}
	t := datastore.NewQuery("CommitRun").
		Ancestor(p.Key(c)).
		Filter("StartCommitNum >=", startCommitNum/PerfRunLength*PerfRunLength).
		Order("StartCommitNum").
		Limit(100).
		Run(c)

	res := make([]*Commit, n)
	for {
		cr := new(CommitRun)
		_, err := t.Next(cr)
		if err == datastore.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		if cr.StartCommitNum >= startCommitNum+n {
			break
		}
		// Calculate start index for copying.
		i := 0
		if cr.StartCommitNum < startCommitNum {
			i = startCommitNum - cr.StartCommitNum
		}
		// Calculate end index for copying.
		e := PerfRunLength
		if cr.StartCommitNum+e > startCommitNum+n {
			e = startCommitNum + n - cr.StartCommitNum
		}
		// Expand the run's column slices into individual Commit values.
		for ; i < e; i++ {
			com := new(Commit)
			com.Hash = cr.Hash[i]
			com.User = cr.User[i]
			com.Desc = cr.Desc[i]
			com.Time = cr.Time[i]
			com.NeedsBenchmarking = cr.NeedsBenchmarking[i]
			res[cr.StartCommitNum-startCommitNum+i] = com
		}
		// A partial copy means we reached the end of the requested
		// range; no later run can contribute.
		if e != PerfRunLength {
			break
		}
	}
	return res, nil
}
|
|
||||||
|
|
||||||
// partsToResult converts a Commit and ResultData substrings to a Result.
// p is the 4-element "builder|ok|loghash|gohash" split of one
// ResultData entry (the format written by Result.Data).
func partsToResult(c *Commit, p []string) *Result {
	return &Result{
		Builder: p[0],
		Hash: c.Hash,
		PackagePath: c.PackagePath,
		GoHash: p[3],
		OK: p[1] == "true",
		LogHash: p[2],
	}
}
|
|
||||||
|
|
||||||
// A Result describes a build result for a Commit on an OS/architecture.
//
// Each Result entity is a descendant of its associated Package entity.
type Result struct {
	PackagePath string // (empty for Go commits)
	Builder string // "os-arch[-note]"
	Hash string

	// The Go Commit this was built against (empty for Go commits).
	GoHash string

	BuildingURL string `datastore:"-"` // non-empty if currently building
	OK bool
	Log string `datastore:"-"` // for JSON unmarshaling only
	LogHash string `datastore:",noindex"` // Key to the Log record.

	RunTime int64 // time to build+test in nanoseconds
}
|
|
||||||
|
|
||||||
// Key returns the datastore key for r: a child of its Package entity,
// named "builder|packagePath|hash|goHash".
func (r *Result) Key(c appengine.Context) *datastore.Key {
	p := Package{Path: r.PackagePath}
	key := r.Builder + "|" + r.PackagePath + "|" + r.Hash + "|" + r.GoHash
	return datastore.NewKey(c, "Result", key, 0, p.Key(c))
}
|
|
||||||
|
|
||||||
func (r *Result) Valid() error {
|
|
||||||
if !validHash(r.Hash) {
|
|
||||||
return errors.New("invalid Hash")
|
|
||||||
}
|
|
||||||
if r.PackagePath != "" && !validHash(r.GoHash) {
|
|
||||||
return errors.New("invalid GoHash")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Data returns the Result in string format
|
|
||||||
// to be stored in Commit's ResultData field.
|
|
||||||
func (r *Result) Data() string {
|
|
||||||
return fmt.Sprintf("%v|%v|%v|%v", r.Builder, r.OK, r.LogHash, r.GoHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A PerfResult describes all benchmarking result for a Commit.
// Descendant of Package.
type PerfResult struct {
	PackagePath string
	CommitHash string
	CommitNum int
	Data []string `datastore:",noindex"` // "builder|benchmark|ok|metric1=val1|metric2=val2|file:log=hash|file:cpuprof=hash"

	// Local cache with parsed Data.
	// Maps builder->benchmark->ParsedPerfResult.
	parsedData map[string]map[string]*ParsedPerfResult
}
|
|
||||||
|
|
||||||
// A ParsedPerfResult is one builder/benchmark cell of a PerfResult,
// decoded from its packed string form (see PerfResult.ParseData).
type ParsedPerfResult struct {
	OK bool
	Metrics map[string]uint64
	Artifacts map[string]string
}
|
|
||||||
|
|
||||||
// Key returns the datastore key for r: a child of its Package entity,
// named by the commit hash.
func (r *PerfResult) Key(c appengine.Context) *datastore.Key {
	p := Package{Path: r.PackagePath}
	key := r.CommitHash
	return datastore.NewKey(c, "PerfResult", key, 0, p.Key(c))
}
|
|
||||||
|
|
||||||
// AddResult add the benchmarking result to r.
// Existing result for the same builder/benchmark is replaced if already exists.
// Returns whether the result was already present.
func (r *PerfResult) AddResult(req *PerfRequest) bool {
	present := false
	str := fmt.Sprintf("%v|%v|", req.Builder, req.Benchmark)
	for i, s := range r.Data {
		if strings.HasPrefix(s, str) {
			present = true
			// Remove the old entry: swap with the last element and
			// truncate (order of Data is not significant).
			last := len(r.Data) - 1
			r.Data[i] = r.Data[last]
			r.Data = r.Data[:last]
			break
		}
	}
	// NOTE(review): failure is encoded as "false", not "fail";
	// ParseData treats any value other than "ok" as a failure, so the
	// two are consistent.
	ok := "ok"
	if !req.OK {
		ok = "false"
	}
	str += ok
	for _, m := range req.Metrics {
		str += fmt.Sprintf("|%v=%v", m.Type, m.Val)
	}
	for _, a := range req.Artifacts {
		str += fmt.Sprintf("|file:%v=%v", a.Type, a.Body)
	}
	r.Data = append(r.Data, str)
	// Invalidate the memoized parse of Data.
	r.parsedData = nil
	return present
}
|
|
||||||
|
|
||||||
// ParseData decodes r.Data into a builder -> benchmark -> result map,
// memoizing the decoded form in r.parsedData.
func (r *PerfResult) ParseData() map[string]map[string]*ParsedPerfResult {
	if r.parsedData != nil {
		return r.parsedData
	}
	res := make(map[string]map[string]*ParsedPerfResult)
	for _, str := range r.Data {
		// Entry format: "builder|benchmark|ok|metric=val|...|file:name=hash|..."
		ss := strings.Split(str, "|")
		builder := ss[0]
		bench := ss[1]
		ok := ss[2]
		m := res[builder]
		if m == nil {
			m = make(map[string]*ParsedPerfResult)
			res[builder] = m
		}
		var p ParsedPerfResult
		p.OK = ok == "ok"
		p.Metrics = make(map[string]uint64)
		p.Artifacts = make(map[string]string)
		for _, entry := range ss[3:] {
			if strings.HasPrefix(entry, "file:") {
				// Artifact reference: "file:name=hash".
				ss1 := strings.Split(entry[len("file:"):], "=")
				p.Artifacts[ss1[0]] = ss1[1]
			} else {
				// Metric value: "metric=uint". Parse errors leave 0.
				ss1 := strings.Split(entry, "=")
				val, _ := strconv.ParseUint(ss1[1], 10, 64)
				p.Metrics[ss1[0]] = val
			}
		}
		m[bench] = &p
	}
	r.parsedData = res
	return res
}
|
|
||||||
|
|
||||||
// A PerfMetricRun entity holds a set of metric values for builder/benchmark/metric
// for commits [StartCommitNum, StartCommitNum + PerfRunLength).
// Descendant of Package.
type PerfMetricRun struct {
	PackagePath string
	Builder string
	Benchmark string
	Metric string // e.g. realtime, cputime, gc-pause
	StartCommitNum int
	// Vals is indexed by commitNum - StartCommitNum; unset slots stay 0.
	Vals []int64 `datastore:",noindex"`
}
|
|
||||||
|
|
||||||
// Key returns the datastore key for m: a child of its Package entity,
// named "builder|benchmark|metric|startCommitNum".
func (m *PerfMetricRun) Key(c appengine.Context) *datastore.Key {
	p := Package{Path: m.PackagePath}
	key := m.Builder + "|" + m.Benchmark + "|" + m.Metric + "|" + strconv.Itoa(m.StartCommitNum)
	return datastore.NewKey(c, "PerfMetricRun", key, 0, p.Key(c))
}
|
|
||||||
|
|
||||||
// GetPerfMetricRun loads and returns PerfMetricRun that contains information
// for commit commitNum.
// A missing entity is not an error: a zeroed run is returned instead.
func GetPerfMetricRun(c appengine.Context, builder, benchmark, metric string, commitNum int) (*PerfMetricRun, error) {
	startCommitNum := commitNum / PerfRunLength * PerfRunLength
	m := &PerfMetricRun{Builder: builder, Benchmark: benchmark, Metric: metric, StartCommitNum: startCommitNum}
	err := datastore.Get(c, m.Key(c), m)
	if err != nil && err != datastore.ErrNoSuchEntity {
		return nil, fmt.Errorf("getting PerfMetricRun: %v", err)
	}
	if len(m.Vals) != PerfRunLength {
		// New (or malformed) entity: allocate the full fixed-size slice.
		m.Vals = make([]int64, PerfRunLength)
	}
	return m, nil
}
|
|
||||||
|
|
||||||
// AddMetric records metric value v for commitNum and persists m.
// commitNum must fall inside m's commit range. v is stored as int64.
func (m *PerfMetricRun) AddMetric(c appengine.Context, commitNum int, v uint64) error {
	if commitNum < m.StartCommitNum || commitNum >= m.StartCommitNum+PerfRunLength {
		return fmt.Errorf("AddMetric: CommitNum %v out of range [%v, %v)",
			commitNum, m.StartCommitNum, m.StartCommitNum+PerfRunLength)
	}
	m.Vals[commitNum-m.StartCommitNum] = int64(v)
	if _, err := datastore.Put(c, m.Key(c), m); err != nil {
		return fmt.Errorf("putting PerfMetricRun: %v", err)
	}
	return nil
}
|
|
||||||
|
|
||||||
// GetPerfMetricsForCommits returns perf metrics for builder/benchmark/metric
// and commits [startCommitNum, startCommitNum+n).
// Slots with no stored value are 0 in the result.
func GetPerfMetricsForCommits(c appengine.Context, builder, benchmark, metric string, startCommitNum, n int) ([]uint64, error) {
	if startCommitNum < 0 || n <= 0 {
		return nil, fmt.Errorf("GetPerfMetricsForCommits: invalid args (%v, %v)", startCommitNum, n)
	}

	p := &Package{}
	t := datastore.NewQuery("PerfMetricRun").
		Ancestor(p.Key(c)).
		Filter("Builder =", builder).
		Filter("Benchmark =", benchmark).
		Filter("Metric =", metric).
		Filter("StartCommitNum >=", startCommitNum/PerfRunLength*PerfRunLength).
		Order("StartCommitNum").
		Limit(100).
		Run(c)

	res := make([]uint64, n)
	for {
		metrics := new(PerfMetricRun)
		_, err := t.Next(metrics)
		if err == datastore.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		if metrics.StartCommitNum >= startCommitNum+n {
			break
		}
		// Calculate start index for copying.
		i := 0
		if metrics.StartCommitNum < startCommitNum {
			i = startCommitNum - metrics.StartCommitNum
		}
		// Calculate end index for copying.
		e := PerfRunLength
		if metrics.StartCommitNum+e > startCommitNum+n {
			e = startCommitNum + n - metrics.StartCommitNum
		}
		for ; i < e; i++ {
			res[metrics.StartCommitNum-startCommitNum+i] = uint64(metrics.Vals[i])
		}
		// A partial copy means we reached the end of the requested range.
		if e != PerfRunLength {
			break
		}
	}
	return res, nil
}
|
|
||||||
|
|
||||||
// PerfConfig holds read-mostly configuration related to benchmarking.
// There is only one PerfConfig entity (see PerfConfigKey).
type PerfConfig struct {
	BuilderBench []string `datastore:",noindex"` // "builder|benchmark" pairs
	BuilderProcs []string `datastore:",noindex"` // "builder|proc" pairs
	BenchMetric []string `datastore:",noindex"` // "benchmark|metric" pairs
	NoiseLevels []string `datastore:",noindex"` // "builder|benchmark|metric1=noise1|metric2=noise2"

	// Local cache of "builder|benchmark|metric" -> noise.
	noise map[string]float64
}
|
|
||||||
|
|
||||||
// PerfConfigKey returns the key of the singleton PerfConfig entity,
// a child of the main Go tree's Package entity.
func PerfConfigKey(c appengine.Context) *datastore.Key {
	p := Package{}
	return datastore.NewKey(c, "PerfConfig", "PerfConfig", 0, p.Key(c))
}
|
|
||||||
|
|
||||||
// perfConfigCacheKey is the cache key under which GetPerfConfig
// memoizes the PerfConfig entity.
const perfConfigCacheKey = "perf-config"
|
|
||||||
|
|
||||||
// GetPerfConfig returns the PerfConfig entity, consulting the cache
// first and populating it on a miss.
// A missing datastore entity is not an error: an empty config is
// returned (and cached).
func GetPerfConfig(c appengine.Context, r *http.Request) (*PerfConfig, error) {
	pc := new(PerfConfig)
	now := cache.Now(c)
	if cache.Get(r, now, perfConfigCacheKey, pc) {
		return pc, nil
	}
	err := datastore.Get(c, PerfConfigKey(c), pc)
	if err != nil && err != datastore.ErrNoSuchEntity {
		return nil, fmt.Errorf("GetPerfConfig: %v", err)
	}
	cache.Set(r, now, perfConfigCacheKey, pc)
	return pc, nil
}
|
|
||||||
|
|
||||||
// NoiseLevel returns the observed noise level for the given
// builder/benchmark/metric, or a very conservative default when none
// has been learned yet.
func (pc *PerfConfig) NoiseLevel(builder, benchmark, metric string) float64 {
	if pc.noise == nil {
		// Lazily parse NoiseLevels ("builder|benchmark|metric=noise|...")
		// into the lookup map.
		pc.noise = make(map[string]float64)
		for _, str := range pc.NoiseLevels {
			split := strings.Split(str, "|")
			builderBench := split[0] + "|" + split[1]
			for _, entry := range split[2:] {
				metricValue := strings.Split(entry, "=")
				noise, _ := strconv.ParseFloat(metricValue[1], 64)
				pc.noise[builderBench+"|"+metricValue[0]] = noise
			}
		}
	}
	me := fmt.Sprintf("%v|%v|%v", builder, benchmark, metric)
	n := pc.noise[me]
	if n == 0 {
		// Use a very conservative value
		// until we have learned the real noise level.
		n = 200
	}
	return n
}
|
|
||||||
|
|
||||||
// UpdatePerfConfig updates the PerfConfig entity with results of benchmarking.
// Returns whether it's a benchmark that we have not yet seen on the builder.
func UpdatePerfConfig(c appengine.Context, r *http.Request, req *PerfRequest) (newBenchmark bool, err error) {
	pc, err := GetPerfConfig(c, r)
	if err != nil {
		return false, err
	}

	modified := false
	// add appends str to *arr if not already present, recording that
	// the config changed.
	add := func(arr *[]string, str string) {
		for _, s := range *arr {
			if s == str {
				return
			}
		}
		*arr = append(*arr, str)
		modified = true
		return
	}

	// req.Benchmark may encode a procs count as a "-N" suffix
	// (e.g. "json-4"); split it off, defaulting to 1.
	BenchProcs := strings.Split(req.Benchmark, "-")
	benchmark := BenchProcs[0]
	procs := "1"
	if len(BenchProcs) > 1 {
		procs = BenchProcs[1]
	}

	// newBenchmark is captured before the later adds so it reflects
	// only the builder|benchmark pair.
	add(&pc.BuilderBench, req.Builder+"|"+benchmark)
	newBenchmark = modified
	add(&pc.BuilderProcs, req.Builder+"|"+procs)
	for _, m := range req.Metrics {
		add(&pc.BenchMetric, benchmark+"|"+m.Type)
	}

	if modified {
		if _, err := datastore.Put(c, PerfConfigKey(c), pc); err != nil {
			return false, fmt.Errorf("putting PerfConfig: %v", err)
		}
		// Invalidate cached copies of the config.
		cache.Tick(c)
	}
	return newBenchmark, nil
}
|
|
||||||
|
|
||||||
// MetricList sorts metric names so that ordinary metrics come first
// (alphabetically), followed by "build-"/"binary-" metrics.
// It implements sort.Interface.
type MetricList []string

func (l MetricList) Len() int { return len(l) }

// Less orders non-build metrics before build/binary-size metrics, and
// alphabetically within each group.
func (l MetricList) Less(i, j int) bool {
	isBuild := func(s string) bool {
		return strings.HasPrefix(s, "build-") || strings.HasPrefix(s, "binary-")
	}
	bi, bj := isBuild(l[i]), isBuild(l[j])
	if bi != bj {
		return !bi
	}
	return l[i] < l[j]
}

func (l MetricList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
|
||||||
|
|
||||||
func collectList(all []string, idx int, second string) (res []string) {
|
|
||||||
m := make(map[string]bool)
|
|
||||||
for _, str := range all {
|
|
||||||
ss := strings.Split(str, "|")
|
|
||||||
v := ss[idx]
|
|
||||||
v2 := ss[1-idx]
|
|
||||||
if (second == "" || second == v2) && !m[v] {
|
|
||||||
m[v] = true
|
|
||||||
res = append(res, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.Sort(MetricList(res))
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pc *PerfConfig) BuildersForBenchmark(bench string) []string {
|
|
||||||
return collectList(pc.BuilderBench, 0, bench)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pc *PerfConfig) BenchmarksForBuilder(builder string) []string {
|
|
||||||
return collectList(pc.BuilderBench, 1, builder)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pc *PerfConfig) MetricsForBenchmark(bench string) []string {
|
|
||||||
return collectList(pc.BenchMetric, 1, bench)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pc *PerfConfig) BenchmarkProcList() (res []string) {
|
|
||||||
bl := pc.BenchmarksForBuilder("")
|
|
||||||
pl := pc.ProcList("")
|
|
||||||
for _, b := range bl {
|
|
||||||
for _, p := range pl {
|
|
||||||
res = append(res, fmt.Sprintf("%v-%v", b, p))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pc *PerfConfig) ProcList(builder string) []int {
|
|
||||||
ss := collectList(pc.BuilderProcs, 1, builder)
|
|
||||||
var procs []int
|
|
||||||
for _, s := range ss {
|
|
||||||
p, _ := strconv.ParseInt(s, 10, 32)
|
|
||||||
procs = append(procs, int(p))
|
|
||||||
}
|
|
||||||
sort.Ints(procs)
|
|
||||||
return procs
|
|
||||||
}
|
|
||||||
|
|
||||||
// A PerfTodo contains outstanding commits for benchmarking for a builder.
|
|
||||||
// Descendant of Package.
|
|
||||||
type PerfTodo struct {
|
|
||||||
PackagePath string // (empty for main repo commits)
|
|
||||||
Builder string
|
|
||||||
CommitNums []int `datastore:",noindex"` // LIFO queue of commits to benchmark.
|
|
||||||
}
|
|
||||||
|
|
||||||
func (todo *PerfTodo) Key(c appengine.Context) *datastore.Key {
|
|
||||||
p := Package{Path: todo.PackagePath}
|
|
||||||
key := todo.Builder
|
|
||||||
return datastore.NewKey(c, "PerfTodo", key, 0, p.Key(c))
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddCommitToPerfTodo adds the commit to all existing PerfTodo entities.
|
|
||||||
func AddCommitToPerfTodo(c appengine.Context, com *Commit) error {
|
|
||||||
var todos []*PerfTodo
|
|
||||||
_, err := datastore.NewQuery("PerfTodo").
|
|
||||||
Ancestor((&Package{}).Key(c)).
|
|
||||||
GetAll(c, &todos)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("fetching PerfTodo's: %v", err)
|
|
||||||
}
|
|
||||||
for _, todo := range todos {
|
|
||||||
todo.CommitNums = append(todo.CommitNums, com.Num)
|
|
||||||
_, err = datastore.Put(c, todo.Key(c), todo)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("updating PerfTodo: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Log is a gzip-compressed log file stored under the SHA1 hash of the
|
|
||||||
// uncompressed log text.
|
|
||||||
type Log struct {
|
|
||||||
CompressedLog []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Log) Text() ([]byte, error) {
|
|
||||||
d, err := gzip.NewReader(bytes.NewBuffer(l.CompressedLog))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("reading log data: %v", err)
|
|
||||||
}
|
|
||||||
b, err := ioutil.ReadAll(d)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("reading log data: %v", err)
|
|
||||||
}
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func PutLog(c appengine.Context, text string) (hash string, err error) {
|
|
||||||
h := sha1.New()
|
|
||||||
io.WriteString(h, text)
|
|
||||||
b := new(bytes.Buffer)
|
|
||||||
z, _ := gzip.NewWriterLevel(b, gzip.BestCompression)
|
|
||||||
io.WriteString(z, text)
|
|
||||||
z.Close()
|
|
||||||
hash = fmt.Sprintf("%x", h.Sum(nil))
|
|
||||||
key := datastore.NewKey(c, "Log", hash, 0, nil)
|
|
||||||
_, err = datastore.Put(c, key, &Log{b.Bytes()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Tag is used to keep track of the most recent Go weekly and release tags.
|
|
||||||
// Typically there will be one Tag entity for each kind of hg tag.
|
|
||||||
type Tag struct {
|
|
||||||
Kind string // "weekly", "release", or "tip"
|
|
||||||
Name string // the tag itself (for example: "release.r60")
|
|
||||||
Hash string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Tag) Key(c appengine.Context) *datastore.Key {
|
|
||||||
p := &Package{}
|
|
||||||
return datastore.NewKey(c, "Tag", t.Kind, 0, p.Key(c))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Tag) Valid() error {
|
|
||||||
if t.Kind != "weekly" && t.Kind != "release" && t.Kind != "tip" {
|
|
||||||
return errors.New("invalid Kind")
|
|
||||||
}
|
|
||||||
if !validHash(t.Hash) {
|
|
||||||
return errors.New("invalid Hash")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit returns the Commit that corresponds with this Tag.
|
|
||||||
func (t *Tag) Commit(c appengine.Context) (*Commit, error) {
|
|
||||||
com := &Commit{Hash: t.Hash}
|
|
||||||
err := datastore.Get(c, com.Key(c), com)
|
|
||||||
return com, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetTag fetches a Tag by name from the datastore.
|
|
||||||
func GetTag(c appengine.Context, tag string) (*Tag, error) {
|
|
||||||
t := &Tag{Kind: tag}
|
|
||||||
if err := datastore.Get(c, t.Key(c), t); err != nil {
|
|
||||||
if err == datastore.ErrNoSuchEntity {
|
|
||||||
return nil, errors.New("tag not found: " + tag)
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := t.Valid(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Packages returns packages of the specified kind.
|
|
||||||
// Kind must be one of "external" or "subrepo".
|
|
||||||
func Packages(c appengine.Context, kind string) ([]*Package, error) {
|
|
||||||
switch kind {
|
|
||||||
case "external", "subrepo":
|
|
||||||
default:
|
|
||||||
return nil, errors.New(`kind must be one of "external" or "subrepo"`)
|
|
||||||
}
|
|
||||||
var pkgs []*Package
|
|
||||||
q := datastore.NewQuery("Package").Filter("Kind=", kind)
|
|
||||||
for t := q.Run(c); ; {
|
|
||||||
pkg := new(Package)
|
|
||||||
_, err := t.Next(pkg)
|
|
||||||
if err == datastore.Done {
|
|
||||||
break
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if pkg.Path != "" {
|
|
||||||
pkgs = append(pkgs, pkg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return pkgs, nil
|
|
||||||
}
|
|
@ -1,201 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
)
|
|
||||||
|
|
||||||
func handleFunc(path string, h http.HandlerFunc) {
|
|
||||||
for _, d := range dashboards {
|
|
||||||
http.HandleFunc(d.Prefix+path, h)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dashboard describes a unique build dashboard.
|
|
||||||
type Dashboard struct {
|
|
||||||
Name string // This dashboard's name (eg, "Go")
|
|
||||||
Namespace string // This dashboard's namespace (eg, "" (default), "Git")
|
|
||||||
Prefix string // The path prefix (no trailing /)
|
|
||||||
Packages []*Package // The project's packages to build
|
|
||||||
}
|
|
||||||
|
|
||||||
// dashboardForRequest returns the appropriate dashboard for a given URL path.
|
|
||||||
func dashboardForRequest(r *http.Request) *Dashboard {
|
|
||||||
if strings.HasPrefix(r.URL.Path, gccgoDash.Prefix) {
|
|
||||||
return gccgoDash
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(r.URL.Path, hgDash.Prefix) {
|
|
||||||
return hgDash
|
|
||||||
}
|
|
||||||
return goDash
|
|
||||||
}
|
|
||||||
|
|
||||||
// Context returns a namespaced context for this dashboard, or panics if it
|
|
||||||
// fails to create a new context.
|
|
||||||
func (d *Dashboard) Context(c appengine.Context) appengine.Context {
|
|
||||||
if d.Namespace == "" {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
n, err := appengine.Namespace(c, d.Namespace)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// the currently known dashboards.
|
|
||||||
var dashboards = []*Dashboard{goDash, hgDash, gccgoDash}
|
|
||||||
|
|
||||||
// hgDash is the dashboard for the old Mercural Go repository.
|
|
||||||
var hgDash = &Dashboard{
|
|
||||||
Name: "Mercurial",
|
|
||||||
Namespace: "", // Used to be the default.
|
|
||||||
Prefix: "/hg",
|
|
||||||
Packages: hgPackages,
|
|
||||||
}
|
|
||||||
|
|
||||||
// hgPackages is a list of all of the packages
|
|
||||||
// built by the old Mercurial Go repository.
|
|
||||||
var hgPackages = []*Package{
|
|
||||||
{
|
|
||||||
Kind: "go",
|
|
||||||
Name: "Go",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "go.blog",
|
|
||||||
Path: "code.google.com/p/go.blog",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "go.codereview",
|
|
||||||
Path: "code.google.com/p/go.codereview",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "go.crypto",
|
|
||||||
Path: "code.google.com/p/go.crypto",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "go.exp",
|
|
||||||
Path: "code.google.com/p/go.exp",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "go.image",
|
|
||||||
Path: "code.google.com/p/go.image",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "go.net",
|
|
||||||
Path: "code.google.com/p/go.net",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "go.sys",
|
|
||||||
Path: "code.google.com/p/go.sys",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "go.talks",
|
|
||||||
Path: "code.google.com/p/go.talks",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "go.tools",
|
|
||||||
Path: "code.google.com/p/go.tools",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// goDash is the dashboard for the main go repository.
|
|
||||||
var goDash = &Dashboard{
|
|
||||||
Name: "Go",
|
|
||||||
Namespace: "Git",
|
|
||||||
Prefix: "",
|
|
||||||
Packages: goPackages,
|
|
||||||
}
|
|
||||||
|
|
||||||
// goPackages is a list of all of the packages built by the main go repository.
|
|
||||||
var goPackages = []*Package{
|
|
||||||
{
|
|
||||||
Kind: "go",
|
|
||||||
Name: "Go",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "blog",
|
|
||||||
Path: "golang.org/x/blog",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "crypto",
|
|
||||||
Path: "golang.org/x/crypto",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "exp",
|
|
||||||
Path: "golang.org/x/exp",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "image",
|
|
||||||
Path: "golang.org/x/image",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "mobile",
|
|
||||||
Path: "golang.org/x/mobile",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "net",
|
|
||||||
Path: "golang.org/x/net",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "review",
|
|
||||||
Path: "golang.org/x/review",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "sys",
|
|
||||||
Path: "golang.org/x/sys",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "talks",
|
|
||||||
Path: "golang.org/x/talks",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "text",
|
|
||||||
Path: "golang.org/x/text",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Kind: "subrepo",
|
|
||||||
Name: "tools",
|
|
||||||
Path: "golang.org/x/tools",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// gccgoDash is the dashboard for gccgo.
|
|
||||||
var gccgoDash = &Dashboard{
|
|
||||||
Name: "Gccgo",
|
|
||||||
Namespace: "Gccgo",
|
|
||||||
Prefix: "/gccgo",
|
|
||||||
Packages: []*Package{
|
|
||||||
{
|
|
||||||
Kind: "gccgo",
|
|
||||||
Name: "Gccgo",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
@ -1,994 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/md5"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
"appengine/memcache"
|
|
||||||
|
|
||||||
"cache"
|
|
||||||
"key"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
commitsPerPage = 30
|
|
||||||
watcherVersion = 3 // must match dashboard/watcher/watcher.go
|
|
||||||
builderVersion = 1 // must match dashboard/builder/http.go
|
|
||||||
)
|
|
||||||
|
|
||||||
// commitHandler retrieves commit data or records a new commit.
|
|
||||||
//
|
|
||||||
// For GET requests it returns a Commit value for the specified
|
|
||||||
// packagePath and hash.
|
|
||||||
//
|
|
||||||
// For POST requests it reads a JSON-encoded Commit value from the request
|
|
||||||
// body and creates a new Commit entity. It also updates the "tip" Tag for
|
|
||||||
// each new commit at tip.
|
|
||||||
//
|
|
||||||
// This handler is used by a gobuilder process in -commit mode.
|
|
||||||
func commitHandler(r *http.Request) (interface{}, error) {
|
|
||||||
c := contextForRequest(r)
|
|
||||||
com := new(Commit)
|
|
||||||
|
|
||||||
if r.Method == "GET" {
|
|
||||||
com.PackagePath = r.FormValue("packagePath")
|
|
||||||
com.Hash = r.FormValue("hash")
|
|
||||||
err := datastore.Get(c, com.Key(c), com)
|
|
||||||
if com.Num == 0 && com.Desc == "" {
|
|
||||||
// Perf builder might have written an incomplete Commit.
|
|
||||||
// Pretend it doesn't exist, so that we can get complete details.
|
|
||||||
err = datastore.ErrNoSuchEntity
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
if err == datastore.ErrNoSuchEntity {
|
|
||||||
// This error string is special.
|
|
||||||
// The commit watcher expects it.
|
|
||||||
// Do not change it.
|
|
||||||
return nil, errors.New("Commit not found")
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("getting Commit: %v", err)
|
|
||||||
}
|
|
||||||
if com.Num == 0 {
|
|
||||||
// Corrupt state which shouldn't happen but does.
|
|
||||||
// Return an error so builders' commit loops will
|
|
||||||
// be willing to retry submitting this commit.
|
|
||||||
return nil, errors.New("in datastore with zero Num")
|
|
||||||
}
|
|
||||||
if com.Desc == "" || com.User == "" {
|
|
||||||
// Also shouldn't happen, but at least happened
|
|
||||||
// once on a single commit when trying to fix data
|
|
||||||
// in the datastore viewer UI?
|
|
||||||
return nil, errors.New("missing field")
|
|
||||||
}
|
|
||||||
// Strip potentially large and unnecessary fields.
|
|
||||||
com.ResultData = nil
|
|
||||||
com.PerfResults = nil
|
|
||||||
return com, nil
|
|
||||||
}
|
|
||||||
if r.Method != "POST" {
|
|
||||||
return nil, errBadMethod(r.Method)
|
|
||||||
}
|
|
||||||
if !isMasterKey(c, r.FormValue("key")) {
|
|
||||||
return nil, errors.New("can only POST commits with master key")
|
|
||||||
}
|
|
||||||
|
|
||||||
// For now, the commit watcher doesn't support gccgo.
|
|
||||||
// TODO(adg,cmang): remove this exception when gccgo is supported.
|
|
||||||
if dashboardForRequest(r) != gccgoDash {
|
|
||||||
v, _ := strconv.Atoi(r.FormValue("version"))
|
|
||||||
if v != watcherVersion {
|
|
||||||
return nil, fmt.Errorf("rejecting POST from commit watcher; need version %v", watcherVersion)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// POST request
|
|
||||||
body, err := ioutil.ReadAll(r.Body)
|
|
||||||
r.Body.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("reading Body: %v", err)
|
|
||||||
}
|
|
||||||
if !bytes.Contains(body, needsBenchmarkingBytes) {
|
|
||||||
c.Warningf("old builder detected at %v", r.RemoteAddr)
|
|
||||||
return nil, fmt.Errorf("rejecting old builder request, body does not contain %s: %q", needsBenchmarkingBytes, body)
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(body, com); err != nil {
|
|
||||||
return nil, fmt.Errorf("unmarshaling body %q: %v", body, err)
|
|
||||||
}
|
|
||||||
com.Desc = limitStringLength(com.Desc, maxDatastoreStringLen)
|
|
||||||
if err := com.Valid(); err != nil {
|
|
||||||
return nil, fmt.Errorf("validating Commit: %v", err)
|
|
||||||
}
|
|
||||||
defer cache.Tick(c)
|
|
||||||
tx := func(c appengine.Context) error {
|
|
||||||
return addCommit(c, com)
|
|
||||||
}
|
|
||||||
return nil, datastore.RunInTransaction(c, tx, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
var needsBenchmarkingBytes = []byte(`"NeedsBenchmarking"`)
|
|
||||||
|
|
||||||
// addCommit adds the Commit entity to the datastore and updates the tip Tag.
|
|
||||||
// It must be run inside a datastore transaction.
|
|
||||||
func addCommit(c appengine.Context, com *Commit) error {
|
|
||||||
var ec Commit // existing commit
|
|
||||||
isUpdate := false
|
|
||||||
err := datastore.Get(c, com.Key(c), &ec)
|
|
||||||
if err != nil && err != datastore.ErrNoSuchEntity {
|
|
||||||
return fmt.Errorf("getting Commit: %v", err)
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
// Commit already in the datastore. Any fields different?
|
|
||||||
// If not, don't do anything.
|
|
||||||
changes := (com.Num != 0 && com.Num != ec.Num) ||
|
|
||||||
com.ParentHash != ec.ParentHash ||
|
|
||||||
com.Desc != ec.Desc ||
|
|
||||||
com.User != ec.User ||
|
|
||||||
!com.Time.Equal(ec.Time)
|
|
||||||
if !changes {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
ec.ParentHash = com.ParentHash
|
|
||||||
ec.Desc = com.Desc
|
|
||||||
ec.User = com.User
|
|
||||||
if !com.Time.IsZero() {
|
|
||||||
ec.Time = com.Time
|
|
||||||
}
|
|
||||||
if com.Num != 0 {
|
|
||||||
ec.Num = com.Num
|
|
||||||
}
|
|
||||||
isUpdate = true
|
|
||||||
com = &ec
|
|
||||||
}
|
|
||||||
p, err := GetPackage(c, com.PackagePath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("GetPackage: %v", err)
|
|
||||||
}
|
|
||||||
if com.Num == 0 {
|
|
||||||
// get the next commit number
|
|
||||||
com.Num = p.NextNum
|
|
||||||
p.NextNum++
|
|
||||||
if _, err := datastore.Put(c, p.Key(c), p); err != nil {
|
|
||||||
return fmt.Errorf("putting Package: %v", err)
|
|
||||||
}
|
|
||||||
} else if com.Num >= p.NextNum {
|
|
||||||
p.NextNum = com.Num + 1
|
|
||||||
if _, err := datastore.Put(c, p.Key(c), p); err != nil {
|
|
||||||
return fmt.Errorf("putting Package: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// if this isn't the first Commit test the parent commit exists.
|
|
||||||
// The all zeros are returned by hg's p1node template for parentless commits.
|
|
||||||
if com.ParentHash != "" && com.ParentHash != "0000000000000000000000000000000000000000" && com.ParentHash != "0000" {
|
|
||||||
n, err := datastore.NewQuery("Commit").
|
|
||||||
Filter("Hash =", com.ParentHash).
|
|
||||||
Ancestor(p.Key(c)).
|
|
||||||
Count(c)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("testing for parent Commit: %v", err)
|
|
||||||
}
|
|
||||||
if n == 0 {
|
|
||||||
return errors.New("parent commit not found")
|
|
||||||
}
|
|
||||||
} else if com.Num != 1 {
|
|
||||||
// This is the first commit; fail if it is not number 1.
|
|
||||||
// (This will happen if we try to upload a new/different repo
|
|
||||||
// where there is already commit data. A bad thing to do.)
|
|
||||||
return errors.New("this package already has a first commit; aborting")
|
|
||||||
}
|
|
||||||
// update the tip Tag if this is the Go repo and this isn't on a release branch
|
|
||||||
if p.Path == "" && !strings.HasPrefix(com.Desc, "[") && !isUpdate {
|
|
||||||
t := &Tag{Kind: "tip", Hash: com.Hash}
|
|
||||||
if _, err = datastore.Put(c, t.Key(c), t); err != nil {
|
|
||||||
return fmt.Errorf("putting Tag: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// put the Commit
|
|
||||||
if err = putCommit(c, com); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if com.NeedsBenchmarking {
|
|
||||||
// add to CommitRun
|
|
||||||
cr, err := GetCommitRun(c, com.Num)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = cr.AddCommit(c, com); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// create PerfResult
|
|
||||||
res := &PerfResult{CommitHash: com.Hash, CommitNum: com.Num}
|
|
||||||
if _, err := datastore.Put(c, res.Key(c), res); err != nil {
|
|
||||||
return fmt.Errorf("putting PerfResult: %v", err)
|
|
||||||
}
|
|
||||||
// Update perf todo if necessary.
|
|
||||||
if err = AddCommitToPerfTodo(c, com); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// tagHandler records a new tag. It reads a JSON-encoded Tag value from the
|
|
||||||
// request body and updates the Tag entity for the Kind of tag provided.
|
|
||||||
//
|
|
||||||
// This handler is used by a gobuilder process in -commit mode.
|
|
||||||
func tagHandler(r *http.Request) (interface{}, error) {
|
|
||||||
if r.Method != "POST" {
|
|
||||||
return nil, errBadMethod(r.Method)
|
|
||||||
}
|
|
||||||
|
|
||||||
t := new(Tag)
|
|
||||||
defer r.Body.Close()
|
|
||||||
if err := json.NewDecoder(r.Body).Decode(t); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := t.Valid(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
c := contextForRequest(r)
|
|
||||||
defer cache.Tick(c)
|
|
||||||
_, err := datastore.Put(c, t.Key(c), t)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Todo is a todoHandler response.
|
|
||||||
type Todo struct {
|
|
||||||
Kind string // "build-go-commit" or "build-package"
|
|
||||||
Data interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// todoHandler returns the next action to be performed by a builder.
|
|
||||||
// It expects "builder" and "kind" query parameters and returns a *Todo value.
|
|
||||||
// Multiple "kind" parameters may be specified.
|
|
||||||
func todoHandler(r *http.Request) (interface{}, error) {
|
|
||||||
c := contextForRequest(r)
|
|
||||||
now := cache.Now(c)
|
|
||||||
key := "build-todo-" + r.Form.Encode()
|
|
||||||
var todo *Todo
|
|
||||||
if cache.Get(r, now, key, &todo) {
|
|
||||||
return todo, nil
|
|
||||||
}
|
|
||||||
var err error
|
|
||||||
builder := r.FormValue("builder")
|
|
||||||
for _, kind := range r.Form["kind"] {
|
|
||||||
var com *Commit
|
|
||||||
switch kind {
|
|
||||||
case "build-go-commit":
|
|
||||||
com, err = buildTodo(c, builder, "", "")
|
|
||||||
if com != nil {
|
|
||||||
com.PerfResults = []string{}
|
|
||||||
}
|
|
||||||
case "build-package":
|
|
||||||
packagePath := r.FormValue("packagePath")
|
|
||||||
goHash := r.FormValue("goHash")
|
|
||||||
com, err = buildTodo(c, builder, packagePath, goHash)
|
|
||||||
if com != nil {
|
|
||||||
com.PerfResults = []string{}
|
|
||||||
}
|
|
||||||
case "benchmark-go-commit":
|
|
||||||
com, err = perfTodo(c, builder)
|
|
||||||
}
|
|
||||||
if com != nil || err != nil {
|
|
||||||
if com != nil {
|
|
||||||
// ResultData can be large and not needed on builder.
|
|
||||||
com.ResultData = []string{}
|
|
||||||
}
|
|
||||||
todo = &Todo{Kind: kind, Data: com}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
cache.Set(r, now, key, todo)
|
|
||||||
}
|
|
||||||
return todo, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildTodo returns the next Commit to be built (or nil if none available).
|
|
||||||
//
|
|
||||||
// If packagePath and goHash are empty, it scans the first 20 Go Commits in
|
|
||||||
// Num-descending order and returns the first one it finds that doesn't have a
|
|
||||||
// Result for this builder.
|
|
||||||
//
|
|
||||||
// If provided with non-empty packagePath and goHash args, it scans the first
|
|
||||||
// 20 Commits in Num-descending order for the specified packagePath and
|
|
||||||
// returns the first that doesn't have a Result for this builder and goHash.
|
|
||||||
func buildTodo(c appengine.Context, builder, packagePath, goHash string) (*Commit, error) {
|
|
||||||
p, err := GetPackage(c, packagePath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
t := datastore.NewQuery("Commit").
|
|
||||||
Ancestor(p.Key(c)).
|
|
||||||
Limit(commitsPerPage).
|
|
||||||
Order("-Num").
|
|
||||||
Run(c)
|
|
||||||
for {
|
|
||||||
com := new(Commit)
|
|
||||||
if _, err := t.Next(com); err == datastore.Done {
|
|
||||||
break
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if com.Result(builder, goHash) == nil {
|
|
||||||
return com, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Nothing left to do if this is a package (not the Go tree).
|
|
||||||
if packagePath != "" {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// If there are no Go tree commits left to build,
|
|
||||||
// see if there are any subrepo commits that need to be built at tip.
|
|
||||||
// If so, ask the builder to build a go tree at the tip commit.
|
|
||||||
// TODO(adg): do the same for "weekly" and "release" tags.
|
|
||||||
|
|
||||||
tag, err := GetTag(c, "tip")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check that this Go commit builds OK for this builder.
|
|
||||||
// If not, don't re-build as the subrepos will never get built anyway.
|
|
||||||
com, err := tag.Commit(c)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if r := com.Result(builder, ""); r != nil && !r.OK {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
pkgs, err := Packages(c, "subrepo")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, pkg := range pkgs {
|
|
||||||
com, err := pkg.LastCommit(c)
|
|
||||||
if err != nil {
|
|
||||||
c.Warningf("%v: no Commit found: %v", pkg, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if com.Result(builder, tag.Hash) == nil {
|
|
||||||
return tag.Commit(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// perfTodo returns the next Commit to be benchmarked (or nil if none available).
|
|
||||||
func perfTodo(c appengine.Context, builder string) (*Commit, error) {
|
|
||||||
p := &Package{}
|
|
||||||
todo := &PerfTodo{Builder: builder}
|
|
||||||
err := datastore.Get(c, todo.Key(c), todo)
|
|
||||||
if err != nil && err != datastore.ErrNoSuchEntity {
|
|
||||||
return nil, fmt.Errorf("fetching PerfTodo: %v", err)
|
|
||||||
}
|
|
||||||
if err == datastore.ErrNoSuchEntity {
|
|
||||||
todo, err = buildPerfTodo(c, builder)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(todo.CommitNums) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Have commit to benchmark, fetch it.
|
|
||||||
num := todo.CommitNums[len(todo.CommitNums)-1]
|
|
||||||
t := datastore.NewQuery("Commit").
|
|
||||||
Ancestor(p.Key(c)).
|
|
||||||
Filter("Num =", num).
|
|
||||||
Limit(1).
|
|
||||||
Run(c)
|
|
||||||
com := new(Commit)
|
|
||||||
if _, err := t.Next(com); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if !com.NeedsBenchmarking {
|
|
||||||
return nil, fmt.Errorf("commit from perf todo queue is not intended for benchmarking")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove benchmarks from other builders.
|
|
||||||
var benchs []string
|
|
||||||
for _, b := range com.PerfResults {
|
|
||||||
bb := strings.Split(b, "|")
|
|
||||||
if bb[0] == builder && bb[1] != "meta-done" {
|
|
||||||
benchs = append(benchs, bb[1])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
com.PerfResults = benchs
|
|
||||||
|
|
||||||
return com, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildPerfTodo creates PerfTodo for the builder with all commits. In a transaction.
|
|
||||||
func buildPerfTodo(c appengine.Context, builder string) (*PerfTodo, error) {
|
|
||||||
todo := &PerfTodo{Builder: builder}
|
|
||||||
tx := func(c appengine.Context) error {
|
|
||||||
err := datastore.Get(c, todo.Key(c), todo)
|
|
||||||
if err != nil && err != datastore.ErrNoSuchEntity {
|
|
||||||
return fmt.Errorf("fetching PerfTodo: %v", err)
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
t := datastore.NewQuery("CommitRun").
|
|
||||||
Ancestor((&Package{}).Key(c)).
|
|
||||||
Order("-StartCommitNum").
|
|
||||||
Run(c)
|
|
||||||
var nums []int
|
|
||||||
var releaseNums []int
|
|
||||||
loop:
|
|
||||||
for {
|
|
||||||
cr := new(CommitRun)
|
|
||||||
if _, err := t.Next(cr); err == datastore.Done {
|
|
||||||
break
|
|
||||||
} else if err != nil {
|
|
||||||
return fmt.Errorf("scanning commit runs for perf todo: %v", err)
|
|
||||||
}
|
|
||||||
for i := len(cr.Hash) - 1; i >= 0; i-- {
|
|
||||||
if !cr.NeedsBenchmarking[i] || cr.Hash[i] == "" {
|
|
||||||
continue // There's nothing to see here. Move along.
|
|
||||||
}
|
|
||||||
num := cr.StartCommitNum + i
|
|
||||||
for k, v := range knownTags {
|
|
||||||
// Releases are benchmarked first, because they are important (and there are few of them).
|
|
||||||
if cr.Hash[i] == v {
|
|
||||||
releaseNums = append(releaseNums, num)
|
|
||||||
if k == "go1" {
|
|
||||||
break loop // Point of no benchmark: test/bench/shootout: update timing.log to Go 1.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
nums = append(nums, num)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
todo.CommitNums = orderPerfTodo(nums)
|
|
||||||
todo.CommitNums = append(todo.CommitNums, releaseNums...)
|
|
||||||
if _, err = datastore.Put(c, todo.Key(c), todo); err != nil {
|
|
||||||
return fmt.Errorf("putting PerfTodo: %v", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return todo, datastore.RunInTransaction(c, tx, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeCommitFromPerfTodo(c appengine.Context, builder string, num int) error {
|
|
||||||
todo := &PerfTodo{Builder: builder}
|
|
||||||
err := datastore.Get(c, todo.Key(c), todo)
|
|
||||||
if err != nil && err != datastore.ErrNoSuchEntity {
|
|
||||||
return fmt.Errorf("fetching PerfTodo: %v", err)
|
|
||||||
}
|
|
||||||
if err == datastore.ErrNoSuchEntity {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
for i := len(todo.CommitNums) - 1; i >= 0; i-- {
|
|
||||||
if todo.CommitNums[i] == num {
|
|
||||||
for ; i < len(todo.CommitNums)-1; i++ {
|
|
||||||
todo.CommitNums[i] = todo.CommitNums[i+1]
|
|
||||||
}
|
|
||||||
todo.CommitNums = todo.CommitNums[:i]
|
|
||||||
_, err = datastore.Put(c, todo.Key(c), todo)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("putting PerfTodo: %v", err)
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// packagesHandler returns a list of the non-Go Packages monitored
|
|
||||||
// by the dashboard.
|
|
||||||
func packagesHandler(r *http.Request) (interface{}, error) {
|
|
||||||
kind := r.FormValue("kind")
|
|
||||||
c := contextForRequest(r)
|
|
||||||
now := cache.Now(c)
|
|
||||||
key := "build-packages-" + kind
|
|
||||||
var p []*Package
|
|
||||||
if cache.Get(r, now, key, &p) {
|
|
||||||
return p, nil
|
|
||||||
}
|
|
||||||
p, err := Packages(c, kind)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
cache.Set(r, now, key, p)
|
|
||||||
return p, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildingHandler records that a build is in progress.
|
|
||||||
// The data is only stored in memcache and with a timeout. It's assumed
|
|
||||||
// that the build system will periodically refresh this if the build
|
|
||||||
// is slow.
|
|
||||||
func buildingHandler(r *http.Request) (interface{}, error) {
|
|
||||||
if r.Method != "POST" {
|
|
||||||
return nil, errBadMethod(r.Method)
|
|
||||||
}
|
|
||||||
c := contextForRequest(r)
|
|
||||||
key := fmt.Sprintf("building|%s|%s|%s", r.FormValue("hash"), r.FormValue("gohash"), r.FormValue("builder"))
|
|
||||||
err := memcache.Set(c, &memcache.Item{
|
|
||||||
Key: key,
|
|
||||||
Value: []byte(r.FormValue("url")),
|
|
||||||
Expiration: 15 * time.Minute,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return map[string]interface{}{
|
|
||||||
"key": key,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// resultHandler records a build result.
// It reads a JSON-encoded Result value from the request body,
// creates a new Result entity, and updates the relevant Commit entity.
// If the Log field is not empty, resultHandler creates a new Log entity
// and updates the LogHash field before putting the Commit entity.
func resultHandler(r *http.Request) (interface{}, error) {
	if r.Method != "POST" {
		return nil, errBadMethod(r.Method)
	}

	// For now, the gccgo builders are using the old stuff.
	// TODO(adg,cmang): remove this exception when gccgo is updated.
	if dashboardForRequest(r) != gccgoDash {
		// Reject results from builders running an outdated protocol version.
		v, _ := strconv.Atoi(r.FormValue("version"))
		if v != builderVersion {
			return nil, fmt.Errorf("rejecting POST from builder; need version %v", builderVersion)
		}
	}

	c := contextForRequest(r)
	res := new(Result)
	defer r.Body.Close()
	if err := json.NewDecoder(r.Body).Decode(res); err != nil {
		return nil, fmt.Errorf("decoding Body: %v", err)
	}
	if err := res.Valid(); err != nil {
		return nil, fmt.Errorf("validating Result: %v", err)
	}
	// Invalidate cached views once the handler returns.
	defer cache.Tick(c)
	// store the Log text if supplied
	if len(res.Log) > 0 {
		hash, err := PutLog(c, res.Log)
		if err != nil {
			return nil, fmt.Errorf("putting Log: %v", err)
		}
		// The Result keeps only the hash; the text lives in the Log entity.
		res.LogHash = hash
	}
	tx := func(c appengine.Context) error {
		// check Package exists
		if _, err := GetPackage(c, res.PackagePath); err != nil {
			return fmt.Errorf("GetPackage: %v", err)
		}
		// put Result
		if _, err := datastore.Put(c, res.Key(c), res); err != nil {
			return fmt.Errorf("putting Result: %v", err)
		}
		// add Result to Commit
		com := &Commit{PackagePath: res.PackagePath, Hash: res.Hash}
		if err := com.AddResult(c, res); err != nil {
			return fmt.Errorf("AddResult: %v", err)
		}
		// Send build failure notifications, if necessary.
		// Note this must run after the call AddResult, which
		// populates the Commit's ResultData field.
		return notifyOnFailure(c, com, res.Builder)
	}
	return nil, datastore.RunInTransaction(c, tx, nil)
}
|
|
||||||
// PerfRequest is the perf-result request payload: one benchmark run
// for a commit on a builder.
type PerfRequest struct {
	Builder   string
	Benchmark string
	Hash      string // commit hash the benchmark was run against
	OK        bool
	Metrics   []PerfMetric
	Artifacts []PerfArtifact
}

// PerfMetric is a single measured value of the named metric type.
type PerfMetric struct {
	Type string
	Val  uint64
}

// PerfArtifact is a text artifact produced by a benchmark run.
// Body initially carries the text itself; perfResultHandler replaces it
// with the hash of the stored Log entity before the transaction runs.
type PerfArtifact struct {
	Type string
	Body string
}
|
|
||||||
// perfResultHandler records a benchmarking result.
// It decodes a JSON-encoded PerfRequest from the request body, stores
// any artifact text as Log entities, and applies the result inside a
// datastore transaction via addPerfResult.
func perfResultHandler(r *http.Request) (interface{}, error) {
	defer r.Body.Close()
	if r.Method != "POST" {
		return nil, errBadMethod(r.Method)
	}

	req := new(PerfRequest)
	if err := json.NewDecoder(r.Body).Decode(req); err != nil {
		return nil, fmt.Errorf("decoding Body: %v", err)
	}

	c := contextForRequest(r)
	// Invalidate cached views once the handler returns.
	defer cache.Tick(c)

	// store the text files if supplied
	for i, a := range req.Artifacts {
		hash, err := PutLog(c, a.Body)
		if err != nil {
			return nil, fmt.Errorf("putting Log: %v", err)
		}
		// Replace the artifact body with the hash of the stored log.
		req.Artifacts[i].Body = hash
	}
	tx := func(c appengine.Context) error {
		return addPerfResult(c, r, req)
	}
	return nil, datastore.RunInTransaction(c, tx, nil)
}
|
|
||||||
// addPerfResult creates PerfResult and updates Commit, PerfTodo,
|
|
||||||
// PerfMetricRun and PerfConfig.
|
|
||||||
// MUST be called from inside a transaction.
|
|
||||||
func addPerfResult(c appengine.Context, r *http.Request, req *PerfRequest) error {
|
|
||||||
// check Package exists
|
|
||||||
p, err := GetPackage(c, "")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("GetPackage: %v", err)
|
|
||||||
}
|
|
||||||
// add result to Commit
|
|
||||||
com := &Commit{Hash: req.Hash}
|
|
||||||
if err := com.AddPerfResult(c, req.Builder, req.Benchmark); err != nil {
|
|
||||||
return fmt.Errorf("AddPerfResult: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// add the result to PerfResult
|
|
||||||
res := &PerfResult{CommitHash: req.Hash}
|
|
||||||
if err := datastore.Get(c, res.Key(c), res); err != nil {
|
|
||||||
return fmt.Errorf("getting PerfResult: %v", err)
|
|
||||||
}
|
|
||||||
present := res.AddResult(req)
|
|
||||||
if _, err := datastore.Put(c, res.Key(c), res); err != nil {
|
|
||||||
return fmt.Errorf("putting PerfResult: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Meta-done denotes that there are no benchmarks left.
|
|
||||||
if req.Benchmark == "meta-done" {
|
|
||||||
// Don't send duplicate emails for the same commit/builder.
|
|
||||||
// And don't send emails about too old commits.
|
|
||||||
if !present && com.Num >= p.NextNum-commitsPerPage {
|
|
||||||
if err := checkPerfChanges(c, r, com, req.Builder, res); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := removeCommitFromPerfTodo(c, req.Builder, com.Num); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// update PerfConfig
|
|
||||||
newBenchmark, err := UpdatePerfConfig(c, r, req)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("updating PerfConfig: %v", err)
|
|
||||||
}
|
|
||||||
if newBenchmark {
|
|
||||||
// If this is a new benchmark on the builder, delete PerfTodo.
|
|
||||||
// It will be recreated later with all commits again.
|
|
||||||
todo := &PerfTodo{Builder: req.Builder}
|
|
||||||
err = datastore.Delete(c, todo.Key(c))
|
|
||||||
if err != nil && err != datastore.ErrNoSuchEntity {
|
|
||||||
return fmt.Errorf("deleting PerfTodo: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// add perf metrics
|
|
||||||
for _, metric := range req.Metrics {
|
|
||||||
m, err := GetPerfMetricRun(c, req.Builder, req.Benchmark, metric.Type, com.Num)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("GetPerfMetrics: %v", err)
|
|
||||||
}
|
|
||||||
if err = m.AddMetric(c, com.Num, metric.Val); err != nil {
|
|
||||||
return fmt.Errorf("AddMetric: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkPerfChanges examines the just-completed perf results res for
// commit com on builder. It sends a failure notification when the run's
// OK/broken state differs from that of a neighboring commit, and then
// compares metrics with the nearest comparable results to report any
// significant performance changes.
// MUST be called from inside a transaction.
func checkPerfChanges(c appengine.Context, r *http.Request, com *Commit, builder string, res *PerfResult) error {
	pc, err := GetPerfConfig(c, r)
	if err != nil {
		return err
	}

	results := res.ParseData()[builder]
	// Result caches for walking commits newer and older than com.
	rcNewer := MakePerfResultCache(c, com, true)
	rcOlder := MakePerfResultCache(c, com, false)

	// Check whether we need to send failure notification email.
	if results["meta-done"].OK {
		// This one is successful, see if the next is failed.
		nextRes, err := rcNewer.Next(com.Num)
		if err != nil {
			return err
		}
		if nextRes != nil && isPerfFailed(nextRes, builder) {
			sendPerfFailMail(c, builder, nextRes)
		}
	} else {
		// This one is failed, see if the previous is successful.
		prevRes, err := rcOlder.Next(com.Num)
		if err != nil {
			return err
		}
		if prevRes != nil && !isPerfFailed(prevRes, builder) {
			sendPerfFailMail(c, builder, res)
		}
	}

	// Now see if there are any performance changes.
	// Find the previous and the next results for performance comparison.
	prevRes, err := rcOlder.NextForComparison(com.Num, builder)
	if err != nil {
		return err
	}
	nextRes, err := rcNewer.NextForComparison(com.Num, builder)
	if err != nil {
		return err
	}
	if results["meta-done"].OK {
		// This one is successful, compare with a previous one.
		if prevRes != nil {
			if err := comparePerfResults(c, pc, builder, prevRes, res); err != nil {
				return err
			}
		}
		// Compare a next one with the current.
		if nextRes != nil {
			if err := comparePerfResults(c, pc, builder, res, nextRes); err != nil {
				return err
			}
		}
	} else {
		// This one is failed, compare a previous one with a next one.
		if prevRes != nil && nextRes != nil {
			if err := comparePerfResults(c, pc, builder, prevRes, nextRes); err != nil {
				return err
			}
		}
	}

	return nil
}
|
|
||||||
func comparePerfResults(c appengine.Context, pc *PerfConfig, builder string, prevRes, res *PerfResult) error {
|
|
||||||
changes := significantPerfChanges(pc, builder, prevRes, res)
|
|
||||||
if len(changes) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
com := &Commit{Hash: res.CommitHash}
|
|
||||||
if err := datastore.Get(c, com.Key(c), com); err != nil {
|
|
||||||
return fmt.Errorf("getting commit %v: %v", com.Hash, err)
|
|
||||||
}
|
|
||||||
sendPerfMailLater.Call(c, com, prevRes.CommitHash, builder, changes) // add task to queue
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// logHandler displays log text for a given hash.
|
|
||||||
// It handles paths like "/log/hash".
|
|
||||||
func logHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.Header().Set("Content-type", "text/plain; charset=utf-8")
|
|
||||||
c := contextForRequest(r)
|
|
||||||
hash := r.URL.Path[strings.LastIndex(r.URL.Path, "/")+1:]
|
|
||||||
key := datastore.NewKey(c, "Log", hash, 0, nil)
|
|
||||||
l := new(Log)
|
|
||||||
if err := datastore.Get(c, key, l); err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b, err := l.Text()
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
w.Write(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// clearResultsHandler purges the last commitsPerPage results for the given builder.
// It optionally takes a comma-separated list of specific hashes to clear.
func clearResultsHandler(r *http.Request) (interface{}, error) {
	if r.Method != "POST" {
		return nil, errBadMethod(r.Method)
	}
	builder := r.FormValue("builder")
	if builder == "" {
		return nil, errors.New("must specify a builder")
	}
	// With no "hash" parameter, every recent result for the builder is cleared.
	clearAll := r.FormValue("hash") == ""
	hash := strings.Split(r.FormValue("hash"), ",")

	c := contextForRequest(r)
	// Invalidate cached views once the handler returns.
	defer cache.Tick(c)
	pkg := (&Package{}).Key(c) // TODO(adg): support clearing sub-repos
	err := datastore.RunInTransaction(c, func(c appengine.Context) error {
		var coms []*Commit
		keys, err := datastore.NewQuery("Commit").
			Ancestor(pkg).
			Order("-Num").
			Limit(commitsPerPage).
			GetAll(c, &coms)
		if err != nil {
			return err
		}
		// Collect the keys of the Result entities to delete while
		// detaching each result from its Commit.
		var rKeys []*datastore.Key
		for _, com := range coms {
			if !(clearAll || contains(hash, com.Hash)) {
				continue
			}
			r := com.Result(builder, "")
			if r == nil {
				// No result for this builder on this commit.
				continue
			}
			com.RemoveResult(r)
			rKeys = append(rKeys, r.Key(c))
		}
		// Write back the modified commits, then delete the detached results.
		_, err = datastore.PutMulti(c, keys, coms)
		if err != nil {
			return err
		}
		return datastore.DeleteMulti(c, rKeys)
	}, nil)
	return nil, err
}
|
|
||||||
// dashHandler is a dashboard request handler: it returns a response
// value to be serialized as JSON by AuthHandler, or an error.
type dashHandler func(*http.Request) (interface{}, error)

// dashResponse is the JSON envelope written for every dashHandler:
// the handler's response value plus an error string, if any.
type dashResponse struct {
	Response interface{}
	Error    string
}
|
|
||||||
// errBadMethod is returned by a dashHandler when
// the request has an unsuitable method.
type errBadMethod string

// Error implements the error interface, reporting the offending method.
func (e errBadMethod) Error() string {
	return fmt.Sprintf("bad method: %s", string(e))
}
|
|
||||||
// AuthHandler wraps a http.HandlerFunc with a handler that validates the
// supplied key and builder query parameters.
func AuthHandler(h dashHandler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		c := contextForRequest(r)

		// Put the URL Query values into r.Form to avoid parsing the
		// request body when calling r.FormValue.
		r.Form = r.URL.Query()

		var err error
		var resp interface{}

		// Validate key query parameter for POST requests only.
		key := r.FormValue("key")
		builder := r.FormValue("builder")
		if r.Method == "POST" && !validKey(c, key, builder) {
			err = fmt.Errorf("invalid key %q for builder %q", key, builder)
		}

		// Call the original HandlerFunc and return the response.
		if err == nil {
			resp, err = h(r)
		}

		// Write JSON response.
		dashResp := &dashResponse{Response: resp}
		if err != nil {
			// Surface the error both in the logs and in the JSON envelope.
			c.Errorf("%v", err)
			dashResp.Error = err.Error()
		}
		w.Header().Set("Content-Type", "application/json")
		if err = json.NewEncoder(w).Encode(dashResp); err != nil {
			c.Criticalf("encoding response: %v", err)
		}
	}
}
|
|
||||||
func keyHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
builder := r.FormValue("builder")
|
|
||||||
if builder == "" {
|
|
||||||
logErr(w, r, errors.New("must supply builder in query string"))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c := contextForRequest(r)
|
|
||||||
fmt.Fprint(w, builderKey(c, builder))
|
|
||||||
}
|
|
||||||
|
|
||||||
// init registers all of the dashboard's URL handlers.
func init() {
	// admin handlers
	handleFunc("/init", initHandler)
	handleFunc("/key", keyHandler)

	// authenticated handlers
	handleFunc("/building", AuthHandler(buildingHandler))
	handleFunc("/clear-results", AuthHandler(clearResultsHandler))
	handleFunc("/commit", AuthHandler(commitHandler))
	handleFunc("/packages", AuthHandler(packagesHandler))
	handleFunc("/perf-result", AuthHandler(perfResultHandler))
	handleFunc("/result", AuthHandler(resultHandler))
	handleFunc("/tag", AuthHandler(tagHandler))
	handleFunc("/todo", AuthHandler(todoHandler))

	// public handlers
	handleFunc("/log/", logHandler)
}
|
|
||||||
// validHash reports whether hash is plausibly a commit hash.
// Currently only non-emptiness is checked.
// TODO(adg): correctly validate a hash
func validHash(hash string) bool {
	return len(hash) > 0
}
|
|
||||||
// validKey reports whether key is either the master key or the
// builder-specific key for builder.
func validKey(c appengine.Context, key, builder string) bool {
	return isMasterKey(c, key) || key == builderKey(c, builder)
}
|
|
||||||
// isMasterKey reports whether k is the dashboard's secret key.
// Any key is accepted when running on the dev app server.
func isMasterKey(c appengine.Context, k string) bool {
	return appengine.IsDevAppServer() || k == key.Secret(c)
}
|
|
||||||
// builderKey derives the access key for a builder: the hex-encoded
// HMAC-MD5 of the builder name, keyed with the dashboard's secret key.
func builderKey(c appengine.Context, builder string) string {
	h := hmac.New(md5.New, []byte(key.Secret(c)))
	h.Write([]byte(builder))
	return fmt.Sprintf("%x", h.Sum(nil))
}
|
|
||||||
func logErr(w http.ResponseWriter, r *http.Request, err error) {
|
|
||||||
contextForRequest(r).Errorf("Error: %v", err)
|
|
||||||
w.WriteHeader(http.StatusInternalServerError)
|
|
||||||
fmt.Fprint(w, "Error: ", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// contextForRequest returns an appengine.Context scoped to the
// dashboard that serves the given request.
func contextForRequest(r *http.Request) appengine.Context {
	return dashboardForRequest(r).Context(appengine.NewContext(r))
}
|
|
||||||
// limitStringLength essentially does return s[:max],
// but it ensures that we dot not split UTF-8 rune in half.
// Otherwise appengine python scripts will break badly.
func limitStringLength(s string, max int) string {
	if len(s) <= max {
		return s
	}
	// Truncate to max bytes, then strip any trailing bytes that form an
	// incomplete UTF-8 sequence (DecodeLastRuneInString reports such a
	// tail as RuneError with size 1).
	cut := s[:max]
	for len(cut) > 0 {
		if r, size := utf8.DecodeLastRuneInString(cut); r != utf8.RuneError || size != 1 {
			break
		}
		cut = cut[:len(cut)-1]
	}
	return cut
}
|
|
@ -1,47 +0,0 @@
|
|||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
|
|
||||||
"cache"
|
|
||||||
"key"
|
|
||||||
)
|
|
||||||
|
|
||||||
// initHandler creates a Package entity in the datastore for each package
// configured for this dashboard that does not yet exist (with NextNum set
// to 1), forces creation of the secret key, and writes "OK" on success.
func initHandler(w http.ResponseWriter, r *http.Request) {
	d := dashboardForRequest(r)
	c := d.Context(appengine.NewContext(r))
	// Invalidate cached views once the handler returns.
	defer cache.Tick(c)
	for _, p := range d.Packages {
		err := datastore.Get(c, p.Key(c), new(Package))
		if _, ok := err.(*datastore.ErrFieldMismatch); ok {
			// Some fields have been removed, so it's okay to ignore this error.
			err = nil
		}
		if err == nil {
			// The package already exists; leave it untouched.
			continue
		} else if err != datastore.ErrNoSuchEntity {
			logErr(w, r, err)
			return
		}
		p.NextNum = 1 // So we can add the first commit.
		if _, err := datastore.Put(c, p.Key(c), p); err != nil {
			logErr(w, r, err)
			return
		}
	}

	// Create secret key.
	key.Secret(c)

	fmt.Fprint(w, "OK")
}
|
|
@ -1,378 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/gob"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"regexp"
|
|
||||||
"runtime"
|
|
||||||
"sort"
|
|
||||||
"text/template"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
"appengine/delay"
|
|
||||||
"appengine/mail"
|
|
||||||
"appengine/urlfetch"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Mail and URL configuration for build-failure notifications.
const (
	mailFrom   = "builder@golang.org" // use this for sending any mail
	failMailTo = "golang-dev@googlegroups.com"
	domain     = "build.golang.org"
	gobotBase  = "http://research.swtch.com/gobot_codereview"
)

// ignoreFailure is a set of builders that we don't email about because
// they are not yet production-ready.
var ignoreFailure = map[string]bool{
	"dragonfly-386":         true,
	"dragonfly-amd64":       true,
	"freebsd-arm":           true,
	"netbsd-amd64-bsiegert": true,
	"netbsd-arm-rpi":        true,
	"plan9-amd64-aram":      true,
}
|
|
||||||
// notifyOnFailure checks whether the supplied Commit or the subsequent
// Commit (if present) breaks the build for this builder.
// If either of those commits break the build an email notification is sent
// from a delayed task. (We use a task because this way the mail won't be
// sent if the enclosing datastore transaction fails.)
//
// This must be run in a datastore transaction, and the provided *Commit must
// have been retrieved from the datastore within that transaction.
func notifyOnFailure(c appengine.Context, com *Commit, builder string) error {
	if ignoreFailure[builder] {
		return nil
	}

	// TODO(adg): implement notifications for packages
	if com.PackagePath != "" {
		return nil
	}

	p := &Package{Path: com.PackagePath}
	// broken is set to whichever commit (this one or the next) is
	// determined to have broken the build, or stays nil.
	var broken *Commit
	cr := com.Result(builder, "")
	if cr == nil {
		return fmt.Errorf("no result for %s/%s", com.Hash, builder)
	}
	q := datastore.NewQuery("Commit").Ancestor(p.Key(c))
	if cr.OK {
		// This commit is OK. Notify if next Commit is broken.
		next := new(Commit)
		q = q.Filter("ParentHash=", com.Hash)
		if err := firstMatch(c, q, next); err != nil {
			if err == datastore.ErrNoSuchEntity {
				// OK at tip, no notification necessary.
				return nil
			}
			return err
		}
		if nr := next.Result(builder, ""); nr != nil && !nr.OK {
			c.Debugf("commit ok: %#v\nresult: %#v", com, cr)
			c.Debugf("next commit broken: %#v\nnext result:%#v", next, nr)
			broken = next
		}
	} else {
		// This commit is broken. Notify if the previous Commit is OK.
		prev := new(Commit)
		q = q.Filter("Hash=", com.ParentHash)
		if err := firstMatch(c, q, prev); err != nil {
			if err == datastore.ErrNoSuchEntity {
				// No previous result, let the backfill of
				// this result trigger the notification.
				return nil
			}
			return err
		}
		if pr := prev.Result(builder, ""); pr != nil && pr.OK {
			c.Debugf("commit broken: %#v\nresult: %#v", com, cr)
			c.Debugf("previous commit ok: %#v\nprevious result:%#v", prev, pr)
			broken = com
		}
	}
	if broken == nil {
		// No OK->broken transition found; nothing to notify about.
		return nil
	}
	r := broken.Result(builder, "")
	if r == nil {
		return fmt.Errorf("finding result for %q: %+v", builder, com)
	}
	return commonNotify(c, broken, builder, r.LogHash)
}
|
|
||||||
// firstMatch executes the query q and loads the first entity into v.
|
|
||||||
func firstMatch(c appengine.Context, q *datastore.Query, v interface{}) error {
|
|
||||||
t := q.Limit(1).Run(c)
|
|
||||||
_, err := t.Next(v)
|
|
||||||
if err == datastore.Done {
|
|
||||||
err = datastore.ErrNoSuchEntity
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// notifyLater runs notify from a delayed task, so the notification is
// only delivered if the enclosing datastore transaction commits.
var notifyLater = delay.Func("notify", notify)
|
|
||||||
// notify tries to update the CL for the given Commit with a failure message.
|
|
||||||
// If it doesn't succeed, it sends a failure email to golang-dev.
|
|
||||||
func notify(c appengine.Context, com *Commit, builder, logHash string) {
|
|
||||||
v := url.Values{"brokebuild": {builder}, "log": {logHash}}
|
|
||||||
if !updateCL(c, com, v) {
|
|
||||||
// Send a mail notification if the CL can't be found.
|
|
||||||
sendFailMail(c, com, builder, logHash)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateCL tells gobot to update the CL for the given Commit with
// the provided query values. It reports whether the update succeeded;
// failures are logged, not returned.
func updateCL(c appengine.Context, com *Commit, v url.Values) bool {
	cl, err := lookupCL(c, com)
	if err != nil {
		c.Errorf("could not find CL for %v: %v", com.Hash, err)
		return false
	}
	u := fmt.Sprintf("%v?cl=%v&%s", gobotBase, cl, v.Encode())
	r, err := urlfetch.Client(c).Post(u, "text/plain", nil)
	if err != nil {
		c.Errorf("could not update CL %v: %v", cl, err)
		return false
	}
	// The response body is unused; close it promptly.
	r.Body.Close()
	if r.StatusCode != http.StatusOK {
		c.Errorf("could not update CL %v: %v", cl, r.Status)
		return false
	}
	return true
}
|
|
||||||
// clURL matches codereview CL links embedded in a changeset page;
// the capture group is the CL number.
var clURL = regexp.MustCompile(`https://codereview.appspot.com/([0-9]+)`)
|
|
||||||
// lookupCL consults code.google.com for the full change description for the
// provided Commit, and returns the relevant CL number.
func lookupCL(c appengine.Context, com *Commit) (string, error) {
	url := "https://code.google.com/p/go/source/detail?r=" + com.Hash
	r, err := urlfetch.Client(c).Get(url)
	if err != nil {
		return "", err
	}
	defer r.Body.Close()
	if r.StatusCode != http.StatusOK {
		return "", fmt.Errorf("retrieving %v: %v", url, r.Status)
	}
	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return "", err
	}
	// Collect every codereview URL on the page.
	m := clURL.FindAllSubmatch(b, -1)
	if m == nil {
		return "", errors.New("no CL URL found on changeset page")
	}
	// Return the last visible codereview URL on the page,
	// in case the change description refers to another CL.
	return string(m[len(m)-1][1]), nil
}
|
|
||||||
// sendFailMailTmpl renders the build-failure notification mail body
// from build/notify.txt.
var sendFailMailTmpl = template.Must(template.New("notify.txt").
	Funcs(template.FuncMap(tmplFuncs)).
	ParseFiles("build/notify.txt"))
|
|
||||||
// init registers the Commit type with gob so that it can be passed as
// an argument to delayed functions.
func init() {
	gob.Register(&Commit{}) // for delay
}
|
|
||||||
var (
	// sendPerfMailLater runs sendPerfMailFunc from a delayed task,
	// so mail is only queued if the enclosing transaction commits.
	sendPerfMailLater = delay.Func("sendPerfMail", sendPerfMailFunc)
	// sendPerfMailTmpl renders the perf-change notification mail body
	// from build/perf_notify.txt.
	sendPerfMailTmpl = template.Must(
		template.New("perf_notify.txt").
			Funcs(template.FuncMap(tmplFuncs)).
			ParseFiles("build/perf_notify.txt"),
	)
)
|
|
||||||
// sendPerfFailMail locates the log of the first failed benchmark in res
// for the given builder and sends the common failure notification for
// the corresponding commit.
// MUST be called from inside a transaction.
func sendPerfFailMail(c appengine.Context, builder string, res *PerfResult) error {
	com := &Commit{Hash: res.CommitHash}
	if err := datastore.Get(c, com.Key(c), com); err != nil {
		return err
	}
	// Find the "log" artifact of the first non-OK benchmark result.
	logHash := ""
	parsed := res.ParseData()
	for _, data := range parsed[builder] {
		if !data.OK {
			logHash = data.Artifacts["log"]
			break
		}
	}
	if logHash == "" {
		return fmt.Errorf("can not find failed result for commit %v on builder %v", com.Hash, builder)
	}
	return commonNotify(c, com, builder, logHash)
}
|
|
||||||
// commonNotify MUST!!! be called from within a transaction inside which
// the provided Commit entity was retrieved from the datastore.
// It queues a delayed notification task for the broken commit and marks
// the commit so that at most one notification is ever sent for it.
func commonNotify(c appengine.Context, com *Commit, builder, logHash string) error {
	if com.Num == 0 || com.Desc == "" {
		// The commit was not properly loaded; log a stack trace so the
		// offending caller can be identified.
		stk := make([]byte, 10000)
		n := runtime.Stack(stk, false)
		stk = stk[:n]
		c.Errorf("refusing to notify with com=%+v\n%s", *com, string(stk))
		return fmt.Errorf("misuse of commonNotify")
	}
	if com.FailNotificationSent {
		// Already notified about this commit; do nothing.
		return nil
	}
	c.Infof("%s is broken commit; notifying", com.Hash)
	notifyLater.Call(c, com, builder, logHash) // add task to queue
	com.FailNotificationSent = true
	return putCommit(c, com)
}
|
|
||||||
// sendFailMail sends a mail notification that the build failed on the
// provided commit and builder. Errors are logged, not returned.
func sendFailMail(c appengine.Context, com *Commit, builder, logHash string) {
	// get Log
	k := datastore.NewKey(c, "Log", logHash, 0, nil)
	l := new(Log)
	if err := datastore.Get(c, k, l); err != nil {
		c.Errorf("finding Log record %v: %v", logHash, err)
		return
	}
	logText, err := l.Text()
	if err != nil {
		c.Errorf("unpacking Log record %v: %v", logHash, err)
		return
	}

	// prepare mail message
	var body bytes.Buffer
	err = sendFailMailTmpl.Execute(&body, map[string]interface{}{
		"Builder": builder, "Commit": com, "LogHash": logHash, "LogText": logText,
		"Hostname": domain,
	})
	if err != nil {
		c.Errorf("rendering mail template: %v", err)
		return
	}
	subject := fmt.Sprintf("%s broken by %s", builder, shortDesc(com.Desc))
	msg := &mail.Message{
		Sender:  mailFrom,
		To:      []string{failMailTo},
		ReplyTo: failMailTo,
		Subject: subject,
		Body:    body.String(),
	}

	// send mail
	if err := mail.Send(c, msg); err != nil {
		c.Errorf("sending mail: %v", err)
	}
}
|
|
||||||
// PerfChangeBenchmark groups the metric deltas reported for a single
// benchmark in a perf-change notification.
type PerfChangeBenchmark struct {
	Name    string
	Metrics []*PerfChangeMetric
}
|
|
||||||
// PerfChangeMetric describes how one metric of a benchmark changed:
// the old and new values and the relative delta.
type PerfChangeMetric struct {
	Name  string
	Old   uint64
	New   uint64
	Delta float64
}
|
|
||||||
// PerfChangeBenchmarkSlice sorts benchmarks by the two components of
// their names as split by splitBench.
type PerfChangeBenchmarkSlice []*PerfChangeBenchmark

func (l PerfChangeBenchmarkSlice) Len() int      { return len(l) }
func (l PerfChangeBenchmarkSlice) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l PerfChangeBenchmarkSlice) Less(i, j int) bool {
	b1, p1 := splitBench(l[i].Name)
	b2, p2 := splitBench(l[j].Name)
	// Order by the first component, then by the second.
	if b1 != b2 {
		return b1 < b2
	}
	return p1 < p2
}
|
|
||||||
// PerfChangeMetricSlice sorts metrics alphabetically by name.
type PerfChangeMetricSlice []*PerfChangeMetric

func (l PerfChangeMetricSlice) Len() int           { return len(l) }
func (l PerfChangeMetricSlice) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }
func (l PerfChangeMetricSlice) Less(i, j int) bool { return l[i].Name < l[j].Name }
|
|
||||||
func sendPerfMailFunc(c appengine.Context, com *Commit, prevCommitHash, builder string, changes []*PerfChange) {
|
|
||||||
// Sort the changes into the right order.
|
|
||||||
var benchmarks []*PerfChangeBenchmark
|
|
||||||
for _, ch := range changes {
|
|
||||||
// Find the benchmark.
|
|
||||||
var b *PerfChangeBenchmark
|
|
||||||
for _, b1 := range benchmarks {
|
|
||||||
if b1.Name == ch.Bench {
|
|
||||||
b = b1
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if b == nil {
|
|
||||||
b = &PerfChangeBenchmark{Name: ch.Bench}
|
|
||||||
benchmarks = append(benchmarks, b)
|
|
||||||
}
|
|
||||||
b.Metrics = append(b.Metrics, &PerfChangeMetric{Name: ch.Metric, Old: ch.Old, New: ch.New, Delta: ch.Diff})
|
|
||||||
}
|
|
||||||
for _, b := range benchmarks {
|
|
||||||
sort.Sort(PerfChangeMetricSlice(b.Metrics))
|
|
||||||
}
|
|
||||||
sort.Sort(PerfChangeBenchmarkSlice(benchmarks))
|
|
||||||
|
|
||||||
u := fmt.Sprintf("http://%v/perfdetail?commit=%v&commit0=%v&kind=builder&builder=%v", domain, com.Hash, prevCommitHash, builder)
|
|
||||||
|
|
||||||
// Prepare mail message (without Commit, for updateCL).
|
|
||||||
var body bytes.Buffer
|
|
||||||
err := sendPerfMailTmpl.Execute(&body, map[string]interface{}{
|
|
||||||
"Builder": builder, "Hostname": domain, "Url": u, "Benchmarks": benchmarks,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
c.Errorf("rendering perf mail template: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// First, try to update the CL.
|
|
||||||
v := url.Values{"textmsg": {body.String()}}
|
|
||||||
if updateCL(c, com, v) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise, send mail (with Commit, for independent mail message).
|
|
||||||
body.Reset()
|
|
||||||
err = sendPerfMailTmpl.Execute(&body, map[string]interface{}{
|
|
||||||
"Builder": builder, "Commit": com, "Hostname": domain, "Url": u, "Benchmarks": benchmarks,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
c.Errorf("rendering perf mail template: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
subject := fmt.Sprintf("Perf changes on %s by %s", builder, shortDesc(com.Desc))
|
|
||||||
msg := &mail.Message{
|
|
||||||
Sender: mailFrom,
|
|
||||||
To: []string{failMailTo},
|
|
||||||
ReplyTo: failMailTo,
|
|
||||||
Subject: subject,
|
|
||||||
Body: body.String(),
|
|
||||||
}
|
|
||||||
|
|
||||||
// send mail
|
|
||||||
if err := mail.Send(c, msg); err != nil {
|
|
||||||
c.Errorf("sending mail: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,9 +0,0 @@
|
|||||||
Change {{shortHash .Commit.Hash}} broke the {{.Builder}} build:
|
|
||||||
http://{{.Hostname}}/log/{{.LogHash}}
|
|
||||||
|
|
||||||
{{.Commit.Desc}}
|
|
||||||
|
|
||||||
https://golang.org/change/{{shortHash .Commit.Hash}}
|
|
||||||
|
|
||||||
$ tail -200 < log
|
|
||||||
{{printf "%s" .LogText | tail 200}}
|
|
@ -1,313 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
)
|
|
||||||
|
|
||||||
var knownTags = map[string]string{
|
|
||||||
"go1": "0051c7442fed9c888de6617fa9239a913904d96e",
|
|
||||||
"go1.1": "d29da2ced72ba2cf48ed6a8f1ec4abc01e4c5bf1",
|
|
||||||
"go1.2": "b1edf8faa5d6cbc50c6515785df9df9c19296564",
|
|
||||||
"go1.3": "f153208c0a0e306bfca14f71ef11f09859ccabc8",
|
|
||||||
"go1.4": "faa3ed1dc30e42771a68b6337dcf8be9518d5c07",
|
|
||||||
}
|
|
||||||
|
|
||||||
var lastRelease = "go1.4"
|
|
||||||
|
|
||||||
func splitBench(benchProcs string) (string, int) {
|
|
||||||
ss := strings.Split(benchProcs, "-")
|
|
||||||
procs, _ := strconv.Atoi(ss[1])
|
|
||||||
return ss[0], procs
|
|
||||||
}
|
|
||||||
|
|
||||||
func dashPerfCommits(c appengine.Context, page int) ([]*Commit, error) {
|
|
||||||
q := datastore.NewQuery("Commit").
|
|
||||||
Ancestor((&Package{}).Key(c)).
|
|
||||||
Order("-Num").
|
|
||||||
Filter("NeedsBenchmarking =", true).
|
|
||||||
Limit(commitsPerPage).
|
|
||||||
Offset(page * commitsPerPage)
|
|
||||||
var commits []*Commit
|
|
||||||
_, err := q.GetAll(c, &commits)
|
|
||||||
if err == nil && len(commits) == 0 {
|
|
||||||
err = fmt.Errorf("no commits")
|
|
||||||
}
|
|
||||||
return commits, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func perfChangeStyle(pc *PerfConfig, v float64, builder, benchmark, metric string) string {
|
|
||||||
noise := pc.NoiseLevel(builder, benchmark, metric)
|
|
||||||
if isNoise(v, noise) {
|
|
||||||
return "noise"
|
|
||||||
}
|
|
||||||
if v > 0 {
|
|
||||||
return "bad"
|
|
||||||
}
|
|
||||||
return "good"
|
|
||||||
}
|
|
||||||
|
|
||||||
func isNoise(diff, noise float64) bool {
|
|
||||||
rnoise := -100 * noise / (noise + 100)
|
|
||||||
return diff < noise && diff > rnoise
|
|
||||||
}
|
|
||||||
|
|
||||||
func perfDiff(old, new uint64) float64 {
|
|
||||||
return 100*float64(new)/float64(old) - 100
|
|
||||||
}
|
|
||||||
|
|
||||||
func isPerfFailed(res *PerfResult, builder string) bool {
|
|
||||||
data := res.ParseData()[builder]
|
|
||||||
return data != nil && data["meta-done"] != nil && !data["meta-done"].OK
|
|
||||||
}
|
|
||||||
|
|
||||||
// PerfResultCache caches a set of PerfResults so that it's easy to access them
|
|
||||||
// without lots of duplicate accesses to datastore.
|
|
||||||
// It allows to iterate over newer or older results for some base commit.
|
|
||||||
type PerfResultCache struct {
|
|
||||||
c appengine.Context
|
|
||||||
newer bool
|
|
||||||
iter *datastore.Iterator
|
|
||||||
results map[int]*PerfResult
|
|
||||||
}
|
|
||||||
|
|
||||||
func MakePerfResultCache(c appengine.Context, com *Commit, newer bool) *PerfResultCache {
|
|
||||||
p := &Package{}
|
|
||||||
q := datastore.NewQuery("PerfResult").Ancestor(p.Key(c)).Limit(100)
|
|
||||||
if newer {
|
|
||||||
q = q.Filter("CommitNum >=", com.Num).Order("CommitNum")
|
|
||||||
} else {
|
|
||||||
q = q.Filter("CommitNum <=", com.Num).Order("-CommitNum")
|
|
||||||
}
|
|
||||||
rc := &PerfResultCache{c: c, newer: newer, iter: q.Run(c), results: make(map[int]*PerfResult)}
|
|
||||||
return rc
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rc *PerfResultCache) Get(commitNum int) *PerfResult {
|
|
||||||
rc.Next(commitNum) // fetch the commit, if necessary
|
|
||||||
return rc.results[commitNum]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Next returns the next PerfResult for the commit commitNum.
|
|
||||||
// It does not care whether the result has any data, failed or whatever.
|
|
||||||
func (rc *PerfResultCache) Next(commitNum int) (*PerfResult, error) {
|
|
||||||
// See if we have next result in the cache.
|
|
||||||
next := -1
|
|
||||||
for ci := range rc.results {
|
|
||||||
if rc.newer {
|
|
||||||
if ci > commitNum && (next == -1 || ci < next) {
|
|
||||||
next = ci
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if ci < commitNum && (next == -1 || ci > next) {
|
|
||||||
next = ci
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if next != -1 {
|
|
||||||
return rc.results[next], nil
|
|
||||||
}
|
|
||||||
// Fetch next result from datastore.
|
|
||||||
res := new(PerfResult)
|
|
||||||
_, err := rc.iter.Next(res)
|
|
||||||
if err == datastore.Done {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("fetching perf results: %v", err)
|
|
||||||
}
|
|
||||||
if (rc.newer && res.CommitNum < commitNum) || (!rc.newer && res.CommitNum > commitNum) {
|
|
||||||
rc.c.Errorf("PerfResultCache.Next: bad commit num")
|
|
||||||
}
|
|
||||||
rc.results[res.CommitNum] = res
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextForComparison returns PerfResult which we need to use for performance comprison.
|
|
||||||
// It skips failed results, but does not skip results with no data.
|
|
||||||
func (rc *PerfResultCache) NextForComparison(commitNum int, builder string) (*PerfResult, error) {
|
|
||||||
for {
|
|
||||||
res, err := rc.Next(commitNum)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if res == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if res.CommitNum == commitNum {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
parsed := res.ParseData()
|
|
||||||
if builder != "" {
|
|
||||||
// Comparing for a particular builder.
|
|
||||||
// This is used in perf_changes and in email notifications.
|
|
||||||
b := parsed[builder]
|
|
||||||
if b == nil || b["meta-done"] == nil {
|
|
||||||
// No results yet, must not do the comparison.
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if b["meta-done"].OK {
|
|
||||||
// Have complete results, compare.
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Comparing for all builders, find a result with at least
|
|
||||||
// one successful meta-done.
|
|
||||||
// This is used in perf_detail.
|
|
||||||
for _, benchs := range parsed {
|
|
||||||
if data := benchs["meta-done"]; data != nil && data.OK {
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Failed, try next result.
|
|
||||||
commitNum = res.CommitNum
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type PerfChange struct {
|
|
||||||
Builder string
|
|
||||||
Bench string
|
|
||||||
Metric string
|
|
||||||
Old uint64
|
|
||||||
New uint64
|
|
||||||
Diff float64
|
|
||||||
}
|
|
||||||
|
|
||||||
func significantPerfChanges(pc *PerfConfig, builder string, prevRes, res *PerfResult) (changes []*PerfChange) {
|
|
||||||
// First, collect all significant changes.
|
|
||||||
for builder1, benchmarks1 := range res.ParseData() {
|
|
||||||
if builder != "" && builder != builder1 {
|
|
||||||
// This is not the builder you're looking for, Luke.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
benchmarks0 := prevRes.ParseData()[builder1]
|
|
||||||
if benchmarks0 == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for benchmark, data1 := range benchmarks1 {
|
|
||||||
data0 := benchmarks0[benchmark]
|
|
||||||
if data0 == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for metric, val := range data1.Metrics {
|
|
||||||
val0 := data0.Metrics[metric]
|
|
||||||
if val0 == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
diff := perfDiff(val0, val)
|
|
||||||
noise := pc.NoiseLevel(builder, benchmark, metric)
|
|
||||||
if isNoise(diff, noise) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ch := &PerfChange{Builder: builder, Bench: benchmark, Metric: metric, Old: val0, New: val, Diff: diff}
|
|
||||||
changes = append(changes, ch)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Then, strip non-repeatable changes (flakes).
|
|
||||||
// The hypothesis is that a real change must show up with the majority of GOMAXPROCS values.
|
|
||||||
majority := len(pc.ProcList(builder))/2 + 1
|
|
||||||
cnt := make(map[string]int)
|
|
||||||
for _, ch := range changes {
|
|
||||||
b, _ := splitBench(ch.Bench)
|
|
||||||
name := b + "|" + ch.Metric
|
|
||||||
if ch.Diff < 0 {
|
|
||||||
name += "--"
|
|
||||||
}
|
|
||||||
cnt[name] = cnt[name] + 1
|
|
||||||
}
|
|
||||||
for i := 0; i < len(changes); i++ {
|
|
||||||
ch := changes[i]
|
|
||||||
b, _ := splitBench(ch.Bench)
|
|
||||||
name := b + "|" + ch.Metric
|
|
||||||
if cnt[name] >= majority {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if cnt[name+"--"] >= majority {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Remove flake.
|
|
||||||
last := len(changes) - 1
|
|
||||||
changes[i] = changes[last]
|
|
||||||
changes = changes[:last]
|
|
||||||
i--
|
|
||||||
}
|
|
||||||
return changes
|
|
||||||
}
|
|
||||||
|
|
||||||
// orderPerfTodo reorders commit nums for benchmarking todo.
|
|
||||||
// The resulting order is somewhat tricky. We want 2 things:
|
|
||||||
// 1. benchmark sequentially backwards (this provides information about most
|
|
||||||
// recent changes, and allows to estimate noise levels)
|
|
||||||
// 2. benchmark old commits in "scatter" order (this allows to quickly gather
|
|
||||||
// brief information about thousands of old commits)
|
|
||||||
// So this function interleaves the two orders.
|
|
||||||
func orderPerfTodo(nums []int) []int {
|
|
||||||
sort.Ints(nums)
|
|
||||||
n := len(nums)
|
|
||||||
pow2 := uint32(0) // next power-of-two that is >= n
|
|
||||||
npow2 := 0
|
|
||||||
for npow2 <= n {
|
|
||||||
pow2++
|
|
||||||
npow2 = 1 << pow2
|
|
||||||
}
|
|
||||||
res := make([]int, n)
|
|
||||||
resPos := n - 1 // result array is filled backwards
|
|
||||||
present := make([]bool, n) // denotes values that already present in result array
|
|
||||||
for i0, i1 := n-1, 0; i0 >= 0 || i1 < npow2; {
|
|
||||||
// i0 represents "benchmark sequentially backwards" sequence
|
|
||||||
// find the next commit that is not yet present and add it
|
|
||||||
for cnt := 0; cnt < 2; cnt++ {
|
|
||||||
for ; i0 >= 0; i0-- {
|
|
||||||
if !present[i0] {
|
|
||||||
present[i0] = true
|
|
||||||
res[resPos] = nums[i0]
|
|
||||||
resPos--
|
|
||||||
i0--
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// i1 represents "scatter order" sequence
|
|
||||||
// find the next commit that is not yet present and add it
|
|
||||||
for ; i1 < npow2; i1++ {
|
|
||||||
// do the "recursive split-ordering" trick
|
|
||||||
idx := 0 // bitwise reverse of i1
|
|
||||||
for j := uint32(0); j <= pow2; j++ {
|
|
||||||
if (i1 & (1 << j)) != 0 {
|
|
||||||
idx = idx | (1 << (pow2 - j - 1))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if idx < n && !present[idx] {
|
|
||||||
present[idx] = true
|
|
||||||
res[resPos] = nums[idx]
|
|
||||||
resPos--
|
|
||||||
i1++
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// The above can't possibly be correct. Do dump check.
|
|
||||||
res2 := make([]int, n)
|
|
||||||
copy(res2, res)
|
|
||||||
sort.Ints(res2)
|
|
||||||
for i := range res2 {
|
|
||||||
if res2[i] != nums[i] {
|
|
||||||
panic(fmt.Sprintf("diff at %v: expect %v, want %v\nwas: %v\n become: %v",
|
|
||||||
i, nums[i], res2[i], nums, res2))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
@ -1,282 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"html/template"
|
|
||||||
"net/http"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
handleFunc("/perf", perfChangesHandler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// perfSummaryHandler draws the main benchmarking page.
|
|
||||||
func perfChangesHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
d := dashboardForRequest(r)
|
|
||||||
c := d.Context(appengine.NewContext(r))
|
|
||||||
|
|
||||||
page, _ := strconv.Atoi(r.FormValue("page"))
|
|
||||||
if page < 0 {
|
|
||||||
page = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
pc, err := GetPerfConfig(c, r)
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
commits, err := dashPerfCommits(c, page)
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch PerfResult's for the commits.
|
|
||||||
var uiCommits []*perfChangesCommit
|
|
||||||
rc := MakePerfResultCache(c, commits[0], false)
|
|
||||||
|
|
||||||
// But first compare tip with the last release.
|
|
||||||
if page == 0 {
|
|
||||||
res0 := &PerfResult{CommitHash: knownTags[lastRelease]}
|
|
||||||
if err := datastore.Get(c, res0.Key(c), res0); err != nil && err != datastore.ErrNoSuchEntity {
|
|
||||||
logErr(w, r, fmt.Errorf("getting PerfResult: %v", err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err != datastore.ErrNoSuchEntity {
|
|
||||||
uiCom, err := handleOneCommit(pc, commits[0], rc, res0)
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
uiCom.IsSummary = true
|
|
||||||
uiCom.ParentHash = lastRelease
|
|
||||||
uiCommits = append(uiCommits, uiCom)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, com := range commits {
|
|
||||||
uiCom, err := handleOneCommit(pc, com, rc, nil)
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
uiCommits = append(uiCommits, uiCom)
|
|
||||||
}
|
|
||||||
|
|
||||||
p := &Pagination{}
|
|
||||||
if len(commits) == commitsPerPage {
|
|
||||||
p.Next = page + 1
|
|
||||||
}
|
|
||||||
if page > 0 {
|
|
||||||
p.Prev = page - 1
|
|
||||||
p.HasPrev = true
|
|
||||||
}
|
|
||||||
|
|
||||||
data := &perfChangesData{d, p, uiCommits}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := perfChangesTemplate.Execute(&buf, data); err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteTo(w)
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleOneCommit(pc *PerfConfig, com *Commit, rc *PerfResultCache, baseRes *PerfResult) (*perfChangesCommit, error) {
|
|
||||||
uiCom := new(perfChangesCommit)
|
|
||||||
uiCom.Commit = com
|
|
||||||
res1 := rc.Get(com.Num)
|
|
||||||
for builder, benchmarks1 := range res1.ParseData() {
|
|
||||||
for benchmark, data1 := range benchmarks1 {
|
|
||||||
if benchmark != "meta-done" || !data1.OK {
|
|
||||||
uiCom.NumResults++
|
|
||||||
}
|
|
||||||
if !data1.OK {
|
|
||||||
v := new(perfChangesChange)
|
|
||||||
v.diff = 10000
|
|
||||||
v.Style = "fail"
|
|
||||||
v.Builder = builder
|
|
||||||
v.Link = fmt.Sprintf("log/%v", data1.Artifacts["log"])
|
|
||||||
v.Val = builder
|
|
||||||
v.Hint = builder
|
|
||||||
if benchmark != "meta-done" {
|
|
||||||
v.Hint += "/" + benchmark
|
|
||||||
}
|
|
||||||
m := findMetric(uiCom, "failure")
|
|
||||||
m.BadChanges = append(m.BadChanges, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
res0 := baseRes
|
|
||||||
if res0 == nil {
|
|
||||||
var err error
|
|
||||||
res0, err = rc.NextForComparison(com.Num, builder)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if res0 == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
changes := significantPerfChanges(pc, builder, res0, res1)
|
|
||||||
changes = dedupPerfChanges(changes)
|
|
||||||
for _, ch := range changes {
|
|
||||||
v := new(perfChangesChange)
|
|
||||||
v.Builder = builder
|
|
||||||
v.Benchmark, v.Procs = splitBench(ch.Bench)
|
|
||||||
v.diff = ch.Diff
|
|
||||||
v.Val = fmt.Sprintf("%+.2f%%", ch.Diff)
|
|
||||||
v.Hint = fmt.Sprintf("%v/%v", builder, ch.Bench)
|
|
||||||
v.Link = fmt.Sprintf("perfdetail?commit=%v&commit0=%v&builder=%v&benchmark=%v", com.Hash, res0.CommitHash, builder, v.Benchmark)
|
|
||||||
m := findMetric(uiCom, ch.Metric)
|
|
||||||
if v.diff > 0 {
|
|
||||||
v.Style = "bad"
|
|
||||||
m.BadChanges = append(m.BadChanges, v)
|
|
||||||
} else {
|
|
||||||
v.Style = "good"
|
|
||||||
m.GoodChanges = append(m.GoodChanges, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort metrics and changes.
|
|
||||||
for _, m := range uiCom.Metrics {
|
|
||||||
sort.Sort(m.GoodChanges)
|
|
||||||
sort.Sort(m.BadChanges)
|
|
||||||
}
|
|
||||||
sort.Sort(uiCom.Metrics)
|
|
||||||
// Need at least one metric for UI.
|
|
||||||
if len(uiCom.Metrics) == 0 {
|
|
||||||
uiCom.Metrics = append(uiCom.Metrics, &perfChangesMetric{})
|
|
||||||
}
|
|
||||||
uiCom.Metrics[0].First = true
|
|
||||||
return uiCom, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find builder-procs with the maximum absolute diff for every benchmark-metric, drop the rest.
|
|
||||||
func dedupPerfChanges(changes []*PerfChange) (deduped []*PerfChange) {
|
|
||||||
maxDiff := make(map[string]float64)
|
|
||||||
maxBench := make(map[string]string)
|
|
||||||
// First, find the maximum.
|
|
||||||
for _, ch := range changes {
|
|
||||||
bench, _ := splitBench(ch.Bench)
|
|
||||||
k := bench + "|" + ch.Metric
|
|
||||||
v := ch.Diff
|
|
||||||
if v < 0 {
|
|
||||||
v = -v
|
|
||||||
}
|
|
||||||
if maxDiff[k] < v {
|
|
||||||
maxDiff[k] = v
|
|
||||||
maxBench[k] = ch.Builder + "|" + ch.Bench
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Then, remove the rest.
|
|
||||||
for _, ch := range changes {
|
|
||||||
bench, _ := splitBench(ch.Bench)
|
|
||||||
k := bench + "|" + ch.Metric
|
|
||||||
if maxBench[k] == ch.Builder+"|"+ch.Bench {
|
|
||||||
deduped = append(deduped, ch)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func findMetric(c *perfChangesCommit, metric string) *perfChangesMetric {
|
|
||||||
for _, m := range c.Metrics {
|
|
||||||
if m.Name == metric {
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m := new(perfChangesMetric)
|
|
||||||
m.Name = metric
|
|
||||||
c.Metrics = append(c.Metrics, m)
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
type uiPerfConfig struct {
|
|
||||||
Builders []uiPerfConfigElem
|
|
||||||
Benchmarks []uiPerfConfigElem
|
|
||||||
Metrics []uiPerfConfigElem
|
|
||||||
Procs []uiPerfConfigElem
|
|
||||||
CommitsFrom []uiPerfConfigElem
|
|
||||||
CommitsTo []uiPerfConfigElem
|
|
||||||
}
|
|
||||||
|
|
||||||
type uiPerfConfigElem struct {
|
|
||||||
Name string
|
|
||||||
Selected bool
|
|
||||||
}
|
|
||||||
|
|
||||||
var perfChangesTemplate = template.Must(
|
|
||||||
template.New("perf_changes.html").Funcs(tmplFuncs).ParseFiles("build/perf_changes.html"),
|
|
||||||
)
|
|
||||||
|
|
||||||
type perfChangesData struct {
|
|
||||||
Dashboard *Dashboard
|
|
||||||
Pagination *Pagination
|
|
||||||
Commits []*perfChangesCommit
|
|
||||||
}
|
|
||||||
|
|
||||||
type perfChangesCommit struct {
|
|
||||||
*Commit
|
|
||||||
IsSummary bool
|
|
||||||
NumResults int
|
|
||||||
Metrics perfChangesMetricSlice
|
|
||||||
}
|
|
||||||
|
|
||||||
type perfChangesMetric struct {
|
|
||||||
Name string
|
|
||||||
First bool
|
|
||||||
BadChanges perfChangesChangeSlice
|
|
||||||
GoodChanges perfChangesChangeSlice
|
|
||||||
}
|
|
||||||
|
|
||||||
type perfChangesChange struct {
|
|
||||||
Builder string
|
|
||||||
Benchmark string
|
|
||||||
Link string
|
|
||||||
Hint string
|
|
||||||
Style string
|
|
||||||
Val string
|
|
||||||
Procs int
|
|
||||||
diff float64
|
|
||||||
}
|
|
||||||
|
|
||||||
type perfChangesMetricSlice []*perfChangesMetric
|
|
||||||
|
|
||||||
func (l perfChangesMetricSlice) Len() int { return len(l) }
|
|
||||||
func (l perfChangesMetricSlice) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
|
||||||
func (l perfChangesMetricSlice) Less(i, j int) bool {
|
|
||||||
if l[i].Name == "failure" || l[j].Name == "failure" {
|
|
||||||
return l[i].Name == "failure"
|
|
||||||
}
|
|
||||||
return l[i].Name < l[j].Name
|
|
||||||
}
|
|
||||||
|
|
||||||
type perfChangesChangeSlice []*perfChangesChange
|
|
||||||
|
|
||||||
func (l perfChangesChangeSlice) Len() int { return len(l) }
|
|
||||||
func (l perfChangesChangeSlice) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
|
||||||
func (l perfChangesChangeSlice) Less(i, j int) bool {
|
|
||||||
vi, vj := l[i].diff, l[j].diff
|
|
||||||
if vi > 0 && vj > 0 {
|
|
||||||
return vi > vj
|
|
||||||
} else if vi < 0 && vj < 0 {
|
|
||||||
return vi < vj
|
|
||||||
} else {
|
|
||||||
panic("comparing positive and negative diff")
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,89 +0,0 @@
|
|||||||
<!doctype html>
|
|
||||||
<html>
|
|
||||||
<head>
|
|
||||||
<title>{{$.Dashboard.Name}} Dashboard</title>
|
|
||||||
<link rel="stylesheet" href="/static/style.css"/>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<header id="topbar">
|
|
||||||
<h1>Go Dashboard</h1>
|
|
||||||
<nav>
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/">Test</a>
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/perf">Perf</a>
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/perfgraph">Graphs</a>
|
|
||||||
</nav>
|
|
||||||
<div class="clear"></div>
|
|
||||||
</header>
|
|
||||||
|
|
||||||
<div class="page">
|
|
||||||
<div class="build-container">
|
|
||||||
<table class="build">
|
|
||||||
<colgroup class="col-hash"></colgroup>
|
|
||||||
<colgroup class="col-numresults"></colgroup>
|
|
||||||
<colgroup class="col-metric"></colgroup>
|
|
||||||
<colgroup class="col-result"></colgroup>
|
|
||||||
<colgroup class="col-result"></colgroup>
|
|
||||||
<colgroup class="col-user"></colgroup>
|
|
||||||
<colgroup class="col-time"></colgroup>
|
|
||||||
<colgroup class="col-desc"></colgroup>
|
|
||||||
<tbody>
|
|
||||||
{{range $c := $.Commits}}
|
|
||||||
{{range $m := $c.Metrics}}
|
|
||||||
{{if $m.First}}
|
|
||||||
<tr class="row-commit">
|
|
||||||
{{if $c.IsSummary}}
|
|
||||||
<td class="hash">tip vs {{$c.ParentHash}}</td>
|
|
||||||
{{else}}
|
|
||||||
<td class="hash"><a href="{{repoURL $.Dashboard.Name $c.Hash ""}}">{{shortHash $c.Hash}}</a></td>
|
|
||||||
{{end}}
|
|
||||||
<td class="numresults">{{$c.NumResults}}</td>
|
|
||||||
{{else}}
|
|
||||||
<tr>
|
|
||||||
<td class="user"> </td>
|
|
||||||
<td class="numresults"> </td>
|
|
||||||
{{end}}
|
|
||||||
<td>{{$m.Name}}</td>
|
|
||||||
<td>
|
|
||||||
{{range $ch := $m.BadChanges}}
|
|
||||||
<a class="{{$ch.Style}}" href="{{$ch.Link}}" title="{{$ch.Hint}}">{{$ch.Val}}</a>
|
|
||||||
{{end}}
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
{{range $ch := $m.GoodChanges}}
|
|
||||||
<a class="{{$ch.Style}}" href="{{$ch.Link}}" title="{{$ch.Hint}}">{{$ch.Val}}</a>
|
|
||||||
{{end}}
|
|
||||||
</td>
|
|
||||||
{{if $m.First}}
|
|
||||||
<td class="user" title="{{$c.User}}">{{shortUser $c.User}}</td>
|
|
||||||
<td class="time">{{$c.Time.Format "Mon 02 Jan 15:04"}}</td>
|
|
||||||
<td class="desc" title="{{$c.Desc}}">{{shortDesc $c.Desc}}</td>
|
|
||||||
{{else}}
|
|
||||||
<td class="user"> </td>
|
|
||||||
<td class="time"> </td>
|
|
||||||
<td class="desc"> </td>
|
|
||||||
{{end}}
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
{{if $c.IsSummary}}
|
|
||||||
<tr class="row-commit"><td>---</td></tr>
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
|
|
||||||
{{with $.Pagination}}
|
|
||||||
<div class="paginate">
|
|
||||||
<nav>
|
|
||||||
<a {{if .HasPrev}}href="?page={{.Prev}}"{{else}}class="inactive"{{end}}>newer</a>
|
|
||||||
<a {{if .Next}}href="?page={{.Next}}"{{else}}class="inactive"{{end}}>older</a>
|
|
||||||
<a {{if .HasPrev}}href="?"{{else}}class="inactive"{{end}}>latest</a>
|
|
||||||
<a href="https://golang.org/wiki/PerfDashboard">Help</a>
|
|
||||||
</nav>
|
|
||||||
</div>
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
</div>
|
|
||||||
<div class="clear"></div>
|
|
||||||
</div>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
@ -1,219 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"html/template"
|
|
||||||
"net/http"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
handleFunc("/perfdetail", perfDetailUIHandler)
|
|
||||||
}
|
|
||||||
|
|
||||||
func perfDetailUIHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
d := dashboardForRequest(r)
|
|
||||||
c := d.Context(appengine.NewContext(r))
|
|
||||||
pc, err := GetPerfConfig(c, r)
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
kind := r.FormValue("kind")
|
|
||||||
builder := r.FormValue("builder")
|
|
||||||
benchmark := r.FormValue("benchmark")
|
|
||||||
if kind == "" {
|
|
||||||
kind = "benchmark"
|
|
||||||
}
|
|
||||||
if kind != "benchmark" && kind != "builder" {
|
|
||||||
logErr(w, r, fmt.Errorf("unknown kind %s", kind))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch the new commit.
|
|
||||||
com1 := new(Commit)
|
|
||||||
com1.Hash = r.FormValue("commit")
|
|
||||||
if hash, ok := knownTags[com1.Hash]; ok {
|
|
||||||
com1.Hash = hash
|
|
||||||
}
|
|
||||||
if err := datastore.Get(c, com1.Key(c), com1); err != nil {
|
|
||||||
logErr(w, r, fmt.Errorf("failed to fetch commit %s: %v", com1.Hash, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Fetch the associated perf result.
|
|
||||||
ress1 := &PerfResult{CommitHash: com1.Hash}
|
|
||||||
if err := datastore.Get(c, ress1.Key(c), ress1); err != nil {
|
|
||||||
logErr(w, r, fmt.Errorf("failed to fetch perf result %s: %v", com1.Hash, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch the old commit.
|
|
||||||
var ress0 *PerfResult
|
|
||||||
com0 := new(Commit)
|
|
||||||
com0.Hash = r.FormValue("commit0")
|
|
||||||
if hash, ok := knownTags[com0.Hash]; ok {
|
|
||||||
com0.Hash = hash
|
|
||||||
}
|
|
||||||
if com0.Hash != "" {
|
|
||||||
// Have an exact commit hash, fetch directly.
|
|
||||||
if err := datastore.Get(c, com0.Key(c), com0); err != nil {
|
|
||||||
logErr(w, r, fmt.Errorf("failed to fetch commit %s: %v", com0.Hash, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ress0 = &PerfResult{CommitHash: com0.Hash}
|
|
||||||
if err := datastore.Get(c, ress0.Key(c), ress0); err != nil {
|
|
||||||
logErr(w, r, fmt.Errorf("failed to fetch perf result for %s: %v", com0.Hash, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Don't have the commit hash, find the previous commit to compare.
|
|
||||||
rc := MakePerfResultCache(c, com1, false)
|
|
||||||
ress0, err = rc.NextForComparison(com1.Num, "")
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if ress0 == nil {
|
|
||||||
logErr(w, r, fmt.Errorf("no previous commit with results"))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Now that we know the right result, fetch the commit.
|
|
||||||
com0.Hash = ress0.CommitHash
|
|
||||||
if err := datastore.Get(c, com0.Key(c), com0); err != nil {
|
|
||||||
logErr(w, r, fmt.Errorf("failed to fetch commit %s: %v", com0.Hash, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
res0 := ress0.ParseData()
|
|
||||||
res1 := ress1.ParseData()
|
|
||||||
var benchmarks []*uiPerfDetailBenchmark
|
|
||||||
var list []string
|
|
||||||
if kind == "builder" {
|
|
||||||
list = pc.BenchmarksForBuilder(builder)
|
|
||||||
} else {
|
|
||||||
list = pc.BuildersForBenchmark(benchmark)
|
|
||||||
}
|
|
||||||
for _, other := range list {
|
|
||||||
if kind == "builder" {
|
|
||||||
benchmark = other
|
|
||||||
} else {
|
|
||||||
builder = other
|
|
||||||
}
|
|
||||||
var procs []*uiPerfDetailProcs
|
|
||||||
allProcs := pc.ProcList(builder)
|
|
||||||
for _, p := range allProcs {
|
|
||||||
BenchProcs := fmt.Sprintf("%v-%v", benchmark, p)
|
|
||||||
if res0[builder] == nil || res0[builder][BenchProcs] == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
pp := &uiPerfDetailProcs{Procs: p}
|
|
||||||
for metric, val := range res0[builder][BenchProcs].Metrics {
|
|
||||||
var pm uiPerfDetailMetric
|
|
||||||
pm.Name = metric
|
|
||||||
pm.Val0 = fmt.Sprintf("%v", val)
|
|
||||||
val1 := uint64(0)
|
|
||||||
if res1[builder] != nil && res1[builder][BenchProcs] != nil {
|
|
||||||
val1 = res1[builder][BenchProcs].Metrics[metric]
|
|
||||||
}
|
|
||||||
pm.Val1 = fmt.Sprintf("%v", val1)
|
|
||||||
v0 := val
|
|
||||||
v1 := val1
|
|
||||||
valf := perfDiff(v0, v1)
|
|
||||||
pm.Delta = fmt.Sprintf("%+.2f%%", valf)
|
|
||||||
pm.Style = perfChangeStyle(pc, valf, builder, BenchProcs, pm.Name)
|
|
||||||
pp.Metrics = append(pp.Metrics, pm)
|
|
||||||
}
|
|
||||||
sort.Sort(pp.Metrics)
|
|
||||||
for artifact, hash := range res0[builder][BenchProcs].Artifacts {
|
|
||||||
var pm uiPerfDetailMetric
|
|
||||||
pm.Val0 = fmt.Sprintf("%v", artifact)
|
|
||||||
pm.Link0 = fmt.Sprintf("log/%v", hash)
|
|
||||||
pm.Val1 = fmt.Sprintf("%v", artifact)
|
|
||||||
if res1[builder] != nil && res1[builder][BenchProcs] != nil && res1[builder][BenchProcs].Artifacts[artifact] != "" {
|
|
||||||
pm.Link1 = fmt.Sprintf("log/%v", res1[builder][BenchProcs].Artifacts[artifact])
|
|
||||||
}
|
|
||||||
pp.Metrics = append(pp.Metrics, pm)
|
|
||||||
}
|
|
||||||
procs = append(procs, pp)
|
|
||||||
}
|
|
||||||
benchmarks = append(benchmarks, &uiPerfDetailBenchmark{other, procs})
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg := new(uiPerfConfig)
|
|
||||||
for _, v := range pc.BuildersForBenchmark("") {
|
|
||||||
cfg.Builders = append(cfg.Builders, uiPerfConfigElem{v, v == builder})
|
|
||||||
}
|
|
||||||
for _, v := range pc.BenchmarksForBuilder("") {
|
|
||||||
cfg.Benchmarks = append(cfg.Benchmarks, uiPerfConfigElem{v, v == benchmark})
|
|
||||||
}
|
|
||||||
|
|
||||||
data := &uiPerfDetailTemplateData{d, cfg, kind == "builder", com0, com1, benchmarks}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := uiPerfDetailTemplate.Execute(&buf, data); err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteTo(w)
|
|
||||||
}
|
|
||||||
|
|
||||||
func perfResultSplit(s string) (builder string, benchmark string, procs int) {
|
|
||||||
s1 := strings.Split(s, "|")
|
|
||||||
s2 := strings.Split(s1[1], "-")
|
|
||||||
procs, _ = strconv.Atoi(s2[1])
|
|
||||||
return s1[0], s2[0], procs
|
|
||||||
}
|
|
||||||
|
|
||||||
type uiPerfDetailTemplateData struct {
|
|
||||||
Dashboard *Dashboard
|
|
||||||
Config *uiPerfConfig
|
|
||||||
KindBuilder bool
|
|
||||||
Commit0 *Commit
|
|
||||||
Commit1 *Commit
|
|
||||||
Benchmarks []*uiPerfDetailBenchmark
|
|
||||||
}
|
|
||||||
|
|
||||||
type uiPerfDetailBenchmark struct {
|
|
||||||
Name string
|
|
||||||
Procs []*uiPerfDetailProcs
|
|
||||||
}
|
|
||||||
|
|
||||||
type uiPerfDetailProcs struct {
|
|
||||||
Procs int
|
|
||||||
Metrics uiPerfDetailMetrics
|
|
||||||
}
|
|
||||||
|
|
||||||
type uiPerfDetailMetric struct {
|
|
||||||
Name string
|
|
||||||
Val0 string
|
|
||||||
Val1 string
|
|
||||||
Link0 string
|
|
||||||
Link1 string
|
|
||||||
Delta string
|
|
||||||
Style string
|
|
||||||
}
|
|
||||||
|
|
||||||
type uiPerfDetailMetrics []uiPerfDetailMetric
|
|
||||||
|
|
||||||
func (l uiPerfDetailMetrics) Len() int { return len(l) }
|
|
||||||
func (l uiPerfDetailMetrics) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
|
||||||
func (l uiPerfDetailMetrics) Less(i, j int) bool { return l[i].Name < l[j].Name }
|
|
||||||
|
|
||||||
var uiPerfDetailTemplate = template.Must(
|
|
||||||
template.New("perf_detail.html").Funcs(tmplFuncs).ParseFiles("build/perf_detail.html"),
|
|
||||||
)
|
|
@ -1,101 +0,0 @@
|
|||||||
<!doctype html>
|
|
||||||
<html>
|
|
||||||
<head>
|
|
||||||
<title>{{$.Dashboard.Name}} Dashboard</title>
|
|
||||||
<link rel="stylesheet" href="/static/style.css"/>
|
|
||||||
<script type="text/javascript">
|
|
||||||
function kindBuilder() {
|
|
||||||
document.getElementById('checkBuilder').checked = true;
|
|
||||||
document.getElementById('controlBuilder').style.display='inline';
|
|
||||||
document.getElementById('controlBenchmark').style.display='none';
|
|
||||||
}
|
|
||||||
function kindBenchmark() {
|
|
||||||
document.getElementById('checkBenchmark').checked = true;
|
|
||||||
document.getElementById('controlBenchmark').style.display='inline';
|
|
||||||
document.getElementById('controlBuilder').style.display='none';
|
|
||||||
}
|
|
||||||
window.onload = {{if $.KindBuilder}} kindBuilder {{else}} kindBenchmark {{end}};
|
|
||||||
</script>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<header id="topbar">
|
|
||||||
<h1>Go Dashboard</h1>
|
|
||||||
<nav>
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/">Test</a>
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/perf">Perf</a>
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/perfgraph">Graphs</a>
|
|
||||||
</nav>
|
|
||||||
<div class="clear"></div>
|
|
||||||
</header>
|
|
||||||
|
|
||||||
<div class="page">
|
|
||||||
<div class="diff-container">
|
|
||||||
<div class="diff-meta">
|
|
||||||
<form>
|
|
||||||
<div><b>New: </b><input type="edit" name="commit" value="{{$.Commit1.Hash}}" /> {{shortUser $.Commit1.User}} {{$.Commit1.Time.Format "Mon 02 Jan 15:04"}} {{shortDesc $.Commit1.Desc}} </div>
|
|
||||||
<div><b>Old: </b><input type="edit" name="commit0" value="{{$.Commit0.Hash}}" /> {{shortUser $.Commit0.User}} {{$.Commit0.Time.Format "Mon 02 Jan 15:04"}} {{shortDesc $.Commit0.Desc}} </div>
|
|
||||||
<div>
|
|
||||||
<input id="checkBuilder" type="radio" name="kind" value="builder" required onclick="kindBuilder()">builder</input>
|
|
||||||
<input id="checkBenchmark" type="radio" name="kind" value="benchmark" required onclick="kindBenchmark()">benchmark</input>
|
|
||||||
<select id="controlBuilder" name="builder">
|
|
||||||
{{range $.Config.Builders}}
|
|
||||||
<option {{if .Selected}}selected{{end}}>{{.Name}}</option>
|
|
||||||
{{end}}
|
|
||||||
</select>
|
|
||||||
<select id="controlBenchmark" name="benchmark">
|
|
||||||
{{range $.Config.Benchmarks}}
|
|
||||||
<option {{if .Selected}}selected{{end}}>{{.Name}}</option>
|
|
||||||
{{end}}
|
|
||||||
</select>
|
|
||||||
<input type="submit" value="Refresh" />
|
|
||||||
<a href="https://golang.org/wiki/PerfDashboard">Help</a>
|
|
||||||
</div>
|
|
||||||
</form>
|
|
||||||
</div>
|
|
||||||
<p></p>
|
|
||||||
|
|
||||||
{{range $b := $.Benchmarks}}
|
|
||||||
<div class="diff-benchmark">
|
|
||||||
<h2>{{$b.Name}}</h2>
|
|
||||||
{{range $p := $b.Procs}}
|
|
||||||
<div class="diff">
|
|
||||||
<h1>GOMAXPROCS={{$p.Procs}}</h1>
|
|
||||||
<table>
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th>Metric</th>
|
|
||||||
<th>old</th>
|
|
||||||
<th>new</th>
|
|
||||||
<th>delta</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{{range $m := $p.Metrics}}
|
|
||||||
<tr>
|
|
||||||
<td class="metric">{{$m.Name}}</td>
|
|
||||||
{{if $m.Link0}}
|
|
||||||
<td><a href="{{$.Dashboard.Prefix}}/{{$m.Link0}}">{{$m.Val0}}</td>
|
|
||||||
{{else}}
|
|
||||||
<td>{{$m.Val0}}</td>
|
|
||||||
{{end}}
|
|
||||||
{{if $m.Link1}}
|
|
||||||
<td><a href="{{$.Dashboard.Prefix}}/{{$m.Link1}}">{{$m.Val1}}</td>
|
|
||||||
{{else}}
|
|
||||||
<td>{{$m.Val1}}</td>
|
|
||||||
{{end}}
|
|
||||||
<td class="result"><span class="{{$m.Style}}">{{$m.Delta}}</span></td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
{{end}}
|
|
||||||
</div>
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
<div class="clear"></div>
|
|
||||||
</div>
|
|
||||||
<div class="clear"></div>
|
|
||||||
</div>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
@ -1,268 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"html/template"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
handleFunc("/perfgraph", perfGraphHandler)
|
|
||||||
}
|
|
||||||
|
|
||||||
func perfGraphHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
d := dashboardForRequest(r)
|
|
||||||
c := d.Context(appengine.NewContext(r))
|
|
||||||
pc, err := GetPerfConfig(c, r)
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
allBuilders := pc.BuildersForBenchmark("")
|
|
||||||
allBenchmarks := pc.BenchmarksForBuilder("")
|
|
||||||
allMetrics := pc.MetricsForBenchmark("")
|
|
||||||
allProcs := pc.ProcList("")
|
|
||||||
r.ParseForm()
|
|
||||||
selBuilders := r.Form["builder"]
|
|
||||||
selBenchmarks := r.Form["benchmark"]
|
|
||||||
selMetrics := r.Form["metric"]
|
|
||||||
selProcs := r.Form["procs"]
|
|
||||||
if len(selBuilders) == 0 {
|
|
||||||
selBuilders = append(selBuilders, allBuilders[0])
|
|
||||||
}
|
|
||||||
if len(selBenchmarks) == 0 {
|
|
||||||
selBenchmarks = append(selBenchmarks, "json")
|
|
||||||
}
|
|
||||||
if len(selMetrics) == 0 {
|
|
||||||
selMetrics = append(selMetrics, "time")
|
|
||||||
}
|
|
||||||
if len(selProcs) == 0 {
|
|
||||||
selProcs = append(selProcs, "1")
|
|
||||||
}
|
|
||||||
commitFrom := r.FormValue("commit-from")
|
|
||||||
if commitFrom == "" {
|
|
||||||
commitFrom = lastRelease
|
|
||||||
}
|
|
||||||
commitTo := r.FormValue("commit-to")
|
|
||||||
if commitTo == "" {
|
|
||||||
commitTo = "tip"
|
|
||||||
}
|
|
||||||
// TODO(dvyukov): validate input
|
|
||||||
|
|
||||||
// Figure out start and end commit from commitFrom/commitTo.
|
|
||||||
startCommitNum := 0
|
|
||||||
endCommitNum := 0
|
|
||||||
{
|
|
||||||
comFrom := &Commit{Hash: knownTags[commitFrom]}
|
|
||||||
if err := datastore.Get(c, comFrom.Key(c), comFrom); err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
startCommitNum = comFrom.Num
|
|
||||||
|
|
||||||
retry:
|
|
||||||
if commitTo == "tip" {
|
|
||||||
p, err := GetPackage(c, "")
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
endCommitNum = p.NextNum
|
|
||||||
} else {
|
|
||||||
comTo := &Commit{Hash: knownTags[commitTo]}
|
|
||||||
if err := datastore.Get(c, comTo.Key(c), comTo); err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
endCommitNum = comTo.Num + 1
|
|
||||||
}
|
|
||||||
if endCommitNum <= startCommitNum {
|
|
||||||
// User probably selected from:go1.3 to:go1.2. Fix go1.2 to tip.
|
|
||||||
if commitTo == "tip" {
|
|
||||||
logErr(w, r, fmt.Errorf("no commits to display (%v-%v)", commitFrom, commitTo))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
commitTo = "tip"
|
|
||||||
goto retry
|
|
||||||
}
|
|
||||||
}
|
|
||||||
commitsToDisplay := endCommitNum - startCommitNum
|
|
||||||
|
|
||||||
present := func(set []string, s string) bool {
|
|
||||||
for _, s1 := range set {
|
|
||||||
if s1 == s {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg := &uiPerfConfig{}
|
|
||||||
for _, v := range allBuilders {
|
|
||||||
cfg.Builders = append(cfg.Builders, uiPerfConfigElem{v, present(selBuilders, v)})
|
|
||||||
}
|
|
||||||
for _, v := range allBenchmarks {
|
|
||||||
cfg.Benchmarks = append(cfg.Benchmarks, uiPerfConfigElem{v, present(selBenchmarks, v)})
|
|
||||||
}
|
|
||||||
for _, v := range allMetrics {
|
|
||||||
cfg.Metrics = append(cfg.Metrics, uiPerfConfigElem{v, present(selMetrics, v)})
|
|
||||||
}
|
|
||||||
for _, v := range allProcs {
|
|
||||||
cfg.Procs = append(cfg.Procs, uiPerfConfigElem{strconv.Itoa(v), present(selProcs, strconv.Itoa(v))})
|
|
||||||
}
|
|
||||||
for k := range knownTags {
|
|
||||||
cfg.CommitsFrom = append(cfg.CommitsFrom, uiPerfConfigElem{k, commitFrom == k})
|
|
||||||
}
|
|
||||||
for k := range knownTags {
|
|
||||||
cfg.CommitsTo = append(cfg.CommitsTo, uiPerfConfigElem{k, commitTo == k})
|
|
||||||
}
|
|
||||||
cfg.CommitsTo = append(cfg.CommitsTo, uiPerfConfigElem{"tip", commitTo == "tip"})
|
|
||||||
|
|
||||||
var vals [][]float64
|
|
||||||
var hints [][]string
|
|
||||||
var annotations [][]string
|
|
||||||
var certainty [][]bool
|
|
||||||
var headers []string
|
|
||||||
commits2, err := GetCommits(c, startCommitNum, commitsToDisplay)
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, builder := range selBuilders {
|
|
||||||
for _, metric := range selMetrics {
|
|
||||||
for _, benchmark := range selBenchmarks {
|
|
||||||
for _, procs := range selProcs {
|
|
||||||
benchProcs := fmt.Sprintf("%v-%v", benchmark, procs)
|
|
||||||
vv, err := GetPerfMetricsForCommits(c, builder, benchProcs, metric, startCommitNum, commitsToDisplay)
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
hasdata := false
|
|
||||||
for _, v := range vv {
|
|
||||||
if v != 0 {
|
|
||||||
hasdata = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if hasdata {
|
|
||||||
noise := pc.NoiseLevel(builder, benchProcs, metric)
|
|
||||||
descBuilder := "/" + builder
|
|
||||||
descBenchmark := "/" + benchProcs
|
|
||||||
descMetric := "/" + metric
|
|
||||||
if len(selBuilders) == 1 {
|
|
||||||
descBuilder = ""
|
|
||||||
}
|
|
||||||
if len(selBenchmarks) == 1 && len(selProcs) == 1 {
|
|
||||||
descBenchmark = ""
|
|
||||||
}
|
|
||||||
if len(selMetrics) == 1 && (len(selBuilders) > 1 || len(selBenchmarks) > 1 || len(selProcs) > 1) {
|
|
||||||
descMetric = ""
|
|
||||||
}
|
|
||||||
desc := fmt.Sprintf("%v%v%v", descBuilder, descBenchmark, descMetric)[1:]
|
|
||||||
hh := make([]string, commitsToDisplay)
|
|
||||||
ann := make([]string, commitsToDisplay)
|
|
||||||
valf := make([]float64, commitsToDisplay)
|
|
||||||
cert := make([]bool, commitsToDisplay)
|
|
||||||
firstval := uint64(0)
|
|
||||||
lastval := uint64(0)
|
|
||||||
for i, v := range vv {
|
|
||||||
cert[i] = true
|
|
||||||
if v == 0 {
|
|
||||||
if lastval == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
cert[i] = false
|
|
||||||
v = lastval
|
|
||||||
}
|
|
||||||
if firstval == 0 {
|
|
||||||
firstval = v
|
|
||||||
}
|
|
||||||
valf[i] = float64(v) / float64(firstval)
|
|
||||||
if cert[i] {
|
|
||||||
d := ""
|
|
||||||
if lastval != 0 {
|
|
||||||
diff := perfDiff(lastval, v)
|
|
||||||
d = fmt.Sprintf(" (%+.02f%%)", diff)
|
|
||||||
if !isNoise(diff, noise) {
|
|
||||||
ann[i] = fmt.Sprintf("%+.02f%%", diff)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
hh[i] = fmt.Sprintf("%v%v", v, d)
|
|
||||||
} else {
|
|
||||||
hh[i] = "NO DATA"
|
|
||||||
}
|
|
||||||
lastval = v
|
|
||||||
}
|
|
||||||
vals = append(vals, valf)
|
|
||||||
hints = append(hints, hh)
|
|
||||||
annotations = append(annotations, ann)
|
|
||||||
certainty = append(certainty, cert)
|
|
||||||
headers = append(headers, desc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var commits []perfGraphCommit
|
|
||||||
if len(vals) != 0 && len(vals[0]) != 0 {
|
|
||||||
idx := 0
|
|
||||||
for i := range vals[0] {
|
|
||||||
com := commits2[i]
|
|
||||||
if com == nil || !com.NeedsBenchmarking {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
c := perfGraphCommit{Id: idx, Name: fmt.Sprintf("%v (%v)", com.Desc, com.Time.Format("Jan 2, 2006 1:04"))}
|
|
||||||
idx++
|
|
||||||
for j := range vals {
|
|
||||||
c.Vals = append(c.Vals, perfGraphValue{float64(vals[j][i]), certainty[j][i], hints[j][i], annotations[j][i]})
|
|
||||||
}
|
|
||||||
commits = append(commits, c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
data := &perfGraphData{d, cfg, headers, commits}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := perfGraphTemplate.Execute(&buf, data); err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteTo(w)
|
|
||||||
}
|
|
||||||
|
|
||||||
var perfGraphTemplate = template.Must(
|
|
||||||
template.New("perf_graph.html").ParseFiles("build/perf_graph.html"),
|
|
||||||
)
|
|
||||||
|
|
||||||
type perfGraphData struct {
|
|
||||||
Dashboard *Dashboard
|
|
||||||
Config *uiPerfConfig
|
|
||||||
Headers []string
|
|
||||||
Commits []perfGraphCommit
|
|
||||||
}
|
|
||||||
|
|
||||||
type perfGraphCommit struct {
|
|
||||||
Id int
|
|
||||||
Name string
|
|
||||||
Vals []perfGraphValue
|
|
||||||
}
|
|
||||||
|
|
||||||
type perfGraphValue struct {
|
|
||||||
Val float64
|
|
||||||
Certainty bool
|
|
||||||
Hint string
|
|
||||||
Ann string
|
|
||||||
}
|
|
@ -1,120 +0,0 @@
|
|||||||
<!doctype html>
|
|
||||||
<html>
|
|
||||||
<head>
|
|
||||||
<title>{{$.Dashboard.Name}} Dashboard</title>
|
|
||||||
<link rel="stylesheet" href="/static/style.css"/>
|
|
||||||
<style>
|
|
||||||
.graph-container { background: #eee; }
|
|
||||||
</style>
|
|
||||||
|
|
||||||
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
|
|
||||||
<script type="text/javascript">
|
|
||||||
google.load("visualization", "1", {packages:["corechart"]});
|
|
||||||
google.setOnLoadCallback(drawCharts);
|
|
||||||
function drawCharts() {
|
|
||||||
var data = new google.visualization.DataTable();
|
|
||||||
data.addColumn({type: 'number', label: 'Commit'});
|
|
||||||
data.addColumn({type: 'number'});
|
|
||||||
data.addColumn({type: 'string', role: 'tooltip'});
|
|
||||||
{{range $.Headers}}
|
|
||||||
data.addColumn({type: 'number', label: '{{.}}'});
|
|
||||||
data.addColumn({type: 'boolean', role: 'certainty'});
|
|
||||||
data.addColumn({type: 'string', role: 'tooltip'});
|
|
||||||
data.addColumn({type: 'string', role: 'annotation'});
|
|
||||||
{{end}}
|
|
||||||
data.addRows([
|
|
||||||
{{range $.Commits}}
|
|
||||||
[ {{.Id}}, 1, "{{.Name}}",
|
|
||||||
{{range .Vals}}
|
|
||||||
{{if .Val}}
|
|
||||||
{{.Val}}, {{.Certainty}}, '{{.Hint}}', '{{.Ann}}',
|
|
||||||
{{else}}
|
|
||||||
,,,,
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
],
|
|
||||||
{{end}}
|
|
||||||
]);
|
|
||||||
new google.visualization.LineChart(document.getElementById('graph_div')).
|
|
||||||
draw(data, {
|
|
||||||
width: "100%",
|
|
||||||
height: 700,
|
|
||||||
legend: {position: "bottom"},
|
|
||||||
focusTarget: "category",
|
|
||||||
hAxis: {textPosition: "none"},
|
|
||||||
chartArea: {left: "10%", top: "5%", width: "85%", height:"80%"},
|
|
||||||
explorer: {axis: 'horizontal', maxZoomIn: 0, maxZoomOut: 1, zoomDelta: 1.2, keepInBounds: true}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
</script>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
|
|
||||||
<header id="topbar">
|
|
||||||
<h1>Go Dashboard</h1>
|
|
||||||
<nav>
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/">Test</a>
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/perf">Perf</a>
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/perfgraph">Graphs</a>
|
|
||||||
</nav>
|
|
||||||
<div class="clear"></div>
|
|
||||||
</header>
|
|
||||||
|
|
||||||
<div class="page">
|
|
||||||
<div id="graph_div" class="main-content graph-container">
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<aside>
|
|
||||||
<form>
|
|
||||||
<div class="panel">
|
|
||||||
<h1>Builders</h1>
|
|
||||||
{{range $.Config.Builders}}
|
|
||||||
<input type="checkbox" name="builder" value="{{.Name}}" {{if .Selected}}checked{{end}}>{{.Name}}</input><br>
|
|
||||||
{{end}}
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="panel">
|
|
||||||
<h1>Benchmarks</h1>
|
|
||||||
{{range $.Config.Benchmarks}}
|
|
||||||
<input type="checkbox" name="benchmark" value="{{.Name}}" {{if .Selected}}checked{{end}}>{{.Name}}</input><br>
|
|
||||||
{{end}}
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="panel">
|
|
||||||
<h1>Procs</h1>
|
|
||||||
{{range $.Config.Procs}}
|
|
||||||
<input type="checkbox" name="procs" value="{{.Name}}" {{if .Selected}}checked{{end}}>{{.Name}}</input><br>
|
|
||||||
{{end}}
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="panel">
|
|
||||||
<h1>Metrics</h1>
|
|
||||||
{{range $.Config.Metrics}}
|
|
||||||
<input type="checkbox" name="metric" value="{{.Name}}" {{if .Selected}}checked{{end}}>{{.Name}}</input><br>
|
|
||||||
{{end}}
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="panel">
|
|
||||||
<h1>Commits</h1>
|
|
||||||
<b>From:</b>
|
|
||||||
<select required name="commit-from">
|
|
||||||
{{range $.Config.CommitsFrom}}
|
|
||||||
<option {{if .Selected}}selected{{end}}>{{.Name}}</option>
|
|
||||||
{{end}}
|
|
||||||
</select>
|
|
||||||
<b>To:</b>
|
|
||||||
<select required name="commit-to">
|
|
||||||
{{range $.Config.CommitsTo}}
|
|
||||||
<option {{if .Selected}}selected{{end}}>{{.Name}}</option>
|
|
||||||
{{end}}
|
|
||||||
</select>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<input class="button" type="submit" value="Refresh" name="refresh"/>
|
|
||||||
<a href="https://golang.org/wiki/PerfDashboard">Help</a>
|
|
||||||
</form>
|
|
||||||
</aside>
|
|
||||||
<div class="clear"></div>
|
|
||||||
</div>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
@ -1,186 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"html/template"
|
|
||||||
"net/http"
|
|
||||||
"sort"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
handleFunc("/perflearn", perfLearnHandler)
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
learnPercentile = 0.95
|
|
||||||
learnSignalMultiplier = 1.1
|
|
||||||
learnMinSignal = 0.5
|
|
||||||
)
|
|
||||||
|
|
||||||
func perfLearnHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
d := dashboardForRequest(r)
|
|
||||||
c := d.Context(appengine.NewContext(r))
|
|
||||||
|
|
||||||
pc, err := GetPerfConfig(c, r)
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
p, err := GetPackage(c, "")
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
update := r.FormValue("update") != ""
|
|
||||||
noise := make(map[string]string)
|
|
||||||
|
|
||||||
data := &perfLearnData{}
|
|
||||||
|
|
||||||
commits, err := GetCommits(c, 0, p.NextNum)
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, builder := range pc.BuildersForBenchmark("") {
|
|
||||||
for _, benchmark := range pc.BenchmarksForBuilder(builder) {
|
|
||||||
for _, metric := range pc.MetricsForBenchmark(benchmark) {
|
|
||||||
for _, procs := range pc.ProcList(builder) {
|
|
||||||
values, err := GetPerfMetricsForCommits(c, builder, fmt.Sprintf("%v-%v", benchmark, procs), metric, 0, p.NextNum)
|
|
||||||
if err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var dd []float64
|
|
||||||
last := uint64(0)
|
|
||||||
for i, v := range values {
|
|
||||||
if v == 0 {
|
|
||||||
if com := commits[i]; com == nil || com.NeedsBenchmarking {
|
|
||||||
last = 0
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if last != 0 {
|
|
||||||
v1 := v
|
|
||||||
if v1 < last {
|
|
||||||
v1, last = last, v1
|
|
||||||
}
|
|
||||||
diff := float64(v1)/float64(last)*100 - 100
|
|
||||||
dd = append(dd, diff)
|
|
||||||
}
|
|
||||||
last = v
|
|
||||||
}
|
|
||||||
if len(dd) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
sort.Float64s(dd)
|
|
||||||
|
|
||||||
baseIdx := int(float64(len(dd)) * learnPercentile)
|
|
||||||
baseVal := dd[baseIdx]
|
|
||||||
signalVal := baseVal * learnSignalMultiplier
|
|
||||||
if signalVal < learnMinSignal {
|
|
||||||
signalVal = learnMinSignal
|
|
||||||
}
|
|
||||||
signalIdx := -1
|
|
||||||
noiseNum := 0
|
|
||||||
signalNum := 0
|
|
||||||
|
|
||||||
var diffs []*perfLearnDiff
|
|
||||||
for i, d := range dd {
|
|
||||||
if d > 3*signalVal {
|
|
||||||
d = 3 * signalVal
|
|
||||||
}
|
|
||||||
diffs = append(diffs, &perfLearnDiff{Num: i, Val: d})
|
|
||||||
if signalIdx == -1 && d >= signalVal {
|
|
||||||
signalIdx = i
|
|
||||||
}
|
|
||||||
if d < signalVal {
|
|
||||||
noiseNum++
|
|
||||||
} else {
|
|
||||||
signalNum++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
diffs[baseIdx].Hint = "95%"
|
|
||||||
if signalIdx != -1 {
|
|
||||||
diffs[signalIdx].Hint = "signal"
|
|
||||||
}
|
|
||||||
diffs = diffs[len(diffs)*4/5:]
|
|
||||||
name := fmt.Sprintf("%v/%v-%v/%v", builder, benchmark, procs, metric)
|
|
||||||
data.Entries = append(data.Entries, &perfLearnEntry{len(data.Entries), name, baseVal, noiseNum, signalVal, signalNum, diffs})
|
|
||||||
|
|
||||||
if len(dd) >= 100 || r.FormValue("force") != "" {
|
|
||||||
nname := fmt.Sprintf("%v|%v-%v", builder, benchmark, procs)
|
|
||||||
n := noise[nname] + fmt.Sprintf("|%v=%.2f", metric, signalVal)
|
|
||||||
noise[nname] = n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if update {
|
|
||||||
var noiseLevels []string
|
|
||||||
for k, v := range noise {
|
|
||||||
noiseLevels = append(noiseLevels, k+v)
|
|
||||||
}
|
|
||||||
tx := func(c appengine.Context) error {
|
|
||||||
pc, err := GetPerfConfig(c, r)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
pc.NoiseLevels = noiseLevels
|
|
||||||
if _, err := datastore.Put(c, PerfConfigKey(c), pc); err != nil {
|
|
||||||
return fmt.Errorf("putting PerfConfig: %v", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if err := datastore.RunInTransaction(c, tx, nil); err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := perfLearnTemplate.Execute(&buf, data); err != nil {
|
|
||||||
logErr(w, r, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteTo(w)
|
|
||||||
}
|
|
||||||
|
|
||||||
var perfLearnTemplate = template.Must(
|
|
||||||
template.New("perf_learn.html").Funcs(tmplFuncs).ParseFiles("build/perf_learn.html"),
|
|
||||||
)
|
|
||||||
|
|
||||||
type perfLearnData struct {
|
|
||||||
Entries []*perfLearnEntry
|
|
||||||
}
|
|
||||||
|
|
||||||
type perfLearnEntry struct {
|
|
||||||
Num int
|
|
||||||
Name string
|
|
||||||
BaseVal float64
|
|
||||||
NoiseNum int
|
|
||||||
SignalVal float64
|
|
||||||
SignalNum int
|
|
||||||
Diffs []*perfLearnDiff
|
|
||||||
}
|
|
||||||
|
|
||||||
type perfLearnDiff struct {
|
|
||||||
Num int
|
|
||||||
Val float64
|
|
||||||
Hint string
|
|
||||||
}
|
|
@ -1,45 +0,0 @@
|
|||||||
<!doctype html>
|
|
||||||
<html>
|
|
||||||
<head>
|
|
||||||
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
|
|
||||||
<script type="text/javascript">
|
|
||||||
google.load("visualization", "1", {packages:["corechart"]});
|
|
||||||
google.setOnLoadCallback(drawCharts);
|
|
||||||
function drawCharts() {
|
|
||||||
{
|
|
||||||
{{range $ent := $.Entries}}
|
|
||||||
var data = new google.visualization.DataTable();
|
|
||||||
data.addColumn('number', 'idx');
|
|
||||||
data.addColumn('number', '95%');
|
|
||||||
data.addColumn({type: 'boolean', role: 'certainty'});
|
|
||||||
data.addColumn('number', 'signal');
|
|
||||||
data.addColumn({type: 'boolean', role: 'certainty'});
|
|
||||||
data.addColumn('number', 'diff');
|
|
||||||
data.addColumn({type: 'string', role: 'annotation'});
|
|
||||||
data.addRows([
|
|
||||||
{{range .Diffs}} [{{.Num}}, {{$ent.BaseVal}}, false, {{$ent.SignalVal}}, false, {{.Val}}, '{{.Hint}}'], {{end}}
|
|
||||||
]);
|
|
||||||
new google.visualization.LineChart(document.getElementById('graph{{.Num}}')).
|
|
||||||
draw(data, {
|
|
||||||
width: 600,
|
|
||||||
height: 200,
|
|
||||||
legend: {position: "none"},
|
|
||||||
vAxis: {minValue: 0},
|
|
||||||
chartArea: {left: "10%", top: "1%", width: "90%", height:"95%"}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
{{end}}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
</script>
|
|
||||||
</head>
|
|
||||||
|
|
||||||
<body>
|
|
||||||
{{range $.Entries}}
|
|
||||||
<p>
|
|
||||||
{{.Name}}: base={{printf "%.2f[%d]" .BaseVal .NoiseNum}} signal={{printf "%.2f[%d]" .SignalVal .SignalNum}}
|
|
||||||
<div id="graph{{.Num}}" width="100px" height="100px"> </div>
|
|
||||||
</p>
|
|
||||||
{{end}}
|
|
||||||
</body>
|
|
||||||
</html>
|
|
@ -1,13 +0,0 @@
|
|||||||
{{if .Commit}}Change {{shortHash .Commit.Hash}} caused perf changes on {{.Builder}}:
|
|
||||||
|
|
||||||
{{.Commit.Desc}}
|
|
||||||
|
|
||||||
http://code.google.com/p/go/source/detail?r={{shortHash .Commit.Hash}}
|
|
||||||
{{else}}This changed caused perf changes on {{.Builder}}:
|
|
||||||
{{end}}
|
|
||||||
{{range $b := .Benchmarks}}
|
|
||||||
{{printf "%-16s %12s %12s %10s" $b.Name "old" "new" "delta"}}
|
|
||||||
{{range $m := $b.Metrics}}{{printf "%-16s %12v %12v %+10.2f" $m.Name $m.Old $m.New $m.Delta}}
|
|
||||||
{{end}}{{end}}
|
|
||||||
{{.Url}}
|
|
||||||
|
|
@ -1,378 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
// TODO(adg): test authentication
|
|
||||||
// TODO(adg): refactor to use appengine/aetest instead
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
handleFunc("/buildtest", testHandler)
|
|
||||||
}
|
|
||||||
|
|
||||||
var testEntityKinds = []string{
|
|
||||||
"Package",
|
|
||||||
"Commit",
|
|
||||||
"CommitRun",
|
|
||||||
"Result",
|
|
||||||
"PerfResult",
|
|
||||||
"PerfMetricRun",
|
|
||||||
"PerfConfig",
|
|
||||||
"PerfTodo",
|
|
||||||
"Log",
|
|
||||||
}
|
|
||||||
|
|
||||||
const testPkg = "golang.org/x/test"
|
|
||||||
|
|
||||||
var testPackage = &Package{Name: "Test", Kind: "subrepo", Path: testPkg}
|
|
||||||
|
|
||||||
var testPackages = []*Package{
|
|
||||||
{Name: "Go", Path: ""},
|
|
||||||
testPackage,
|
|
||||||
}
|
|
||||||
|
|
||||||
var tCommitTime = time.Now().Add(-time.Hour * 24 * 7)
|
|
||||||
|
|
||||||
func tCommit(hash, parentHash, path string, bench bool) *Commit {
|
|
||||||
tCommitTime.Add(time.Hour) // each commit should have a different time
|
|
||||||
return &Commit{
|
|
||||||
PackagePath: path,
|
|
||||||
Hash: hash,
|
|
||||||
ParentHash: parentHash,
|
|
||||||
Time: tCommitTime,
|
|
||||||
User: "adg",
|
|
||||||
Desc: "change description " + hash,
|
|
||||||
NeedsBenchmarking: bench,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var testRequests = []struct {
|
|
||||||
path string
|
|
||||||
vals url.Values
|
|
||||||
req interface{}
|
|
||||||
res interface{}
|
|
||||||
}{
|
|
||||||
// Packages
|
|
||||||
{"/packages", url.Values{"kind": {"subrepo"}}, nil, []*Package{testPackage}},
|
|
||||||
|
|
||||||
// Go repo
|
|
||||||
{"/commit", nil, tCommit("0001", "0000", "", true), nil},
|
|
||||||
{"/commit", nil, tCommit("0002", "0001", "", false), nil},
|
|
||||||
{"/commit", nil, tCommit("0003", "0002", "", true), nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
|
|
||||||
{"/result", nil, &Result{Builder: "linux-386", Hash: "0001", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
|
|
||||||
{"/result", nil, &Result{Builder: "linux-386", Hash: "0002", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
|
|
||||||
|
|
||||||
// Other builders, to test the UI.
|
|
||||||
{"/result", nil, &Result{Builder: "linux-amd64", Hash: "0001", OK: true}, nil},
|
|
||||||
{"/result", nil, &Result{Builder: "linux-amd64-race", Hash: "0001", OK: true}, nil},
|
|
||||||
{"/result", nil, &Result{Builder: "netbsd-386", Hash: "0001", OK: true}, nil},
|
|
||||||
{"/result", nil, &Result{Builder: "plan9-386", Hash: "0001", OK: true}, nil},
|
|
||||||
{"/result", nil, &Result{Builder: "windows-386", Hash: "0001", OK: true}, nil},
|
|
||||||
{"/result", nil, &Result{Builder: "windows-amd64", Hash: "0001", OK: true}, nil},
|
|
||||||
{"/result", nil, &Result{Builder: "windows-amd64-race", Hash: "0001", OK: true}, nil},
|
|
||||||
{"/result", nil, &Result{Builder: "linux-amd64-temp", Hash: "0001", OK: true}, nil},
|
|
||||||
|
|
||||||
// multiple builders
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
|
|
||||||
{"/result", nil, &Result{Builder: "linux-amd64", Hash: "0003", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0002"}}},
|
|
||||||
|
|
||||||
// branches
|
|
||||||
{"/commit", nil, tCommit("0004", "0003", "", false), nil},
|
|
||||||
{"/commit", nil, tCommit("0005", "0002", "", false), nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
|
|
||||||
{"/result", nil, &Result{Builder: "linux-386", Hash: "0005", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0004"}}},
|
|
||||||
{"/result", nil, &Result{Builder: "linux-386", Hash: "0004", OK: false}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
|
|
||||||
|
|
||||||
// logs
|
|
||||||
{"/result", nil, &Result{Builder: "linux-386", Hash: "0003", OK: false, Log: "test"}, nil},
|
|
||||||
{"/log/a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", nil, nil, "test"},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, nil},
|
|
||||||
|
|
||||||
// repeat failure (shouldn't re-send mail)
|
|
||||||
{"/result", nil, &Result{Builder: "linux-386", Hash: "0003", OK: false, Log: "test"}, nil},
|
|
||||||
|
|
||||||
// non-Go repos
|
|
||||||
{"/commit", nil, tCommit("1001", "0000", testPkg, false), nil},
|
|
||||||
{"/commit", nil, tCommit("1002", "1001", testPkg, false), nil},
|
|
||||||
{"/commit", nil, tCommit("1003", "1002", testPkg, false), nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1003"}}},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1003", GoHash: "0001", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1002"}}},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1002", GoHash: "0001", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1001"}}},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0001", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0002"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1003"}}},
|
|
||||||
|
|
||||||
// re-build Go revision for stale subrepos
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0005", OK: false, Log: "boo"}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, nil},
|
|
||||||
|
|
||||||
// benchmarks
|
|
||||||
// build-go-commit must have precedence over benchmark-go-commit
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
|
|
||||||
// drain build-go-commit todo
|
|
||||||
{"/result", nil, &Result{Builder: "linux-amd64", Hash: "0005", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0004"}}},
|
|
||||||
{"/result", nil, &Result{Builder: "linux-amd64", Hash: "0004", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0002"}}},
|
|
||||||
{"/result", nil, &Result{Builder: "linux-amd64", Hash: "0002", OK: true}, nil},
|
|
||||||
// drain sub-repo todos
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1001", GoHash: "0005", OK: false}, nil},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1002", GoHash: "0005", OK: false}, nil},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1003", GoHash: "0005", OK: false}, nil},
|
|
||||||
// now we must get benchmark todo
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003", PerfResults: []string{}}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "http", Hash: "0003", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003", PerfResults: []string{"http"}}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "json", Hash: "0003", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003", PerfResults: []string{"http", "json"}}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0003", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0001", PerfResults: []string{}}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "http", Hash: "0001", OK: true}, nil},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0001", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, nil},
|
|
||||||
// create new commit, it must appear in todo
|
|
||||||
{"/commit", nil, tCommit("0006", "0005", "", true), nil},
|
|
||||||
// drain build-go-commit todo
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0006"}}},
|
|
||||||
{"/result", nil, &Result{Builder: "linux-amd64", Hash: "0006", OK: true}, nil},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1003", GoHash: "0006", OK: false}, nil},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1002", GoHash: "0006", OK: false}, nil},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1001", GoHash: "0006", OK: false}, nil},
|
|
||||||
// now we must get benchmark todo
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0006", PerfResults: []string{}}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "http", Hash: "0006", OK: true}, nil},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0006", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, nil},
|
|
||||||
// create new benchmark, all commits must re-appear in todo
|
|
||||||
{"/commit", nil, tCommit("0007", "0006", "", true), nil},
|
|
||||||
// drain build-go-commit todo
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0007"}}},
|
|
||||||
{"/result", nil, &Result{Builder: "linux-amd64", Hash: "0007", OK: true}, nil},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1003", GoHash: "0007", OK: false}, nil},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1002", GoHash: "0007", OK: false}, nil},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1001", GoHash: "0007", OK: false}, nil},
|
|
||||||
// now we must get benchmark todo
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0007", PerfResults: []string{}}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "bson", Hash: "0007", OK: true}, nil},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0007", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0007", PerfResults: []string{"bson"}}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0007", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0006", PerfResults: []string{"http"}}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0006", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0001", PerfResults: []string{"http"}}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0001", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003", PerfResults: []string{"http", "json"}}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0003", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, nil},
|
|
||||||
// attach second builder
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0007"}}},
|
|
||||||
// drain build-go-commit todo
|
|
||||||
{"/result", nil, &Result{Builder: "linux-386", Hash: "0007", OK: true}, nil},
|
|
||||||
{"/result", nil, &Result{Builder: "linux-386", Hash: "0006", OK: true}, nil},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1003", GoHash: "0007", OK: false}, nil},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1002", GoHash: "0007", OK: false}, nil},
|
|
||||||
{"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0007", OK: false}, nil},
|
|
||||||
// now we must get benchmark todo
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0007"}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-386", Benchmark: "meta-done", Hash: "0007", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0006"}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-386", Benchmark: "meta-done", Hash: "0006", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0001"}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-386", Benchmark: "meta-done", Hash: "0001", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003"}}},
|
|
||||||
{"/perf-result", nil, &PerfRequest{Builder: "linux-386", Benchmark: "meta-done", Hash: "0003", OK: true}, nil},
|
|
||||||
{"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, nil},
|
|
||||||
}
|
|
||||||
|
|
||||||
// testHandler drives the dashboard's end-to-end test suite. It replays the
// testRequests table against the live HTTP mux and compares responses.
// It refuses to run outside the dev_appserver, wipes the test entity kinds
// first, and (if "nukeonly" is set) stops after the wipe.
func testHandler(w http.ResponseWriter, r *http.Request) {
	if !appengine.IsDevAppServer() {
		fmt.Fprint(w, "These tests must be run under the dev_appserver.")
		return
	}
	c := appengine.NewContext(r)
	// Start from a clean datastore so the replayed requests are deterministic.
	if err := nukeEntities(c, testEntityKinds); err != nil {
		logErr(w, r, err)
		return
	}
	if r.FormValue("nukeonly") != "" {
		fmt.Fprint(w, "OK")
		return
	}

	// Seed the datastore with the test packages.
	for _, p := range testPackages {
		if _, err := datastore.Put(c, p.Key(c), p); err != nil {
			logErr(w, r, err)
			return
		}
	}

	origReq := *r
	defer func() {
		// HACK: We need to clobber the original request (see below)
		// so make sure we fix it before exiting the handler.
		*r = origReq
	}()
	for i, t := range testRequests {
		c.Infof("running test %d %s vals='%q' req='%q' res='%q'", i, t.path, t.vals, t.req, t.res)
		// errorf reports a test failure prefixed with the test index and path.
		errorf := func(format string, args ...interface{}) {
			fmt.Fprintf(w, "%d %s: ", i, t.path)
			fmt.Fprintf(w, format, args...)
			fmt.Fprintln(w)
		}
		// JSON-encode the request payload, if any, as the POST body.
		var body io.ReadWriter
		if t.req != nil {
			body = new(bytes.Buffer)
			json.NewEncoder(body).Encode(t.req)
		}
		// All test requests use protocol version 2.
		url := "http://" + domain + t.path
		if t.vals != nil {
			url += "?" + t.vals.Encode() + "&version=2"
		} else {
			url += "?version=2"
		}
		req, err := http.NewRequest("POST", url, body)
		if err != nil {
			logErr(w, r, err)
			return
		}
		if t.req != nil {
			req.Method = "POST"
		}
		req.Header = origReq.Header
		rec := httptest.NewRecorder()

		// Make the request
		*r = *req // HACK: App Engine uses the request pointer
		// as a map key to resolve Contexts.
		http.DefaultServeMux.ServeHTTP(rec, r)

		if rec.Code != 0 && rec.Code != 200 {
			errorf(rec.Body.String())
			return
		}
		c.Infof("response='%v'", rec.Body.String())
		resp := new(dashResponse)

		// If we're expecting a *Todo value,
		// prime the Response field with a Todo and a Commit inside it.
		if t.path == "/todo" {
			resp.Response = &Todo{Data: &Commit{}}
		}

		// /log responses are raw text; everything else is JSON.
		if strings.HasPrefix(t.path, "/log/") {
			resp.Response = rec.Body.String()
		} else {
			err := json.NewDecoder(rec.Body).Decode(resp)
			if err != nil {
				errorf("decoding response: %v", err)
				return
			}
		}
		// Expected string response (the /log case).
		if e, ok := t.res.(string); ok {
			g, ok := resp.Response.(string)
			if !ok {
				errorf("Response not string: %T", resp.Response)
				return
			}
			if g != e {
				errorf("response mismatch: got %q want %q", g, e)
				return
			}
		}
		// Expected *Todo response: compare kind, commit hash, and perf results.
		if e, ok := t.res.(*Todo); ok {
			g, ok := resp.Response.(*Todo)
			if !ok {
				errorf("Response not *Todo: %T", resp.Response)
				return
			}
			if e.Data == nil && g.Data != nil {
				errorf("Response.Data should be nil, got: %v", g.Data)
				return
			}
			if g.Data == nil {
				errorf("Response.Data is nil, want: %v", e.Data)
				return
			}
			gd, ok := g.Data.(*Commit)
			if !ok {
				errorf("Response.Data not *Commit: %T", g.Data)
				return
			}
			if g.Kind != e.Kind {
				errorf("kind don't match: got %q, want %q", g.Kind, e.Kind)
				return
			}
			ed := e.Data.(*Commit)
			if ed.Hash != gd.Hash {
				errorf("hashes don't match: got %q, want %q", gd.Hash, ed.Hash)
				return
			}
			if len(gd.PerfResults) != len(ed.PerfResults) {
				errorf("result data len don't match: got %v, want %v", len(gd.PerfResults), len(ed.PerfResults))
				return
			}
			for i := range gd.PerfResults {
				if gd.PerfResults[i] != ed.PerfResults[i] {
					errorf("result data %v don't match: got %v, want %v", i, gd.PerfResults[i], ed.PerfResults[i])
					return
				}
			}
		}
		if t.res == nil && resp.Response != nil {
			errorf("response mismatch: got %q expected <nil>", resp.Response)
			return
		}
	}
	fmt.Fprint(w, "PASS\nYou should see only one mail notification (for 0003/linux-386) in the dev_appserver logs.")
}
|
|
||||||
|
|
||||||
// nukeEntities deletes every datastore entity of the given kinds.
// As a safety measure it refuses to run outside the dev_appserver.
func nukeEntities(c appengine.Context, kinds []string) error {
	if !appengine.IsDevAppServer() {
		return errors.New("can't nuke production data")
	}
	var keys []*datastore.Key
	for _, kind := range kinds {
		// Keys-only query: we only need the keys to delete.
		q := datastore.NewQuery(kind).KeysOnly()
		for t := q.Run(c); ; {
			// nil destination is fine for a keys-only query.
			k, err := t.Next(nil)
			if err == datastore.Done {
				break
			}
			if err != nil {
				return err
			}
			keys = append(keys, k)
		}
	}
	return datastore.DeleteMulti(c, keys)
}
|
|
@ -1,605 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// TODO(adg): packages at weekly/release
|
|
||||||
// TODO(adg): some means to register new packages
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"html/template"
|
|
||||||
"net/http"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/tools/dashboard/types"
|
|
||||||
|
|
||||||
"cache"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
"appengine/memcache"
|
|
||||||
)
|
|
||||||
|
|
||||||
// init registers the dashboard UI handler at the root path.
func init() {
	handleFunc("/", uiHandler)
}
|
|
||||||
|
|
||||||
// uiHandler draws the build status page.
// Rendered pages are cached under a key derived from the page number,
// branch, and repo; "mode=failures" and "mode=json" bypass the HTML
// template and are not cached here.
func uiHandler(w http.ResponseWriter, r *http.Request) {
	d := dashboardForRequest(r)
	c := d.Context(appengine.NewContext(r))
	now := cache.Now(c)
	key := "build-ui"

	// Negative page numbers are clamped to 0.
	page, _ := strconv.Atoi(r.FormValue("page"))
	if page < 0 {
		page = 0
	}
	key += fmt.Sprintf("-page%v", page)

	branch := r.FormValue("branch")
	if branch != "" {
		key += "-branch-" + branch
	}

	repo := r.FormValue("repo")
	if repo != "" {
		key += "-repo-" + repo
	}

	// Serve a cached rendering if one is fresh.
	var b []byte
	if cache.Get(r, now, key, &b) {
		w.Write(b)
		return
	}

	pkg := &Package{} // empty package is the main repository
	if repo != "" {
		var err error
		pkg, err = GetPackage(c, repo)
		if err != nil {
			logErr(w, r, err)
			return
		}
	}
	commits, err := dashCommits(c, pkg, page, branch)
	if err != nil {
		logErr(w, r, err)
		return
	}
	builders := commitBuilders(commits)

	// tipState stays nil except on the first page of the main repo's
	// default-branch view.
	var tipState *TagState
	if pkg.Kind == "" && page == 0 && (branch == "" || branch == "default") {
		// only show sub-repo state on first page of normal repo view
		tipState, err = TagStateByName(c, "tip")
		if err != nil {
			logErr(w, r, err)
			return
		}
	}

	p := &Pagination{}
	if len(commits) == commitsPerPage {
		p.Next = page + 1
	}
	if page > 0 {
		p.Prev = page - 1
		p.HasPrev = true
	}
	data := &uiTemplateData{d, pkg, commits, builders, tipState, p, branch}
	data.populateBuildingURLs(c)

	// Alternative output modes skip the HTML template entirely.
	switch r.FormValue("mode") {
	case "failures":
		failuresHandler(w, r, data)
		return
	case "json":
		jsonHandler(w, r, data)
		return
	}

	// Render to a buffer first so a template error never emits a partial page.
	var buf bytes.Buffer
	if err := uiTemplate.Execute(&buf, data); err != nil {
		logErr(w, r, err)
		return
	}

	cache.Set(r, now, key, buf.Bytes())

	buf.WriteTo(w)
}
|
|
||||||
|
|
||||||
// failuresHandler is https://build.golang.org/?mode=failures , where it outputs
// one line per failure on the front page, in the form:
//	hash builder failure-url
// Only failed results that have a log are reported.
func failuresHandler(w http.ResponseWriter, r *http.Request, data *uiTemplateData) {
	w.Header().Set("Content-Type", "text/plain")
	d := dashboardForRequest(r)
	for _, c := range data.Commits {
		for _, b := range data.Builders {
			res := c.Result(b, "")
			// Skip missing results, passes, and failures without a log.
			if res == nil || res.OK || res.LogHash == "" {
				continue
			}
			url := fmt.Sprintf("https://%v%v/log/%v", r.Host, d.Prefix, res.LogHash)
			fmt.Fprintln(w, c.Hash, b, url)
		}
	}
}
|
|
||||||
|
|
||||||
// jsonHandler is https://build.golang.org/?mode=json
|
|
||||||
// The output is a types.BuildStatus JSON object.
|
|
||||||
func jsonHandler(w http.ResponseWriter, r *http.Request, data *uiTemplateData) {
|
|
||||||
d := dashboardForRequest(r)
|
|
||||||
|
|
||||||
// cell returns one of "" (no data), "ok", or a failure URL.
|
|
||||||
cell := func(res *Result) string {
|
|
||||||
switch {
|
|
||||||
case res == nil:
|
|
||||||
return ""
|
|
||||||
case res.OK:
|
|
||||||
return "ok"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("https://%v%v/log/%v", r.Host, d.Prefix, res.LogHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
var res types.BuildStatus
|
|
||||||
res.Builders = data.Builders
|
|
||||||
|
|
||||||
// First the commits from the main section (the "go" repo)
|
|
||||||
for _, c := range data.Commits {
|
|
||||||
rev := types.BuildRevision{
|
|
||||||
Repo: "go",
|
|
||||||
Revision: c.Hash,
|
|
||||||
Results: make([]string, len(data.Builders)),
|
|
||||||
}
|
|
||||||
for i, b := range data.Builders {
|
|
||||||
rev.Results[i] = cell(c.Result(b, ""))
|
|
||||||
}
|
|
||||||
res.Revisions = append(res.Revisions, rev)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Then the one commit each for the subrepos.
|
|
||||||
// TODO(bradfitz): we'll probably want more than one later, for people looking at
|
|
||||||
// the subrepo-specific build history pages. But for now this gets me some data
|
|
||||||
// to make forward progress.
|
|
||||||
tip := data.TipState // a TagState
|
|
||||||
for _, pkgState := range tip.Packages {
|
|
||||||
goRev := tip.Tag.Hash
|
|
||||||
rev := types.BuildRevision{
|
|
||||||
Repo: pkgState.Package.Name,
|
|
||||||
Revision: pkgState.Commit.Hash,
|
|
||||||
GoRevision: goRev,
|
|
||||||
Results: make([]string, len(data.Builders)),
|
|
||||||
}
|
|
||||||
for i, b := range res.Builders {
|
|
||||||
rev.Results[i] = cell(pkgState.Commit.Result(b, goRev))
|
|
||||||
}
|
|
||||||
res.Revisions = append(res.Revisions, rev)
|
|
||||||
}
|
|
||||||
|
|
||||||
v, _ := json.MarshalIndent(res, "", "\t")
|
|
||||||
w.Header().Set("Content-Type", "text/json; charset=utf-8")
|
|
||||||
w.Write(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pagination holds the previous/next page numbers used by the UI's
// pagination links.
type Pagination struct {
	Next, Prev int
	HasPrev    bool // distinguishes "previous page is 0" from "no previous page"
}
|
|
||||||
|
|
||||||
// dashCommits gets a slice of the latest Commits to the current dashboard.
|
|
||||||
// If page > 0 it paginates by commitsPerPage.
|
|
||||||
func dashCommits(c appengine.Context, pkg *Package, page int, branch string) ([]*Commit, error) {
|
|
||||||
offset := page * commitsPerPage
|
|
||||||
q := datastore.NewQuery("Commit").
|
|
||||||
Ancestor(pkg.Key(c)).
|
|
||||||
Order("-Num")
|
|
||||||
|
|
||||||
var commits []*Commit
|
|
||||||
if branch == "" {
|
|
||||||
_, err := q.Limit(commitsPerPage).Offset(offset).
|
|
||||||
GetAll(c, &commits)
|
|
||||||
return commits, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Look for commits on a specific branch.
|
|
||||||
for t, n := q.Run(c), 0; len(commits) < commitsPerPage && n < 1000; {
|
|
||||||
var c Commit
|
|
||||||
_, err := t.Next(&c)
|
|
||||||
if err == datastore.Done {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if !isBranchCommit(&c, branch) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if n >= offset {
|
|
||||||
commits = append(commits, &c)
|
|
||||||
}
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
return commits, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// isBranchCommit reports whether the given commit is on the specified branch.
|
|
||||||
// It does so by examining the commit description, so there will be some bad
|
|
||||||
// matches where the branch commits do not begin with the "[branch]" prefix.
|
|
||||||
func isBranchCommit(c *Commit, b string) bool {
|
|
||||||
d := strings.TrimSpace(c.Desc)
|
|
||||||
if b == "default" {
|
|
||||||
return !strings.HasPrefix(d, "[")
|
|
||||||
}
|
|
||||||
return strings.HasPrefix(d, "["+b+"]")
|
|
||||||
}
|
|
||||||
|
|
||||||
// commitBuilders returns the names of the builders that provided
|
|
||||||
// Results for the provided commits.
|
|
||||||
func commitBuilders(commits []*Commit) []string {
|
|
||||||
builders := make(map[string]bool)
|
|
||||||
for _, commit := range commits {
|
|
||||||
for _, r := range commit.Results() {
|
|
||||||
builders[r.Builder] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
k := keys(builders)
|
|
||||||
sort.Sort(builderOrder(k))
|
|
||||||
return k
|
|
||||||
}
|
|
||||||
|
|
||||||
// keys returns the keys of m in sorted order.
func keys(m map[string]bool) []string {
	var out []string
	for name := range m {
		out = append(out, name)
	}
	sort.Strings(out)
	return out
}
|
|
||||||
|
|
||||||
// builderOrder implements sort.Interface, sorting builder names
|
|
||||||
// ("darwin-amd64", etc) first by builderPriority and then alphabetically.
|
|
||||||
type builderOrder []string
|
|
||||||
|
|
||||||
func (s builderOrder) Len() int { return len(s) }
|
|
||||||
func (s builderOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
|
||||||
func (s builderOrder) Less(i, j int) bool {
|
|
||||||
pi, pj := builderPriority(s[i]), builderPriority(s[j])
|
|
||||||
if pi == pj {
|
|
||||||
return s[i] < s[j]
|
|
||||||
}
|
|
||||||
return pi < pj
|
|
||||||
}
|
|
||||||
|
|
||||||
func builderPriority(builder string) (p int) {
|
|
||||||
// Put -temp builders at the end, always.
|
|
||||||
if strings.HasSuffix(builder, "-temp") {
|
|
||||||
defer func() { p += 20 }()
|
|
||||||
}
|
|
||||||
// Group race builders together.
|
|
||||||
if isRace(builder) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
// If the OS has a specified priority, use it.
|
|
||||||
if p, ok := osPriority[builderOS(builder)]; ok {
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
// The rest.
|
|
||||||
return 10
|
|
||||||
}
|
|
||||||
|
|
||||||
// isRace reports whether the builder name denotes a race-detector builder,
// i.e. it ends in "-race" or contains a "-race-" component.
func isRace(s string) bool {
	if strings.HasSuffix(s, "-race") {
		return true
	}
	return strings.Contains(s, "-race-")
}
|
|
||||||
|
|
||||||
func unsupported(builder string) bool {
|
|
||||||
if strings.HasSuffix(builder, "-temp") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return unsupportedOS(builderOS(builder))
|
|
||||||
}
|
|
||||||
|
|
||||||
func unsupportedOS(os string) bool {
|
|
||||||
if os == "race" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
p, ok := osPriority[os]
|
|
||||||
return !ok || p > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Priorities for specific operating systems.
// Lower values sort first on the dashboard (see builderPriority).
// Entries with priority > 0 are also treated as unsupported by
// unsupportedOS; OSes absent from the map default to priority 10.
var osPriority = map[string]int{
	"darwin":  0,
	"freebsd": 0,
	"linux":   0,
	"windows": 0,
	// race == 1
	"openbsd":   2,
	"netbsd":    3,
	"dragonfly": 4,
}
|
|
||||||
|
|
||||||
// TagState represents the state of all Packages at a Tag.
type TagState struct {
	Tag      *Commit         // the main-repo Commit the tag resolves to
	Packages []*PackageState // per-subrepo state; subrepos without commits are omitted
}
|
|
||||||
|
|
||||||
// PackageState represents the state of a Package at a Tag.
type PackageState struct {
	Package *Package
	Commit  *Commit // the package's most recent commit (see TagStateByName)
}
|
|
||||||
|
|
||||||
// TagStateByName fetches the results for all Go subrepos at the specified Tag.
// Subrepos with no recorded commit are logged and skipped rather than
// failing the whole lookup.
func TagStateByName(c appengine.Context, name string) (*TagState, error) {
	tag, err := GetTag(c, name)
	if err != nil {
		return nil, err
	}
	pkgs, err := Packages(c, "subrepo")
	if err != nil {
		return nil, err
	}
	var st TagState
	for _, pkg := range pkgs {
		com, err := pkg.LastCommit(c)
		if err != nil {
			// Non-fatal: a subrepo without commits is simply omitted.
			c.Warningf("%v: no Commit found: %v", pkg, err)
			continue
		}
		st.Packages = append(st.Packages, &PackageState{pkg, com})
	}
	// Resolve the tag to its main-repo commit last.
	st.Tag, err = tag.Commit(c)
	if err != nil {
		return nil, err
	}
	return &st, nil
}
|
|
||||||
|
|
||||||
// uiTemplateData is the data passed to the ui.html template and to the
// failures/json output modes.
type uiTemplateData struct {
	Dashboard  *Dashboard
	Package    *Package
	Commits    []*Commit
	Builders   []string
	TipState   *TagState // may be nil; only set for page 0 of the main repo view (see uiHandler)
	Pagination *Pagination
	Branch     string
}
|
|
||||||
|
|
||||||
// populateBuildingURLs populates each commit in Commits' buildingURLs map with the
// URLs of builds which are currently in progress.
func (td *uiTemplateData) populateBuildingURLs(ctx appengine.Context) {
	// need are memcache keys: "building|<hash>|<gohash>|<builder>"
	// The hash is of the main "go" repo, or the subrepo commit hash.
	// The gohash is empty for the main repo, else it's the Go hash.
	var need []string

	commit := map[string]*Commit{} // commit hash -> Commit

	// TODO(bradfitz): this only populates the main repo, not subpackages currently.
	for _, b := range td.Builders {
		for _, c := range td.Commits {
			// No result yet for this builder/commit: it may be building.
			if c.Result(b, "") == nil {
				commit[c.Hash] = c
				need = append(need, "building|"+c.Hash+"||"+b)
			}
		}
	}
	if len(need) == 0 {
		return
	}
	m, err := memcache.GetMulti(ctx, need)
	if err != nil {
		// oh well. this is a cute non-critical feature anyway.
		ctx.Debugf("GetMulti of building keys: %v", err)
		return
	}
	for k, it := range m {
		// Split the key back into its "building|hash|gohash|builder" parts.
		f := strings.SplitN(k, "|", 4)
		if len(f) != 4 {
			continue
		}
		hash, goHash, builder := f[1], f[2], f[3]
		c, ok := commit[hash]
		if !ok {
			continue
		}
		// NOTE(review): this inner m shadows the memcache result map above.
		m := c.buildingURLs
		if m == nil {
			m = make(map[builderAndGoHash]string)
			c.buildingURLs = m
		}
		m[builderAndGoHash{builder, goHash}] = string(it.Value)
	}

}
|
|
||||||
|
|
||||||
// uiTemplate renders the dashboard page. It is parsed once at startup;
// template.Must panics if build/ui.html is missing or invalid.
var uiTemplate = template.Must(
	template.New("ui.html").Funcs(tmplFuncs).ParseFiles("build/ui.html"),
)

// tmplFuncs is the set of helper functions made available to ui.html.
var tmplFuncs = template.FuncMap{
	"buildDashboards":   buildDashboards,
	"builderOS":         builderOS,
	"builderSpans":      builderSpans,
	"builderSubheading": builderSubheading,
	"builderTitle":      builderTitle,
	"repoURL":           repoURL,
	"shortDesc":         shortDesc,
	"shortHash":         shortHash,
	"shortUser":         shortUser,
	"tail":              tail,
	"unsupported":       unsupported,
}
|
|
||||||
|
|
||||||
// splitDash splits s around the first "-", returning the parts before
// and after it. If s contains no "-", it returns s and "".
func splitDash(s string) (string, string) {
	before, after, _ := strings.Cut(s, "-")
	return before, after
}
|
|
||||||
|
|
||||||
// builderOS returns the os tag for a builder string
|
|
||||||
func builderOS(s string) string {
|
|
||||||
os, _ := splitDash(s)
|
|
||||||
return os
|
|
||||||
}
|
|
||||||
|
|
||||||
// builderOSOrRace returns the builder OS or, if it is a race builder, "race".
|
|
||||||
func builderOSOrRace(s string) string {
|
|
||||||
if isRace(s) {
|
|
||||||
return "race"
|
|
||||||
}
|
|
||||||
return builderOS(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// builderArch returns the arch tag for a builder string
|
|
||||||
func builderArch(s string) string {
|
|
||||||
_, arch := splitDash(s)
|
|
||||||
arch, _ = splitDash(arch) // chop third part
|
|
||||||
return arch
|
|
||||||
}
|
|
||||||
|
|
||||||
// builderSubheading returns a short arch tag for a builder string
|
|
||||||
// or, if it is a race builder, the builder OS.
|
|
||||||
func builderSubheading(s string) string {
|
|
||||||
if isRace(s) {
|
|
||||||
return builderOS(s)
|
|
||||||
}
|
|
||||||
arch := builderArch(s)
|
|
||||||
switch arch {
|
|
||||||
case "amd64":
|
|
||||||
return "x64"
|
|
||||||
}
|
|
||||||
return arch
|
|
||||||
}
|
|
||||||
|
|
||||||
// builderArchChar returns the architecture letter for a builder string
|
|
||||||
func builderArchChar(s string) string {
|
|
||||||
arch := builderArch(s)
|
|
||||||
switch arch {
|
|
||||||
case "386":
|
|
||||||
return "8"
|
|
||||||
case "amd64":
|
|
||||||
return "6"
|
|
||||||
case "arm":
|
|
||||||
return "5"
|
|
||||||
}
|
|
||||||
return arch
|
|
||||||
}
|
|
||||||
|
|
||||||
// builderSpan describes a run of adjacent builder columns sharing one
// operating system, used by ui.html to render spanning column headings.
type builderSpan struct {
	N           int    // number of adjacent builder columns spanned
	OS          string // OS tag (or "race") shown in the heading
	Unsupported bool   // rendered with the "unsupported" CSS class (hidden in "first-class ports" view)
}
|
|
||||||
|
|
||||||
// builderSpans creates a list of tags showing
|
|
||||||
// the builder's operating system names, spanning
|
|
||||||
// the appropriate number of columns.
|
|
||||||
func builderSpans(s []string) []builderSpan {
|
|
||||||
var sp []builderSpan
|
|
||||||
for len(s) > 0 {
|
|
||||||
i := 1
|
|
||||||
os := builderOSOrRace(s[0])
|
|
||||||
u := unsupportedOS(os) || strings.HasSuffix(s[0], "-temp")
|
|
||||||
for i < len(s) && builderOSOrRace(s[i]) == os {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
sp = append(sp, builderSpan{i, os, u})
|
|
||||||
s = s[i:]
|
|
||||||
}
|
|
||||||
return sp
|
|
||||||
}
|
|
||||||
|
|
||||||
// builderTitle formats "linux-amd64-foo" as "linux amd64 foo".
func builderTitle(s string) string {
	return strings.Join(strings.Split(s, "-"), " ")
}
|
|
||||||
|
|
||||||
// buildDashboards returns the known public dashboards.
// It is exposed to ui.html via tmplFuncs as "buildDashboards".
func buildDashboards() []*Dashboard {
	return dashboards
}
|
|
||||||
|
|
||||||
// shortDesc returns the first line of a description.
|
|
||||||
func shortDesc(desc string) string {
|
|
||||||
if i := strings.Index(desc, "\n"); i != -1 {
|
|
||||||
desc = desc[:i]
|
|
||||||
}
|
|
||||||
return limitStringLength(desc, 100)
|
|
||||||
}
|
|
||||||
|
|
||||||
// shortHash returns a short (at most 12 character) version of a hash.
func shortHash(hash string) string {
	if len(hash) <= 12 {
		return hash
	}
	return hash[:12]
}
|
|
||||||
|
|
||||||
// shortUser returns a shortened version of a user string:
// the address inside an "Name <addr>" form if present, then
// everything before any "@".
func shortUser(user string) string {
	lt := strings.Index(user, "<")
	gt := strings.Index(user, ">")
	if 0 <= lt && lt < gt {
		user = user[lt+1 : gt]
	}
	if at := strings.Index(user, "@"); at >= 0 {
		user = user[:at]
	}
	return user
}
|
|
||||||
|
|
||||||
// repoRe matches Google Code repositories and subrepositories (without paths).
// Group 1 is the repository name; optional group 2 is ".subrepo" (with dot).
var repoRe = regexp.MustCompile(`^code\.google\.com/p/([a-z0-9\-]+)(\.[a-z0-9\-]+)?$`)

// repoURL returns the URL of a change at a Google Code repository or subrepo.
// With an empty packagePath it returns the main-repo URL for the given
// dashboard; otherwise it returns a subrepo URL, or an error for an
// unrecognized Mercurial package path.
func repoURL(dashboard, hash, packagePath string) (string, error) {
	if packagePath == "" {
		if dashboard == "Gccgo" {
			return "https://code.google.com/p/gofrontend/source/detail?r=" + hash, nil
		}
		if dashboard == "Mercurial" {
			return "https://golang.org/change/" + hash, nil
		}
		// TODO(adg): use the above once /change/ points to git hashes
		return "https://go.googlesource.com/go/+/" + hash, nil
	}

	// TODO(adg): remove this old hg stuff, one day.
	if dashboard == "Mercurial" {
		m := repoRe.FindStringSubmatch(packagePath)
		if m == nil {
			return "", errors.New("unrecognized package: " + packagePath)
		}
		url := "https://code.google.com/p/" + m[1] + "/source/detail?r=" + hash
		// On a match, m always has length 3 and m[2] is "" when the
		// optional subrepo group is absent. The previous check
		// (len(m) > 2) was therefore always true and m[2][1:] panicked
		// for non-subrepo paths; check for a non-empty submatch instead.
		if m[2] != "" {
			url += "&repo=" + m[2][1:] // strip the leading "."
		}
		return url, nil
	}

	repo := strings.TrimPrefix(packagePath, "golang.org/x/")
	return "https://go.googlesource.com/" + repo + "/+/" + hash, nil
}
|
|
||||||
|
|
||||||
// tail returns the trailing n lines of s.
// If s has fewer than n lines, s is returned unchanged.
func tail(n int, s string) string {
	lines := strings.Split(s, "\n")
	if drop := len(lines) - n; drop >= 0 {
		return strings.Join(lines[drop:], "\n")
	}
	return s
}
|
|
@ -1,212 +0,0 @@
|
|||||||
<!DOCTYPE HTML>
|
|
||||||
<html>
|
|
||||||
<head>
|
|
||||||
<title>{{$.Dashboard.Name}} Build Dashboard</title>
|
|
||||||
<link rel="stylesheet" href="/static/style.css"/>
|
|
||||||
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js"></script>
|
|
||||||
<script>
|
|
||||||
var showUnsupported = window.location.hash.substr(1) != "short";
|
|
||||||
function redraw() {
|
|
||||||
showUnsupported = !$("#showshort").prop('checked');
|
|
||||||
$('.unsupported')[showUnsupported?'show':'hide']();
|
|
||||||
window.location.hash = showUnsupported?'':'short';
|
|
||||||
}
|
|
||||||
$(document).ready(function() {
|
|
||||||
$("#showshort").attr('checked', !showUnsupported).change(redraw);
|
|
||||||
redraw();
|
|
||||||
})
|
|
||||||
</script>
|
|
||||||
</head>
|
|
||||||
|
|
||||||
<body>
|
|
||||||
<header id="topbar">
|
|
||||||
<h1>Go Dashboard</h1>
|
|
||||||
<nav>
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/">Test</a>
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/perf">Perf</a>
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/perfgraph">Graphs</a>
|
|
||||||
</nav>
|
|
||||||
<div class="clear"></div>
|
|
||||||
</header>
|
|
||||||
|
|
||||||
<nav class="dashboards">
|
|
||||||
{{range buildDashboards}}
|
|
||||||
<a href="{{.Prefix}}/">{{.Name}}</a>
|
|
||||||
{{end}}
|
|
||||||
<label>
|
|
||||||
<input type=checkbox id="showshort">
|
|
||||||
show only <a href="http://golang.org/wiki/PortingPolicy">first-class ports</a>
|
|
||||||
</label>
|
|
||||||
</nav>
|
|
||||||
{{with $.Package.Name}}<h2>{{.}}</h2>{{end}}
|
|
||||||
|
|
||||||
<div class="page">
|
|
||||||
|
|
||||||
{{if $.Commits}}
|
|
||||||
|
|
||||||
<table class="build">
|
|
||||||
<colgroup class="col-hash" {{if $.Package.Path}}span="2"{{end}}></colgroup>
|
|
||||||
{{range $.Builders | builderSpans}}
|
|
||||||
<colgroup class="col-result{{if .Unsupported}} unsupported{{end}}" span="{{.N}}"></colgroup>
|
|
||||||
{{end}}
|
|
||||||
<colgroup class="col-user"></colgroup>
|
|
||||||
<colgroup class="col-time"></colgroup>
|
|
||||||
<colgroup class="col-desc"></colgroup>
|
|
||||||
<tr>
|
|
||||||
<!-- extra row to make alternating colors use dark for first result -->
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
{{if $.Package.Path}}
|
|
||||||
<th colspan="2">revision</th>
|
|
||||||
{{else}}
|
|
||||||
<th> </th>
|
|
||||||
{{end}}
|
|
||||||
{{range $.Builders | builderSpans}}
|
|
||||||
<th {{if .Unsupported}}class="unsupported"{{end}} colspan="{{.N}}">{{.OS}}</th>
|
|
||||||
{{end}}
|
|
||||||
<th></th>
|
|
||||||
<th></th>
|
|
||||||
<th></th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
{{if $.Package.Path}}
|
|
||||||
<th class="result arch">repo</th>
|
|
||||||
<th class="result arch">{{$.Dashboard.Name}}</th>
|
|
||||||
{{else}}
|
|
||||||
<th> </th>
|
|
||||||
{{end}}
|
|
||||||
{{range $.Builders}}
|
|
||||||
<th class="result arch{{if (unsupported .)}} unsupported{{end}}" title="{{.}}">{{builderSubheading .}}</th>
|
|
||||||
{{end}}
|
|
||||||
<th></th>
|
|
||||||
<th></th>
|
|
||||||
<th></th>
|
|
||||||
</tr>
|
|
||||||
{{range $c := $.Commits}}
|
|
||||||
{{range $i, $h := $c.ResultGoHashes}}
|
|
||||||
<tr class="commit">
|
|
||||||
{{if $i}}
|
|
||||||
<td> </td>
|
|
||||||
{{else}}
|
|
||||||
<td class="hash"><a href="{{repoURL $.Dashboard.Name $c.Hash $.Package.Path}}">{{shortHash $c.Hash}}</a></td>
|
|
||||||
{{end}}
|
|
||||||
{{if $h}}
|
|
||||||
<td class="hash"><a href="{{repoURL $.Dashboard.Name $h ""}}">{{shortHash $h}}</a></td>
|
|
||||||
{{end}}
|
|
||||||
{{range $.Builders}}
|
|
||||||
<td class="result{{if (unsupported .)}} unsupported{{end}}">
|
|
||||||
{{with $c.Result . $h}}
|
|
||||||
{{if .BuildingURL}}
|
|
||||||
<a href="{{.BuildingURL}}"><img src="https://golang.org/favicon.ico" border=0></a>
|
|
||||||
{{else if .OK}}
|
|
||||||
<span class="ok">ok</span>
|
|
||||||
{{else}}
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/log/{{.LogHash}}" class="fail">fail</a>
|
|
||||||
{{end}}
|
|
||||||
{{else}}
|
|
||||||
|
|
||||||
{{end}}
|
|
||||||
</td>
|
|
||||||
{{end}}
|
|
||||||
{{if $i}}
|
|
||||||
<td> </td>
|
|
||||||
<td> </td>
|
|
||||||
<td> </td>
|
|
||||||
{{else}}
|
|
||||||
<td class="user" title="{{$c.User}}">{{shortUser $c.User}}</td>
|
|
||||||
<td class="time">{{$c.Time.Format "Mon 02 Jan 15:04"}}</td>
|
|
||||||
<td class="desc" title="{{$c.Desc}}">{{shortDesc $c.Desc}}</td>
|
|
||||||
{{end}}
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
</table>
|
|
||||||
|
|
||||||
{{with $.Pagination}}
|
|
||||||
<div class="paginate">
|
|
||||||
<nav>
|
|
||||||
<a {{if .HasPrev}}href="?{{with $.Package.Path}}repo={{.}}&{{end}}page={{.Prev}}{{with $.Branch}}&branch={{.}}{{end}}"{{else}}class="inactive"{{end}}>newer</a>
|
|
||||||
<a {{if .Next}}href="?{{with $.Package.Path}}repo={{.}}&{{end}}page={{.Next}}{{with $.Branch}}&branch={{.}}{{end}}"{{else}}class="inactive"{{end}}>older</a>
|
|
||||||
<a {{if .HasPrev}}href=".{{with $.Branch}}?branch={{.}}{{end}}"{{else}}class="inactive"{{end}}>latest</a>
|
|
||||||
</nav>
|
|
||||||
</div>
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
{{else}}
|
|
||||||
<p>No commits to display. Hm.</p>
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
{{with $.TipState}}
|
|
||||||
{{$goHash := .Tag.Hash}}
|
|
||||||
{{if .Packages}}
|
|
||||||
<h2>
|
|
||||||
Sub-repositories at tip
|
|
||||||
<small>(<a href="{{repoURL $.Dashboard.Name .Tag.Hash ""}}">{{shortHash .Tag.Hash}}</a>)</small>
|
|
||||||
</h2>
|
|
||||||
|
|
||||||
<table class="build">
|
|
||||||
<colgroup class="col-package"></colgroup>
|
|
||||||
<colgroup class="col-hash"></colgroup>
|
|
||||||
{{range $.Builders | builderSpans}}
|
|
||||||
<colgroup class="col-result{{if .Unsupported}} unsupported{{end}}" span="{{.N}}"></colgroup>
|
|
||||||
{{end}}
|
|
||||||
<colgroup class="col-user"></colgroup>
|
|
||||||
<colgroup class="col-time"></colgroup>
|
|
||||||
<colgroup class="col-desc"></colgroup>
|
|
||||||
<tr>
|
|
||||||
<!-- extra row to make alternating colors use dark for first result -->
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<th></th>
|
|
||||||
<th></th>
|
|
||||||
{{range $.Builders | builderSpans}}
|
|
||||||
<th {{if .Unsupported}}class="unsupported"{{end}} colspan="{{.N}}">{{.OS}}</th>
|
|
||||||
{{end}}
|
|
||||||
<th></th>
|
|
||||||
<th></th>
|
|
||||||
<th></th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<th></th>
|
|
||||||
<th></th>
|
|
||||||
{{range $.Builders}}
|
|
||||||
<th class="result arch{{if (unsupported .)}} unsupported{{end}}" title="{{.}}">{{builderSubheading .}}</th>
|
|
||||||
{{end}}
|
|
||||||
<th></th>
|
|
||||||
<th></th>
|
|
||||||
<th></th>
|
|
||||||
</tr>
|
|
||||||
{{range $pkg := .Packages}}
|
|
||||||
<tr class="commit">
|
|
||||||
<td><a title="{{.Package.Path}}" href="?repo={{.Package.Path}}">{{.Package.Name}}</a></td>
|
|
||||||
<td class="hash">
|
|
||||||
{{$h := $pkg.Commit.Hash}}
|
|
||||||
<a href="{{repoURL $.Dashboard.Name $h $pkg.Commit.PackagePath}}">{{shortHash $h}}</a>
|
|
||||||
</td>
|
|
||||||
{{range $.Builders}}
|
|
||||||
<td class="result{{if (unsupported .)}} unsupported{{end}}">
|
|
||||||
{{with $pkg.Commit.Result . $goHash}}
|
|
||||||
{{if .OK}}
|
|
||||||
<span class="ok">ok</span>
|
|
||||||
{{else}}
|
|
||||||
<a href="{{$.Dashboard.Prefix}}/log/{{.LogHash}}" class="fail">fail</a>
|
|
||||||
{{end}}
|
|
||||||
{{else}}
|
|
||||||
|
|
||||||
{{end}}
|
|
||||||
</td>
|
|
||||||
{{end}}
|
|
||||||
{{with $pkg.Commit}}
|
|
||||||
<td class="user" title="{{.User}}">{{shortUser .User}}</td>
|
|
||||||
<td class="time">{{.Time.Format "Mon 02 Jan 15:04"}}</td>
|
|
||||||
<td class="desc" title="{{.Desc}}">{{shortDesc .Desc}}</td>
|
|
||||||
{{end}}
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
</table>
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
</div>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
@ -1,117 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
)
|
|
||||||
|
|
||||||
// init registers the /updatebenchmark HTTP handler.
func init() {
	handleFunc("/updatebenchmark", updateBenchmark)
}
|
|
||||||
|
|
||||||
// updateBenchmark handles POST /updatebenchmark. It decodes a JSON list
// of commit hashes from the request body and, inside a single datastore
// transaction, marks each existing Commit as needing benchmarking,
// ensures a PerfResult entity exists for it, and refreshes the
// corresponding CommitRun slot. It refuses to run outside the dev app
// server and reports all failures as plain-text responses.
func updateBenchmark(w http.ResponseWriter, r *http.Request) {
	if !appengine.IsDevAppServer() {
		fmt.Fprint(w, "Update must not run on real server.")
		return
	}

	if r.Method != "POST" {
		fmt.Fprintf(w, "bad request method")
		return
	}

	c := contextForRequest(r)
	if !validKey(c, r.FormValue("key"), r.FormValue("builder")) {
		fmt.Fprintf(w, "bad builder/key")
		return
	}

	defer r.Body.Close()
	var hashes []string
	if err := json.NewDecoder(r.Body).Decode(&hashes); err != nil {
		fmt.Fprintf(w, "failed to decode request: %v", err)
		return
	}

	ncommit := 0 // commits updated
	nrun := 0    // commit runs written
	tx := func(c appengine.Context) error {
		// cr is the CommitRun currently being filled; it is flushed
		// whenever a hash falls into a different run.
		var cr *CommitRun
		for _, hash := range hashes {
			// Update Commit.
			com := &Commit{Hash: hash}
			err := datastore.Get(c, com.Key(c), com)
			if err != nil && err != datastore.ErrNoSuchEntity {
				return fmt.Errorf("fetching Commit: %v", err)
			}
			if err == datastore.ErrNoSuchEntity {
				// Unknown hash; skip it silently.
				continue
			}
			com.NeedsBenchmarking = true
			com.PerfResults = nil
			if err := putCommit(c, com); err != nil {
				return err
			}
			ncommit++

			// create PerfResult
			res := &PerfResult{CommitHash: com.Hash, CommitNum: com.Num}
			err = datastore.Get(c, res.Key(c), res)
			if err != nil && err != datastore.ErrNoSuchEntity {
				return fmt.Errorf("fetching PerfResult: %v", err)
			}
			if err == datastore.ErrNoSuchEntity {
				if _, err := datastore.Put(c, res.Key(c), res); err != nil {
					return fmt.Errorf("putting PerfResult: %v", err)
				}
			}

			// Update CommitRun.
			// Flush the in-progress run if this commit belongs to a
			// different PerfRunLength-sized window.
			if cr != nil && cr.StartCommitNum != com.Num/PerfRunLength*PerfRunLength {
				if _, err := datastore.Put(c, cr.Key(c), cr); err != nil {
					return fmt.Errorf("putting CommitRun: %v", err)
				}
				nrun++
				cr = nil
			}
			if cr == nil {
				var err error
				cr, err = GetCommitRun(c, com.Num)
				if err != nil {
					return fmt.Errorf("getting CommitRun: %v", err)
				}
			}
			if com.Num < cr.StartCommitNum || com.Num >= cr.StartCommitNum+PerfRunLength {
				return fmt.Errorf("commit num %v out of range [%v, %v)", com.Num, cr.StartCommitNum, cr.StartCommitNum+PerfRunLength)
			}
			// Fill this commit's slot in the run.
			idx := com.Num - cr.StartCommitNum
			cr.Hash[idx] = com.Hash
			cr.User[idx] = shortDesc(com.User)
			cr.Desc[idx] = shortDesc(com.Desc)
			cr.Time[idx] = com.Time
			cr.NeedsBenchmarking[idx] = com.NeedsBenchmarking
		}
		// Flush any remaining partially-filled run.
		if cr != nil {
			if _, err := datastore.Put(c, cr.Key(c), cr); err != nil {
				return fmt.Errorf("putting CommitRun: %v", err)
			}
			nrun++
		}
		return nil
	}
	if err := datastore.RunInTransaction(c, tx, nil); err != nil {
		fmt.Fprintf(w, "failed to execute tx: %v", err)
		return
	}
	fmt.Fprintf(w, "OK (updated %v commits and %v commit runs)", ncommit, nrun)
}
|
|
86
dashboard/app/cache/cache.go
vendored
86
dashboard/app/cache/cache.go
vendored
@ -1,86 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/memcache"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TimeKey specifies the memcache entity that keeps the logical datastore time.
var TimeKey = "cachetime"

const (
	nocache = "nocache" // query parameter; when non-empty, bypasses the cache
	expiry  = 600       // cache entry lifetime in seconds (10 minutes)
)

// newTime returns a fresh logical-time seed: the current Unix time in
// the high 32 bits, leaving the low bits free for memcache.Increment.
func newTime() uint64 { return uint64(time.Now().Unix()) << 32 }
|
|
||||||
|
|
||||||
// Now returns the current logical datastore time to use for cache lookups.
|
|
||||||
func Now(c appengine.Context) uint64 {
|
|
||||||
t, err := memcache.Increment(c, TimeKey, 0, newTime())
|
|
||||||
if err != nil {
|
|
||||||
c.Errorf("cache.Now: %v", err)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tick sets the current logical datastore time to a never-before-used time
|
|
||||||
// and returns that time. It should be called to invalidate the cache.
|
|
||||||
func Tick(c appengine.Context) uint64 {
|
|
||||||
t, err := memcache.Increment(c, TimeKey, 1, newTime())
|
|
||||||
if err != nil {
|
|
||||||
c.Errorf("cache.Tick: %v", err)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get fetches data for name at time now from memcache and unmarshals it into
|
|
||||||
// value. It reports whether it found the cache record and logs any errors to
|
|
||||||
// the admin console.
|
|
||||||
func Get(r *http.Request, now uint64, name string, value interface{}) bool {
|
|
||||||
if now == 0 || r.FormValue(nocache) != "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
c := appengine.NewContext(r)
|
|
||||||
key := fmt.Sprintf("%s.%d", name, now)
|
|
||||||
_, err := memcache.JSON.Get(c, key, value)
|
|
||||||
if err == nil {
|
|
||||||
c.Debugf("cache hit %q", key)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
c.Debugf("cache miss %q", key)
|
|
||||||
if err != memcache.ErrCacheMiss {
|
|
||||||
c.Errorf("get cache %q: %v", key, err)
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set puts value into memcache under name at time now.
|
|
||||||
// It logs any errors to the admin console.
|
|
||||||
func Set(r *http.Request, now uint64, name string, value interface{}) {
|
|
||||||
if now == 0 || r.FormValue(nocache) != "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c := appengine.NewContext(r)
|
|
||||||
key := fmt.Sprintf("%s.%d", name, now)
|
|
||||||
err := memcache.JSON.Set(c, &memcache.Item{
|
|
||||||
Key: key,
|
|
||||||
Object: value,
|
|
||||||
Expiration: expiry,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
c.Errorf("set cache %q: %v", key, err)
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,6 +0,0 @@
|
|||||||
cron:
|
|
||||||
- description: updates noise level for benchmarking results
|
|
||||||
url: /perflearn?update=1
|
|
||||||
schedule: every 24 hours
|
|
||||||
target: build
|
|
||||||
|
|
@ -1,54 +0,0 @@
|
|||||||
indexes:
|
|
||||||
|
|
||||||
- kind: Commit
|
|
||||||
ancestor: yes
|
|
||||||
properties:
|
|
||||||
- name: Num
|
|
||||||
direction: desc
|
|
||||||
|
|
||||||
- kind: Commit
|
|
||||||
ancestor: yes
|
|
||||||
properties:
|
|
||||||
- name: Time
|
|
||||||
direction: desc
|
|
||||||
|
|
||||||
- kind: Commit
|
|
||||||
ancestor: yes
|
|
||||||
properties:
|
|
||||||
- name: NeedsBenchmarking
|
|
||||||
- name: Num
|
|
||||||
direction: desc
|
|
||||||
|
|
||||||
- kind: CommitRun
|
|
||||||
ancestor: yes
|
|
||||||
properties:
|
|
||||||
- name: StartCommitNum
|
|
||||||
direction: desc
|
|
||||||
|
|
||||||
- kind: PerfResult
|
|
||||||
ancestor: yes
|
|
||||||
properties:
|
|
||||||
- name: CommitNum
|
|
||||||
direction: desc
|
|
||||||
|
|
||||||
- kind: PerfResult
|
|
||||||
ancestor: yes
|
|
||||||
properties:
|
|
||||||
- name: CommitNum
|
|
||||||
direction: asc
|
|
||||||
|
|
||||||
- kind: CommitRun
|
|
||||||
ancestor: yes
|
|
||||||
properties:
|
|
||||||
- name: StartCommitNum
|
|
||||||
direction: asc
|
|
||||||
|
|
||||||
- kind: PerfMetricRun
|
|
||||||
ancestor: yes
|
|
||||||
properties:
|
|
||||||
- name: Builder
|
|
||||||
- name: Benchmark
|
|
||||||
- name: Metric
|
|
||||||
- name: StartCommitNum
|
|
||||||
direction: asc
|
|
||||||
|
|
@ -1,64 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package key
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"appengine"
|
|
||||||
"appengine/datastore"
|
|
||||||
)
|
|
||||||
|
|
||||||
// theKey caches the builder key loaded from datastore, guarded by its
// embedded RWMutex (see Secret).
var theKey struct {
	sync.RWMutex
	builderKey
}

// builderKey is the datastore entity that holds the builder secret.
type builderKey struct {
	Secret string
}

// Key returns the fixed datastore key ("BuilderKey"/"root") under which
// the secret is stored.
func (k *builderKey) Key(c appengine.Context) *datastore.Key {
	return datastore.NewKey(c, "BuilderKey", "root", 0, nil)
}
|
|
||||||
|
|
||||||
// Secret returns the builder key secret, loading it from datastore on
// first use and caching it in theKey. It is safe for concurrent use:
// readers take a fast RLock path, and the first caller to find the
// cache empty takes the write lock, re-checks, and fills it
// (double-checked locking). It panics if the key cannot be loaded on a
// production server.
func Secret(c appengine.Context) string {
	// check with rlock
	theKey.RLock()
	k := theKey.Secret
	theKey.RUnlock()
	if k != "" {
		return k
	}

	// prepare to fill; check with lock and keep lock
	theKey.Lock()
	defer theKey.Unlock()
	if theKey.Secret != "" {
		// Another goroutine filled it while we waited for the lock.
		return theKey.Secret
	}

	// fill
	if err := datastore.Get(c, theKey.Key(c), &theKey.builderKey); err != nil {
		if err == datastore.ErrNoSuchEntity {
			// If the key is not stored in datastore, write it.
			// This only happens at the beginning of a new deployment.
			// The code is left here for SDK use and in case a fresh
			// deployment is ever needed. "gophers rule" is not the
			// real key.
			if !appengine.IsDevAppServer() {
				panic("lost key from datastore")
			}
			theKey.Secret = "gophers rule"
			datastore.Put(c, theKey.Key(c), &theKey.builderKey)
			return theKey.Secret
		}
		panic("cannot load builder key: " + err.Error())
	}

	return theKey.Secret
}
|
|
Binary file not shown.
Before Width: | Height: | Size: 570 B |
Binary file not shown.
Before Width: | Height: | Size: 328 B |
@ -1,308 +0,0 @@
|
|||||||
* { box-sizing: border-box; }
|
|
||||||
|
|
||||||
.dashboards {
|
|
||||||
padding: 0.5em;
|
|
||||||
}
|
|
||||||
.dashboards a {
|
|
||||||
padding: 0.5em;
|
|
||||||
background: #eee;
|
|
||||||
color: blue;
|
|
||||||
}
|
|
||||||
|
|
||||||
body {
|
|
||||||
margin: 0;
|
|
||||||
font-family: sans-serif;
|
|
||||||
padding: 0; margin: 0;
|
|
||||||
color: #222;
|
|
||||||
}
|
|
||||||
|
|
||||||
.container {
|
|
||||||
max-width: 900px;
|
|
||||||
margin: 0 auto;
|
|
||||||
}
|
|
||||||
|
|
||||||
p, pre, ul, ol { margin: 20px; }
|
|
||||||
|
|
||||||
h1, h2, h3, h4 {
|
|
||||||
margin: 20px 0;
|
|
||||||
padding: 0;
|
|
||||||
color: #375EAB;
|
|
||||||
font-weight: bold;
|
|
||||||
}
|
|
||||||
|
|
||||||
h1 { font-size: 24px; }
|
|
||||||
h2 { font-size: 20px; }
|
|
||||||
h3 { font-size: 20px; }
|
|
||||||
h4 { font-size: 16px; }
|
|
||||||
|
|
||||||
h2 { background: #E0EBF5; padding: 2px 5px; }
|
|
||||||
h3, h4 { margin: 20px 5px; }
|
|
||||||
|
|
||||||
dl, dd { font-size: 14px; }
|
|
||||||
dl { margin: 20px; }
|
|
||||||
dd { margin: 2px 20px; }
|
|
||||||
|
|
||||||
.clear {
|
|
||||||
clear: both;
|
|
||||||
}
|
|
||||||
|
|
||||||
.button {
|
|
||||||
padding: 10px;
|
|
||||||
|
|
||||||
color: #222;
|
|
||||||
border: 1px solid #375EAB;
|
|
||||||
background: #E0EBF5;
|
|
||||||
|
|
||||||
border-radius: 5px;
|
|
||||||
|
|
||||||
cursor: pointer;
|
|
||||||
|
|
||||||
margin-left: 60px;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* navigation bar */
|
|
||||||
|
|
||||||
#topbar {
|
|
||||||
padding: 10px 10px;
|
|
||||||
background: #E0EBF5;
|
|
||||||
}
|
|
||||||
|
|
||||||
#topbar a {
|
|
||||||
color: #222;
|
|
||||||
}
|
|
||||||
#topbar h1 {
|
|
||||||
float: left;
|
|
||||||
margin: 0;
|
|
||||||
padding-top: 5px;
|
|
||||||
}
|
|
||||||
|
|
||||||
#topbar nav {
|
|
||||||
float: left;
|
|
||||||
margin-left: 20px;
|
|
||||||
}
|
|
||||||
#topbar nav a {
|
|
||||||
display: inline-block;
|
|
||||||
padding: 10px;
|
|
||||||
|
|
||||||
margin: 0;
|
|
||||||
margin-right: 5px;
|
|
||||||
|
|
||||||
color: white;
|
|
||||||
background: #375EAB;
|
|
||||||
|
|
||||||
text-decoration: none;
|
|
||||||
font-size: 16px;
|
|
||||||
|
|
||||||
border: 1px solid #375EAB;
|
|
||||||
-webkit-border-radius: 5px;
|
|
||||||
-moz-border-radius: 5px;
|
|
||||||
border-radius: 5px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.page {
|
|
||||||
margin-top: 20px;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* settings panels */
|
|
||||||
aside {
|
|
||||||
margin-top: 5px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.panel {
|
|
||||||
border: 1px solid #aaa;
|
|
||||||
border-radius: 5px;
|
|
||||||
margin-bottom: 5px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.panel h1 {
|
|
||||||
font-size: 16px;
|
|
||||||
margin: 0;
|
|
||||||
padding: 2px 8px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.panel select {
|
|
||||||
padding: 5px;
|
|
||||||
border: 0;
|
|
||||||
width: 100%;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* results table */
|
|
||||||
|
|
||||||
table {
|
|
||||||
margin: 5px;
|
|
||||||
border-collapse: collapse;
|
|
||||||
font-size: 11px;
|
|
||||||
}
|
|
||||||
|
|
||||||
table td, table th, table td, table th {
|
|
||||||
vertical-align: top;
|
|
||||||
padding: 2px 6px;
|
|
||||||
}
|
|
||||||
|
|
||||||
table tr:nth-child(2n+1) {
|
|
||||||
background: #F4F4F4;
|
|
||||||
}
|
|
||||||
|
|
||||||
table thead tr {
|
|
||||||
background: #fff !important;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* build results */
|
|
||||||
|
|
||||||
.build td, .build th, .packages td, .packages th {
|
|
||||||
vertical-align: top;
|
|
||||||
padding: 2px 4px;
|
|
||||||
font-size: 10pt;
|
|
||||||
}
|
|
||||||
|
|
||||||
.build .hash {
|
|
||||||
font-family: monospace;
|
|
||||||
font-size: 9pt;
|
|
||||||
}
|
|
||||||
|
|
||||||
.build .result {
|
|
||||||
text-align: center;
|
|
||||||
width: 2em;
|
|
||||||
}
|
|
||||||
|
|
||||||
.build .col-hash, .build .col-result, .build .col-metric, .build .col-numresults {
|
|
||||||
border-right: 1px solid #ccc;
|
|
||||||
}
|
|
||||||
|
|
||||||
.build .row-commit {
|
|
||||||
border-top: 2px solid #ccc;
|
|
||||||
}
|
|
||||||
|
|
||||||
.build .arch {
|
|
||||||
font-size: 83%;
|
|
||||||
font-weight: normal;
|
|
||||||
}
|
|
||||||
|
|
||||||
.build .time {
|
|
||||||
color: #666;
|
|
||||||
}
|
|
||||||
|
|
||||||
.build .ok {
|
|
||||||
font-size: 83%;
|
|
||||||
}
|
|
||||||
|
|
||||||
.build .desc, .build .time, .build .user {
|
|
||||||
white-space: nowrap;
|
|
||||||
}
|
|
||||||
|
|
||||||
.build .desc {
|
|
||||||
text-align: left;
|
|
||||||
max-width: 470px;
|
|
||||||
overflow: hidden;
|
|
||||||
text-overflow: ellipsis;
|
|
||||||
}
|
|
||||||
|
|
||||||
.good { text-decoration: none; color: #000000; border: 2px solid #00E700}
|
|
||||||
.bad { text-decoration: none; text-shadow: 1px 1px 0 #000000; color: #FFFFFF; background: #E70000;}
|
|
||||||
.noise { text-decoration: none; color: #888; }
|
|
||||||
.fail { color: #C00; }
|
|
||||||
|
|
||||||
/* pagination */
|
|
||||||
|
|
||||||
.paginate nav {
|
|
||||||
padding: 0.5em;
|
|
||||||
margin: 10px 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
.paginate nav a {
|
|
||||||
padding: 0.5em;
|
|
||||||
background: #E0EBF5;
|
|
||||||
color: blue;
|
|
||||||
|
|
||||||
-webkit-border-radius: 5px;
|
|
||||||
-moz-border-radius: 5px;
|
|
||||||
border-radius: 5px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.paginate nav a.inactive {
|
|
||||||
color: #888;
|
|
||||||
cursor: default;
|
|
||||||
text-decoration: none;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* diffs */
|
|
||||||
|
|
||||||
.diff-meta {
|
|
||||||
font-family: monospace;
|
|
||||||
margin-bottom: 10px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.diff-container {
|
|
||||||
padding: 10px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.diff table .metric {
|
|
||||||
font-weight: bold;
|
|
||||||
}
|
|
||||||
|
|
||||||
.diff {
|
|
||||||
border: 1px solid #aaa;
|
|
||||||
border-radius: 5px;
|
|
||||||
margin-bottom: 5px;
|
|
||||||
margin-right: 10px;
|
|
||||||
float: left;
|
|
||||||
}
|
|
||||||
|
|
||||||
.diff h1 {
|
|
||||||
font-size: 16px;
|
|
||||||
margin: 0;
|
|
||||||
padding: 2px 8px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.diff-benchmark {
|
|
||||||
clear: both;
|
|
||||||
padding-top: 5px;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* positioning elements */
|
|
||||||
|
|
||||||
.page {
|
|
||||||
position: relative;
|
|
||||||
width: 100%;
|
|
||||||
}
|
|
||||||
|
|
||||||
aside {
|
|
||||||
position: absolute;
|
|
||||||
top: 0;
|
|
||||||
left: 0;
|
|
||||||
bottom: 0;
|
|
||||||
width: 200px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.main-content {
|
|
||||||
position: absolute;
|
|
||||||
top: 0;
|
|
||||||
left: 210px;
|
|
||||||
right: 5px;
|
|
||||||
min-height: 200px;
|
|
||||||
overflow: hidden;
|
|
||||||
}
|
|
||||||
|
|
||||||
@media only screen and (max-width: 900px) {
|
|
||||||
aside {
|
|
||||||
position: relative;
|
|
||||||
display: block;
|
|
||||||
width: auto;
|
|
||||||
}
|
|
||||||
|
|
||||||
.main-content {
|
|
||||||
position: static;
|
|
||||||
padding: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
aside .panel {
|
|
||||||
float: left;
|
|
||||||
width: auto;
|
|
||||||
margin-right: 5px;
|
|
||||||
}
|
|
||||||
aside .button {
|
|
||||||
float: left;
|
|
||||||
margin: 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,49 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
// Package auth contains shared code related to OAuth2 and obtaining
|
|
||||||
// tokens for a project.
|
|
||||||
package auth
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"golang.org/x/oauth2"
|
|
||||||
"golang.org/x/oauth2/google"
|
|
||||||
)
|
|
||||||
|
|
||||||
func homedir() string {
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
|
|
||||||
}
|
|
||||||
return os.Getenv("HOME")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectTokenSource returns an OAuth2 TokenSource for the given Google Project ID.
|
|
||||||
func ProjectTokenSource(proj string, scopes ...string) (oauth2.TokenSource, error) {
|
|
||||||
// TODO(bradfitz): try different strategies too, like
|
|
||||||
// three-legged flow if the service account doesn't exist, and
|
|
||||||
// then cache the token file on disk somewhere. Or maybe that should be an
|
|
||||||
// option, for environments without stdin/stdout available to the user.
|
|
||||||
// We'll figure it out as needed.
|
|
||||||
fileName := filepath.Join(homedir(), "keys", proj+".key.json")
|
|
||||||
jsonConf, err := ioutil.ReadFile(fileName)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return nil, fmt.Errorf("Missing JSON key configuration. Download the Service Account JSON key from https://console.developers.google.com/project/%s/apiui/credential and place it at %s", proj, fileName)
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
conf, err := google.JWTConfigFromJSON(jsonConf, scopes...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("reading JSON config from %s: %v", fileName, err)
|
|
||||||
}
|
|
||||||
return conf.TokenSource(oauth2.NoContext), nil
|
|
||||||
}
|
|
@ -1,226 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package dashboard contains shared configuration and logic used by various
|
|
||||||
// pieces of the Go continuous build system.
|
|
||||||
package dashboard
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Builders are the different build configurations.
|
|
||||||
// The keys are like "darwin-amd64" or "linux-386-387".
|
|
||||||
// This map should not be modified by other packages.
|
|
||||||
var Builders = map[string]BuildConfig{}
|
|
||||||
|
|
||||||
// A BuildConfig describes how to run either a Docker-based or VM-based builder.
|
|
||||||
type BuildConfig struct {
|
|
||||||
// Name is the unique name of the builder, in the form of
|
|
||||||
// "darwin-386" or "linux-amd64-race".
|
|
||||||
Name string
|
|
||||||
|
|
||||||
// VM-specific settings:
|
|
||||||
VMImage string // e.g. "openbsd-amd64-56"
|
|
||||||
machineType string // optional GCE instance type
|
|
||||||
|
|
||||||
// Docker-specific settings: (used if VMImage == "")
|
|
||||||
Image string // Docker image to use to build
|
|
||||||
cmd string // optional -cmd flag (relative to go/src/)
|
|
||||||
env []string // extra environment ("key=value") pairs
|
|
||||||
dashURL string // url of the build dashboard
|
|
||||||
tool string // the tool this configuration is for
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *BuildConfig) GOOS() string { return c.Name[:strings.Index(c.Name, "-")] }
|
|
||||||
|
|
||||||
func (c *BuildConfig) GOARCH() string {
|
|
||||||
arch := c.Name[strings.Index(c.Name, "-")+1:]
|
|
||||||
i := strings.Index(arch, "-")
|
|
||||||
if i == -1 {
|
|
||||||
return arch
|
|
||||||
}
|
|
||||||
return arch[:i]
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllScript returns the relative path to the operating system's script to
|
|
||||||
// do the build and run its standard set of tests.
|
|
||||||
// Example values are "src/all.bash", "src/all.bat", "src/all.rc".
|
|
||||||
func (c *BuildConfig) AllScript() string {
|
|
||||||
if strings.HasPrefix(c.Name, "windows-") {
|
|
||||||
return "src/all.bat"
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(c.Name, "plan9-") {
|
|
||||||
return "src/all.rc"
|
|
||||||
}
|
|
||||||
// TODO(bradfitz): race.bash, etc, once the race builder runs
|
|
||||||
// via the buildlet.
|
|
||||||
return "src/all.bash"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *BuildConfig) UsesDocker() bool { return c.VMImage == "" }
|
|
||||||
func (c *BuildConfig) UsesVM() bool { return c.VMImage != "" }
|
|
||||||
|
|
||||||
// MachineType returns the GCE machine type to use for this builder.
|
|
||||||
func (c *BuildConfig) MachineType() string {
|
|
||||||
if v := c.machineType; v != "" {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
return "n1-highcpu-2"
|
|
||||||
}
|
|
||||||
|
|
||||||
// DockerRunArgs returns the arguments that go after "docker run" to execute
|
|
||||||
// this docker image. The rev (git hash) is required. The builderKey is optional.
|
|
||||||
// TODO(bradfitz): remove the builderKey being passed down, once the coordinator
|
|
||||||
// does the reporting to the dashboard for docker builds too.
|
|
||||||
func (conf BuildConfig) DockerRunArgs(rev, builderKey string) ([]string, error) {
|
|
||||||
if !conf.UsesDocker() {
|
|
||||||
return nil, errors.New("not a docker-based build")
|
|
||||||
}
|
|
||||||
var args []string
|
|
||||||
if builderKey != "" {
|
|
||||||
tmpKey := "/tmp/" + conf.Name + ".buildkey"
|
|
||||||
if _, err := os.Stat(tmpKey); err != nil {
|
|
||||||
if err := ioutil.WriteFile(tmpKey, []byte(builderKey), 0600); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Images may look for .gobuildkey in / or /root, so provide both.
|
|
||||||
// TODO(adg): fix images that look in the wrong place.
|
|
||||||
args = append(args, "-v", tmpKey+":/.gobuildkey")
|
|
||||||
args = append(args, "-v", tmpKey+":/root/.gobuildkey")
|
|
||||||
}
|
|
||||||
for _, pair := range conf.env {
|
|
||||||
args = append(args, "-e", pair)
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(conf.Name, "linux-amd64") {
|
|
||||||
args = append(args, "-e", "GOROOT_BOOTSTRAP=/go1.4-amd64/go")
|
|
||||||
} else if strings.HasPrefix(conf.Name, "linux-386") {
|
|
||||||
args = append(args, "-e", "GOROOT_BOOTSTRAP=/go1.4-386/go")
|
|
||||||
}
|
|
||||||
args = append(args,
|
|
||||||
conf.Image,
|
|
||||||
"/usr/local/bin/builder",
|
|
||||||
"-rev="+rev,
|
|
||||||
"-dashboard="+conf.dashURL,
|
|
||||||
"-tool="+conf.tool,
|
|
||||||
"-buildroot=/",
|
|
||||||
"-v",
|
|
||||||
)
|
|
||||||
if conf.cmd != "" {
|
|
||||||
args = append(args, "-cmd", conf.cmd)
|
|
||||||
}
|
|
||||||
args = append(args, conf.Name)
|
|
||||||
return args, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
addBuilder(BuildConfig{Name: "linux-386"})
|
|
||||||
addBuilder(BuildConfig{Name: "linux-386-387", env: []string{"GO386=387"}})
|
|
||||||
addBuilder(BuildConfig{Name: "linux-amd64"})
|
|
||||||
addBuilder(BuildConfig{Name: "linux-amd64-nocgo", env: []string{"CGO_ENABLED=0", "USER=root"}})
|
|
||||||
addBuilder(BuildConfig{Name: "linux-amd64-noopt", env: []string{"GO_GCFLAGS=-N -l"}})
|
|
||||||
addBuilder(BuildConfig{Name: "linux-amd64-race"})
|
|
||||||
addBuilder(BuildConfig{Name: "nacl-386"})
|
|
||||||
addBuilder(BuildConfig{Name: "nacl-amd64p32"})
|
|
||||||
addBuilder(BuildConfig{
|
|
||||||
Name: "linux-amd64-gccgo",
|
|
||||||
Image: "gobuilders/linux-x86-gccgo",
|
|
||||||
cmd: "make RUNTESTFLAGS=\"--target_board=unix/-m64\" check-go -j16",
|
|
||||||
dashURL: "https://build.golang.org/gccgo",
|
|
||||||
tool: "gccgo",
|
|
||||||
})
|
|
||||||
addBuilder(BuildConfig{
|
|
||||||
Name: "linux-386-gccgo",
|
|
||||||
Image: "gobuilders/linux-x86-gccgo",
|
|
||||||
cmd: "make RUNTESTFLAGS=\"--target_board=unix/-m32\" check-go -j16",
|
|
||||||
dashURL: "https://build.golang.org/gccgo",
|
|
||||||
tool: "gccgo",
|
|
||||||
})
|
|
||||||
addBuilder(BuildConfig{Name: "linux-386-sid", Image: "gobuilders/linux-x86-sid"})
|
|
||||||
addBuilder(BuildConfig{Name: "linux-amd64-sid", Image: "gobuilders/linux-x86-sid"})
|
|
||||||
addBuilder(BuildConfig{Name: "linux-386-clang", Image: "gobuilders/linux-x86-clang"})
|
|
||||||
addBuilder(BuildConfig{Name: "linux-amd64-clang", Image: "gobuilders/linux-x86-clang"})
|
|
||||||
|
|
||||||
// VMs:
|
|
||||||
addBuilder(BuildConfig{
|
|
||||||
Name: "openbsd-amd64-gce56",
|
|
||||||
VMImage: "openbsd-amd64-56",
|
|
||||||
machineType: "n1-highcpu-2",
|
|
||||||
})
|
|
||||||
addBuilder(BuildConfig{
|
|
||||||
// It's named "partial" because the buildlet sets
|
|
||||||
// GOTESTONLY=std to stop after the "go test std"
|
|
||||||
// tests because it's so slow otherwise.
|
|
||||||
// TODO(braditz): move that env variable to the
|
|
||||||
// coordinator and into this config.
|
|
||||||
Name: "plan9-386-gcepartial",
|
|
||||||
VMImage: "plan9-386",
|
|
||||||
// We *were* using n1-standard-1 because Plan 9 can only
|
|
||||||
// reliably use a single CPU. Using 2 or 4 and we see
|
|
||||||
// test failures. See:
|
|
||||||
// https://golang.org/issue/8393
|
|
||||||
// https://golang.org/issue/9491
|
|
||||||
// n1-standard-1 has 3.6 GB of memory which is
|
|
||||||
// overkill (userspace probably only sees 2GB anyway),
|
|
||||||
// but it's the cheapest option. And plenty to keep
|
|
||||||
// our ~250 MB of inputs+outputs in its ramfs.
|
|
||||||
//
|
|
||||||
// But the docs says "For the n1 series of machine
|
|
||||||
// types, a virtual CPU is implemented as a single
|
|
||||||
// hyperthread on a 2.6GHz Intel Sandy Bridge Xeon or
|
|
||||||
// Intel Ivy Bridge Xeon (or newer) processor. This
|
|
||||||
// means that the n1-standard-2 machine type will see
|
|
||||||
// a whole physical core."
|
|
||||||
//
|
|
||||||
// ... so we use n1-highcpu-2 (1.80 RAM, still
|
|
||||||
// plenty), just so we can get 1 whole core for the
|
|
||||||
// single-core Plan 9. It will see 2 virtual cores and
|
|
||||||
// only use 1, but we hope that 1 will be more powerful
|
|
||||||
// and we'll stop timing out on tests.
|
|
||||||
machineType: "n1-highcpu-2",
|
|
||||||
})
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func addBuilder(c BuildConfig) {
|
|
||||||
if c.tool == "gccgo" {
|
|
||||||
// TODO(cmang,bradfitz,adg): fix gccgo
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if c.Name == "" {
|
|
||||||
panic("empty name")
|
|
||||||
}
|
|
||||||
if _, dup := Builders[c.Name]; dup {
|
|
||||||
panic("dup name")
|
|
||||||
}
|
|
||||||
if c.dashURL == "" {
|
|
||||||
c.dashURL = "https://build.golang.org"
|
|
||||||
}
|
|
||||||
if c.tool == "" {
|
|
||||||
c.tool = "go"
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(c.Name, "nacl-") {
|
|
||||||
if c.Image == "" {
|
|
||||||
c.Image = "gobuilders/linux-x86-nacl"
|
|
||||||
}
|
|
||||||
if c.cmd == "" {
|
|
||||||
c.cmd = "/usr/local/bin/build-command.pl"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(c.Name, "linux-") && c.Image == "" {
|
|
||||||
c.Image = "gobuilders/linux-x86-base"
|
|
||||||
}
|
|
||||||
if c.Image == "" && c.VMImage == "" {
|
|
||||||
panic("empty image and vmImage")
|
|
||||||
}
|
|
||||||
if c.Image != "" && c.VMImage != "" {
|
|
||||||
panic("can't specify both image and vmImage")
|
|
||||||
}
|
|
||||||
Builders[c.Name] = c
|
|
||||||
}
|
|
@ -1,22 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package dashboard
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestOSARCHAccessors(t *testing.T) {
|
|
||||||
valid := func(s string) bool { return s != "" && !strings.Contains(s, "-") }
|
|
||||||
for _, conf := range Builders {
|
|
||||||
os := conf.GOOS()
|
|
||||||
arch := conf.GOARCH()
|
|
||||||
osArch := os + "-" + arch
|
|
||||||
if !valid(os) || !valid(arch) || !(conf.Name == osArch || strings.HasPrefix(conf.Name, osArch+"-")) {
|
|
||||||
t.Errorf("OS+ARCH(%q) = %q, %q; invalid", conf.Name, os, arch)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,222 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
// Package buildlet contains client tools for working with a buildlet
|
|
||||||
// server.
|
|
||||||
package buildlet // import "golang.org/x/tools/dashboard/buildlet"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewClient returns a *Client that will manipulate ipPort,
|
|
||||||
// authenticated using the provided keypair.
|
|
||||||
//
|
|
||||||
// This constructor returns immediately without testing the host or auth.
|
|
||||||
func NewClient(ipPort string, kp KeyPair) *Client {
|
|
||||||
return &Client{
|
|
||||||
ipPort: ipPort,
|
|
||||||
tls: kp,
|
|
||||||
password: kp.Password(),
|
|
||||||
httpClient: &http.Client{
|
|
||||||
Transport: &http.Transport{
|
|
||||||
DialTLS: kp.tlsDialer(),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Client interacts with a single buildlet.
|
|
||||||
type Client struct {
|
|
||||||
ipPort string
|
|
||||||
tls KeyPair
|
|
||||||
password string // basic auth password or empty for none
|
|
||||||
httpClient *http.Client
|
|
||||||
}
|
|
||||||
|
|
||||||
// URL returns the buildlet's URL prefix, without a trailing slash.
|
|
||||||
func (c *Client) URL() string {
|
|
||||||
if !c.tls.IsZero() {
|
|
||||||
return "https://" + strings.TrimSuffix(c.ipPort, ":443")
|
|
||||||
}
|
|
||||||
return "http://" + strings.TrimSuffix(c.ipPort, ":80")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) do(req *http.Request) (*http.Response, error) {
|
|
||||||
if c.password != "" {
|
|
||||||
req.SetBasicAuth("gomote", c.password)
|
|
||||||
}
|
|
||||||
return c.httpClient.Do(req)
|
|
||||||
}
|
|
||||||
|
|
||||||
// doOK sends the request and expects a 200 OK response.
|
|
||||||
func (c *Client) doOK(req *http.Request) error {
|
|
||||||
res, err := c.do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 4<<10))
|
|
||||||
return fmt.Errorf("%v; body: %s", res.Status, slurp)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutTar writes files to the remote buildlet, rooted at the relative
|
|
||||||
// directory dir.
|
|
||||||
// If dir is empty, they're placed at the root of the buildlet's work directory.
|
|
||||||
// The dir is created if necessary.
|
|
||||||
// The Reader must be of a tar.gz file.
|
|
||||||
func (c *Client) PutTar(r io.Reader, dir string) error {
|
|
||||||
req, err := http.NewRequest("PUT", c.URL()+"/writetgz?dir="+url.QueryEscape(dir), r)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return c.doOK(req)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutTarFromURL tells the buildlet to download the tar.gz file from tarURL
|
|
||||||
// and write it to dir, a relative directory from the workdir.
|
|
||||||
// If dir is empty, they're placed at the root of the buildlet's work directory.
|
|
||||||
// The dir is created if necessary.
|
|
||||||
// The url must be of a tar.gz file.
|
|
||||||
func (c *Client) PutTarFromURL(tarURL, dir string) error {
|
|
||||||
form := url.Values{
|
|
||||||
"url": {tarURL},
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest("POST", c.URL()+"/writetgz?dir="+url.QueryEscape(dir), strings.NewReader(form.Encode()))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
|
||||||
return c.doOK(req)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetTar returns a .tar.gz stream of the given directory, relative to the buildlet's work dir.
|
|
||||||
// The provided dir may be empty to get everything.
|
|
||||||
func (c *Client) GetTar(dir string) (tgz io.ReadCloser, err error) {
|
|
||||||
req, err := http.NewRequest("GET", c.URL()+"/tgz?dir="+url.QueryEscape(dir), nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
res, err := c.do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 4<<10))
|
|
||||||
res.Body.Close()
|
|
||||||
return nil, fmt.Errorf("%v; body: %s", res.Status, slurp)
|
|
||||||
}
|
|
||||||
return res.Body, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecOpts are options for a remote command invocation.
|
|
||||||
type ExecOpts struct {
|
|
||||||
// Output is the output of stdout and stderr.
|
|
||||||
// If nil, the output is discarded.
|
|
||||||
Output io.Writer
|
|
||||||
|
|
||||||
// Args are the arguments to pass to the cmd given to Client.Exec.
|
|
||||||
Args []string
|
|
||||||
|
|
||||||
// SystemLevel controls whether the command is run outside of
|
|
||||||
// the buildlet's environment.
|
|
||||||
SystemLevel bool
|
|
||||||
|
|
||||||
// OnStartExec is an optional hook that runs after the 200 OK
|
|
||||||
// response from the buildlet, but before the output begins
|
|
||||||
// writing to Output.
|
|
||||||
OnStartExec func()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exec runs cmd on the buildlet.
|
|
||||||
//
|
|
||||||
// Two errors are returned: one is whether the command succeeded
|
|
||||||
// remotely (remoteErr), and the second (execErr) is whether there
|
|
||||||
// were system errors preventing the command from being started or
|
|
||||||
// seen to completition. If execErr is non-nil, the remoteErr is
|
|
||||||
// meaningless.
|
|
||||||
func (c *Client) Exec(cmd string, opts ExecOpts) (remoteErr, execErr error) {
|
|
||||||
var mode string
|
|
||||||
if opts.SystemLevel {
|
|
||||||
mode = "sys"
|
|
||||||
}
|
|
||||||
form := url.Values{
|
|
||||||
"cmd": {cmd},
|
|
||||||
"mode": {mode},
|
|
||||||
"cmdArg": opts.Args,
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest("POST", c.URL()+"/exec", strings.NewReader(form.Encode()))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
|
||||||
res, err := c.do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 4<<10))
|
|
||||||
return nil, fmt.Errorf("buildlet: HTTP status %v: %s", res.Status, slurp)
|
|
||||||
}
|
|
||||||
condRun(opts.OnStartExec)
|
|
||||||
|
|
||||||
// Stream the output:
|
|
||||||
out := opts.Output
|
|
||||||
if out == nil {
|
|
||||||
out = ioutil.Discard
|
|
||||||
}
|
|
||||||
if _, err := io.Copy(out, res.Body); err != nil {
|
|
||||||
return nil, fmt.Errorf("error copying response: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Don't record to the dashboard unless we heard the trailer from
|
|
||||||
// the buildlet, otherwise it was probably some unrelated error
|
|
||||||
// (like the VM being killed, or the buildlet crashing due to
|
|
||||||
// e.g. https://golang.org/issue/9309, since we require a tip
|
|
||||||
// build of the buildlet to get Trailers support)
|
|
||||||
state := res.Trailer.Get("Process-State")
|
|
||||||
if state == "" {
|
|
||||||
return nil, errors.New("missing Process-State trailer from HTTP response; buildlet built with old (<= 1.4) Go?")
|
|
||||||
}
|
|
||||||
if state != "ok" {
|
|
||||||
return errors.New(state), nil
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Destroy shuts down the buildlet, destroying all state immediately.
|
|
||||||
func (c *Client) Destroy() error {
|
|
||||||
req, err := http.NewRequest("POST", c.URL()+"/halt", nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
res, err := c.do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 4<<10))
|
|
||||||
return fmt.Errorf("buildlet: HTTP status %v: %s", res.Status, slurp)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func condRun(fn func()) {
|
|
||||||
if fn != nil {
|
|
||||||
fn()
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,315 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
package buildlet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/oauth2"
|
|
||||||
"golang.org/x/tools/dashboard"
|
|
||||||
"google.golang.org/api/compute/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// VMOpts control how new VMs are started.
|
|
||||||
type VMOpts struct {
|
|
||||||
// Zone is the GCE zone to create the VM in. Required.
|
|
||||||
Zone string
|
|
||||||
|
|
||||||
// ProjectID is the GCE project ID. Required.
|
|
||||||
ProjectID string
|
|
||||||
|
|
||||||
// TLS optionally specifies the TLS keypair to use.
|
|
||||||
// If zero, http without auth is used.
|
|
||||||
TLS KeyPair
|
|
||||||
|
|
||||||
// Optional description of the VM.
|
|
||||||
Description string
|
|
||||||
|
|
||||||
// Optional metadata to put on the instance.
|
|
||||||
Meta map[string]string
|
|
||||||
|
|
||||||
// DeleteIn optionally specifies a duration at which
|
|
||||||
|
|
||||||
// to delete the VM.
|
|
||||||
DeleteIn time.Duration
|
|
||||||
|
|
||||||
// OnInstanceRequested optionally specifies a hook to run synchronously
|
|
||||||
// after the computeService.Instances.Insert call, but before
|
|
||||||
// waiting for its operation to proceed.
|
|
||||||
OnInstanceRequested func()
|
|
||||||
|
|
||||||
// OnInstanceCreated optionally specifies a hook to run synchronously
|
|
||||||
// after the instance operation succeeds.
|
|
||||||
OnInstanceCreated func()
|
|
||||||
|
|
||||||
// OnInstanceCreated optionally specifies a hook to run synchronously
|
|
||||||
// after the computeService.Instances.Get call.
|
|
||||||
OnGotInstanceInfo func()
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartNewVM boots a new VM on GCE and returns a buildlet client
|
|
||||||
// configured to speak to it.
|
|
||||||
func StartNewVM(ts oauth2.TokenSource, instName, builderType string, opts VMOpts) (*Client, error) {
|
|
||||||
computeService, _ := compute.New(oauth2.NewClient(oauth2.NoContext, ts))
|
|
||||||
|
|
||||||
conf, ok := dashboard.Builders[builderType]
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("invalid builder type %q", builderType)
|
|
||||||
}
|
|
||||||
|
|
||||||
zone := opts.Zone
|
|
||||||
if zone == "" {
|
|
||||||
// TODO: automatic? maybe that's not useful.
|
|
||||||
// For now just return an error.
|
|
||||||
return nil, errors.New("buildlet: missing required Zone option")
|
|
||||||
}
|
|
||||||
projectID := opts.ProjectID
|
|
||||||
if projectID == "" {
|
|
||||||
return nil, errors.New("buildlet: missing required ProjectID option")
|
|
||||||
}
|
|
||||||
|
|
||||||
prefix := "https://www.googleapis.com/compute/v1/projects/" + projectID
|
|
||||||
machType := prefix + "/zones/" + zone + "/machineTypes/" + conf.MachineType()
|
|
||||||
|
|
||||||
instance := &compute.Instance{
|
|
||||||
Name: instName,
|
|
||||||
Description: opts.Description,
|
|
||||||
MachineType: machType,
|
|
||||||
Disks: []*compute.AttachedDisk{
|
|
||||||
{
|
|
||||||
AutoDelete: true,
|
|
||||||
Boot: true,
|
|
||||||
Type: "PERSISTENT",
|
|
||||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
||||||
DiskName: instName,
|
|
||||||
SourceImage: "https://www.googleapis.com/compute/v1/projects/" + projectID + "/global/images/" + conf.VMImage,
|
|
||||||
DiskType: "https://www.googleapis.com/compute/v1/projects/" + projectID + "/zones/" + zone + "/diskTypes/pd-ssd",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Tags: &compute.Tags{
|
|
||||||
// Warning: do NOT list "http-server" or "allow-ssh" (our
|
|
||||||
// project's custom tag to allow ssh access) here; the
|
|
||||||
// buildlet provides full remote code execution.
|
|
||||||
// The https-server is authenticated, though.
|
|
||||||
Items: []string{"https-server"},
|
|
||||||
},
|
|
||||||
Metadata: &compute.Metadata{},
|
|
||||||
NetworkInterfaces: []*compute.NetworkInterface{
|
|
||||||
&compute.NetworkInterface{
|
|
||||||
AccessConfigs: []*compute.AccessConfig{
|
|
||||||
&compute.AccessConfig{
|
|
||||||
Type: "ONE_TO_ONE_NAT",
|
|
||||||
Name: "External NAT",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Network: prefix + "/global/networks/default",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
addMeta := func(key, value string) {
|
|
||||||
instance.Metadata.Items = append(instance.Metadata.Items, &compute.MetadataItems{
|
|
||||||
Key: key,
|
|
||||||
Value: value,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
// The buildlet-binary-url is the URL of the buildlet binary
|
|
||||||
// which the VMs are configured to download at boot and run.
|
|
||||||
// This lets us/ update the buildlet more easily than
|
|
||||||
// rebuilding the whole VM image.
|
|
||||||
addMeta("buildlet-binary-url",
|
|
||||||
"http://storage.googleapis.com/go-builder-data/buildlet."+conf.GOOS()+"-"+conf.GOARCH())
|
|
||||||
addMeta("builder-type", builderType)
|
|
||||||
if !opts.TLS.IsZero() {
|
|
||||||
addMeta("tls-cert", opts.TLS.CertPEM)
|
|
||||||
addMeta("tls-key", opts.TLS.KeyPEM)
|
|
||||||
addMeta("password", opts.TLS.Password())
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.DeleteIn != 0 {
|
|
||||||
// In case the VM gets away from us (generally: if the
|
|
||||||
// coordinator dies while a build is running), then we
|
|
||||||
// set this attribute of when it should be killed so
|
|
||||||
// we can kill it later when the coordinator is
|
|
||||||
// restarted. The cleanUpOldVMs goroutine loop handles
|
|
||||||
// that killing.
|
|
||||||
addMeta("delete-at", fmt.Sprint(time.Now().Add(opts.DeleteIn).Unix()))
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range opts.Meta {
|
|
||||||
addMeta(k, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
op, err := computeService.Instances.Insert(projectID, zone, instance).Do()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Failed to create instance: %v", err)
|
|
||||||
}
|
|
||||||
condRun(opts.OnInstanceRequested)
|
|
||||||
createOp := op.Name
|
|
||||||
|
|
||||||
// Wait for instance create operation to succeed.
|
|
||||||
OpLoop:
|
|
||||||
for {
|
|
||||||
time.Sleep(2 * time.Second)
|
|
||||||
op, err := computeService.ZoneOperations.Get(projectID, zone, createOp).Do()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Failed to get op %s: %v", createOp, err)
|
|
||||||
}
|
|
||||||
switch op.Status {
|
|
||||||
case "PENDING", "RUNNING":
|
|
||||||
continue
|
|
||||||
case "DONE":
|
|
||||||
if op.Error != nil {
|
|
||||||
for _, operr := range op.Error.Errors {
|
|
||||||
return nil, fmt.Errorf("Error creating instance: %+v", operr)
|
|
||||||
}
|
|
||||||
return nil, errors.New("Failed to start.")
|
|
||||||
}
|
|
||||||
break OpLoop
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("Unknown create status %q: %+v", op.Status, op)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
condRun(opts.OnInstanceCreated)
|
|
||||||
|
|
||||||
inst, err := computeService.Instances.Get(projectID, zone, instName).Do()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Error getting instance %s details after creation: %v", instName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Finds its internal and/or external IP addresses.
|
|
||||||
intIP, extIP := instanceIPs(inst)
|
|
||||||
|
|
||||||
// Wait for it to boot and its buildlet to come up.
|
|
||||||
var buildletURL string
|
|
||||||
var ipPort string
|
|
||||||
if !opts.TLS.IsZero() {
|
|
||||||
if extIP == "" {
|
|
||||||
return nil, errors.New("didn't find its external IP address")
|
|
||||||
}
|
|
||||||
buildletURL = "https://" + extIP
|
|
||||||
ipPort = extIP + ":443"
|
|
||||||
} else {
|
|
||||||
if intIP == "" {
|
|
||||||
return nil, errors.New("didn't find its internal IP address")
|
|
||||||
}
|
|
||||||
buildletURL = "http://" + intIP
|
|
||||||
ipPort = intIP + ":80"
|
|
||||||
}
|
|
||||||
condRun(opts.OnGotInstanceInfo)
|
|
||||||
|
|
||||||
const timeout = 90 * time.Second
|
|
||||||
var alive bool
|
|
||||||
impatientClient := &http.Client{
|
|
||||||
Timeout: 5 * time.Second,
|
|
||||||
Transport: &http.Transport{
|
|
||||||
TLSClientConfig: &tls.Config{
|
|
||||||
InsecureSkipVerify: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
deadline := time.Now().Add(timeout)
|
|
||||||
try := 0
|
|
||||||
for time.Now().Before(deadline) {
|
|
||||||
try++
|
|
||||||
res, err := impatientClient.Get(buildletURL)
|
|
||||||
if err != nil {
|
|
||||||
time.Sleep(1 * time.Second)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
res.Body.Close()
|
|
||||||
if res.StatusCode != 200 {
|
|
||||||
return nil, fmt.Errorf("buildlet returned HTTP status code %d on try number %d", res.StatusCode, try)
|
|
||||||
}
|
|
||||||
alive = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if !alive {
|
|
||||||
return nil, fmt.Errorf("buildlet didn't come up in %v", timeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewClient(ipPort, opts.TLS), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DestroyVM sends a request to delete a VM. Actual VM description is
|
|
||||||
// currently (2015-01-19) very slow for no good reason. This function
|
|
||||||
// returns once it's been requested, not when it's done.
|
|
||||||
func DestroyVM(ts oauth2.TokenSource, proj, zone, instance string) error {
|
|
||||||
computeService, _ := compute.New(oauth2.NewClient(oauth2.NoContext, ts))
|
|
||||||
_, err := computeService.Instances.Delete(proj, zone, instance).Do()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type VM struct {
|
|
||||||
// Name is the name of the GCE VM instance.
|
|
||||||
// For example, it's of the form "mote-bradfitz-plan9-386-foo",
|
|
||||||
// and not "plan9-386-foo".
|
|
||||||
Name string
|
|
||||||
IPPort string
|
|
||||||
TLS KeyPair
|
|
||||||
Type string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListVMs lists all VMs.
|
|
||||||
func ListVMs(ts oauth2.TokenSource, proj, zone string) ([]VM, error) {
|
|
||||||
var vms []VM
|
|
||||||
computeService, _ := compute.New(oauth2.NewClient(oauth2.NoContext, ts))
|
|
||||||
|
|
||||||
// TODO(bradfitz): paging over results if more than 500
|
|
||||||
list, err := computeService.Instances.List(proj, zone).Do()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, inst := range list.Items {
|
|
||||||
if inst.Metadata == nil {
|
|
||||||
// Defensive. Not seen in practice.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
meta := map[string]string{}
|
|
||||||
for _, it := range inst.Metadata.Items {
|
|
||||||
meta[it.Key] = it.Value
|
|
||||||
}
|
|
||||||
builderType := meta["builder-type"]
|
|
||||||
if builderType == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
vm := VM{
|
|
||||||
Name: inst.Name,
|
|
||||||
Type: builderType,
|
|
||||||
TLS: KeyPair{
|
|
||||||
CertPEM: meta["tls-cert"],
|
|
||||||
KeyPEM: meta["tls-key"],
|
|
||||||
},
|
|
||||||
}
|
|
||||||
_, extIP := instanceIPs(inst)
|
|
||||||
if extIP == "" || vm.TLS.IsZero() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
vm.IPPort = extIP + ":443"
|
|
||||||
vms = append(vms, vm)
|
|
||||||
}
|
|
||||||
return vms, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func instanceIPs(inst *compute.Instance) (intIP, extIP string) {
|
|
||||||
for _, iface := range inst.NetworkInterfaces {
|
|
||||||
if strings.HasPrefix(iface.NetworkIP, "10.") {
|
|
||||||
intIP = iface.NetworkIP
|
|
||||||
}
|
|
||||||
for _, accessConfig := range iface.AccessConfigs {
|
|
||||||
if accessConfig.Type == "ONE_TO_ONE_NAT" {
|
|
||||||
extIP = accessConfig.NatIP
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
@ -1,132 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
package buildlet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/sha1"
|
|
||||||
"crypto/tls"
|
|
||||||
"crypto/x509"
|
|
||||||
"crypto/x509/pkix"
|
|
||||||
"encoding/pem"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"math/big"
|
|
||||||
"net"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// KeyPair is the TLS public certificate PEM file and its associated
|
|
||||||
// private key PEM file that a builder will use for its HTTPS
|
|
||||||
// server. The zero value means no HTTPs, which is used by the
|
|
||||||
// coordinator for machines running within a firewall.
|
|
||||||
type KeyPair struct {
|
|
||||||
CertPEM string
|
|
||||||
KeyPEM string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (kp KeyPair) IsZero() bool { return kp == KeyPair{} }
|
|
||||||
|
|
||||||
// Password returns the SHA1 of the KeyPEM. This is used as the HTTP
|
|
||||||
// Basic Auth password.
|
|
||||||
func (kp KeyPair) Password() string {
|
|
||||||
if kp.KeyPEM != "" {
|
|
||||||
return fmt.Sprintf("%x", sha1.Sum([]byte(kp.KeyPEM)))
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// tlsDialer returns a TLS dialer for http.Transport.DialTLS that expects
|
|
||||||
// exactly our TLS cert.
|
|
||||||
func (kp KeyPair) tlsDialer() func(network, addr string) (net.Conn, error) {
|
|
||||||
if kp.IsZero() {
|
|
||||||
// Unused.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
wantCert, _ := tls.X509KeyPair([]byte(kp.CertPEM), []byte(kp.KeyPEM))
|
|
||||||
var wantPubKey *rsa.PublicKey = &wantCert.PrivateKey.(*rsa.PrivateKey).PublicKey
|
|
||||||
|
|
||||||
return func(network, addr string) (net.Conn, error) {
|
|
||||||
if network != "tcp" {
|
|
||||||
return nil, fmt.Errorf("unexpected network %q", network)
|
|
||||||
}
|
|
||||||
plainConn, err := net.Dial("tcp", addr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
tlsConn := tls.Client(plainConn, &tls.Config{InsecureSkipVerify: true})
|
|
||||||
if err := tlsConn.Handshake(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
certs := tlsConn.ConnectionState().PeerCertificates
|
|
||||||
if len(certs) < 1 {
|
|
||||||
return nil, errors.New("no server peer certificate")
|
|
||||||
}
|
|
||||||
cert := certs[0]
|
|
||||||
peerPubRSA, ok := cert.PublicKey.(*rsa.PublicKey)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("peer cert was a %T; expected RSA", cert.PublicKey)
|
|
||||||
}
|
|
||||||
if peerPubRSA.N.Cmp(wantPubKey.N) != 0 {
|
|
||||||
return nil, fmt.Errorf("unexpected TLS certificate")
|
|
||||||
}
|
|
||||||
return tlsConn, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NoKeyPair is used by the coordinator to speak http directly to buildlets,
|
|
||||||
// inside their firewall, without TLS.
|
|
||||||
var NoKeyPair = KeyPair{}
|
|
||||||
|
|
||||||
func NewKeyPair() (KeyPair, error) {
|
|
||||||
fail := func(err error) (KeyPair, error) { return KeyPair{}, err }
|
|
||||||
failf := func(format string, args ...interface{}) (KeyPair, error) { return fail(fmt.Errorf(format, args...)) }
|
|
||||||
|
|
||||||
priv, err := rsa.GenerateKey(rand.Reader, 2048)
|
|
||||||
if err != nil {
|
|
||||||
return failf("rsa.GenerateKey: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
notBefore := time.Now()
|
|
||||||
notAfter := notBefore.Add(5 * 365 * 24 * time.Hour) // 5 years
|
|
||||||
|
|
||||||
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
|
|
||||||
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
|
|
||||||
if err != nil {
|
|
||||||
return failf("failed to generate serial number: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
template := x509.Certificate{
|
|
||||||
SerialNumber: serialNumber,
|
|
||||||
Subject: pkix.Name{
|
|
||||||
Organization: []string{"Gopher Co"},
|
|
||||||
},
|
|
||||||
NotBefore: notBefore,
|
|
||||||
NotAfter: notAfter,
|
|
||||||
|
|
||||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
|
|
||||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
|
||||||
BasicConstraintsValid: true,
|
|
||||||
DNSNames: []string{"localhost"},
|
|
||||||
}
|
|
||||||
|
|
||||||
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
|
|
||||||
if err != nil {
|
|
||||||
return failf("Failed to create certificate: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var certOut bytes.Buffer
|
|
||||||
pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
|
||||||
var keyOut bytes.Buffer
|
|
||||||
pem.Encode(&keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
|
|
||||||
return KeyPair{
|
|
||||||
CertPEM: certOut.String(),
|
|
||||||
KeyPEM: keyOut.String(),
|
|
||||||
}, nil
|
|
||||||
}
|
|
@ -1,256 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// benchHash benchmarks a single commit.
|
|
||||||
func (b *Builder) benchHash(hash string, benchs []string) error {
|
|
||||||
if *verbose {
|
|
||||||
log.Println(b.name, "benchmarking", hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
res := &PerfResult{Hash: hash, Benchmark: "meta-done"}
|
|
||||||
|
|
||||||
// Create place in which to do work.
|
|
||||||
workpath := filepath.Join(*buildroot, b.name+"-"+hash[:12])
|
|
||||||
// Prepare a workpath if we don't have one we can reuse.
|
|
||||||
update := false
|
|
||||||
if b.lastWorkpath != workpath {
|
|
||||||
if err := os.Mkdir(workpath, mkdirPerm); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
buildLog, _, err := b.buildRepoOnHash(workpath, hash, makeCmd)
|
|
||||||
if err != nil {
|
|
||||||
removePath(workpath)
|
|
||||||
// record failure
|
|
||||||
res.Artifacts = append(res.Artifacts, PerfArtifact{"log", buildLog})
|
|
||||||
return b.recordPerfResult(res)
|
|
||||||
}
|
|
||||||
b.lastWorkpath = workpath
|
|
||||||
update = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build the benchmark binary.
|
|
||||||
benchBin, buildLog, err := b.buildBenchmark(workpath, update)
|
|
||||||
if err != nil {
|
|
||||||
// record failure
|
|
||||||
res.Artifacts = append(res.Artifacts, PerfArtifact{"log", buildLog})
|
|
||||||
return b.recordPerfResult(res)
|
|
||||||
}
|
|
||||||
|
|
||||||
benchmark, procs, affinity, last := chooseBenchmark(benchBin, benchs)
|
|
||||||
if benchmark != "" {
|
|
||||||
res.Benchmark = fmt.Sprintf("%v-%v", benchmark, procs)
|
|
||||||
res.Metrics, res.Artifacts, res.OK = b.executeBenchmark(workpath, hash, benchBin, benchmark, procs, affinity)
|
|
||||||
if err = b.recordPerfResult(res); err != nil {
|
|
||||||
return fmt.Errorf("recordResult: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if last {
|
|
||||||
// All benchmarks have beed executed, don't need workpath anymore.
|
|
||||||
removePath(b.lastWorkpath)
|
|
||||||
b.lastWorkpath = ""
|
|
||||||
// Notify the app.
|
|
||||||
res = &PerfResult{Hash: hash, Benchmark: "meta-done", OK: true}
|
|
||||||
if err = b.recordPerfResult(res); err != nil {
|
|
||||||
return fmt.Errorf("recordResult: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildBenchmark builds the benchmark binary.
|
|
||||||
func (b *Builder) buildBenchmark(workpath string, update bool) (benchBin, log string, err error) {
|
|
||||||
goroot := filepath.Join(workpath, "go")
|
|
||||||
gobin := filepath.Join(goroot, "bin", "go") + exeExt
|
|
||||||
gopath := filepath.Join(*buildroot, "gopath")
|
|
||||||
env := append([]string{
|
|
||||||
"GOROOT=" + goroot,
|
|
||||||
"GOPATH=" + gopath},
|
|
||||||
b.envv()...)
|
|
||||||
// First, download without installing.
|
|
||||||
args := []string{"get", "-d"}
|
|
||||||
if update {
|
|
||||||
args = append(args, "-u")
|
|
||||||
}
|
|
||||||
args = append(args, *benchPath)
|
|
||||||
var buildlog bytes.Buffer
|
|
||||||
runOpts := []runOpt{runTimeout(*buildTimeout), runEnv(env), allOutput(&buildlog), runDir(workpath)}
|
|
||||||
err = run(exec.Command(gobin, args...), runOpts...)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(&buildlog, "go get -d %s failed: %s", *benchPath, err)
|
|
||||||
return "", buildlog.String(), err
|
|
||||||
}
|
|
||||||
// Then, build into workpath.
|
|
||||||
benchBin = filepath.Join(workpath, "benchbin") + exeExt
|
|
||||||
args = []string{"build", "-o", benchBin, *benchPath}
|
|
||||||
buildlog.Reset()
|
|
||||||
err = run(exec.Command(gobin, args...), runOpts...)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(&buildlog, "go build %s failed: %s", *benchPath, err)
|
|
||||||
return "", buildlog.String(), err
|
|
||||||
}
|
|
||||||
return benchBin, "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// chooseBenchmark chooses the next benchmark to run
|
|
||||||
// based on the list of available benchmarks, already executed benchmarks
|
|
||||||
// and -benchcpu list.
|
|
||||||
func chooseBenchmark(benchBin string, doneBenchs []string) (bench string, procs, affinity int, last bool) {
|
|
||||||
var out bytes.Buffer
|
|
||||||
err := run(exec.Command(benchBin), allOutput(&out))
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Failed to query benchmark list: %v\n%s", err, &out)
|
|
||||||
last = true
|
|
||||||
return
|
|
||||||
}
|
|
||||||
outStr := out.String()
|
|
||||||
nlIdx := strings.Index(outStr, "\n")
|
|
||||||
if nlIdx < 0 {
|
|
||||||
log.Printf("Failed to parse benchmark list (no new line): %s", outStr)
|
|
||||||
last = true
|
|
||||||
return
|
|
||||||
}
|
|
||||||
localBenchs := strings.Split(outStr[:nlIdx], ",")
|
|
||||||
benchsMap := make(map[string]bool)
|
|
||||||
for _, b := range doneBenchs {
|
|
||||||
benchsMap[b] = true
|
|
||||||
}
|
|
||||||
cnt := 0
|
|
||||||
// We want to run all benchmarks with GOMAXPROCS=1 first.
|
|
||||||
for i, procs1 := range benchCPU {
|
|
||||||
for _, bench1 := range localBenchs {
|
|
||||||
if benchsMap[fmt.Sprintf("%v-%v", bench1, procs1)] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
cnt++
|
|
||||||
if cnt == 1 {
|
|
||||||
bench = bench1
|
|
||||||
procs = procs1
|
|
||||||
if i < len(benchAffinity) {
|
|
||||||
affinity = benchAffinity[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
last = cnt <= 1
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// executeBenchmark runs a single benchmark and parses its output.
|
|
||||||
func (b *Builder) executeBenchmark(workpath, hash, benchBin, bench string, procs, affinity int) (metrics []PerfMetric, artifacts []PerfArtifact, ok bool) {
|
|
||||||
// Benchmarks runs mutually exclusive with other activities.
|
|
||||||
benchMutex.RUnlock()
|
|
||||||
defer benchMutex.RLock()
|
|
||||||
benchMutex.Lock()
|
|
||||||
defer benchMutex.Unlock()
|
|
||||||
|
|
||||||
log.Printf("%v executing benchmark %v-%v on %v", b.name, bench, procs, hash)
|
|
||||||
|
|
||||||
// The benchmark executes 'go build'/'go tool',
|
|
||||||
// so we need properly setup env.
|
|
||||||
env := append([]string{
|
|
||||||
"GOROOT=" + filepath.Join(workpath, "go"),
|
|
||||||
"PATH=" + filepath.Join(workpath, "go", "bin") + string(os.PathListSeparator) + os.Getenv("PATH"),
|
|
||||||
"GODEBUG=gctrace=1", // since Go1.2
|
|
||||||
"GOGCTRACE=1", // before Go1.2
|
|
||||||
fmt.Sprintf("GOMAXPROCS=%v", procs)},
|
|
||||||
b.envv()...)
|
|
||||||
args := []string{
|
|
||||||
"-bench", bench,
|
|
||||||
"-benchmem", strconv.Itoa(*benchMem),
|
|
||||||
"-benchtime", benchTime.String(),
|
|
||||||
"-benchnum", strconv.Itoa(*benchNum),
|
|
||||||
"-tmpdir", workpath}
|
|
||||||
if affinity != 0 {
|
|
||||||
args = append(args, "-affinity", strconv.Itoa(affinity))
|
|
||||||
}
|
|
||||||
benchlog := new(bytes.Buffer)
|
|
||||||
err := run(exec.Command(benchBin, args...), runEnv(env), allOutput(benchlog), runDir(workpath))
|
|
||||||
if strip := benchlog.Len() - 512<<10; strip > 0 {
|
|
||||||
// Leave the last 512K, that part contains metrics.
|
|
||||||
benchlog = bytes.NewBuffer(benchlog.Bytes()[strip:])
|
|
||||||
}
|
|
||||||
artifacts = []PerfArtifact{{Type: "log", Body: benchlog.String()}}
|
|
||||||
if err != nil {
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Failed to execute benchmark '%v': %v", bench, err)
|
|
||||||
ok = false
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
metrics1, artifacts1, err := parseBenchmarkOutput(benchlog)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Failed to parse benchmark output: %v", err)
|
|
||||||
ok = false
|
|
||||||
return
|
|
||||||
}
|
|
||||||
metrics = metrics1
|
|
||||||
artifacts = append(artifacts, artifacts1...)
|
|
||||||
ok = true
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseBenchmarkOutput fetches metrics and artifacts from benchmark output.
|
|
||||||
func parseBenchmarkOutput(out io.Reader) (metrics []PerfMetric, artifacts []PerfArtifact, err error) {
|
|
||||||
s := bufio.NewScanner(out)
|
|
||||||
metricRe := regexp.MustCompile("^GOPERF-METRIC:([a-z,0-9,-]+)=([0-9]+)$")
|
|
||||||
fileRe := regexp.MustCompile("^GOPERF-FILE:([a-z,0-9,-]+)=(.+)$")
|
|
||||||
for s.Scan() {
|
|
||||||
ln := s.Text()
|
|
||||||
if ss := metricRe.FindStringSubmatch(ln); ss != nil {
|
|
||||||
var v uint64
|
|
||||||
v, err = strconv.ParseUint(ss[2], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("Failed to parse metric '%v=%v': %v", ss[1], ss[2], err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
metrics = append(metrics, PerfMetric{Type: ss[1], Val: v})
|
|
||||||
} else if ss := fileRe.FindStringSubmatch(ln); ss != nil {
|
|
||||||
var buf []byte
|
|
||||||
buf, err = ioutil.ReadFile(ss[2])
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("Failed to read file '%v': %v", ss[2], err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
artifacts = append(artifacts, PerfArtifact{ss[1], string(buf)})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// needsBenchmarking determines whether the commit needs benchmarking.
|
|
||||||
func needsBenchmarking(log *HgLog) bool {
|
|
||||||
// Do not benchmark branch commits, they are usually not interesting
|
|
||||||
// and fall out of the trunk succession.
|
|
||||||
if log.Branch != "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
// Do not benchmark commits that do not touch source files (e.g. CONTRIBUTORS).
|
|
||||||
for _, f := range strings.Split(log.Files, " ") {
|
|
||||||
if (strings.HasPrefix(f, "include") || strings.HasPrefix(f, "src")) &&
|
|
||||||
!strings.HasSuffix(f, "_test.go") && !strings.Contains(f, "testdata") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
@ -1,58 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
/*
|
|
||||||
|
|
||||||
Go Builder is a continuous build client for the Go project.
|
|
||||||
It integrates with the Go Dashboard AppEngine application.
|
|
||||||
|
|
||||||
Go Builder is intended to run continuously as a background process.
|
|
||||||
|
|
||||||
It periodically pulls updates from the Go Mercurial repository.
|
|
||||||
|
|
||||||
When a newer revision is found, Go Builder creates a clone of the repository,
|
|
||||||
runs all.bash, and reports build success or failure to the Go Dashboard.
|
|
||||||
|
|
||||||
For a release revision (a change description that matches "release.YYYY-MM-DD"),
|
|
||||||
Go Builder will create a tar.gz archive of the GOROOT and deliver it to the
|
|
||||||
Go Google Code project's downloads section.
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
|
|
||||||
gobuilder goos-goarch...
|
|
||||||
|
|
||||||
Several goos-goarch combinations can be provided, and the builder will
|
|
||||||
build them in serial.
|
|
||||||
|
|
||||||
Optional flags:
|
|
||||||
|
|
||||||
-dashboard="godashboard.appspot.com": Go Dashboard Host
|
|
||||||
The location of the Go Dashboard application to which Go Builder will
|
|
||||||
report its results.
|
|
||||||
|
|
||||||
-release: Build and deliver binary release archive
|
|
||||||
|
|
||||||
-rev=N: Build revision N and exit
|
|
||||||
|
|
||||||
-cmd="./all.bash": Build command (specify absolute or relative to go/src)
|
|
||||||
|
|
||||||
-v: Verbose logging
|
|
||||||
|
|
||||||
-external: External package builder mode (will not report Go build
|
|
||||||
state to dashboard or issue releases)
|
|
||||||
|
|
||||||
The key file should be located at $HOME/.gobuildkey or, for a builder-specific
|
|
||||||
key, $HOME/.gobuildkey-$BUILDER (eg, $HOME/.gobuildkey-linux-amd64).
|
|
||||||
|
|
||||||
The build key file is a text file of the format:
|
|
||||||
|
|
||||||
godashboard-key
|
|
||||||
googlecode-username
|
|
||||||
googlecode-password
|
|
||||||
|
|
||||||
If the Google Code credentials are not provided the archival step
|
|
||||||
will be skipped.
|
|
||||||
|
|
||||||
*/
|
|
||||||
package main // import "golang.org/x/tools/dashboard/cmd/builder"
|
|
@ -1,299 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/vcs"
|
|
||||||
)
|
|
||||||
|
|
||||||
// builderEnv represents the environment that a Builder will run tests in.
|
|
||||||
type builderEnv interface {
|
|
||||||
// setup sets up the builder environment and returns the directory to run the buildCmd in.
|
|
||||||
setup(repo *Repo, workpath, hash string, envv []string) (string, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// goEnv represents the builderEnv for the main Go repo.
|
|
||||||
type goEnv struct {
|
|
||||||
goos, goarch string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Builder) envv() []string {
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return b.envvWindows()
|
|
||||||
}
|
|
||||||
|
|
||||||
var e []string
|
|
||||||
if *buildTool == "go" {
|
|
||||||
e = []string{
|
|
||||||
"GOOS=" + b.goos,
|
|
||||||
"GOARCH=" + b.goarch,
|
|
||||||
"GOROOT_FINAL=/usr/local/go",
|
|
||||||
}
|
|
||||||
switch b.goos {
|
|
||||||
case "android", "nacl":
|
|
||||||
// Cross compile.
|
|
||||||
default:
|
|
||||||
// If we are building, for example, linux/386 on a linux/amd64 machine we want to
|
|
||||||
// make sure that the whole build is done as a if this were compiled on a real
|
|
||||||
// linux/386 machine. In other words, we want to not do a cross compilation build.
|
|
||||||
// To do this we set GOHOSTOS and GOHOSTARCH to override the detection in make.bash.
|
|
||||||
//
|
|
||||||
// The exception to this rule is when we are doing nacl/android builds. These are by
|
|
||||||
// definition always cross compilation, and we have support built into cmd/go to be
|
|
||||||
// able to handle this case.
|
|
||||||
e = append(e, "GOHOSTOS="+b.goos, "GOHOSTARCH="+b.goarch)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, k := range extraEnv() {
|
|
||||||
if s, ok := getenvOk(k); ok {
|
|
||||||
e = append(e, k+"="+s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Builder) envvWindows() []string {
|
|
||||||
var start map[string]string
|
|
||||||
if *buildTool == "go" {
|
|
||||||
start = map[string]string{
|
|
||||||
"GOOS": b.goos,
|
|
||||||
"GOHOSTOS": b.goos,
|
|
||||||
"GOARCH": b.goarch,
|
|
||||||
"GOHOSTARCH": b.goarch,
|
|
||||||
"GOROOT_FINAL": `c:\go`,
|
|
||||||
"GOBUILDEXIT": "1", // exit all.bat with completion status.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, name := range extraEnv() {
|
|
||||||
if s, ok := getenvOk(name); ok {
|
|
||||||
start[name] = s
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if b.goos == "windows" {
|
|
||||||
switch b.goarch {
|
|
||||||
case "amd64":
|
|
||||||
start["PATH"] = `c:\TDM-GCC-64\bin;` + start["PATH"]
|
|
||||||
case "386":
|
|
||||||
start["PATH"] = `c:\TDM-GCC-32\bin;` + start["PATH"]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
skip := map[string]bool{
|
|
||||||
"GOBIN": true,
|
|
||||||
"GOPATH": true,
|
|
||||||
"GOROOT": true,
|
|
||||||
"INCLUDE": true,
|
|
||||||
"LIB": true,
|
|
||||||
}
|
|
||||||
var e []string
|
|
||||||
for name, v := range start {
|
|
||||||
e = append(e, name+"="+v)
|
|
||||||
skip[name] = true
|
|
||||||
}
|
|
||||||
for _, kv := range os.Environ() {
|
|
||||||
s := strings.SplitN(kv, "=", 2)
|
|
||||||
name := strings.ToUpper(s[0])
|
|
||||||
switch {
|
|
||||||
case name == "":
|
|
||||||
// variables, like "=C:=C:\", just copy them
|
|
||||||
e = append(e, kv)
|
|
||||||
case !skip[name]:
|
|
||||||
e = append(e, kv)
|
|
||||||
skip[name] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// setup for a goEnv clones the main go repo to workpath/go at the provided hash
|
|
||||||
// and returns the path workpath/go/src, the location of all go build scripts.
|
|
||||||
func (env *goEnv) setup(repo *Repo, workpath, hash string, envv []string) (string, error) {
|
|
||||||
goworkpath := filepath.Join(workpath, "go")
|
|
||||||
if err := repo.Export(goworkpath, hash); err != nil {
|
|
||||||
return "", fmt.Errorf("error exporting repository: %s", err)
|
|
||||||
}
|
|
||||||
return filepath.Join(goworkpath, "src"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// gccgoEnv represents the builderEnv for the gccgo compiler.
|
|
||||||
type gccgoEnv struct{}
|
|
||||||
|
|
||||||
// setup for a gccgoEnv clones the gofrontend repo to workpath/go at the hash
|
|
||||||
// and clones the latest GCC branch to repo.Path/gcc. The gccgo sources are
|
|
||||||
// replaced with the updated sources in the gofrontend repo and gcc gets
|
|
||||||
// gets configured and built in workpath/gcc-objdir. The path to
|
|
||||||
// workpath/gcc-objdir is returned.
|
|
||||||
func (env *gccgoEnv) setup(repo *Repo, workpath, hash string, envv []string) (string, error) {
|
|
||||||
gccpath := filepath.Join(repo.Path, "gcc")
|
|
||||||
|
|
||||||
// get a handle to Git vcs.Cmd for pulling down GCC from the mirror.
|
|
||||||
git := vcs.ByCmd("git")
|
|
||||||
|
|
||||||
// only pull down gcc if we don't have a local copy.
|
|
||||||
if _, err := os.Stat(gccpath); err != nil {
|
|
||||||
if err := timeout(*cmdTimeout, func() error {
|
|
||||||
// pull down a working copy of GCC.
|
|
||||||
|
|
||||||
cloneCmd := []string{
|
|
||||||
"clone",
|
|
||||||
// This is just a guess since there are ~6000 commits to
|
|
||||||
// GCC per year. It's likely there will be enough history
|
|
||||||
// to cross-reference the Gofrontend commit against GCC.
|
|
||||||
// The disadvantage would be if the commit being built is more than
|
|
||||||
// a year old; in this case, the user should make a clone that has
|
|
||||||
// the full history.
|
|
||||||
"--depth", "6000",
|
|
||||||
// We only care about the master branch.
|
|
||||||
"--branch", "master", "--single-branch",
|
|
||||||
*gccPath,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clone Kind Clone Time(Dry run) Clone Size
|
|
||||||
// ---------------------------------------------------------------
|
|
||||||
// Full Clone 10 - 15 min 2.2 GiB
|
|
||||||
// Master Branch 2 - 3 min 1.5 GiB
|
|
||||||
// Full Clone(shallow) 1 min 900 MiB
|
|
||||||
// Master Branch(shallow) 40 sec 900 MiB
|
|
||||||
//
|
|
||||||
// The shallow clones have the same size, which is expected,
|
|
||||||
// but the full shallow clone will only have 6000 commits
|
|
||||||
// spread across all branches. There are ~50 branches.
|
|
||||||
return run(exec.Command("git", cloneCmd...), runEnv(envv), allOutput(os.Stdout), runDir(repo.Path))
|
|
||||||
}); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := git.Download(gccpath); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// get the modified files for this commit.
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := run(exec.Command("hg", "status", "--no-status", "--change", hash),
|
|
||||||
allOutput(&buf), runDir(repo.Path), runEnv(envv)); err != nil {
|
|
||||||
return "", fmt.Errorf("Failed to find the modified files for %s: %s", hash, err)
|
|
||||||
}
|
|
||||||
modifiedFiles := strings.Split(buf.String(), "\n")
|
|
||||||
var isMirrored bool
|
|
||||||
for _, f := range modifiedFiles {
|
|
||||||
if strings.HasPrefix(f, "go/") || strings.HasPrefix(f, "libgo/") {
|
|
||||||
isMirrored = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// use git log to find the corresponding commit to sync to in the gcc mirror.
|
|
||||||
// If the files modified in the gofrontend are mirrored to gcc, we expect a
|
|
||||||
// commit with a similar description in the gcc mirror. If the files modified are
|
|
||||||
// not mirrored, e.g. in support/, we can sync to the most recent gcc commit that
|
|
||||||
// occurred before those files were modified to verify gccgo's status at that point.
|
|
||||||
logCmd := []string{
|
|
||||||
"log",
|
|
||||||
"-1",
|
|
||||||
"--format=%H",
|
|
||||||
}
|
|
||||||
var errMsg string
|
|
||||||
if isMirrored {
|
|
||||||
commitDesc, err := repo.Master.VCS.LogAtRev(repo.Path, hash, "{desc|firstline|escape}")
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
quotedDesc := regexp.QuoteMeta(string(commitDesc))
|
|
||||||
logCmd = append(logCmd, "--grep", quotedDesc, "--regexp-ignore-case", "--extended-regexp")
|
|
||||||
errMsg = fmt.Sprintf("Failed to find a commit with a similar description to '%s'", string(commitDesc))
|
|
||||||
} else {
|
|
||||||
commitDate, err := repo.Master.VCS.LogAtRev(repo.Path, hash, "{date|rfc3339date}")
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
logCmd = append(logCmd, "--before", string(commitDate))
|
|
||||||
errMsg = fmt.Sprintf("Failed to find a commit before '%s'", string(commitDate))
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.Reset()
|
|
||||||
if err := run(exec.Command("git", logCmd...), runEnv(envv), allOutput(&buf), runDir(gccpath)); err != nil {
|
|
||||||
return "", fmt.Errorf("%s: %s", errMsg, err)
|
|
||||||
}
|
|
||||||
gccRev := buf.String()
|
|
||||||
if gccRev == "" {
|
|
||||||
return "", fmt.Errorf(errMsg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkout gccRev
|
|
||||||
// TODO(cmang): Fix this to work in parallel mode.
|
|
||||||
if err := run(exec.Command("git", "reset", "--hard", strings.TrimSpace(gccRev)), runEnv(envv), runDir(gccpath)); err != nil {
|
|
||||||
return "", fmt.Errorf("Failed to checkout commit at revision %s: %s", gccRev, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// make objdir to work in
|
|
||||||
gccobjdir := filepath.Join(workpath, "gcc-objdir")
|
|
||||||
if err := os.Mkdir(gccobjdir, mkdirPerm); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// configure GCC with substituted gofrontend and libgo
|
|
||||||
if err := run(exec.Command(filepath.Join(gccpath, "configure"),
|
|
||||||
"--enable-languages=c,c++,go",
|
|
||||||
"--disable-bootstrap",
|
|
||||||
"--disable-multilib",
|
|
||||||
), runEnv(envv), runDir(gccobjdir)); err != nil {
|
|
||||||
return "", fmt.Errorf("Failed to configure GCC: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// build gcc
|
|
||||||
if err := run(exec.Command("make", *gccOpts), runTimeout(*buildTimeout), runEnv(envv), runDir(gccobjdir)); err != nil {
|
|
||||||
return "", fmt.Errorf("Failed to build GCC: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return gccobjdir, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getenvOk(k string) (v string, ok bool) {
|
|
||||||
v = os.Getenv(k)
|
|
||||||
if v != "" {
|
|
||||||
return v, true
|
|
||||||
}
|
|
||||||
keq := k + "="
|
|
||||||
for _, kv := range os.Environ() {
|
|
||||||
if kv == keq {
|
|
||||||
return "", true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
// extraEnv returns environment variables that need to be copied from
|
|
||||||
// the gobuilder's environment to the envv of its subprocesses.
|
|
||||||
func extraEnv() []string {
|
|
||||||
extra := []string{
|
|
||||||
"GOARM",
|
|
||||||
"GO386",
|
|
||||||
"GOROOT_BOOTSTRAP", // See https://golang.org/s/go15bootstrap
|
|
||||||
"CGO_ENABLED",
|
|
||||||
"CC",
|
|
||||||
"CC_FOR_TARGET",
|
|
||||||
"PATH",
|
|
||||||
"TMPDIR",
|
|
||||||
"USER",
|
|
||||||
}
|
|
||||||
if runtime.GOOS == "plan9" {
|
|
||||||
extra = append(extra, "objtype", "cputype", "path")
|
|
||||||
}
|
|
||||||
return extra
|
|
||||||
}
|
|
@ -1,99 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os/exec"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// run runs a command with optional arguments.
|
|
||||||
func run(cmd *exec.Cmd, opts ...runOpt) error {
|
|
||||||
a := runArgs{cmd, *cmdTimeout}
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt.modArgs(&a)
|
|
||||||
}
|
|
||||||
if *verbose {
|
|
||||||
log.Printf("running %v in %v", a.cmd.Args, a.cmd.Dir)
|
|
||||||
}
|
|
||||||
if err := cmd.Start(); err != nil {
|
|
||||||
log.Printf("failed to start command %v: %v", a.cmd.Args, err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err := timeout(a.timeout, cmd.Wait)
|
|
||||||
if _, ok := err.(timeoutError); ok {
|
|
||||||
cmd.Process.Kill()
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Zero or more runOpts can be passed to run to modify the command
// before it's run.
type runOpt interface {
	// modArgs mutates the pending runArgs (command and timeout)
	// before the command is started.
	modArgs(*runArgs)
}
|
|
||||||
|
|
||||||
// allOutput sends both stdout and stderr to w.
|
|
||||||
func allOutput(w io.Writer) optFunc {
|
|
||||||
return func(a *runArgs) {
|
|
||||||
a.cmd.Stdout = w
|
|
||||||
a.cmd.Stderr = w
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func runTimeout(timeout time.Duration) optFunc {
|
|
||||||
return func(a *runArgs) {
|
|
||||||
a.timeout = timeout
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func runDir(dir string) optFunc {
|
|
||||||
return func(a *runArgs) {
|
|
||||||
a.cmd.Dir = dir
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func runEnv(env []string) optFunc {
|
|
||||||
return func(a *runArgs) {
|
|
||||||
a.cmd.Env = env
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// timeout runs f and returns its error value, or if the function does not
|
|
||||||
// complete before the provided duration it returns a timeout error.
|
|
||||||
func timeout(d time.Duration, f func() error) error {
|
|
||||||
errc := make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
errc <- f()
|
|
||||||
}()
|
|
||||||
t := time.NewTimer(d)
|
|
||||||
defer t.Stop()
|
|
||||||
select {
|
|
||||||
case <-t.C:
|
|
||||||
return timeoutError(d)
|
|
||||||
case err := <-errc:
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type timeoutError time.Duration
|
|
||||||
|
|
||||||
func (e timeoutError) Error() string {
|
|
||||||
return fmt.Sprintf("timed out after %v", time.Duration(e))
|
|
||||||
}
|
|
||||||
|
|
||||||
// optFunc implements runOpt with a function, like http.HandlerFunc.
type optFunc func(*runArgs)

// modArgs applies f to a, satisfying the runOpt interface.
func (f optFunc) modArgs(a *runArgs) { f(a) }
|
|
||||||
|
|
||||||
// internal detail to exec.go:
// runArgs carries the command and its effective timeout through run;
// runOpt values mutate it before the command is started.
type runArgs struct {
	cmd     *exec.Cmd     // command to execute
	timeout time.Duration // kill the process after this long
}
|
|
@ -1,66 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build darwin dragonfly freebsd linux netbsd openbsd
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FileMutex is similar to sync.RWMutex, but also synchronizes across processes.
// This implementation is based on flock syscall.
type FileMutex struct {
	mu sync.RWMutex // in-process lock; flock alone only excludes other processes
	fd int          // flock'd descriptor, or -1 when inter-process locking is disabled
}

// MakeFileMutex returns a FileMutex that locks on filename.
// An empty filename yields a mutex that synchronizes in-process only.
// It panics if the lock file cannot be opened or created.
func MakeFileMutex(filename string) *FileMutex {
	if filename == "" {
		return &FileMutex{fd: -1}
	}
	// mkdirPerm (0750) is reused here as the create mode for the lock file.
	fd, err := syscall.Open(filename, syscall.O_CREAT|syscall.O_RDONLY, mkdirPerm)
	if err != nil {
		panic(err)
	}
	return &FileMutex{fd: fd}
}

// Lock takes the in-process write lock, then the exclusive flock.
// Lock order (mu before flock) mirrors the reverse order in Unlock.
func (m *FileMutex) Lock() {
	m.mu.Lock()
	if m.fd != -1 {
		if err := syscall.Flock(m.fd, syscall.LOCK_EX); err != nil {
			panic(err)
		}
	}
}

// Unlock drops the flock first, then the in-process write lock.
func (m *FileMutex) Unlock() {
	if m.fd != -1 {
		if err := syscall.Flock(m.fd, syscall.LOCK_UN); err != nil {
			panic(err)
		}
	}
	m.mu.Unlock()
}

// RLock takes the in-process read lock, then a shared flock.
func (m *FileMutex) RLock() {
	m.mu.RLock()
	if m.fd != -1 {
		if err := syscall.Flock(m.fd, syscall.LOCK_SH); err != nil {
			panic(err)
		}
	}
}

// RUnlock drops the flock, then the in-process read lock.
// NOTE(review): with multiple in-process readers, the first RUnlock
// releases the shared flock for all of them — presumably acceptable for
// this tool's usage; confirm before reuse.
func (m *FileMutex) RUnlock() {
	if m.fd != -1 {
		if err := syscall.Flock(m.fd, syscall.LOCK_UN); err != nil {
			panic(err)
		}
	}
	m.mu.RUnlock()
}
|
|
@ -1,27 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build nacl plan9 solaris
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FileMutex is similar to sync.RWMutex, but also synchronizes across processes.
// This implementation is a fallback that does not actually provide inter-process synchronization.
type FileMutex struct {
	sync.RWMutex // in-process locking only on nacl/plan9/solaris
}

// MakeFileMutex ignores filename on platforms without flock support.
func MakeFileMutex(filename string) *FileMutex {
	return &FileMutex{}
}

// init warns loudly that concurrent builder processes are unsafe here,
// since the returned mutex cannot exclude other processes.
func init() {
	log.Printf("WARNING: using fake file mutex." +
		" Don't run more than one of these at once!!!")
}
|
|
@ -1,105 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Lazily-loaded kernel32 procs used to emulate flock on Windows.
var (
	modkernel32      = syscall.NewLazyDLL("kernel32.dll")
	procLockFileEx   = modkernel32.NewProc("LockFileEx")
	procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)

const (
	INVALID_FILE_HANDLE     = ^syscall.Handle(0) // sentinel for "no lock file"
	LOCKFILE_EXCLUSIVE_LOCK = 2                  // LockFileEx flag for a write lock
)

// lockFileEx wraps the LockFileEx Win32 call; a zero return value is
// converted into a Go error (EINVAL when no errno was reported).
func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
	if r1 == 0 {
		if e1 != 0 {
			err = error(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}

// unlockFileEx wraps the UnlockFileEx Win32 call with the same error
// conversion as lockFileEx.
func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
	if r1 == 0 {
		if e1 != 0 {
			err = error(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}

// FileMutex is similar to sync.RWMutex, but also synchronizes across processes.
// This implementation is based on flock syscall.
type FileMutex struct {
	mu sync.RWMutex   // in-process lock
	fd syscall.Handle // lock-file handle, or INVALID_FILE_HANDLE when disabled
}

// MakeFileMutex returns a FileMutex locking on filename; an empty
// filename disables inter-process locking. Panics if the file cannot
// be opened.
func MakeFileMutex(filename string) *FileMutex {
	if filename == "" {
		return &FileMutex{fd: INVALID_FILE_HANDLE}
	}
	fd, err := syscall.CreateFile(&(syscall.StringToUTF16(filename)[0]), syscall.GENERIC_READ|syscall.GENERIC_WRITE,
		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)
	if err != nil {
		panic(err)
	}
	return &FileMutex{fd: fd}
}

// Lock takes the in-process write lock, then an exclusive one-byte
// region lock on the file.
func (m *FileMutex) Lock() {
	m.mu.Lock()
	if m.fd != INVALID_FILE_HANDLE {
		var ol syscall.Overlapped
		if err := lockFileEx(m.fd, LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &ol); err != nil {
			panic(err)
		}
	}
}

// Unlock releases the file region lock, then the in-process lock.
func (m *FileMutex) Unlock() {
	if m.fd != INVALID_FILE_HANDLE {
		var ol syscall.Overlapped
		if err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil {
			panic(err)
		}
	}
	m.mu.Unlock()
}

// RLock takes the in-process read lock, then a shared (flags == 0)
// region lock on the file.
func (m *FileMutex) RLock() {
	m.mu.RLock()
	if m.fd != INVALID_FILE_HANDLE {
		var ol syscall.Overlapped
		if err := lockFileEx(m.fd, 0, 0, 1, 0, &ol); err != nil {
			panic(err)
		}
	}
}

// RUnlock releases the region lock, then the in-process read lock.
func (m *FileMutex) RUnlock() {
	if m.fd != INVALID_FILE_HANDLE {
		var ol syscall.Overlapped
		if err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil {
			panic(err)
		}
	}
	m.mu.RUnlock()
}
|
|
@ -1,225 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const builderVersion = 1 // keep in sync with dashboard/app/build/handler.go

// obj is shorthand for ad-hoc JSON request bodies.
type obj map[string]interface{}

// dash runs the given method and command on the dashboard.
// If args is non-nil it is encoded as the URL query string.
// If req is non-nil it is JSON-encoded and passed as the body of the HTTP POST.
// If resp is non-nil the server's response is decoded into the value pointed
// to by resp (resp must be a pointer).
func dash(meth, cmd string, args url.Values, req, resp interface{}) error {
	// Always send the protocol version; callers may not override it.
	argsCopy := url.Values{"version": {fmt.Sprint(builderVersion)}}
	for k, v := range args {
		if k == "version" {
			panic(`dash: reserved args key: "version"`)
		}
		argsCopy[k] = v
	}
	var r *http.Response
	var err error
	if *verbose {
		log.Println("dash <-", meth, cmd, argsCopy, req)
	}
	// cmd becomes the full request URL from here on.
	cmd = *dashboard + "/" + cmd + "?" + argsCopy.Encode()
	switch meth {
	case "GET":
		if req != nil {
			log.Panicf("%s to %s with req", meth, cmd)
		}
		r, err = http.Get(cmd)
	case "POST":
		var body io.Reader
		if req != nil {
			b, err := json.Marshal(req)
			if err != nil {
				return err
			}
			body = bytes.NewBuffer(b)
		}
		r, err = http.Post(cmd, "text/json", body)
	default:
		log.Panicf("%s: invalid method %q", cmd, meth)
		// NOTE(review): unreachable — log.Panicf already panics.
		panic("invalid method: " + meth)
	}
	if err != nil {
		return err
	}
	defer r.Body.Close()
	if r.StatusCode != http.StatusOK {
		return fmt.Errorf("bad http response: %v", r.Status)
	}
	body := new(bytes.Buffer)
	if _, err := body.ReadFrom(r.Body); err != nil {
		return err
	}

	// Read JSON-encoded Response into provided resp
	// and return an error if present.
	var result = struct {
		Response interface{}
		Error    string
	}{
		// Put the provided resp in here as it can be a pointer to
		// some value we should unmarshal into.
		Response: resp,
	}
	if err = json.Unmarshal(body.Bytes(), &result); err != nil {
		log.Printf("json unmarshal %#q: %s\n", body.Bytes(), err)
		return err
	}
	if *verbose {
		log.Println("dash ->", result)
	}
	if result.Error != "" {
		return errors.New(result.Error)
	}

	return nil
}
|
|
||||||
|
|
||||||
// todo returns the next hash to build or benchmark.
// kinds restricts which work kinds the dashboard may return; empty
// rev/benchs plus nil err means there is nothing to do right now.
func (b *Builder) todo(kinds []string, pkg, goHash string) (kind, rev string, benchs []string, err error) {
	args := url.Values{
		"builder":     {b.name},
		"packagePath": {pkg},
		"goHash":      {goHash},
	}
	for _, k := range kinds {
		args.Add("kind", k)
	}
	// resp stays nil when the dashboard returns an empty response body.
	var resp *struct {
		Kind string
		Data struct {
			Hash        string
			PerfResults []string
		}
	}
	if err = dash("GET", "todo", args, nil, &resp); err != nil {
		return
	}
	if resp == nil {
		return
	}
	if *verbose {
		fmt.Printf("dash resp: %+v\n", *resp)
	}
	// Only accept a kind we actually asked for.
	for _, k := range kinds {
		if k == resp.Kind {
			return resp.Kind, resp.Data.Hash, resp.Data.PerfResults, nil
		}
	}
	err = fmt.Errorf("expecting Kinds %q, got %q", kinds, resp.Kind)
	return
}
|
|
||||||
|
|
||||||
// recordResult sends build results to the dashboard.
// It is a no-op when the -report flag is disabled.
func (b *Builder) recordResult(ok bool, pkg, hash, goHash, buildLog string, runTime time.Duration) error {
	if !*report {
		return nil
	}
	req := obj{
		"Builder":     b.name,
		"PackagePath": pkg,
		"Hash":        hash,
		"GoHash":      goHash,
		"OK":          ok,
		"Log":         buildLog,
		"RunTime":     runTime,
	}
	args := url.Values{"key": {b.key}, "builder": {b.name}}
	return dash("POST", "result", args, req, nil)
}
|
|
||||||
|
|
||||||
// PerfResult is the result of running a single benchmark on a single commit.
type PerfResult struct {
	Builder   string // builder name, filled in by recordPerfResult
	Benchmark string
	Hash      string // commit being benchmarked
	OK        bool
	Metrics   []PerfMetric
	Artifacts []PerfArtifact
}

// PerfMetric is one measured value (e.g. a timing or allocation count).
type PerfMetric struct {
	Type string
	Val  uint64
}

// PerfArtifact is auxiliary benchmark output (e.g. logs or profiles).
type PerfArtifact struct {
	Type string
	Body string
}
|
|
||||||
|
|
||||||
// recordPerfResult sends benchmarking results to the dashboard.
// It is a no-op when the -report flag is disabled; req.Builder is
// overwritten with this builder's name.
func (b *Builder) recordPerfResult(req *PerfResult) error {
	if !*report {
		return nil
	}
	req.Builder = b.name
	args := url.Values{"key": {b.key}, "builder": {b.name}}
	return dash("POST", "perf-result", args, req, nil)
}
|
|
||||||
|
|
||||||
func postCommit(key, pkg string, l *HgLog) error {
|
|
||||||
if !*report {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
t, err := time.Parse(time.RFC3339, l.Date)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("parsing %q: %v", l.Date, t)
|
|
||||||
}
|
|
||||||
return dash("POST", "commit", url.Values{"key": {key}}, obj{
|
|
||||||
"PackagePath": pkg,
|
|
||||||
"Hash": l.Hash,
|
|
||||||
"ParentHash": l.Parent,
|
|
||||||
"Time": t.Format(time.RFC3339),
|
|
||||||
"User": l.Author,
|
|
||||||
"Desc": l.Desc,
|
|
||||||
"NeedsBenchmarking": l.bench,
|
|
||||||
}, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func dashboardCommit(pkg, hash string) bool {
|
|
||||||
err := dash("GET", "commit", url.Values{
|
|
||||||
"packagePath": {pkg},
|
|
||||||
"hash": {hash},
|
|
||||||
}, nil, nil)
|
|
||||||
return err == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func dashboardPackages(kind string) []string {
|
|
||||||
args := url.Values{"kind": []string{kind}}
|
|
||||||
var resp []struct {
|
|
||||||
Path string
|
|
||||||
}
|
|
||||||
if err := dash("GET", "packages", args, nil, &resp); err != nil {
|
|
||||||
log.Println("dashboardPackages:", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if *verbose {
|
|
||||||
fmt.Printf("dash resp: %+v\n", resp)
|
|
||||||
}
|
|
||||||
var pkgs []string
|
|
||||||
for _, r := range resp {
|
|
||||||
pkgs = append(pkgs, r.Path)
|
|
||||||
}
|
|
||||||
return pkgs
|
|
||||||
}
|
|
@ -1,679 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/vcs"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Builder-wide configuration constants.
const (
	codeProject          = "go"
	codePyScript         = "misc/dashboard/googlecode_upload.py"
	gofrontendImportPath = "code.google.com/p/gofrontend"
	mkdirPerm            = 0750              // mode for created work dirs (and the flock file)
	waitInterval         = 30 * time.Second  // time to wait before checking for new revs
	pkgBuildInterval     = 24 * time.Hour    // rebuild packages every 24 hours
)
|
|
||||||
|
|
||||||
// Builder is one build/benchmark target (e.g. "linux-amd64"), bound to
// a shared goroot repository and a dashboard access key.
type Builder struct {
	goroot       *Repo      // shared Go checkout used as clone source
	name         string     // builder name, "<goos>-<goarch>[-suffix]"
	goos, goarch string     // parsed from name by builderEnv
	key          string     // dashboard auth key, loaded by setKey
	env          builderEnv // tool-specific environment (go or gccgo)
	// Last benchmarking workpath. We reuse it, if do successive benchmarks on the same commit.
	lastWorkpath string
}
|
|
||||||
|
|
||||||
// Command-line flags controlling what the builder does and how.
var (
	doBuild       = flag.Bool("build", true, "Build and test packages")
	doBench       = flag.Bool("bench", false, "Run benchmarks")
	buildroot     = flag.String("buildroot", defaultBuildRoot(), "Directory under which to build")
	dashboard     = flag.String("dashboard", "https://build.golang.org", "Dashboard app base path")
	buildRelease  = flag.Bool("release", false, "Build and upload binary release archives")
	buildRevision = flag.String("rev", "", "Build specified revision and exit")
	buildCmd      = flag.String("cmd", filepath.Join(".", allCmd), "Build command (specify relative to go/src/)")
	buildTool     = flag.String("tool", "go", "Tool to build.")
	gcPath        = flag.String("gcpath", "go.googlesource.com/go", "Path to download gc from")
	gccPath       = flag.String("gccpath", "https://github.com/mirrors/gcc.git", "Path to download gcc from")
	gccOpts       = flag.String("gccopts", "", "Command-line options to pass to `make` when building gccgo")
	benchPath     = flag.String("benchpath", "golang.org/x/benchmarks/bench", "Path to download benchmarks from")
	failAll       = flag.Bool("fail", false, "fail all builds")
	parallel      = flag.Bool("parallel", false, "Build multiple targets in parallel")
	buildTimeout  = flag.Duration("buildTimeout", 60*time.Minute, "Maximum time to wait for builds and tests")
	cmdTimeout    = flag.Duration("cmdTimeout", 10*time.Minute, "Maximum time to wait for an external command")
	benchNum      = flag.Int("benchnum", 5, "Run each benchmark that many times")
	benchTime     = flag.Duration("benchtime", 5*time.Second, "Benchmarking time for a single benchmark run")
	benchMem      = flag.Int("benchmem", 64, "Approx RSS value to aim at in benchmarks, in MB")
	fileLock      = flag.String("filelock", "", "File to lock around benchmaring (synchronizes several builders)")
	verbose       = flag.Bool("v", false, "verbose")
	report        = flag.Bool("report", true, "whether to report results to the dashboard")
)
|
|
||||||
|
|
||||||
// Derived names and shared state. The *Cmd script names pick up the
// platform script suffix (e.g. ".bash", ".bat") from defaultSuffix.
var (
	binaryTagRe = regexp.MustCompile(`^(release\.r|weekly\.)[0-9\-.]+`)
	releaseRe   = regexp.MustCompile(`^release\.r[0-9\-.]+`)
	allCmd      = "all" + suffix
	makeCmd     = "make" + suffix
	raceCmd     = "race" + suffix
	cleanCmd    = "clean" + suffix
	suffix      = defaultSuffix()
	exeExt      = defaultExeExt()

	benchCPU      = CpuList([]int{1}) // GOMAXPROCS values to benchmark at
	benchAffinity = CpuList([]int{})  // CPU affinity values for benchmarking
	benchMutex    *FileMutex          // Isolates benchmarks from other activities
)
|
|
||||||
|
|
||||||
// CpuList is used as flag.Value for -benchcpu flag.
type CpuList []int

// String renders the list as comma-separated integers, e.g. "1,2,4".
func (cl *CpuList) String() string {
	parts := make([]string, 0, len(*cl))
	for _, cpu := range *cl {
		parts = append(parts, strconv.Itoa(cpu))
	}
	return strings.Join(parts, ",")
}

// Set parses a comma-delimited list of positive integers, ignoring
// blank entries. An input with no values yields the default {1}.
func (cl *CpuList) Set(str string) error {
	*cl = []int{}
	for _, field := range strings.Split(str, ",") {
		field = strings.TrimSpace(field)
		if field == "" {
			continue
		}
		cpu, err := strconv.Atoi(field)
		if err != nil || cpu <= 0 {
			return fmt.Errorf("%v is a bad value for GOMAXPROCS", field)
		}
		*cl = append(*cl, cpu)
	}
	if len(*cl) == 0 {
		*cl = append(*cl, 1)
	}
	return nil
}
|
|
||||||
|
|
||||||
// main parses flags, prepares (or reuses) the goroot work tree, creates
// one Builder per "goos-goarch" argument, then either fails all builds
// (-fail), builds one revision (-rev), or enters the continuous
// build/benchmark polling loop.
func main() {
	flag.Var(&benchCPU, "benchcpu", "Comma-delimited list of GOMAXPROCS values for benchmarking")
	flag.Var(&benchAffinity, "benchaffinity", "Comma-delimited list of affinity values for benchmarking")
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "usage: %s goos-goarch...\n", os.Args[0])
		flag.PrintDefaults()
		os.Exit(2)
	}
	flag.Parse()
	if len(flag.Args()) == 0 {
		flag.Usage()
	}

	vcs.ShowCmd = *verbose
	vcs.Verbose = *verbose

	benchMutex = MakeFileMutex(*fileLock)

	rr, err := repoForTool()
	if err != nil {
		log.Fatal("Error finding repository:", err)
	}
	rootPath := filepath.Join(*buildroot, "goroot")
	goroot := &Repo{
		Path:   rootPath,
		Master: rr,
	}

	// set up work environment, use existing environment if possible
	if goroot.Exists() || *failAll {
		log.Print("Found old workspace, will use it")
	} else {
		if err := os.RemoveAll(*buildroot); err != nil {
			log.Fatalf("Error removing build root (%s): %s", *buildroot, err)
		}
		if err := os.Mkdir(*buildroot, mkdirPerm); err != nil {
			log.Fatalf("Error making build root (%s): %s", *buildroot, err)
		}
		// Shadows the outer err deliberately; goroot is replaced by
		// the fresh clone below.
		var err error
		goroot, err = RemoteRepo(goroot.Master.Root, rootPath)
		if err != nil {
			log.Fatalf("Error creating repository with url (%s): %s", goroot.Master.Root, err)
		}

		goroot, err = goroot.Clone(goroot.Path, "")
		if err != nil {
			log.Fatal("Error cloning repository:", err)
		}
	}

	// set up builders
	builders := make([]*Builder, len(flag.Args()))
	for i, name := range flag.Args() {
		b, err := NewBuilder(goroot, name)
		if err != nil {
			log.Fatal(err)
		}
		builders[i] = b
	}

	if *failAll {
		failMode(builders)
		return
	}

	// if specified, build revision and return
	if *buildRevision != "" {
		hash, err := goroot.FullHash(*buildRevision)
		if err != nil {
			log.Fatal("Error finding revision: ", err)
		}
		var exitErr error
		for _, b := range builders {
			if err := b.buildHash(hash); err != nil {
				log.Println(err)
				exitErr = err
			}
		}
		if exitErr != nil && !*report {
			// This mode (-report=false) is used for
			// testing Docker images, making sure the
			// environment is correctly configured. For
			// testing, we want a non-zero exit status, as
			// returned by log.Fatal:
			log.Fatal("Build error.")
		}
		return
	}

	if !*doBuild && !*doBench {
		fmt.Fprintf(os.Stderr, "Nothing to do, exiting (specify either -build or -bench or both)\n")
		os.Exit(2)
	}

	// go continuous build mode
	// check for new commits and build them
	benchMutex.RLock()
	for {
		built := false
		t := time.Now()
		if *parallel {
			done := make(chan bool)
			for _, b := range builders {
				go func(b *Builder) {
					done <- b.buildOrBench()
				}(b)
			}
			for _ = range builders {
				built = <-done || built
			}
		} else {
			for _, b := range builders {
				built = b.buildOrBench() || built
			}
		}
		// sleep if there was nothing to build
		benchMutex.RUnlock()
		if !built {
			time.Sleep(waitInterval)
		}
		benchMutex.RLock()
		// sleep if we're looping too fast.
		dt := time.Now().Sub(t)
		if dt < waitInterval {
			time.Sleep(waitInterval - dt)
		}
	}
}
|
|
||||||
|
|
||||||
// go continuous fail mode
|
|
||||||
// check for new commits and FAIL them
|
|
||||||
func failMode(builders []*Builder) {
|
|
||||||
for {
|
|
||||||
built := false
|
|
||||||
for _, b := range builders {
|
|
||||||
built = b.failBuild() || built
|
|
||||||
}
|
|
||||||
// stop if there was nothing to fail
|
|
||||||
if !built {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBuilder creates a Builder named name (form "goos-goarch[-suffix]")
// backed by the shared goroot repo, resolving its tool environment and,
// when reporting is enabled, its dashboard key.
func NewBuilder(goroot *Repo, name string) (*Builder, error) {
	b := &Builder{
		goroot: goroot,
		name:   name,
	}

	// get builderEnv for this tool
	var err error
	if b.env, err = b.builderEnv(name); err != nil {
		return nil, err
	}
	if *report {
		err = b.setKey()
	}
	return b, err
}
|
|
||||||
|
|
||||||
// setKey loads the dashboard auth key into b.key, preferring a
// builder-specific ~/.gobuildkey-<name> file, then ~/.gobuildkey, then
// GCE project metadata. Only the first line of the file is used.
func (b *Builder) setKey() error {
	// read keys from keyfile
	fn := ""
	switch runtime.GOOS {
	case "plan9":
		fn = os.Getenv("home")
	case "windows":
		fn = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
	default:
		fn = os.Getenv("HOME")
	}
	fn = filepath.Join(fn, ".gobuildkey")
	if s := fn + "-" + b.name; isFile(s) { // builder-specific file
		fn = s
	}
	c, err := ioutil.ReadFile(fn)
	if err != nil {
		// If the on-disk file doesn't exist, also try the
		// Google Compute Engine metadata.
		if v := gceProjectMetadata("buildkey-" + b.name); v != "" {
			b.key = v
			return nil
		}
		return fmt.Errorf("readKeys %s (%s): %s", b.name, fn, err)
	}
	b.key = string(bytes.TrimSpace(bytes.SplitN(c, []byte("\n"), 2)[0]))
	return nil
}
|
|
||||||
|
|
||||||
// gceProjectMetadata fetches the named project attribute from the GCE
// metadata service, returning "" on any failure (not on GCE, timeout,
// missing attribute). Timeouts are short so non-GCE hosts fail fast.
func gceProjectMetadata(attr string) string {
	client := &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   750 * time.Millisecond,
				KeepAlive: 30 * time.Second,
			}).Dial,
			ResponseHeaderTimeout: 750 * time.Millisecond,
		},
	}
	req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/attributes/"+attr, nil)
	// Required header; the metadata server rejects requests without it.
	req.Header.Set("Metadata-Flavor", "Google")
	res, err := client.Do(req)
	if err != nil {
		return ""
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return ""
	}
	slurp, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return ""
	}
	return string(bytes.TrimSpace(slurp))
}
|
|
||||||
|
|
||||||
// builderEnv returns the builderEnv for this buildTool.
// As a side effect it parses b.goos/b.goarch out of the builder name
// ("goos-goarch" or "goos-goarch-suffix").
func (b *Builder) builderEnv(name string) (builderEnv, error) {
	// get goos/goarch from builder string
	s := strings.SplitN(b.name, "-", 3)
	if len(s) < 2 {
		return nil, fmt.Errorf("unsupported builder form: %s", name)
	}
	b.goos = s[0]
	b.goarch = s[1]

	switch *buildTool {
	case "go":
		return &goEnv{
			goos:   s[0],
			goarch: s[1],
		}, nil
	case "gccgo":
		return &gccgoEnv{}, nil
	default:
		return nil, fmt.Errorf("unsupported build tool: %s", *buildTool)
	}
}
|
|
||||||
|
|
||||||
// buildCmd returns the build command to invoke.
|
|
||||||
// Builders which contain the string '-race' in their
|
|
||||||
// name will override *buildCmd and return raceCmd.
|
|
||||||
func (b *Builder) buildCmd() string {
|
|
||||||
if strings.Contains(b.name, "-race") {
|
|
||||||
return raceCmd
|
|
||||||
}
|
|
||||||
return *buildCmd
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildOrBench checks for a new commit for this builder
// and builds or benchmarks it if one is found.
// It returns true if a build/benchmark was attempted.
func (b *Builder) buildOrBench() bool {
	// Ask the dashboard only for the work kinds enabled by flags.
	var kinds []string
	if *doBuild {
		kinds = append(kinds, "build-go-commit")
	}
	if *doBench {
		kinds = append(kinds, "benchmark-go-commit")
	}
	kind, hash, benchs, err := b.todo(kinds, "", "")
	if err != nil {
		log.Println(err)
		return false
	}
	// Empty hash means no pending work.
	if hash == "" {
		return false
	}
	switch kind {
	case "build-go-commit":
		if err := b.buildHash(hash); err != nil {
			log.Println(err)
		}
		return true
	case "benchmark-go-commit":
		if err := b.benchHash(hash, benchs); err != nil {
			log.Println(err)
		}
		return true
	default:
		log.Printf("Unknown todo kind %v", kind)
		return false
	}
}
|
|
||||||
|
|
||||||
// buildHash builds the Go commit hash in a fresh work directory,
// reports the result to the dashboard, and (for the go tool) builds
// the sub-repositories against it. The work dir is removed on return.
func (b *Builder) buildHash(hash string) error {
	log.Println(b.name, "building", hash)

	// create place in which to do work
	workpath := filepath.Join(*buildroot, b.name+"-"+hash[:12])
	if err := os.Mkdir(workpath, mkdirPerm); err != nil {
		// Mkdir can fail because a stale dir exists: remove and retry once.
		if err2 := removePath(workpath); err2 != nil {
			return err
		}
		if err := os.Mkdir(workpath, mkdirPerm); err != nil {
			return err
		}
	}
	defer removePath(workpath)

	buildLog, runTime, err := b.buildRepoOnHash(workpath, hash, b.buildCmd())
	if err != nil {
		log.Printf("%s failed at %v: %v", b.name, hash, err)
		// record failure
		return b.recordResult(false, "", hash, "", buildLog, runTime)
	}

	// record success
	if err = b.recordResult(true, "", hash, "", "", runTime); err != nil {
		return fmt.Errorf("recordResult: %s", err)
	}

	if *buildTool == "go" {
		// build sub-repositories
		goRoot := filepath.Join(workpath, *buildTool)
		goPath := workpath
		b.buildSubrepos(goRoot, goPath, hash)
	}

	return nil
}
|
|
||||||
|
|
||||||
// buildRepoOnHash clones repo into workpath and builds it.
// It returns the captured build output, the duration of the build
// command, and any error. The output is also mirrored to
// workpath/build.log (and, with -verbose, to stdout/stderr).
func (b *Builder) buildRepoOnHash(workpath, hash, cmd string) (buildLog string, runTime time.Duration, err error) {
	// Delete the previous workdir, if necessary
	// (benchmarking code can execute several benchmarks in the same workpath).
	if b.lastWorkpath != "" {
		if b.lastWorkpath == workpath {
			panic("workpath already exists: " + workpath)
		}
		removePath(b.lastWorkpath)
		b.lastWorkpath = ""
	}

	// pull before cloning to ensure we have the revision
	if err = b.goroot.Pull(); err != nil {
		buildLog = err.Error()
		return
	}

	// set up builder's environment.
	srcDir, err := b.env.setup(b.goroot, workpath, hash, b.envv())
	if err != nil {
		buildLog = err.Error()
		return
	}

	// build
	var buildbuf bytes.Buffer
	logfile := filepath.Join(workpath, "build.log")
	f, err := os.Create(logfile)
	if err != nil {
		return err.Error(), 0, err
	}
	defer f.Close()
	// Tee all command output to both the on-disk log and an in-memory
	// buffer (the latter is what gets returned/recorded).
	w := io.MultiWriter(f, &buildbuf)

	// go's build command is a script relative to the srcDir, whereas
	// gccgo's build command is usually "make check-go" in the srcDir.
	if *buildTool == "go" {
		if !filepath.IsAbs(cmd) {
			cmd = filepath.Join(srcDir, cmd)
		}
	}

	// naive splitting of command from its arguments:
	args := strings.Split(cmd, " ")
	c := exec.Command(args[0], args[1:]...)
	c.Dir = srcDir
	c.Env = b.envv()
	if *verbose {
		c.Stdout = io.MultiWriter(os.Stdout, w)
		c.Stderr = io.MultiWriter(os.Stderr, w)
	} else {
		c.Stdout = w
		c.Stderr = w
	}

	startTime := time.Now()
	err = run(c, runTimeout(*buildTimeout))
	runTime = time.Since(startTime)
	if err != nil {
		fmt.Fprintf(w, "Build complete, duration %v. Result: error: %v\n", runTime, err)
	} else {
		fmt.Fprintf(w, "Build complete, duration %v. Result: success\n", runTime)
	}
	return buildbuf.String(), runTime, err
}
|
|
||||||
|
|
||||||
// failBuild checks for a new commit for this builder
// and fails it if one is found.
// It returns true if a build was "attempted".
// Used in auto-fail mode to mark pending commits as failed without
// actually building them.
func (b *Builder) failBuild() bool {
	_, hash, _, err := b.todo([]string{"build-go-commit"}, "", "")
	if err != nil {
		log.Println(err)
		return false
	}
	// An empty hash means the dashboard has nothing pending for us.
	if hash == "" {
		return false
	}

	log.Printf("fail %s %s\n", b.name, hash)

	if err := b.recordResult(false, "", hash, "", "auto-fail mode run by "+os.Getenv("USER"), 0); err != nil {
		log.Print(err)
	}
	return true
}
|
|
||||||
|
|
||||||
// buildSubrepos builds and tests each dashboard sub-repository
// package against the Go toolchain at goRoot/goHash, recording every
// result on the dashboard. Failures are logged and do not stop the
// remaining packages from being built.
func (b *Builder) buildSubrepos(goRoot, goPath, goHash string) {
	for _, pkg := range dashboardPackages("subrepo") {
		// get the latest todo for this package
		_, hash, _, err := b.todo([]string{"build-package"}, pkg, goHash)
		if err != nil {
			log.Printf("buildSubrepos %s: %v", pkg, err)
			continue
		}
		if hash == "" {
			continue
		}

		// build the package
		if *verbose {
			log.Printf("buildSubrepos %s: building %q", pkg, hash)
		}
		buildLog, err := b.buildSubrepo(goRoot, goPath, pkg, hash)
		if err != nil {
			// Make sure a failure always has something in its log.
			if buildLog == "" {
				buildLog = err.Error()
			}
			log.Printf("buildSubrepos %s: %v", pkg, err)
		}

		// record the result
		err = b.recordResult(err == nil, pkg, hash, goHash, buildLog, 0)
		if err != nil {
			log.Printf("buildSubrepos %s: %v", pkg, err)
		}
	}
}
|
|
||||||
|
|
||||||
// buildSubrepo fetches the given package, updates it to the specified hash,
// and runs 'go test -short pkg/...'. It returns the build log and any error.
// The go tool from goRoot is used, with GOROOT/GOPATH overridden in the
// environment.
func (b *Builder) buildSubrepo(goRoot, goPath, pkg, hash string) (string, error) {
	goTool := filepath.Join(goRoot, "bin", "go") + exeExt
	env := append(b.envv(), "GOROOT="+goRoot, "GOPATH="+goPath)

	// add $GOROOT/bin and $GOPATH/bin to PATH
	for i, e := range env {
		const p = "PATH="
		if !strings.HasPrefix(e, p) {
			continue
		}
		sep := string(os.PathListSeparator)
		env[i] = p + filepath.Join(goRoot, "bin") + sep + filepath.Join(goPath, "bin") + sep + e[len(p):]
	}

	// HACK: check out to new sub-repo location instead of old location.
	pkg = strings.Replace(pkg, "code.google.com/p/go.", "golang.org/x/", 1)

	// fetch package and dependencies
	var outbuf bytes.Buffer
	err := run(exec.Command(goTool, "get", "-d", pkg+"/..."), runEnv(env), allOutput(&outbuf), runDir(goPath))
	if err != nil {
		return outbuf.String(), err
	}
	// Reuse the buffer for the test run below.
	outbuf.Reset()

	// hg update to the specified hash
	pkgmaster, err := vcs.RepoRootForImportPath(pkg, *verbose)
	if err != nil {
		return "", fmt.Errorf("Error finding subrepo (%s): %s", pkg, err)
	}
	repo := &Repo{
		Path:   filepath.Join(goPath, "src", pkg),
		Master: pkgmaster,
	}
	if err := repo.UpdateTo(hash); err != nil {
		return "", err
	}

	// test the package
	err = run(exec.Command(goTool, "test", "-short", pkg+"/..."),
		runTimeout(*buildTimeout), runEnv(env), allOutput(&outbuf), runDir(goPath))
	return outbuf.String(), err
}
|
|
||||||
|
|
||||||
// repoForTool returns the correct RepoRoot for the buildTool, or an error if
// the tool is unknown. "go" uses the -gcpath import path; "gccgo" uses
// the gofrontend repository.
func repoForTool() (*vcs.RepoRoot, error) {
	switch *buildTool {
	case "go":
		return vcs.RepoRootForImportPath(*gcPath, *verbose)
	case "gccgo":
		return vcs.RepoRootForImportPath(gofrontendImportPath, *verbose)
	default:
		return nil, fmt.Errorf("unknown build tool: %s", *buildTool)
	}
}
|
|
||||||
|
|
||||||
func isDirectory(name string) bool {
|
|
||||||
s, err := os.Stat(name)
|
|
||||||
return err == nil && s.IsDir()
|
|
||||||
}
|
|
||||||
|
|
||||||
func isFile(name string) bool {
|
|
||||||
s, err := os.Stat(name)
|
|
||||||
return err == nil && !s.IsDir()
|
|
||||||
}
|
|
||||||
|
|
||||||
// defaultSuffix returns file extension used for command files in
|
|
||||||
// current os environment.
|
|
||||||
func defaultSuffix() string {
|
|
||||||
switch runtime.GOOS {
|
|
||||||
case "windows":
|
|
||||||
return ".bat"
|
|
||||||
case "plan9":
|
|
||||||
return ".rc"
|
|
||||||
default:
|
|
||||||
return ".bash"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func defaultExeExt() string {
|
|
||||||
switch runtime.GOOS {
|
|
||||||
case "windows":
|
|
||||||
return ".exe"
|
|
||||||
default:
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// defaultBuildRoot returns default buildroot directory.
|
|
||||||
func defaultBuildRoot() string {
|
|
||||||
var d string
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
// will use c:\, otherwise absolute paths become too long
|
|
||||||
// during builder run, see http://golang.org/issue/3358.
|
|
||||||
d = `c:\`
|
|
||||||
} else {
|
|
||||||
d = os.TempDir()
|
|
||||||
}
|
|
||||||
return filepath.Join(d, "gobuilder")
|
|
||||||
}
|
|
||||||
|
|
||||||
// removePath is a more robust version of os.RemoveAll.
|
|
||||||
// On windows, if remove fails (which can happen if test/benchmark timeouts
|
|
||||||
// and keeps some files open) it tries to rename the dir.
|
|
||||||
func removePath(path string) error {
|
|
||||||
if err := os.RemoveAll(path); err != nil {
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
err = os.Rename(path, filepath.Clean(path)+"_remove_me")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
@ -1,225 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/xml"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/vcs"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Repo represents a mercurial (or git) repository checkout on disk.
// The embedded mutex serializes VCS command execution against the
// working copy.
type Repo struct {
	Path   string        // local filesystem path of the checkout
	Master *vcs.RepoRoot // VCS metadata for the upstream repository
	sync.Mutex
}
|
|
||||||
|
|
||||||
// RemoteRepo constructs a *Repo representing a remote repository.
// url is resolved as an import path via the vcs package; path is the
// local directory the repo will (eventually) live in. No network or
// disk activity happens here beyond the import-path resolution.
func RemoteRepo(url, path string) (*Repo, error) {
	rr, err := vcs.RepoRootForImportPath(url, *verbose)
	if err != nil {
		return nil, err
	}
	return &Repo{
		Path:   path,
		Master: rr,
	}, nil
}
|
|
||||||
|
|
||||||
// Clone clones the current Repo to a new destination
// returning a new *Repo if successful.
// If rev is non-empty the clone is created at that revision.
// If the local checkout does not exist yet, the clone is made directly
// from the upstream (Master) repository instead.
func (r *Repo) Clone(path, rev string) (*Repo, error) {
	r.Lock()
	defer r.Unlock()

	err := timeout(*cmdTimeout, func() error {
		downloadPath := r.Path
		if !r.Exists() {
			downloadPath = r.Master.Repo
		}
		if rev == "" {
			return r.Master.VCS.Create(path, downloadPath)
		}
		return r.Master.VCS.CreateAtRev(path, downloadPath, rev)
	})
	if err != nil {
		return nil, err
	}
	return &Repo{
		Path:   path,
		Master: r.Master,
	}, nil
}
|
|
||||||
|
|
||||||
// Export exports the current Repo at revision rev to a new destination.
// Currently implemented as a full clone (including VCS metadata),
// since go/vcs has no export primitive yet.
func (r *Repo) Export(path, rev string) error {
	// TODO(adg,cmang): implement Export in go/vcs
	_, err := r.Clone(path, rev)
	return err
}
|
|
||||||
|
|
||||||
// UpdateTo updates the working copy of this Repo to the
|
|
||||||
// supplied revision.
|
|
||||||
func (r *Repo) UpdateTo(hash string) error {
|
|
||||||
r.Lock()
|
|
||||||
defer r.Unlock()
|
|
||||||
|
|
||||||
if r.Master.VCS.Cmd == "git" {
|
|
||||||
cmd := exec.Command("git", "reset", "--hard", hash)
|
|
||||||
var log bytes.Buffer
|
|
||||||
err := run(cmd, runTimeout(*cmdTimeout), runDir(r.Path), allOutput(&log))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error running git update -C %v: %v ; output=%s", hash, err, log.Bytes())
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Else go down three more levels of abstractions, at
|
|
||||||
// least two of which are broken for git.
|
|
||||||
return timeout(*cmdTimeout, func() error {
|
|
||||||
return r.Master.VCS.TagSync(r.Path, hash)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exists reports whether this Repo represents a valid on-disk
// repository checkout, by checking for the VCS metadata directory
// (".hg", ".git", etc., derived from the VCS command name).
func (r *Repo) Exists() bool {
	fi, err := os.Stat(filepath.Join(r.Path, "."+r.Master.VCS.Cmd))
	if err != nil {
		return false
	}
	return fi.IsDir()
}
|
|
||||||
|
|
||||||
// Pull pulls changes from the default path, that is, the path
// this Repo was cloned from. The operation is serialized against
// other VCS commands on this Repo and bounded by *cmdTimeout.
func (r *Repo) Pull() error {
	r.Lock()
	defer r.Unlock()

	return timeout(*cmdTimeout, func() error {
		return r.Master.VCS.Download(r.Path)
	})
}
|
|
||||||
|
|
||||||
// Log returns the changelog for this repository.
// It pulls first, then asks the VCS for its log formatted with
// xmlLogTemplate and parses the result as XML into []HgLog.
func (r *Repo) Log() ([]HgLog, error) {
	if err := r.Pull(); err != nil {
		return nil, err
	}
	r.Lock()
	defer r.Unlock()

	var logStruct struct {
		Log []HgLog
	}
	err := timeout(*cmdTimeout, func() error {
		data, err := r.Master.VCS.Log(r.Path, xmlLogTemplate)
		if err != nil {
			return err
		}

		// We have a commit with description that contains 0x1b byte.
		// Mercurial does not escape it, but xml.Unmarshal does not accept it.
		data = bytes.Replace(data, []byte{0x1b}, []byte{'?'}, -1)

		// Wrap the concatenated <Log> entries in a single root element
		// so the document is well-formed XML.
		err = xml.Unmarshal([]byte("<Top>"+string(data)+"</Top>"), &logStruct)
		if err != nil {
			return fmt.Errorf("unmarshal %s log: %v", r.Master.VCS, err)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	for i, log := range logStruct.Log {
		// Let's pretend there can be only one parent.
		if log.Parent != "" && strings.Contains(log.Parent, " ") {
			logStruct.Log[i].Parent = strings.Split(log.Parent, " ")[0]
		}
	}
	return logStruct.Log, nil
}
|
|
||||||
|
|
||||||
// FullHash returns the full hash for the given Git or Mercurial revision.
// The result is validated to be exactly 40 characters (the hash length
// for both hg and git).
func (r *Repo) FullHash(rev string) (string, error) {
	r.Lock()
	defer r.Unlock()

	var hash string
	err := timeout(*cmdTimeout, func() error {
		var data []byte
		// Avoid the vcs package for git, since it's broken
		// for git, and we're trying to remove levels of
		// abstraction which are increasingly getting
		// difficult to navigate.
		if r.Master.VCS.Cmd == "git" {
			cmd := exec.Command("git", "rev-parse", rev)
			var out bytes.Buffer
			err := run(cmd, runTimeout(*cmdTimeout), runDir(r.Path), allOutput(&out))
			data = out.Bytes()
			if err != nil {
				return fmt.Errorf("Failed to find FullHash of %q; git rev-parse: %v, %s", rev, err, data)
			}
		} else {
			var err error
			data, err = r.Master.VCS.LogAtRev(r.Path, rev, "{node}")
			if err != nil {
				return err
			}
		}
		s := strings.TrimSpace(string(data))
		if s == "" {
			return fmt.Errorf("cannot find revision")
		}
		if len(s) != 40 { // correct for both hg and git
			return fmt.Errorf("%s returned invalid hash: %s", r.Master.VCS, s)
		}
		hash = s
		return nil
	})
	if err != nil {
		return "", err
	}
	return hash, nil
}
|
|
||||||
|
|
||||||
// HgLog represents a single Mercurial revision, as unmarshaled from
// the XML produced by xmlLogTemplate.
type HgLog struct {
	Hash   string // full changeset hash
	Author string
	Date   string // RFC 3339 formatted (see xmlLogTemplate)
	Desc   string
	Parent string // first parent only; see Repo.Log
	Branch string
	Files  string

	// Internal metadata
	added bool
	bench bool // needs to be benchmarked?
}
|
|
||||||
|
|
||||||
// xmlLogTemplate is a template to pass to Mercurial to make
// hg log print the log in valid XML for parsing with xml.Unmarshal.
// Can not escape branches and files, because it crashes python with:
// AttributeError: 'NoneType' object has no attribute 'replace'
// The element names must match the exported fields of HgLog.
const xmlLogTemplate = `
	<Log>
	<Hash>{node|escape}</Hash>
	<Parent>{p1node}</Parent>
	<Author>{author|escape}</Author>
	<Date>{date|rfc3339date}</Date>
	<Desc>{desc|escape}</Desc>
	<Branch>{branches}</Branch>
	<Files>{files}</Files>
	</Log>
`
|
|
5
dashboard/cmd/buildlet/.gitignore
vendored
5
dashboard/cmd/buildlet/.gitignore
vendored
@ -1,5 +0,0 @@
|
|||||||
buildlet
|
|
||||||
buildlet.*-*
|
|
||||||
stage0/buildlet-stage0.*
|
|
||||||
cert.pem
|
|
||||||
key.pem
|
|
@ -1,31 +0,0 @@
|
|||||||
buildlet: buildlet.go
|
|
||||||
go build --tags=extdep
|
|
||||||
|
|
||||||
buildlet.darwin-amd64: buildlet.go
|
|
||||||
GOOS=darwin GOARCH=amd64 go build -o $@ --tags=extdep
|
|
||||||
cat $@ | (cd ../upload && go run --tags=extdep upload.go --public go-builder-data/$@)
|
|
||||||
|
|
||||||
buildlet.freebsd-amd64: buildlet.go
|
|
||||||
GOOS=freebsd GOARCH=amd64 go build -o $@ --tags=extdep
|
|
||||||
cat $@ | (cd ../upload && go run --tags=extdep upload.go --public go-builder-data/$@)
|
|
||||||
|
|
||||||
buildlet.linux-amd64: buildlet.go
|
|
||||||
GOOS=linux GOARCH=amd64 go build -o $@ --tags=extdep
|
|
||||||
cat $@ | (cd ../upload && go run --tags=extdep upload.go --public go-builder-data/$@)
|
|
||||||
|
|
||||||
buildlet.netbsd-amd64: buildlet.go
|
|
||||||
GOOS=netbsd GOARCH=amd64 go build -o $@ --tags=extdep
|
|
||||||
cat $@ | (cd ../upload && go run --tags=extdep upload.go --public go-builder-data/$@)
|
|
||||||
|
|
||||||
buildlet.openbsd-amd64: buildlet.go
|
|
||||||
GOOS=openbsd GOARCH=amd64 go build -o $@ --tags=extdep
|
|
||||||
cat $@ | (cd ../upload && go run --tags=extdep upload.go --public go-builder-data/$@)
|
|
||||||
|
|
||||||
buildlet.plan9-386: buildlet.go
|
|
||||||
GOOS=plan9 GOARCH=386 go build -o $@ --tags=extdep
|
|
||||||
cat $@ | (cd ../upload && go run --tags=extdep upload.go --public go-builder-data/$@)
|
|
||||||
|
|
||||||
buildlet.windows-amd64: buildlet.go
|
|
||||||
GOOS=windows GOARCH=amd64 go build -o $@ --tags=extdep
|
|
||||||
cat $@ | (cd ../upload && go run --tags=extdep upload.go --public go-builder-data/$@)
|
|
||||||
|
|
@ -1,12 +0,0 @@
|
|||||||
Local development notes:
|
|
||||||
|
|
||||||
Server: (TLS stuff is optional)
|
|
||||||
$ go run $GOROOT/src/crypto/tls/generate_cert.go --host=example.com
|
|
||||||
$ GCEMETA_password=foo GCEMETA_tls_cert=@cert.pem GCEMETA_tls_key='@key.pem' ./buildlet
|
|
||||||
|
|
||||||
Client:
|
|
||||||
$ curl -O https://go.googlesource.com/go/+archive/3b76b017cabb.tar.gz
|
|
||||||
$ curl -k --user :foo -X PUT --data-binary "@go-3b76b017cabb.tar.gz" https://localhost:5936/writetgz
|
|
||||||
$ curl -k --user :foo -d "cmd=src/make.bash" http://127.0.0.1:5937/exec
|
|
||||||
etc
|
|
||||||
|
|
@ -1,654 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
// The buildlet is an HTTP server that untars content to disk and runs
|
|
||||||
// commands it has untarred, streaming their output back over HTTP.
|
|
||||||
// It is part of Go's continuous build system.
|
|
||||||
//
|
|
||||||
// This program intentionally allows remote code execution, and
|
|
||||||
// provides no security of its own. It is assumed that any user uses
|
|
||||||
// it with an appropriately-configured firewall between their VM
|
|
||||||
// instances.
|
|
||||||
package main // import "golang.org/x/tools/dashboard/cmd/buildlet"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"compress/gzip"
|
|
||||||
"crypto/tls"
|
|
||||||
"errors"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"google.golang.org/cloud/compute/metadata"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Command-line flags controlling the buildlet's behavior.
var (
	haltEntireOS = flag.Bool("halt", true, "halt OS in /halt handler. If false, the buildlet process just ends.")
	scratchDir   = flag.String("scratchdir", "", "Temporary directory to use. The contents of this directory may be deleted at any time. If empty, TempDir is used to create one.")
	listenAddr   = flag.String("listen", defaultListenAddr(), "address to listen on. Warning: this service is inherently insecure and offers no protection of its own. Do not expose this port to the world.")
)
|
|
||||||
|
|
||||||
// defaultListenAddr picks the default value of the -listen flag:
// a high localhost port when developing off-GCE, a high port on
// Darwin (which never runs on GCE), and port 443 or 80 in production
// depending on whether TLS is configured in instance metadata.
func defaultListenAddr() string {
	if runtime.GOOS == "darwin" {
		// Darwin will never run on GCE, so let's always
		// listen on a high port (so we don't need to be
		// root).
		return ":5936"
	}
	if !metadata.OnGCE() {
		return "localhost:5936"
	}
	// In production, default to port 80 or 443, depending on
	// whether TLS is configured.
	if metadataValue("tls-cert") != "" {
		return ":443"
	}
	return ":80"
}
|
|
||||||
|
|
||||||
var osHalt func() // set by some machines
|
|
||||||
|
|
||||||
// main configures the buildlet environment (scratch dir, env vars,
// MTU fixes), registers the HTTP handlers (password-protected where
// they allow code execution or file access), and serves forever,
// optionally over TLS from metadata-supplied cert/key.
func main() {
	flag.Parse()
	onGCE := metadata.OnGCE()
	if !onGCE && !strings.HasPrefix(*listenAddr, "localhost:") {
		log.Printf("** WARNING *** This server is unsafe and offers no security. Be careful.")
	}
	if onGCE {
		fixMTU()
	}
	if runtime.GOOS == "plan9" {
		// Plan 9 is too slow on GCE, so stop running run.rc after the basics.
		// See https://golang.org/cl/2522 and https://golang.org/issue/9491
		// TODO(bradfitz): once the buildlet has environment variable support,
		// the coordinator can send this in, and this variable can be part of
		// the build configuration struct instead of hard-coded here.
		// But no need for environment variables quite yet.
		os.Setenv("GOTESTONLY", "std")
	}
	if *scratchDir == "" {
		// NOTE(review): "buildlet-scatch" looks like a typo for
		// "buildlet-scratch"; the prefix is only cosmetic, confirm
		// before changing it.
		dir, err := ioutil.TempDir("", "buildlet-scatch")
		if err != nil {
			log.Fatalf("error creating scratchdir with ioutil.TempDir: %v", err)
		}
		*scratchDir = dir
	}
	// TODO(bradfitz): if this becomes more of a general tool,
	// perhaps we want to remove this hard-coded here. Also,
	// if/once the exec handler ever gets generic environment
	// variable support, it would make sense to remove this too
	// and push it to the client. This hard-codes policy. But
	// that's okay for now.
	os.Setenv("GOROOT_BOOTSTRAP", filepath.Join(*scratchDir, "go1.4"))
	os.Setenv("WORKDIR", *scratchDir) // mostly for demos

	if _, err := os.Lstat(*scratchDir); err != nil {
		log.Fatalf("invalid --scratchdir %q: %v", *scratchDir, err)
	}
	// Unauthenticated debug/status endpoints.
	http.HandleFunc("/", handleRoot)
	http.HandleFunc("/debug/goroutines", handleGoroutines)
	http.HandleFunc("/debug/x", handleX)

	// Everything that can read/write/execute requires the metadata
	// password.
	password := metadataValue("password")
	requireAuth := func(handler func(w http.ResponseWriter, r *http.Request)) http.Handler {
		return requirePasswordHandler{http.HandlerFunc(handler), password}
	}
	http.Handle("/writetgz", requireAuth(handleWriteTGZ))
	http.Handle("/exec", requireAuth(handleExec))
	http.Handle("/halt", requireAuth(handleHalt))
	http.Handle("/tgz", requireAuth(handleGetTGZ))
	// TODO: removeall

	tlsCert, tlsKey := metadataValue("tls-cert"), metadataValue("tls-key")
	if (tlsCert == "") != (tlsKey == "") {
		log.Fatalf("tls-cert and tls-key must both be supplied, or neither.")
	}

	log.Printf("Listening on %s ...", *listenAddr)
	ln, err := net.Listen("tcp", *listenAddr)
	if err != nil {
		log.Fatalf("Failed to listen on %s: %v", *listenAddr, err)
	}
	ln = tcpKeepAliveListener{ln.(*net.TCPListener)}

	var srv http.Server
	if tlsCert != "" {
		cert, err := tls.X509KeyPair([]byte(tlsCert), []byte(tlsKey))
		if err != nil {
			log.Fatalf("TLS cert error: %v", err)
		}
		tlsConf := &tls.Config{
			Certificates: []tls.Certificate{cert},
		}
		ln = tls.NewListener(ln, tlsConf)
	}

	log.Fatalf("Serve: %v", srv.Serve(ln))
}
|
|
||||||
|
|
||||||
// metadataValue returns the GCE metadata instance value for the given key.
// If the metadata is not defined, the returned string is empty.
//
// If not running on GCE, it falls back to using environment variables
// for local development (GCEMETA_<key> with dashes mapped to
// underscores; a curl-style "@file" value reads the file's contents).
func metadataValue(key string) string {
	// The common case:
	if metadata.OnGCE() {
		v, err := metadata.InstanceAttributeValue(key)
		if _, notDefined := err.(metadata.NotDefinedError); notDefined {
			return ""
		}
		if err != nil {
			log.Fatalf("metadata.InstanceAttributeValue(%q): %v", key, err)
		}
		return v
	}

	// Else let developers use environment variables to fake
	// metadata keys, for local testing.
	envKey := "GCEMETA_" + strings.Replace(key, "-", "_", -1)
	v := os.Getenv(envKey)
	// Respect curl-style '@' prefix to mean the rest is a filename.
	if strings.HasPrefix(v, "@") {
		slurp, err := ioutil.ReadFile(v[1:])
		if err != nil {
			log.Fatalf("Error reading file for GCEMETA_%v: %v", key, err)
		}
		return string(slurp)
	}
	if v == "" {
		log.Printf("Warning: not running on GCE, and no %v environment variable defined", envKey)
	}
	return v
}
|
|
||||||
|
|
||||||
// tcpKeepAliveListener is a net.Listener that sets TCP keep-alive
// timeouts on accepted connections.
type tcpKeepAliveListener struct {
	*net.TCPListener
}

// Accept accepts a TCP connection and enables keep-alives with a
// 3-minute period on it. Errors from the keep-alive setsockopts are
// deliberately ignored (best effort).
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
	tc, err := ln.AcceptTCP()
	if err != nil {
		return
	}
	tc.SetKeepAlive(true)
	tc.SetKeepAlivePeriod(3 * time.Minute)
	return tc, nil
}
|
|
||||||
|
|
||||||
// Per-OS MTU fixes: FreeBSD and OpenBSD name their virtio NICs
// differently but are both adjusted via ifconfig.
func fixMTU_freebsd() error { return fixMTU_ifconfig("vtnet0") }
func fixMTU_openbsd() error { return fixMTU_ifconfig("vio0") }

// fixMTU_ifconfig sets the MTU of the named interface to 1460 by
// shelling out to /sbin/ifconfig, returning a descriptive error that
// includes the command's combined output on failure.
func fixMTU_ifconfig(iface string) error {
	out, err := exec.Command("/sbin/ifconfig", iface, "mtu", "1460").CombinedOutput()
	if err != nil {
		return fmt.Errorf("/sbin/ifconfig %s mtu 1460: %v, %s", iface, err, out)
	}
	return nil
}
|
|
||||||
|
|
||||||
// fixMTU_plan9 lowers the MTU by writing a control message to Plan 9's
// IP interface control file. Note it uses 1400, not 1460.
func fixMTU_plan9() error {
	f, err := os.OpenFile("/net/ipifc/0/ctl", os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	if _, err := io.WriteString(f, "mtu 1400\n"); err != nil { // not 1460
		f.Close()
		return err
	}
	return f.Close()
}
|
|
||||||
|
|
||||||
func fixMTU() {
|
|
||||||
fn, ok := map[string]func() error{
|
|
||||||
"openbsd": fixMTU_openbsd,
|
|
||||||
"freebsd": fixMTU_freebsd,
|
|
||||||
"plan9": fixMTU_plan9,
|
|
||||||
}[runtime.GOOS]
|
|
||||||
if ok {
|
|
||||||
if err := fn(); err != nil {
|
|
||||||
log.Printf("Failed to set MTU: %v", err)
|
|
||||||
} else {
|
|
||||||
log.Printf("Adjusted MTU.")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// mtuWriter is a hack for environments where we can't (or can't yet)
// fix the machine's MTU.
// Instead of telling the operating system the MTU, we just cut up our
// writes into small pieces to make sure we don't get too near the
// MTU, and we hope the kernel doesn't coalesce different flushed
// writes back together into the same TCP IP packets.
type mtuWriter struct {
	rw http.ResponseWriter
}

// Write implements io.Writer by writing p in chunks of at most 1000
// bytes, flushing the underlying ResponseWriter after each chunk and
// (on most OSes) sleeping briefly between chunks.
func (mw mtuWriter) Write(p []byte) (n int, err error) {
	const mtu = 1000 // way less than 1460; since HTTP response headers might be in there too
	for len(p) > 0 {
		chunk := p
		if len(chunk) > mtu {
			chunk = p[:mtu]
		}
		n0, err := mw.rw.Write(chunk)
		n += n0
		if n0 != len(chunk) && err == nil {
			err = io.ErrShortWrite
		}
		if err != nil {
			return n, err
		}
		p = p[n0:]
		// Flush each chunk so it goes out as its own write.
		mw.rw.(http.Flusher).Flush()
		if len(p) > 0 {
			// Whitelisted operating systems:
			if runtime.GOOS == "openbsd" || runtime.GOOS == "linux" {
				// Nothing
			} else {
				// Try to prevent the kernel from Nagle-ing the IP packets
				// together into one that's too large.
				time.Sleep(250 * time.Millisecond)
			}
		}
	}
	return n, nil
}
|
|
||||||
|
|
||||||
func handleRoot(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if r.URL.Path != "/" {
|
|
||||||
http.NotFound(w, r)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, "buildlet running on %s-%s\n", runtime.GOOS, runtime.GOARCH)
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleGoroutines is the unauthenticated /debug/goroutines handler.
// It dumps all goroutine stacks (up to 2 MiB) as plain text, routed
// through mtuWriter to keep individual writes small.
func handleGoroutines(rw http.ResponseWriter, r *http.Request) {
	w := mtuWriter{rw}
	log.Printf("Dumping goroutines.")
	rw.Header().Set("Content-Type", "text/plain; charset=utf-8")
	buf := make([]byte, 2<<20)
	buf = buf[:runtime.Stack(buf, true)]
	w.Write(buf)
	log.Printf("Dumped goroutines.")
}
|
|
||||||
|
|
||||||
// unauthenticated /debug/x handler, to test MTU settings.
|
|
||||||
func handleX(w http.ResponseWriter, r *http.Request) {
|
|
||||||
n, _ := strconv.Atoi(r.FormValue("n"))
|
|
||||||
if n > 1<<20 {
|
|
||||||
n = 1 << 20
|
|
||||||
}
|
|
||||||
log.Printf("Dumping %d X.", n)
|
|
||||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
|
||||||
buf := make([]byte, n)
|
|
||||||
for i := range buf {
|
|
||||||
buf[i] = 'X'
|
|
||||||
}
|
|
||||||
w.Write(buf)
|
|
||||||
log.Printf("Dumped X.")
|
|
||||||
}
|
|
||||||
|
|
||||||
// validRelativeDir reports whether dir is a clean relative path that
// cannot climb out of the scratch directory.
// This is a remote code execution daemon, so security is kinda pointless, but:
func validRelativeDir(dir string) bool {
	switch {
	case strings.Contains(dir, `\`):
		return false
	case path.IsAbs(dir):
		return false
	}
	cleaned := path.Clean(dir)
	if cleaned == ".." || strings.HasPrefix(cleaned, "../") || strings.HasSuffix(cleaned, "/..") {
		return false
	}
	return true
}
|
|
||||||
|
|
||||||
// handleGetTGZ streams a gzipped tarball of the requested directory
// (the "dir" form value, validated to be relative) under the scratch
// dir. Output goes through mtuWriter; on a mid-stream error the
// connection is hijacked and closed so the client sees a broken
// chunked response rather than a truncated-but-valid archive.
func handleGetTGZ(rw http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(rw, "requires GET method", http.StatusBadRequest)
		return
	}
	dir := r.FormValue("dir")
	if !validRelativeDir(dir) {
		http.Error(rw, "bogus dir", http.StatusBadRequest)
		return
	}
	zw := gzip.NewWriter(mtuWriter{rw})
	tw := tar.NewWriter(zw)
	base := filepath.Join(*scratchDir, filepath.FromSlash(dir))
	err := filepath.Walk(base, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Tar entry names are relative to the requested base.
		rel := strings.TrimPrefix(strings.TrimPrefix(path, base), "/")
		var linkName string
		if fi.Mode()&os.ModeSymlink != 0 {
			linkName, err = os.Readlink(path)
			if err != nil {
				return err
			}
		}
		th, err := tar.FileInfoHeader(fi, linkName)
		if err != nil {
			return err
		}
		th.Name = rel
		if fi.IsDir() && !strings.HasSuffix(th.Name, "/") {
			th.Name += "/"
		}
		// Skip the root entry itself.
		if th.Name == "/" {
			return nil
		}
		if err := tw.WriteHeader(th); err != nil {
			return err
		}
		if fi.Mode().IsRegular() {
			f, err := os.Open(path)
			if err != nil {
				return err
			}
			defer f.Close()
			if _, err := io.Copy(tw, f); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.Printf("Walk error: %v", err)
		// Decent way to signal failure to the caller, since it'll break
		// the chunked response, rather than have a valid EOF.
		conn, _, _ := rw.(http.Hijacker).Hijack()
		conn.Close()
	}
	tw.Close()
	zw.Close()
}
|
|
||||||
|
|
||||||
// handleWriteTGZ extracts a gzip-compressed tar archive into a
// directory under *scratchDir. PUT supplies the archive in the request
// body; POST supplies a "url" form parameter naming where to fetch it
// from. An optional "dir" query parameter selects a (validated)
// subdirectory, which is created if needed. Responds "OK" on success;
// untar errors carry their own HTTP status via httpStatuser.
//
// NOTE(review): the POST form fetches an arbitrary caller-supplied URL
// — acceptable on a trusted builder network, but an SSRF vector if the
// endpoint were ever exposed more widely.
func handleWriteTGZ(w http.ResponseWriter, r *http.Request) {
	var tgz io.Reader
	switch r.Method {
	case "PUT":
		tgz = r.Body
	case "POST":
		urlStr := r.FormValue("url")
		if urlStr == "" {
			http.Error(w, "missing url POST param", http.StatusBadRequest)
			return
		}
		res, err := http.Get(urlStr)
		if err != nil {
			http.Error(w, fmt.Sprintf("fetching URL %s: %v", urlStr, err), http.StatusInternalServerError)
			return
		}
		defer res.Body.Close()
		if res.StatusCode != http.StatusOK {
			http.Error(w, fmt.Sprintf("fetching provided url: %s", res.Status), http.StatusInternalServerError)
			return
		}
		tgz = res.Body
	default:
		http.Error(w, "requires PUT or POST method", http.StatusBadRequest)
		return
	}

	// The destination directory is validated before being joined under
	// the scratch dir, so the client can't write outside it.
	urlParam, _ := url.ParseQuery(r.URL.RawQuery)
	baseDir := *scratchDir
	if dir := urlParam.Get("dir"); dir != "" {
		if !validRelativeDir(dir) {
			http.Error(w, "bogus dir", http.StatusBadRequest)
			return
		}
		dir = filepath.FromSlash(dir)
		baseDir = filepath.Join(baseDir, dir)
		if err := os.MkdirAll(baseDir, 0755); err != nil {
			http.Error(w, "mkdir of base: "+err.Error(), http.StatusInternalServerError)
			return
		}
	}

	err := untar(tgz, baseDir)
	if err != nil {
		// untar wraps client-input problems as httpError (400); anything
		// else is reported as a 500.
		status := http.StatusInternalServerError
		if he, ok := err.(httpStatuser); ok {
			status = he.httpStatus()
		}
		http.Error(w, err.Error(), status)
		return
	}
	io.WriteString(w, "OK")
}
|
|
||||||
|
|
||||||
// untar reads the gzip-compressed tar file from r and writes it into dir.
// Only regular files and directories are supported; any other entry type
// aborts the extraction with a 400-class error. Errors caused by bad
// client input are returned via badRequest (httpError, status 400) so
// the HTTP handler can relay the right status code to the caller.
func untar(r io.Reader, dir string) error {
	zr, err := gzip.NewReader(r)
	if err != nil {
		return badRequest("requires gzip-compressed body: " + err.Error())
	}
	tr := tar.NewReader(zr)
	for {
		f, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Printf("tar reading error: %v", err)
			return badRequest("tar error: " + err.Error())
		}
		// Refuse suspicious entry names (backslashes, leading "/",
		// "../") so entries can't escape dir.
		if !validRelPath(f.Name) {
			return badRequest(fmt.Sprintf("tar file contained invalid name %q", f.Name))
		}
		rel := filepath.FromSlash(f.Name)
		abs := filepath.Join(dir, rel)

		fi := f.FileInfo()
		mode := fi.Mode()
		switch {
		case mode.IsRegular():
			// Make the directory. This is redundant because it should
			// already be made by a directory entry in the tar
			// beforehand. Thus, don't check for errors; the next
			// write will fail with the same error.
			os.MkdirAll(filepath.Dir(abs), 0755)
			wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm())
			if err != nil {
				return err
			}
			n, err := io.Copy(wf, tr)
			// Surface a Close error (delayed write failure) only if the
			// copy itself succeeded.
			if closeErr := wf.Close(); closeErr != nil && err == nil {
				err = closeErr
			}
			if err != nil {
				return fmt.Errorf("error writing to %s: %v", abs, err)
			}
			// Verify the full declared size was written.
			if n != f.Size {
				return fmt.Errorf("only wrote %d bytes to %s; expected %d", n, abs, f.Size)
			}
			log.Printf("wrote %s", abs)
		case mode.IsDir():
			if err := os.MkdirAll(abs, 0755); err != nil {
				return err
			}
		default:
			return badRequest(fmt.Sprintf("tar file entry %s contained unsupported file type %v", f.Name, mode))
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
// Process-State is an HTTP Trailer set in the /exec handler to "ok"
// on success, or os.ProcessState.String() on failure.
const hdrProcessState = "Process-State"

// handleExec runs a command and streams its combined stdout+stderr back
// as the response body. The final status is reported in the
// Process-State HTTP trailer, which is why HTTP/1.1 or newer is
// required. In "sys" mode the command path is used verbatim and runs in
// *scratchDir; otherwise it must be a safe relative path, which is
// resolved under *scratchDir and run from its own directory. If the
// client disconnects mid-run, the child process is killed.
func handleExec(w http.ResponseWriter, r *http.Request) {
	// Watch for client disconnects so a long-running child can be
	// killed rather than orphaned.
	cn := w.(http.CloseNotifier)
	clientGone := cn.CloseNotify()
	handlerDone := make(chan bool)
	defer close(handlerDone)

	if r.Method != "POST" {
		http.Error(w, "requires POST method", http.StatusBadRequest)
		return
	}
	if r.ProtoMajor*10+r.ProtoMinor < 11 {
		// We need trailers, only available in HTTP/1.1 or HTTP/2.
		http.Error(w, "HTTP/1.1 or higher required", http.StatusBadRequest)
		return
	}

	w.Header().Set("Trailer", hdrProcessState) // declare it so we can set it

	cmdPath := r.FormValue("cmd") // required
	absCmd := cmdPath
	sysMode := r.FormValue("mode") == "sys"
	if sysMode {
		if cmdPath == "" {
			http.Error(w, "requires 'cmd' parameter", http.StatusBadRequest)
			return
		}
	} else {
		if !validRelPath(cmdPath) {
			http.Error(w, "requires 'cmd' parameter", http.StatusBadRequest)
			return
		}
		absCmd = filepath.Join(*scratchDir, filepath.FromSlash(cmdPath))
	}

	// Flush headers before the (possibly long-running) command starts
	// producing output.
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}

	cmd := exec.Command(absCmd, r.PostForm["cmdArg"]...)
	if sysMode {
		cmd.Dir = *scratchDir
	} else {
		cmd.Dir = filepath.Dir(absCmd)
	}
	// Both output streams are interleaved onto the response, chunked to
	// the configured MTU.
	cmdOutput := mtuWriter{w}
	cmd.Stdout = cmdOutput
	cmd.Stderr = cmdOutput
	err := cmd.Start()
	if err == nil {
		// Kill the child if the client goes away; handlerDone stops
		// this goroutine once the handler returns.
		go func() {
			select {
			case <-clientGone:
				cmd.Process.Kill()
			case <-handlerDone:
				return
			}
		}()
		err = cmd.Wait()
	}
	state := "ok"
	if err != nil {
		if ps := cmd.ProcessState; ps != nil {
			state = ps.String()
		} else {
			state = err.Error()
		}
	}
	// Setting the declared trailer after the body has been written.
	w.Header().Set(hdrProcessState, state)
	log.Printf("Run = %s", state)
}
|
|
||||||
|
|
||||||
// handleHalt schedules a halt of the buildlet process (or, when
// *haltEntireOS is set, the whole machine — see haltMachine) one
// second in the future, giving this HTTP response time to be written
// before the process dies. POST only.
func handleHalt(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		http.Error(w, "requires POST method", http.StatusBadRequest)
		return
	}
	log.Printf("Halting in 1 second.")
	// do the halt in 1 second, to give the HTTP response time to complete:
	time.AfterFunc(1*time.Second, haltMachine)
}
|
|
||||||
|
|
||||||
// haltMachine ends the buildlet and, when *haltEntireOS is set,
// attempts to power off the whole machine: first via the
// platform-specific osHalt hook if one is registered, then via a
// per-OS halt command. A 5-second backup timer guarantees the process
// exits even if those external commands hang. This function does not
// return.
func haltMachine() {
	if !*haltEntireOS {
		log.Printf("Ending buildlet process due to halt.")
		os.Exit(0)
		return
	}
	log.Printf("Halting machine.")
	// Safety net: exit regardless of what the halt attempts below do.
	time.AfterFunc(5*time.Second, func() { os.Exit(0) })
	if osHalt != nil {
		// TODO: Windows: http://msdn.microsoft.com/en-us/library/windows/desktop/aa376868%28v=vs.85%29.aspx
		osHalt()
		os.Exit(0)
	}
	// Backup mechanism, if exec hangs for any reason:
	var err error
	switch runtime.GOOS {
	case "openbsd":
		// Quick, no fs flush, and power down:
		err = exec.Command("halt", "-q", "-n", "-p").Run()
	case "freebsd":
		// Power off (-p), via halt (-o), now.
		err = exec.Command("shutdown", "-p", "-o", "now").Run()
	case "linux":
		// Don't sync (-n), force without shutdown (-f), and power off (-p).
		err = exec.Command("/bin/halt", "-n", "-f", "-p").Run()
	case "plan9":
		err = exec.Command("fshalt").Run()
	default:
		err = errors.New("No system-specific halt command run; will just end buildlet process.")
	}
	log.Printf("Shutdown: %v", err)
	log.Printf("Ending buildlet process post-halt")
	os.Exit(0)
}
|
|
||||||
|
|
||||||
// validRelPath reports whether p is a safe, non-empty, slash-separated
// relative path: no backslashes, not absolute, and no ".." path
// component. The previous substring check for "../" let a bare ".."
// and trailing "/.." (e.g. "a/..") slip through, which could escape
// the intended root; those are now rejected too.
func validRelPath(p string) bool {
	if p == "" || strings.Contains(p, `\`) || strings.HasPrefix(p, "/") {
		return false
	}
	// Reject a ".." component wherever it appears: leading, inner, or trailing.
	if p == ".." || strings.Contains(p, "../") || strings.HasSuffix(p, "/..") {
		return false
	}
	return true
}
|
|
||||||
|
|
||||||
// httpStatuser is an error that also carries an HTTP status code for
// the handler to relay to the client.
type httpStatuser interface {
	error
	httpStatus() int
}

// httpError implements httpStatuser with a fixed status code and message.
type httpError struct {
	statusCode int
	msg        string
}

func (e httpError) Error() string {
	return e.msg
}

func (e httpError) httpStatus() int {
	return e.statusCode
}

// badRequest returns an error that renders as a 400 Bad Request.
func badRequest(msg string) error {
	return httpError{statusCode: http.StatusBadRequest, msg: msg}
}
|
|
||||||
|
|
||||||
// requirePasswordHandler is an http.Handler auth wrapper that enforces
// an HTTP Basic password. The username is ignored.
type requirePasswordHandler struct {
	h        http.Handler
	password string // empty means no password
}

// ServeHTTP delegates to the wrapped handler when no password is
// configured or the client's Basic-auth password matches; otherwise it
// rejects the request with 403 Forbidden.
func (h requirePasswordHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	_, gotPass, _ := r.BasicAuth()
	if h.password == "" || h.password == gotPass {
		h.h.ServeHTTP(w, r)
		return
	}
	http.Error(w, "invalid password", http.StatusForbidden)
}
|
|
@ -1,3 +0,0 @@
|
|||||||
buildlet-stage0.windows-amd64: stage0.go
|
|
||||||
GOOS=windows GOARCH=amd64 go build -o $@ --tags=extdep
|
|
||||||
cat $@ | (cd ../../upload && go run --tags=extdep upload.go --public go-builder-data/$@)
|
|
@ -1,78 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
// The stage0 command looks up the buildlet's URL from the GCE metadata
|
|
||||||
// service, downloads it, and runs it. It's used primarily by Windows,
|
|
||||||
// since it can be written in a couple lines of shell elsewhere.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"google.golang.org/cloud/compute/metadata"
|
|
||||||
)
|
|
||||||
|
|
||||||
// attr is the GCE instance metadata key holding the buildlet binary's
// download URL.
const attr = "buildlet-binary-url"

// main looks up the buildlet's URL from GCE instance metadata,
// downloads it as ./buildlet.exe, and runs it with output mirrored to
// this process's stdout/stderr. Every failure path pauses before
// exiting (sleepFatalf) so the message can be read in a console window.
func main() {
	buildletURL, err := metadata.InstanceAttributeValue(attr)
	if err != nil {
		sleepFatalf("Failed to look up %q attribute value: %v", attr, err)
	}
	// .exe suffix because this stage0 is primarily for Windows builders.
	target := filepath.FromSlash("./buildlet.exe")
	if err := download(target, buildletURL); err != nil {
		sleepFatalf("Downloading %s: %v", buildletURL, err)
	}
	cmd := exec.Command(target)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		sleepFatalf("Error running buildlet: %v", err)
	}
}
|
|
||||||
|
|
||||||
// sleepFatalf logs the formatted message, pauses for a minute so a
// user watching a console window can read it, then exits with a
// failure status.
func sleepFatalf(format string, args ...interface{}) {
	log.Printf(format, args...)
	time.Sleep(time.Minute) // so user has time to see it in cmd.exe, maybe
	os.Exit(1)
}
|
|
||||||
|
|
||||||
// download fetches url over HTTP and atomically installs the response
// body at file: it writes to file+".tmp" first and renames into place
// only on success, so a partial download never replaces an existing
// good binary.
func download(file, url string) error {
	log.Printf("Downloading %s to %s ...\n", url, file)
	res, err := http.Get(url)
	if err != nil {
		return fmt.Errorf("Error fetching %v: %v", url, err)
	}
	// Always release the body — the previous code leaked it on the
	// non-200 early return below.
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return fmt.Errorf("HTTP status code of %s was %v", url, res.Status)
	}
	tmp := file + ".tmp"
	os.Remove(tmp)
	os.Remove(file)
	f, err := os.Create(tmp)
	if err != nil {
		return err
	}
	n, err := io.Copy(f, res.Body)
	if err != nil {
		f.Close() // best effort; the copy error is what matters
		return fmt.Errorf("Error reading %v: %v", url, err)
	}
	// Check the Close error: it can report a delayed write failure that
	// would otherwise silently produce a truncated binary.
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Rename(tmp, file); err != nil {
		return err
	}
	log.Printf("Downloaded %s (%d bytes)", file, n)
	return nil
}
|
|
3
dashboard/cmd/coordinator/.gitignore
vendored
3
dashboard/cmd/coordinator/.gitignore
vendored
@ -1,3 +0,0 @@
|
|||||||
buildongce/client-*.dat
|
|
||||||
buildongce/token.dat
|
|
||||||
coordinator
|
|
@ -1,9 +0,0 @@
|
|||||||
coordinator: coordinator.go
|
|
||||||
GOOS=linux go build --tags=extdep -o coordinator .
|
|
||||||
|
|
||||||
# After "make upload", either reboot the machine, or ssh to it and:
|
|
||||||
# sudo systemctl restart gobuild.service
|
|
||||||
# And watch its logs with:
|
|
||||||
# sudo journalctl -f -u gobuild.service
|
|
||||||
upload: coordinator
|
|
||||||
cat coordinator | (cd ../upload && go run --tags=extdep upload.go --public go-builder-data/coordinator)
|
|
@ -1,299 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
package main // import "golang.org/x/tools/dashboard/cmd/coordinator/buildongce"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"encoding/json"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/oauth2"
|
|
||||||
"golang.org/x/oauth2/google"
|
|
||||||
compute "google.golang.org/api/compute/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
proj = flag.String("project", "symbolic-datum-552", "name of Project")
|
|
||||||
zone = flag.String("zone", "us-central1-a", "GCE zone")
|
|
||||||
mach = flag.String("machinetype", "n1-standard-16", "Machine type")
|
|
||||||
instName = flag.String("instance_name", "go-builder-1", "Name of VM instance.")
|
|
||||||
sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.")
|
|
||||||
staticIP = flag.String("static_ip", "", "Static IP to use. If empty, automatic.")
|
|
||||||
reuseDisk = flag.Bool("reuse_disk", true, "Whether disk images should be reused between shutdowns/restarts.")
|
|
||||||
ssd = flag.Bool("ssd", false, "use a solid state disk (faster, more expensive)")
|
|
||||||
)
|
|
||||||
|
|
||||||
func readFile(v string) string {
|
|
||||||
slurp, err := ioutil.ReadFile(v)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Error reading %s: %v", v, err)
|
|
||||||
}
|
|
||||||
return strings.TrimSpace(string(slurp))
|
|
||||||
}
|
|
||||||
|
|
||||||
// config is the OAuth2 client configuration used to authorize this
// tool against the Google Compute/Storage/SQL APIs. The client ID and
// secret are read at package init from files next to the binary, so
// the program exits early (via readFile) if they're missing.
var config = &oauth2.Config{
	// The client-id and secret should be for an "Installed Application" when using
	// the CLI. Later we'll use a web application with a callback.
	ClientID:     readFile("client-id.dat"),
	ClientSecret: readFile("client-secret.dat"),
	Endpoint:     google.Endpoint,
	Scopes: []string{
		compute.DevstorageFull_controlScope,
		compute.ComputeScope,
		"https://www.googleapis.com/auth/sqlservice",
		"https://www.googleapis.com/auth/sqlservice.admin",
	},
	// Out-of-band flow: the user pastes the auth code into the terminal.
	RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
}
|
|
||||||
|
|
||||||
const baseConfig = `#cloud-config
|
|
||||||
coreos:
|
|
||||||
update:
|
|
||||||
group: alpha
|
|
||||||
reboot-strategy: off
|
|
||||||
units:
|
|
||||||
- name: gobuild.service
|
|
||||||
command: start
|
|
||||||
content: |
|
|
||||||
[Unit]
|
|
||||||
Description=Go Builders
|
|
||||||
After=docker.service
|
|
||||||
Requires=docker.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/coordinator http://storage.googleapis.com/go-builder-data/coordinator && chmod +x /opt/bin/coordinator'
|
|
||||||
ExecStart=/opt/bin/coordinator
|
|
||||||
RestartSec=10s
|
|
||||||
Restart=always
|
|
||||||
Type=simple
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
`
|
|
||||||
|
|
||||||
// main authorizes against the Google APIs — reusing an OAuth2 token
// cached in token.dat, or prompting on stdin for an auth code the
// first time — then creates the coordinator VM on GCE: a CoreOS
// instance whose cloud-config (baseConfig plus optional SSH keys) runs
// the coordinator binary under systemd. It polls the zone operation
// until creation finishes and logs the resulting instance as JSON.
func main() {
	flag.Parse()
	if *proj == "" {
		log.Fatalf("Missing --project flag")
	}
	prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
	machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach

	const tokenFileName = "token.dat"
	tokenFile := tokenCacheFile(tokenFileName)
	tokenSource := oauth2.ReuseTokenSource(nil, tokenFile)
	token, err := tokenSource.Token()
	if err != nil {
		// No (valid) cached token: run the out-of-band OAuth2 flow and
		// cache the result for next time.
		log.Printf("Error getting token from %s: %v", tokenFileName, err)
		log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
		fmt.Print("\nEnter auth code: ")
		sc := bufio.NewScanner(os.Stdin)
		sc.Scan()
		authCode := strings.TrimSpace(sc.Text())
		token, err = config.Exchange(oauth2.NoContext, authCode)
		if err != nil {
			log.Fatalf("Error exchanging auth code for a token: %v", err)
		}
		if err := tokenFile.WriteToken(token); err != nil {
			log.Fatalf("Error writing to %s: %v", tokenFileName, err)
		}
		tokenSource = oauth2.ReuseTokenSource(token, nil)
	}

	oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)

	// NOTE(review): the error from compute.New is silently dropped here.
	computeService, _ := compute.New(oauthClient)

	natIP := *staticIP
	if natIP == "" {
		// Try to find it by name.
		aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
		if err != nil {
			log.Fatal(err)
		}
		// https://godoc.org/google.golang.org/api/compute/v1#AddressAggregatedList
	IPLoop:
		for _, asl := range aggAddrList.Items {
			for _, addr := range asl.Addresses {
				if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
					natIP = addr.Address
					break IPLoop
				}
			}
		}
	}

	cloudConfig := baseConfig
	if *sshPub != "" {
		key := strings.TrimSpace(readFile(*sshPub))
		cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key)
	}
	if os.Getenv("USER") == "bradfitz" {
		cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com")
	}
	const maxCloudConfig = 32 << 10 // per compute API docs
	if len(cloudConfig) > maxCloudConfig {
		log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
	}

	instance := &compute.Instance{
		Name:        *instName,
		Description: "Go Builder",
		MachineType: machType,
		Disks:       []*compute.AttachedDisk{instanceDisk(computeService)},
		Tags: &compute.Tags{
			Items: []string{"http-server", "https-server"},
		},
		Metadata: &compute.Metadata{
			Items: []*compute.MetadataItems{
				{
					// CoreOS reads its cloud-config from the user-data key.
					Key:   "user-data",
					Value: cloudConfig,
				},
			},
		},
		NetworkInterfaces: []*compute.NetworkInterface{
			&compute.NetworkInterface{
				AccessConfigs: []*compute.AccessConfig{
					&compute.AccessConfig{
						Type:  "ONE_TO_ONE_NAT",
						Name:  "External NAT",
						NatIP: natIP,
					},
				},
				Network: prefix + "/global/networks/default",
			},
		},
		ServiceAccounts: []*compute.ServiceAccount{
			{
				Email: "default",
				Scopes: []string{
					compute.DevstorageFull_controlScope,
					compute.ComputeScope,
				},
			},
		},
	}

	log.Printf("Creating instance...")
	op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
	if err != nil {
		log.Fatalf("Failed to create instance: %v", err)
	}
	opName := op.Name
	log.Printf("Created. Waiting on operation %v", opName)
	// Poll the zone operation every 2s until it reaches a terminal state.
OpLoop:
	for {
		time.Sleep(2 * time.Second)
		op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
		if err != nil {
			log.Fatalf("Failed to get op %s: %v", opName, err)
		}
		switch op.Status {
		case "PENDING", "RUNNING":
			log.Printf("Waiting on operation %v", opName)
			continue
		case "DONE":
			if op.Error != nil {
				for _, operr := range op.Error.Errors {
					log.Printf("Error: %+v", operr)
				}
				log.Fatalf("Failed to start.")
			}
			log.Printf("Success. %+v", op)
			break OpLoop
		default:
			log.Fatalf("Unknown status %q: %+v", op.Status, op)
		}
	}

	inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
	if err != nil {
		log.Fatalf("Error getting instance after creation: %v", err)
	}
	ij, _ := json.MarshalIndent(inst, "", " ")
	log.Printf("Instance: %s", ij)
}
|
|
||||||
|
|
||||||
// instanceDisk returns the boot disk to attach to the builder VM:
// either an existing persistent disk named <instance>-coreos-stateless-pd
// (when -reuse_disk is set and such a disk already exists in the zone),
// or initialize-params that create a fresh 50 GB disk from the pinned
// CoreOS alpha image, on SSD if -ssd is set.
func instanceDisk(svc *compute.Service) *compute.AttachedDisk {
	const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-402-2-0-v20140807"
	diskName := *instName + "-coreos-stateless-pd"

	if *reuseDisk {
		dl, err := svc.Disks.List(*proj, *zone).Do()
		if err != nil {
			log.Fatalf("Error listing disks: %v", err)
		}
		for _, disk := range dl.Items {
			if disk.Name != diskName {
				continue
			}
			// Found the existing disk: attach it, and never auto-delete
			// it so it survives the VM.
			return &compute.AttachedDisk{
				AutoDelete: false,
				Boot:       true,
				DeviceName: diskName,
				Type:       "PERSISTENT",
				Source:     disk.SelfLink,
				Mode:       "READ_WRITE",

				// The GCP web UI's "Show REST API" link includes a
				// "zone" parameter, but it's not in the API
				// description. But it wants this form (disk.Zone, a
				// full zone URL, not *zone):
				// Zone: disk.Zone,
				// ... but it seems to work without it. Keep this
				// comment here until I file a bug with the GCP
				// people.
			}
		}
	}

	diskType := ""
	if *ssd {
		diskType = "https://www.googleapis.com/compute/v1/projects/" + *proj + "/zones/" + *zone + "/diskTypes/pd-ssd"
	}

	// No reusable disk: have GCE create one from the image. It is
	// auto-deleted with the VM unless -reuse_disk asked to keep it.
	return &compute.AttachedDisk{
		AutoDelete: !*reuseDisk,
		Boot:       true,
		Type:       "PERSISTENT",
		InitializeParams: &compute.AttachedDiskInitializeParams{
			DiskName:    diskName,
			SourceImage: imageURL,
			DiskSizeGb:  50,
			DiskType:    diskType,
		},
	}
}
|
|
||||||
|
|
||||||
// tokenCacheFile caches an OAuth2 token as JSON in the named file. Its
// Token method makes it usable as the fallback source for
// oauth2.ReuseTokenSource; WriteToken is the corresponding writer side.
type tokenCacheFile string

// Token loads the cached token from the file, returning an error if
// the file is missing or not valid JSON.
func (f tokenCacheFile) Token() (*oauth2.Token, error) {
	slurp, err := ioutil.ReadFile(string(f))
	if err != nil {
		return nil, err
	}
	t := new(oauth2.Token)
	if err := json.Unmarshal(slurp, t); err != nil {
		return nil, err
	}
	return t, nil
}

// WriteToken persists t as JSON, readable only by the current user (0600).
func (f tokenCacheFile) WriteToken(t *oauth2.Token) error {
	jt, err := json.Marshal(t)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(string(f), jt, 0600)
}
|
|
File diff suppressed because it is too large
Load Diff
@ -1,87 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"golang.org/x/oauth2"
|
|
||||||
"golang.org/x/tools/dashboard/auth"
|
|
||||||
"golang.org/x/tools/dashboard/buildlet"
|
|
||||||
"google.golang.org/api/compute/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
func username() string {
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return os.Getenv("USERNAME")
|
|
||||||
}
|
|
||||||
return os.Getenv("USER")
|
|
||||||
}
|
|
||||||
|
|
||||||
func homeDir() string {
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
|
|
||||||
}
|
|
||||||
return os.Getenv("HOME")
|
|
||||||
}
|
|
||||||
|
|
||||||
func configDir() string {
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return filepath.Join(os.Getenv("APPDATA"), "Gomote")
|
|
||||||
}
|
|
||||||
if xdg := os.Getenv("XDG_CONFIG_HOME"); xdg != "" {
|
|
||||||
return filepath.Join(xdg, "gomote")
|
|
||||||
}
|
|
||||||
return filepath.Join(homeDir(), ".config", "gomote")
|
|
||||||
}
|
|
||||||
|
|
||||||
// projTokenSource returns an OAuth2 token source scoped for GCE
// operations on project *proj, exiting the program if one can't be
// obtained.
func projTokenSource() oauth2.TokenSource {
	ts, err := auth.ProjectTokenSource(*proj, compute.ComputeScope)
	if err != nil {
		log.Fatalf("Failed to get OAuth2 token source for project %s: %v", *proj, err)
	}
	return ts
}
|
|
||||||
|
|
||||||
// userKeyPair returns the user's TLS key pair for authenticating to
// buildlets, loading gomote.crt / gomote.key from configDir() when
// both exist. Otherwise it generates a fresh pair and caches it there
// best-effort: cache-write failures are logged but not fatal, since
// the in-memory pair is still usable for this run.
func userKeyPair() buildlet.KeyPair {
	keyDir := configDir()
	crtFile := filepath.Join(keyDir, "gomote.crt")
	keyFile := filepath.Join(keyDir, "gomote.key")
	_, crtErr := os.Stat(crtFile)
	_, keyErr := os.Stat(keyFile)
	if crtErr == nil && keyErr == nil {
		return buildlet.KeyPair{
			CertPEM: slurpString(crtFile),
			KeyPEM:  slurpString(keyFile),
		}
	}
	// check logs (but does not abort on) cache-write problems.
	check := func(what string, err error) {
		if err != nil {
			log.Printf("%s: %v", what, err)
		}
	}
	check("making key dir", os.MkdirAll(keyDir, 0700))
	kp, err := buildlet.NewKeyPair()
	if err != nil {
		log.Fatalf("Error generating new key pair: %v", err)
	}
	check("writing cert file: ", ioutil.WriteFile(crtFile, []byte(kp.CertPEM), 0600))
	check("writing key file: ", ioutil.WriteFile(keyFile, []byte(kp.KeyPEM), 0600))
	return kp
}
|
|
||||||
|
|
||||||
func slurpString(f string) string {
|
|
||||||
slurp, err := ioutil.ReadFile(f)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
return string(slurp)
|
|
||||||
}
|
|
@ -1,77 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/tools/dashboard"
|
|
||||||
"golang.org/x/tools/dashboard/buildlet"
|
|
||||||
)
|
|
||||||
|
|
||||||
// create implements the "gomote create [create-opts] <type>" subcommand:
// it boots a fresh GCE buildlet VM of the given builder type and prints
// "<type>\t<URL>" for the new instance. A unique prefix of a valid
// VM-using builder type is accepted as shorthand. The -timeout flag
// bounds how long the VM lives before GCE deletes it.
func create(args []string) error {
	fs := flag.NewFlagSet("create", flag.ContinueOnError)
	fs.Usage = func() {
		fmt.Fprintln(os.Stderr, "create usage: gomote create [create-opts] <type>\n\n")
		fs.PrintDefaults()
		os.Exit(1)
	}
	var timeout time.Duration
	fs.DurationVar(&timeout, "timeout", 60*time.Minute, "how long the VM will live before being deleted.")

	fs.Parse(args)
	if fs.NArg() != 1 {
		fs.Usage()
	}
	builderType := fs.Arg(0)
	conf, ok := dashboard.Builders[builderType]
	if !ok || !conf.UsesVM() {
		// Not an exact VM-builder name; fall back to a unique-prefix
		// match over all VM-using builders.
		var valid []string
		var prefixMatch []string
		for k, conf := range dashboard.Builders {
			if conf.UsesVM() {
				valid = append(valid, k)
				if strings.HasPrefix(k, builderType) {
					prefixMatch = append(prefixMatch, k)
				}
			}
		}
		if len(prefixMatch) == 1 {
			builderType = prefixMatch[0]
			conf, _ = dashboard.Builders[builderType]
		} else {
			sort.Strings(valid)
			return fmt.Errorf("Invalid builder type %q. Valid options include: %q", builderType, valid)
		}
	}

	// Instance name encodes who created it and for which builder.
	instName := fmt.Sprintf("mote-%s-%s", username(), builderType)
	client, err := buildlet.StartNewVM(projTokenSource(), instName, builderType, buildlet.VMOpts{
		Zone:        *zone,
		ProjectID:   *proj,
		TLS:         userKeyPair(),
		DeleteIn:    timeout,
		Description: fmt.Sprintf("gomote buildlet for %s", username()),
		OnInstanceRequested: func() {
			log.Printf("Sent create request. Waiting for operation.")
		},
		OnInstanceCreated: func() {
			log.Printf("Instance created.")
		},
	})
	if err != nil {
		return fmt.Errorf("failed to create VM: %v", err)
	}
	fmt.Printf("%s\t%s\n", builderType, client.URL())
	return nil
}
|
|
@ -1,81 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/tools/dashboard/buildlet"
|
|
||||||
)
|
|
||||||
|
|
||||||
func destroy(args []string) error {
|
|
||||||
fs := flag.NewFlagSet("destroy", flag.ContinueOnError)
|
|
||||||
fs.Usage = func() {
|
|
||||||
fmt.Fprintln(os.Stderr, "create usage: gomote destroy <instance>\n\n")
|
|
||||||
fs.PrintDefaults()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.Parse(args)
|
|
||||||
if fs.NArg() != 1 {
|
|
||||||
fs.Usage()
|
|
||||||
}
|
|
||||||
name := fs.Arg(0)
|
|
||||||
bc, err := namedClient(name)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ask it to kill itself, and tell GCE to kill it too:
|
|
||||||
gceErrc := make(chan error, 1)
|
|
||||||
buildletErrc := make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
gceErrc <- buildlet.DestroyVM(projTokenSource(), *proj, *zone, fmt.Sprintf("mote-%s-%s", username(), name))
|
|
||||||
}()
|
|
||||||
go func() {
|
|
||||||
buildletErrc <- bc.Destroy()
|
|
||||||
}()
|
|
||||||
timeout := time.NewTimer(5 * time.Second)
|
|
||||||
defer timeout.Stop()
|
|
||||||
|
|
||||||
var retErr error
|
|
||||||
var gceDone, buildletDone bool
|
|
||||||
for !gceDone || !buildletDone {
|
|
||||||
select {
|
|
||||||
case err := <-gceErrc:
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("GCE: %v", err)
|
|
||||||
retErr = err
|
|
||||||
} else {
|
|
||||||
log.Printf("Requested GCE delete.")
|
|
||||||
}
|
|
||||||
gceDone = true
|
|
||||||
case err := <-buildletErrc:
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Buildlet: %v", err)
|
|
||||||
retErr = err
|
|
||||||
} else {
|
|
||||||
log.Printf("Requested buildlet to shut down.")
|
|
||||||
}
|
|
||||||
buildletDone = true
|
|
||||||
case <-timeout.C:
|
|
||||||
if !buildletDone {
|
|
||||||
log.Printf("timeout asking buildlet to shut down")
|
|
||||||
}
|
|
||||||
if !gceDone {
|
|
||||||
log.Printf("timeout asking GCE to delete builder VM")
|
|
||||||
}
|
|
||||||
return errors.New("timeout")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return retErr
|
|
||||||
}
|
|
@ -1,44 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// get a .tar.gz
|
|
||||||
func getTar(args []string) error {
|
|
||||||
fs := flag.NewFlagSet("get", flag.ContinueOnError)
|
|
||||||
fs.Usage = func() {
|
|
||||||
fmt.Fprintln(os.Stderr, "create usage: gomote gettar [get-opts] <buildlet-name>\n")
|
|
||||||
fs.PrintDefaults()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
var dir string
|
|
||||||
fs.StringVar(&dir, "dir", "", "relative directory from buildlet's work dir to tar up")
|
|
||||||
|
|
||||||
fs.Parse(args)
|
|
||||||
if fs.NArg() != 1 {
|
|
||||||
fs.Usage()
|
|
||||||
}
|
|
||||||
|
|
||||||
name := fs.Arg(0)
|
|
||||||
bc, err := namedClient(name)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
tgz, err := bc.GetTar(dir)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer tgz.Close()
|
|
||||||
_, err = io.Copy(os.Stdout, tgz)
|
|
||||||
return err
|
|
||||||
}
|
|
@ -1,106 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
/*
|
|
||||||
The gomote command is a client for the Go builder infrastructure.
|
|
||||||
It's a remote control for remote Go builder machines.
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
|
|
||||||
gomote [global-flags] cmd [cmd-flags]
|
|
||||||
|
|
||||||
For example,
|
|
||||||
$ gomote create openbsd-amd64-gce56
|
|
||||||
$ gomote push
|
|
||||||
$ gomote run openbsd-amd64-gce56 src/make.bash
|
|
||||||
|
|
||||||
TODO: document more, and figure out the CLI interface more.
|
|
||||||
*/
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
proj = flag.String("project", "symbolic-datum-552", "GCE project owning builders")
|
|
||||||
zone = flag.String("zone", "us-central1-a", "GCE zone")
|
|
||||||
)
|
|
||||||
|
|
||||||
type command struct {
|
|
||||||
name string
|
|
||||||
des string
|
|
||||||
run func([]string) error
|
|
||||||
}
|
|
||||||
|
|
||||||
var commands = map[string]command{}
|
|
||||||
|
|
||||||
func sortedCommands() []string {
|
|
||||||
s := make([]string, 0, len(commands))
|
|
||||||
for name := range commands {
|
|
||||||
s = append(s, name)
|
|
||||||
}
|
|
||||||
sort.Strings(s)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func usage() {
|
|
||||||
fmt.Fprintf(os.Stderr, `Usage of gomote: gomote [global-flags] <cmd> [cmd-flags]
|
|
||||||
|
|
||||||
Global flags:
|
|
||||||
`)
|
|
||||||
flag.PrintDefaults()
|
|
||||||
fmt.Fprintf(os.Stderr, "Commands:\n\n")
|
|
||||||
for _, name := range sortedCommands() {
|
|
||||||
fmt.Fprintf(os.Stderr, " %-10s %s\n", name, commands[name].des)
|
|
||||||
}
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func registerCommand(name, des string, run func([]string) error) {
|
|
||||||
if _, dup := commands[name]; dup {
|
|
||||||
panic("duplicate registration of " + name)
|
|
||||||
}
|
|
||||||
commands[name] = command{
|
|
||||||
name: name,
|
|
||||||
des: des,
|
|
||||||
run: run,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func registerCommands() {
|
|
||||||
registerCommand("create", "create a buildlet", create)
|
|
||||||
registerCommand("destroy", "destroy a buildlet", destroy)
|
|
||||||
registerCommand("list", "list buildlets", list)
|
|
||||||
registerCommand("run", "run a command on a buildlet", run)
|
|
||||||
registerCommand("put", "put files on a buildlet", put)
|
|
||||||
registerCommand("puttar", "extract a tar.gz to a buildlet", putTar)
|
|
||||||
registerCommand("gettar", "extract a tar.gz from a buildlet", getTar)
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
registerCommands()
|
|
||||||
flag.Usage = usage
|
|
||||||
flag.Parse()
|
|
||||||
args := flag.Args()
|
|
||||||
if len(args) == 0 {
|
|
||||||
usage()
|
|
||||||
}
|
|
||||||
cmdName := args[0]
|
|
||||||
cmd, ok := commands[cmdName]
|
|
||||||
if !ok {
|
|
||||||
fmt.Fprintf(os.Stderr, "Unknown command %q\n", cmdName)
|
|
||||||
usage()
|
|
||||||
}
|
|
||||||
err := cmd.run(args[1:])
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error running %s: %v\n", cmdName, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,68 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/tools/dashboard/buildlet"
|
|
||||||
)
|
|
||||||
|
|
||||||
func list(args []string) error {
|
|
||||||
fs := flag.NewFlagSet("list", flag.ContinueOnError)
|
|
||||||
fs.Usage = func() {
|
|
||||||
fmt.Fprintln(os.Stderr, "list usage: gomote list\n\n")
|
|
||||||
fs.PrintDefaults()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
fs.Parse(args)
|
|
||||||
if fs.NArg() != 0 {
|
|
||||||
fs.Usage()
|
|
||||||
}
|
|
||||||
|
|
||||||
prefix := fmt.Sprintf("mote-%s-", username())
|
|
||||||
vms, err := buildlet.ListVMs(projTokenSource(), *proj, *zone)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to list VMs: %v", err)
|
|
||||||
}
|
|
||||||
for _, vm := range vms {
|
|
||||||
if !strings.HasPrefix(vm.Name, prefix) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fmt.Printf("%s\thttps://%s\n", vm.Type, strings.TrimSuffix(vm.IPPort, ":443"))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func namedClient(name string) (*buildlet.Client, error) {
|
|
||||||
// TODO(bradfitz): cache the list on disk and avoid the API call?
|
|
||||||
vms, err := buildlet.ListVMs(projTokenSource(), *proj, *zone)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error listing VMs while looking up %q: %v", name, err)
|
|
||||||
}
|
|
||||||
wantName := fmt.Sprintf("mote-%s-%s", username(), name)
|
|
||||||
var matches []buildlet.VM
|
|
||||||
for _, vm := range vms {
|
|
||||||
if vm.Name == wantName {
|
|
||||||
return buildlet.NewClient(vm.IPPort, vm.TLS), nil
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(vm.Name, wantName) {
|
|
||||||
matches = append(matches, vm)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(matches) == 1 {
|
|
||||||
vm := matches[0]
|
|
||||||
return buildlet.NewClient(vm.IPPort, vm.TLS), nil
|
|
||||||
}
|
|
||||||
if len(matches) > 1 {
|
|
||||||
return nil, fmt.Errorf("prefix %q is ambiguous")
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("buildlet %q not running", name)
|
|
||||||
}
|
|
@ -1,84 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// put a .tar.gz
|
|
||||||
func putTar(args []string) error {
|
|
||||||
fs := flag.NewFlagSet("put", flag.ContinueOnError)
|
|
||||||
fs.Usage = func() {
|
|
||||||
fmt.Fprintln(os.Stderr, "create usage: gomote puttar [put-opts] <buildlet-name> [tar.gz file or '-' for stdin]\n")
|
|
||||||
fs.PrintDefaults()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
var rev string
|
|
||||||
fs.StringVar(&rev, "gorev", "", "If non-empty, git hash to download from gerrit and put to the buildlet. e.g. 886b02d705ff for Go 1.4.1. This just maps to the --URL flag, so the two options are mutually exclusive.")
|
|
||||||
var dir string
|
|
||||||
fs.StringVar(&dir, "dir", "", "relative directory from buildlet's work dir to extra tarball into")
|
|
||||||
var tarURL string
|
|
||||||
fs.StringVar(&tarURL, "url", "", "URL of tarball, instead of provided file.")
|
|
||||||
|
|
||||||
fs.Parse(args)
|
|
||||||
if fs.NArg() < 1 || fs.NArg() > 2 {
|
|
||||||
fs.Usage()
|
|
||||||
}
|
|
||||||
if rev != "" {
|
|
||||||
if tarURL != "" {
|
|
||||||
fmt.Fprintln(os.Stderr, "--gorev and --url are mutually exclusive")
|
|
||||||
fs.Usage()
|
|
||||||
}
|
|
||||||
tarURL = "https://go.googlesource.com/go/+archive/" + rev + ".tar.gz"
|
|
||||||
}
|
|
||||||
|
|
||||||
name := fs.Arg(0)
|
|
||||||
bc, err := namedClient(name)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if tarURL != "" {
|
|
||||||
if fs.NArg() != 1 {
|
|
||||||
fs.Usage()
|
|
||||||
}
|
|
||||||
return bc.PutTarFromURL(tarURL, dir)
|
|
||||||
}
|
|
||||||
|
|
||||||
var tgz io.Reader = os.Stdin
|
|
||||||
if fs.NArg() == 2 && fs.Arg(1) != "-" {
|
|
||||||
f, err := os.Open(fs.Arg(1))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
tgz = f
|
|
||||||
}
|
|
||||||
return bc.PutTar(tgz, dir)
|
|
||||||
}
|
|
||||||
|
|
||||||
// put single files
|
|
||||||
func put(args []string) error {
|
|
||||||
fs := flag.NewFlagSet("put", flag.ContinueOnError)
|
|
||||||
fs.Usage = func() {
|
|
||||||
fmt.Fprintln(os.Stderr, "create usage: gomote put [put-opts] <type>\n\n")
|
|
||||||
fs.PrintDefaults()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
fs.Parse(args)
|
|
||||||
if fs.NArg() != 1 {
|
|
||||||
fs.Usage()
|
|
||||||
}
|
|
||||||
return fmt.Errorf("TODO")
|
|
||||||
builderType := fs.Arg(0)
|
|
||||||
_ = builderType
|
|
||||||
return nil
|
|
||||||
}
|
|
@ -1,47 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/tools/dashboard/buildlet"
|
|
||||||
)
|
|
||||||
|
|
||||||
func run(args []string) error {
|
|
||||||
fs := flag.NewFlagSet("run", flag.ContinueOnError)
|
|
||||||
fs.Usage = func() {
|
|
||||||
fmt.Fprintln(os.Stderr, "create usage: gomote run [run-opts] <instance> <cmd> [args...]")
|
|
||||||
fs.PrintDefaults()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
var sys bool
|
|
||||||
fs.BoolVar(&sys, "system", false, "run inside the system, and not inside the workdir; this is implicit if cmd starts with '/'")
|
|
||||||
|
|
||||||
fs.Parse(args)
|
|
||||||
if fs.NArg() < 2 {
|
|
||||||
fs.Usage()
|
|
||||||
}
|
|
||||||
name, cmd := fs.Arg(0), fs.Arg(1)
|
|
||||||
bc, err := namedClient(name)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
remoteErr, execErr := bc.Exec(cmd, buildlet.ExecOpts{
|
|
||||||
SystemLevel: sys || strings.HasPrefix(cmd, "/"),
|
|
||||||
Output: os.Stdout,
|
|
||||||
Args: fs.Args()[2:],
|
|
||||||
})
|
|
||||||
if execErr != nil {
|
|
||||||
return fmt.Errorf("Error trying to execute %s: %v", cmd, execErr)
|
|
||||||
}
|
|
||||||
return remoteErr
|
|
||||||
}
|
|
@ -1,238 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// The retrybuilds command clears build failures from the build.golang.org dashboard
|
|
||||||
// to force them to be rebuilt.
|
|
||||||
//
|
|
||||||
// Valid usage modes:
|
|
||||||
//
|
|
||||||
// retrybuilds -loghash=f45f0eb8
|
|
||||||
// retrybuilds -builder=openbsd-amd64
|
|
||||||
// retrybuilds -builder=openbsd-amd64 -hash=6fecb7
|
|
||||||
// retrybuilds -redo-flaky
|
|
||||||
// retrybuilds -redo-flaky -builder=linux-amd64-clang
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/md5"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
masterKeyFile = flag.String("masterkey", filepath.Join(os.Getenv("HOME"), "keys", "gobuilder-master.key"), "path to Go builder master key. If present, the key argument is not necessary")
|
|
||||||
keyFile = flag.String("key", "", "path to key file")
|
|
||||||
builder = flag.String("builder", "", "builder to wipe a result for.")
|
|
||||||
hash = flag.String("hash", "", "Hash to wipe. If empty, all will be wiped.")
|
|
||||||
redoFlaky = flag.Bool("redo-flaky", false, "Reset all flaky builds. If builder is empty, the master key is required.")
|
|
||||||
builderPrefix = flag.String("builder-prefix", "https://build.golang.org", "builder URL prefix")
|
|
||||||
logHash = flag.String("loghash", "", "If non-empty, clear the build that failed with this loghash prefix")
|
|
||||||
)
|
|
||||||
|
|
||||||
type Failure struct {
|
|
||||||
Builder string
|
|
||||||
Hash string
|
|
||||||
LogURL string
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Parse()
|
|
||||||
*builderPrefix = strings.TrimSuffix(*builderPrefix, "/")
|
|
||||||
if *logHash != "" {
|
|
||||||
substr := "/log/" + *logHash
|
|
||||||
for _, f := range failures() {
|
|
||||||
if strings.Contains(f.LogURL, substr) {
|
|
||||||
wipe(f.Builder, f.Hash)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if *redoFlaky {
|
|
||||||
fixTheFlakes()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if *builder == "" {
|
|
||||||
log.Fatalf("Missing -builder, -redo-flaky, or -loghash flag.")
|
|
||||||
}
|
|
||||||
wipe(*builder, fullHash(*hash))
|
|
||||||
}
|
|
||||||
|
|
||||||
func fixTheFlakes() {
|
|
||||||
gate := make(chan bool, 50)
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
for _, f := range failures() {
|
|
||||||
f := f
|
|
||||||
if *builder != "" && f.Builder != *builder {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
gate <- true
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
defer func() { <-gate }()
|
|
||||||
res, err := http.Get(f.LogURL)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Error fetching %s: %v", f.LogURL, err)
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
failLog, err := ioutil.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Error reading %s: %v", f.LogURL, err)
|
|
||||||
}
|
|
||||||
if isFlaky(string(failLog)) {
|
|
||||||
log.Printf("Restarting flaky %+v", f)
|
|
||||||
wipe(f.Builder, f.Hash)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
var flakePhrases = []string{
|
|
||||||
"No space left on device",
|
|
||||||
"fatal error: error in backend: IO failure on output stream",
|
|
||||||
"Boffset: unknown state 0",
|
|
||||||
"Bseek: unknown state 0",
|
|
||||||
"error exporting repository: exit status",
|
|
||||||
"remote error: User Is Over Quota",
|
|
||||||
"fatal: remote did not send all necessary objects",
|
|
||||||
}
|
|
||||||
|
|
||||||
func isFlaky(failLog string) bool {
|
|
||||||
if strings.HasPrefix(failLog, "exit status ") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(failLog, "timed out after ") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
for _, phrase := range flakePhrases {
|
|
||||||
if strings.Contains(failLog, phrase) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
numLines := strings.Count(failLog, "\n")
|
|
||||||
if numLines < 20 && strings.Contains(failLog, "error: exit status") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// e.g. fatal: destination path 'go.tools.TMP' already exists and is not an empty directory.
|
|
||||||
// To be fixed in golang.org/issue/9407
|
|
||||||
if strings.Contains(failLog, "fatal: destination path '") &&
|
|
||||||
strings.Contains(failLog, "' already exists and is not an empty directory.") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func fullHash(h string) string {
|
|
||||||
if h == "" || len(h) == 40 {
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
for _, f := range failures() {
|
|
||||||
if strings.HasPrefix(f.Hash, h) {
|
|
||||||
return f.Hash
|
|
||||||
}
|
|
||||||
}
|
|
||||||
log.Fatalf("invalid hash %q; failed to finds its full hash. Not a recent failure?", h)
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
// hash may be empty
|
|
||||||
func wipe(builder, hash string) {
|
|
||||||
if hash != "" {
|
|
||||||
log.Printf("Clearing %s, hash %s", builder, hash)
|
|
||||||
} else {
|
|
||||||
log.Printf("Clearing all builds for %s", builder)
|
|
||||||
}
|
|
||||||
vals := url.Values{
|
|
||||||
"builder": {builder},
|
|
||||||
"hash": {hash},
|
|
||||||
"key": {builderKey(builder)},
|
|
||||||
}
|
|
||||||
res, err := http.PostForm(*builderPrefix+"/clear-results?"+vals.Encode(), nil)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode != 200 {
|
|
||||||
log.Fatalf("Error clearing %v hash %q: %v", builder, hash, res.Status)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func builderKey(builder string) string {
|
|
||||||
if v, ok := builderKeyFromMaster(builder); ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
if *keyFile == "" {
|
|
||||||
log.Fatalf("No --key specified for builder %s", builder)
|
|
||||||
}
|
|
||||||
slurp, err := ioutil.ReadFile(*keyFile)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Error reading builder key %s: %v", builder, err)
|
|
||||||
}
|
|
||||||
return strings.TrimSpace(string(slurp))
|
|
||||||
}
|
|
||||||
|
|
||||||
func builderKeyFromMaster(builder string) (key string, ok bool) {
|
|
||||||
if *masterKeyFile == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
slurp, err := ioutil.ReadFile(*masterKeyFile)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h := hmac.New(md5.New, bytes.TrimSpace(slurp))
|
|
||||||
h.Write([]byte(builder))
|
|
||||||
return fmt.Sprintf("%x", h.Sum(nil)), true
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
failMu sync.Mutex
|
|
||||||
failCache []Failure
|
|
||||||
)
|
|
||||||
|
|
||||||
func failures() (ret []Failure) {
|
|
||||||
failMu.Lock()
|
|
||||||
ret = failCache
|
|
||||||
failMu.Unlock()
|
|
||||||
if ret != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ret = []Failure{} // non-nil
|
|
||||||
|
|
||||||
res, err := http.Get(*builderPrefix + "/?mode=failures")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
slurp, err := ioutil.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
body := string(slurp)
|
|
||||||
for _, line := range strings.Split(body, "\n") {
|
|
||||||
f := strings.Fields(line)
|
|
||||||
if len(f) == 3 {
|
|
||||||
ret = append(ret, Failure{
|
|
||||||
Hash: f[0],
|
|
||||||
Builder: f[1],
|
|
||||||
LogURL: f[2],
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
failMu.Lock()
|
|
||||||
failCache = ret
|
|
||||||
failMu.Unlock()
|
|
||||||
return ret
|
|
||||||
}
|
|
@ -1,117 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build extdep
|
|
||||||
|
|
||||||
// The upload command writes a file to Google Cloud Storage. It's used
|
|
||||||
// exclusively by the Makefiles in the Go project repos. Think of it
|
|
||||||
// as a very light version of gsutil or gcloud, but with some
|
|
||||||
// Go-specific configuration knowledge baked in.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/oauth2"
|
|
||||||
"golang.org/x/tools/dashboard/auth"
|
|
||||||
"google.golang.org/cloud"
|
|
||||||
"google.golang.org/cloud/storage"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
public = flag.Bool("public", false, "object should be world-readable")
|
|
||||||
file = flag.String("file", "-", "Filename to read object from, or '-' for stdin.")
|
|
||||||
verbose = flag.Bool("verbose", false, "verbose logging")
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Usage = func() {
|
|
||||||
fmt.Fprintf(os.Stderr, "Usage: upload [--public] [--file=...] <bucket/object>\n")
|
|
||||||
flag.PrintDefaults()
|
|
||||||
}
|
|
||||||
flag.Parse()
|
|
||||||
if flag.NArg() != 1 {
|
|
||||||
flag.Usage()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
args := strings.SplitN(flag.Arg(0), "/", 2)
|
|
||||||
if len(args) != 2 {
|
|
||||||
flag.Usage()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
bucket, object := args[0], args[1]
|
|
||||||
|
|
||||||
proj, ok := bucketProject[bucket]
|
|
||||||
if !ok {
|
|
||||||
log.Fatalf("bucket %q doesn't have an associated project in upload.go")
|
|
||||||
}
|
|
||||||
|
|
||||||
ts, err := tokenSource(bucket)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to get an OAuth2 token source: %v", err)
|
|
||||||
}
|
|
||||||
httpClient := oauth2.NewClient(oauth2.NoContext, ts)
|
|
||||||
|
|
||||||
ctx := cloud.NewContext(proj, httpClient)
|
|
||||||
w := storage.NewWriter(ctx, bucket, object)
|
|
||||||
// If you don't give the owners access, the web UI seems to
|
|
||||||
// have a bug and doesn't have access to see that it's public, so
|
|
||||||
// won't render the "Shared Publicly" link. So we do that, even
|
|
||||||
// though it's dumb and unnecessary otherwise:
|
|
||||||
w.ACL = append(w.ACL, storage.ACLRule{Entity: storage.ACLEntity("project-owners-" + proj), Role: storage.RoleOwner})
|
|
||||||
if *public {
|
|
||||||
w.ACL = append(w.ACL, storage.ACLRule{Entity: storage.AllUsers, Role: storage.RoleReader})
|
|
||||||
}
|
|
||||||
var content io.Reader
|
|
||||||
if *file == "-" {
|
|
||||||
content = os.Stdin
|
|
||||||
} else {
|
|
||||||
content, err = os.Open(*file)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const maxSlurp = 1 << 20
|
|
||||||
var buf bytes.Buffer
|
|
||||||
n, err := io.CopyN(&buf, content, maxSlurp)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
log.Fatalf("Error reading from stdin: %v, %v", n, err)
|
|
||||||
}
|
|
||||||
w.ContentType = http.DetectContentType(buf.Bytes())
|
|
||||||
|
|
||||||
_, err = io.Copy(w, io.MultiReader(&buf, content))
|
|
||||||
if cerr := w.Close(); cerr != nil && err == nil {
|
|
||||||
err = cerr
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Write error: %v", err)
|
|
||||||
}
|
|
||||||
if *verbose {
|
|
||||||
log.Printf("Wrote %v", object)
|
|
||||||
}
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
var bucketProject = map[string]string{
|
|
||||||
"go-builder-data": "symbolic-datum-552",
|
|
||||||
"http2-demo-server-tls": "symbolic-datum-552",
|
|
||||||
"winstrap": "999119582588",
|
|
||||||
"gobuilder": "999119582588", // deprecated
|
|
||||||
}
|
|
||||||
|
|
||||||
func tokenSource(bucket string) (oauth2.TokenSource, error) {
|
|
||||||
proj, ok := bucketProject[bucket]
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("unknown project for bucket %q", bucket)
|
|
||||||
}
|
|
||||||
return auth.ProjectTokenSource(proj, storage.ScopeReadWrite)
|
|
||||||
}
|
|
16
dashboard/env/commit-watcher/Dockerfile
vendored
16
dashboard/env/commit-watcher/Dockerfile
vendored
@ -1,16 +0,0 @@
|
|||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
# Commit watcher for Go repos.
|
|
||||||
|
|
||||||
FROM debian:wheezy
|
|
||||||
MAINTAINER golang-dev <golang-dev@googlegroups.com>
|
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND noninteractive
|
|
||||||
|
|
||||||
ADD /scripts/install-apt-deps.sh /scripts/
|
|
||||||
RUN /scripts/install-apt-deps.sh
|
|
||||||
|
|
||||||
ADD /scripts/build-commit-watcher.sh /scripts/
|
|
||||||
RUN GO_REV=go1.4 WATCHER_REV=a54d0066172 /scripts/build-commit-watcher.sh && test -f /usr/local/bin/watcher
|
|
9
dashboard/env/commit-watcher/Makefile
vendored
9
dashboard/env/commit-watcher/Makefile
vendored
@ -1,9 +0,0 @@
|
|||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
docker: Dockerfile
|
|
||||||
docker build -t go-commit-watcher .
|
|
||||||
|
|
||||||
docker-commit-watcher.tar.gz: docker
|
|
||||||
docker save go-commit-watcher | gzip | (cd ../../cmd/upload && go run --tags=extdep upload.go --public go-builder-data/docker-commit-watcher.tar.gz)
|
|
@ -1,20 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
export GOPATH=/gopath
|
|
||||||
export GOROOT=/goroot
|
|
||||||
PREFIX=/usr/local
|
|
||||||
: ${GO_REV:?"need to be set to the golang repo revision used to build the commit watcher."}
|
|
||||||
: ${WATCHER_REV:?"need to be set to the go.tools repo revision for the commit watcher."}
|
|
||||||
|
|
||||||
mkdir -p $GOROOT
|
|
||||||
git clone https://go.googlesource.com/go $GOROOT
|
|
||||||
(cd $GOROOT/src && git reset --hard $GO_REV && find && ./make.bash)
|
|
||||||
|
|
||||||
GO_TOOLS=$GOPATH/src/golang.org/x/tools
|
|
||||||
mkdir -p $GO_TOOLS
|
|
||||||
git clone https://go.googlesource.com/tools $GO_TOOLS
|
|
||||||
|
|
||||||
mkdir -p $PREFIX/bin
|
|
||||||
(cd $GO_TOOLS && git reset --hard $WATCHER_REV && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/watcher)
|
|
||||||
|
|
||||||
rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
|
|
@ -1,11 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y --no-install-recommends ca-certificates
|
|
||||||
# For building Go's bootstrap 'dist' prog
|
|
||||||
apt-get install -y --no-install-recommends gcc libc6-dev
|
|
||||||
# For interacting with the Go source & subrepos:
|
|
||||||
apt-get install -y --no-install-recommends git-core
|
|
||||||
|
|
||||||
apt-get clean
|
|
||||||
rm -fr /var/lib/apt/lists
|
|
19
dashboard/env/linux-x86-base/Dockerfile
vendored
19
dashboard/env/linux-x86-base/Dockerfile
vendored
@ -1,19 +0,0 @@
|
|||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
# Base builder image: gobuilders/linux-x86-base
|
|
||||||
|
|
||||||
FROM debian:wheezy
|
|
||||||
MAINTAINER golang-dev <golang-dev@googlegroups.com>
|
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND noninteractive
|
|
||||||
|
|
||||||
ADD /scripts/install-apt-deps.sh /scripts/
|
|
||||||
RUN /scripts/install-apt-deps.sh
|
|
||||||
|
|
||||||
ADD /scripts/build-go-builder.sh /scripts/
|
|
||||||
RUN GO_REV=go1.4 BUILDER_REV=d79e0375a /scripts/build-go-builder.sh && test -f /usr/local/bin/builder
|
|
||||||
|
|
||||||
RUN mkdir -p /go1.4-386 && (curl --silent https://storage.googleapis.com/golang/go1.4.linux-386.tar.gz | tar -C /go1.4-386 -zxv)
|
|
||||||
RUN mkdir -p /go1.4-amd64 && (curl --silent https://storage.googleapis.com/golang/go1.4.linux-amd64.tar.gz | tar -C /go1.4-amd64 -zxv)
|
|
15
dashboard/env/linux-x86-base/Makefile
vendored
15
dashboard/env/linux-x86-base/Makefile
vendored
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
docker: Dockerfile
|
|
||||||
docker build -t gobuilders/linux-x86-base .
|
|
||||||
|
|
||||||
docker-linux.base.tar.gz: docker
|
|
||||||
docker save gobuilders/linux-x86-base | gzip | (cd ../../cmd/upload && go run --tags=extdep upload.go --public go-builder-data/docker-linux.base.tar.gz)
|
|
||||||
|
|
||||||
check: docker
|
|
||||||
docker run -e GOROOT_BOOTSTRAP=/go1.4-amd64/go gobuilders/linux-x86-base /usr/local/bin/builder -rev=20a10e7ddd1 -buildroot=/ -v -report=false linux-amd64-temp
|
|
||||||
|
|
||||||
check32: docker
|
|
||||||
docker run -e GOROOT_BOOTSTRAP=/go1.4-386/go gobuilders/linux-x86-base /usr/local/bin/builder -rev=20a10e7ddd1 -buildroot=/ -v -report=false linux-386-temp
|
|
11
dashboard/env/linux-x86-base/README
vendored
11
dashboard/env/linux-x86-base/README
vendored
@ -1,11 +0,0 @@
|
|||||||
For now, you can at least do a single build of a single revision:
|
|
||||||
|
|
||||||
$ export BUILD=linux-amd64-temp
|
|
||||||
$ docker run \
|
|
||||||
-v $HOME/keys/$BUILD.buildkey:/.gobuildkey \
|
|
||||||
gobuilders/linux-x86-base \
|
|
||||||
/usr/local/bin/builder -rev=50ac9eded6ad -buildroot=/ -v $BUILD
|
|
||||||
|
|
||||||
TODO(bradfitz): automate with CoreOS + GCE, ala:
|
|
||||||
https://github.com/bradfitz/camlistore/blob/master/misc/gce/create.go
|
|
||||||
|
|
@ -1,24 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
export GOPATH=/gopath
|
|
||||||
export GOROOT=/goroot
|
|
||||||
PREFIX=/usr/local
|
|
||||||
: ${GO_REV:?"need to be set to the golang repo revision used to build the builder."}
|
|
||||||
: ${BUILDER_REV:?"need to be set to the go.tools repo revision for the builder."}
|
|
||||||
|
|
||||||
mkdir -p $GOROOT
|
|
||||||
git clone https://go.googlesource.com/go $GOROOT
|
|
||||||
(cd $GOROOT/src && git checkout $GO_REV && find && ./make.bash)
|
|
||||||
|
|
||||||
GO_TOOLS=$GOPATH/src/golang.org/x/tools
|
|
||||||
mkdir -p $GO_TOOLS
|
|
||||||
git clone https://go.googlesource.com/tools $GO_TOOLS
|
|
||||||
|
|
||||||
mkdir -p $PREFIX/bin
|
|
||||||
(cd $GO_TOOLS && git reset --hard $BUILDER_REV && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/builder)
|
|
||||||
|
|
||||||
rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
|
|
||||||
|
|
||||||
cd $GOROOT
|
|
||||||
git clean -f -d -x
|
|
||||||
git checkout master
|
|
@ -1,18 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y --no-install-recommends ca-certificates
|
|
||||||
# Optionally used by some net/http tests:
|
|
||||||
apt-get install -y --no-install-recommends strace
|
|
||||||
# For building Go's bootstrap 'dist' prog
|
|
||||||
apt-get install -y --no-install-recommends gcc libc6-dev
|
|
||||||
# For 32-bit builds:
|
|
||||||
# TODO(bradfitz): move these into a 386 image that derives from this one.
|
|
||||||
apt-get install -y --no-install-recommends libc6-dev-i386 gcc-multilib
|
|
||||||
# For interacting with the Go source & subrepos:
|
|
||||||
apt-get install -y --no-install-recommends git-core
|
|
||||||
# For downloading Go 1.4:
|
|
||||||
apt-get install -y --no-install-recommends curl
|
|
||||||
|
|
||||||
apt-get clean
|
|
||||||
rm -fr /var/lib/apt/lists
|
|
23
dashboard/env/linux-x86-clang/Dockerfile
vendored
23
dashboard/env/linux-x86-clang/Dockerfile
vendored
@ -1,23 +0,0 @@
|
|||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
# gobuilders/linux-x86-clang for building with clang instead of gcc.
|
|
||||||
|
|
||||||
FROM debian:wheezy
|
|
||||||
MAINTAINER golang-dev <golang-dev@googlegroups.com>
|
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND noninteractive
|
|
||||||
|
|
||||||
ADD /sources/clang-deps.list /etc/apt/sources.list.d/
|
|
||||||
|
|
||||||
ADD /scripts/install-apt-deps.sh /scripts/
|
|
||||||
RUN /scripts/install-apt-deps.sh
|
|
||||||
|
|
||||||
ADD /scripts/build-go-builder.sh /scripts/
|
|
||||||
RUN GO_REV=go1.4 BUILDER_REV=d79e0375a /scripts/build-go-builder.sh && test -f /usr/local/bin/builder
|
|
||||||
|
|
||||||
ENV CC /usr/bin/clang
|
|
||||||
|
|
||||||
RUN mkdir -p /go1.4-386 && (curl --silent https://storage.googleapis.com/golang/go1.4.linux-386.tar.gz | tar -C /go1.4-386 -zxv)
|
|
||||||
RUN mkdir -p /go1.4-amd64 && (curl --silent https://storage.googleapis.com/golang/go1.4.linux-amd64.tar.gz | tar -C /go1.4-amd64 -zxv)
|
|
15
dashboard/env/linux-x86-clang/Makefile
vendored
15
dashboard/env/linux-x86-clang/Makefile
vendored
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
docker: Dockerfile
|
|
||||||
docker build -t gobuilders/linux-x86-clang .
|
|
||||||
|
|
||||||
docker-linux.clang.tar.gz: docker
|
|
||||||
docker save gobuilders/linux-x86-clang | gzip | (cd ../../cmd/upload && go run --tags=extdep upload.go --public go-builder-data/docker-linux.clang.tar.gz)
|
|
||||||
|
|
||||||
check: docker
|
|
||||||
docker run -e GOROOT_BOOTSTRAP=/go1.4-amd64/go gobuilders/linux-x86-clang /usr/local/bin/builder -rev=20a10e7ddd1b -buildroot=/ -v -report=false linux-amd64-temp
|
|
||||||
|
|
||||||
check32: docker
|
|
||||||
docker run -e GOROOT_BOOTSTRAP=/go1.4-386/go gobuilders/linux-x86-clang /usr/local/bin/builder -rev=20a10e7ddd1b -buildroot=/ -v -report=false linux-386-temp
|
|
@ -1,24 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
export GOPATH=/gopath
|
|
||||||
export GOROOT=/goroot
|
|
||||||
PREFIX=/usr/local
|
|
||||||
: ${GO_REV:?"need to be set to the golang repo revision used to build the builder."}
|
|
||||||
: ${BUILDER_REV:?"need to be set to the go.tools repo revision for the builder."}
|
|
||||||
|
|
||||||
mkdir -p $GOROOT
|
|
||||||
git clone https://go.googlesource.com/go $GOROOT
|
|
||||||
(cd $GOROOT/src && git checkout $GO_REV && find && ./make.bash)
|
|
||||||
|
|
||||||
GO_TOOLS=$GOPATH/src/golang.org/x/tools
|
|
||||||
mkdir -p $GO_TOOLS
|
|
||||||
git clone https://go.googlesource.com/tools $GO_TOOLS
|
|
||||||
|
|
||||||
mkdir -p $PREFIX/bin
|
|
||||||
(cd $GO_TOOLS && git reset --hard $BUILDER_REV && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/builder)
|
|
||||||
|
|
||||||
rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
|
|
||||||
|
|
||||||
cd $GOROOT
|
|
||||||
git clean -f -d -x
|
|
||||||
git checkout master
|
|
@ -1,22 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y --no-install-recommends ca-certificates
|
|
||||||
# Optionally used by some net/http tests:
|
|
||||||
apt-get install -y --no-install-recommends strace
|
|
||||||
# For building Go's bootstrap 'dist' prog
|
|
||||||
apt-get install -y --no-install-recommends wget
|
|
||||||
wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | apt-key add -
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y --no-install-recommends clang-3.5
|
|
||||||
# TODO(cmang): move these into a 386 image that derives from this one.
|
|
||||||
apt-get install -y --no-install-recommends libc6-dev-i386 gcc-multilib
|
|
||||||
# Remove gcc binary so it doesn't interfere with clang
|
|
||||||
rm -f /usr/bin/gcc
|
|
||||||
# For interacting with the Go source & subrepos:
|
|
||||||
apt-get install -y --no-install-recommends git-core
|
|
||||||
# For installing Go 1.4:
|
|
||||||
apt-get install -y --no-install-recommends curl
|
|
||||||
|
|
||||||
apt-get clean
|
|
||||||
rm -fr /var/lib/apt/lists
|
|
@ -1,3 +0,0 @@
|
|||||||
# The debian sources for stable clang builds, taken from http://llvm.org/apt/
|
|
||||||
deb http://llvm.org/apt/wheezy/ llvm-toolchain-wheezy main
|
|
||||||
deb-src http://llvm.org/apt/wheezy/ llvm-toolchain-wheezy main
|
|
19
dashboard/env/linux-x86-gccgo/Dockerfile
vendored
19
dashboard/env/linux-x86-gccgo/Dockerfile
vendored
@ -1,19 +0,0 @@
|
|||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
# gobuilders/linux-x86-gccgo for 32- and 64-bit gccgo.
|
|
||||||
|
|
||||||
FROM debian:wheezy
|
|
||||||
MAINTAINER golang-dev <golang-dev@googlegroups.com>
|
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND noninteractive
|
|
||||||
|
|
||||||
ADD /scripts/install-apt-deps.sh /scripts/
|
|
||||||
RUN /scripts/install-apt-deps.sh
|
|
||||||
|
|
||||||
ADD /scripts/install-gold.sh /scripts/
|
|
||||||
RUN /scripts/install-gold.sh
|
|
||||||
|
|
||||||
ADD /scripts/install-gccgo-builder.sh /scripts/
|
|
||||||
RUN /scripts/install-gccgo-builder.sh && test -f /usr/local/bin/builder
|
|
15
dashboard/env/linux-x86-gccgo/Makefile
vendored
15
dashboard/env/linux-x86-gccgo/Makefile
vendored
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
docker: Dockerfile
|
|
||||||
docker build -t gobuilders/linux-x86-gccgo .
|
|
||||||
|
|
||||||
docker-linux.gccgo.tar.gz: docker
|
|
||||||
docker save gobuilders/linux-x86-gccgo | gzip | (cd ../../cmd/upload && go run --tags=extdep upload.go --public go-builder-data/docker-linux.gccgo.tar.gz)
|
|
||||||
|
|
||||||
check: docker
|
|
||||||
docker run gobuilders/linux-x86-gccgo /usr/local/bin/builder -tool="gccgo" -rev=b9151e911a54 -v -cmd='make RUNTESTFLAGS="--target_board=unix/-m64" check-go' -report=false linux-amd64-gccgo-temp
|
|
||||||
|
|
||||||
check32: docker
|
|
||||||
docker run gobuilders/linux-x86-gccgo /usr/local/bin/builder -tool="gccgo" -rev=b9151e911a54 -v -cmd='make RUNTESTFLAGS="--target_board=unix/-m32" check-go' -report=false linux-386-gccgo-temp
|
|
6
dashboard/env/linux-x86-gccgo/README
vendored
6
dashboard/env/linux-x86-gccgo/README
vendored
@ -1,6 +0,0 @@
|
|||||||
$ export BUILD=linux-amd64-gccgo
|
|
||||||
$ export BUILDREV=b9151e911a54
|
|
||||||
$ docker run \
|
|
||||||
-v $HOME/keys/$BUILD.buildkey:/.gobuildkey \
|
|
||||||
gobuilders/linux-x86-gccgo \
|
|
||||||
/usr/local/bin/builder -tool=gccgo -dashboard='https://build.golang.org/gccgo' -rev=$BUILDREV -buildroot=/gccgo -v -cmd='make check-go -kj' $BUILD
|
|
@ -1,20 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
apt-get update
|
|
||||||
# For running curl to get the gccgo builder binary.
|
|
||||||
apt-get install -y --no-install-recommends curl ca-certificates
|
|
||||||
# Optionally used by some net/http tests:
|
|
||||||
apt-get install -y --no-install-recommends strace
|
|
||||||
# For using numeric libraries within GCC.
|
|
||||||
apt-get install -y --no-install-recommends libgmp10-dev libmpc-dev libmpfr-dev
|
|
||||||
# For building binutils and gcc from source.
|
|
||||||
apt-get install -y --no-install-recommends make g++ flex bison
|
|
||||||
# Same as above, but for 32-bit builds as well.
|
|
||||||
apt-get install -y --no-install-recommends libc6-dev-i386 g++-multilib
|
|
||||||
# For running the extended gccgo testsuite
|
|
||||||
apt-get install -y --no-install-recommends dejagnu
|
|
||||||
# For interacting with the gccgo source and git mirror:
|
|
||||||
apt-get install -y --no-install-recommends mercurial git-core
|
|
||||||
|
|
||||||
apt-get clean
|
|
||||||
rm -rf /var/lib/apt/lists
|
|
@ -1,7 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
# Installs a version of the go.tools dashboard builder that runs the gccgo build
|
|
||||||
# command assuming there are 16 cores available to speed up build times.
|
|
||||||
# TODO(cmang): There should be an option in the builder to specify this.
|
|
||||||
|
|
||||||
curl -o /usr/local/bin/builder http://storage.googleapis.com/go-builder-data/gccgo_builder && chmod +x /usr/local/bin/builder
|
|
@ -1,9 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
# gccgo uses the Gold linker from binutils.
|
|
||||||
export BINUTILS_VERSION=binutils-2.24
|
|
||||||
mkdir -p binutils-objdir
|
|
||||||
curl -s http://ftp.gnu.org/gnu/binutils/$BINUTILS_VERSION.tar.gz | tar x --no-same-owner -zv
|
|
||||||
(cd binutils-objdir && ../$BINUTILS_VERSION/configure --enable-gold --enable-plugins --prefix=/opt/gold && make -sj && make install -sj)
|
|
||||||
|
|
||||||
rm -rf binutils*
|
|
27
dashboard/env/linux-x86-nacl/Dockerfile
vendored
27
dashboard/env/linux-x86-nacl/Dockerfile
vendored
@ -1,27 +0,0 @@
|
|||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
# gobuilders/linux-x86-nacl for 32- and 64-bit nacl.
|
|
||||||
#
|
|
||||||
# We need more modern libc than Debian stable as used in base, so we're
|
|
||||||
# using Ubuntu LTS here.
|
|
||||||
#
|
|
||||||
# TODO(bradfitz): make both be Ubuntu? But we also want Debian, Fedora,
|
|
||||||
# etc coverage., so deal with unifying these later, once there's a plan
|
|
||||||
# or a generator for them and the other builders are turned down.
|
|
||||||
|
|
||||||
FROM ubuntu:trusty
|
|
||||||
MAINTAINER golang-dev <golang-dev@googlegroups.com>
|
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND noninteractive
|
|
||||||
|
|
||||||
ADD /scripts/install-apt-deps.sh /scripts/
|
|
||||||
RUN /scripts/install-apt-deps.sh
|
|
||||||
|
|
||||||
ADD /scripts/build-go-builder.sh /scripts/
|
|
||||||
RUN GO_REV=go1.4 BUILDER_REV=6735829f /scripts/build-go-builder.sh && test -f /usr/local/bin/builder
|
|
||||||
|
|
||||||
ADD build-command.pl /usr/local/bin/
|
|
||||||
|
|
||||||
ENV PATH /usr/local/bin:$GOROOT/bin:$PATH
|
|
12
dashboard/env/linux-x86-nacl/Makefile
vendored
12
dashboard/env/linux-x86-nacl/Makefile
vendored
@ -1,12 +0,0 @@
|
|||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
docker: Dockerfile
|
|
||||||
docker build -t gobuilders/linux-x86-nacl .
|
|
||||||
|
|
||||||
upload: docker
|
|
||||||
docker save gobuilders/linux-x86-nacl | gzip | (cd ../../cmd/upload && go run --tags=extdep upload.go --public go-builder-data/docker-linux.nacl.tar.gz)
|
|
||||||
|
|
||||||
check: docker
|
|
||||||
docker run gobuilders/linux-x86-nacl /usr/local/bin/builder -rev=77e96c9208d0 -buildroot=/ -v -cmd=/usr/local/bin/build-command.pl -report=false nacl-amd64p32
|
|
6
dashboard/env/linux-x86-nacl/README
vendored
6
dashboard/env/linux-x86-nacl/README
vendored
@ -1,6 +0,0 @@
|
|||||||
$ export BUILD=nacl-amd64p32-temp
|
|
||||||
$ export BUILDREV=59b1bb4bf045
|
|
||||||
$ docker run \
|
|
||||||
-v $HOME/keys/$BUILD.buildkey:/.gobuildkey \
|
|
||||||
gobuilders/linux-x86-nacl \
|
|
||||||
/usr/local/bin/builder -rev=$BUILDREV -buildroot=/ -v -cmd=/usr/local/bin/build-command.pl $BUILD
|
|
13
dashboard/env/linux-x86-nacl/build-command.pl
vendored
13
dashboard/env/linux-x86-nacl/build-command.pl
vendored
@ -1,13 +0,0 @@
|
|||||||
#!/usr/bin/perl
|
|
||||||
|
|
||||||
use strict;
|
|
||||||
|
|
||||||
if ($ENV{GOOS} eq "nacl") {
|
|
||||||
delete $ENV{GOROOT_FINAL};
|
|
||||||
exec("./nacltest.bash", @ARGV);
|
|
||||||
die "Failed to run nacltest.bash: $!\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
exec("./all.bash", @ARGV);
|
|
||||||
die "Failed to run all.bash: $!\n";
|
|
||||||
|
|
@ -1,30 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
export GOPATH=/gopath
|
|
||||||
export GOROOT=/goroot
|
|
||||||
PREFIX=/usr/local
|
|
||||||
: ${GO_REV:?"need to be set to the golang repo revision used to build the builder."}
|
|
||||||
: ${BUILDER_REV:?"need to be set to the go.tools repo revision for the builder."}
|
|
||||||
|
|
||||||
mkdir -p $GOROOT
|
|
||||||
git clone https://go.googlesource.com/go $GOROOT
|
|
||||||
(cd $GOROOT/src && git checkout $GO_REV && find && ./make.bash)
|
|
||||||
|
|
||||||
GO_TOOLS=$GOPATH/src/golang.org/x/tools
|
|
||||||
mkdir -p $GO_TOOLS
|
|
||||||
git clone https://go.googlesource.com/tools $GO_TOOLS
|
|
||||||
|
|
||||||
mkdir -p $PREFIX/bin
|
|
||||||
(cd $GO_TOOLS && git reset --hard $BUILDER_REV && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/builder)
|
|
||||||
|
|
||||||
rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
|
|
||||||
|
|
||||||
(cd /usr/local/bin && curl -s -O https://storage.googleapis.com/gobuilder/sel_ldr_x86_32 && chmod +x sel_ldr_x86_32)
|
|
||||||
(cd /usr/local/bin && curl -s -O https://storage.googleapis.com/gobuilder/sel_ldr_x86_64 && chmod +x sel_ldr_x86_64)
|
|
||||||
|
|
||||||
ln -s $GOROOT/misc/nacl/go_nacl_386_exec /usr/local/bin/
|
|
||||||
ln -s $GOROOT/misc/nacl/go_nacl_amd64p32_exec /usr/local/bin/
|
|
||||||
|
|
||||||
cd $GOROOT
|
|
||||||
git clean -f -d -x
|
|
||||||
git checkout master
|
|
@ -1,14 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
apt-get update
|
|
||||||
# curl is needed to fetch the sel_ldr nacl binaries:
|
|
||||||
apt-get install -y --no-install-recommends curl ca-certificates
|
|
||||||
# For building Go's bootstrap 'dist' prog
|
|
||||||
apt-get install -y --no-install-recommends gcc libc6-dev
|
|
||||||
# For interacting with the Go source & subrepos:
|
|
||||||
apt-get install -y --no-install-recommends git-core
|
|
||||||
# For 32-bit nacl:
|
|
||||||
apt-get install -y --no-install-recommends libc6-i386 libc6-dev-i386 lib32stdc++6 gcc-multilib
|
|
||||||
|
|
||||||
apt-get clean
|
|
||||||
rm -fr /var/lib/apt/lists
|
|
17
dashboard/env/linux-x86-sid/Dockerfile
vendored
17
dashboard/env/linux-x86-sid/Dockerfile
vendored
@ -1,17 +0,0 @@
|
|||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
FROM debian:sid
|
|
||||||
MAINTAINER golang-dev <golang-dev@googlegroups.com>
|
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND noninteractive
|
|
||||||
|
|
||||||
ADD /scripts/install-apt-deps.sh /scripts/
|
|
||||||
RUN /scripts/install-apt-deps.sh
|
|
||||||
|
|
||||||
ADD /scripts/build-go-builder.sh /scripts/
|
|
||||||
RUN GO_REV=go1.4 BUILDER_REV=d79e0375a /scripts/build-go-builder.sh && test -f /usr/local/bin/builder
|
|
||||||
|
|
||||||
RUN mkdir -p /go1.4-386 && (curl --silent https://storage.googleapis.com/golang/go1.4.linux-386.tar.gz | tar -C /go1.4-386 -zxv)
|
|
||||||
RUN mkdir -p /go1.4-amd64 && (curl --silent https://storage.googleapis.com/golang/go1.4.linux-amd64.tar.gz | tar -C /go1.4-amd64 -zxv)
|
|
15
dashboard/env/linux-x86-sid/Makefile
vendored
15
dashboard/env/linux-x86-sid/Makefile
vendored
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
docker: Dockerfile
|
|
||||||
docker build -t gobuilders/linux-x86-sid .
|
|
||||||
|
|
||||||
docker-linux.sid.tar.gz: docker
|
|
||||||
docker save gobuilders/linux-x86-sid | gzip | (cd ../../cmd/upload && go run --tags=extdep upload.go --public go-builder-data/docker-linux.sid.tar.gz)
|
|
||||||
|
|
||||||
check: docker
|
|
||||||
docker run -e GOROOT_BOOTSTRAP=/go1.4-amd64/go gobuilders/linux-x86-sid /usr/local/bin/builder -rev=20a10e7ddd1b -buildroot=/ -v -report=false linux-amd64-sid
|
|
||||||
|
|
||||||
check32: docker
|
|
||||||
docker run -e GOROOT_BOOTSTRAP=/go1.4-386/go gobuilders/linux-x86-sid /usr/local/bin/builder -rev=20a10e7ddd1b -buildroot=/ -v -report=false linux-386-sid
|
|
@ -1,24 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
export GOPATH=/gopath
|
|
||||||
export GOROOT=/goroot
|
|
||||||
PREFIX=/usr/local
|
|
||||||
: ${GO_REV:?"need to be set to the golang repo revision used to build the builder."}
|
|
||||||
: ${BUILDER_REV:?"need to be set to the go.tools repo revision for the builder."}
|
|
||||||
|
|
||||||
mkdir -p $GOROOT
|
|
||||||
git clone https://go.googlesource.com/go $GOROOT
|
|
||||||
(cd $GOROOT/src && git checkout $GO_REV && find && ./make.bash)
|
|
||||||
|
|
||||||
GO_TOOLS=$GOPATH/src/golang.org/x/tools
|
|
||||||
mkdir -p $GO_TOOLS
|
|
||||||
git clone https://go.googlesource.com/tools $GO_TOOLS
|
|
||||||
|
|
||||||
mkdir -p $PREFIX/bin
|
|
||||||
(cd $GO_TOOLS && git reset --hard $BUILDER_REV && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/builder)
|
|
||||||
|
|
||||||
rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
|
|
||||||
|
|
||||||
cd $GOROOT
|
|
||||||
git clean -f -d -x
|
|
||||||
git checkout master
|
|
@ -1,18 +0,0 @@
|
|||||||
set -ex
|
|
||||||
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y --no-install-recommends ca-certificates
|
|
||||||
# Optionally used by some net/http tests:
|
|
||||||
apt-get install -y --no-install-recommends strace
|
|
||||||
# For building Go's bootstrap 'dist' prog
|
|
||||||
apt-get install -y --no-install-recommends gcc libc6-dev
|
|
||||||
# For 32-bit builds:
|
|
||||||
# TODO(bradfitz): move these into a 386 image that derives from this one.
|
|
||||||
apt-get install -y --no-install-recommends libc6-dev-i386 gcc-multilib
|
|
||||||
# For interacting with the Go source & subrepos:
|
|
||||||
apt-get install -y --no-install-recommends git-core
|
|
||||||
# For installing Go 1.4:
|
|
||||||
apt-get install -y --no-install-recommends curl
|
|
||||||
|
|
||||||
apt-get clean
|
|
||||||
rm -fr /var/lib/apt/lists
|
|
8
dashboard/env/openbsd-amd64/.gitignore
vendored
8
dashboard/env/openbsd-amd64/.gitignore
vendored
@ -1,8 +0,0 @@
|
|||||||
boot.conf
|
|
||||||
disk.raw
|
|
||||||
etc
|
|
||||||
install.site
|
|
||||||
install56-patched.iso
|
|
||||||
install56.iso
|
|
||||||
random.seed
|
|
||||||
site56.tgz
|
|
20
dashboard/env/openbsd-amd64/README
vendored
20
dashboard/env/openbsd-amd64/README
vendored
@ -1,20 +0,0 @@
|
|||||||
make.bash creates a Google Compute Engine VM image to run the Go
|
|
||||||
OpenBSD builder, booting up to run the buildlet.
|
|
||||||
|
|
||||||
make.bash should be run on a Linux box with qemu.
|
|
||||||
|
|
||||||
After it completes, it creates a file openbsd-amd64-gce.tar.gz
|
|
||||||
|
|
||||||
Then:
|
|
||||||
gsutil cp -a public-read openbsd-amd64-gce.tar.gz gs://go-builder-data/openbsd-amd64-gce.tar.gz
|
|
||||||
Or just use the web UI at:
|
|
||||||
https://console.developers.google.com/project/symbolic-datum-552/storage/browser/go-builder-data/
|
|
||||||
|
|
||||||
Then:
|
|
||||||
gcloud compute --project symbolic-datum-552 images delete openbsd-amd64-56
|
|
||||||
gcloud compute --project symbolic-datum-552 images create openbsd-amd64-56 --source-uri gs://go-builder-data/openbsd-amd64-gce.tar.gz
|
|
||||||
|
|
||||||
The VM needs to be run with the GCE metadata attribute "buildlet-binary-url" set to a URL
|
|
||||||
of the OpenBSD buildlet (cross-compiled, typically).
|
|
||||||
|
|
||||||
buildlet-binary-url == http://storage.googleapis.com/go-builder-data/buildlet.openbsd-amd64
|
|
227
dashboard/env/openbsd-amd64/make.bash
vendored
227
dashboard/env/openbsd-amd64/make.bash
vendored
@ -1,227 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
# Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
# Use of this source code is governed by a BSD-style
|
|
||||||
# license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# Download kernel, sets, etc. from ftp.usa.openbsd.org
|
|
||||||
if ! [ -e install56.iso ]; then
|
|
||||||
curl -O ftp://ftp.usa.openbsd.org/pub/OpenBSD/5.6/amd64/install56.iso
|
|
||||||
fi
|
|
||||||
|
|
||||||
# XXX: Download and save bash, curl, and their dependencies too?
|
|
||||||
# Currently we download them from the network during the install process.
|
|
||||||
|
|
||||||
# Create custom site56.tgz set.
|
|
||||||
mkdir -p etc
|
|
||||||
cat >install.site <<EOF
|
|
||||||
#!/bin/sh
|
|
||||||
env PKG_PATH=ftp://ftp.usa.openbsd.org/pub/OpenBSD/5.6/packages/amd64 pkg_add -iv bash curl git
|
|
||||||
|
|
||||||
# See https://code.google.com/p/google-compute-engine/issues/detail?id=77
|
|
||||||
echo "ignore classless-static-routes;" >> /etc/dhclient.conf
|
|
||||||
EOF
|
|
||||||
cat >etc/rc.local <<EOF
|
|
||||||
(
|
|
||||||
set -x
|
|
||||||
echo "starting buildlet script"
|
|
||||||
netstat -rn
|
|
||||||
cat /etc/resolv.conf
|
|
||||||
dig metadata.google.internal
|
|
||||||
(
|
|
||||||
set -e
|
|
||||||
export PATH="\$PATH:/usr/local/bin"
|
|
||||||
/usr/local/bin/curl -o /buildlet \$(/usr/local/bin/curl -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/buildlet-binary-url)
|
|
||||||
chmod +x /buildlet
|
|
||||||
exec /buildlet
|
|
||||||
)
|
|
||||||
echo "giving up"
|
|
||||||
sleep 10
|
|
||||||
halt -p
|
|
||||||
)
|
|
||||||
EOF
|
|
||||||
chmod +x install.site
|
|
||||||
tar -zcvf site56.tgz install.site etc/rc.local
|
|
||||||
|
|
||||||
# Hack install CD a bit.
|
|
||||||
echo 'set tty com0' > boot.conf
|
|
||||||
dd if=/dev/urandom of=random.seed bs=4096 count=1
|
|
||||||
cp install56.iso install56-patched.iso
|
|
||||||
growisofs -M install56-patched.iso -l -R -graft-points \
|
|
||||||
/5.6/amd64/site56.tgz=site56.tgz \
|
|
||||||
/etc/boot.conf=boot.conf \
|
|
||||||
/etc/random.seed=random.seed
|
|
||||||
|
|
||||||
# Initialize disk image.
|
|
||||||
rm -f disk.raw
|
|
||||||
qemu-img create -f raw disk.raw 10G
|
|
||||||
|
|
||||||
# Run the installer to create the disk image.
|
|
||||||
expect <<EOF
|
|
||||||
spawn qemu-system-x86_64 -nographic -smp 2 -drive if=virtio,file=disk.raw -cdrom install56-patched.iso -net nic,model=virtio -net user -boot once=d
|
|
||||||
|
|
||||||
expect "boot>"
|
|
||||||
send "\n"
|
|
||||||
|
|
||||||
# Need to wait for the kernel to boot.
|
|
||||||
expect -timeout 600 "\(I\)nstall, \(U\)pgrade, \(A\)utoinstall or \(S\)hell\?"
|
|
||||||
send "i\n"
|
|
||||||
|
|
||||||
expect "Terminal type\?"
|
|
||||||
send "vt220\n"
|
|
||||||
|
|
||||||
expect "System hostname\?"
|
|
||||||
send "buildlet\n"
|
|
||||||
|
|
||||||
expect "Which network interface do you wish to configure\?"
|
|
||||||
send "vio0\n"
|
|
||||||
|
|
||||||
expect "IPv4 address for vio0\?"
|
|
||||||
send "dhcp\n"
|
|
||||||
|
|
||||||
expect "IPv6 address for vio0\?"
|
|
||||||
send "none\n"
|
|
||||||
|
|
||||||
expect "Which network interface do you wish to configure\?"
|
|
||||||
send "done\n"
|
|
||||||
|
|
||||||
expect "Password for root account\?"
|
|
||||||
send "root\n"
|
|
||||||
|
|
||||||
expect "Password for root account\?"
|
|
||||||
send "root\n"
|
|
||||||
|
|
||||||
expect "Start sshd\(8\) by default\?"
|
|
||||||
send "yes\n"
|
|
||||||
|
|
||||||
expect "Start ntpd\(8\) by default\?"
|
|
||||||
send "no\n"
|
|
||||||
|
|
||||||
expect "Do you expect to run the X Window System\?"
|
|
||||||
send "no\n"
|
|
||||||
|
|
||||||
expect "Do you want the X Window System to be started by xdm\(1\)\?"
|
|
||||||
send "no\n"
|
|
||||||
|
|
||||||
expect "Do you want to suspend on lid close\?"
|
|
||||||
send "no\n"
|
|
||||||
|
|
||||||
expect "Change the default console to com0\?"
|
|
||||||
send "yes\n"
|
|
||||||
|
|
||||||
expect "Which speed should com0 use\?"
|
|
||||||
send "115200\n"
|
|
||||||
|
|
||||||
expect "Setup a user\?"
|
|
||||||
send "gopher\n"
|
|
||||||
|
|
||||||
expect "Full name for user gopher\?"
|
|
||||||
send "Gopher Gopherson\n"
|
|
||||||
|
|
||||||
expect "Password for user gopher\?"
|
|
||||||
send "gopher\n"
|
|
||||||
|
|
||||||
expect "Password for user gopher\?"
|
|
||||||
send "gopher\n"
|
|
||||||
|
|
||||||
expect "Since you set up a user, disable sshd\(8\) logins to root\?"
|
|
||||||
send "yes\n"
|
|
||||||
|
|
||||||
expect "What timezone are you in\?"
|
|
||||||
send "US/Pacific\n"
|
|
||||||
|
|
||||||
expect "Which disk is the root disk\?"
|
|
||||||
send "sd0\n"
|
|
||||||
|
|
||||||
expect "Use DUIDs rather than device names in fstab\?"
|
|
||||||
send "yes\n"
|
|
||||||
|
|
||||||
expect "Use \(W\)hole disk or \(E\)dit the MBR\?"
|
|
||||||
send "whole\n"
|
|
||||||
|
|
||||||
expect "Use \(A\)uto layout, \(E\)dit auto layout, or create \(C\)ustom layout\?"
|
|
||||||
send "custom\n"
|
|
||||||
|
|
||||||
expect "> "
|
|
||||||
send "z\n"
|
|
||||||
|
|
||||||
expect "> "
|
|
||||||
send "a b\n"
|
|
||||||
expect "offset: "
|
|
||||||
send "\n"
|
|
||||||
expect "size: "
|
|
||||||
send "1G\n"
|
|
||||||
expect "FS type: "
|
|
||||||
send "swap\n"
|
|
||||||
|
|
||||||
expect "> "
|
|
||||||
send "a a\n"
|
|
||||||
expect "offset: "
|
|
||||||
send "\n"
|
|
||||||
expect "size: "
|
|
||||||
send "\n"
|
|
||||||
expect "FS type: "
|
|
||||||
send "4.2BSD\n"
|
|
||||||
expect "mount point: "
|
|
||||||
send "/\n"
|
|
||||||
|
|
||||||
expect "> "
|
|
||||||
send "w\n"
|
|
||||||
expect "> "
|
|
||||||
send "q\n"
|
|
||||||
|
|
||||||
expect "Location of sets\?"
|
|
||||||
send "cd\n"
|
|
||||||
|
|
||||||
expect "Which CD-ROM contains the install media\?"
|
|
||||||
send "cd0\n"
|
|
||||||
|
|
||||||
expect "Pathname to the sets\?"
|
|
||||||
send "5.6/amd64\n"
|
|
||||||
|
|
||||||
expect "Set name\(s\)\?"
|
|
||||||
send "+*\n"
|
|
||||||
|
|
||||||
expect "Set name\(s\)\?"
|
|
||||||
send " -x*\n"
|
|
||||||
|
|
||||||
expect "Set name\(s\)\?"
|
|
||||||
send " -game*\n"
|
|
||||||
|
|
||||||
expect "Set name\(s\)\?"
|
|
||||||
send " -man*\n"
|
|
||||||
|
|
||||||
expect "Set name\(s\)\?"
|
|
||||||
send "done\n"
|
|
||||||
|
|
||||||
expect "Directory does not contain SHA256\.sig\. Continue without verification\?"
|
|
||||||
send "yes\n"
|
|
||||||
|
|
||||||
# Need to wait for previous sets to unpack.
|
|
||||||
expect -timeout 600 "Location of sets\?"
|
|
||||||
send "done\n"
|
|
||||||
|
|
||||||
expect "Ambiguous: choose dependency for git"
|
|
||||||
send "0\n"
|
|
||||||
|
|
||||||
# Need to wait for install.site to install curl, git, et
|
|
||||||
expect -timeout 600 "CONGRATULATIONS!"
|
|
||||||
|
|
||||||
expect "# "
|
|
||||||
send "halt\n"
|
|
||||||
|
|
||||||
expect "Please press any key to reboot.\n"
|
|
||||||
send "\n"
|
|
||||||
|
|
||||||
expect "boot>"
|
|
||||||
send "\n"
|
|
||||||
|
|
||||||
expect -timeout 600 eof
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Create Compute Engine disk image.
|
|
||||||
echo "Archiving disk.raw... (this may take a while)"
|
|
||||||
tar -Szcf openbsd-amd64-gce.tar.gz disk.raw
|
|
||||||
|
|
||||||
echo "Done. GCE image is openbsd-amd64-gce.tar.gz."
|
|
2
dashboard/env/plan9-386/.gitignore
vendored
2
dashboard/env/plan9-386/.gitignore
vendored
@ -1,2 +0,0 @@
|
|||||||
disk.raw
|
|
||||||
plan9-gce.iso
|
|
21
dashboard/env/plan9-386/README
vendored
21
dashboard/env/plan9-386/README
vendored
@ -1,21 +0,0 @@
|
|||||||
make.bash creates a Google Compute Engine VM image to run the Go
|
|
||||||
Plan 9 builder, booting up to run the buildlet.
|
|
||||||
|
|
||||||
make.bash should be run on a Linux box with qemu.
|
|
||||||
|
|
||||||
After it completes, it creates a file plan9-386-gce.tar.gz
|
|
||||||
|
|
||||||
The make.bash script depends on the following packages:
|
|
||||||
|
|
||||||
$ sudo apt-get install bzip2 curl expect qemu
|
|
||||||
or
|
|
||||||
$ sudo yum install bzip2 curl expect qemu
|
|
||||||
|
|
||||||
It has been tested with QEMU 1.4.2 to 2.2.0, as provided with:
|
|
||||||
|
|
||||||
- Ubuntu 14.04 (Trusty Tahr) and above
|
|
||||||
- Debian 8.0 (Jessie) and above
|
|
||||||
- Fedora 19 and above
|
|
||||||
|
|
||||||
Also, due to an ATA bug affecting QEMU 1.6 and 1.7, the
|
|
||||||
Plan 9 CD can't be booted with these versions.
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user