Mirror of https://github.com/golang/go.git (synced 2025-05-06 08:03:03 +00:00)
go.tools/cmd/godoc: copy godoc from core repository
The plan for godoc:
- Copy godoc source from the core repo to go.tools (this CL).
- Break godoc into several packages inside go.tools, leaving a package main
  that merely sets up a local file system, interprets the command line, and
  otherwise delegates the heavy-lifting to the new packages.
- Remove godoc from the core repo.
- Update cmd/go to install this godoc binary in $GOROOT/bin.
- Update misc/dist to include godoc when building binary distributions.

R=bradfitz
CC=golang-dev
https://golang.org/cl/11408043
This commit is contained in:
parent 732dbe9ff8
commit d79f4fe25b

cmd/godoc/README.godoc-app (new file, 61 lines)
@@ -0,0 +1,61 @@
Copyright 2011 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.

godoc on appengine
------------------

Prerequisites
-------------

* Go appengine SDK
  https://developers.google.com/appengine/downloads#Google_App_Engine_SDK_for_Go

* Go sources at tip under $GOROOT


Directory structure
-------------------

* Let $APPDIR be the directory containing the app engine files.
  (e.g., $APPDIR=$HOME/godoc-app)

* $APPDIR contains the following entries (this may change depending on
  app-engine release and version of godoc):

	app.yaml
	godoc.zip
	godoc/
	index.split.*

* The app.yaml file is set up per app engine documentation.
  For instance:

	application: godoc-app
	version: 1
	runtime: go
	api_version: go1

	handlers:
	- url: /.*
	  script: _go_app

* The godoc/ directory contains a copy of the files under $GOROOT/src/cmd/godoc
  with doc.go excluded (it belongs to pseudo-package "documentation")


Configuring and running godoc
-----------------------------

To configure godoc, run

	bash setup-godoc-app.bash

to create the godoc.zip, index.split.*, and godoc/appconfig.go files
based on $GOROOT and $APPDIR. See the script for details on usage.

To run godoc locally, using the app-engine emulator, run

	<path to google_appengine>/dev_appserver.py $APPDIR

godoc should come up at http://localhost:8080 .
cmd/godoc/appinit.go (new file, 69 lines)
@@ -0,0 +1,69 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build appengine

package main

// This file replaces main.go when running godoc under app-engine.
// See README.godoc-app for details.

import (
	"archive/zip"
	"log"
	"net/http"
	"path"
)

func serveError(w http.ResponseWriter, r *http.Request, relpath string, err error) {
	w.WriteHeader(http.StatusNotFound)
	servePage(w, Page{
		Title:    "File " + relpath,
		Subtitle: relpath,
		Body:     applyTemplate(errorHTML, "errorHTML", err), // err may contain an absolute path!
	})
}

func init() {
	log.Println("initializing godoc ...")
	log.Printf(".zip file = %s", zipFilename)
	log.Printf(".zip GOROOT = %s", zipGoroot)
	log.Printf("index files = %s", indexFilenames)

	// initialize flags for app engine
	*goroot = path.Join("/", zipGoroot) // fsHttp paths are relative to '/'
	*indexEnabled = true
	*indexFiles = indexFilenames
	*maxResults = 100    // reduce latency by limiting the number of fulltext search results
	*indexThrottle = 0.3 // in case *indexFiles is empty (and thus the indexer is run)
	*showPlayground = true

	// read .zip file and set up file systems
	const zipfile = zipFilename
	rc, err := zip.OpenReader(zipfile)
	if err != nil {
		log.Fatalf("%s: %s\n", zipfile, err)
	}
	// rc is never closed (app running forever)
	fs.Bind("/", NewZipFS(rc, zipFilename), *goroot, bindReplace)

	// initialize http handlers
	readTemplates()
	initHandlers()
	registerPublicHandlers(http.DefaultServeMux)
	registerPlaygroundHandlers(http.DefaultServeMux)

	// initialize default directory tree with corresponding timestamp.
	initFSTree()

	// Immediately update metadata.
	updateMetadata()

	// initialize search index
	if *indexEnabled {
		go indexer()
	}

	log.Println("godoc initialization complete")
}
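Not part of the commit: a minimal, standalone sketch of the idea behind the init function above, namely reading file contents out of a .zip archive the way godoc consumes godoc.zip. godoc itself wraps the archive in NewZipFS (defined in zip.go, not shown in this CL) and binds it into its virtual file system; this sketch uses only the standard library, and the archive name is just the one the README mentions.

package main

import (
	"archive/zip"
	"io/ioutil"
	"log"
)

func main() {
	// Same call init() above uses to open the serving archive.
	rc, err := zip.OpenReader("godoc.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// Walk the archive and report the first few entries; godoc instead
	// exposes the whole archive as a FileSystem via NewZipFS.
	for i, f := range rc.File {
		if i == 3 {
			break
		}
		r, err := f.Open()
		if err != nil {
			log.Fatal(err)
		}
		data, _ := ioutil.ReadAll(r)
		r.Close()
		log.Printf("%s: %d bytes", f.Name, len(data))
	}
}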
cmd/godoc/codewalk.go (new file, 494 lines)
@@ -0,0 +1,494 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// The /doc/codewalk/ tree is synthesized from codewalk descriptions,
// files named $GOROOT/doc/codewalk/*.xml.
// For an example and a description of the format, see
// http://golang.org/doc/codewalk/codewalk or run godoc -http=:6060
// and see http://localhost:6060/doc/codewalk/codewalk .
// That page is itself a codewalk; the source code for it is
// $GOROOT/doc/codewalk/codewalk.xml.

package main

import (
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"text/template"
	"unicode/utf8"
)

// Handler for /doc/codewalk/ and below.
func codewalk(w http.ResponseWriter, r *http.Request) {
	relpath := r.URL.Path[len("/doc/codewalk/"):]
	abspath := r.URL.Path

	r.ParseForm()
	if f := r.FormValue("fileprint"); f != "" {
		codewalkFileprint(w, r, f)
		return
	}

	// If directory exists, serve list of code walks.
	dir, err := fs.Lstat(abspath)
	if err == nil && dir.IsDir() {
		codewalkDir(w, r, relpath, abspath)
		return
	}

	// If file exists, serve using standard file server.
	if err == nil {
		serveFile(w, r)
		return
	}

	// Otherwise append .xml and hope to find
	// a codewalk description, but first trim
	// the trailing /.
	abspath = strings.TrimRight(abspath, "/")
	cw, err := loadCodewalk(abspath + ".xml")
	if err != nil {
		log.Print(err)
		serveError(w, r, relpath, err)
		return
	}

	// Canonicalize the path and redirect if changed
	if redirect(w, r) {
		return
	}

	servePage(w, Page{
		Title:    "Codewalk: " + cw.Title,
		Tabtitle: cw.Title,
		Body:     applyTemplate(codewalkHTML, "codewalk", cw),
	})
}

// A Codewalk represents a single codewalk read from an XML file.
type Codewalk struct {
	Title string      `xml:"title,attr"`
	File  []string    `xml:"file"`
	Step  []*Codestep `xml:"step"`
}

// A Codestep is a single step in a codewalk.
type Codestep struct {
	// Filled in from XML
	Src   string `xml:"src,attr"`
	Title string `xml:"title,attr"`
	XML   string `xml:",innerxml"`

	// Derived from Src; not in XML.
	Err    error
	File   string
	Lo     int
	LoByte int
	Hi     int
	HiByte int
	Data   []byte
}

// String method for printing in template.
// Formats file address nicely.
func (st *Codestep) String() string {
	s := st.File
	if st.Lo != 0 || st.Hi != 0 {
		s += fmt.Sprintf(":%d", st.Lo)
		if st.Lo != st.Hi {
			s += fmt.Sprintf(",%d", st.Hi)
		}
	}
	return s
}

// loadCodewalk reads a codewalk from the named XML file.
func loadCodewalk(filename string) (*Codewalk, error) {
	f, err := fs.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	cw := new(Codewalk)
	d := xml.NewDecoder(f)
	d.Entity = xml.HTMLEntity
	err = d.Decode(cw)
	if err != nil {
		return nil, &os.PathError{Op: "parsing", Path: filename, Err: err}
	}

	// Compute file list, evaluate line numbers for addresses.
	m := make(map[string]bool)
	for _, st := range cw.Step {
		i := strings.Index(st.Src, ":")
		if i < 0 {
			i = len(st.Src)
		}
		filename := st.Src[0:i]
		data, err := ReadFile(fs, filename)
		if err != nil {
			st.Err = err
			continue
		}
		if i < len(st.Src) {
			lo, hi, err := addrToByteRange(st.Src[i+1:], 0, data)
			if err != nil {
				st.Err = err
				continue
			}
			// Expand match to line boundaries.
			for lo > 0 && data[lo-1] != '\n' {
				lo--
			}
			for hi < len(data) && (hi == 0 || data[hi-1] != '\n') {
				hi++
			}
			st.Lo = byteToLine(data, lo)
			st.Hi = byteToLine(data, hi-1)
		}
		st.Data = data
		st.File = filename
		m[filename] = true
	}

	// Make list of files
	cw.File = make([]string, len(m))
	i := 0
	for f := range m {
		cw.File[i] = f
		i++
	}
	sort.Strings(cw.File)

	return cw, nil
}

// codewalkDir serves the codewalk directory listing.
// It scans the directory for subdirectories or files named *.xml
// and prepares a table.
func codewalkDir(w http.ResponseWriter, r *http.Request, relpath, abspath string) {
	type elem struct {
		Name  string
		Title string
	}

	dir, err := fs.ReadDir(abspath)
	if err != nil {
		log.Print(err)
		serveError(w, r, relpath, err)
		return
	}
	var v []interface{}
	for _, fi := range dir {
		name := fi.Name()
		if fi.IsDir() {
			v = append(v, &elem{name + "/", ""})
		} else if strings.HasSuffix(name, ".xml") {
			cw, err := loadCodewalk(abspath + "/" + name)
			if err != nil {
				continue
			}
			v = append(v, &elem{name[0 : len(name)-len(".xml")], cw.Title})
		}
	}

	servePage(w, Page{
		Title: "Codewalks",
		Body:  applyTemplate(codewalkdirHTML, "codewalkdir", v),
	})
}

// codewalkFileprint serves requests with ?fileprint=f&lo=lo&hi=hi.
// The filename f has already been retrieved and is passed as an argument.
// Lo and hi are the numbers of the first and last line to highlight
// in the response. This format is used for the middle window pane
// of the codewalk pages. It is a separate iframe and does not get
// the usual godoc HTML wrapper.
func codewalkFileprint(w http.ResponseWriter, r *http.Request, f string) {
	abspath := f
	data, err := ReadFile(fs, abspath)
	if err != nil {
		log.Print(err)
		serveError(w, r, f, err)
		return
	}
	lo, _ := strconv.Atoi(r.FormValue("lo"))
	hi, _ := strconv.Atoi(r.FormValue("hi"))
	if hi < lo {
		hi = lo
	}
	lo = lineToByte(data, lo)
	hi = lineToByte(data, hi+1)

	// Put the mark 4 lines before lo, so that the iframe
	// shows a few lines of context before the highlighted
	// section.
	n := 4
	mark := lo
	for ; mark > 0 && n > 0; mark-- {
		if data[mark-1] == '\n' {
			if n--; n == 0 {
				break
			}
		}
	}

	io.WriteString(w, `<style type="text/css">@import "/doc/codewalk/codewalk.css";</style><pre>`)
	template.HTMLEscape(w, data[0:mark])
	io.WriteString(w, "<a name='mark'></a>")
	template.HTMLEscape(w, data[mark:lo])
	if lo < hi {
		io.WriteString(w, "<div class='codewalkhighlight'>")
		template.HTMLEscape(w, data[lo:hi])
		io.WriteString(w, "</div>")
	}
	template.HTMLEscape(w, data[hi:])
	io.WriteString(w, "</pre>")
}

// addrToByteRange evaluates the given address starting at offset start in data.
// It returns the lo and hi byte offset of the matched region within data.
// See http://plan9.bell-labs.com/sys/doc/sam/sam.html Table II
// for details on the syntax.
func addrToByteRange(addr string, start int, data []byte) (lo, hi int, err error) {
	var (
		dir        byte
		prevc      byte
		charOffset bool
	)
	lo = start
	hi = start
	for addr != "" && err == nil {
		c := addr[0]
		switch c {
		default:
			err = errors.New("invalid address syntax near " + string(c))
		case ',':
			if len(addr) == 1 {
				hi = len(data)
			} else {
				_, hi, err = addrToByteRange(addr[1:], hi, data)
			}
			return

		case '+', '-':
			if prevc == '+' || prevc == '-' {
				lo, hi, err = addrNumber(data, lo, hi, prevc, 1, charOffset)
			}
			dir = c

		case '$':
			lo = len(data)
			hi = len(data)
			if len(addr) > 1 {
				dir = '+'
			}

		case '#':
			charOffset = true

		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			var i int
			for i = 1; i < len(addr); i++ {
				if addr[i] < '0' || addr[i] > '9' {
					break
				}
			}
			var n int
			n, err = strconv.Atoi(addr[0:i])
			if err != nil {
				break
			}
			lo, hi, err = addrNumber(data, lo, hi, dir, n, charOffset)
			dir = 0
			charOffset = false
			prevc = c
			addr = addr[i:]
			continue

		case '/':
			var i, j int
		Regexp:
			for i = 1; i < len(addr); i++ {
				switch addr[i] {
				case '\\':
					i++
				case '/':
					j = i + 1
					break Regexp
				}
			}
			if j == 0 {
				j = i
			}
			pattern := addr[1:i]
			lo, hi, err = addrRegexp(data, lo, hi, dir, pattern)
			prevc = c
			addr = addr[j:]
			continue
		}
		prevc = c
		addr = addr[1:]
	}

	if err == nil && dir != 0 {
		lo, hi, err = addrNumber(data, lo, hi, dir, 1, charOffset)
	}
	if err != nil {
		return 0, 0, err
	}
	return lo, hi, nil
}

// addrNumber applies the given dir, n, and charOffset to the address lo, hi.
// dir is '+' or '-', n is the count, and charOffset is true if the syntax
// used was #n. Applying +n (or +#n) means to advance n lines
// (or characters) after hi. Applying -n (or -#n) means to back up n lines
// (or characters) before lo.
// The return value is the new lo, hi.
func addrNumber(data []byte, lo, hi int, dir byte, n int, charOffset bool) (int, int, error) {
	switch dir {
	case 0:
		lo = 0
		hi = 0
		fallthrough

	case '+':
		if charOffset {
			pos := hi
			for ; n > 0 && pos < len(data); n-- {
				_, size := utf8.DecodeRune(data[pos:])
				pos += size
			}
			if n == 0 {
				return pos, pos, nil
			}
			break
		}
		// find next beginning of line
		if hi > 0 {
			for hi < len(data) && data[hi-1] != '\n' {
				hi++
			}
		}
		lo = hi
		if n == 0 {
			return lo, hi, nil
		}
		for ; hi < len(data); hi++ {
			if data[hi] != '\n' {
				continue
			}
			switch n--; n {
			case 1:
				lo = hi + 1
			case 0:
				return lo, hi + 1, nil
			}
		}

	case '-':
		if charOffset {
			// Scan backward for bytes that are not UTF-8 continuation bytes.
			pos := lo
			for ; pos > 0 && n > 0; pos-- {
				if data[pos]&0xc0 != 0x80 {
					n--
				}
			}
			if n == 0 {
				return pos, pos, nil
			}
			break
		}
		// find earlier beginning of line
		for lo > 0 && data[lo-1] != '\n' {
			lo--
		}
		hi = lo
		if n == 0 {
			return lo, hi, nil
		}
		for ; lo >= 0; lo-- {
			if lo > 0 && data[lo-1] != '\n' {
				continue
			}
			switch n--; n {
			case 1:
				hi = lo
			case 0:
				return lo, hi, nil
			}
		}
	}

	return 0, 0, errors.New("address out of range")
}

// addrRegexp searches for pattern in the given direction starting at lo, hi.
// The direction dir is '+' (search forward from hi) or '-' (search backward from lo).
// Backward searches are unimplemented.
func addrRegexp(data []byte, lo, hi int, dir byte, pattern string) (int, int, error) {
	re, err := regexp.Compile(pattern)
	if err != nil {
		return 0, 0, err
	}
	if dir == '-' {
		// Could implement reverse search using binary search
		// through file, but that seems like overkill.
		return 0, 0, errors.New("reverse search not implemented")
	}
	m := re.FindIndex(data[hi:])
	if len(m) > 0 {
		m[0] += hi
		m[1] += hi
	} else if hi > 0 {
		// No match. Wrap to beginning of data.
		m = re.FindIndex(data)
	}
	if len(m) == 0 {
		return 0, 0, errors.New("no match for " + pattern)
	}
	return m[0], m[1], nil
}

// lineToByte returns the byte index of the first byte of line n.
// Line numbers begin at 1.
func lineToByte(data []byte, n int) int {
	if n <= 1 {
		return 0
	}
	n--
	for i, c := range data {
		if c == '\n' {
			if n--; n == 0 {
				return i + 1
			}
		}
	}
	return len(data)
}

// byteToLine returns the number of the line containing the byte at index i.
func byteToLine(data []byte, i int) int {
	l := 1
	for j, c := range data {
		if j == i {
			return l
		}
		if c == '\n' {
			l++
		}
	}
	return l
}
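Not part of the commit: a small illustration of the sam-style address syntax that addrToByteRange above understands, written as a sketch that assumes it is compiled into the same package. Each address string selects a byte range in data; the results are ignored here.

func exampleAddresses() {
	data := []byte("package main\n\nfunc main() {\n\tprintln(\"hi\")\n}\n")

	// Line range: from the start of line 3 to the end of line 5.
	lo, hi, err := addrToByteRange("3,5", 0, data)

	// Regular expression: the first occurrence of "println".
	lo, hi, err = addrToByteRange("/println/", 0, data)

	// Character (rune) offsets: positions 7 through 12.
	lo, hi, err = addrToByteRange("#7,#12", 0, data)

	_, _, _ = lo, hi, err // results unused in this sketch
}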
cmd/godoc/dirtrees.go (new file, 320 lines)
@@ -0,0 +1,320 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file contains the code dealing with package directory trees.

package main

import (
	"bytes"
	"go/doc"
	"go/parser"
	"go/token"
	"log"
	"os"
	pathpkg "path"
	"strings"
)

// Conventional name for directories containing test data.
// Excluded from directory trees.
//
const testdataDirName = "testdata"

type Directory struct {
	Depth    int
	Path     string       // directory path; includes Name
	Name     string       // directory name
	HasPkg   bool         // true if the directory contains at least one package
	Synopsis string       // package documentation, if any
	Dirs     []*Directory // subdirectories
}

func isGoFile(fi os.FileInfo) bool {
	name := fi.Name()
	return !fi.IsDir() &&
		len(name) > 0 && name[0] != '.' && // ignore .files
		pathpkg.Ext(name) == ".go"
}

func isPkgFile(fi os.FileInfo) bool {
	return isGoFile(fi) &&
		!strings.HasSuffix(fi.Name(), "_test.go") // ignore test files
}

func isPkgDir(fi os.FileInfo) bool {
	name := fi.Name()
	return fi.IsDir() && len(name) > 0 &&
		name[0] != '_' && name[0] != '.' // ignore _files and .files
}

type treeBuilder struct {
	maxDepth int
}

func (b *treeBuilder) newDirTree(fset *token.FileSet, path, name string, depth int) *Directory {
	if name == testdataDirName {
		return nil
	}

	if depth >= b.maxDepth {
		// return a dummy directory so that the parent directory
		// doesn't get discarded just because we reached the max
		// directory depth
		return &Directory{
			Depth: depth,
			Path:  path,
			Name:  name,
		}
	}

	list, _ := fs.ReadDir(path)

	// determine number of subdirectories and if there are package files
	ndirs := 0
	hasPkgFiles := false
	var synopses [3]string // prioritized package documentation (0 == highest priority)
	for _, d := range list {
		switch {
		case isPkgDir(d):
			ndirs++
		case isPkgFile(d):
			// looks like a package file, but may just be a file ending in ".go";
			// don't just count it yet (otherwise we may end up with hasPkgFiles even
			// though the directory doesn't contain any real package files - was bug)
			if synopses[0] == "" {
				// no "optimal" package synopsis yet; continue to collect synopses
				file, err := parseFile(fset, pathpkg.Join(path, d.Name()),
					parser.ParseComments|parser.PackageClauseOnly)
				if err == nil {
					hasPkgFiles = true
					if file.Doc != nil {
						// prioritize documentation
						i := -1
						switch file.Name.Name {
						case name:
							i = 0 // normal case: directory name matches package name
						case "main":
							i = 1 // directory contains a main package
						default:
							i = 2 // none of the above
						}
						if 0 <= i && i < len(synopses) && synopses[i] == "" {
							synopses[i] = doc.Synopsis(file.Doc.Text())
						}
					}
				}
			}
		}
	}

	// create subdirectory tree
	var dirs []*Directory
	if ndirs > 0 {
		dirs = make([]*Directory, ndirs)
		i := 0
		for _, d := range list {
			if isPkgDir(d) {
				name := d.Name()
				dd := b.newDirTree(fset, pathpkg.Join(path, name), name, depth+1)
				if dd != nil {
					dirs[i] = dd
					i++
				}
			}
		}
		dirs = dirs[0:i]
	}

	// if there are no package files and no subdirectories
	// containing package files, ignore the directory
	if !hasPkgFiles && len(dirs) == 0 {
		return nil
	}

	// select the highest-priority synopsis for the directory entry, if any
	synopsis := ""
	for _, synopsis = range synopses {
		if synopsis != "" {
			break
		}
	}

	return &Directory{
		Depth:    depth,
		Path:     path,
		Name:     name,
		HasPkg:   hasPkgFiles,
		Synopsis: synopsis,
		Dirs:     dirs,
	}
}

// newDirectory creates a new package directory tree with at most maxDepth
// levels, anchored at root. The result tree is pruned such that it only
// contains directories that contain package files or that contain
// subdirectories containing package files (transitively). If a non-nil
// pathFilter is provided, directory paths additionally must be accepted
// by the filter (i.e., pathFilter(path) must be true). If a value >= 0 is
// provided for maxDepth, nodes at larger depths are pruned as well; they
// are assumed to contain package files even if their contents are not known
// (i.e., in this case the tree may contain directories w/o any package files).
//
func newDirectory(root string, maxDepth int) *Directory {
	// The root could be a symbolic link so use Stat not Lstat.
	d, err := fs.Stat(root)
	// If we fail here, report detailed error messages; otherwise
	// it is hard to see why a directory tree was not built.
	switch {
	case err != nil:
		log.Printf("newDirectory(%s): %s", root, err)
		return nil
	case !isPkgDir(d):
		log.Printf("newDirectory(%s): not a package directory", root)
		return nil
	}
	if maxDepth < 0 {
		maxDepth = 1e6 // "infinity"
	}
	b := treeBuilder{maxDepth}
	// the file set provided is only for local parsing, no position
	// information escapes and thus we don't need to save the set
	return b.newDirTree(token.NewFileSet(), root, d.Name(), 0)
}

func (dir *Directory) writeLeafs(buf *bytes.Buffer) {
	if dir != nil {
		if len(dir.Dirs) == 0 {
			buf.WriteString(dir.Path)
			buf.WriteByte('\n')
			return
		}

		for _, d := range dir.Dirs {
			d.writeLeafs(buf)
		}
	}
}

func (dir *Directory) walk(c chan<- *Directory, skipRoot bool) {
	if dir != nil {
		if !skipRoot {
			c <- dir
		}
		for _, d := range dir.Dirs {
			d.walk(c, false)
		}
	}
}

func (dir *Directory) iter(skipRoot bool) <-chan *Directory {
	c := make(chan *Directory)
	go func() {
		dir.walk(c, skipRoot)
		close(c)
	}()
	return c
}

func (dir *Directory) lookupLocal(name string) *Directory {
	for _, d := range dir.Dirs {
		if d.Name == name {
			return d
		}
	}
	return nil
}

func splitPath(p string) []string {
	p = strings.TrimPrefix(p, "/")
	if p == "" {
		return nil
	}
	return strings.Split(p, "/")
}

// lookup looks for the *Directory for a given path, relative to dir.
func (dir *Directory) lookup(path string) *Directory {
	d := splitPath(dir.Path)
	p := splitPath(path)
	i := 0
	for i < len(d) {
		if i >= len(p) || d[i] != p[i] {
			return nil
		}
		i++
	}
	for dir != nil && i < len(p) {
		dir = dir.lookupLocal(p[i])
		i++
	}
	return dir
}

// DirEntry describes a directory entry. The Depth and Height values
// are useful for presenting an entry in an indented fashion.
//
type DirEntry struct {
	Depth    int    // >= 0
	Height   int    // = DirList.MaxHeight - Depth, > 0
	Path     string // directory path; includes Name, relative to DirList root
	Name     string // directory name
	HasPkg   bool   // true if the directory contains at least one package
	Synopsis string // package documentation, if any
}

type DirList struct {
	MaxHeight int // directory tree height, > 0
	List      []DirEntry
}

// listing creates a (linear) directory listing from a directory tree.
// If skipRoot is set, the root directory itself is excluded from the list.
//
func (root *Directory) listing(skipRoot bool) *DirList {
	if root == nil {
		return nil
	}

	// determine number of entries n and maximum height
	n := 0
	minDepth := 1 << 30 // infinity
	maxDepth := 0
	for d := range root.iter(skipRoot) {
		n++
		if minDepth > d.Depth {
			minDepth = d.Depth
		}
		if maxDepth < d.Depth {
			maxDepth = d.Depth
		}
	}
	maxHeight := maxDepth - minDepth + 1

	if n == 0 {
		return nil
	}

	// create list
	list := make([]DirEntry, n)
	i := 0
	for d := range root.iter(skipRoot) {
		p := &list[i]
		p.Depth = d.Depth - minDepth
		p.Height = maxHeight - p.Depth
		// the path is relative to root.Path - remove the root.Path
		// prefix (the prefix should always be present but avoid
		// crashes and check)
		path := strings.TrimPrefix(d.Path, root.Path)
		// remove leading separator if any - path must be relative
		path = strings.TrimPrefix(path, "/")
		p.Path = path
		p.Name = d.Name
		p.HasPkg = d.HasPkg
		p.Synopsis = d.Synopsis
		i++
	}

	return &DirList{maxHeight, list}
}
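Not part of the commit: a sketch of how the types above fit together, assuming it is compiled into the same package and that the global fs name space has already been bound (see filesystem.go). It builds a pruned tree under /src/pkg, prints the leaf directories, and then walks the flattened listing that the HTML templates consume. The bytes, os, and log packages are assumed to be imported.

func exampleDirTree() {
	tree := newDirectory("/src/pkg", -1) // -1: no depth limit
	if tree == nil {
		return // nothing to show
	}

	// Print every leaf directory of the pruned tree.
	var buf bytes.Buffer
	tree.writeLeafs(&buf)
	os.Stdout.Write(buf.Bytes())

	// Walk the flattened listing; Depth/Height drive the indented view.
	if dl := tree.listing(true); dl != nil { // true: skip the root entry
		for _, e := range dl.List {
			log.Printf("%s (depth %d): %s", e.Path, e.Depth, e.Synopsis)
		}
	}
}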
cmd/godoc/doc.go (new file, 135 lines)
@@ -0,0 +1,135 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*

Godoc extracts and generates documentation for Go programs.

It has two modes.

Without the -http flag, it runs in command-line mode and prints plain text
documentation to standard output and exits. If both a library package and
a command with the same name exists, using the prefix cmd/ will force
documentation on the command rather than the library package. If the -src
flag is specified, godoc prints the exported interface of a package in Go
source form, or the implementation of a specific exported language entity:

	godoc fmt                # documentation for package fmt
	godoc fmt Printf         # documentation for fmt.Printf
	godoc cmd/go             # force documentation for the go command
	godoc -src fmt           # fmt package interface in Go source form
	godoc -src fmt Printf    # implementation of fmt.Printf

In command-line mode, the -q flag enables search queries against a godoc running
as a webserver. If no explicit server address is specified with the -server flag,
godoc first tries localhost:6060 and then http://golang.org.

	godoc -q Reader
	godoc -q math.Sin
	godoc -server=:6060 -q sin

With the -http flag, it runs as a web server and presents the documentation as a
web page.

	godoc -http=:6060

Usage:
	godoc [flag] package [name ...]

The flags are:
	-v
		verbose mode
	-q
		arguments are considered search queries: a legal query is a
		single identifier (such as ToLower) or a qualified identifier
		(such as math.Sin).
	-src
		print (exported) source in command-line mode
	-tabwidth=4
		width of tabs in units of spaces
	-timestamps=true
		show timestamps with directory listings
	-index
		enable identifier and full text search index
		(no search box is shown if -index is not set)
	-index_files=""
		glob pattern specifying index files; if not empty,
		the index is read from these files in sorted order
	-index_throttle=0.75
		index throttle value; a value of 0 means no time is allocated
		to the indexer (the indexer will never finish), a value of 1.0
		means that index creation is running at full throttle (other
		goroutines may get no time while the index is built)
	-links=true
		link identifiers to their declarations
	-write_index=false
		write index to a file; the file name must be specified with
		-index_files
	-maxresults=10000
		maximum number of full text search results shown
		(no full text index is built if maxresults <= 0)
	-notes="BUG"
		regular expression matching note markers to show
		(e.g., "BUG|TODO", ".*")
	-html
		print HTML in command-line mode
	-goroot=$GOROOT
		Go root directory
	-http=addr
		HTTP service address (e.g., '127.0.0.1:6060' or just ':6060')
	-server=addr
		webserver address for command line searches
	-templates=""
		directory containing alternate template files; if set,
		the directory may provide alternative template files
		for the files in $GOROOT/lib/godoc
	-url=path
		print to standard output the data that would be served by
		an HTTP request for path
	-zip=""
		zip file providing the file system to serve; disabled if empty

By default, godoc looks at the packages it finds via $GOROOT and $GOPATH (if set).
This behavior can be altered by providing an alternative $GOROOT with the -goroot
flag.

When godoc runs as a web server and -index is set, a search index is maintained.
The index is created at startup.

The index contains both identifier and full text search information (searchable
via regular expressions). The maximum number of full text search results shown
can be set with the -maxresults flag; if set to 0, no full text results are
shown, and only an identifier index but no full text search index is created.

The presentation mode of web pages served by godoc can be controlled with the
"m" URL parameter; it accepts a comma-separated list of flag names as value:

	all	show documentation for all declarations, not just the exported ones
	methods	show all embedded methods, not just those of unexported anonymous fields
	src	show the original source code rather than the extracted documentation
	text	present the page in textual (command-line) form rather than HTML
	flat	present flat (not indented) directory listings using full paths

For instance, http://golang.org/pkg/math/big/?m=all,text shows the documentation
for all (not just the exported) declarations of package big, in textual form (as
it would appear when using godoc from the command line: "godoc -src math/big .*").

By default, godoc serves files from the file system of the underlying OS.
Instead, a .zip file may be provided via the -zip flag, which contains
the file system to serve. The file paths stored in the .zip file must use
slash ('/') as path separator; and they must be unrooted. $GOROOT (or -goroot)
must be set to the .zip file directory path containing the Go root directory.
For instance, for a .zip file created by the command:

	zip go.zip $HOME/go

one may run godoc as follows:

	godoc -http=:6060 -zip=go.zip -goroot=$HOME/go

See "Godoc: documenting Go code" for how to write good comments for godoc:
http://golang.org/doc/articles/godoc_documenting_go_code.html

*/
package main
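Not part of the commit: a small client-side sketch of the "m" URL parameter described above. It assumes a godoc web server is already running locally (godoc -http=:6060) and fetches the strings package page in textual form; the package and port are just illustrative choices.

package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// ?m=text asks godoc to render the page as plain text,
	// as if produced by command-line godoc.
	resp, err := http.Get("http://localhost:6060/pkg/strings/?m=text")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}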
cmd/godoc/filesystem.go (new file, 562 lines)
@@ -0,0 +1,562 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file defines types for abstract file system access and
// provides an implementation accessing the file system of the
// underlying OS.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	pathpkg "path"
	"path/filepath"
	"sort"
	"strings"
	"time"
)

// fs is the file system that godoc reads from and serves.
// It is a virtual file system that operates on slash-separated paths,
// and its root corresponds to the Go distribution root: /src/pkg
// holds the source tree, and so on. This means that the URLs served by
// the godoc server are the same as the paths in the virtual file
// system, which helps keep things simple.
//
// New file trees - implementations of FileSystem - can be added to
// the virtual file system using nameSpace's Bind method.
// The usual setup is to bind OS(runtime.GOROOT) to the root
// of the name space and then bind any GOPATH/src directories
// on top of /src/pkg, so that all sources are in /src/pkg.
//
// For more about name spaces, see the nameSpace type's
// documentation below.
//
// The use of this virtual file system means that most code processing
// paths can assume they are slash-separated and should be using
// package path (often imported as pathpkg) to manipulate them,
// even on Windows.
//
var fs = nameSpace{} // the underlying file system for godoc

// Setting debugNS = true will enable debugging prints about
// name space translations.
const debugNS = false

// The FileSystem interface specifies the methods godoc is using
// to access the file system for which it serves documentation.
type FileSystem interface {
	Open(path string) (readSeekCloser, error)
	Lstat(path string) (os.FileInfo, error)
	Stat(path string) (os.FileInfo, error)
	ReadDir(path string) ([]os.FileInfo, error)
	String() string
}

type readSeekCloser interface {
	io.Reader
	io.Seeker
	io.Closer
}

// ReadFile reads the file named by path from fs and returns the contents.
func ReadFile(fs FileSystem, path string) ([]byte, error) {
	rc, err := fs.Open(path)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}

// OS returns an implementation of FileSystem reading from the
// tree rooted at root. Recording a root is convenient everywhere
// but necessary on Windows, because the slash-separated path
// passed to Open has no way to specify a drive letter. Using a root
// lets code refer to OS(`c:\`), OS(`d:\`) and so on.
func OS(root string) FileSystem {
	return osFS(root)
}

type osFS string

func (root osFS) String() string { return "os(" + string(root) + ")" }

func (root osFS) resolve(path string) string {
	// Clean the path so that it cannot possibly begin with ../.
	// If it did, the result of filepath.Join would be outside the
	// tree rooted at root. We probably won't ever see a path
	// with .. in it, but be safe anyway.
	path = pathpkg.Clean("/" + path)

	return filepath.Join(string(root), path)
}

func (root osFS) Open(path string) (readSeekCloser, error) {
	f, err := os.Open(root.resolve(path))
	if err != nil {
		return nil, err
	}
	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	if fi.IsDir() {
		return nil, fmt.Errorf("Open: %s is a directory", path)
	}
	return f, nil
}

func (root osFS) Lstat(path string) (os.FileInfo, error) {
	return os.Lstat(root.resolve(path))
}

func (root osFS) Stat(path string) (os.FileInfo, error) {
	return os.Stat(root.resolve(path))
}

func (root osFS) ReadDir(path string) ([]os.FileInfo, error) {
	return ioutil.ReadDir(root.resolve(path)) // is sorted
}

// hasPathPrefix returns true if x == y or x == y + "/" + more
func hasPathPrefix(x, y string) bool {
	return x == y || strings.HasPrefix(x, y) && (strings.HasSuffix(y, "/") || strings.HasPrefix(x[len(y):], "/"))
}

// A nameSpace is a file system made up of other file systems
// mounted at specific locations in the name space.
//
// The representation is a map from mount point locations
// to the list of file systems mounted at that location. A traditional
// Unix mount table would use a single file system per mount point,
// but we want to be able to mount multiple file systems on a single
// mount point and have the system behave as if the union of those
// file systems were present at the mount point.
// For example, if the OS file system has a Go installation in
// c:\Go and additional Go path trees in d:\Work1 and d:\Work2, then
// this name space creates the view we want for the godoc server:
//
//	nameSpace{
//		"/": {
//			{old: "/", fs: OS(`c:\Go`), new: "/"},
//		},
//		"/src/pkg": {
//			{old: "/src/pkg", fs: OS(`c:\Go`), new: "/src/pkg"},
//			{old: "/src/pkg", fs: OS(`d:\Work1`), new: "/src"},
//			{old: "/src/pkg", fs: OS(`d:\Work2`), new: "/src"},
//		},
//	}
//
// This is created by executing:
//
//	ns := nameSpace{}
//	ns.Bind("/", OS(`c:\Go`), "/", bindReplace)
//	ns.Bind("/src/pkg", OS(`d:\Work1`), "/src", bindAfter)
//	ns.Bind("/src/pkg", OS(`d:\Work2`), "/src", bindAfter)
//
// A particular mount point entry is a triple (old, fs, new), meaning that to
// operate on a path beginning with old, replace that prefix (old) with new
// and then pass that path to the FileSystem implementation fs.
//
// Given this name space, a ReadDir of /src/pkg/code will check each prefix
// of the path for a mount point (first /src/pkg/code, then /src/pkg, then /src,
// then /), stopping when it finds one. For the above example, /src/pkg/code
// will find the mount point at /src/pkg:
//
//	{old: "/src/pkg", fs: OS(`c:\Go`), new: "/src/pkg"},
//	{old: "/src/pkg", fs: OS(`d:\Work1`), new: "/src"},
//	{old: "/src/pkg", fs: OS(`d:\Work2`), new: "/src"},
//
// ReadDir will then execute these three calls and merge the results:
//
//	OS(`c:\Go`).ReadDir("/src/pkg/code")
//	OS(`d:\Work1`).ReadDir("/src/code")
//	OS(`d:\Work2`).ReadDir("/src/code")
//
// Note that the "/src/pkg" in "/src/pkg/code" has been replaced by
// just "/src" in the final two calls.
//
// OS is itself an implementation of a file system: it implements
// OS(`c:\Go`).ReadDir("/src/pkg/code") as ioutil.ReadDir(`c:\Go\src\pkg\code`).
//
// Because the new path is evaluated by fs (here OS(root)), another way
// to read the mount table is to mentally combine fs+new, so that this table:
//
//	{old: "/src/pkg", fs: OS(`c:\Go`), new: "/src/pkg"},
//	{old: "/src/pkg", fs: OS(`d:\Work1`), new: "/src"},
//	{old: "/src/pkg", fs: OS(`d:\Work2`), new: "/src"},
//
// reads as:
//
//	"/src/pkg" -> c:\Go\src\pkg
//	"/src/pkg" -> d:\Work1\src
//	"/src/pkg" -> d:\Work2\src
//
// An invariant (a redundancy) of the name space representation is that
// ns[mtpt][i].old is always equal to mtpt (in the example, ns["/src/pkg"]'s
// mount table entries always have old == "/src/pkg"). The 'old' field is
// useful to callers, because they receive just a []mountedFS and not any
// other indication of which mount point was found.
//
type nameSpace map[string][]mountedFS

// A mountedFS handles requests for path by replacing
// a prefix 'old' with 'new' and then calling the fs methods.
type mountedFS struct {
	old string
	fs  FileSystem
	new string
}

// translate translates path for use in m, replacing old with new.
//
// mountedFS{"/src/pkg", fs, "/src"}.translate("/src/pkg/code") == "/src/code".
func (m mountedFS) translate(path string) string {
	path = pathpkg.Clean("/" + path)
	if !hasPathPrefix(path, m.old) {
		panic("translate " + path + " but old=" + m.old)
	}
	return pathpkg.Join(m.new, path[len(m.old):])
}

func (nameSpace) String() string {
	return "ns"
}

// Fprint writes a text representation of the name space to w.
func (ns nameSpace) Fprint(w io.Writer) {
	fmt.Fprint(w, "name space {\n")
	var all []string
	for mtpt := range ns {
		all = append(all, mtpt)
	}
	sort.Strings(all)
	for _, mtpt := range all {
		fmt.Fprintf(w, "\t%s:\n", mtpt)
		for _, m := range ns[mtpt] {
			fmt.Fprintf(w, "\t\t%s %s\n", m.fs, m.new)
		}
	}
	fmt.Fprint(w, "}\n")
}

// clean returns a cleaned, rooted path for evaluation.
// It canonicalizes the path so that we can use string operations
// to analyze it.
func (nameSpace) clean(path string) string {
	return pathpkg.Clean("/" + path)
}

// Bind causes references to old to redirect to the path new in newfs.
// If mode is bindReplace, old redirections are discarded.
// If mode is bindBefore, this redirection takes priority over existing ones,
// but earlier ones are still consulted for paths that do not exist in newfs.
// If mode is bindAfter, this redirection happens only after existing ones
// have been tried and failed.

const (
	bindReplace = iota
	bindBefore
	bindAfter
)

func (ns nameSpace) Bind(old string, newfs FileSystem, new string, mode int) {
	old = ns.clean(old)
	new = ns.clean(new)
	m := mountedFS{old, newfs, new}
	var mtpt []mountedFS
	switch mode {
	case bindReplace:
		mtpt = append(mtpt, m)
	case bindAfter:
		mtpt = append(mtpt, ns.resolve(old)...)
		mtpt = append(mtpt, m)
	case bindBefore:
		mtpt = append(mtpt, m)
		mtpt = append(mtpt, ns.resolve(old)...)
	}

	// Extend m.old, m.new in inherited mount point entries.
	for i := range mtpt {
		m := &mtpt[i]
		if m.old != old {
			if !hasPathPrefix(old, m.old) {
				// This should not happen. If it does, panic so
				// that we can see the call trace that led to it.
				panic(fmt.Sprintf("invalid Bind: old=%q m={%q, %s, %q}", old, m.old, m.fs.String(), m.new))
			}
			suffix := old[len(m.old):]
			m.old = pathpkg.Join(m.old, suffix)
			m.new = pathpkg.Join(m.new, suffix)
		}
	}

	ns[old] = mtpt
}

// resolve resolves a path to the list of mountedFS to use for path.
func (ns nameSpace) resolve(path string) []mountedFS {
	path = ns.clean(path)
	for {
		if m := ns[path]; m != nil {
			if debugNS {
				fmt.Printf("resolve %s: %v\n", path, m)
			}
			return m
		}
		if path == "/" {
			break
		}
		path = pathpkg.Dir(path)
	}
	return nil
}

// Open implements the FileSystem Open method.
func (ns nameSpace) Open(path string) (readSeekCloser, error) {
	var err error
	for _, m := range ns.resolve(path) {
		if debugNS {
			fmt.Printf("tx %s: %v\n", path, m.translate(path))
		}
		r, err1 := m.fs.Open(m.translate(path))
		if err1 == nil {
			return r, nil
		}
		if err == nil {
			err = err1
		}
	}
	if err == nil {
		err = &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist}
	}
	return nil, err
}

// stat implements the FileSystem Stat and Lstat methods.
func (ns nameSpace) stat(path string, f func(FileSystem, string) (os.FileInfo, error)) (os.FileInfo, error) {
	var err error
	for _, m := range ns.resolve(path) {
		fi, err1 := f(m.fs, m.translate(path))
		if err1 == nil {
			return fi, nil
		}
		if err == nil {
			err = err1
		}
	}
	if err == nil {
		err = &os.PathError{Op: "stat", Path: path, Err: os.ErrNotExist}
	}
	return nil, err
}

func (ns nameSpace) Stat(path string) (os.FileInfo, error) {
	return ns.stat(path, FileSystem.Stat)
}

func (ns nameSpace) Lstat(path string) (os.FileInfo, error) {
	return ns.stat(path, FileSystem.Lstat)
}

// dirInfo is a trivial implementation of os.FileInfo for a directory.
type dirInfo string

func (d dirInfo) Name() string       { return string(d) }
func (d dirInfo) Size() int64        { return 0 }
func (d dirInfo) Mode() os.FileMode  { return os.ModeDir | 0555 }
func (d dirInfo) ModTime() time.Time { return startTime }
func (d dirInfo) IsDir() bool        { return true }
func (d dirInfo) Sys() interface{}   { return nil }

var startTime = time.Now()

// ReadDir implements the FileSystem ReadDir method. It's where most of the magic is.
// (The rest is in resolve.)
//
// Logically, ReadDir must return the union of all the directories that are named
// by path. In order to avoid misinterpreting Go packages, of all the directories
// that contain Go source code, we only include the files from the first,
// but we include subdirectories from all.
//
// ReadDir must also return directory entries needed to reach mount points.
// If the name space looks like the example in the type nameSpace comment,
// but c:\Go does not have a src/pkg subdirectory, we still want to be able
// to find that subdirectory, because we've mounted d:\Work1 and d:\Work2
// there. So if we don't see "src" in the directory listing for c:\Go, we add an
// entry for it before returning.
//
func (ns nameSpace) ReadDir(path string) ([]os.FileInfo, error) {
	path = ns.clean(path)

	var (
		haveGo   = false
		haveName = map[string]bool{}
		all      []os.FileInfo
		err      error
		first    []os.FileInfo
	)

	for _, m := range ns.resolve(path) {
		dir, err1 := m.fs.ReadDir(m.translate(path))
		if err1 != nil {
			if err == nil {
				err = err1
			}
			continue
		}

		if dir == nil {
			dir = []os.FileInfo{}
		}

		if first == nil {
			first = dir
		}

		// If we don't yet have Go files in 'all' and this directory
		// has some, add all the files from this directory.
		// Otherwise, only add subdirectories.
		useFiles := false
		if !haveGo {
			for _, d := range dir {
				if strings.HasSuffix(d.Name(), ".go") {
					useFiles = true
					haveGo = true
					break
				}
			}
		}

		for _, d := range dir {
			name := d.Name()
			if (d.IsDir() || useFiles) && !haveName[name] {
				haveName[name] = true
				all = append(all, d)
			}
		}
	}

	// We didn't find any directories containing Go files.
	// If some directory returned successfully, use that.
	if !haveGo {
		for _, d := range first {
			if !haveName[d.Name()] {
				haveName[d.Name()] = true
				all = append(all, d)
			}
		}
	}

	// Built union. Add any missing directories needed to reach mount points.
	for old := range ns {
		if hasPathPrefix(old, path) && old != path {
			// Find next element after path in old.
			elem := old[len(path):]
			elem = strings.TrimPrefix(elem, "/")
			if i := strings.Index(elem, "/"); i >= 0 {
				elem = elem[:i]
			}
			if !haveName[elem] {
				haveName[elem] = true
				all = append(all, dirInfo(elem))
			}
		}
	}

	if len(all) == 0 {
		return nil, err
	}

	sort.Sort(byName(all))
	return all, nil
}

// byName implements sort.Interface.
type byName []os.FileInfo

func (f byName) Len() int           { return len(f) }
func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
func (f byName) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }

// An httpFS implements http.FileSystem using a FileSystem.
type httpFS struct {
	fs FileSystem
}

func (h *httpFS) Open(name string) (http.File, error) {
	fi, err := h.fs.Stat(name)
	if err != nil {
		return nil, err
	}
	if fi.IsDir() {
		return &httpDir{h.fs, name, nil}, nil
	}
	f, err := h.fs.Open(name)
	if err != nil {
		return nil, err
	}
	return &httpFile{h.fs, f, name}, nil
}

// httpDir implements http.File for a directory in a FileSystem.
type httpDir struct {
	fs      FileSystem
	name    string
	pending []os.FileInfo
}

func (h *httpDir) Close() error               { return nil }
func (h *httpDir) Stat() (os.FileInfo, error) { return h.fs.Stat(h.name) }
func (h *httpDir) Read([]byte) (int, error) {
	return 0, fmt.Errorf("cannot Read from directory %s", h.name)
}

func (h *httpDir) Seek(offset int64, whence int) (int64, error) {
	if offset == 0 && whence == 0 {
		h.pending = nil
		return 0, nil
	}
	return 0, fmt.Errorf("unsupported Seek in directory %s", h.name)
}

func (h *httpDir) Readdir(count int) ([]os.FileInfo, error) {
	if h.pending == nil {
		d, err := h.fs.ReadDir(h.name)
		if err != nil {
			return nil, err
		}
		if d == nil {
			d = []os.FileInfo{} // not nil
		}
		h.pending = d
	}

	if len(h.pending) == 0 && count > 0 {
		return nil, io.EOF
	}
	if count <= 0 || count > len(h.pending) {
		count = len(h.pending)
	}
	d := h.pending[:count]
	h.pending = h.pending[count:]
	return d, nil
}

// httpFile implements http.File for a file (not directory) in a FileSystem.
type httpFile struct {
	fs FileSystem
	readSeekCloser
	name string
}

func (h *httpFile) Stat() (os.FileInfo, error) { return h.fs.Stat(h.name) }
func (h *httpFile) Readdir(int) ([]os.FileInfo, error) {
|
||||||
|
return nil, fmt.Errorf("cannot Readdir from file %s", h.name)
|
||||||
|
}
|
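The fragment below is an editor's sketch, not part of the CL: it shows how the name space above is typically assembled with Bind and then consumed through ReadDir and httpFS, using the fs variable and the OS, bindReplace and bindAfter helpers that the main.go added by this commit relies on. The concrete paths and the handler URL are made-up placeholders.

// exampleNameSpace is an illustrative sketch (not part of the commit).
// It assumes the package-level fs name space plus the OS, bindReplace and
// bindAfter helpers defined elsewhere in this package; the paths are fake.
func exampleNameSpace() {
	fs.Bind("/", OS("/usr/local/go"), "/", bindReplace)              // assumed GOROOT
	fs.Bind("/src/pkg", OS("/home/gopher/gopath"), "/src", bindAfter) // assumed GOPATH

	// ReadDir returns the union of all mounted directories; of the mounts
	// that contain Go files, only the first contributes files.
	if fis, err := fs.ReadDir("/src/pkg"); err == nil {
		for _, fi := range fis {
			fmt.Println(fi.Name(), fi.IsDir())
		}
	}

	// httpFS adapts the same name space to net/http's FileSystem interface.
	http.Handle("/files/", http.StripPrefix("/files/", http.FileServer(&httpFS{fs})))
}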
372
cmd/godoc/format.go
Normal file
@ -0,0 +1,372 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// This file implements FormatSelections and FormatText.
|
||||||
|
// FormatText is used to HTML-format Go and non-Go source
|
||||||
|
// text with line numbers and highlighted sections. It is
|
||||||
|
// built on top of FormatSelections, a generic formatter
|
||||||
|
// for "selected" text.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"go/scanner"
|
||||||
|
"go/token"
|
||||||
|
"io"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"text/template"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// Implementation of FormatSelections
|
||||||
|
|
||||||
|
// A Segment describes a text segment [start, end).
|
||||||
|
// The zero value of a Segment is a ready-to-use empty segment.
|
||||||
|
//
|
||||||
|
type Segment struct {
|
||||||
|
start, end int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (seg *Segment) isEmpty() bool { return seg.start >= seg.end }
|
||||||
|
|
||||||
|
// A Selection is an "iterator" function returning a text segment.
|
||||||
|
// Repeated calls to a selection return consecutive, non-overlapping,
|
||||||
|
// non-empty segments, followed by an infinite sequence of empty
|
||||||
|
// segments. The first empty segment marks the end of the selection.
|
||||||
|
//
|
||||||
|
type Selection func() Segment
|
||||||
|
|
||||||
|
// A LinkWriter writes some start or end "tag" to w for the text offset offs.
|
||||||
|
// It is called by FormatSelections at the start or end of each link segment.
|
||||||
|
//
|
||||||
|
type LinkWriter func(w io.Writer, offs int, start bool)
|
||||||
|
|
||||||
|
// A SegmentWriter formats a text according to selections and writes it to w.
|
||||||
|
// The selections parameter is a bit set indicating which selections provided
|
||||||
|
// to FormatSelections overlap with the text segment: If the n'th bit is set
|
||||||
|
// in selections, the n'th selection provided to FormatSelections is overlapping
|
||||||
|
// with the text.
|
||||||
|
//
|
||||||
|
type SegmentWriter func(w io.Writer, text []byte, selections int)
|
||||||
|
|
||||||
|
// FormatSelections takes a text and writes it to w using link and segment
|
||||||
|
// writers lw and sw as follows: lw is invoked for consecutive segment starts
|
||||||
|
// and ends as specified through the links selection, and sw is invoked for
|
||||||
|
// consecutive segments of text overlapped by the same selections as specified
|
||||||
|
// by selections. The link writer lw may be nil, in which case the links
|
||||||
|
// Selection is ignored.
|
||||||
|
//
|
||||||
|
func FormatSelections(w io.Writer, text []byte, lw LinkWriter, links Selection, sw SegmentWriter, selections ...Selection) {
|
||||||
|
// If we have a link writer, make the links
|
||||||
|
// selection the last entry in selections
|
||||||
|
if lw != nil {
|
||||||
|
selections = append(selections, links)
|
||||||
|
}
|
||||||
|
|
||||||
|
// compute the sequence of consecutive segment changes
|
||||||
|
changes := newMerger(selections)
|
||||||
|
|
||||||
|
// The i'th bit in bitset indicates that the text
|
||||||
|
// at the current offset is covered by selections[i].
|
||||||
|
bitset := 0
|
||||||
|
lastOffs := 0
|
||||||
|
|
||||||
|
// Text segments are written in a delayed fashion
|
||||||
|
// such that consecutive segments belonging to the
|
||||||
|
// same selection can be combined (peephole optimization).
|
||||||
|
// last describes the last segment which has not yet been written.
|
||||||
|
var last struct {
|
||||||
|
begin, end int // valid if begin < end
|
||||||
|
bitset int
|
||||||
|
}
|
||||||
|
|
||||||
|
// flush writes the last delayed text segment
|
||||||
|
flush := func() {
|
||||||
|
if last.begin < last.end {
|
||||||
|
sw(w, text[last.begin:last.end], last.bitset)
|
||||||
|
}
|
||||||
|
last.begin = last.end // invalidate last
|
||||||
|
}
|
||||||
|
|
||||||
|
// segment runs the segment [lastOffs, end) with the selection
|
||||||
|
// indicated by bitset through the segment peephole optimizer.
|
||||||
|
segment := func(end int) {
|
||||||
|
if lastOffs < end { // ignore empty segments
|
||||||
|
if last.end != lastOffs || last.bitset != bitset {
|
||||||
|
// the last segment is not adjacent to or
|
||||||
|
// differs from the new one
|
||||||
|
flush()
|
||||||
|
// start a new segment
|
||||||
|
last.begin = lastOffs
|
||||||
|
}
|
||||||
|
last.end = end
|
||||||
|
last.bitset = bitset
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
// get the next segment change
|
||||||
|
index, offs, start := changes.next()
|
||||||
|
if index < 0 || offs > len(text) {
|
||||||
|
// no more segment changes or the next change
|
||||||
|
// is past the end of the text - we're done
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// determine the kind of segment change
|
||||||
|
if lw != nil && index == len(selections)-1 {
|
||||||
|
// we have a link segment change (see start of this function):
|
||||||
|
// format the previous selection segment, write the
|
||||||
|
// link tag and start a new selection segment
|
||||||
|
segment(offs)
|
||||||
|
flush()
|
||||||
|
lastOffs = offs
|
||||||
|
lw(w, offs, start)
|
||||||
|
} else {
|
||||||
|
// we have a selection change:
|
||||||
|
// format the previous selection segment, determine
|
||||||
|
// the new selection bitset and start a new segment
|
||||||
|
segment(offs)
|
||||||
|
lastOffs = offs
|
||||||
|
mask := 1 << uint(index)
|
||||||
|
if start {
|
||||||
|
bitset |= mask
|
||||||
|
} else {
|
||||||
|
bitset &^= mask
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
segment(len(text))
|
||||||
|
flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
// A merger merges a slice of Selections and produces a sequence of
|
||||||
|
// consecutive segment change events through repeated next() calls.
|
||||||
|
//
|
||||||
|
type merger struct {
|
||||||
|
selections []Selection
|
||||||
|
segments []Segment // segments[i] is the next segment of selections[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
const infinity int = 2e9
|
||||||
|
|
||||||
|
func newMerger(selections []Selection) *merger {
|
||||||
|
segments := make([]Segment, len(selections))
|
||||||
|
for i, sel := range selections {
|
||||||
|
segments[i] = Segment{infinity, infinity}
|
||||||
|
if sel != nil {
|
||||||
|
if seg := sel(); !seg.isEmpty() {
|
||||||
|
segments[i] = seg
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &merger{selections, segments}
|
||||||
|
}
|
||||||
|
|
||||||
|
// next returns the next segment change: index specifies the Selection
|
||||||
|
// to which the segment belongs, offs is the segment start or end offset
|
||||||
|
// as determined by the start value. If there are no more segment changes,
|
||||||
|
// next returns an index value < 0.
|
||||||
|
//
|
||||||
|
func (m *merger) next() (index, offs int, start bool) {
|
||||||
|
// find the next smallest offset where a segment starts or ends
|
||||||
|
offs = infinity
|
||||||
|
index = -1
|
||||||
|
for i, seg := range m.segments {
|
||||||
|
switch {
|
||||||
|
case seg.start < offs:
|
||||||
|
offs = seg.start
|
||||||
|
index = i
|
||||||
|
start = true
|
||||||
|
case seg.end < offs:
|
||||||
|
offs = seg.end
|
||||||
|
index = i
|
||||||
|
start = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if index < 0 {
|
||||||
|
// no offset found => all selections merged
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// offset found - it's either the start or end offset but
|
||||||
|
// either way it is ok to consume the start offset: set it
|
||||||
|
// to infinity so it won't be considered in the following
|
||||||
|
// next call
|
||||||
|
m.segments[index].start = infinity
|
||||||
|
if start {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// end offset found - consume it
|
||||||
|
m.segments[index].end = infinity
|
||||||
|
// advance to the next segment for that selection
|
||||||
|
seg := m.selections[index]()
|
||||||
|
if !seg.isEmpty() {
|
||||||
|
m.segments[index] = seg
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// Implementation of FormatText
|
||||||
|
|
||||||
|
// lineSelection returns the line segments for text as a Selection.
|
||||||
|
func lineSelection(text []byte) Selection {
|
||||||
|
i, j := 0, 0
|
||||||
|
return func() (seg Segment) {
|
||||||
|
// find next newline, if any
|
||||||
|
for j < len(text) {
|
||||||
|
j++
|
||||||
|
if text[j-1] == '\n' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if i < j {
|
||||||
|
// text[i:j] constitutes a line
|
||||||
|
seg = Segment{i, j}
|
||||||
|
i = j
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// tokenSelection returns, as a selection, the sequence of
|
||||||
|
// consecutive occurrences of token sel in the Go src text.
|
||||||
|
//
|
||||||
|
func tokenSelection(src []byte, sel token.Token) Selection {
|
||||||
|
var s scanner.Scanner
|
||||||
|
fset := token.NewFileSet()
|
||||||
|
file := fset.AddFile("", fset.Base(), len(src))
|
||||||
|
s.Init(file, src, nil, scanner.ScanComments)
|
||||||
|
return func() (seg Segment) {
|
||||||
|
for {
|
||||||
|
pos, tok, lit := s.Scan()
|
||||||
|
if tok == token.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
offs := file.Offset(pos)
|
||||||
|
if tok == sel {
|
||||||
|
seg = Segment{offs, offs + len(lit)}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeSelection is a helper function to make a Selection from a slice of pairs.
|
||||||
|
// Pairs describing empty segments are ignored.
|
||||||
|
//
|
||||||
|
func makeSelection(matches [][]int) Selection {
|
||||||
|
i := 0
|
||||||
|
return func() Segment {
|
||||||
|
for i < len(matches) {
|
||||||
|
m := matches[i]
|
||||||
|
i++
|
||||||
|
if m[0] < m[1] {
|
||||||
|
// non-empty segment
|
||||||
|
return Segment{m[0], m[1]}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Segment{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// regexpSelection computes the Selection for the regular expression expr in text.
|
||||||
|
func regexpSelection(text []byte, expr string) Selection {
|
||||||
|
var matches [][]int
|
||||||
|
if rx, err := regexp.Compile(expr); err == nil {
|
||||||
|
matches = rx.FindAllIndex(text, -1)
|
||||||
|
}
|
||||||
|
return makeSelection(matches)
|
||||||
|
}
|
||||||
|
|
||||||
|
var selRx = regexp.MustCompile(`^([0-9]+):([0-9]+)`)
|
||||||
|
|
||||||
|
// rangeSelection computes the Selection for a text range described
|
||||||
|
// by the argument str; the range description must match the selRx
|
||||||
|
// regular expression.
|
||||||
|
//
|
||||||
|
func rangeSelection(str string) Selection {
|
||||||
|
m := selRx.FindStringSubmatch(str)
|
||||||
|
if len(m) >= 2 {
|
||||||
|
from, _ := strconv.Atoi(m[1])
|
||||||
|
to, _ := strconv.Atoi(m[2])
|
||||||
|
if from < to {
|
||||||
|
return makeSelection([][]int{{from, to}})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Span tags for all the possible selection combinations that may
|
||||||
|
// be generated by FormatText. Selections are indicated by a bitset,
|
||||||
|
// and the value of the bitset specifies the tag to be used.
|
||||||
|
//
|
||||||
|
// bit 0: comments
|
||||||
|
// bit 1: highlights
|
||||||
|
// bit 2: selections
|
||||||
|
//
|
||||||
|
var startTags = [][]byte{
|
||||||
|
/* 000 */ []byte(``),
|
||||||
|
/* 001 */ []byte(`<span class="comment">`),
|
||||||
|
/* 010 */ []byte(`<span class="highlight">`),
|
||||||
|
/* 011 */ []byte(`<span class="highlight-comment">`),
|
||||||
|
/* 100 */ []byte(`<span class="selection">`),
|
||||||
|
/* 101 */ []byte(`<span class="selection-comment">`),
|
||||||
|
/* 110 */ []byte(`<span class="selection-highlight">`),
|
||||||
|
/* 111 */ []byte(`<span class="selection-highlight-comment">`),
|
||||||
|
}
|
||||||
|
|
||||||
|
var endTag = []byte(`</span>`)
|
||||||
|
|
||||||
|
func selectionTag(w io.Writer, text []byte, selections int) {
|
||||||
|
if selections < len(startTags) {
|
||||||
|
if tag := startTags[selections]; len(tag) > 0 {
|
||||||
|
w.Write(tag)
|
||||||
|
template.HTMLEscape(w, text)
|
||||||
|
w.Write(endTag)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
template.HTMLEscape(w, text)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FormatText HTML-escapes text and writes it to w.
|
||||||
|
// Consecutive text segments are wrapped in HTML spans (with tags as
|
||||||
|
// defined by startTags and endTag) as follows:
|
||||||
|
//
|
||||||
|
// - if line >= 0, line number (ln) spans are inserted before each line,
|
||||||
|
// starting with the value of line
|
||||||
|
// - if the text is Go source, comments get the "comment" span class
|
||||||
|
// - each occurrence of the regular expression pattern gets the "highlight"
|
||||||
|
// span class
|
||||||
|
// - text segments covered by selection get the "selection" span class
|
||||||
|
//
|
||||||
|
// Comments, highlights, and selections may overlap arbitrarily; the respective
|
||||||
|
// HTML span classes are specified in the startTags variable.
|
||||||
|
//
|
||||||
|
func FormatText(w io.Writer, text []byte, line int, goSource bool, pattern string, selection Selection) {
|
||||||
|
var comments, highlights Selection
|
||||||
|
if goSource {
|
||||||
|
comments = tokenSelection(text, token.COMMENT)
|
||||||
|
}
|
||||||
|
if pattern != "" {
|
||||||
|
highlights = regexpSelection(text, pattern)
|
||||||
|
}
|
||||||
|
if line >= 0 || comments != nil || highlights != nil || selection != nil {
|
||||||
|
var lineTag LinkWriter
|
||||||
|
if line >= 0 {
|
||||||
|
lineTag = func(w io.Writer, _ int, start bool) {
|
||||||
|
if start {
|
||||||
|
fmt.Fprintf(w, "<a id=\"L%d\"></a><span class=\"ln\">%6d</span>\t", line, line)
|
||||||
|
line++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
FormatSelections(w, text, lineTag, lineSelection(text), selectionTag, comments, highlights, selection)
|
||||||
|
} else {
|
||||||
|
template.HTMLEscape(w, text)
|
||||||
|
}
|
||||||
|
}
|
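A minimal sketch (editor's addition, not part of the commit) of how FormatText above is called: it highlights every occurrence of a pattern in a small Go source buffer and numbers the lines, mirroring the FormatText calls made in main.go. The source string and pattern are made up.

// exampleFormatText is an illustrative sketch only.
func exampleFormatText() {
	src := []byte("package main\n\nfunc main() { fmt.Fprintf(os.Stdout, \"hi\\n\") }\n")
	var buf bytes.Buffer
	FormatText(&buf, src, 1, true, "Fprintf", nil)
	// buf now holds HTML-escaped text with <span class="ln"> line numbers,
	// <span class="comment"> spans around comments, and
	// <span class="highlight"> spans around matches of the pattern.
	fmt.Print(buf.String())
}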
1586
cmd/godoc/godoc.go
Normal file
File diff suppressed because it is too large
1079
cmd/godoc/index.go
Normal file
File diff suppressed because it is too large
234
cmd/godoc/linkify.go
Normal file
@ -0,0 +1,234 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// This file implements LinkifyText which introduces
|
||||||
|
// links for identifiers pointing to their declarations.
|
||||||
|
// The approach does not cover all cases because godoc
|
||||||
|
// doesn't have complete type information, but it's
|
||||||
|
// reasonably good for browsing.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/token"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LinkifyText HTML-escapes source text and writes it to w.
|
||||||
|
// Identifiers that are in a "use" position (i.e., that are
|
||||||
|
// not being declared), are wrapped with HTML links pointing
|
||||||
|
// to the respective declaration, if possible. Comments are
|
||||||
|
// formatted the same way as with FormatText.
|
||||||
|
//
|
||||||
|
func LinkifyText(w io.Writer, text []byte, n ast.Node) {
|
||||||
|
links := linksFor(n)
|
||||||
|
|
||||||
|
i := 0 // links index
|
||||||
|
prev := "" // prev HTML tag
|
||||||
|
linkWriter := func(w io.Writer, _ int, start bool) {
|
||||||
|
// end tag
|
||||||
|
if !start {
|
||||||
|
if prev != "" {
|
||||||
|
fmt.Fprintf(w, `</%s>`, prev)
|
||||||
|
prev = ""
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// start tag
|
||||||
|
prev = ""
|
||||||
|
if i < len(links) {
|
||||||
|
switch info := links[i]; {
|
||||||
|
case info.path != "" && info.name == "":
|
||||||
|
// package path
|
||||||
|
fmt.Fprintf(w, `<a href="/pkg/%s/">`, info.path)
|
||||||
|
prev = "a"
|
||||||
|
case info.path != "" && info.name != "":
|
||||||
|
// qualified identifier
|
||||||
|
fmt.Fprintf(w, `<a href="/pkg/%s/#%s">`, info.path, info.name)
|
||||||
|
prev = "a"
|
||||||
|
case info.path == "" && info.name != "":
|
||||||
|
// local identifier
|
||||||
|
if info.mode == identVal {
|
||||||
|
fmt.Fprintf(w, `<span id="%s">`, info.name)
|
||||||
|
prev = "span"
|
||||||
|
} else if ast.IsExported(info.name) {
|
||||||
|
fmt.Fprintf(w, `<a href="#%s">`, info.name)
|
||||||
|
prev = "a"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
idents := tokenSelection(text, token.IDENT)
|
||||||
|
comments := tokenSelection(text, token.COMMENT)
|
||||||
|
FormatSelections(w, text, linkWriter, idents, selectionTag, comments)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A link describes the (HTML) link information for an identifier.
|
||||||
|
// The zero value of a link represents "no link".
|
||||||
|
//
|
||||||
|
type link struct {
|
||||||
|
mode identMode
|
||||||
|
path, name string // package path, identifier name
|
||||||
|
}
|
||||||
|
|
||||||
|
// linksFor returns the list of links for the identifiers used
|
||||||
|
// by node in the same order as they appear in the source.
|
||||||
|
//
|
||||||
|
func linksFor(node ast.Node) (list []link) {
|
||||||
|
modes := identModesFor(node)
|
||||||
|
|
||||||
|
// NOTE: We are expecting ast.Inspect to call the
|
||||||
|
// callback function in source text order.
|
||||||
|
ast.Inspect(node, func(node ast.Node) bool {
|
||||||
|
switch n := node.(type) {
|
||||||
|
case *ast.Ident:
|
||||||
|
m := modes[n]
|
||||||
|
info := link{mode: m}
|
||||||
|
switch m {
|
||||||
|
case identUse:
|
||||||
|
if n.Obj == nil && predeclared[n.Name] {
|
||||||
|
info.path = builtinPkgPath
|
||||||
|
}
|
||||||
|
info.name = n.Name
|
||||||
|
case identDef:
|
||||||
|
// any declaration expect const or var - empty link
|
||||||
|
case identVal:
|
||||||
|
// const or var declaration
|
||||||
|
info.name = n.Name
|
||||||
|
}
|
||||||
|
list = append(list, info)
|
||||||
|
return false
|
||||||
|
case *ast.SelectorExpr:
|
||||||
|
// Detect qualified identifiers of the form pkg.ident.
|
||||||
|
// If anything fails we return true and collect individual
|
||||||
|
// identifiers instead.
|
||||||
|
if x, _ := n.X.(*ast.Ident); x != nil {
|
||||||
|
// x must be a package for a qualified identifier
|
||||||
|
if obj := x.Obj; obj != nil && obj.Kind == ast.Pkg {
|
||||||
|
if spec, _ := obj.Decl.(*ast.ImportSpec); spec != nil {
|
||||||
|
// spec.Path.Value is the import path
|
||||||
|
if path, err := strconv.Unquote(spec.Path.Value); err == nil {
|
||||||
|
// Register two links, one for the package
|
||||||
|
// and one for the qualified identifier.
|
||||||
|
info := link{path: path}
|
||||||
|
list = append(list, info)
|
||||||
|
info.name = n.Sel.Name
|
||||||
|
list = append(list, info)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// The identMode describes how an identifier is "used" at its source location.
|
||||||
|
type identMode int
|
||||||
|
|
||||||
|
const (
|
||||||
|
identUse identMode = iota // identifier is used (must be zero value for identMode)
|
||||||
|
identDef // identifier is defined
|
||||||
|
identVal // identifier is defined in a const or var declaration
|
||||||
|
)
|
||||||
|
|
||||||
|
// identModesFor returns a map providing the identMode for each identifier used by node.
|
||||||
|
func identModesFor(node ast.Node) map[*ast.Ident]identMode {
|
||||||
|
m := make(map[*ast.Ident]identMode)
|
||||||
|
|
||||||
|
ast.Inspect(node, func(node ast.Node) bool {
|
||||||
|
switch n := node.(type) {
|
||||||
|
case *ast.Field:
|
||||||
|
for _, n := range n.Names {
|
||||||
|
m[n] = identDef
|
||||||
|
}
|
||||||
|
case *ast.ImportSpec:
|
||||||
|
if name := n.Name; name != nil {
|
||||||
|
m[name] = identDef
|
||||||
|
}
|
||||||
|
case *ast.ValueSpec:
|
||||||
|
for _, n := range n.Names {
|
||||||
|
m[n] = identVal
|
||||||
|
}
|
||||||
|
case *ast.TypeSpec:
|
||||||
|
m[n.Name] = identDef
|
||||||
|
case *ast.FuncDecl:
|
||||||
|
m[n.Name] = identDef
|
||||||
|
case *ast.AssignStmt:
|
||||||
|
// Short variable declarations only show up if we apply
|
||||||
|
// this code to all source code (as opposed to exported
|
||||||
|
// declarations only).
|
||||||
|
if n.Tok == token.DEFINE {
|
||||||
|
// Some of the lhs variables may be re-declared,
|
||||||
|
// so technically they are not defs. We don't
|
||||||
|
// care for now.
|
||||||
|
for _, x := range n.Lhs {
|
||||||
|
// Each lhs expression should be an
|
||||||
|
// ident, but we are conservative and check.
|
||||||
|
if n, _ := x.(*ast.Ident); n != nil {
|
||||||
|
m[n] = identVal
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// The predeclared map represents the set of all predeclared identifiers.
|
||||||
|
// TODO(gri) This information is also encoded in similar maps in go/doc,
|
||||||
|
// but not exported. Consider exporting an accessor and using
|
||||||
|
// it instead.
|
||||||
|
var predeclared = map[string]bool{
|
||||||
|
"bool": true,
|
||||||
|
"byte": true,
|
||||||
|
"complex64": true,
|
||||||
|
"complex128": true,
|
||||||
|
"error": true,
|
||||||
|
"float32": true,
|
||||||
|
"float64": true,
|
||||||
|
"int": true,
|
||||||
|
"int8": true,
|
||||||
|
"int16": true,
|
||||||
|
"int32": true,
|
||||||
|
"int64": true,
|
||||||
|
"rune": true,
|
||||||
|
"string": true,
|
||||||
|
"uint": true,
|
||||||
|
"uint8": true,
|
||||||
|
"uint16": true,
|
||||||
|
"uint32": true,
|
||||||
|
"uint64": true,
|
||||||
|
"uintptr": true,
|
||||||
|
"true": true,
|
||||||
|
"false": true,
|
||||||
|
"iota": true,
|
||||||
|
"nil": true,
|
||||||
|
"append": true,
|
||||||
|
"cap": true,
|
||||||
|
"close": true,
|
||||||
|
"complex": true,
|
||||||
|
"copy": true,
|
||||||
|
"delete": true,
|
||||||
|
"imag": true,
|
||||||
|
"len": true,
|
||||||
|
"make": true,
|
||||||
|
"new": true,
|
||||||
|
"panic": true,
|
||||||
|
"print": true,
|
||||||
|
"println": true,
|
||||||
|
"real": true,
|
||||||
|
"recover": true,
|
||||||
|
}
|
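The following is an editor's sketch, not part of the commit: one way LinkifyText could be driven, loading a file through the virtual file system and writing it with identifiers linked to their declarations. The path is a made-up placeholder; ReadFile, fs and LinkifyText are the functions defined in this package.

// exampleLinkify is an illustrative sketch only.
func exampleLinkify() {
	const name = "/src/pkg/example/example.go" // assumed path inside the name space
	src, err := ReadFile(fs, name)
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, name, src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	LinkifyText(os.Stdout, src, file)
}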
470
cmd/godoc/main.go
Normal file
@ -0,0 +1,470 @@
|
|||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// godoc: Go Documentation Server
|
||||||
|
|
||||||
|
// Web server tree:
|
||||||
|
//
|
||||||
|
// http://godoc/ main landing page
|
||||||
|
// http://godoc/doc/ serve from $GOROOT/doc - spec, mem, etc.
|
||||||
|
// http://godoc/src/ serve files from $GOROOT/src; .go gets pretty-printed
|
||||||
|
// http://godoc/cmd/ serve documentation about commands
|
||||||
|
// http://godoc/pkg/ serve documentation about packages
|
||||||
|
// (idea is if you say import "compress/zlib", you go to
|
||||||
|
// http://godoc/pkg/compress/zlib)
|
||||||
|
//
|
||||||
|
// Command-line interface:
|
||||||
|
//
|
||||||
|
// godoc packagepath [name ...]
|
||||||
|
//
|
||||||
|
// godoc compress/zlib
|
||||||
|
// - prints doc for package compress/zlib
|
||||||
|
// godoc crypto/block Cipher NewCMAC
|
||||||
|
// - prints doc for Cipher and NewCMAC in package crypto/block
|
||||||
|
|
||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/zip"
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
_ "expvar" // to serve /debug/vars
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/build"
|
||||||
|
"go/printer"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
_ "net/http/pprof" // to serve /debug/pprof/*
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
pathpkg "path"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const defaultAddr = ":6060" // default webserver address
|
||||||
|
|
||||||
|
var (
|
||||||
|
// file system to serve
|
||||||
|
// (with e.g.: zip -r go.zip $GOROOT -i \*.go -i \*.html -i \*.css -i \*.js -i \*.txt -i \*.c -i \*.h -i \*.s -i \*.png -i \*.jpg -i \*.sh -i favicon.ico)
|
||||||
|
zipfile = flag.String("zip", "", "zip file providing the file system to serve; disabled if empty")
|
||||||
|
|
||||||
|
// file-based index
|
||||||
|
writeIndex = flag.Bool("write_index", false, "write index to a file; the file name must be specified with -index_files")
|
||||||
|
|
||||||
|
// network
|
||||||
|
httpAddr = flag.String("http", "", "HTTP service address (e.g., '"+defaultAddr+"')")
|
||||||
|
serverAddr = flag.String("server", "", "webserver address for command line searches")
|
||||||
|
|
||||||
|
// layout control
|
||||||
|
html = flag.Bool("html", false, "print HTML in command-line mode")
|
||||||
|
srcMode = flag.Bool("src", false, "print (exported) source in command-line mode")
|
||||||
|
urlFlag = flag.String("url", "", "print HTML for named URL")
|
||||||
|
|
||||||
|
// command-line searches
|
||||||
|
query = flag.Bool("q", false, "arguments are considered search queries")
|
||||||
|
)
|
||||||
|
|
||||||
|
func serveError(w http.ResponseWriter, r *http.Request, relpath string, err error) {
|
||||||
|
w.WriteHeader(http.StatusNotFound)
|
||||||
|
servePage(w, Page{
|
||||||
|
Title: "File " + relpath,
|
||||||
|
Subtitle: relpath,
|
||||||
|
Body: applyTemplate(errorHTML, "errorHTML", err), // err may contain an absolute path!
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func usage() {
|
||||||
|
fmt.Fprintf(os.Stderr,
|
||||||
|
"usage: godoc package [name ...]\n"+
|
||||||
|
" godoc -http="+defaultAddr+"\n")
|
||||||
|
flag.PrintDefaults()
|
||||||
|
os.Exit(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func loggingHandler(h http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||||
|
log.Printf("%s\t%s", req.RemoteAddr, req.URL)
|
||||||
|
h.ServeHTTP(w, req)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func remoteSearch(query string) (res *http.Response, err error) {
|
||||||
|
// list of addresses to try
|
||||||
|
var addrs []string
|
||||||
|
if *serverAddr != "" {
|
||||||
|
// explicit server address - only try this one
|
||||||
|
addrs = []string{*serverAddr}
|
||||||
|
} else {
|
||||||
|
addrs = []string{
|
||||||
|
defaultAddr,
|
||||||
|
"golang.org",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// remote search
|
||||||
|
search := remoteSearchURL(query, *html)
|
||||||
|
for _, addr := range addrs {
|
||||||
|
url := "http://" + addr + search
|
||||||
|
res, err = http.Get(url)
|
||||||
|
if err == nil && res.StatusCode == http.StatusOK {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == nil && res.StatusCode != http.StatusOK {
|
||||||
|
err = errors.New(res.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Does s look like a regular expression?
|
||||||
|
func isRegexp(s string) bool {
|
||||||
|
return strings.IndexAny(s, ".(|)*+?^$[]") >= 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make a regular expression of the form
|
||||||
|
// names[0]|names[1]|...names[len(names)-1].
|
||||||
|
// Returns nil if the regular expression is illegal.
|
||||||
|
func makeRx(names []string) (rx *regexp.Regexp) {
|
||||||
|
if len(names) > 0 {
|
||||||
|
s := ""
|
||||||
|
for i, name := range names {
|
||||||
|
if i > 0 {
|
||||||
|
s += "|"
|
||||||
|
}
|
||||||
|
if isRegexp(name) {
|
||||||
|
s += name
|
||||||
|
} else {
|
||||||
|
s += "^" + name + "$" // must match exactly
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rx, _ = regexp.Compile(s) // rx is nil if there's a compilation error
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Usage = usage
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
// Check usage: either server and no args, command line and args, or index creation mode
|
||||||
|
if (*httpAddr != "" || *urlFlag != "") != (flag.NArg() == 0) && !*writeIndex {
|
||||||
|
usage()
|
||||||
|
}
|
||||||
|
|
||||||
|
if *tabwidth < 0 {
|
||||||
|
log.Fatalf("negative tabwidth %d", *tabwidth)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine file system to use.
|
||||||
|
// TODO(gri) - fs and fsHttp should really be the same. Try to unify.
|
||||||
|
// - fsHttp doesn't need to be set up in command-line mode,
|
||||||
|
// same is true for the http handlers in initHandlers.
|
||||||
|
if *zipfile == "" {
|
||||||
|
// use file system of underlying OS
|
||||||
|
fs.Bind("/", OS(*goroot), "/", bindReplace)
|
||||||
|
if *templateDir != "" {
|
||||||
|
fs.Bind("/lib/godoc", OS(*templateDir), "/", bindBefore)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// use file system specified via .zip file (path separator must be '/')
|
||||||
|
rc, err := zip.OpenReader(*zipfile)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("%s: %s\n", *zipfile, err)
|
||||||
|
}
|
||||||
|
defer rc.Close() // be nice (e.g., -writeIndex mode)
|
||||||
|
fs.Bind("/", NewZipFS(rc, *zipfile), *goroot, bindReplace)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bind $GOPATH trees into Go root.
|
||||||
|
for _, p := range filepath.SplitList(build.Default.GOPATH) {
|
||||||
|
fs.Bind("/src/pkg", OS(p), "/src", bindAfter)
|
||||||
|
}
|
||||||
|
|
||||||
|
readTemplates()
|
||||||
|
initHandlers()
|
||||||
|
|
||||||
|
if *writeIndex {
|
||||||
|
// Write search index and exit.
|
||||||
|
if *indexFiles == "" {
|
||||||
|
log.Fatal("no index file specified")
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Println("initialize file systems")
|
||||||
|
*verbose = true // want to see what happens
|
||||||
|
initFSTree()
|
||||||
|
|
||||||
|
*indexThrottle = 1
|
||||||
|
updateIndex()
|
||||||
|
|
||||||
|
log.Println("writing index file", *indexFiles)
|
||||||
|
f, err := os.Create(*indexFiles)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
index, _ := searchIndex.get()
|
||||||
|
err = index.(*Index).Write(f)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Println("done")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print content that would be served at the URL *urlFlag.
|
||||||
|
if *urlFlag != "" {
|
||||||
|
registerPublicHandlers(http.DefaultServeMux)
|
||||||
|
initFSTree()
|
||||||
|
updateMetadata()
|
||||||
|
// Try up to 10 fetches, following redirects.
|
||||||
|
urlstr := *urlFlag
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
// Prepare request.
|
||||||
|
u, err := url.Parse(urlstr)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
req := &http.Request{
|
||||||
|
URL: u,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Invoke default HTTP handler to serve request
|
||||||
|
// to our buffering httpWriter.
|
||||||
|
w := &httpWriter{h: http.Header{}, code: 200}
|
||||||
|
http.DefaultServeMux.ServeHTTP(w, req)
|
||||||
|
|
||||||
|
// Return data, error, or follow redirect.
|
||||||
|
switch w.code {
|
||||||
|
case 200: // ok
|
||||||
|
os.Stdout.Write(w.Bytes())
|
||||||
|
return
|
||||||
|
case 301, 302, 303, 307: // redirect
|
||||||
|
redirect := w.h.Get("Location")
|
||||||
|
if redirect == "" {
|
||||||
|
log.Fatalf("HTTP %d without Location header", w.code)
|
||||||
|
}
|
||||||
|
urlstr = redirect
|
||||||
|
default:
|
||||||
|
log.Fatalf("HTTP error %d", w.code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Fatalf("too many redirects")
|
||||||
|
}
|
||||||
|
|
||||||
|
if *httpAddr != "" {
|
||||||
|
// HTTP server mode.
|
||||||
|
var handler http.Handler = http.DefaultServeMux
|
||||||
|
if *verbose {
|
||||||
|
log.Printf("Go Documentation Server")
|
||||||
|
log.Printf("version = %s", runtime.Version())
|
||||||
|
log.Printf("address = %s", *httpAddr)
|
||||||
|
log.Printf("goroot = %s", *goroot)
|
||||||
|
log.Printf("tabwidth = %d", *tabwidth)
|
||||||
|
switch {
|
||||||
|
case !*indexEnabled:
|
||||||
|
log.Print("search index disabled")
|
||||||
|
case *maxResults > 0:
|
||||||
|
log.Printf("full text index enabled (maxresults = %d)", *maxResults)
|
||||||
|
default:
|
||||||
|
log.Print("identifier search index enabled")
|
||||||
|
}
|
||||||
|
fs.Fprint(os.Stderr)
|
||||||
|
handler = loggingHandler(handler)
|
||||||
|
}
|
||||||
|
|
||||||
|
registerPublicHandlers(http.DefaultServeMux)
|
||||||
|
registerPlaygroundHandlers(http.DefaultServeMux)
|
||||||
|
|
||||||
|
// Initialize default directory tree with corresponding timestamp.
|
||||||
|
// (Do it in a goroutine so that launch is quick.)
|
||||||
|
go initFSTree()
|
||||||
|
|
||||||
|
// Immediately update metadata.
|
||||||
|
updateMetadata()
|
||||||
|
// Periodically refresh metadata.
|
||||||
|
go refreshMetadataLoop()
|
||||||
|
|
||||||
|
// Initialize search index.
|
||||||
|
if *indexEnabled {
|
||||||
|
go indexer()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start http server.
|
||||||
|
if err := http.ListenAndServe(*httpAddr, handler); err != nil {
|
||||||
|
log.Fatalf("ListenAndServe %s: %v", *httpAddr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command line mode.
|
||||||
|
if *html {
|
||||||
|
packageText = packageHTML
|
||||||
|
searchText = packageHTML
|
||||||
|
}
|
||||||
|
|
||||||
|
if *query {
|
||||||
|
// Command-line queries.
|
||||||
|
for i := 0; i < flag.NArg(); i++ {
|
||||||
|
res, err := remoteSearch(flag.Arg(i))
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("remoteSearch: %s", err)
|
||||||
|
}
|
||||||
|
io.Copy(os.Stdout, res.Body)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine paths.
|
||||||
|
//
|
||||||
|
// If we are passed an operating system path like . or ./foo or /foo/bar or c:\mysrc,
|
||||||
|
// we need to map that path somewhere in the fs name space so that routines
|
||||||
|
// like getPageInfo will see it. We use the arbitrarily-chosen virtual path "/target"
|
||||||
|
// for this. That is, if we get passed a directory like the above, we map that
|
||||||
|
// directory so that getPageInfo sees it as /target.
|
||||||
|
const target = "/target"
|
||||||
|
const cmdPrefix = "cmd/"
|
||||||
|
path := flag.Arg(0)
|
||||||
|
var forceCmd bool
|
||||||
|
var abspath, relpath string
|
||||||
|
if filepath.IsAbs(path) {
|
||||||
|
fs.Bind(target, OS(path), "/", bindReplace)
|
||||||
|
abspath = target
|
||||||
|
} else if build.IsLocalImport(path) {
|
||||||
|
cwd, _ := os.Getwd() // ignore errors
|
||||||
|
path = filepath.Join(cwd, path)
|
||||||
|
fs.Bind(target, OS(path), "/", bindReplace)
|
||||||
|
abspath = target
|
||||||
|
} else if strings.HasPrefix(path, cmdPrefix) {
|
||||||
|
path = strings.TrimPrefix(path, cmdPrefix)
|
||||||
|
forceCmd = true
|
||||||
|
} else if bp, _ := build.Import(path, "", build.FindOnly); bp.Dir != "" && bp.ImportPath != "" {
|
||||||
|
fs.Bind(target, OS(bp.Dir), "/", bindReplace)
|
||||||
|
abspath = target
|
||||||
|
relpath = bp.ImportPath
|
||||||
|
} else {
|
||||||
|
abspath = pathpkg.Join(pkgHandler.fsRoot, path)
|
||||||
|
}
|
||||||
|
if relpath == "" {
|
||||||
|
relpath = abspath
|
||||||
|
}
|
||||||
|
|
||||||
|
var mode PageInfoMode
|
||||||
|
if relpath == builtinPkgPath {
|
||||||
|
// the fake built-in package contains unexported identifiers
|
||||||
|
mode = noFiltering
|
||||||
|
}
|
||||||
|
if *srcMode {
|
||||||
|
// only filter exports if we don't have explicit command-line filter arguments
|
||||||
|
if flag.NArg() > 1 {
|
||||||
|
mode |= noFiltering
|
||||||
|
}
|
||||||
|
mode |= showSource
|
||||||
|
}
|
||||||
|
|
||||||
|
// first, try as package unless forced as command
|
||||||
|
var info *PageInfo
|
||||||
|
if !forceCmd {
|
||||||
|
info = pkgHandler.getPageInfo(abspath, relpath, mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// second, try as command unless the path is absolute
|
||||||
|
// (the go command invokes godoc w/ absolute paths; don't override)
|
||||||
|
var cinfo *PageInfo
|
||||||
|
if !filepath.IsAbs(path) {
|
||||||
|
abspath = pathpkg.Join(cmdHandler.fsRoot, path)
|
||||||
|
cinfo = cmdHandler.getPageInfo(abspath, relpath, mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// determine what to use
|
||||||
|
if info == nil || info.IsEmpty() {
|
||||||
|
if cinfo != nil && !cinfo.IsEmpty() {
|
||||||
|
// only cinfo exists - switch to cinfo
|
||||||
|
info = cinfo
|
||||||
|
}
|
||||||
|
} else if cinfo != nil && !cinfo.IsEmpty() {
|
||||||
|
// both info and cinfo exist - use cinfo if info
|
||||||
|
// contains only subdirectory information
|
||||||
|
if info.PAst == nil && info.PDoc == nil {
|
||||||
|
info = cinfo
|
||||||
|
} else {
|
||||||
|
fmt.Printf("use 'godoc %s%s' for documentation on the %s command \n\n", cmdPrefix, relpath, relpath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if info == nil {
|
||||||
|
log.Fatalf("%s: no such directory or package", flag.Arg(0))
|
||||||
|
}
|
||||||
|
if info.Err != nil {
|
||||||
|
log.Fatalf("%v", info.Err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.PDoc != nil && info.PDoc.ImportPath == target {
|
||||||
|
// Replace virtual /target with actual argument from command line.
|
||||||
|
info.PDoc.ImportPath = flag.Arg(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have more than one argument, use the remaining arguments for filtering.
|
||||||
|
if flag.NArg() > 1 {
|
||||||
|
args := flag.Args()[1:]
|
||||||
|
rx := makeRx(args)
|
||||||
|
if rx == nil {
|
||||||
|
log.Fatalf("illegal regular expression from %v", args)
|
||||||
|
}
|
||||||
|
|
||||||
|
filter := func(s string) bool { return rx.MatchString(s) }
|
||||||
|
switch {
|
||||||
|
case info.PAst != nil:
|
||||||
|
cmap := ast.NewCommentMap(info.FSet, info.PAst, info.PAst.Comments)
|
||||||
|
ast.FilterFile(info.PAst, filter)
|
||||||
|
// Special case: Don't use templates for printing
|
||||||
|
// so we only get the filtered declarations without
|
||||||
|
// package clause or extra whitespace.
|
||||||
|
for i, d := range info.PAst.Decls {
|
||||||
|
// determine the comments associated with d only
|
||||||
|
comments := cmap.Filter(d).Comments()
|
||||||
|
cn := &printer.CommentedNode{Node: d, Comments: comments}
|
||||||
|
if i > 0 {
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
if *html {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writeNode(&buf, info.FSet, cn)
|
||||||
|
FormatText(os.Stdout, buf.Bytes(), -1, true, "", nil)
|
||||||
|
} else {
|
||||||
|
writeNode(os.Stdout, info.FSet, cn)
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
return
|
||||||
|
|
||||||
|
case info.PDoc != nil:
|
||||||
|
info.PDoc.Filter(filter)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := packageText.Execute(os.Stdout, info); err != nil {
|
||||||
|
log.Printf("packageText.Execute: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// An httpWriter is an http.ResponseWriter writing to a bytes.Buffer.
|
||||||
|
type httpWriter struct {
|
||||||
|
bytes.Buffer
|
||||||
|
h http.Header
|
||||||
|
code int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *httpWriter) Header() http.Header { return w.h }
|
||||||
|
func (w *httpWriter) WriteHeader(code int) { w.code = code }
|
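An editor's sketch, not part of the commit, of how makeRx in main.go turns the remaining command-line arguments into a filter: plain names must match exactly, while anything that already looks like a regular expression is passed through unchanged. The argument list is invented for illustration.

// exampleMakeRx is an illustrative sketch only.
func exampleMakeRx() {
	rx := makeRx([]string{"Cipher", "NewCMAC", "New.*"})
	// rx is equivalent to ^Cipher$|^NewCMAC$|New.*
	fmt.Println(rx.MatchString("NewCMAC")) // true
	fmt.Println(rx.MatchString("Ciphers")) // false: the plain name is anchored
}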
37
cmd/godoc/parser.go
Normal file
@ -0,0 +1,37 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file contains support functions for parsing .go files
// accessed via godoc's file system fs.

package main

import (
	"go/ast"
	"go/parser"
	"go/token"
	pathpkg "path"
)

func parseFile(fset *token.FileSet, filename string, mode parser.Mode) (*ast.File, error) {
	src, err := ReadFile(fs, filename)
	if err != nil {
		return nil, err
	}
	return parser.ParseFile(fset, filename, src, mode)
}

func parseFiles(fset *token.FileSet, abspath string, localnames []string) (map[string]*ast.File, error) {
	files := make(map[string]*ast.File)
	for _, f := range localnames {
		absname := pathpkg.Join(abspath, f)
		file, err := parseFile(fset, absname, parser.ParseComments)
		if err != nil {
			return nil, err
		}
		files[absname] = file
	}

	return files, nil
}
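A short editor's sketch, not part of the commit: how the package handlers would use parseFiles to load all Go files of one directory through the name space. The directory and file names are placeholders.

// exampleParseFiles is an illustrative sketch only.
func exampleParseFiles() {
	fset := token.NewFileSet()
	files, err := parseFiles(fset, "/src/pkg/example", []string{"a.go", "b.go"})
	if err != nil {
		log.Fatal(err)
	}
	for name := range files {
		fmt.Println("parsed", name) // keys are absolute names within the name space
	}
}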
35
cmd/godoc/play-appengine.go
Normal file
@ -0,0 +1,35 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// App Engine godoc Playground functionality.

// +build appengine

package main

import (
	"io"
	"net/http"

	"appengine"
	"appengine/urlfetch"
)

func bounceToPlayground(w http.ResponseWriter, req *http.Request) {
	c := appengine.NewContext(req)
	client := urlfetch.Client(c)
	url := playgroundBaseURL + req.URL.Path
	defer req.Body.Close()
	resp, err := client.Post(url, req.Header.Get("Content-type"), req.Body)
	if err != nil {
		http.Error(w, "Internal Server Error", 500)
		c.Errorf("making POST request: %v", err)
		return
	}
	defer resp.Body.Close()
	if _, err := io.Copy(w, resp.Body); err != nil {
		http.Error(w, "Internal Server Error", 500)
		c.Errorf("making POST request: %v", err)
	}
}
41
cmd/godoc/play-local.go
Normal file
@ -0,0 +1,41 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Stand-alone godoc Playground functionality.

// +build !appengine

package main

import (
	"io"
	"net/http"
	"net/url"
)

var playgroundScheme, playgroundHost string

func init() {
	u, err := url.Parse(playgroundBaseURL)
	if err != nil {
		panic(err)
	}
	playgroundScheme = u.Scheme
	playgroundHost = u.Host
}

// bounceToPlayground forwards the request to play.golang.org.
func bounceToPlayground(w http.ResponseWriter, req *http.Request) {
	defer req.Body.Close()
	req.URL.Scheme = playgroundScheme
	req.URL.Host = playgroundHost
	resp, err := http.Post(req.URL.String(), req.Header.Get("Content-type"), req.Body)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	w.WriteHeader(resp.StatusCode)
	io.Copy(w, resp.Body)
	resp.Body.Close()
}
52
cmd/godoc/play.go
Normal file
@ -0,0 +1,52 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Common Playground functionality.

package main

import (
	"encoding/json"
	"fmt"
	"go/format"
	"net/http"
)

// The server that will service compile and share requests.
const playgroundBaseURL = "http://play.golang.org"

func registerPlaygroundHandlers(mux *http.ServeMux) {
	if *showPlayground {
		mux.HandleFunc("/compile", bounceToPlayground)
		mux.HandleFunc("/share", bounceToPlayground)
	} else {
		mux.HandleFunc("/compile", disabledHandler)
		mux.HandleFunc("/share", disabledHandler)
	}
	http.HandleFunc("/fmt", fmtHandler)
}

type fmtResponse struct {
	Body  string
	Error string
}

// fmtHandler takes a Go program in its "body" form value, formats it with
// standard gofmt formatting, and writes a fmtResponse as a JSON object.
func fmtHandler(w http.ResponseWriter, r *http.Request) {
	resp := new(fmtResponse)
	body, err := format.Source([]byte(r.FormValue("body")))
	if err != nil {
		resp.Error = err.Error()
	} else {
		resp.Body = string(body)
	}
	json.NewEncoder(w).Encode(resp)
}

// disabledHandler serves a 501 "Not Implemented" response.
func disabledHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusNotImplemented)
	fmt.Fprint(w, "This functionality is not available via local godoc.")
}
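An editor's sketch, not part of the commit: exercising fmtHandler directly with net/http/httptest (the sketch also assumes the net/url and strings packages). The handler reads the "body" form value and replies with a JSON fmtResponse.

// exampleFmt is an illustrative sketch only.
func exampleFmt() {
	form := url.Values{"body": {"package main\nfunc   main(){ }\n"}}
	req, _ := http.NewRequest("POST", "/fmt", strings.NewReader(form.Encode()))
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	rec := httptest.NewRecorder()
	fmtHandler(rec, req)
	fmt.Print(rec.Body.String()) // {"Body":"package main\n\nfunc main() {}\n","Error":""}
}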
140
cmd/godoc/setup-godoc-app.bash
Executable file
@ -0,0 +1,140 @@
#!/usr/bin/env bash

# Copyright 2011 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

# This script creates a complete godoc app in $APPDIR.
# It copies the cmd/godoc and src/pkg/go/... sources from GOROOT,
# synthesizes an app.yaml file, and creates the .zip, index, and
# configuration files.
#
# If an argument is provided it is assumed to be the app-engine godoc directory.
# Without an argument, $APPDIR is used instead. If GOROOT is not set, "go env"
# is consulted to find the $GOROOT.
#
# The script creates a .zip file representing the $GOROOT file system
# and computes the corresponding search index files. These files are then
# copied to $APPDIR. A corresponding godoc configuration file is created
# in $APPDIR/appconfig.go.

ZIPFILE=godoc.zip
INDEXFILE=godoc.index
SPLITFILES=index.split.
CONFIGFILE=godoc/appconfig.go

error() {
	echo "error: $1"
	exit 2
}

getArgs() {
	if [ -z $GOROOT ]; then
		GOROOT=$(go env GOROOT)
		echo "GOROOT not set explicitly, using $GOROOT instead"
	fi
	if [ -z $APPDIR ]; then
		if [ $# == 0 ]; then
			error "APPDIR not set, and no argument provided"
		fi
		APPDIR=$1
		echo "APPDIR not set, using argument instead"
	fi

	# safety checks
	if [ ! -d $GOROOT ]; then
		error "$GOROOT is not a directory"
	fi
	if [ ! -x $GOROOT/bin/godoc ]; then
		error "$GOROOT/bin/godoc does not exist or is not executable"
	fi
	if [ -e $APPDIR ]; then
		error "$APPDIR exists; check and remove it before trying again"
	fi

	# reporting
	echo "GOROOT = $GOROOT"
	echo "APPDIR = $APPDIR"
}

copyGodoc() {
	echo "*** copy $GOROOT/src/cmd/godoc to $APPDIR/godoc"
	cp -r $GOROOT/src/cmd/godoc $APPDIR/godoc
}

copyGoPackages() {
	echo "*** copy $GOROOT/src/pkg/go to $APPDIR/newgo and rewrite imports"
	cp -r $GOROOT/src/pkg/go $APPDIR/newgo
	find $APPDIR/newgo -type d -name testdata | xargs rm -r
	gofiles=$(find $APPDIR -name '*.go')
	sed -i '' 's_^\(."\)\(go/[a-z]*\)"$_\1new\2"_' $gofiles
	sed -i '' 's_^\(import "\)\(go/[a-z]*\)"$_\1new\2"_' $gofiles
}

makeAppYaml() {
	echo "*** make $APPDIR/app.yaml"
	cat > $APPDIR/app.yaml <<EOF
application: godoc
version: 1
runtime: go
api_version: go1

handlers:
- url: /.*
  script: _go_app
EOF
}

makeZipfile() {
	echo "*** make $APPDIR/$ZIPFILE"
	zip -q -r $APPDIR/$ZIPFILE $GOROOT -i \*.go -i \*.html -i \*.xml -i \*.css -i \*.js -i \*.txt -i \*.c -i \*.h -i \*.s -i \*.png -i \*.jpg -i \*.sh -i \*.ico
}

makeIndexfile() {
	echo "*** make $APPDIR/$INDEXFILE"
	OUT=/tmp/godoc.out
	$GOROOT/bin/godoc -write_index -index_files=$APPDIR/$INDEXFILE -zip=$APPDIR/$ZIPFILE 2> $OUT
	if [ $? != 0 ]; then
		error "$GOROOT/bin/godoc failed - see $OUT for details"
	fi
}

splitIndexfile() {
	echo "*** split $APPDIR/$INDEXFILE"
	split -b8m $APPDIR/$INDEXFILE $APPDIR/$SPLITFILES
}

makeConfigfile() {
	echo "*** make $APPDIR/$CONFIGFILE"
	cat > $APPDIR/$CONFIGFILE <<EOF
package main

// GENERATED FILE - DO NOT MODIFY BY HAND.
// (generated by $GOROOT/src/cmd/godoc/setup-godoc-app.bash)

const (
	// .zip filename
	zipFilename = "$ZIPFILE"

	// goroot directory in .zip file
	zipGoroot = "$GOROOT"

	// glob pattern describing search index files
	// (if empty, the index is built at run-time)
	indexFilenames = "$SPLITFILES*"
)
EOF
}

getArgs "$@"
set -e
mkdir $APPDIR
copyGodoc
copyGoPackages
makeAppYaml
makeZipfile
makeIndexfile
splitIndexfile
makeConfigfile

echo "*** setup complete"
112
cmd/godoc/snippet.go
Normal file
@ -0,0 +1,112 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file contains the infrastructure to create a code
// snippet for search results.
//
// Note: At the moment, this only creates HTML snippets.

package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/token"
)

type Snippet struct {
	Line int
	Text string // HTML-escaped
}

func newSnippet(fset *token.FileSet, decl ast.Decl, id *ast.Ident) *Snippet {
	// TODO instead of pretty-printing the node, should use the original source instead
	var buf1 bytes.Buffer
	writeNode(&buf1, fset, decl)
	// wrap text with <pre> tag
	var buf2 bytes.Buffer
	buf2.WriteString("<pre>")
	FormatText(&buf2, buf1.Bytes(), -1, true, id.Name, nil)
	buf2.WriteString("</pre>")
	return &Snippet{fset.Position(id.Pos()).Line, buf2.String()}
}

func findSpec(list []ast.Spec, id *ast.Ident) ast.Spec {
	for _, spec := range list {
		switch s := spec.(type) {
		case *ast.ImportSpec:
			if s.Name == id {
				return s
			}
		case *ast.ValueSpec:
			for _, n := range s.Names {
				if n == id {
					return s
				}
			}
		case *ast.TypeSpec:
			if s.Name == id {
				return s
			}
		}
	}
	return nil
}

func genSnippet(fset *token.FileSet, d *ast.GenDecl, id *ast.Ident) *Snippet {
	s := findSpec(d.Specs, id)
	if s == nil {
		return nil // declaration doesn't contain id - exit gracefully
	}

	// only use the spec containing the id for the snippet
	dd := &ast.GenDecl{
		Doc:    d.Doc,
		TokPos: d.Pos(),
		Tok:    d.Tok,
		Lparen: d.Lparen,
		Specs:  []ast.Spec{s},
		Rparen: d.Rparen,
	}

	return newSnippet(fset, dd, id)
}

func funcSnippet(fset *token.FileSet, d *ast.FuncDecl, id *ast.Ident) *Snippet {
	if d.Name != id {
		return nil // declaration doesn't contain id - exit gracefully
	}

	// only use the function signature for the snippet
	dd := &ast.FuncDecl{
		Doc:  d.Doc,
		Recv: d.Recv,
		Name: d.Name,
		Type: d.Type,
	}

	return newSnippet(fset, dd, id)
}

// NewSnippet creates a text snippet from a declaration decl containing an
// identifier id. Parts of the declaration not containing the identifier
// may be removed for a more compact snippet.
//
func NewSnippet(fset *token.FileSet, decl ast.Decl, id *ast.Ident) (s *Snippet) {
	switch d := decl.(type) {
	case *ast.GenDecl:
		s = genSnippet(fset, d, id)
	case *ast.FuncDecl:
		s = funcSnippet(fset, d, id)
	}

	// handle failure gracefully
	if s == nil {
		var buf bytes.Buffer
		fmt.Fprintf(&buf, `<span class="alert">could not generate a snippet for <span class="highlight">%s</span></span>`, id.Name)
		s = &Snippet{fset.Position(id.Pos()).Line, buf.String()}
	}
	return
}
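As a rough illustration of the inputs NewSnippet expects, the standalone sketch below uses go/parser to obtain a declaration and the identifier it declares. It deliberately stops short of calling NewSnippet itself, since that also needs writeNode and FormatText from other files in this package; the file name and source text are invented for the example.

// Standalone sketch: locate the (decl, id) pair that NewSnippet would consume.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"log"
)

const exampleSrc = `package p

// Answer is an example constant.
const Answer = 42
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", exampleSrc, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	// Walk the top-level declarations looking for the identifier "Answer".
	for _, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if !ok {
			continue
		}
		for _, spec := range gen.Specs {
			if vs, ok := spec.(*ast.ValueSpec); ok {
				for _, id := range vs.Names {
					if id.Name == "Answer" {
						fmt.Printf("found %s at line %d\n",
							id.Name, fset.Position(id.Pos()).Line)
					}
				}
			}
		}
	}
}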
179
cmd/godoc/spec.go
Normal file
@ -0,0 +1,179 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

// This file contains the mechanism to "linkify" html source
// text containing EBNF sections (as found in go_spec.html).
// The result is the input source text with the EBNF sections
// modified such that identifiers are linked to the respective
// definitions.

import (
	"bytes"
	"fmt"
	"io"
	"text/scanner"
)

type ebnfParser struct {
	out     io.Writer // parser output
	src     []byte    // parser input
	scanner scanner.Scanner
	prev    int    // offset of previous token
	pos     int    // offset of current token
	tok     rune   // one token look-ahead
	lit     string // token literal
}

func (p *ebnfParser) flush() {
	p.out.Write(p.src[p.prev:p.pos])
	p.prev = p.pos
}

func (p *ebnfParser) next() {
	p.tok = p.scanner.Scan()
	p.pos = p.scanner.Position.Offset
	p.lit = p.scanner.TokenText()
}

func (p *ebnfParser) printf(format string, args ...interface{}) {
	p.flush()
	fmt.Fprintf(p.out, format, args...)
}

func (p *ebnfParser) errorExpected(msg string) {
	p.printf(`<span class="highlight">error: expected %s, found %s</span>`, msg, scanner.TokenString(p.tok))
}

func (p *ebnfParser) expect(tok rune) {
	if p.tok != tok {
		p.errorExpected(scanner.TokenString(tok))
	}
	p.next() // make progress in any case
}

func (p *ebnfParser) parseIdentifier(def bool) {
	if p.tok == scanner.Ident {
		name := p.lit
		if def {
			p.printf(`<a id="%s">%s</a>`, name, name)
		} else {
			p.printf(`<a href="#%s" class="noline">%s</a>`, name, name)
		}
		p.prev += len(name) // skip identifier when printing next time
		p.next()
	} else {
		p.expect(scanner.Ident)
	}
}

func (p *ebnfParser) parseTerm() bool {
	switch p.tok {
	case scanner.Ident:
		p.parseIdentifier(false)

	case scanner.String:
		p.next()
		const ellipsis = '…' // U+2026, the horizontal ellipsis character
		if p.tok == ellipsis {
			p.next()
			p.expect(scanner.String)
		}

	case '(':
		p.next()
		p.parseExpression()
		p.expect(')')

	case '[':
		p.next()
		p.parseExpression()
		p.expect(']')

	case '{':
		p.next()
		p.parseExpression()
		p.expect('}')

	default:
		return false // no term found
	}

	return true
}

func (p *ebnfParser) parseSequence() {
	if !p.parseTerm() {
		p.errorExpected("term")
	}
	for p.parseTerm() {
	}
}

func (p *ebnfParser) parseExpression() {
	for {
		p.parseSequence()
		if p.tok != '|' {
			break
		}
		p.next()
	}
}

func (p *ebnfParser) parseProduction() {
	p.parseIdentifier(true)
	p.expect('=')
	if p.tok != '.' {
		p.parseExpression()
	}
	p.expect('.')
}

func (p *ebnfParser) parse(out io.Writer, src []byte) {
	// initialize ebnfParser
	p.out = out
	p.src = src
	p.scanner.Init(bytes.NewBuffer(src))
	p.next() // initializes pos, tok, lit

	// process source
	for p.tok != scanner.EOF {
		p.parseProduction()
	}
	p.flush()
}

// Markers around EBNF sections
var (
	openTag  = []byte(`<pre class="ebnf">`)
	closeTag = []byte(`</pre>`)
)

func Linkify(out io.Writer, src []byte) {
	for len(src) > 0 {
		// i: beginning of EBNF text (or end of source)
		i := bytes.Index(src, openTag)
		if i < 0 {
			i = len(src) - len(openTag)
		}
		i += len(openTag)

		// j: end of EBNF text (or end of source)
		j := bytes.Index(src[i:], closeTag) // close marker
		if j < 0 {
			j = len(src) - i
		}
		j += i

		// write text before EBNF
		out.Write(src[0:i])
		// process EBNF
		var p ebnfParser
		p.parse(out, src[i:j])

		// advance
		src = src[j:]
	}
}
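A small sketch of how Linkify could be driven, assuming it is compiled into the same package as spec.go; the HTML fragment and the EBNF productions are invented for the example. Identifiers on the left of "=" become anchors, uses on the right become links.

// Hypothetical driver for Linkify, placed next to spec.go in package main.
package main

import (
	"bytes"
	"os"
)

func linkifyExample() {
	src := []byte(`<p>Some prose.</p>
<pre class="ebnf">
Expression = Term { "|" Term } .
Term       = identifier .
</pre>`)
	var out bytes.Buffer
	Linkify(&out, src)         // rewrites the EBNF section with <a> tags
	os.Stdout.Write(out.Bytes())
}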
182
cmd/godoc/template.go
Normal file
@ -0,0 +1,182 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Template support for writing HTML documents.
// Documents that include Template: true in their
// metadata are executed as input to text/template.
//
// This file defines functions for those templates to invoke.

// The template uses the function "code" to inject program
// source into the output by extracting code from files and
// injecting them as HTML-escaped <pre> blocks.
//
// The syntax is simple: 1, 2, or 3 space-separated arguments:
//
// Whole file:
//	{{code "foo.go"}}
// One line (here the signature of main):
//	{{code "foo.go" `/^func.main/`}}
// Block of text, determined by start and end (here the body of main):
//	{{code "foo.go" `/^func.main/` `/^}/`
//
// Patterns can be `/regular expression/`, a decimal number, or "$"
// to signify the end of the file. In multi-line matches,
// lines that end with the four characters
//	OMIT
// are omitted from the output, making it easy to provide marker
// lines in the input that will not appear in the output but are easy
// to identify by pattern.

package main

import (
	"bytes"
	"fmt"
	"log"
	"regexp"
	"strings"
	"text/template"
)

// Functions in this file panic on error, but the panic is recovered
// to an error by 'code'.

var templateFuncs = template.FuncMap{
	"code": code,
}

// contents reads and returns the content of the named file
// (from the virtual file system, so for example /doc refers to $GOROOT/doc).
func contents(name string) string {
	file, err := ReadFile(fs, name)
	if err != nil {
		log.Panic(err)
	}
	return string(file)
}

// stringFor returns a textual representation of the arg, formatted according to its nature.
func stringFor(arg interface{}) string {
	switch arg := arg.(type) {
	case int:
		return fmt.Sprintf("%d", arg)
	case string:
		if len(arg) > 2 && arg[0] == '/' && arg[len(arg)-1] == '/' {
			return fmt.Sprintf("%#q", arg)
		}
		return fmt.Sprintf("%q", arg)
	default:
		log.Panicf("unrecognized argument: %v type %T", arg, arg)
	}
	return ""
}

func code(file string, arg ...interface{}) (s string, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("%v", r)
		}
	}()

	text := contents(file)
	var command string
	switch len(arg) {
	case 0:
		// text is already whole file.
		command = fmt.Sprintf("code %q", file)
	case 1:
		command = fmt.Sprintf("code %q %s", file, stringFor(arg[0]))
		text = oneLine(file, text, arg[0])
	case 2:
		command = fmt.Sprintf("code %q %s %s", file, stringFor(arg[0]), stringFor(arg[1]))
		text = multipleLines(file, text, arg[0], arg[1])
	default:
		return "", fmt.Errorf("incorrect code invocation: code %q %q", file, arg)
	}
	// Trim spaces from output.
	text = strings.Trim(text, "\n")
	// Replace tabs by spaces, which work better in HTML.
	text = strings.Replace(text, "\t", "    ", -1)
	var buf bytes.Buffer
	// HTML-escape text and syntax-color comments like elsewhere.
	FormatText(&buf, []byte(text), -1, true, "", nil)
	// Include the command as a comment.
	text = fmt.Sprintf("<pre><!--{{%s}}\n-->%s</pre>", command, buf.Bytes())
	return text, nil
}

// parseArg returns the integer or string value of the argument and tells which it is.
func parseArg(arg interface{}, file string, max int) (ival int, sval string, isInt bool) {
	switch n := arg.(type) {
	case int:
		if n <= 0 || n > max {
			log.Panicf("%q:%d is out of range", file, n)
		}
		return n, "", true
	case string:
		return 0, n, false
	}
	log.Panicf("unrecognized argument %v type %T", arg, arg)
	return
}

// oneLine returns the single line generated by a two-argument code invocation.
func oneLine(file, text string, arg interface{}) string {
	lines := strings.SplitAfter(contents(file), "\n")
	line, pattern, isInt := parseArg(arg, file, len(lines))
	if isInt {
		return lines[line-1]
	}
	return lines[match(file, 0, lines, pattern)-1]
}

// multipleLines returns the text generated by a three-argument code invocation.
func multipleLines(file, text string, arg1, arg2 interface{}) string {
	lines := strings.SplitAfter(contents(file), "\n")
	line1, pattern1, isInt1 := parseArg(arg1, file, len(lines))
	line2, pattern2, isInt2 := parseArg(arg2, file, len(lines))
	if !isInt1 {
		line1 = match(file, 0, lines, pattern1)
	}
	if !isInt2 {
		line2 = match(file, line1, lines, pattern2)
	} else if line2 < line1 {
		log.Panicf("lines out of order for %q: %d %d", text, line1, line2)
	}
	for k := line1 - 1; k < line2; k++ {
		if strings.HasSuffix(lines[k], "OMIT\n") {
			lines[k] = ""
		}
	}
	return strings.Join(lines[line1-1:line2], "")
}

// match identifies the input line that matches the pattern in a code invocation.
// If start>0, match lines starting there rather than at the beginning.
// The return value is 1-indexed.
func match(file string, start int, lines []string, pattern string) int {
	// $ matches the end of the file.
	if pattern == "$" {
		if len(lines) == 0 {
			log.Panicf("%q: empty file", file)
		}
		return len(lines)
	}
	// /regexp/ matches the line that matches the regexp.
	if len(pattern) > 2 && pattern[0] == '/' && pattern[len(pattern)-1] == '/' {
		re, err := regexp.Compile(pattern[1 : len(pattern)-1])
		if err != nil {
			log.Panic(err)
		}
		for i := start; i < len(lines); i++ {
			if re.MatchString(lines[i]) {
				return i + 1
			}
		}
		log.Panicf("%s: no match for %#q", file, pattern)
	}
	log.Panicf("unrecognized pattern: %q", pattern)
	return 0
}
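To make the connection to text/template concrete, here is a minimal sketch, assuming it compiles alongside template.go in package main, of how templateFuncs might be attached when a metadata-flagged document is executed. The template text and file path are invented, and the {{code}} call only succeeds once the virtual file system (the package-level fs) has been initialized.

// Hypothetical sketch of wiring templateFuncs into a document template.
package main

import (
	"os"
	"text/template"
)

func renderDocExample() error {
	const doc = `<h2>Example</h2>
{{code "/doc/progs/hello.go"}}
`
	t, err := template.New("doc").Funcs(templateFuncs).Parse(doc)
	if err != nil {
		return err
	}
	// Executing the template calls code(), which reads the file from the
	// virtual file system and injects it as an HTML-escaped <pre> block.
	// Regexp or line-number arguments could be added to the {{code}} action
	// to select just part of the file.
	return t.Execute(os.Stdout, nil)
}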
88
cmd/godoc/throttle.go
Normal file
@ -0,0 +1,88 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import "time"

// A Throttle permits throttling of a goroutine by
// calling the Throttle method repeatedly.
//
type Throttle struct {
	f  float64       // f = (1-r)/r for 0 < r < 1
	dt time.Duration // minimum run time slice; >= 0
	tr time.Duration // accumulated time running
	ts time.Duration // accumulated time stopped
	tt time.Time     // earliest throttle time (= time Throttle returned + tm)
}

// NewThrottle creates a new Throttle with a throttle value r and
// a minimum allocated run time slice of dt:
//
//	r == 0: "empty" throttle; the goroutine is always sleeping
//	r == 1: full throttle; the goroutine is never sleeping
//
// A value of r == 0.6 throttles a goroutine such that it runs
// approx. 60% of the time, and sleeps approx. 40% of the time.
// Values of r < 0 or r > 1 are clamped down to values between 0 and 1.
// Values of dt < 0 are set to 0.
//
func NewThrottle(r float64, dt time.Duration) *Throttle {
	var f float64
	switch {
	case r <= 0:
		f = -1 // indicates always sleep
	case r >= 1:
		f = 0 // assume r == 1 (never sleep)
	default:
		// 0 < r < 1
		f = (1 - r) / r
	}
	if dt < 0 {
		dt = 0
	}
	return &Throttle{f: f, dt: dt, tt: time.Now().Add(dt)}
}

// Throttle calls time.Sleep such that over time the ratio tr/ts between
// accumulated run (tr) and sleep times (ts) approximates the value 1/(1-r)
// where r is the throttle value. Throttle returns immediately (w/o sleeping)
// if less than tm ns have passed since the last call to Throttle.
//
func (p *Throttle) Throttle() {
	if p.f < 0 {
		select {} // always sleep
	}

	t0 := time.Now()
	if t0.Before(p.tt) {
		return // keep running (minimum time slice not exhausted yet)
	}

	// accumulate running time
	p.tr += t0.Sub(p.tt) + p.dt

	// compute sleep time
	// Over time we want:
	//
	//	tr/ts = r/(1-r)
	//
	// Thus:
	//
	//	ts = tr*f with f = (1-r)/r
	//
	// After some incremental run time δr added to the total run time
	// tr, the incremental sleep-time δs to get to the same ratio again
	// after waking up from time.Sleep is:
	if δs := time.Duration(float64(p.tr)*p.f) - p.ts; δs > 0 {
		time.Sleep(δs)
	}

	// accumulate (actual) sleep time
	t1 := time.Now()
	p.ts += t1.Sub(t0)

	// set earliest next throttle time
	p.tt = t1.Add(p.dt)
}
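A minimal usage sketch, assuming it compiles alongside throttle.go in package main: an indexing-style worker loop that yields roughly 40% of its time to other goroutines. The work items and timings are placeholders.

// Hypothetical example of throttling a long-running loop.
package main

import "time"

func throttledWork(items []string) {
	// Run about 60% of the time, in slices of at least 100ms.
	t := NewThrottle(0.6, 100*time.Millisecond)
	for range items {
		// ... do one unit of work here ...
		t.Throttle() // may sleep to maintain the 60/40 run/sleep ratio
	}
}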
91
cmd/godoc/utils.go
Normal file
@ -0,0 +1,91 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file contains support functionality for godoc.

package main

import (
	pathpkg "path"
	"sync"
	"time"
	"unicode/utf8"
)

// An RWValue wraps a value and permits mutually exclusive
// access to it and records the time the value was last set.
//
type RWValue struct {
	mutex     sync.RWMutex
	value     interface{}
	timestamp time.Time // time of last set()
}

func (v *RWValue) set(value interface{}) {
	v.mutex.Lock()
	v.value = value
	v.timestamp = time.Now()
	v.mutex.Unlock()
}

func (v *RWValue) get() (interface{}, time.Time) {
	v.mutex.RLock()
	defer v.mutex.RUnlock()
	return v.value, v.timestamp
}

// isText returns true if a significant prefix of s looks like correct UTF-8;
// that is, if it is likely that s is human-readable text.
//
func isText(s []byte) bool {
	const max = 1024 // at least utf8.UTFMax
	if len(s) > max {
		s = s[0:max]
	}
	for i, c := range string(s) {
		if i+utf8.UTFMax > len(s) {
			// last char may be incomplete - ignore
			break
		}
		if c == 0xFFFD || c < ' ' && c != '\n' && c != '\t' && c != '\f' {
			// decoding error or control character - not a text file
			return false
		}
	}
	return true
}

// textExt[x] is true if the extension x indicates a text file, and false otherwise.
var textExt = map[string]bool{
	".css": false, // must be served raw
	".js":  false, // must be served raw
}

// isTextFile returns true if the file has a known extension indicating
// a text file, or if a significant chunk of the specified file looks like
// correct UTF-8; that is, if it is likely that the file contains human-
// readable text.
//
func isTextFile(filename string) bool {
	// if the extension is known, use it for decision making
	if isText, found := textExt[pathpkg.Ext(filename)]; found {
		return isText
	}

	// the extension is not known; read an initial chunk
	// of the file and check if it looks like text
	f, err := fs.Open(filename)
	if err != nil {
		return false
	}
	defer f.Close()

	var buf [1024]byte
	n, err := f.Read(buf[0:])
	if err != nil {
		return false
	}

	return isText(buf[0:n])
}
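A short sketch, assuming the same package as utils.go, of the access pattern RWValue is meant for: one writer updating a cached value while readers check both the value and how old it is. The variable and function names are invented for illustration.

// Hypothetical illustration of RWValue's intended use.
package main

import (
	"fmt"
	"time"
)

var searchIndexExample RWValue // e.g. a cached search index

func refreshExample() {
	searchIndexExample.set("freshly built index") // writer side
}

func reportExample() {
	v, ts := searchIndexExample.get() // reader side
	if v == nil {
		fmt.Println("index not built yet")
		return
	}
	fmt.Printf("index %v, built %s ago\n", v, time.Since(ts))
}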
236
cmd/godoc/zip.go
Normal file
@ -0,0 +1,236 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file provides an implementation of the FileSystem
// interface based on the contents of a .zip file.
//
// Assumptions:
//
// - The file paths stored in the zip file must use a slash ('/') as path
//   separator; and they must be relative (i.e., they must not start with
//   a '/' - this is usually the case if the file was created w/o special
//   options).
// - The zip file system treats the file paths found in the zip internally
//   like absolute paths w/o a leading '/'; i.e., the paths are considered
//   relative to the root of the file system.
// - All path arguments to file system methods must be absolute paths.

package main

import (
	"archive/zip"
	"fmt"
	"io"
	"os"
	"path"
	"sort"
	"strings"
	"time"
)

// zipFI is the zip-file based implementation of FileInfo
type zipFI struct {
	name string    // directory-local name
	file *zip.File // nil for a directory
}

func (fi zipFI) Name() string {
	return fi.name
}

func (fi zipFI) Size() int64 {
	if f := fi.file; f != nil {
		return int64(f.UncompressedSize)
	}
	return 0 // directory
}

func (fi zipFI) ModTime() time.Time {
	if f := fi.file; f != nil {
		return f.ModTime()
	}
	return time.Time{} // directory has no modified time entry
}

func (fi zipFI) Mode() os.FileMode {
	if fi.file == nil {
		// Unix directories typically are executable, hence 555.
		return os.ModeDir | 0555
	}
	return 0444
}

func (fi zipFI) IsDir() bool {
	return fi.file == nil
}

func (fi zipFI) Sys() interface{} {
	return nil
}

// zipFS is the zip-file based implementation of FileSystem
type zipFS struct {
	*zip.ReadCloser
	list zipList
	name string
}

func (fs *zipFS) String() string {
	return "zip(" + fs.name + ")"
}

func (fs *zipFS) Close() error {
	fs.list = nil
	return fs.ReadCloser.Close()
}

func zipPath(name string) string {
	name = path.Clean(name)
	if !path.IsAbs(name) {
		panic(fmt.Sprintf("stat: not an absolute path: %s", name))
	}
	return name[1:] // strip leading '/'
}

func (fs *zipFS) stat(abspath string) (int, zipFI, error) {
	i, exact := fs.list.lookup(abspath)
	if i < 0 {
		// abspath has leading '/' stripped - print it explicitly
		return -1, zipFI{}, fmt.Errorf("file not found: /%s", abspath)
	}
	_, name := path.Split(abspath)
	var file *zip.File
	if exact {
		file = fs.list[i] // exact match found - must be a file
	}
	return i, zipFI{name, file}, nil
}

func (fs *zipFS) Open(abspath string) (readSeekCloser, error) {
	_, fi, err := fs.stat(zipPath(abspath))
	if err != nil {
		return nil, err
	}
	if fi.IsDir() {
		return nil, fmt.Errorf("Open: %s is a directory", abspath)
	}
	r, err := fi.file.Open()
	if err != nil {
		return nil, err
	}
	return &zipSeek{fi.file, r}, nil
}

type zipSeek struct {
	file *zip.File
	io.ReadCloser
}

func (f *zipSeek) Seek(offset int64, whence int) (int64, error) {
	if whence == 0 && offset == 0 {
		r, err := f.file.Open()
		if err != nil {
			return 0, err
		}
		f.Close()
		f.ReadCloser = r
		return 0, nil
	}
	return 0, fmt.Errorf("unsupported Seek in %s", f.file.Name)
}

func (fs *zipFS) Lstat(abspath string) (os.FileInfo, error) {
	_, fi, err := fs.stat(zipPath(abspath))
	return fi, err
}

func (fs *zipFS) Stat(abspath string) (os.FileInfo, error) {
	_, fi, err := fs.stat(zipPath(abspath))
	return fi, err
}

func (fs *zipFS) ReadDir(abspath string) ([]os.FileInfo, error) {
	path := zipPath(abspath)
	i, fi, err := fs.stat(path)
	if err != nil {
		return nil, err
	}
	if !fi.IsDir() {
		return nil, fmt.Errorf("ReadDir: %s is not a directory", abspath)
	}

	var list []os.FileInfo
	dirname := path + "/"
	prevname := ""
	for _, e := range fs.list[i:] {
		if !strings.HasPrefix(e.Name, dirname) {
			break // not in the same directory anymore
		}
		name := e.Name[len(dirname):] // local name
		file := e
		if i := strings.IndexRune(name, '/'); i >= 0 {
			// We infer directories from files in subdirectories.
			// If we have x/y, return a directory entry for x.
			name = name[0:i] // keep local directory name only
			file = nil
		}
		// If we have x/y and x/z, don't return two directory entries for x.
		// TODO(gri): It should be possible to do this more efficiently
		// by determining the (fs.list) range of local directory entries
		// (via two binary searches).
		if name != prevname {
			list = append(list, zipFI{name, file})
			prevname = name
		}
	}

	return list, nil
}

func NewZipFS(rc *zip.ReadCloser, name string) FileSystem {
	list := make(zipList, len(rc.File))
	copy(list, rc.File) // sort a copy of rc.File
	sort.Sort(list)
	return &zipFS{rc, list, name}
}

type zipList []*zip.File

// zipList implements sort.Interface
func (z zipList) Len() int           { return len(z) }
func (z zipList) Less(i, j int) bool { return z[i].Name < z[j].Name }
func (z zipList) Swap(i, j int)      { z[i], z[j] = z[j], z[i] }

// lookup returns the smallest index of an entry with an exact match
// for name, or an inexact match starting with name/. If there is no
// such entry, the result is -1, false.
func (z zipList) lookup(name string) (index int, exact bool) {
	// look for exact match first (name comes before name/ in z)
	i := sort.Search(len(z), func(i int) bool {
		return name <= z[i].Name
	})
	if i >= len(z) {
		return -1, false
	}
	// 0 <= i < len(z)
	if z[i].Name == name {
		return i, true
	}

	// look for inexact match (must be in z[i:], if present)
	z = z[i:]
	name += "/"
	j := sort.Search(len(z), func(i int) bool {
		return name <= z[i].Name
	})
	if j >= len(z) {
		return -1, false
	}
	// 0 <= j < len(z)
	if strings.HasPrefix(z[j].Name, name) {
		return i + j, false
	}

	return -1, false
}
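A hedged sketch of how this zip file system would be plugged in, assuming it compiles with the rest of package main (FileSystem and readSeekCloser are defined in other files of this package). The zip path and directory are placeholders; the directory must match how the GOROOT tree was stored in the zip, per the assumptions listed at the top of zip.go.

// Hypothetical wiring of NewZipFS, alongside zip.go in package main.
package main

import (
	"archive/zip"
	"fmt"
)

func zipFSExample(zipfile, dir string) error {
	rc, err := zip.OpenReader(zipfile) // e.g. "godoc.zip" built by setup-godoc-app.bash
	if err != nil {
		return err
	}
	defer rc.Close()

	zfs := NewZipFS(rc, zipfile)

	// dir must be an absolute path whose slash-separated form matches the
	// tree stored in the zip, e.g. "/home/user/go/src".
	infos, err := zfs.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, fi := range infos {
		fmt.Println(fi.Name(), fi.IsDir())
	}
	return nil
}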