perf,perfdata: copy perf[data].golang.org code from x/perf

This change re-homes the code for perf.golang.org and
perfdata.golang.org (available in x/perf/analysis and x/perf/storage
respectively) to x/build to avoid any issues with backwards
compatibility. We're much more lax about the Go 1 compatibility promise
in x/build than in other x/ repos (except maybe x/exp) so we can make
bigger changes. Also, we may re-home these services to live closer to
the rest of the build infrastructure, so co-locating the code also makes
sense.

The code was taken from golang.org/x/perf (or go.googlesource.com/perf)
at git hash 64dc439b20aef7259bd73a5452063060adb8d898, or CL 392658.

In order to make this work, we also copy the internal diff and basedir
packages from x/perf.

There are a few things these depend on that we have to leave behind. One
is x/perf/benchstat, which means we also leave behind
x/perf/storage/benchfmt, which it depends on. Both are OK because they
have new and shiny replacements that we'd rather use anyway.

This change also involved running a bunch of sed commands to update
package import paths as well as comments. The full diff is included in
the commit for review, but will be removed before landing.

For golang/go#48803.

Change-Id: Ib15840c15254bc8bfa266bbc82e1df7cf4c252db
Reviewed-on: https://go-review.googlesource.com/c/build/+/395175
Reviewed-by: Michael Pratt <mpratt@google.com>
Trust: Dmitri Shuralyov <dmitshur@golang.org>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
This commit is contained in:
Michael Anthony Knyszek 2022-03-23 22:27:41 +00:00 коммит произвёл Michael Knyszek
Родитель 1182a1fa66
Коммит fc96089fad
57 изменённых файлов: 5715 добавлений и 1 удалений

12
go.mod
Просмотреть файл

@ -9,7 +9,9 @@ require (
cloud.google.com/go/storage v1.10.0
contrib.go.opencensus.io/exporter/prometheus v0.3.0
contrib.go.opencensus.io/exporter/stackdriver v0.13.5
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190129172621-c8b1d7a94ddf
github.com/NYTimes/gziphandler v1.1.1
github.com/aclements/go-gg v0.0.0-20170118225347-6dbb4e4fefb0
github.com/aws/aws-sdk-go v1.30.15
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
@ -18,6 +20,7 @@ require (
github.com/dghubble/oauth1 v0.7.0
github.com/esimov/stackblur-go v1.1.0
github.com/gliderlabs/ssh v0.3.3
github.com/go-sql-driver/mysql v1.5.0
github.com/golang-migrate/migrate/v4 v4.15.0-beta.3
github.com/golang/protobuf v1.5.2
github.com/google/go-cmp v0.5.6
@ -31,6 +34,7 @@ require (
github.com/jackc/pgconn v1.11.0
github.com/jackc/pgx/v4 v4.13.0
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1
github.com/mattn/go-sqlite3 v1.14.6
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07
go.opencensus.io v0.23.0
go4.org v0.0.0-20180809161055-417644f6feb5
@ -44,6 +48,7 @@ require (
golang.org/x/text v0.3.7
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
google.golang.org/api v0.51.0
google.golang.org/appengine v1.6.7
google.golang.org/genproto v0.0.0-20210726143408-b02e89920bf0
google.golang.org/grpc v1.39.0
google.golang.org/protobuf v1.27.1
@ -51,6 +56,7 @@ require (
)
require (
github.com/aclements/go-moremath v0.0.0-20161014184102-0ff62e0875ff // indirect
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
@ -59,6 +65,11 @@ require (
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac // indirect
github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82 // indirect
github.com/gonum/internal v0.0.0-20181124074243-f884aa714029 // indirect
github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9 // indirect
github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9 // indirect
github.com/google/go-querystring v1.0.0 // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-multierror v1.1.0 // indirect
@ -87,7 +98,6 @@ require (
golang.org/x/mod v0.4.2 // indirect
golang.org/x/tools v0.1.5 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

10
go.sum
Просмотреть файл

@ -65,6 +65,7 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190129172621-c8b1d7a94ddf h1:8F6fjL5iQP6sArGtPuXh0l6hggdcIpAm4ChjVJE4oTs=
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190129172621-c8b1d7a94ddf/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
@ -76,7 +77,9 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/aclements/go-gg v0.0.0-20170118225347-6dbb4e4fefb0 h1:E5Dzlk3akC+T2Zj1LBHgfPK1y8YWgLDnNDRmG+tpSKw=
github.com/aclements/go-gg v0.0.0-20170118225347-6dbb4e4fefb0/go.mod h1:55qNq4vcpkIuHowELi5C8e+1yUHtoLoOUR9QU5j7Tes=
github.com/aclements/go-moremath v0.0.0-20161014184102-0ff62e0875ff h1:txKOXqsFQUyi7Ht0Prto4QMU4O/0Gby6v5RFqMS0/PM=
github.com/aclements/go-moremath v0.0.0-20161014184102-0ff62e0875ff/go.mod h1:idZL3yvz4kzx1dsBOAC+oYv6L92P1oFEhUXUB1A/lwQ=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@ -239,6 +242,7 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
@ -318,10 +322,15 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac h1:Q0Jsdxl5jbxouNs1TQYt0gxesYMU4VXRbsTlgDloZ50=
github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc=
github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82 h1:EvokxLQsaaQjcWVWSV38221VAK7qc2zhaO17bKys/18=
github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg=
github.com/gonum/internal v0.0.0-20181124074243-f884aa714029 h1:8jtTdc+Nfj9AR+0soOeia9UZSvYBvETVHZrugUowJ7M=
github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks=
github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9 h1:7qnwS9+oeSiOIsiUMajT+0R7HR6hw5NegnKPmn/94oI=
github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A=
github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9 h1:V2IgdyerlBa/MxaEFRbV5juy/C3MGdj4ePi+g6ePIp4=
github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@ -582,6 +591,7 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=

Просмотреть файл

@ -0,0 +1,58 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package basedir finds templates and static files associated with a binary.
package basedir
import (
"bytes"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
// Find locates a directory for the given package.
// pkg should be the directory that contains the templates and/or static directories.
// If pkg cannot be found, an empty string will be returned.
func Find(pkg string) string {
	// Prefer the go tool's answer: it understands GOPATH, modules, and
	// vendoring. -e keeps it from failing on packages it cannot load.
	cmd := exec.Command("go", "list", "-e", "-f", "{{.Dir}}", pkg)
	if out, err := cmd.Output(); err == nil && len(out) > 0 {
		return string(bytes.TrimRight(out, "\r\n"))
	}
	// Fall back to scanning GOPATH directly (e.g. the go tool is missing).
	gopath := os.Getenv("GOPATH")
	if gopath == "" {
		gopath = defaultGOPATH()
	}
	if gopath != "" {
		// GOPATH is a list: split on the OS-specific separator.
		// (Splitting on ":" would silently break on Windows, where the
		// separator is ";" and paths contain drive-letter colons.)
		for _, dir := range strings.Split(gopath, string(os.PathListSeparator)) {
			p := filepath.Join(dir, pkg)
			if _, err := os.Stat(p); err == nil {
				return p
			}
		}
	}
	return ""
}
// defaultGOPATH reports the GOPATH the go tool would use when the
// environment variable is unset.
// Copied from go/build/build.go
func defaultGOPATH() string {
	homeVar := "HOME"
	switch runtime.GOOS {
	case "windows":
		homeVar = "USERPROFILE"
	case "plan9":
		homeVar = "home"
	}
	home := os.Getenv(homeVar)
	if home == "" {
		return ""
	}
	def := filepath.Join(home, "go")
	if filepath.Clean(def) == filepath.Clean(runtime.GOROOT()) {
		// Don't set the default GOPATH to GOROOT,
		// as that will trigger warnings from the go tool.
		return ""
	}
	return def
}

68
internal/diff/diff.go Normal file
Просмотреть файл

@ -0,0 +1,68 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package diff
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"runtime"
)
// writeTempFile creates a fresh temporary file in dir with the given name
// prefix, writes data to it, and returns its path. On any failure the
// partially-written file is removed.
func writeTempFile(dir, prefix string, data []byte) (string, error) {
	f, err := ioutil.TempFile(dir, prefix)
	if err != nil {
		return "", err
	}
	name := f.Name()
	_, werr := f.Write(data)
	// Always close; a write error takes precedence over a close error.
	if cerr := f.Close(); werr == nil {
		werr = cerr
	}
	if werr != nil {
		os.Remove(name)
		return "", werr
	}
	return name, nil
}
// Diff returns a human-readable description of the differences between s1 and s2.
// If the "diff" command is available, it returns the output of unified diff on s1 and s2.
// If the result is non-empty, the strings differ or the diff command failed.
func Diff(s1, s2 string) string {
	if s1 == s2 {
		return ""
	}
	if _, err := exec.LookPath("diff"); err != nil {
		// No diff tool available; fall back to dumping both inputs.
		return fmt.Sprintf("diff command unavailable\nold: %q\nnew: %q", s1, s2)
	}
	oldFile, err := writeTempFile("", "benchfmt_test", []byte(s1))
	if err != nil {
		return err.Error()
	}
	defer os.Remove(oldFile)
	newFile, err := writeTempFile("", "benchfmt_test", []byte(s2))
	if err != nil {
		return err.Error()
	}
	defer os.Remove(newFile)
	diffCmd := "diff"
	if runtime.GOOS == "plan9" {
		diffCmd = "/bin/ape/diff"
	}
	out, err := exec.Command(diffCmd, "-u", oldFile, newFile).CombinedOutput()
	switch {
	case len(out) > 0:
		// diff exits with a non-zero status when the files don't match.
		// Ignore that failure as long as we get output.
		err = nil
	case err != nil:
		out = append(out, []byte(err.Error())...)
	}
	return string(out)
}

51
perf/app/app.go Normal file
Просмотреть файл

@ -0,0 +1,51 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package app implements the performance data analysis server.
package app
import (
"net/http"
"golang.org/x/build/perfdata"
)
// App manages the analysis server logic.
// Construct an App instance and call RegisterOnMux to connect it with an HTTP server.
type App struct {
	// StorageClient is used to talk to the perfdata server.
	StorageClient *perfdata.Client

	// BaseDir is the directory containing the "template" directory.
	// If empty, the current directory will be used.
	BaseDir string
}
// RegisterOnMux registers the app's URLs on mux.
func (a *App) RegisterOnMux(mux *http.ServeMux) {
	// Registration order is irrelevant to ServeMux, so a table keeps the
	// route list easy to scan and extend.
	routes := map[string]http.HandlerFunc{
		"/":        a.index,
		"/search":  a.search,
		"/compare": a.compare,
		"/trend":   a.trend,
	}
	for pattern, handler := range routes {
		mux.HandleFunc(pattern, handler)
	}
}
// search handles /search.
// This currently just runs the compare handler, until more analysis methods are implemented.
func (a *App) search(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	// Benchsave sends both of these headers; either one means the client
	// wants plain text.
	// TODO(quentin): Switch to real Accept negotiation when golang/go#19307 is resolved.
	wantsText := r.Header.Get("Accept") == "text/plain" || r.Header.Get("X-Benchsave") == "1"
	if wantsText {
		a.textCompare(w, r)
		return
	}
	// TODO(quentin): Intelligently choose an analysis method
	// based on the results from the query, once there is more
	// than one analysis method.
	//q := r.Form.Get("q")
	a.compare(w, r)
}

24
perf/app/appengine.go Normal file
Просмотреть файл

@ -0,0 +1,24 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build appengine
// +build appengine
package app
import (
"net/http"
"golang.org/x/net/context"
"google.golang.org/appengine"
"google.golang.org/appengine/log"
)
// requestContext returns the Context object for a given request.
// On App Engine (this file is build-tagged "appengine") the context comes
// from the appengine package rather than the request itself.
func requestContext(r *http.Request) context.Context {
	return appengine.NewContext(r)
}

// infof and errorf log through the App Engine logging package; callers pass
// the request context so entries are attributed to the request.
var infof = log.Infof
var errorf = log.Errorf

398
perf/app/compare.go Normal file
Просмотреть файл

@ -0,0 +1,398 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package app
import (
"bytes"
"errors"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"path/filepath"
"sort"
"strconv"
"strings"
"unicode"
"golang.org/x/build/perfdata/query"
"golang.org/x/net/context"
"golang.org/x/perf/benchstat"
"golang.org/x/perf/storage/benchfmt"
)
// A resultGroup holds a list of results and tracks the distinct labels found in that list.
// The zero value is ready to use; populate it via add.
type resultGroup struct {
	// The (partial) query string that resulted in this group.
	Q string
	// Raw list of results, in the order they were added.
	results []*benchfmt.Result
	// LabelValues is the count of results found with each distinct (key, value) pair found in labels.
	// A value of "" counts results missing that key.
	LabelValues map[string]valueSet
}
// add adds res to the resultGroup, updating the LabelValues counts so that
// for every key seen so far, the counts across all values (including the ""
// missing-value bucket) cover every result in the group.
func (g *resultGroup) add(res *benchfmt.Result) {
	g.results = append(g.results, res)
	if g.LabelValues == nil {
		g.LabelValues = make(map[string]valueSet)
	}
	for k, v := range res.Labels {
		if g.LabelValues[k] == nil {
			g.LabelValues[k] = make(valueSet)
			// First time this key appears: all earlier results lacked
			// it, so backfill the "" bucket for them.
			if len(g.results) > 1 {
				g.LabelValues[k][""] = len(g.results) - 1
			}
		}
		g.LabelValues[k][v]++
	}
	// Count res under "" for every known key it does not carry a
	// (non-empty) value for.
	for k := range g.LabelValues {
		if res.Labels[k] == "" {
			g.LabelValues[k][""]++
		}
	}
}
// splitOn returns a new set of groups sharing a common value for key.
// Groups are returned in sorted order of the key's value.
func (g *resultGroup) splitOn(key string) []*resultGroup {
	byValue := make(map[string]*resultGroup)
	var order []string
	for _, res := range g.results {
		v := res.Labels[key]
		grp, ok := byValue[v]
		if !ok {
			grp = &resultGroup{Q: key + ":" + v}
			byValue[v] = grp
			order = append(order, v)
		}
		grp.add(res)
	}
	sort.Strings(order)
	var out []*resultGroup
	for _, v := range order {
		out = append(out, byValue[v])
	}
	return out
}
// valueSet is a set of values and the number of results with each value.
type valueSet map[string]int

// valueCount and byCount are used for sorting a valueSet
type valueCount struct {
	Value string
	Count int
}

// byCount sorts by descending count, breaking ties by ascending value.
type byCount []valueCount

func (s byCount) Len() int      { return len(s) }
func (s byCount) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byCount) Less(i, j int) bool {
	a, b := s[i], s[j]
	if a.Count == b.Count {
		return a.Value < b.Value
	}
	return a.Count > b.Count
}

// TopN returns a slice containing n valueCount entries, and if any labels were omitted, an extra entry with value "…".
func (vs valueSet) TopN(n int) []valueCount {
	var counts []valueCount
	total := 0
	for v, c := range vs {
		counts = append(counts, valueCount{v, c})
		total += c
	}
	sort.Sort(byCount(counts))
	if len(counts) <= n {
		// Nothing omitted; return everything.
		return counts
	}
	top := counts[:n]
	shown := 0
	for _, vc := range top {
		shown += vc.Count
	}
	// Summarize the omitted tail in a single "…" entry.
	return append(top, valueCount{"…", total - shown})
}
// addToQuery returns a new query string with add applied as a filter.
func addToQuery(query, add string) string {
	// Quote the new term if it contains whitespace or quoting characters,
	// escaping any backslashes and double quotes inside it.
	if strings.ContainsAny(add, " \t\\\"") {
		escaper := strings.NewReplacer(`\`, `\\`, `"`, `\"`)
		add = `"` + escaper.Replace(add) + `"`
	}
	if strings.Contains(query, "|") {
		// The query already has a filter section; join into it.
		return add + " " + query
	}
	return add + " | " + query
}
// linkify returns a link related to the label's value. If no such link exists, it returns an empty string.
// For example, "cl: 1234" is linked to golang.org/cl/1234.
// string is used as the return type and not template.URL so that html/template will validate the scheme.
func linkify(labels benchfmt.Labels, label string) string {
	switch label {
	case "cl", "commit":
		// Both hold a Gerrit change identifier.
		return "https://golang.org/cl/" + template.URLQueryEscaper(labels[label])
	case "repo":
		return labels["repo"]
	case "ps":
		// TODO(quentin): Figure out how to link to a particular patch set on Gerrit.
		return ""
	case "try":
		// TODO(quentin): Return link to farmer once farmer has permalinks.
		return ""
	default:
		return ""
	}
}
// compare handles queries that require comparison of the groups in the query.
// It renders template/compare.html (relative to a.BaseDir) with the output of
// compareQuery; query errors are shown in the page, not as HTTP errors.
func (a *App) compare(w http.ResponseWriter, r *http.Request) {
	ctx := requestContext(r)

	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	q := r.Form.Get("q")

	// The template is re-read on every request, so edits are picked up
	// without restarting the server.
	tmpl, err := ioutil.ReadFile(filepath.Join(a.BaseDir, "template/compare.html"))
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	t, err := template.New("main").Funcs(template.FuncMap{
		"addToQuery": addToQuery,
		"linkify":    linkify,
	}).Parse(string(tmpl))
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	data := a.compareQuery(ctx, q)

	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	if err := t.Execute(w, data); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
}
// compareData is the payload rendered by the compare template.
// It is produced by compareQuery.
type compareData struct {
	Q            string          // the original query string
	Error        string          // non-empty if the query failed; other fields may be unset
	Benchstat    template.HTML   // rendered benchstat comparison tables
	Groups       []*resultGroup  // the groups being compared
	Labels       map[string]bool // labels present on results but not in CommonLabels
	CommonLabels benchfmt.Labels // labels with an identical value on every result
}
// queryKeys returns the keys that are exact-matched by q.
func queryKeys(q string) map[string]bool {
	// TODO(quentin): This func is shared with db.go; refactor?
	// A key ends at the first operator, whitespace, or upper-case rune.
	isDelim := func(r rune) bool {
		return r == ':' || r == '>' || r == '<' || unicode.IsSpace(r) || unicode.IsUpper(r)
	}
	keys := make(map[string]bool)
	for _, word := range query.SplitWords(q) {
		delim := strings.IndexFunc(word, isDelim)
		// Only ":" denotes an exact match; ">" and "<" are ranges.
		if delim >= 0 && word[delim] == ':' {
			keys[word[:delim]] = true
		}
	}
	return keys
}
// elideKeyValues returns content, a benchmark format line, with the
// values of any keys in keys elided (replaced by "*").
func elideKeyValues(content string, keys map[string]bool) string {
	// Separate the benchmark name from the measurements that follow it.
	name, suffix := content, ""
	if sp := strings.IndexFunc(name, unicode.IsSpace); sp >= 0 {
		name, suffix = name[:sp], name[sp:]
	}
	// A trailing "-N" where N is numeric is the gomaxprocs value.
	if dash := strings.LastIndex(name, "-"); dash >= 0 {
		if _, err := strconv.Atoi(name[dash+1:]); err == nil {
			if keys["gomaxprocs"] {
				suffix = "-*" + suffix
			} else {
				suffix = name[dash:] + suffix
			}
			name = name[:dash]
		}
	}
	segs := strings.Split(name, "/")
	for idx, seg := range segs {
		eq := strings.Index(seg, "=")
		switch {
		case eq >= 0:
			// "key=value" subbenchmark component.
			if keys[seg[:eq]] {
				segs[idx] = seg[:eq] + "=*"
			}
		case idx == 0:
			// The leading component is the benchmark name itself.
			if keys["name"] {
				segs[idx] = "Benchmark*"
			}
		case keys[fmt.Sprintf("sub%d", idx)]:
			// Unnamed subbenchmark components are addressed as subN.
			segs[idx] = "*"
		}
	}
	return strings.Join(segs, "/") + suffix
}
// fetchCompareResults fetches the matching results for a given query string.
// The results will be grouped into one or more groups based on either the query string or heuristics.
// Returns an error if any storage query fails or if no results matched.
func (a *App) fetchCompareResults(ctx context.Context, q string) ([]*resultGroup, error) {
	// Parse query
	prefix, queries := parseQueryString(q)

	// Send requests
	// TODO(quentin): Issue requests in parallel?
	var groups []*resultGroup
	var found int
	for _, qPart := range queries {
		// Keys exact-matched by this part are elided from benchmark
		// names below so the groups line up for comparison.
		keys := queryKeys(qPart)
		group := &resultGroup{Q: qPart}
		if prefix != "" {
			qPart = prefix + " " + qPart
		}
		res := a.StorageClient.Query(ctx, qPart)
		for res.Next() {
			result := res.Result()
			result.Content = elideKeyValues(result.Content, keys)
			group.add(result)
			found++
		}
		err := res.Err()
		res.Close()
		if err != nil {
			// TODO: If the query is invalid, surface that to the user.
			return nil, err
		}
		groups = append(groups, group)
	}

	if found == 0 {
		return nil, errors.New("no results matched the query string")
	}

	// Attempt to automatically split results.
	if len(groups) == 1 {
		group := groups[0]
		// Matching a single CL -> split by filename
		switch {
		case len(group.LabelValues["cl"]) == 1 && len(group.LabelValues["ps"]) == 1 && len(group.LabelValues["upload-file"]) > 1:
			groups = group.splitOn("upload-file")
		// Matching a single upload with multiple files -> split by file
		case len(group.LabelValues["upload"]) == 1 && len(group.LabelValues["upload-part"]) > 1:
			groups = group.splitOn("upload-part")
		}
	}

	return groups, nil
}
// compareQuery fetches the results matching q, runs benchstat over the
// resulting groups, and assembles everything the compare template needs.
// Failures are reported through compareData.Error rather than returned.
func (a *App) compareQuery(ctx context.Context, q string) *compareData {
	// An empty query renders an empty page.
	if len(q) == 0 {
		return &compareData{}
	}

	groups, err := a.fetchCompareResults(ctx, q)
	if err != nil {
		return &compareData{
			Q:     q,
			Error: err.Error(),
		}
	}

	var buf bytes.Buffer
	// Compute benchstat
	c := &benchstat.Collection{
		AddGeoMean: true,
		SplitBy:    nil,
	}
	// Split tables on any of these labels that varies within some group.
	for _, label := range []string{"buildlet", "pkg", "goos", "goarch"} {
		for _, g := range groups {
			if len(g.LabelValues[label]) > 1 {
				c.SplitBy = append(c.SplitBy, label)
				break
			}
		}
	}
	for _, g := range groups {
		c.AddResults(g.Q, g.results)
	}
	benchstat.FormatHTML(&buf, c.Tables())

	// Prepare struct for template.
	labels := make(map[string]bool)
	// commonLabels are the key: value of every label that has an
	// identical value on every result.
	commonLabels := make(benchfmt.Labels)
	// Scan the first group for common labels.
	for k, vs := range groups[0].LabelValues {
		if len(vs) == 1 {
			for v := range vs {
				commonLabels[k] = v
			}
		}
	}
	// Remove any labels not common in later groups.
	for _, g := range groups[1:] {
		for k, v := range commonLabels {
			if len(g.LabelValues[k]) != 1 || g.LabelValues[k][v] == 0 {
				delete(commonLabels, k)
			}
		}
	}
	// List all labels present and not in commonLabels.
	for _, g := range groups {
		for k := range g.LabelValues {
			if commonLabels[k] != "" {
				continue
			}
			labels[k] = true
		}
	}
	data := &compareData{
		Q:            q,
		Benchstat:    template.HTML(buf.String()),
		Groups:       groups,
		Labels:       labels,
		CommonLabels: commonLabels,
	}
	return data
}
// textCompare runs the compare analysis and writes it as plain text.
// It is called if benchsave is requesting a text-only analysis.
func (a *App) textCompare(w http.ResponseWriter, r *http.Request) {
	ctx := requestContext(r)

	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	w.Header().Set("Content-Type", "text/plain; charset=utf-8")

	q := r.Form.Get("q")

	groups, err := a.fetchCompareResults(ctx, q)
	if err != nil {
		// TODO(quentin): Should we serve this with a 500 or 404? This means the query was invalid or had no results.
		fmt.Fprintf(w, "unable to analyze results: %v", err)
		// Stop here: previously this fell through and appended an empty
		// benchstat table after the error message.
		return
	}

	// Compute benchstat
	c := new(benchstat.Collection)
	for _, g := range groups {
		c.AddResults(g.Q, g.results)
	}
	benchstat.FormatText(w, c.Tables())
}

158
perf/app/compare_test.go Normal file
Просмотреть файл

@ -0,0 +1,158 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package app
import (
"fmt"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"golang.org/x/build/perfdata"
"golang.org/x/net/context"
"golang.org/x/perf/storage/benchfmt"
)
// TestResultGroup checks that resultGroup.add records results and label
// counts, and that splitOn partitions the group by a label's value.
func TestResultGroup(t *testing.T) {
	data := `key: value
BenchmarkName 1 ns/op
key: value2
BenchmarkName 1 ns/op`
	// Parse the benchmark data and feed every result into one group.
	var results []*benchfmt.Result
	br := benchfmt.NewReader(strings.NewReader(data))
	g := &resultGroup{}
	for br.Next() {
		results = append(results, br.Result())
		g.add(br.Result())
	}
	if err := br.Err(); err != nil {
		t.Fatalf("Err() = %v, want nil", err)
	}
	// The group must retain every result in order...
	if !reflect.DeepEqual(g.results, results) {
		t.Errorf("g.results = %#v, want %#v", g.results, results)
	}
	// ...and count each distinct value of "key" once.
	if want := map[string]valueSet{"key": {"value": 1, "value2": 1}}; !reflect.DeepEqual(g.LabelValues, want) {
		t.Errorf("g.LabelValues = %#v, want %#v", g.LabelValues, want)
	}
	// Splitting on "key" should yield one group per distinct value,
	// each holding the corresponding result.
	groups := g.splitOn("key")
	if len(groups) != 2 {
		t.Fatalf("g.splitOn returned %d groups, want 2", len(groups))
	}
	for i, results := range [][]*benchfmt.Result{
		{results[0]},
		{results[1]},
	} {
		if !reflect.DeepEqual(groups[i].results, results) {
			t.Errorf("groups[%d].results = %#v, want %#v", i, groups[i].results, results)
		}
	}
}
// compareQueries holds static storage-server responses for TestCompareQuery,
// keyed by the query string the stub handler receives.
var compareQueries = map[string]string{
	// Two uploads queried separately ("one vs two")...
	"one": `upload: 1
upload-part: 1
label: value
BenchmarkOne 1 5 ns/op
BenchmarkTwo 1 10 ns/op`,
	"two": `upload: 1
upload-part: 2
BenchmarkOne 1 10 ns/op
BenchmarkTwo 1 5 ns/op`,
	// ...and the same data as one upload with two parts ("onetwo").
	"onetwo": `upload: 1
upload-part: 1
label: value
BenchmarkOne 1 5 ns/op
BenchmarkTwo 1 10 ns/op
label:
upload-part: 2
BenchmarkOne 1 10 ns/op
BenchmarkTwo 1 5 ns/op`,
}
// TestCompareQuery drives compareQuery end-to-end against a stub storage
// server that serves the canned responses in compareQueries.
func TestCompareQuery(t *testing.T) {
	// TODO(quentin): This test seems too heavyweight; we are but shouldn't be also testing the perfdata client -> perfdata server interaction.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if err := r.ParseForm(); err != nil {
			t.Errorf("ParseForm = %v", err)
		}
		q := r.Form.Get("q")
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		fmt.Fprint(w, compareQueries[q])
	}))
	defer ts.Close()

	a := &App{StorageClient: &perfdata.Client{BaseURL: ts.URL}}

	// Both query forms should produce the same two-group comparison:
	// "one vs two" splits explicitly, "onetwo" via the upload-part heuristic.
	for _, q := range []string{"one vs two", "onetwo"} {
		t.Run(q, func(t *testing.T) {
			data := a.compareQuery(context.Background(), q)
			if data.Error != "" {
				t.Fatalf("compareQuery failed: %s", data.Error)
			}
			if have := data.Q; have != q {
				t.Errorf("Q = %q, want %q", have, q)
			}
			if len(data.Groups) != 2 {
				t.Errorf("len(Groups) = %d, want 2", len(data.Groups))
			}
			if len(data.Benchstat) == 0 {
				t.Error("len(Benchstat) = 0, want >0")
			}
			if want := map[string]bool{"upload-part": true, "label": true}; !reflect.DeepEqual(data.Labels, want) {
				t.Errorf("Labels = %#v, want %#v", data.Labels, want)
			}
			if want := (benchfmt.Labels{"upload": "1"}); !reflect.DeepEqual(data.CommonLabels, want) {
				t.Errorf("CommonLabels = %#v, want %#v", data.CommonLabels, want)
			}
		})
	}
}
// TestAddToQuery checks quoting, escaping, and placement of terms added
// to an existing query string.
func TestAddToQuery(t *testing.T) {
	cases := []struct {
		query, add string
		want       string
	}{
		{"one", "two", "two | one"},
		{"pre | one vs two", "three", "three pre | one vs two"},
		{"four", "five six", `"five six" | four`},
		{"seven", `extra "fun"\problem`, `"extra \"fun\"\\problem" | seven`},
		{"eight", `ni\"ne`, `"ni\\\"ne" | eight`},
	}
	for i, c := range cases {
		t.Run(fmt.Sprint(i), func(t *testing.T) {
			if got := addToQuery(c.query, c.add); got != c.want {
				t.Errorf("addToQuery(%q, %q) = %q, want %q", c.query, c.add, got, c.want)
			}
		})
	}
}
// TestElideKeyValues checks that elided keys are masked with "*" in
// benchmark content lines.
func TestElideKeyValues(t *testing.T) {
	type sb map[string]bool
	cases := []struct {
		content string
		keys    sb
		want    string
	}{
		{"BenchmarkOne/key=1-1 1 ns/op", sb{"key": true}, "BenchmarkOne/key=*-1 1 ns/op"},
		{"BenchmarkOne/key=1-2 1 ns/op", sb{"other": true}, "BenchmarkOne/key=1-2 1 ns/op"},
		{"BenchmarkOne/key=1/key2=2-3 1 ns/op", sb{"key": true}, "BenchmarkOne/key=*/key2=2-3 1 ns/op"},
		{"BenchmarkOne/foo/bar-4 1 ns/op", sb{"sub1": true}, "BenchmarkOne/*/bar-4 1 ns/op"},
		{"BenchmarkOne/foo/bar-5 1 ns/op", sb{"gomaxprocs": true}, "BenchmarkOne/foo/bar-* 1 ns/op"},
		{"BenchmarkOne/foo/bar-6 1 ns/op", sb{"name": true}, "Benchmark*/foo/bar-6 1 ns/op"},
	}
	for i, c := range cases {
		t.Run(fmt.Sprint(i), func(t *testing.T) {
			if have := elideKeyValues(c.content, c.keys); have != c.want {
				t.Errorf("elideKeys(%q, %#v) = %q, want %q", c.content, map[string]bool(c.keys), have, c.want)
			}
		})
	}
}

47
perf/app/index.go Normal file
Просмотреть файл

@ -0,0 +1,47 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package app
import (
"html/template"
"io/ioutil"
"net/http"
"path/filepath"
"golang.org/x/build/perfdata"
)
// index handles the root URL, rendering template/index.html (relative to
// a.BaseDir) with a list of recent uploads. (It does not redirect to
// /search, despite the original comment.)
func (a *App) index(w http.ResponseWriter, r *http.Request) {
	ctx := requestContext(r)

	// The template is re-read on every request, so edits are picked up
	// without restarting the server.
	tmpl, err := ioutil.ReadFile(filepath.Join(a.BaseDir, "template/index.html"))
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	t, err := template.New("main").Parse(string(tmpl))
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	// Fetch the 16 most recent uploads; the extra arguments presumably
	// request the "by" and "upload-time" labels — confirm against
	// perfdata.Client.ListUploads.
	var uploads []perfdata.UploadInfo
	ul := a.StorageClient.ListUploads(ctx, "", []string{"by", "upload-time"}, 16)
	defer ul.Close()
	for ul.Next() {
		uploads = append(uploads, ul.Info())
	}
	if err := ul.Err(); err != nil {
		// Best effort: log and render the page without the upload list.
		errorf(ctx, "failed to fetch recent uploads: %v", err)
	}

	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	if err := t.Execute(w, struct{ RecentUploads []perfdata.UploadInfo }{uploads}); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
}

125
perf/app/kza.go Normal file
Просмотреть файл

@ -0,0 +1,125 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package app
import "math"
// TODO: This all assumes that data is sampled at a regular interval
// and there are no missing values. It could be generalized to accept
// missing values (perhaps represented by NaN), or generalized much
// further by accepting (t, x) pairs and a vector of times at which to
// evaluate the filter (and an arbitrary window size). I would have to
// figure out how that affects the difference array in KZA.
// TODO: These can generate a lot of garbage. Perhaps the caller
// should pass in the target slice? Or these should just overwrite the
// input array and leave it to the caller to copy if necessary?
// MovingAverage performs a moving average (MA) filter of xs with
// window size m. m must be a positive odd integer.
//
// Note that this filter is often described in terms of the half
// length of the window (m-1)/2.
func MovingAverage(xs []float64, m int) []float64 {
	if m <= 0 || m%2 != 1 {
		panic("m must be a positive, odd integer")
	}

	ys := make([]float64, len(xs))
	half := (m - 1) / 2

	// One pass with a running sum: at step r the window covers
	// xs[r-m+1 .. r] (clipped to the slice bounds), and its average is
	// the output for the window's center, r-half. Windows near the edges
	// simply contain fewer samples.
	var sum float64
	var count int
	for r := 0; r < len(xs)+half; r++ {
		if old := r - m; old >= 0 {
			sum -= xs[old]
			count--
		}
		if r < len(xs) {
			sum += xs[r]
			count++
		}
		if center := r - half; center >= 0 {
			ys[center] = sum / float64(count)
		}
	}
	return ys
}
// KolmogorovZurbenko performs a Kolmogorov-Zurbenko (KZ) filter of xs
// with window size m and k iterations. m must be a positive odd
// integer. k must be positive.
func KolmogorovZurbenko(xs []float64, m, k int) []float64 {
	// k is typically small and MovingAverage is efficient, so iterate
	// the moving average k times rather than constructing the
	// equivalent binomial-coefficient kernel.
	ys := xs
	for iter := 0; iter < k; iter++ {
		// TODO: Generate less garbage.
		ys = MovingAverage(ys, m)
	}
	return ys
}
// AdaptiveKolmogorovZurbenko performs an adaptive Kolmogorov-Zurbenko
// (KZA) filter of xs using an initial window size m and k iterations.
// m must be a positive odd integer. k must be positive.
//
// See Zurbenko, et al. 1996: Detecting discontinuities in time series
// of upper air data: Demonstration of an adaptive filter technique.
// Journal of Climate, 9, 3548–3560.
func AdaptiveKolmogorovZurbenko(xs []float64, m, k int) []float64 {
	// Perform initial KZ filter.
	z := KolmogorovZurbenko(xs, m, k)

	// Compute differenced values. d[i] measures the change in the
	// smoothed series over a full window centered at i; indices within
	// q of either edge are left at zero.
	q := (m - 1) / 2
	d := make([]float64, len(z)+1)
	maxD := 0.0
	for i := q; i < len(z)-q; i++ {
		d[i] = math.Abs(z[i+q] - z[i-q])
		if d[i] > maxD {
			maxD = d[i]
		}
	}

	if maxD == 0 {
		// xs is constant, so no amount of filtering will do
		// anything. Avoid dividing 0/0 below.
		return xs
	}

	// Compute adaptive filter. Where d is large (a likely
	// discontinuity), f approaches 0 and the window shrinks on the
	// side of the change; where d is small, the full half-window q
	// is used.
	ys := make([]float64, len(xs))
	for t := range ys {
		dPrime := d[t+1] - d[t]
		f := 1 - d[t]/maxD
		qt := q // half-window extent on the left (tail) side
		if dPrime <= 0 {
			// Zurbenko doesn't specify what to do with
			// the fractional part of qt and qh, so we
			// interpret this as summing all points of xs
			// between qt and qh.
			qt = int(math.Ceil(float64(q) * f))
		}
		if t-qt < 0 {
			// Clamp the left extent to the start of the data.
			qt = t
		}
		qh := q // half-window extent on the right (head) side
		if dPrime >= 0 {
			qh = int(math.Floor(float64(q) * f))
		}
		if t+qh >= len(xs) {
			// Clamp the right extent to the end of the data.
			qh = len(xs) - t - 1
		}
		sum := 0.0
		for i := t - qt; i <= t+qh; i++ {
			sum += xs[i]
		}
		// Zurbenko divides by qh+qt, but this undercounts the
		// number of terms in the sum by 1.
		ys[t] = sum / float64(qh+qt+1)
	}
	return ys
}

54
perf/app/kza_test.go Normal file
Просмотреть файл

@ -0,0 +1,54 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package app
import (
"math/rand"
"testing"
)
// Aeq reports whether expect and got agree to 8 significant
// figures (1 part in 100 million).
func Aeq(expect, got float64) bool {
	a, b := expect, got
	if a < 0 && b < 0 {
		// Both negative: compare magnitudes.
		a, b = -a, -b
	}
	return a*0.99999999 <= b && b*0.99999999 <= a
}
// TestMovingAverage cross-checks the windowed MovingAverage against
// the obvious (but slow) reference implementation on random inputs.
func TestMovingAverage(t *testing.T) {
	xs := make([]float64, 100)
	for trial := 0; trial < 10; trial++ {
		for i := range xs {
			xs[i] = rand.Float64()
		}
		// Pick a random positive odd window size.
		m := 1 + 2*rand.Intn(100)
		fast, slow := MovingAverage(xs, m), slowMovingAverage(xs, m)
		// TODO: Use stuff from mathtest.
		for i, v := range fast {
			if !Aeq(v, slow[i]) {
				t.Fatalf("want %v, got %v", slow, fast)
			}
		}
	}
}
// slowMovingAverage is a brute-force reference implementation of a
// centered moving average with window size m, used to validate
// MovingAverage in tests.
func slowMovingAverage(xs []float64, m int) []float64 {
	out := make([]float64, len(xs))
	half := (m - 1) / 2
	for i := range out {
		sum, count := 0.0, 0
		// Average every in-range element of the window centered at i.
		for j := i - half; j <= i+half; j++ {
			if j >= 0 && j < len(xs) {
				sum += xs[j]
				count++
			}
		}
		out[i] = sum / float64(count)
	}
	return out
}

26
perf/app/local.go Normal file
Просмотреть файл

@ -0,0 +1,26 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !appengine
// +build !appengine
package app
import (
"log"
"net/http"
"golang.org/x/net/context"
)
// requestContext returns the Context object for a given request.
// This implementation is compiled only for non-App Engine builds
// (see the !appengine build constraint above) and simply uses the
// request's own context.
func requestContext(r *http.Request) context.Context {
	return r.Context()
}
// infof logs an informational message via the standard logger.
// The context is unused in this non-App Engine implementation.
func infof(_ context.Context, format string, args ...interface{}) {
	log.Printf(format, args...)
}

// errorf logs an error message; in this build it is identical to infof.
var errorf = infof

60
perf/app/parse.go Normal file
Просмотреть файл

@ -0,0 +1,60 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package app
import "strings"
// parseQueryString splits a user-entered query into one or more perfdata
// server queries. The supported query formats are:
//
//	prefix | one vs two  - parsed as "prefix", {"one", "two"}
//	prefix one vs two    - parsed as "", {"prefix one", "two"}
//	anything else        - parsed as "", {"anything else"}
//
// The vs and | separators must not be quoted.
func parseQueryString(q string) (string, []string) {
	var (
		prefix  string   // text before an unquoted "|", if any
		queries []string // completed queries, split on unquoted "vs"
		parts   []string // whitespace-separated words of the current query
		inQuote bool     // inside a double-quoted region
	)
	i := 0
	for i < len(q) {
		c := q[i]
		if inQuote {
			// Inside quotes: only an unescaped '"' ends the region.
			if c == '"' {
				inQuote = false
			} else if c == '\\' {
				i++ // skip the escaped character
			}
			i++
			continue
		}
		switch {
		case c == '"':
			inQuote = true
			i++
		case c == ' ' || c == '\t':
			// Word boundary: classify the completed word.
			word := q[:i]
			if word == "|" && prefix == "" {
				prefix = strings.Join(parts, " ")
				parts = nil
			} else if word == "vs" {
				queries = append(queries, strings.Join(parts, " "))
				parts = nil
			} else {
				parts = append(parts, word)
			}
			// Consume the separator and restart scanning on the rest.
			q = q[i+1:]
			i = 0
		default:
			if c == '\\' {
				i++ // skip the escaped character
			}
			i++
		}
	}
	if len(q) > 0 {
		parts = append(parts, q)
	}
	if len(parts) > 0 {
		queries = append(queries, strings.Join(parts, " "))
	}
	return prefix, queries
}

35
perf/app/parse_test.go Normal file
Просмотреть файл

@ -0,0 +1,35 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package app
import (
"reflect"
"testing"
)
// TestParseQueryString exercises the supported query grammars,
// including quoting and mixed whitespace.
func TestParseQueryString(t *testing.T) {
	cases := []struct {
		q          string
		wantPrefix string
		wantParts  []string
	}{
		{"prefix | one vs two", "prefix", []string{"one", "two"}},
		{"prefix one vs two", "", []string{"prefix one", "two"}},
		{"anything else", "", []string{"anything else"}},
		{`one vs "two vs three"`, "", []string{"one", `"two vs three"`}},
		{"mixed\ttabs \"and\tspaces\"", "", []string{"mixed tabs \"and\tspaces\""}},
	}
	for _, tc := range cases {
		t.Run(tc.q, func(t *testing.T) {
			gotPrefix, gotParts := parseQueryString(tc.q)
			if gotPrefix != tc.wantPrefix {
				t.Errorf("parseQueryString returned prefix %q, want %q", gotPrefix, tc.wantPrefix)
			}
			if !reflect.DeepEqual(gotParts, tc.wantParts) {
				t.Errorf("parseQueryString returned parts %#v, want %#v", gotParts, tc.wantParts)
			}
		})
	}
}

505
perf/app/trend.go Normal file
Просмотреть файл

@ -0,0 +1,505 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Loosely based on github.com/aclements/go-misc/benchplot
package app
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"math"
"net/http"
"path/filepath"
"sort"
"strconv"
"strings"
"github.com/aclements/go-gg/generic/slice"
"github.com/aclements/go-gg/ggstat"
"github.com/aclements/go-gg/table"
"golang.org/x/build/perfdata"
"golang.org/x/net/context"
)
// trend handles /trend.
// With no query, it prints the list of recent uploads containing a "trend" key.
// With a query, it shows a graph of the matching benchmark results.
func (a *App) trend(w http.ResponseWriter, r *http.Request) {
	ctx := requestContext(r)

	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	q := r.Form.Get("q")

	raw, err := ioutil.ReadFile(filepath.Join(a.BaseDir, "template/trend.html"))
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	tmpl, err := template.New("main").Parse(string(raw))
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	// Plot options come straight from the form: ?x= chooses the x-axis
	// column and ?raw=1 requests the unsmoothed scatter plot.
	opt := plotOptions{
		x:   r.Form.Get("x"),
		raw: r.Form.Get("raw") == "1",
	}

	data := a.trendQuery(ctx, q, opt)

	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	if err := tmpl.Execute(w, data); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
}
// trendData is the struct passed to the trend.html template.
type trendData struct {
	// Q is the user's query string, echoed back into the search box.
	Q string
	// Error, if non-empty, is shown instead of a plot.
	Error string
	// TrendUploads lists recent uploads with a "trend" key; shown when Q is empty.
	TrendUploads []perfdata.UploadInfo
	// PlotData is a JS literal for google.visualization.DataTable.
	PlotData template.JS
	// PlotType is the google.visualization chart class name
	// ("LineChart" or "ScatterChart").
	PlotType template.JS
}
// trendQuery computes the values for the template and returns a trendData for display.
func (a *App) trendQuery(ctx context.Context, q string, opt plotOptions) *trendData {
	d := &trendData{Q: q}
	if q == "" {
		// No query: list recent uploads that carry a "trend" label.
		ul := a.StorageClient.ListUploads(ctx, `trend>`, []string{"by", "upload-time", "trend"}, 16)
		defer ul.Close()
		for ul.Next() {
			d.TrendUploads = append(d.TrendUploads, ul.Info())
		}
		if err := ul.Err(); err != nil {
			errorf(ctx, "failed to fetch recent trend uploads: %v", err)
		}
		return d
	}

	// TODO(quentin): Chunk query based on matching upload IDs.
	res := a.StorageClient.Query(ctx, q)
	defer res.Close()
	t, resultCols := queryToTable(res)
	if err := res.Err(); err != nil {
		errorf(ctx, "failed to read query results: %v", err)
		d.Error = fmt.Sprintf("failed to read query results: %v", err)
		return d
	}
	// The plot pipeline requires these string labels on every result.
	for _, col := range []string{"commit", "commit-time", "branch", "name"} {
		if !hasStringColumn(t, col) {
			d.Error = fmt.Sprintf("results missing %q label", col)
			return d
		}
	}
	// A user-chosen x-axis column must also be present.
	if opt.x != "" && !hasStringColumn(t, opt.x) {
		d.Error = fmt.Sprintf("results missing x label %q", opt.x)
		return d
	}
	data := plot(t, resultCols, opt)

	// TODO(quentin): Give the user control over across vs. plotting in separate graphs, instead of only showing one graph with ns/op for each benchmark.

	if opt.raw {
		// Raw mode: scatter-plot every point, color-coded by
		// "upload-part" so results from the same part share a color.
		data = table.MapTables(data, func(_ table.GroupID, t *table.Table) *table.Table {
			// From http://tristen.ca/hcl-picker/#/hlc/9/1.13/F1796F/B3EC6C
			colors := []string{"#F1796F", "#B3EC6C", "#F67E9D", "#6CEB98", "#E392CB", "#0AE4C6", "#B7ABEC", "#16D7E9", "#75C4F7"}
			colorIdx := 0
			partColors := make(map[string]string)
			styles := make([]string, t.Len())
			for i, part := range t.MustColumn("upload-part").([]string) {
				if _, ok := partColors[part]; !ok {
					// First time seeing this part: assign the next
					// color, cycling through the palette if needed.
					partColors[part] = colors[colorIdx]
					colorIdx++
					if colorIdx >= len(colors) {
						colorIdx = 0
					}
				}
				styles[i] = "color: " + partColors[part]
			}
			return table.NewBuilder(t).Add("style", styles).Done()
		})
		columns := []column{
			{Name: "commit-index"},
			{Name: "result"},
			{Name: "style", Role: "style"},
			{Name: "commit", Role: "tooltip"},
		}
		d.PlotData = tableToJS(data.Table(data.Tables()[0]), columns)
		d.PlotType = "ScatterChart"
		return d
	}

	// Pivot all of the benchmarks into columns of a single table.
	ar := &aggResults{
		Across: "name",
		Values: []string{"filtered normalized mean result", "normalized mean result", "normalized median result", "normalized min result", "normalized max result"},
	}
	data = ggstat.Agg("commit", "branch", "commit-index")(ar.agg).F(data)
	tables := data.Tables()
	infof(ctx, "tables: %v", tables)
	columns := []column{
		{Name: "commit-index"},
		{Name: "commit", Role: "tooltip"},
	}
	for _, prefix := range ar.Prefixes {
		if len(ar.Prefixes) == 1 {
			// A single benchmark: also plot min/max error bars and the median.
			columns = append(columns,
				column{Name: prefix + "/normalized mean result"},
				column{Name: prefix + "/normalized min result", Role: "interval"},
				column{Name: prefix + "/normalized max result", Role: "interval"},
				column{Name: prefix + "/normalized median result"},
			)
		}
		columns = append(columns,
			column{Name: prefix + "/filtered normalized mean result"},
		)
	}
	d.PlotData = tableToJS(data.Table(tables[0]), columns)
	d.PlotType = "LineChart"
	return d
}
// queryToTable converts the result of a Query into a Table for later processing.
// Each label is placed in a column named after the key.
// Each metric is placed in a separate result column named after the unit.
func queryToTable(q *perfdata.Query) (t *table.Table, resultCols []string) {
	var names []string
	labels := make(map[string][]string)
	results := make(map[string][]float64)
	i := 0 // rows consumed so far; used to backfill newly-seen columns
	for q.Next() {
		res := q.Result()
		// TODO(quentin): Handle multiple results with the same name but different NameLabels.
		names = append(names, res.NameLabels["name"])
		// A label first seen on this row gets a column backfilled with
		// empty strings for all earlier rows.
		for k := range res.Labels {
			if labels[k] == nil {
				labels[k] = make([]string, i)
			}
		}
		for k := range labels {
			// Rows lacking a label get the zero value "".
			labels[k] = append(labels[k], res.Labels[k])
		}
		// Parse (value, unit) pairs from the benchmark line, starting
		// at field 2. (The leading fields are presumably the benchmark
		// name and iteration count — confirm against the benchmark
		// text format.)
		f := strings.Fields(res.Content)
		metrics := make(map[string]float64)
		for j := 2; j+2 <= len(f); j += 2 {
			val, err := strconv.ParseFloat(f[j], 64)
			if err != nil {
				// Not a numeric value; skip this pair.
				continue
			}
			unit := f[j+1]
			if results[unit] == nil {
				// New unit: backfill zeros for earlier rows.
				results[unit] = make([]float64, i)
			}
			metrics[unit] = val
		}
		for k := range results {
			// Rows lacking a metric get the zero value 0.
			results[k] = append(results[k], metrics[k])
		}
		i++
	}
	tab := new(table.Builder).Add("name", names)
	for k, v := range labels {
		tab.Add(k, v)
	}
	for k, v := range results {
		tab.Add(k, v)
		resultCols = append(resultCols, k)
	}
	// Report the metric columns in a deterministic order.
	sort.Strings(resultCols)
	return tab.Done(), resultCols
}
// plotOptions controls how plot converts query results into chart data.
type plotOptions struct {
	// x names the column to use for the X axis.
	// If unspecified, "commit" is used.
	x string
	// raw will return the raw points without any averaging/smoothing.
	// The only result column will be "result".
	raw bool
	// correlate will use the string column "upload-part" as an indication that results came from the same machine. Commits present in multiple parts will be used to correlate results.
	correlate bool
}
// plot takes raw benchmark data in t and produces a Grouping object containing filtered, normalized metric results for a graph.
// t must contain the string columns "commit", "commit-time", "branch". resultCols specifies the names of float64 columns containing metric results.
// The returned grouping has columns "commit", "commit-time", "commit-index", "branch", "metric", "normalized min result", "normalized max result", "normalized mean result", "filtered normalized mean result".
// This is roughly the algorithm from github.com/aclements/go-misc/benchplot
func plot(t table.Grouping, resultCols []string, opt plotOptions) table.Grouping {
	nrows := len(table.GroupBy(t, "name").Tables())

	// Turn ordered commit-time into a "commit-index" column.
	if opt.x == "" {
		opt.x = "commit"
	}
	// TODO(quentin): One SortBy call should do this, but
	// sometimes it seems to sort by the second column instead of
	// the first. Do them in separate steps until SortBy is fixed.
	t = table.SortBy(t, opt.x)
	t = table.SortBy(t, "commit-time")
	t = colIndex{col: opt.x}.F(t)

	// Unpivot all of the metrics into one column.
	t = table.Unpivot(t, "metric", "result", resultCols...)

	// TODO(quentin): Let user choose which metric(s) to keep.
	t = table.FilterEq(t, "metric", "ns/op")

	if opt.raw {
		// Raw mode: return the per-point data without aggregation.
		return t
	}

	// Average each result at each commit (but keep columns names
	// the same to keep things easier to read).
	t = ggstat.Agg("commit", "name", "metric", "branch", "commit-index")(ggstat.AggMean("result"), ggstat.AggQuantile("median", .5, "result"), ggstat.AggMin("result"), ggstat.AggMax("result")).F(t)
	y := "mean result"

	// Normalize to earliest commit on master. It's important to
	// do this before the geomean if there are commits missing.
	// Unfortunately, that also means we have to *temporarily*
	// group by name and metric, since the geomean needs to be
	// done on a different grouping.
	t = table.GroupBy(t, "name", "metric")
	t = ggstat.Normalize{X: "branch", By: firstMasterIndex, Cols: []string{"mean result", "median result", "max result", "min result"}, DenomCols: []string{"mean result", "mean result", "mean result", "mean result"}}.F(t)
	y = "normalized " + y
	// The un-normalized columns are no longer needed.
	for _, col := range []string{"mean result", "median result", "max result", "min result"} {
		t = table.Remove(t, col)
	}
	t = table.Ungroup(table.Ungroup(t))

	// Compute geomean for each metric at each commit if there's
	// more than one benchmark.
	if len(table.GroupBy(t, "name").Tables()) > 1 {
		gt := removeNaNs(t, y)
		gt = ggstat.Agg("commit", "metric", "branch", "commit-index")(ggstat.AggGeoMean(y, "normalized median result"), ggstat.AggMin("normalized min result"), ggstat.AggMax("normalized max result")).F(gt)
		gt = table.MapTables(gt, func(_ table.GroupID, t *table.Table) *table.Table {
			// NOTE(review): the leading space in " geomean" presumably
			// sorts it ahead of real benchmark names — confirm.
			return table.NewBuilder(t).AddConst("name", " geomean").Done()
		})
		gt = table.Rename(gt, "geomean "+y, y)
		gt = table.Rename(gt, "geomean normalized median result", "normalized median result")
		gt = table.Rename(gt, "min normalized min result", "normalized min result")
		gt = table.Rename(gt, "max normalized max result", "normalized max result")
		t = table.Concat(t, gt)
		nrows++
	}

	// Filter the data to reduce noise.
	t = table.GroupBy(t, "name", "metric")
	t = kza{y, 15, 3}.F(t)
	y = "filtered " + y
	t = table.Ungroup(table.Ungroup(t))

	return t
}
// hasStringColumn reports whether t has a []string column named col.
func hasStringColumn(t table.Grouping, col string) bool {
	// Inspect only the first table; a missing column yields a nil
	// interface, which falls through to the default case.
	first := t.Table(t.Tables()[0])
	switch first.Column(col).(type) {
	case []string:
		return true
	default:
		return false
	}
}
// aggResults pivots the table, taking the columns in Values and making a new column for each distinct value in Across.
// aggResults{Across: "in", Values: []string{"value1", "value2"}} will reshape a table like
//
//	in value1 value2
//	one 1 2
//	two 3 4
//
// and will turn it into a table like
//
//	one/value1 one/value2 two/value1 two/value2
//	1 2 3 4
//
// Across columns must be []string, and value columns must be []float64.
type aggResults struct {
	// Across is the name of the column whose values are the column prefix.
	Across string
	// Values is the name of the columns to split.
	Values []string
	// Prefixes is filled in after calling agg with the name of each prefix that was found.
	Prefixes []string
}
// agg implements ggstat.Aggregator and allows using an aggResults with
// ggstat.Agg. As a side effect it records every prefix it encounters,
// in sorted order, in a.Prefixes.
func (a *aggResults) agg(input table.Grouping, output *table.Builder) {
	var prefixes []string
	rows := len(input.Tables())
	columns := make(map[string][]float64)
	for i, gid := range input.Tables() {
		// vs[k][j] is the j'th value of the k'th Values column in this group.
		var vs [][]float64
		for _, col := range a.Values {
			vs = append(vs, input.Table(gid).MustColumn(col).([]float64))
		}
		as := input.Table(gid).MustColumn(a.Across).([]string)
		for j, prefix := range as {
			for k, col := range a.Values {
				key := prefix + "/" + col
				if columns[key] == nil {
					if k == 0 {
						// First time we've seen this prefix, track it.
						prefixes = append(prefixes, prefix)
					}
					// Initialize the whole column to NaN so groups
					// lacking this prefix show up as missing data.
					columns[key] = make([]float64, rows)
					for i := range columns[key] {
						columns[key][i] = math.NaN()
					}
				}
				columns[key][i] = vs[k][j]
			}
		}
	}
	sort.Strings(prefixes)
	a.Prefixes = prefixes
	// Emit output columns grouped by prefix, in sorted prefix order.
	for _, prefix := range prefixes {
		for _, col := range a.Values {
			key := prefix + "/" + col
			output.Add(key, columns[key])
		}
	}
}
// firstMasterIndex returns the index of the first commit on master.
// This is used to find the value to normalize against; it is passed
// as the By function to ggstat.Normalize in plot.
func firstMasterIndex(bs []string) int {
	return slice.Index(bs, "master")
}
// colIndex is a gg.Stat that adds a column called "commit-index" sequentially counting unique values of the column "commit".
type colIndex struct {
	// col specifies the string column to assign indices to. If unspecified, "commit" will be used.
	col string
}

// F appends a "commit-index" column to every table in g, numbering
// runs of identical values in ci.col consecutively from 0.
func (ci colIndex) F(g table.Grouping) table.Grouping {
	if ci.col == "" {
		ci.col = "commit"
	}
	return table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {
		indices := make([]int, t.Len())
		prev, next := "", -1
		for i, v := range t.MustColumn(ci.col).([]string) {
			if v != prev {
				// A new value starts the next index.
				next++
				prev = v
			}
			indices[i] = next
		}
		return table.NewBuilder(t).Add("commit-index", indices).Done()
	})
}
// removeNaNs returns a new Grouping with rows containing NaN in col removed.
func removeNaNs(g table.Grouping, col string) table.Grouping {
	keep := func(v float64) bool { return !math.IsNaN(v) }
	return table.Filter(g, keep, col)
}
// kza implements adaptive Kolmogorov-Zurbenko filtering on the data in X.
type kza struct {
	X    string
	M, K int
}

// F appends a "filtered <X>" column to every table in g, holding the
// KZA-smoothed values of column X (window M, K iterations).
func (k kza) F(g table.Grouping) table.Grouping {
	return table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {
		var raw []float64
		slice.Convert(&raw, t.MustColumn(k.X))
		smoothed := AdaptiveKolmogorovZurbenko(raw, k.M, k.K)
		return table.NewBuilder(t).Add("filtered "+k.X, smoothed).Done()
	})
}
// column represents a column in a google.visualization.DataTable
type column struct {
	Name string `json:"id"`
	// Role is the visualization column role, e.g. "tooltip",
	// "interval", or "style" (see the callers in trendQuery).
	Role string `json:"role,omitempty"`
	// These fields are filled in by tableToJS if unspecified.
	Type  string `json:"type"`
	Label string `json:"label"`
}
// tableToJS converts a Table to a javascript literal which can be passed to "new google.visualization.DataTable".
func tableToJS(t *table.Table, columns []column) template.JS {
	var out bytes.Buffer
	fmt.Fprint(&out, "{cols: [")
	var slices []table.Slice
	// Emit the column descriptors, filling in Type and Label from the
	// data when they were left unspecified.
	for i, c := range columns {
		if i > 0 {
			fmt.Fprint(&out, ",\n")
		}
		col := t.Column(c.Name)
		slices = append(slices, col)
		if c.Type == "" {
			switch col.(type) {
			case []string:
				c.Type = "string"
			case []int, []float64:
				c.Type = "number"
			default:
				// Matches the hardcoded string below.
				c.Type = "string"
			}
		}
		if c.Label == "" {
			c.Label = c.Name
		}
		data, err := json.Marshal(c)
		if err != nil {
			panic(err)
		}
		out.Write(data)
	}
	fmt.Fprint(&out, "],\nrows: [")
	// Emit one row object per table row; each cell is rendered as {v: value}.
	for i := 0; i < t.Len(); i++ {
		if i > 0 {
			fmt.Fprint(&out, ",\n")
		}
		fmt.Fprint(&out, "{c:[")
		for j := range columns {
			if j > 0 {
				fmt.Fprint(&out, ", ")
			}
			fmt.Fprint(&out, "{v: ")
			var value []byte
			var err error
			switch column := slices[j].(type) {
			case []string:
				value, err = json.Marshal(column[i])
			case []int:
				value, err = json.Marshal(column[i])
			case []float64:
				value, err = json.Marshal(column[i])
			default:
				// Unsupported column type: emit a placeholder string.
				value = []byte(`"unknown column type"`)
			}
			if err != nil {
				panic(err)
			}
			out.Write(value)
			fmt.Fprint(&out, "}")
		}
		fmt.Fprint(&out, "]}")
	}
	fmt.Fprint(&out, "]}")
	return template.JS(out.String())
}

29
perf/app/trend_test.go Normal file
Просмотреть файл

@ -0,0 +1,29 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package app
import (
"testing"
"github.com/aclements/go-gg/table"
"golang.org/x/build/internal/diff"
)
// TestTableToJS checks that tableToJS renders a small table into the
// exact DataTable literal expected, including type inference
// ("string" for text, "number" for parseable numerics).
func TestTableToJS(t *testing.T) {
	in := table.TableFromStrings(
		[]string{"text", "num"},
		[][]string{
			{"hello", "15.1"},
			{"world", "20"},
		}, true)
	have := tableToJS(in, []column{{Name: "text"}, {Name: "num"}})
	want := `{cols: [{"id":"text","type":"string","label":"text"},
{"id":"num","type":"number","label":"num"}],
rows: [{c:[{v: "hello"}, {v: 15.1}]},
{c:[{v: "world"}, {v: 20}]}]}`
	if d := diff.Diff(string(have), want); d != "" {
		t.Errorf("tableToJS returned wrong JS (- have, + want):\n%s", d)
	}
}

Просмотреть файл

@ -0,0 +1,25 @@
# This file specifies files that are *not* uploaded to Google Cloud Platform
# using gcloud. It follows the same syntax as .gitignore, with the addition of
# "#!include" directives (which insert the entries of the given .gitignore-style
# file at that point).
#
# For more information, run:
# $ gcloud topic gcloudignore
#
.gcloudignore
# If you would like to upload your .git directory, .gitignore file or files
# from your .gitignore file, remove the corresponding line
# below:
.git
.gitignore
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out

14
perf/appengine/README.md Normal file
Просмотреть файл

@ -0,0 +1,14 @@
# perf.golang.org
Deploy:
1. `gcloud app deploy --project=golang-org --no-promote app.yaml`
2. Find the new version in the
[Cloud Console](https://console.cloud.google.com/appengine/versions?project=golang-org&serviceId=perf).
3. Check that the deployed version is working (click the website link in the version list).
4. If all is well, click "Migrate Traffic" to move 100% of the perf.golang.org traffic to the new version.
5. You're done.

55
perf/appengine/app.go Normal file
Просмотреть файл

@ -0,0 +1,55 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This binary contains an App Engine app for perf.golang.org
package main
import (
"log"
"net/http"
"os"
"time"
"golang.org/x/build/perf/app"
"golang.org/x/build/perfdata"
"golang.org/x/net/context"
"google.golang.org/appengine"
)
// mustGetenv returns the value of the environment variable k.
// It panics (via log.Panicf) if the variable is unset or empty.
func mustGetenv(k string) string {
	if v := os.Getenv(k); v != "" {
		return v
	}
	log.Panicf("%s environment variable not set.", k)
	return "" // unreachable: Panicf always panics
}
// appHandler is the default handler, registered to serve "/".
// It creates a new App instance using the appengine Context and then
// dispatches the request to the App. The environment variable
// STORAGE_URL_BASE must be set in app.yaml with the name of the bucket to
// write to.
func appHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	// urlfetch defaults to 5s timeout if the context has no timeout.
	// The underlying request has a 60 second timeout, so we might as well propagate that here.
	// (Why doesn't appengine do that for us?)
	// NOTE(review): ctx is not passed to the App or the request after
	// this point, so the timeout appears unused here — confirm.
	ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()
	// NOTE(review): a fresh App and mux are constructed on every
	// request — presumably cheap; confirm before optimizing.
	app := &app.App{
		BaseDir: "analysis/appengine", // relative to module root
		StorageClient: &perfdata.Client{
			BaseURL:    mustGetenv("STORAGE_URL_BASE"),
			HTTPClient: http.DefaultClient,
		},
	}
	mux := http.NewServeMux()
	app.RegisterOnMux(mux)
	mux.ServeHTTP(w, r)
}
// main routes all paths to appHandler and hands control to the App
// Engine runtime.
func main() {
	http.HandleFunc("/", appHandler)
	appengine.Main()
}

11
perf/appengine/app.yaml Normal file
Просмотреть файл

@ -0,0 +1,11 @@
runtime: go113
service: perf
handlers:
- url: /_ah/remote_api
script: auto
- url: /.*
script: auto
secure: always
env_variables:
STORAGE_URL_BASE: 'https://perfdata.golang.org'

Просмотреть файл

@ -0,0 +1,165 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Performance Result Comparison</title>
<style type="text/css">
#header h1 {
display: inline;
}
#search {
padding: 1em .5em;
width: 100%;
}
input[type="text"] {
font-size: 100%;
}
#results {
border-top: 1px solid black;
}
tr.diff td {
font-size: 80%;
font-family: sans-serif;
vertical-align: top;
}
th.label {
text-align: left;
vertical-align: top;
}
td.count {
text-align: right;
}
#labels {
float: left;
margin-right: 1em;
border-right: 1px solid black;
border-collapse: collapse;
vertical-align: top;
}
#labels tbody {
border-collapse: collapse;
border-bottom: 1px solid black;
}
#labels > tbody > tr:last-child > th, #labels > tbody > tr:last-child > td {
padding-bottom: 1em;
}
#labels tbody tr:first-child th, #benchstat {
padding-top: 1em;
}
#labels tbody.diff tr:first-child th {
padding-top: 1em;
border-collapse: collapse;
border-top: 1px solid black;
}
#labels .diff {
padding-bottom: 1em;
}
#labels .diff table td, #labels .diff .query {
max-width: 20em;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.benchstat tr.configs th {
max-width: 0;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.benchstat { border-collapse: collapse; }
.benchstat th:nth-child(1) { text-align: left; }
.benchstat tbody td:nth-child(1n+2):not(.note) { text-align: right; padding: 0em 1em; }
.benchstat tr:not(.configs) th { border-top: 1px solid #666; border-bottom: 1px solid #ccc; }
.benchstat .nodelta { text-align: center !important; }
.benchstat .better td.delta { font-weight: bold; }
.benchstat .worse td.delta { font-weight: bold; color: #c00; }
</style>
</head>
<body>
<div id="header">
<h1>Go Performance Dashboard</h1>
<a href="/">about</a>
</div>
<div id="search">
<form action="/search">
<input type="text" name="q" value="{{.Q}}" size="120">
<input type="submit" value="Search">
</form>
</div>
<div id="results">
{{if not .Q}}
<p>The Go Performance Dashboard provides a centralized
resource for sharing and analyzing benchmark results. To get
started, upload benchmark results using
<code>go get -u golang.org/x/perf/cmd/benchsave</code>
and
<code>benchsave old.txt new.txt</code>
or upload via the web at
<a href="https://perfdata-dot-golang-org.appspot.com/upload">https://perfdata-dot-golang-org.appspot.com/upload</a>.</p>
{{else}}
{{with .Error}}
<p>{{.}}</p>
{{else}}
<table id="labels">
{{with $cl := .CommonLabels}}
<tbody>
<tr>
<th>label</th><th>common value</th>
</tr>
{{range $label, $value := .}}
<tr>
<th class="label">{{$label}}</th><td>{{with $href := linkify $cl $label}}<a href="{{$href}}" rel="nofollow">{{$value}}</a>{{else}}{{$value}}{{end}}</td>
</tr>
{{end}}
</tbody>
{{end}}
<tbody class="diff">
<tr>
<th>label</th>
<th>values</th>
</tr>
{{range $label, $exists := .Labels}}
<tr class="diff">
<th class="label">{{$label}}</th>
<td>
{{range $index, $group := $.Groups}}
<div class="query">{{$group.Q}}:</div>
<table>
{{with index $group.LabelValues $label}}
{{range .TopN 4}}
<tr>
<td class="count">
{{.Count}}
</td>
<td>
{{if eq .Value ""}}
missing
{{else if eq .Value "…"}}
{{.Value}}
{{else}}
<a href="/search?q={{addToQuery $.Q (printf "%s:%s" $label .Value)}}">
{{printf "%q" .Value}}
</a>
{{end}}
</td>
</tr>
{{end}}
{{end}}
</table>
{{end}}
</td>
</tr>
{{end}}
</tbody>
</table>
<div id="benchstat">
{{.Benchstat}}
</div>
{{end}}
{{end}}
</div>
</body>
</html>

Просмотреть файл

@ -0,0 +1,61 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Go Performance Result Dashboard</title>
<style type="text/css">
#header h1 {
display: inline;
}
#search {
padding: 1em .5em;
width: 100%;
}
input[type="text"] {
font-size: 100%;
}
#results {
border-top: 1px solid black;
}
</style>
</head>
<body>
<div id="header">
<h1>Go Performance Dashboard</h1>
<a href="/">about</a>
</div>
<div id="search">
<form action="/search">
<input type="text" name="q" size="120">
<input type="submit" value="Search">
</form>
</div>
<div id="results">
<p>The Go Performance Dashboard provides a centralized
resource for sharing and analyzing benchmark results. To get
started, upload benchmark results using
<code>go get -u golang.org/x/perf/cmd/benchsave</code>
and
<code>benchsave old.txt new.txt</code>
or upload via the web at
<a href="https://perfdata-dot-golang-org.appspot.com/upload">https://perfdata-dot-golang-org.appspot.com/upload</a>.</p>
{{with .RecentUploads}}
<h2>Recent Uploads</h2>
<table>
<thead>
<tr><th>Records</th><th>Upload</th><th>By</th></tr>
</thead>
<tbody>
{{range .}}
<tr>
<td>{{.Count}}</td>
<td><a href="/search?q=upload:{{.UploadID}}">{{index .LabelValues "upload-time"}}</a></td>
<td>{{.LabelValues.by}}</td>
</tr>
{{end}}
</tbody>
</table>
{{end}}
</div>
</body>
</html>

Просмотреть файл

@ -0,0 +1,74 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Performance Result Comparison</title>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<style type="text/css">
#header h1 {
display: inline;
}
#search {
padding: 1em .5em;
width: 100%;
}
input[type="text"] {
font-size: 100%;
}
#results {
border-top: 1px solid black;
}
</style>
</head>
<body>
<div id="header">
<h1>Go Performance Dashboard</h1>
<a href="/">about</a>
</div>
<div id="search">
<form action="/trend">
<input type="text" name="q" value="{{.Q}}" size="120">
<input type="submit" value="Search">
</form>
</div>
<div id="results">
{{if not .Q}}
<h2>Recent Uploads</h2>
<table>
<tr><th>Upload ID</th><th>trend</th></tr>
{{range .TrendUploads}}
<tr><td><a href="/trend?q=upload:{{.UploadID}}">{{.UploadID}}</a></td><td>{{.LabelValues.trend}}</td></tr>
{{end}}
</table>
{{else}}
{{with .Error}}
<p>{{.}}</p>
{{else}}
<div id="chart" style="height: 600px"></div>
<script type="text/javascript">
google.charts.load('current', {'packages':['corechart']});
google.charts.setOnLoadCallback(draw);
function draw() {
var dt = new google.visualization.DataTable({{.PlotData}});
var options = {
title: 'Benchmark Trend',
hAxis: {
title: 'commit index',
},
vAxis: {
title: 'normalized ns/op',
},
explorer: {
actions: ['dragToZoom', 'rightClickToReset'],
maxZoomIn: 0.05,
},
};
var chart = new google.visualization.{{.PlotType}}(document.getElementById('chart'));
chart.draw(dt, options);
}
</script>
{{end}}
{{end}}
</div>
</body>
</html>

60
perf/localperf/app.go Normal file
Просмотреть файл

@ -0,0 +1,60 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Localperf runs an HTTP server for benchmark analysis.
//
// Usage:
//
// localperf [-addr address] [-perfdata url] [-base_dir ../appengine]
package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
"golang.org/x/build/internal/basedir"
"golang.org/x/build/perf/app"
"golang.org/x/build/perfdata"
)
var (
	// addr is the host:port the analysis UI listens on.
	addr = flag.String("addr", "localhost:8080", "serve HTTP on `address`")
	// perfdataURL points at the storage server that holds raw results.
	perfdataURL = flag.String("perfdata", "https://perfdata.golang.org", "perfdata server base `url`")
	// baseDir locates the directory holding the HTML templates.
	// NOTE(review): the default still searches the pre-move import path
	// golang.org/x/perf/analysis/appengine — confirm whether this should
	// now point at the relocated x/build directory.
	baseDir = flag.String("base_dir", basedir.Find("golang.org/x/perf/analysis/appengine"), "base `directory` for templates")
)

// usage prints a usage message to standard error and exits the
// process with status 2.
func usage() {
	fmt.Fprintf(os.Stderr, `Usage of localperf:
	localperf [flags]
`)
	flag.PrintDefaults()
	os.Exit(2)
}

// main parses flags, validates required configuration, and serves the
// benchmark-analysis UI on *addr until the process is killed.
func main() {
	log.SetPrefix("localperf: ")
	flag.Usage = usage
	flag.Parse()
	// localperf takes no positional arguments; usage() exits.
	if flag.NArg() != 0 {
		flag.Usage()
	}
	// Templates are mandatory; without base_dir the app cannot render.
	if *baseDir == "" {
		log.Print("base_dir is required and could not be automatically found")
		flag.Usage()
	}
	app := &app.App{
		StorageClient: &perfdata.Client{BaseURL: *perfdataURL},
		BaseDir:       *baseDir,
	}
	app.RegisterOnMux(http.DefaultServeMux)
	log.Printf("Listening on %s", *addr)
	log.Fatal(http.ListenAndServe(*addr, nil))
}

55
perfdata/app/app.go Normal file
Просмотреть файл

@ -0,0 +1,55 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package app implements the performance data storage server. Combine
// an App with a database and filesystem to get an HTTP server.
package app
import (
"errors"
"net/http"
"path/filepath"
"golang.org/x/build/perfdata/db"
"golang.org/x/build/perfdata/fs"
)
// App manages the storage server logic. Construct an App instance
// using a literal with DB and FS objects and call RegisterOnMux to
// connect it with an HTTP server.
type App struct {
	// DB is the database used to index and query benchmark records.
	DB *db.DB
	// FS is the filesystem used for permanent storage of uploads.
	FS fs.FS

	// Auth obtains the username for the request.
	// If necessary, it can write its own response (e.g. a
	// redirect) and return ErrResponseWritten.
	Auth func(http.ResponseWriter, *http.Request) (string, error)

	// ViewURLBase will be used to construct a URL to return as
	// "viewurl" in the response from /upload. If it is non-empty,
	// the upload ID will be appended to ViewURLBase.
	ViewURLBase string

	// BaseDir is the directory containing the "template" directory.
	// If empty, the current directory will be used.
	BaseDir string
}

// ErrResponseWritten can be returned by App.Auth to abort the normal /upload handling.
var ErrResponseWritten = errors.New("response written")

// RegisterOnMux registers the app's URLs on mux.
func (a *App) RegisterOnMux(mux *http.ServeMux) {
	// TODO(quentin): Should we just make the App itself be an http.Handler?
	mux.HandleFunc("/", a.index)
	mux.HandleFunc("/upload", a.upload)
	mux.HandleFunc("/search", a.search)
	mux.HandleFunc("/uploads", a.uploads)
}

// index serves the readme (static/index.html under BaseDir) on /.
func (a *App) index(w http.ResponseWriter, r *http.Request) {
	http.ServeFile(w, r, filepath.Join(a.BaseDir, "static/index.html"))
}

24
perfdata/app/appengine.go Normal file
Просмотреть файл

@ -0,0 +1,24 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build appengine
// +build appengine
package app
import (
"net/http"
"golang.org/x/net/context"
"google.golang.org/appengine"
"google.golang.org/appengine/log"
)
// requestContext returns the Context object for a given request.
// On App Engine classic the context must come from
// appengine.NewContext rather than the request itself.
func requestContext(r *http.Request) context.Context {
	return appengine.NewContext(r)
}

// infof and errorf log through the App Engine logging service so
// messages are attributed to the request's context.
var infof = log.Infof
var errorf = log.Errorf

26
perfdata/app/local.go Normal file
Просмотреть файл

@ -0,0 +1,26 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !appengine
// +build !appengine
package app
import (
"log"
"net/http"
"golang.org/x/net/context"
)
// requestContext returns the Context object for a given request.
func requestContext(r *http.Request) context.Context {
return r.Context()
}
// infof logs an informational message via the standard logger. The
// context parameter mirrors the App Engine logging signature and is
// unused here.
func infof(_ context.Context, msg string, v ...interface{}) {
	log.Printf(msg, v...)
}

// errorf logs an error message; outside App Engine there is no
// severity separation, so it is simply infof.
var errorf = infof

95
perfdata/app/query.go Normal file
Просмотреть файл

@ -0,0 +1,95 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package app
import (
"encoding/json"
"net/http"
"strconv"
"golang.org/x/perf/storage/benchfmt"
)
// search serves /search. It runs query q against the index and
// streams the matching records back as plain text in the standard
// benchmark format.
func (a *App) search(w http.ResponseWriter, r *http.Request) {
	ctx := requestContext(r)
	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	q := r.Form.Get("q")
	if q == "" {
		http.Error(w, "missing q parameter", 400)
		return
	}
	query := a.DB.Query(q)
	defer query.Close()
	infof(ctx, "query: %s", query.Debug())
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	bw := benchfmt.NewPrinter(w)
	for query.Next() {
		if err := bw.Print(query.Result()); err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
	}
	// NOTE(review): if the query fails after results have already been
	// streamed, the 200 status is committed and this error text is
	// appended to the body rather than delivered as an HTTP error.
	if err := query.Err(); err != nil {
		errorf(ctx, "query returned error: %v", err)
		http.Error(w, err.Error(), 500)
		return
	}
}
// uploads serves a list of upload IDs on /uploads.
// If the query parameter q is provided, only uploads containing matching
// records are returned. Each line of the JSON response describes one
// upload (count of matching records plus its ID), ordered from most to
// least recent. The limit parameter caps the number of uploads returned
// and defaults to 1000 when absent.
func (a *App) uploads(w http.ResponseWriter, r *http.Request) {
	ctx := requestContext(r)
	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	q := r.Form.Get("q")

	limit := 1000
	if s := r.Form.Get("limit"); s != "" {
		n, err := strconv.Atoi(s)
		if err != nil {
			http.Error(w, "invalid limit parameter", 400)
			return
		}
		limit = n
	}

	res := a.DB.ListUploads(q, r.Form["extra_label"], limit)
	defer res.Close()
	infof(ctx, "query: %s", res.Debug())

	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(w)
	for res.Next() {
		info := res.Info()
		if err := enc.Encode(&info); err != nil {
			errorf(ctx, "failed to encode JSON: %v", err)
			http.Error(w, err.Error(), 500)
			return
		}
	}
	if err := res.Err(); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
}

189
perfdata/app/query_test.go Normal file
Просмотреть файл

@ -0,0 +1,189 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cgo
// +build cgo
package app
import (
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/url"
"reflect"
"testing"
"golang.org/x/build/perfdata"
"golang.org/x/perf/storage/benchfmt"
)
// TestQuery verifies that /search returns exactly the records matching
// a query, and that every result carries the server-added labels
// (upload, upload-part, upload-file, by) plus the uploaded name label.
func TestQuery(t *testing.T) {
	app := createTestApp(t)
	defer app.Close()

	// Write 1024 test results to the database. These results
	// have labels named label0, label1, etc. Each label's value
	// is an integer whose value is (record number) / (1 << label
	// number). So 1 record has each value of label0, 2 records
	// have each value of label1, 4 records have each value of
	// label2, etc. This allows writing queries that match 2^n records.
	status := app.uploadFiles(t, func(mpw *multipart.Writer) {
		w, err := mpw.CreateFormFile("file", "path/1.txt")
		if err != nil {
			t.Errorf("CreateFormFile: %v", err)
		}
		bp := benchfmt.NewPrinter(w)
		for i := 0; i < 1024; i++ {
			r := &benchfmt.Result{Labels: make(map[string]string), NameLabels: make(map[string]string), Content: "BenchmarkName 1 ns/op"}
			for j := uint(0); j < 10; j++ {
				r.Labels[fmt.Sprintf("label%d", j)] = fmt.Sprintf("%d", i/(1<<j))
			}
			r.NameLabels["name"] = "Name"
			if err := bp.Print(r); err != nil {
				t.Fatalf("Print: %v", err)
			}
		}
	})

	tests := []struct {
		q    string
		want []int // expected label0 values of the matching records
	}{
		{"label0:0", []int{0}},
		{"label1:0", []int{0, 1}},
		{"label0:5 name:Name", []int{5}},
		{"label0:0 label0:5", nil},
	}
	for _, test := range tests {
		t.Run("query="+test.q, func(t *testing.T) {
			u := app.srv.URL + "/search?" + url.Values{"q": []string{test.q}}.Encode()
			resp, err := http.Get(u)
			if err != nil {
				t.Fatal(err)
			}
			defer resp.Body.Close()
			if resp.StatusCode != 200 {
				t.Fatalf("get /search: %v", resp.Status)
			}
			br := benchfmt.NewReader(resp.Body)
			for i, num := range test.want {
				if !br.Next() {
					t.Fatalf("#%d: Next() = false, want true (Err() = %v)", i, br.Err())
				}
				r := br.Result()
				if r.Labels["upload"] != status.UploadID {
					t.Errorf("#%d: upload = %q, want %q", i, r.Labels["upload"], status.UploadID)
				}
				if r.Labels["upload-part"] != status.FileIDs[0] {
					t.Errorf("#%d: upload-part = %q, want %q", i, r.Labels["upload-part"], status.FileIDs[0])
				}
				if r.Labels["upload-file"] != "1.txt" {
					t.Errorf("#%d: upload-file = %q, want %q", i, r.Labels["upload-file"], "1.txt")
				}
				if r.Labels["label0"] != fmt.Sprintf("%d", num) {
					t.Errorf("#%d: label0 = %q, want %d", i, r.Labels["label0"], num)
				}
				if r.NameLabels["name"] != "Name" {
					t.Errorf("#%d: name = %q, want %q", i, r.NameLabels["name"], "Name")
				}
				if r.Labels["by"] != "user" {
					// Fixed: the message previously printed
					// r.Labels["uploader"], a label that is never set,
					// instead of the label actually being checked.
					t.Errorf("#%d: by = %q, want %q", i, r.Labels["by"], "user")
				}
			}
			if br.Next() {
				t.Fatalf("Next() = true, want false")
			}
			if err := br.Err(); err != nil {
				t.Errorf("Err() = %v, want nil", err)
			}
		})
	}
}
// TestUploads verifies the /uploads listing endpoint: most-recent-first
// ordering, per-upload record counts, query filtering, and extra_label
// retrieval.
func TestUploads(t *testing.T) {
	app := createTestApp(t)
	defer app.Close()

	// Write 9 uploads to the database. These uploads have 1-9
	// results each, a common label "i" set to the upload number,
	// and a label "j" set to the record number within the upload.
	var uploadIDs []string
	for i := 0; i < 9; i++ {
		status := app.uploadFiles(t, func(mpw *multipart.Writer) {
			w, err := mpw.CreateFormFile("file", "path/1.txt")
			if err != nil {
				t.Errorf("CreateFormFile: %v", err)
			}
			bp := benchfmt.NewPrinter(w)
			for j := 0; j <= i; j++ {
				r := &benchfmt.Result{Labels: map[string]string{"i": fmt.Sprintf("%d", i)}, NameLabels: make(map[string]string), Content: "BenchmarkName 1 ns/op"}
				r.Labels["j"] = fmt.Sprintf("%d", j)
				if err := bp.Print(r); err != nil {
					t.Fatalf("Print: %v", err)
				}
			}
		})
		uploadIDs = append(uploadIDs, status.UploadID)
	}

	tests := []struct {
		q           string
		extraLabels []string
		want        []perfdata.UploadInfo
	}{
		{"", nil, []perfdata.UploadInfo{
			{9, uploadIDs[8], nil}, {8, uploadIDs[7], nil}, {7, uploadIDs[6], nil}, {6, uploadIDs[5], nil}, {5, uploadIDs[4], nil}, {4, uploadIDs[3], nil}, {3, uploadIDs[2], nil}, {2, uploadIDs[1], nil}, {1, uploadIDs[0], nil},
		}},
		{"j:5", nil, []perfdata.UploadInfo{{1, uploadIDs[8], nil}, {1, uploadIDs[7], nil}, {1, uploadIDs[6], nil}, {1, uploadIDs[5], nil}}},
		{"i:5", []string{"i"}, []perfdata.UploadInfo{{6, uploadIDs[5], benchfmt.Labels{"i": "5"}}}},
		{"not:found", nil, nil},
	}
	for _, test := range tests {
		t.Run("query="+test.q, func(t *testing.T) {
			u := app.srv.URL + "/uploads"
			uv := url.Values{}
			if test.q != "" {
				uv["q"] = []string{test.q}
			}
			if test.extraLabels != nil {
				uv["extra_label"] = test.extraLabels
			}
			if len(uv) > 0 {
				u += "?" + uv.Encode()
			}

			resp, err := http.Get(u)
			if err != nil {
				t.Fatal(err)
			}
			defer resp.Body.Close()
			if resp.StatusCode != 200 {
				t.Fatalf("get /uploads: %v", resp.Status)
			}
			dec := json.NewDecoder(resp.Body)
			i := 0
			for {
				var ui perfdata.UploadInfo
				if err := dec.Decode(&ui); err == io.EOF {
					break
				} else if err != nil {
					t.Fatalf("failed to parse UploadInfo: %v", err)
				}
				// Fixed off-by-one: the guard was "i > len(test.want)",
				// which allowed i == len(test.want) to fall through and
				// panic on the out-of-range test.want[i] below instead
				// of failing the test cleanly.
				if i >= len(test.want) {
					t.Fatalf("too many responses: have %d+ want %d", i, len(test.want))
				}
				if !reflect.DeepEqual(ui, test.want[i]) {
					t.Errorf("uploadinfo = %#v, want %#v", ui, test.want[i])
				}
				i++
			}
			if i < len(test.want) {
				t.Fatalf("missing responses: have %d want %d", i, len(test.want))
			}
		})
	}
}

219
perfdata/app/upload.go Normal file
Просмотреть файл

@ -0,0 +1,219 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package app
import (
"encoding/json"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/url"
"path/filepath"
"sort"
"strings"
"time"
"golang.org/x/net/context"
"golang.org/x/perf/storage/benchfmt"
"golang.org/x/build/perfdata/db"
)
// upload is the handler for the /upload endpoint. It serves a form on
// GET requests and processes files in a multipart/x-form-data POST
// request.
//
// Failures are reported as HTTP errors with a plain-text message;
// success produces a JSON-encoded uploadStatus.
func (a *App) upload(w http.ResponseWriter, r *http.Request) {
	ctx := requestContext(r)

	// Authenticate first. Auth may write its own response (e.g. a
	// login redirect), which it signals with ErrResponseWritten.
	user, err := a.Auth(w, r)
	switch {
	case err == ErrResponseWritten:
		return
	case err != nil:
		errorf(ctx, "%v", err)
		http.Error(w, err.Error(), 500)
		return
	}

	if r.Method == http.MethodGet {
		http.ServeFile(w, r, filepath.Join(a.BaseDir, "static/upload.html"))
		return
	}
	if r.Method != http.MethodPost {
		http.Error(w, "/upload must be called as a POST request", http.StatusMethodNotAllowed)
		return
	}

	// We use r.MultipartReader instead of r.ParseForm to avoid
	// storing uploaded data in memory.
	mr, err := r.MultipartReader()
	if err != nil {
		errorf(ctx, "%v", err)
		http.Error(w, err.Error(), 500)
		return
	}

	result, err := a.processUpload(ctx, user, mr)
	if err != nil {
		errorf(ctx, "%v", err)
		http.Error(w, err.Error(), 500)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(result); err != nil {
		errorf(ctx, "%v", err)
		http.Error(w, err.Error(), 500)
		return
	}
}
// uploadStatus is the response to an /upload POST served as JSON.
type uploadStatus struct {
	// UploadID is the upload ID assigned to the upload.
	UploadID string `json:"uploadid"`
	// FileIDs is the list of file IDs assigned to the files in the upload.
	FileIDs []string `json:"fileids"`
	// ViewURL is a URL that can be used to interactively view the upload.
	ViewURL string `json:"viewurl,omitempty"`
}

// processUpload takes one or more files from a multipart.Reader,
// writes them to the filesystem, and indexes their content.
// The upload is committed only if every part is stored and indexed
// successfully; otherwise the deferred Abort discards it.
func (a *App) processUpload(ctx context.Context, user string, mr *multipart.Reader) (*uploadStatus, error) {
	var upload *db.Upload
	var fileids []string

	// One timestamp labels every file in this upload.
	uploadtime := time.Now().UTC().Format(time.RFC3339)

	for i := 0; ; i++ {
		p, err := mr.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}

		name := p.FormName()
		// A "commit" form field is tolerated and ignored.
		if name == "commit" {
			continue
		}
		if name != "file" {
			return nil, fmt.Errorf("unexpected field %q", name)
		}

		// Lazily create the upload on the first file part. The
		// deferred Abort is disarmed by setting upload to nil after a
		// successful Commit below.
		if upload == nil {
			var err error
			upload, err = a.DB.NewUpload(ctx)
			if err != nil {
				return nil, err
			}
			defer func() {
				if upload != nil {
					upload.Abort()
				}
			}()
		}

		// The incoming file needs to be stored in Cloud
		// Storage and it also needs to be indexed. If the file
		// is invalid (contains no valid records) it needs to
		// be rejected and the Cloud Storage upload aborted.

		meta := map[string]string{
			"upload":      upload.ID,
			"upload-part": fmt.Sprintf("%s/%d", upload.ID, i),
			"upload-time": uploadtime,
		}
		name = p.FileName()
		// Strip any client-supplied directory components, keeping
		// only the base file name.
		if slash := strings.LastIndexAny(name, `/\`); slash >= 0 {
			name = name[slash+1:]
		}
		if name != "" {
			meta["upload-file"] = name
		}
		if user != "" {
			meta["by"] = user
		}

		// We need to do two things with the incoming data:
		// - Write it to permanent storage via a.FS
		// - Write index records to a.DB
		// AND if anything fails, attempt to clean up both the
		// FS and the index records.

		if err := a.indexFile(ctx, upload, p, meta); err != nil {
			return nil, err
		}

		fileids = append(fileids, meta["upload-part"])
	}

	if upload == nil {
		return nil, errors.New("no files processed")
	}
	if err := upload.Commit(); err != nil {
		return nil, err
	}

	status := &uploadStatus{UploadID: upload.ID, FileIDs: fileids}
	if a.ViewURLBase != "" {
		status.ViewURL = a.ViewURLBase + url.QueryEscape(upload.ID)
	}

	// Disarm the deferred Abort now that the upload is committed.
	upload = nil
	return status, nil
}
// indexFile stores the contents of p to a.FS at a path derived from
// meta["upload-part"], prefixed by the metadata as sorted "key: value"
// lines, while simultaneously indexing each benchmark record into
// upload. The named return err is inspected by the deferred function
// so a failed index aborts the FS write via CloseWithError rather
// than committing a half-written file.
func (a *App) indexFile(ctx context.Context, upload *db.Upload, p io.Reader, meta map[string]string) (err error) {
	path := fmt.Sprintf("uploads/%s.txt", meta["upload-part"])
	fw, err := a.FS.NewWriter(ctx, path, meta)
	if err != nil {
		return err
	}
	defer func() {
		start := time.Now()
		if err != nil {
			// Propagate the failure so the stored file is discarded.
			fw.CloseWithError(err)
		} else {
			err = fw.Close()
		}
		infof(ctx, "Close(%q) took %.2f seconds", path, time.Since(start).Seconds())
	}()

	// Write the metadata header in sorted key order for determinism.
	var keys []string
	for k := range meta {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		if _, err := fmt.Fprintf(fw, "%s: %s\n", k, meta[k]); err != nil {
			return err
		}
	}
	// Write a blank line to separate metadata from user-generated content.
	fmt.Fprintf(fw, "\n")

	// Tee the upload stream: bytes go to storage while the benchfmt
	// reader parses them for indexing.
	// TODO(quentin): Add a separate goroutine and buffer for writes to fw?
	tr := io.TeeReader(p, fw)
	br := benchfmt.NewReader(tr)
	br.AddLabels(meta)
	i := 0
	for br.Next() {
		i++
		if err := upload.InsertRecord(br.Result()); err != nil {
			return err
		}
	}
	if err := br.Err(); err != nil {
		return err
	}
	// Reject files containing no benchmark records; the deferred
	// function above will then abort the FS write.
	if i == 0 {
		return errors.New("no valid benchmark lines found")
	}
	return nil
}

126
perfdata/app/upload_test.go Normal file
Просмотреть файл

@ -0,0 +1,126 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cgo
// +build cgo
package app
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"time"
"golang.org/x/build/perfdata/db"
"golang.org/x/build/perfdata/db/dbtest"
_ "golang.org/x/build/perfdata/db/sqlite3"
"golang.org/x/build/perfdata/fs"
)
// testApp bundles an App with the in-memory database, filesystem, and
// HTTP test server it is wired to.
type testApp struct {
	db        *db.DB
	dbCleanup func()
	fs        *fs.MemFS
	app       *App
	srv       *httptest.Server
}

// Close releases the test database and shuts down the test server.
func (app *testApp) Close() {
	app.dbCleanup()
	app.srv.Close()
}

// createTestApp returns a testApp corresponding to a new app
// serving from an in-memory database and file system on an
// isolated test HTTP server.
//
// When finished with app, the caller must call app.Close().
func createTestApp(t *testing.T) *testApp {
	db, cleanup := dbtest.NewDB(t)
	fs := fs.NewMemFS()
	app := &App{
		DB: db,
		FS: fs,
		// Auth always succeeds as a fixed "user" in tests.
		Auth:        func(http.ResponseWriter, *http.Request) (string, error) { return "user", nil },
		ViewURLBase: "view:",
	}
	mux := http.NewServeMux()
	app.RegisterOnMux(mux)
	srv := httptest.NewServer(mux)
	return &testApp{db, cleanup, fs, app, srv}
}

// uploadFiles calls the /upload endpoint and executes f in a new
// goroutine to write files to the POST request. It fails the test on
// any HTTP or decoding error and returns the parsed upload status.
func (app *testApp) uploadFiles(t *testing.T, f func(*multipart.Writer)) *uploadStatus {
	// Pipe the multipart body so the request streams instead of
	// buffering the whole upload in memory.
	pr, pw := io.Pipe()
	mpw := multipart.NewWriter(pw)
	go func() {
		defer pw.Close()
		defer mpw.Close()
		f(mpw)
	}()
	resp, err := http.Post(app.srv.URL+"/upload", mpw.FormDataContentType(), pr)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		t.Fatalf("post /upload: %v", resp.Status)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("reading /upload response: %v", err)
	}
	t.Logf("/upload response:\n%s", body)
	status := &uploadStatus{}
	if err := json.Unmarshal(body, status); err != nil {
		t.Fatalf("unmarshaling /upload response: %v", err)
	}
	return status
}
// TestUpload exercises the happy path of a single-file upload: the
// assigned upload ID is date-based, the file ID and view URL derive
// from it, and exactly one file is written to the backing filesystem.
func TestUpload(t *testing.T) {
	app := createTestApp(t)
	defer app.Close()

	// Upload IDs have the form "YYYYMMDD.N"; the first upload gets 1.
	// NOTE(review): this can flake if the UTC date rolls over between
	// here and the server handling the upload.
	wantID := time.Now().UTC().Format("20060102.") + "1"

	status := app.uploadFiles(t, func(mpw *multipart.Writer) {
		w, err := mpw.CreateFormFile("file", "1.txt")
		if err != nil {
			t.Errorf("CreateFormFile: %v", err)
		}
		fmt.Fprintf(w, "key: value\nBenchmarkOne 5 ns/op\nkey:value2\nBenchmarkTwo 10 ns/op\n")
	})

	if status.UploadID != wantID {
		t.Errorf("uploadid = %q, want %q", status.UploadID, wantID)
	}
	if have, want := status.FileIDs, []string{wantID + "/0"}; !reflect.DeepEqual(have, want) {
		t.Errorf("fileids = %v, want %v", have, want)
	}
	if want := "view:" + wantID; status.ViewURL != want {
		t.Errorf("viewurl = %q, want %q", status.ViewURL, want)
	}
	if len(app.fs.Files()) != 1 {
		t.Errorf("/upload wrote %d files, want 1", len(app.fs.Files()))
	}
}

Просмотреть файл

@ -0,0 +1,25 @@
# This file specifies files that are *not* uploaded to Google Cloud Platform
# using gcloud. It follows the same syntax as .gitignore, with the addition of
# "#!include" directives (which insert the entries of the given .gitignore-style
# file at that point).
#
# For more information, run:
# $ gcloud topic gcloudignore
#
.gcloudignore
# If you would like to upload your .git directory, .gitignore file or files
# from your .gitignore file, remove the corresponding line
# below:
.git
.gitignore
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out

Просмотреть файл

@ -0,0 +1,7 @@
# perfdata.golang.org
Deploy:
```
gcloud app deploy --project=golang-org app.yaml
```

156
perfdata/appengine/app.go Normal file
Просмотреть файл

@ -0,0 +1,156 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This binary contains an App Engine app for perfdata.golang.org
package main
import (
"errors"
"fmt"
"log"
"net/http"
"os"
"strings"
"time"
_ "github.com/go-sql-driver/mysql"
"golang.org/x/net/context"
"golang.org/x/build/perfdata/app"
"golang.org/x/build/perfdata/db"
"golang.org/x/build/perfdata/fs/gcs"
oauth2 "google.golang.org/api/oauth2/v2"
"google.golang.org/appengine"
aelog "google.golang.org/appengine/log"
"google.golang.org/appengine/user"
)
// connectDB returns a DB initialized from the environment variables
// set in app.yaml. CLOUDSQL_CONNECTION_NAME, CLOUDSQL_USER, and
// CLOUDSQL_DATABASE must be set to point to the Cloud SQL instance.
// CLOUDSQL_PASSWORD can be set if needed.
func connectDB() (*db.DB, error) {
	var (
		connectionName = mustGetenv("CLOUDSQL_CONNECTION_NAME")
		user           = mustGetenv("CLOUDSQL_USER")
		password       = os.Getenv("CLOUDSQL_PASSWORD") // NOTE: password may be empty
		dbName         = mustGetenv("CLOUDSQL_DATABASE")
		socket         = os.Getenv("CLOUDSQL_SOCKET_PREFIX")
	)
	// /cloudsql is used on App Engine.
	if socket == "" {
		socket = "/cloudsql"
	}
	return db.OpenSQL("mysql", fmt.Sprintf("%s:%s@unix(%s/%s)/%s", user, password, socket, connectionName, dbName))
}
func mustGetenv(k string) string {
v := os.Getenv(k)
if v == "" {
log.Panicf("%s environment variable not set.", k)
}
return v
}
// auth authenticates the request and returns the user's e-mail
// address. Anonymous users are redirected to the App Engine login
// page, in which case app.ErrResponseWritten is returned so the
// caller stops handling the request.
func auth(w http.ResponseWriter, r *http.Request) (string, error) {
	ctx := appengine.NewContext(r)
	u, err := reqUser(ctx, r)
	if err != nil {
		return "", err
	}
	if u == "" {
		url, err := user.LoginURL(ctx, r.URL.String())
		if err != nil {
			return "", err
		}
		http.Redirect(w, r, url, http.StatusFound)
		return "", app.ErrResponseWritten
	}
	return u, nil
}

// reqUser gets the username from the request, trying AE user authentication, AE OAuth authentication, and Google OAuth authentication, in that order.
// If the request contains no authentication, "", nil is returned.
// If the request contains bogus authentication, an error is returned.
func reqUser(ctx context.Context, r *http.Request) (string, error) {
	u := user.Current(ctx)
	if u != nil {
		return u.Email, nil
	}
	// No Authorization header at all means anonymous, not an error.
	if r.Header.Get("Authorization") == "" {
		return "", nil
	}
	u, err := user.CurrentOAuth(ctx, "https://www.googleapis.com/auth/userinfo.email")
	if err == nil {
		return u.Email, nil
	}
	// Fall back to verifying the bearer token directly; this handles
	// service-account tokens that user.CurrentOAuth rejects.
	return oauthServiceUser(ctx, r)
}
// oauthServiceUser authenticates the OAuth bearer token in r's headers
// against Google's tokeninfo endpoint and returns the verified e-mail
// address. This path exists because user.CurrentOAuth does not work
// when the token belongs to a service account.
func oauthServiceUser(ctx context.Context, r *http.Request) (string, error) {
	const prefix = "Bearer "
	tok := r.Header.Get("Authorization")
	if !strings.HasPrefix(tok, prefix) {
		return "", errors.New("unknown Authorization header")
	}
	tok = strings.TrimPrefix(tok, prefix)

	svc, err := oauth2.New(http.DefaultClient)
	if err != nil {
		return "", err
	}
	ti, err := svc.Tokeninfo().AccessToken(tok).Do()
	if err != nil {
		return "", err
	}
	if !ti.VerifiedEmail || ti.Email == "" {
		return "", errors.New("token does not contain verified e-mail address")
	}
	return ti.Email, nil
}
// appHandler is the default handler, registered to serve "/".
// It creates a new App instance using the appengine Context and then
// dispatches the request to the App. The environment variable
// GCS_BUCKET must be set in app.yaml with the name of the bucket to
// write to. PERFDATA_VIEW_URL_BASE may be set to the URL that should
// be supplied in /upload responses.
func appHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	// App Engine does not return a context with a deadline set,
	// even though the request does have a deadline. urlfetch uses
	// a 5s default timeout if the context does not have a
	// deadline, so explicitly set a deadline to match the App
	// Engine timeout.
	ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()
	// GCS clients need to be constructed with an AppEngine
	// context, so we can't actually make the App until the
	// request comes in.
	// TODO(quentin): Figure out if there's a way to construct the
	// app and clients once, in init(), instead of on every request.
	db, err := connectDB()
	if err != nil {
		aelog.Errorf(ctx, "connectDB: %v", err)
		http.Error(w, err.Error(), 500)
		return
	}
	defer db.Close()

	fs, err := gcs.NewFS(ctx, mustGetenv("GCS_BUCKET"))
	if err != nil {
		aelog.Errorf(ctx, "gcs.NewFS: %v", err)
		http.Error(w, err.Error(), 500)
		return
	}

	// A fresh mux per request keeps handler registration scoped to
	// this per-request App instance.
	mux := http.NewServeMux()
	app := &app.App{DB: db, FS: fs, Auth: auth, ViewURLBase: os.Getenv("PERFDATA_VIEW_URL_BASE")}
	app.RegisterOnMux(mux)
	mux.ServeHTTP(w, r)
}

// main registers appHandler for all paths and starts the App Engine
// runtime.
func main() {
	http.HandleFunc("/", appHandler)
	appengine.Main()
}

Просмотреть файл

@ -0,0 +1,17 @@
runtime: go113
service: perfdata
instance_class: F4_HIGHMEM
handlers:
- url: /_ah/remote_api
script: auto
- url: /.*
script: auto
secure: always
env_variables:
CLOUDSQL_CONNECTION_NAME: 'golang-org:us-central1:golang-org'
CLOUDSQL_USER: 'root'
CLOUDSQL_PASSWORD: ''
CLOUDSQL_DATABASE: 'perfdata'
GCS_BUCKET: 'golang-perfdata'
PERFDATA_VIEW_URL_BASE: 'https://perf.golang.org/search?q=upload:'

Просмотреть файл

@ -0,0 +1,55 @@
<!DOCTYPE html>
<html>
<head>
<title>Go Performance Data Server</title>
</head>
<body>
<h1>Go Performance Data Server</h1>
<p>The Go Performance Data Server allows upload and querying of benchmark results in the <a href="https://github.com/golang/proposal/blob/master/design/14313-benchmark-format.md">standard benchmark data format</a>. It provides a RESTful API to upload benchmark results and query individual results.</p>
<h2>API Documentation</h2>
<h3>POST /upload</h3>
<p>A POST request to this URL with multipart/form-data contents. The form should contain a single field, "file", and the other MIME components are the uploaded files in benchmark format. The request is authenticated with OAuth. Upon success, it will return a JSON body that identifies the uploaded records:</p>
<pre>
{
"uploadid": "arbitrary-string",
"fileids": [
"arbitrary-string-1",
"arbitrary-string-2"
],
"viewurl": "https://foo/bar",
}
</pre>
<p>The upload ID may be used in a query as "upload:$uploadid" to find the uploaded data, and each file ID may be used in a query as "upload-part:$fileid". The view URL is optional and if present points to a human-readable page for analysing the uploaded benchmark data.</p>
<p>Errors will be returned as HTTP errors with a plain text error message.</p>
<p>As a convenience for testing, GET on /upload will render an HTML form that can be used for initiating an upload.</p>
<h3>GET /search?q=$search</h3>
<p>A GET request to this URL will return a text file with synthesized benchmark results matching the search. The search string contains space-separated "key:value" pairs which limits the results to only records containing those exact fields. Every "key:value" pair is ANDed together, and each value must be matched exactly, with no regexes or substring matches supported. The operators "&gt;" and "&lt;" may be used instead of ":" to perform a range query. Example searches:</p>
<ul>
<li>by:rsc pkg:compress/flate commit:1234</li>
<li>upload-part:4567</li>
<li>upload:123</li>
<li>commit-time&gt;2016-12-01</li>
</ul>
<h3>GET /uploads?q=$search&amp;extra_label=$label&amp;limit=$limit</h3>
<p>A GET request to this URL returns a list of the most recent <code>$limit</code> uploads that match the search string. If the <code>q</code> parameter is omitted, all uploads will be returned. If the <code>limit</code> parameter is omitted, a server-specified limit is used. If the <code>extra_label</code> parameter is supplied, an arbitrary value for that label will be chosen from the upload's records. (Therefore, this is most useful for labels that do not vary across the upload, such as "by" or "upload-time".)</p>
<p>The result of this query is streaming JSON (readable using <a href="https://godoc.org/encoding/json#NewDecoder">json.NewDecoder</a>), with one JSON entity per upload:</p>
<pre>
{
"Count": 10,
"UploadID": "arbitrary-string",
"LabelValues": {
"by": "user@email.com",
"upload-time": "2006-01-02T15:04:05Z",
}
}
</pre>
</body>
</html>

Просмотреть файл

@ -0,0 +1,13 @@
<!DOCTYPE html>
<html>
<head>
<title>Upload Performance Results</title>
</head>
<body>
<p>Upload one or more <a href="https://github.com/golang/proposal/blob/master/design/14313-benchmark-format.md">benchmark files</a>.</p>
<form method="post" enctype="multipart/form-data">
<label>File: <input type="file" name="file" multiple></label><br>
<input type="submit" value="Upload">
</form>
</body>
</html>

343
perfdata/client.go Normal file
Просмотреть файл

@ -0,0 +1,343 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package perfdata contains a client for the performance data storage server.
package perfdata
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"golang.org/x/perf/storage/benchfmt"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
// A Client issues queries to a performance data storage server.
// It is safe to use from multiple goroutines simultaneously.
type Client struct {
// BaseURL is the base URL of the storage server.
BaseURL string
// HTTPClient is the HTTP client for sending requests. If nil, http.DefaultClient will be used.
HTTPClient *http.Client
}
// httpClient returns the http.Client to use for requests.
func (c *Client) httpClient() *http.Client {
if c.HTTPClient != nil {
return c.HTTPClient
}
return http.DefaultClient
}
// Query searches for results matching the given query string.
//
// The query string is first parsed into quoted words (as in the shell)
// and then each word must be formatted as one of the following:
// key:value - exact match on label "key" = "value"
// key>value - value greater than (useful for dates)
// key<value - value less than (also useful for dates)
func (c *Client) Query(ctx context.Context, q string) *Query {
	hc := c.httpClient()
	resp, err := ctxhttp.Get(ctx, hc, c.BaseURL+"/search?"+url.Values{"q": []string{q}}.Encode())
	if err != nil {
		return &Query{err: err}
	}
	// A non-200 status carries a plain-text error message in the body.
	if resp.StatusCode != 200 {
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return &Query{err: err}
		}
		return &Query{err: fmt.Errorf("%s", body)}
	}
	// On success the Query takes ownership of resp.Body; it is
	// released by Query.Close.
	br := benchfmt.NewReader(resp.Body)
	return &Query{br: br, body: resp.Body}
}

// A Query allows iteration over the results of a search query.
// Use Next to advance through the results, making sure to call Close when done:
//
//	q := client.Query("key:value")
//	defer q.Close()
//	for q.Next() {
//		res := q.Result()
//		...
//	}
//	if err = q.Err(); err != nil {
//		// handle error encountered during query
//	}
type Query struct {
	// br parses the streamed benchmark-format response.
	br *benchfmt.Reader
	// body is the response body, closed by Close.
	body io.ReadCloser
	// err records a construction-time failure; when set, the other
	// fields are nil.
	err error
}

// Next prepares the next result for reading with the Result
// method. It returns false when there are no more results, either by
// reaching the end of the input or an error.
func (q *Query) Next() bool {
	if q.err != nil {
		return false
	}
	return q.br.Next()
}

// Result returns the most recent result generated by a call to Next.
func (q *Query) Result() *benchfmt.Result {
	return q.br.Result()
}

// Err returns the first error encountered during the query.
func (q *Query) Err() error {
	if q.err != nil {
		return q.err
	}
	return q.br.Err()
}

// Close frees resources associated with the query.
// It is safe to call more than once; later calls are no-ops.
func (q *Query) Close() error {
	if q.body != nil {
		q.body.Close()
		q.body = nil
	}
	return q.err
}

// UploadInfo represents an upload summary.
type UploadInfo struct {
	// Count is the number of records matching the query in this upload.
	Count int
	// UploadID identifies the upload.
	UploadID string
	// LabelValues holds the extra labels requested via extra_label.
	LabelValues benchfmt.Labels `json:",omitempty"`
}
// ListUploads searches for uploads containing results matching the given query string.
// The query may be empty, in which case all uploads will be returned.
// extraLabels specifies other labels to be retrieved.
// If limit is 0, no limit will be provided to the server.
// The uploads are returned starting with the most recent upload.
func (c *Client) ListUploads(ctx context.Context, q string, extraLabels []string, limit int) *UploadList {
	hc := c.httpClient()
	v := url.Values{"extra_label": extraLabels}
	if q != "" {
		v["q"] = []string{q}
	}
	if limit != 0 {
		v["limit"] = []string{fmt.Sprintf("%d", limit)}
	}
	u := c.BaseURL + "/uploads"
	if len(v) > 0 {
		u += "?" + v.Encode()
	}
	resp, err := ctxhttp.Get(ctx, hc, u)
	if err != nil {
		return &UploadList{err: err}
	}
	if resp.StatusCode != 200 {
		// Fixed: close the body on the error path. Previously it was
		// read but never closed here, leaking the connection; the
		// success path hands ownership of resp.Body to the returned
		// UploadList, whose Close method releases it (mirroring Query).
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return &UploadList{err: err}
		}
		return &UploadList{err: fmt.Errorf("%s", body)}
	}
	return &UploadList{body: resp.Body, dec: json.NewDecoder(resp.Body)}
}
// UploadList is the result of ListUploads.
// Use Next to advance through the rows, making sure to call Close when done:
//
//	q := client.ListUploads(ctx, "key:value", nil, 0)
//	defer q.Close()
//	for q.Next() {
//		info := q.Info()
//		...
//	}
//	err = q.Err() // get any error encountered during iteration
//	...
type UploadList struct {
	body io.Closer     // response body; closed by Close
	dec  *json.Decoder // decodes the stream of JSON upload summaries
	// from last call to Next
	ui  UploadInfo
	err error
}
// Next prepares the next result for reading with the Info
// method. It returns false when there are no more results, either by
// reaching the end of the input or an error.
func (ul *UploadList) Next() bool {
	if ul.err != nil {
		return false
	}
	// Clear UploadInfo before decoding new value.
	ul.ui = UploadInfo{}
	ul.err = ul.dec.Decode(&ul.ui)
	return ul.err == nil
}

// Info returns the most recent UploadInfo generated by a call to Next.
func (ul *UploadList) Info() UploadInfo {
	return ul.ui
}

// Err returns the error state of the query.
// A clean end of stream (io.EOF) is not reported as an error.
func (ul *UploadList) Err() error {
	if ul.err == io.EOF {
		return nil
	}
	return ul.err
}

// Close frees resources associated with the query.
func (ul *UploadList) Close() error {
	if ul.body != nil {
		err := ul.body.Close()
		ul.body = nil
		return err
	}
	return ul.Err()
}
// NewUpload starts a new upload to the storage server.
// The upload must have Abort or Commit called on it.
// If the server requires authentication for uploads, c.HTTPClient should be set to the result of oauth2.NewClient.
func (c *Client) NewUpload(ctx context.Context) *Upload {
	hc := c.httpClient()

	// The multipart body is streamed to the server through a pipe as
	// the caller writes files via CreateFile.
	pr, pw := io.Pipe()
	mpw := multipart.NewWriter(pw)

	req, err := http.NewRequest("POST", c.BaseURL+"/upload", pr)
	if err != nil {
		return &Upload{err: err}
	}
	req.Header.Set("Content-Type", mpw.FormDataContentType())
	req.Header.Set("User-Agent", "golang.org/x/build/perfdata")
	errCh := make(chan error)
	u := &Upload{pw: pw, mpw: mpw, errCh: errCh}
	go func() {
		// Exactly one value is sent on errCh; Commit/Abort receive it.
		resp, err := ctxhttp.Do(ctx, hc, req)
		if err != nil {
			errCh <- err
			return
		}
		defer resp.Body.Close()
		if resp.StatusCode != 200 {
			body, _ := ioutil.ReadAll(resp.Body)
			errCh <- fmt.Errorf("upload failed: %v\n%s", resp.Status, body)
			return
		}
		status := &UploadStatus{}
		if err := json.NewDecoder(resp.Body).Decode(status); err != nil {
			errCh <- err
			// Fix: return here. Previously the goroutine fell through
			// and blocked forever sending nil on the unbuffered errCh
			// (the receiver only reads once), leaking the goroutine
			// and racing on u.status.
			return
		}
		u.status = status
		errCh <- nil
	}()
	return u
}
// UploadStatus contains information about a successful upload.
type UploadStatus struct {
	// UploadID is the upload ID assigned to the upload.
	UploadID string `json:"uploadid"`
	// FileIDs is the list of file IDs assigned to the files in the upload.
	FileIDs []string `json:"fileids"`
	// ViewURL is a server-supplied URL to view the results.
	ViewURL string `json:"viewurl"`
}

// An Upload is an in-progress upload.
// Use CreateFile to upload one or more files, then call Commit or Abort.
//
//	u := client.NewUpload()
//	w, err := u.CreateFile()
//	if err != nil {
//		u.Abort()
//		return err
//	}
//	fmt.Fprintf(w, "BenchmarkResult 1 1 ns/op\n")
//	if err := u.Commit(); err != nil {
//		return err
//	}
type Upload struct {
	pw     io.WriteCloser    // write end of the pipe feeding the request body
	mpw    *multipart.Writer // builds the multipart form on top of pw
	status *UploadStatus     // set by the request goroutine on success
	// errCh is used to report the success/failure of the HTTP request
	errCh chan error
	// err is the first observed error; it is only accessed from user-called methods for thread safety
	err error
}
// CreateFile adds a new file with the given name to the upload and
// returns a Writer for its contents.
// The Writer may be used until CreateFile is called again.
// name may be the empty string if the file does not have a name.
func (u *Upload) CreateFile(name string) (io.Writer, error) {
	if u.err != nil {
		return nil, u.err
	}
	return u.mpw.CreateFormFile("file", name)
}
// Commit attempts to commit the upload. It finishes the multipart
// body, closes the pipe feeding the request, and then waits for the
// server's response.
func (u *Upload) Commit() (*UploadStatus, error) {
	if u.err != nil {
		return nil, u.err
	}
	// Writing the "commit" field tells the server to keep the upload.
	if u.err = u.mpw.WriteField("commit", "1"); u.err != nil {
		u.Abort()
		return nil, u.err
	}
	if u.err = u.mpw.Close(); u.err != nil {
		u.Abort()
		return nil, u.err
	}
	u.mpw = nil
	if u.err = u.pw.Close(); u.err != nil {
		u.Abort()
		return nil, u.err
	}
	u.pw = nil
	// Wait for the background request goroutine started by NewUpload.
	u.err = <-u.errCh
	u.errCh = nil
	if u.err != nil {
		return nil, u.err
	}
	return u.status, nil
}
// Abort attempts to cancel the in-progress upload.
func (u *Upload) Abort() error {
	if u.mpw != nil {
		u.mpw.WriteField("abort", "1")
		// Writing the 'abort' field will cause the server to send back an error response.
		u.mpw.Close()
		u.mpw = nil
	}
	if u.pw != nil {
		u.pw.Close()
		u.pw = nil
	}
	// Wait for the background request goroutine; keep the first error seen.
	err := <-u.errCh
	u.errCh = nil
	if u.err == nil {
		u.err = err
	}
	return u.err
}

207
perfdata/client_test.go Normal file
Просмотреть файл

@ -0,0 +1,207 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package perfdata
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"golang.org/x/perf/storage/benchfmt"
"golang.org/x/net/context"
"golang.org/x/build/internal/diff"
)
// TestQueryError verifies that a server error response surfaces
// through Next/Err rather than yielding results.
func TestQueryError(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "invalid query", 500)
	}))
	defer ts.Close()

	c := &Client{BaseURL: ts.URL}

	q := c.Query(context.Background(), "invalid query")
	defer q.Close()
	if q.Next() {
		t.Error("Next = true, want false")
	}
	if q.Err() == nil {
		t.Error("Err = nil, want error")
	}
}
// TestQuery verifies that Query URL-encodes the query string and
// streams back the benchmark results served by the endpoint.
func TestQuery(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if have, want := r.URL.RequestURI(), "/search?q=key1%3Avalue+key2%3Avalue"; have != want {
			t.Errorf("RequestURI = %q, want %q", have, want)
		}
		fmt.Fprintf(w, "key: value\nBenchmarkOne 5 ns/op\nkey: value2\nBenchmarkTwo 10 ns/op\n")
	}))
	defer ts.Close()

	c := &Client{BaseURL: ts.URL}

	q := c.Query(context.Background(), "key1:value key2:value")
	defer q.Close()

	// Round-trip the results through a printer and diff against the
	// exact text the server sent.
	var buf bytes.Buffer
	bp := benchfmt.NewPrinter(&buf)
	for q.Next() {
		if err := bp.Print(q.Result()); err != nil {
			t.Fatalf("Print: %v", err)
		}
	}
	if err := q.Err(); err != nil {
		t.Fatalf("Err: %v", err)
	}
	want := "key: value\nBenchmarkOne 5 ns/op\nkey: value2\nBenchmarkTwo 10 ns/op\n"
	if diff := diff.Diff(buf.String(), want); diff != "" {
		t.Errorf("wrong results: (- have/+ want)\n%s", diff)
	}
}
// TestListUploads verifies the request URL built by ListUploads and
// the decoding of the streamed upload summaries.
func TestListUploads(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if have, want := r.URL.RequestURI(), "/uploads?extra_label=key1&extra_label=key2&limit=10&q=key1%3Avalue+key2%3Avalue"; have != want {
			t.Errorf("RequestURI = %q, want %q", have, want)
		}
		fmt.Fprintf(w, "%s\n", `{"UploadID": "id", "Count": 100, "LabelValues": {"key1": "value"}}`)
	}))
	defer ts.Close()

	c := &Client{BaseURL: ts.URL}

	r := c.ListUploads(context.Background(), "key1:value key2:value", []string{"key1", "key2"}, 10)
	defer r.Close()

	if !r.Next() {
		t.Errorf("Next = false, want true")
	}
	if have, want := r.Info(), (UploadInfo{Count: 100, UploadID: "id", LabelValues: benchfmt.Labels{"key1": "value"}}); !reflect.DeepEqual(have, want) {
		t.Errorf("Info = %#v, want %#v", have, want)
	}
	if err := r.Err(); err != nil {
		t.Fatalf("Err: %v", err)
	}
}
// TestNewUpload verifies that uploaded files arrive as multipart
// "file" parts followed by a "commit" field, and that the server's
// status response is returned from Commit.
func TestNewUpload(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if have, want := r.URL.RequestURI(), "/upload"; have != want {
			t.Errorf("RequestURI = %q, want %q", have, want)
		}
		mr, err := r.MultipartReader()
		if err != nil {
			t.Error(err)
		}
		i := 0
		for i = 0; ; i++ {
			p, err := mr.NextPart()
			if err == io.EOF {
				break
			}
			name := p.FormName()
			if name == "commit" {
				continue
			}
			if name != "file" {
				t.Errorf("unexpected field %q, want file", name)
			}
			if have, want := p.FileName(), fmt.Sprintf("want%d.txt", i); have != want {
				t.Errorf("file name = %q, want %q", have, want)
			}
			content, _ := ioutil.ReadAll(p)
			if have, want := string(content), "content"; have != want {
				t.Errorf("unexpected content %q, want %q", have, want)
			}
		}
		// Two file parts plus the trailing "commit" field = 3 parts.
		if i != 3 {
			t.Errorf("number of files = %d, want %d", i, 3)
		}
		fmt.Fprintf(w, "%s\n", `{"uploadid": "id", "fileids": ["id/1", "id/2"]}`)
	}))
	defer ts.Close()

	c := &Client{BaseURL: ts.URL}

	u := c.NewUpload(context.Background())
	for i := 0; i < 2; i++ {
		w, err := u.CreateFile(fmt.Sprintf("want%d.txt", i))
		if err != nil {
			t.Fatalf("CreateFile = %v", err)
		}
		if _, err := fmt.Fprintf(w, "content"); err != nil {
			t.Fatalf("Write returned %v", err)
		}
	}
	status, err := u.Commit()
	if err != nil {
		t.Errorf("Commit = %v", err)
	}
	if status.UploadID != "id" {
		t.Errorf("status.UploadID = %q, want %q", status.UploadID, "id")
	}
}
// TestNewUploadAbort verifies that Abort terminates the multipart
// stream with an "abort" field after the already-written file parts.
func TestNewUploadAbort(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if have, want := r.URL.RequestURI(), "/upload"; have != want {
			t.Errorf("RequestURI = %q, want %q", have, want)
		}
		mr, err := r.MultipartReader()
		if err != nil {
			t.Error(err)
		}
		i := 0
		for i = 0; ; i++ {
			p, err := mr.NextPart()
			if err == io.EOF {
				break
			}
			name := p.FormName()
			if name == "abort" {
				continue
			}
			if name != "file" {
				t.Errorf("unexpected field %q, want file or abort", name)
			}
			if have, want := p.FileName(), fmt.Sprintf("want%d.txt", i); have != want {
				t.Errorf("file name = %q, want %q", have, want)
			}
			content, _ := ioutil.ReadAll(p)
			if have, want := string(content), "content"; have != want {
				t.Errorf("unexpected content %q, want %q", have, want)
			}
		}
		// Two file parts plus the trailing "abort" field = 3 parts.
		if i != 3 {
			t.Errorf("number of files = %d, want %d", i, 3)
		}
		fmt.Fprintf(w, "%s\n", `{"uploadid": "id", "fileids": ["id/1", "id/2"]}`)
	}))
	defer ts.Close()

	c := &Client{BaseURL: ts.URL}

	u := c.NewUpload(context.Background())
	for i := 0; i < 2; i++ {
		w, err := u.CreateFile(fmt.Sprintf("want%d.txt", i))
		if err != nil {
			t.Fatalf("CreateFile = %v", err)
		}
		if _, err := fmt.Fprintf(w, "content"); err != nil {
			t.Fatalf("Write returned %v", err)
		}
	}
	if err := u.Abort(); err != nil {
		t.Errorf("Abort = %v", err)
	}
}

Просмотреть файл

@ -0,0 +1,145 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Reindex repopulates the perfdata SQL database from the original data files in Google Cloud Storage.
//
// Usage:
//
// reindex [-v] [-db foo.bar/baz] [-bucket name] prefix...
//
// Reindex reindexes all the uploads with IDs starting with the given prefixes.
//go:build cloud
// +build cloud
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"strings"
"cloud.google.com/go/storage"
_ "github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/mysql"
"golang.org/x/perf/storage/benchfmt"
"golang.org/x/build/perfdata/db"
"google.golang.org/api/iterator"
)
// Command-line flags.
var (
	dbName  = flag.String("db", "root:@cloudsql(golang-org:us-central1:golang-org)/perfdata?interpolateParams=true", "connect to MySQL `database`")
	bucket  = flag.String("bucket", "golang-perfdata", "read from Google Cloud Storage `bucket`")
	verbose = flag.Bool("v", false, "verbose")
)
// usage prints a usage message to stderr and exits with status 2.
func usage() {
	fmt.Fprintf(os.Stderr, `Usage of reindex:
	reindex [flags] prefix...
`)
	flag.PrintDefaults()
	os.Exit(2)
}
// main lists the GCS objects under uploads/<prefix> for each given
// prefix, groups them by upload ID, and reindexes each upload's files
// into the SQL database.
func main() {
	log.SetPrefix("reindex: ")
	log.SetFlags(0)
	flag.Usage = usage
	flag.Parse()
	if *verbose {
		log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)
	}
	ctx := context.Background()

	prefixes := flag.Args()
	if len(prefixes) == 0 {
		log.Fatal("no prefixes to reindex")
	}

	d, err := db.OpenSQL("mysql", *dbName)
	if err != nil {
		log.Fatal(err)
	}
	defer d.Close()

	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	bucket := client.Bucket(*bucket)
	for _, prefix := range prefixes {
		if strings.Contains(prefix, "/") {
			log.Fatalf("prefix %q cannot contain /", prefix)
		}
		it := bucket.Objects(ctx, &storage.Query{Prefix: "uploads/" + prefix})
		var lastUploadID string
		var files []string
		for {
			objAttrs, err := it.Next()
			if err == iterator.Done {
				break
			}
			if err != nil {
				log.Fatal(err)
			}
			// Objects are named uploads/<uploadID>/<file>; anything
			// not matching that shape is not upload data.
			name := strings.TrimPrefix(objAttrs.Name, "uploads/")
			slash := strings.Index(name, "/")
			if slash < 0 {
				log.Printf("ignoring file %q", objAttrs.Name)
				// Fix: skip the object. Previously execution fell
				// through and name[:slash] panicked with slash == -1.
				continue
			}
			uploadID := name[:slash]
			// Objects are listed in lexical order, so all files of one
			// upload are adjacent; flush when the upload ID changes.
			if lastUploadID != "" && uploadID != lastUploadID {
				if err := reindex(ctx, d, bucket, lastUploadID, files); err != nil {
					log.Fatal(err)
				}
				files = nil
			}
			files = append(files, objAttrs.Name)
			lastUploadID = uploadID
		}
		if len(files) > 0 {
			if err := reindex(ctx, d, bucket, lastUploadID, files); err != nil {
				log.Fatal(err)
			}
		}
	}
}
// reindex replaces the database records for uploadID with the
// contents of files, the GCS object names belonging to that upload.
func reindex(ctx context.Context, db *db.DB, bucket *storage.BucketHandle, uploadID string, files []string) error {
	if *verbose {
		log.Printf("reindexing %q", uploadID)
	}
	u, err := db.ReplaceUpload(uploadID)
	if err != nil {
		return err
	}
	for _, name := range files {
		if err := reindexOne(ctx, u, bucket, name); err != nil {
			return err
		}
	}
	return u.Commit()
}

// reindexOne reads one GCS object and inserts every benchmark result
// it contains into the upload u.
func reindexOne(ctx context.Context, u *db.Upload, bucket *storage.BucketHandle, name string) error {
	r, err := bucket.Object(name).NewReader(ctx)
	if err != nil {
		return err
	}
	defer r.Close()
	br := benchfmt.NewReader(r)
	for br.Next() {
		if err := u.InsertRecord(br.Result()); err != nil {
			return err
		}
	}
	if err := br.Err(); err != nil {
		return err
	}
	return nil
}

683
perfdata/db/db.go Normal file
Просмотреть файл

@ -0,0 +1,683 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package db provides the high-level database interface for the
// storage app.
package db
import (
"bytes"
"database/sql"
"fmt"
"io"
"regexp"
"sort"
"strconv"
"strings"
"text/template"
"time"
"golang.org/x/net/context"
"golang.org/x/build/perfdata"
"golang.org/x/perf/storage/benchfmt"
"golang.org/x/build/perfdata/query"
)
// TODO(quentin): Add Context to every function when App Engine supports Go >=1.8.

// DB is a high-level interface to a database for the storage
// app. It's safe for concurrent use by multiple goroutines.
type DB struct {
	sql        *sql.DB // underlying database connection
	driverName string  // name of underlying driver for SQL differences
	// prepared statements
	lastUpload    *sql.Stmt // fetch the most recent upload ID
	insertUpload  *sql.Stmt // insert a new Uploads row
	checkUpload   *sql.Stmt // check whether an upload ID exists
	deleteRecords *sql.Stmt // delete all Records rows for an upload
}
// OpenSQL creates a DB backed by a SQL database. The parameters are
// the same as the parameters for sql.Open. Only mysql and sqlite3 are
// explicitly supported; other database engines will receive MySQL
// query syntax which may or may not be compatible.
func OpenSQL(driverName, dataSourceName string) (*DB, error) {
	db, err := sql.Open(driverName, dataSourceName)
	if err != nil {
		return nil, err
	}
	// Give driver-specific code a chance to configure the connection.
	if hook := openHooks[driverName]; hook != nil {
		if err := hook(db); err != nil {
			return nil, err
		}
	}
	d := &DB{sql: db, driverName: driverName}
	if err := d.createTables(driverName); err != nil {
		return nil, err
	}
	if err := d.prepareStatements(driverName); err != nil {
		return nil, err
	}
	return d, nil
}

// openHooks maps a driver name to the hook run after sql.Open.
var openHooks = make(map[string]func(*sql.DB) error)

// RegisterOpenHook registers a hook to be called after opening a connection to driverName.
// This is used by the sqlite3 package to register a ConnectHook.
// It must be called from an init function.
func RegisterOpenHook(driverName string, hook func(*sql.DB) error) {
	openHooks[driverName] = hook
}
// createTmpl is the template used to prepare the CREATE statements
// for the database. It is evaluated with . as a map containing one
// entry whose key is the driver name.
// (MySQL supports inline INDEX clauses; sqlite3 needs separate
// CREATE INDEX statements, hence the driver conditionals.)
var createTmpl = template.Must(template.New("create").Parse(`
CREATE TABLE IF NOT EXISTS Uploads (
	UploadID VARCHAR(20) PRIMARY KEY,
	Day VARCHAR(8),
	Seq BIGINT UNSIGNED
{{if not .sqlite3}}
	, Index (Day, Seq)
{{end}}
);
{{if .sqlite3}}
CREATE INDEX IF NOT EXISTS UploadDaySeq ON Uploads(Day, Seq);
{{end}}
CREATE TABLE IF NOT EXISTS Records (
	UploadID VARCHAR(20) NOT NULL,
	RecordID BIGINT UNSIGNED NOT NULL,
	Content BLOB NOT NULL,
	PRIMARY KEY (UploadID, RecordID),
	FOREIGN KEY (UploadID) REFERENCES Uploads(UploadID) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS RecordLabels (
	UploadID VARCHAR(20) NOT NULL,
	RecordID BIGINT UNSIGNED NOT NULL,
	Name VARCHAR(255) NOT NULL,
	Value VARCHAR(8192) NOT NULL,
{{if not .sqlite3}}
	Index (Name(100), Value(100)),
{{end}}
	PRIMARY KEY (UploadID, RecordID, Name),
	FOREIGN KEY (UploadID, RecordID) REFERENCES Records(UploadID, RecordID) ON UPDATE CASCADE ON DELETE CASCADE
);
{{if .sqlite3}}
CREATE INDEX IF NOT EXISTS RecordLabelsNameValue ON RecordLabels(Name, Value);
{{end}}
`))
// createTables creates any missing tables on the connection in
// db.sql. driverName is the same driver name passed to sql.Open and
// is used to select the correct syntax.
func (db *DB) createTables(driverName string) error {
	var buf bytes.Buffer
	if err := createTmpl.Execute(&buf, map[string]bool{driverName: true}); err != nil {
		return err
	}
	// Execute each non-empty statement separately; a single Exec of a
	// multi-statement script is not portable across drivers.
	for _, q := range strings.Split(buf.String(), ";") {
		if strings.TrimSpace(q) == "" {
			continue
		}
		if _, err := db.sql.Exec(q); err != nil {
			return fmt.Errorf("create table: %v", err)
		}
	}
	return nil
}

// prepareStatements calls db.sql.Prepare on reusable SQL statements.
func (db *DB) prepareStatements(driverName string) error {
	var err error
	query := "SELECT UploadID FROM Uploads ORDER BY Day DESC, Seq DESC LIMIT 1"
	if driverName != "sqlite3" {
		// Lock the row so concurrent NewUpload calls can't allocate
		// the same sequence number. sqlite3 does not support FOR UPDATE.
		query += " FOR UPDATE"
	}
	db.lastUpload, err = db.sql.Prepare(query)
	if err != nil {
		return err
	}
	db.insertUpload, err = db.sql.Prepare("INSERT INTO Uploads(UploadID, Day, Seq) VALUES (?, ?, ?)")
	if err != nil {
		return err
	}
	db.checkUpload, err = db.sql.Prepare("SELECT 1 FROM Uploads WHERE UploadID = ?")
	if err != nil {
		return err
	}
	db.deleteRecords, err = db.sql.Prepare("DELETE FROM Records WHERE UploadID = ?")
	if err != nil {
		return err
	}
	return nil
}
// An Upload is a collection of files that share an upload ID.
type Upload struct {
	// ID is the value of the "upload" key that should be
	// associated with every record in this upload.
	ID string
	// recordid is the index of the next record to insert.
	recordid int64
	// db is the underlying database that this upload is going to.
	db *DB
	// tx is the transaction used by the upload.
	tx *sql.Tx

	// pending arguments for flush
	insertRecordArgs []interface{}
	insertLabelArgs  []interface{}
	lastResult       *benchfmt.Result
}

// now is a hook for testing
var now = time.Now
// ReplaceUpload removes the records associated with id if any and
// allows insertion of new records.
func (db *DB) ReplaceUpload(id string) (*Upload, error) {
	if _, err := db.deleteRecords.Exec(id); err != nil {
		return nil, err
	}
	var found bool
	err := db.checkUpload.QueryRow(id).Scan(&found)
	switch err {
	case sql.ErrNoRows:
		// The Uploads row does not exist yet; create it, recovering
		// Day and Seq from the "day.seq" form of the ID if it matches.
		var day sql.NullString
		var num sql.NullInt64
		if m := regexp.MustCompile(`^(\d+)\.(\d+)$`).FindStringSubmatch(id); m != nil {
			day.Valid, num.Valid = true, true
			day.String = m[1]
			num.Int64, _ = strconv.ParseInt(m[2], 10, 64)
		}
		if _, err := db.insertUpload.Exec(id, day, num); err != nil {
			return nil, err
		}
	case nil:
		// The Uploads row already exists; reuse it.
	default:
		return nil, err
	}
	tx, err := db.sql.Begin()
	if err != nil {
		return nil, err
	}
	u := &Upload{
		ID: id,
		db: db,
		tx: tx,
	}
	return u, nil
}
// NewUpload returns an upload for storing new files.
// All records written to the Upload will have the same upload ID.
func (db *DB) NewUpload(ctx context.Context) (*Upload, error) {
	day := now().UTC().Format("20060102")

	num := 0

	// Allocate the next "day.seq" ID in its own transaction so the ID
	// is claimed even if the subsequent record writes fail.
	tx, err := db.sql.Begin()
	if err != nil {
		return nil, err
	}
	defer func() {
		if tx != nil {
			tx.Rollback()
		}
	}()
	var lastID string
	err = tx.Stmt(db.lastUpload).QueryRow().Scan(&lastID)
	switch err {
	case sql.ErrNoRows:
		// No uploads yet; start at sequence 1 below.
	case nil:
		if strings.HasPrefix(lastID, day) {
			num, err = strconv.Atoi(lastID[len(day)+1:])
			if err != nil {
				return nil, err
			}
		}
	default:
		return nil, err
	}

	num++

	id := fmt.Sprintf("%s.%d", day, num)
	_, err = tx.Stmt(db.insertUpload).Exec(id, day, num)
	if err != nil {
		return nil, err
	}
	if err := tx.Commit(); err != nil {
		return nil, err
	}
	tx = nil // committed; disarm the deferred Rollback

	// A second transaction holds the records inserted into the upload.
	utx, err := db.sql.Begin()
	if err != nil {
		return nil, err
	}
	u := &Upload{
		ID: id,
		db: db,
		tx: utx,
	}
	return u, nil
}
// InsertRecord inserts a single record in an existing upload.
// If InsertRecord returns a non-nil error, the Upload has failed and u.Abort() must be called.
func (u *Upload) InsertRecord(r *benchfmt.Result) error {
	// Fast path: if the labels match the previous record, append this
	// line to the previous record's content instead of queuing a new row.
	if u.lastResult != nil && u.lastResult.SameLabels(r) {
		data := u.insertRecordArgs[len(u.insertRecordArgs)-1].([]byte)
		data = append(data, r.Content...)
		data = append(data, '\n')
		u.insertRecordArgs[len(u.insertRecordArgs)-1] = data
		return nil
	}
	// TODO(quentin): Support multiple lines (slice of results?)
	var buf bytes.Buffer
	if err := benchfmt.NewPrinter(&buf).Print(r); err != nil {
		return err
	}
	u.lastResult = r
	u.insertRecordArgs = append(u.insertRecordArgs, u.ID, u.recordid, buf.Bytes())
	// Index every label and name label of the record for querying.
	for _, k := range r.Labels.Keys() {
		if err := u.insertLabel(k, r.Labels[k]); err != nil {
			return err
		}
	}
	for _, k := range r.NameLabels.Keys() {
		if err := u.insertLabel(k, r.NameLabels[k]); err != nil {
			return err
		}
	}
	u.recordid++

	return nil
}
// insertLabel queues a label pair for insertion.
// If there are enough labels queued, flush is called.
func (u *Upload) insertLabel(key, value string) error {
	// N.B. sqlite3 has a max of 999 arguments.
	// https://www.sqlite.org/limits.html#max_variable_number
	// Flush at 990 so the next 4-argument row still fits.
	if len(u.insertLabelArgs) >= 990 {
		if err := u.flush(); err != nil {
			return err
		}
	}
	u.insertLabelArgs = append(u.insertLabelArgs, u.ID, u.recordid, key, value)
	return nil
}
// repeatDelim returns a string consisting of n copies of s with delim
// between each copy. It returns the empty string when n is zero.
func repeatDelim(s, delim string, n int) string {
	copies := make([]string, n)
	for i := range copies {
		copies[i] = s
	}
	return strings.Join(copies, delim)
}
// insertMultiple executes a single INSERT statement to insert multiple rows.
// args holds the flattened row values; len(args) must be a multiple of argsPerRow.
func insertMultiple(tx *sql.Tx, sqlPrefix string, argsPerRow int, args []interface{}) error {
	if len(args) == 0 {
		return nil
	}
	// Build "(?, ..., ?), (?, ..., ?), ..." with one group per row.
	row := "(" + repeatDelim("?", ", ", argsPerRow) + ")"
	stmt := sqlPrefix + repeatDelim(row, ", ", len(args)/argsPerRow)
	_, err := tx.Exec(stmt, args...)
	return err
}
// flush sends INSERT statements for any pending data in u.insertRecordArgs and u.insertLabelArgs.
func (u *Upload) flush() error {
	if n := len(u.insertRecordArgs); n > 0 {
		if err := insertMultiple(u.tx, "INSERT INTO Records(UploadID, RecordID, Content) VALUES ", 3, u.insertRecordArgs); err != nil {
			return err
		}
		u.insertRecordArgs = nil
	}
	if n := len(u.insertLabelArgs); n > 0 {
		if err := insertMultiple(u.tx, "INSERT INTO RecordLabels VALUES ", 4, u.insertLabelArgs); err != nil {
			return err
		}
		u.insertLabelArgs = nil
	}
	// The queued record content has been written, so the next record
	// must start a fresh row.
	u.lastResult = nil
	return nil
}

// Commit finishes processing the upload.
func (u *Upload) Commit() error {
	if err := u.flush(); err != nil {
		return err
	}
	return u.tx.Commit()
}

// Abort cleans up resources associated with the upload.
// It does not attempt to clean up partial database state.
func (u *Upload) Abort() error {
	return u.tx.Rollback()
}
// parseQuery parses a query into a slice of SQL subselects and a slice of arguments.
// The subselects must be joined with INNER JOIN in the order returned.
func parseQuery(q string) (sql []string, args []interface{}, err error) {
	var keys []string
	parts := make(map[string]part)
	// Group words by key, merging repeated keys (e.g. two range
	// constraints on the same label) into a single part.
	for _, word := range query.SplitWords(q) {
		p, err := parseWord(word)
		if err != nil {
			return nil, nil, err
		}
		if _, ok := parts[p.key]; ok {
			parts[p.key], err = parts[p.key].merge(p)
			if err != nil {
				return nil, nil, err
			}
		} else {
			keys = append(keys, p.key)
			parts[p.key] = p
		}
	}
	// Process each key in sorted order for deterministic SQL output.
	sort.Strings(keys)
	for _, key := range keys {
		s, a, err := parts[key].sql()
		if err != nil {
			return nil, nil, err
		}
		sql = append(sql, s)
		args = append(args, a...)
	}
	return
}
// Query searches for results matching the given query string.
//
// The query string is first parsed into quoted words (as in the shell)
// and then each word must be formatted as one of the following:
//
//	key:value - exact match on label "key" = "value"
//	key>value - value greater than (useful for dates)
//	key<value - value less than (also useful for dates)
func (db *DB) Query(q string) *Query {
	ret := &Query{q: q}

	query := "SELECT r.Content FROM "

	sql, args, err := parseQuery(q)
	if err != nil {
		ret.err = err
		return ret
	}
	// Join one subselect per label constraint, then join Records to
	// fetch the content of the matching rows.
	for i, part := range sql {
		if i > 0 {
			query += " INNER JOIN "
		}
		query += fmt.Sprintf("(%s) t%d", part, i)
		if i > 0 {
			query += " USING (UploadID, RecordID)"
		}
	}

	if len(sql) > 0 {
		query += " LEFT JOIN"
	}
	query += " Records r"
	if len(sql) > 0 {
		query += " USING (UploadID, RecordID)"
	}

	ret.sqlQuery, ret.sqlArgs = query, args
	ret.rows, ret.err = db.sql.Query(query, args...)
	return ret
}
// Query is the result of a query.
// Use Next to advance through the rows, making sure to call Close when done:
//
//	q := db.Query("key:value")
//	defer q.Close()
//	for q.Next() {
//		res := q.Result()
//		...
//	}
//	err = q.Err() // get any error encountered during iteration
//	...
type Query struct {
	rows *sql.Rows
	// for Debug
	q        string
	sqlQuery string
	sqlArgs  []interface{}
	// from last call to Next
	br  *benchfmt.Reader // reader over the current row's content block
	err error
}
// Debug returns the human-readable state of the query.
func (q *Query) Debug() string {
	var b strings.Builder
	fmt.Fprintf(&b, "q=%q", q.q)
	if q.sqlQuery != "" || len(q.sqlArgs) > 0 {
		fmt.Fprintf(&b, " sql={%q %#v}", q.sqlQuery, q.sqlArgs)
	}
	if q.err != nil {
		fmt.Fprintf(&b, " err=%v", q.err)
	}
	return b.String()
}
// Next prepares the next result for reading with the Result
// method. It returns false when there are no more results, either by
// reaching the end of the input or an error.
func (q *Query) Next() bool {
	if q.err != nil {
		return false
	}
	// Each SQL row stores a block of one or more benchmark results;
	// drain the current block before scanning the next row.
	if q.br != nil {
		if q.br.Next() {
			return true
		}
		q.err = q.br.Err()
		if q.err != nil {
			return false
		}
	}
	if !q.rows.Next() {
		return false
	}
	var content []byte
	q.err = q.rows.Scan(&content)
	if q.err != nil {
		return false
	}
	q.br = benchfmt.NewReader(bytes.NewReader(content))
	if !q.br.Next() {
		// A stored block with no results should be impossible; report
		// it as a truncated stream if the reader saw no other error.
		q.err = q.br.Err()
		if q.err == nil {
			q.err = io.ErrUnexpectedEOF
		}
		return false
	}
	return q.err == nil
}
// Result returns the most recent result generated by a call to Next.
func (q *Query) Result() *benchfmt.Result {
	return q.br.Result()
}

// Err returns the error state of the query.
// A clean end of results (io.EOF) is not reported as an error.
func (q *Query) Err() error {
	if q.err == io.EOF {
		return nil
	}
	return q.err
}

// Close frees resources associated with the query.
func (q *Query) Close() error {
	if q.rows != nil {
		return q.rows.Close()
	}
	return q.Err()
}

// CountUploads returns the number of uploads in the database.
func (db *DB) CountUploads() (int, error) {
	var uploads int
	err := db.sql.QueryRow("SELECT COUNT(*) FROM Uploads").Scan(&uploads)
	return uploads, err
}

// Close closes the database connections, releasing any open resources.
func (db *DB) Close() error {
	for _, stmt := range []*sql.Stmt{db.lastUpload, db.insertUpload, db.checkUpload, db.deleteRecords} {
		if err := stmt.Close(); err != nil {
			return err
		}
	}
	return db.sql.Close()
}
// UploadList is the result of ListUploads.
// Use Next to advance through the rows, making sure to call Close when done:
//
//	q := db.ListUploads("key:value")
//	defer q.Close()
//	for q.Next() {
//		info := q.Info()
//		...
//	}
//	err = q.Err() // get any error encountered during iteration
//	...
type UploadList struct {
	rows        *sql.Rows
	extraLabels []string // label names whose values are fetched per upload
	// for Debug
	q        string
	sqlQuery string
	sqlArgs  []interface{}
	// from last call to Next
	count       int
	uploadID    string
	labelValues []sql.NullString // one value per entry in extraLabels
	err         error
}

// Debug returns the human-readable state of ul.
func (ul *UploadList) Debug() string {
	ret := fmt.Sprintf("q=%q", ul.q)
	if ul.sqlQuery != "" || len(ul.sqlArgs) > 0 {
		ret += fmt.Sprintf(" sql={%q %#v}", ul.sqlQuery, ul.sqlArgs)
	}
	if ul.err != nil {
		ret += fmt.Sprintf(" err=%v", ul.err)
	}
	return ret
}
// ListUploads searches for uploads containing results matching the given query string.
// The query may be empty, in which case all uploads will be returned.
// For each label in extraLabels, one unspecified record's value will be obtained for each upload.
// If limit is non-zero, only the limit most recent uploads will be returned.
func (db *DB) ListUploads(q string, extraLabels []string, limit int) *UploadList {
	ret := &UploadList{q: q, extraLabels: extraLabels}

	var args []interface{}
	query := "SELECT j.UploadID, rCount"
	// One correlated subselect per requested extra label.
	for i, label := range extraLabels {
		query += fmt.Sprintf(", (SELECT l%d.Value FROM RecordLabels l%d WHERE l%d.UploadID = j.UploadID AND Name = ? LIMIT 1)", i, i, i)
		args = append(args, label)
	}
	sql, qArgs, err := parseQuery(q)
	if err != nil {
		ret.err = err
		return ret
	}
	if len(sql) == 0 {
		// Optimize empty query.
		query += " FROM (SELECT UploadID, (SELECT COUNT(*) FROM Records r WHERE r.UploadID = u.UploadID) AS rCount FROM Uploads u "
		switch db.driverName {
		case "sqlite3":
			query += "WHERE"
		default:
			query += "HAVING"
		}
		query += " rCount > 0 ORDER BY u.Day DESC, u.Seq DESC, u.UploadID DESC"
		if limit != 0 {
			query += fmt.Sprintf(" LIMIT %d", limit)
		}
		query += ") j"
	} else {
		// Join individual queries.
		query += " FROM (SELECT UploadID, COUNT(*) as rCount FROM "
		args = append(args, qArgs...)
		for i, part := range sql {
			if i > 0 {
				query += " INNER JOIN "
			}
			query += fmt.Sprintf("(%s) t%d", part, i)
			if i > 0 {
				query += " USING (UploadID, RecordID)"
			}
		}

		query += " LEFT JOIN Records r USING (UploadID, RecordID)"
		query += " GROUP BY UploadID) j LEFT JOIN Uploads u USING (UploadID) ORDER BY u.Day DESC, u.Seq DESC, u.UploadID DESC"
		if limit != 0 {
			query += fmt.Sprintf(" LIMIT %d", limit)
		}
	}

	ret.sqlQuery, ret.sqlArgs = query, args
	ret.rows, ret.err = db.sql.Query(query, args...)
	return ret
}
// Next prepares the next result for reading with the Info
// method. It returns false when there are no more results, either by
// reaching the end of the input or an error.
// (The doc previously referenced a nonexistent "Result" method.)
func (ul *UploadList) Next() bool {
	if ul.err != nil {
		return false
	}
	if !ul.rows.Next() {
		return false
	}
	// Scan the upload ID and record count, plus one nullable string
	// per requested extra label.
	args := []interface{}{&ul.uploadID, &ul.count}
	ul.labelValues = make([]sql.NullString, len(ul.extraLabels))
	for i := range ul.labelValues {
		args = append(args, &ul.labelValues[i])
	}
	ul.err = ul.rows.Scan(args...)
	// Previously an `if ul.err != nil { return false }` preceded this
	// identical check, leaving the final return effectively dead code.
	return ul.err == nil
}
// Info returns the most recent UploadInfo generated by a call to Next.
func (ul *UploadList) Info() perfdata.UploadInfo {
	// Only labels that were non-NULL in the database are included.
	l := make(benchfmt.Labels)
	for i := range ul.extraLabels {
		if ul.labelValues[i].Valid {
			l[ul.extraLabels[i]] = ul.labelValues[i].String
		}
	}
	return perfdata.UploadInfo{UploadID: ul.uploadID, Count: ul.count, LabelValues: l}
}

// Err returns the error state of the query.
func (ul *UploadList) Err() error {
	return ul.err
}

// Close frees resources associated with the query.
func (ul *UploadList) Close() error {
	if ul.rows != nil {
		return ul.rows.Close()
	}
	return ul.err
}

430
perfdata/db/db_test.go Normal file
Просмотреть файл

@ -0,0 +1,430 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cgo
// +build cgo
package db_test
import (
"bytes"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"time"
"golang.org/x/net/context"
"golang.org/x/build/internal/diff"
"golang.org/x/perf/storage/benchfmt"
. "golang.org/x/build/perfdata/db"
"golang.org/x/build/perfdata/db/dbtest"
)
// Most of the db package is tested via the end-to-end-tests in perf/storage/app.

// TestUploadIDs verifies that NewUpload generates the correct sequence of upload IDs.
func TestUploadIDs(t *testing.T) {
	ctx := context.Background()

	db, cleanup := dbtest.NewDB(t)
	defer cleanup()

	defer SetNow(time.Time{})

	// IDs have the form "day.seq"; seq restarts at 1 when the (fake)
	// clock crosses into a new day and is not zero-padded.
	tests := []struct {
		sec int64
		id  string
	}{
		{0, "19700101.1"},
		{0, "19700101.2"},
		{86400, "19700102.1"},
		{86400, "19700102.2"},
		{86400, "19700102.3"},
		{86400, "19700102.4"},
		{86400, "19700102.5"},
		{86400, "19700102.6"},
		{86400, "19700102.7"},
		{86400, "19700102.8"},
		{86400, "19700102.9"},
		{86400, "19700102.10"},
		{86400, "19700102.11"},
	}
	for _, test := range tests {
		SetNow(time.Unix(test.sec, 0))
		u, err := db.NewUpload(ctx)
		if err != nil {
			t.Fatalf("NewUpload: %v", err)
		}
		if err := u.Commit(); err != nil {
			t.Fatalf("Commit: %v", err)
		}
		if u.ID != test.id {
			t.Fatalf("u.ID = %q, want %q", u.ID, test.id)
		}
	}
}
// checkQueryResults performs a query on db and verifies that the
// results as printed by BenchmarkPrinter are equal to results.
func checkQueryResults(t *testing.T, db *DB, query, results string) {
	q := db.Query(query)
	defer q.Close()
	var out bytes.Buffer
	printer := benchfmt.NewPrinter(&out)
	for q.Next() {
		if err := printer.Print(q.Result()); err != nil {
			t.Fatalf("Print: %v", err)
		}
	}
	if err := q.Err(); err != nil {
		t.Fatalf("Err: %v", err)
	}
	if d := diff.Diff(out.String(), results); d != "" {
		t.Errorf("wrong results: (- have/+ want)\n%s", d)
	}
}
// TestReplaceUpload verifies that the expected number of rows exist after replacing an upload.
func TestReplaceUpload(t *testing.T) {
	// Pin the clock so upload IDs are deterministic ("19700101.N").
	SetNow(time.Unix(0, 0))
	defer SetNow(time.Time{})
	db, cleanup := dbtest.NewDB(t)
	defer cleanup()
	ctx := context.Background()
	labels := benchfmt.Labels{"key": "value"}
	u, err := db.NewUpload(ctx)
	if err != nil {
		t.Fatalf("NewUpload: %v", err)
	}
	labels["uploadid"] = u.ID
	// Seed the upload with four records: num in {1,2} x content in {1,2} ns/op.
	for _, num := range []string{"1", "2"} {
		labels["num"] = num
		for _, num2 := range []int{1, 2} {
			if err := u.InsertRecord(&benchfmt.Result{
				Labels:     labels,
				NameLabels: nil,
				LineNum:    1,
				Content:    fmt.Sprintf("BenchmarkName %d ns/op", num2),
			}); err != nil {
				t.Fatalf("InsertRecord: %v", err)
			}
			// Copy before the next mutation — presumably InsertRecord
			// retains a reference to the map; confirm in db.go.
			labels = labels.Copy()
		}
	}
	if err := u.Commit(); err != nil {
		t.Fatalf("Commit: %v", err)
	}
	// All four records are visible under the original upload ID.
	checkQueryResults(t, db, "key:value",
		`key: value
num: 1
uploadid: 19700101.1
BenchmarkName 1 ns/op
BenchmarkName 2 ns/op
num: 2
BenchmarkName 1 ns/op
BenchmarkName 2 ns/op
`)
	labels["num"] = "3"
	// Replace the existing upload, then "replace" an upload ID that
	// does not exist yet (the expected results show it is created).
	for _, uploadid := range []string{u.ID, "new"} {
		u, err := db.ReplaceUpload(uploadid)
		if err != nil {
			t.Fatalf("ReplaceUpload: %v", err)
		}
		labels["uploadid"] = u.ID
		if err := u.InsertRecord(&benchfmt.Result{
			Labels:     labels,
			NameLabels: nil,
			LineNum:    1,
			Content:    "BenchmarkName 3 ns/op",
		}); err != nil {
			t.Fatalf("InsertRecord: %v", err)
		}
		labels = labels.Copy()
		if err := u.Commit(); err != nil {
			t.Fatalf("Commit: %v", err)
		}
	}
	// Each replaced upload now contains exactly the single new record.
	checkQueryResults(t, db, "key:value",
		`key: value
num: 3
uploadid: 19700101.1
BenchmarkName 3 ns/op
uploadid: new
BenchmarkName 3 ns/op
`)
}
// TestNewUpload verifies that NewUpload and InsertRecord wrote the correct rows to the database.
func TestNewUpload(t *testing.T) {
	// Pin the clock so the upload ID is the deterministic "19700101.1".
	SetNow(time.Unix(0, 0))
	defer SetNow(time.Time{})
	db, cleanup := dbtest.NewDB(t)
	defer cleanup()
	u, err := db.NewUpload(context.Background())
	if err != nil {
		t.Fatalf("NewUpload: %v", err)
	}
	br := benchfmt.NewReader(strings.NewReader(`
key: value
BenchmarkName 1 ns/op
BenchmarkName 2 ns/op
`))
	for br.Next() {
		if err := u.InsertRecord(br.Result()); err != nil {
			t.Fatalf("InsertRecord: %v", err)
		}
	}
	if err := br.Err(); err != nil {
		t.Fatalf("Err: %v", err)
	}
	if err := u.Commit(); err != nil {
		t.Fatalf("Commit: %v", err)
	}
	// Inspect the raw RecordLabels rows via the test-only DBSQL accessor.
	rows, err := DBSQL(db).Query("SELECT UploadId, RecordId, Name, Value FROM RecordLabels")
	if err != nil {
		t.Fatalf("sql.Query: %v", err)
	}
	defer rows.Close()
	// Exactly these two label rows are expected, both on record 0 —
	// presumably label rows are written only when labels change
	// between records; confirm against db.go.
	want := map[string]string{
		"key":  "value",
		"name": "Name",
	}
	i := 0
	for rows.Next() {
		var uploadid string
		var recordid int64
		var name, value string
		if err := rows.Scan(&uploadid, &recordid, &name, &value); err != nil {
			t.Fatalf("rows.Scan: %v", err)
		}
		if uploadid != "19700101.1" {
			t.Errorf("uploadid = %q, want %q", uploadid, "19700101.1")
		}
		if recordid != 0 {
			t.Errorf("recordid = %d, want 0", recordid)
		}
		if want[name] != value {
			t.Errorf("%s = %q, want %q", name, value, want[name])
		}
		i++
	}
	if i != len(want) {
		t.Errorf("have %d labels, want %d", i, len(want))
	}
	if err := rows.Err(); err != nil {
		t.Errorf("rows.Err: %v", err)
	}
}
// TestQuery exercises DB.Query. It inserts 1024 records where, for
// record i, label<j> holds i/(2^j), then checks that various query
// strings select exactly the expected records (identified by their
// label0 value).
func TestQuery(t *testing.T) {
	db, cleanup := dbtest.NewDB(t)
	defer cleanup()
	u, err := db.NewUpload(context.Background())
	if err != nil {
		t.Fatalf("NewUpload: %v", err)
	}
	var allRecords []int
	for i := 0; i < 1024; i++ {
		allRecords = append(allRecords, i)
		r := &benchfmt.Result{Labels: make(map[string]string), NameLabels: make(map[string]string), Content: "BenchmarkName 1 ns/op"}
		r.Labels["upload"] = u.ID
		for j := uint(0); j < 10; j++ {
			r.Labels[fmt.Sprintf("label%d", j)] = fmt.Sprintf("%d", i/(1<<j))
		}
		r.NameLabels["name"] = "Name"
		if err := u.InsertRecord(r); err != nil {
			t.Fatalf("InsertRecord: %v", err)
		}
	}
	if err := u.Commit(); err != nil {
		t.Fatalf("Commit: %v", err)
	}
	tests := []struct {
		q    string
		want []int // nil means we want an error
	}{
		// Note: comparisons are on label strings, so e.g.
		// "label1>510 label1<52" matches "511" lexically.
		{"label0:0", []int{0}},
		{"label0:1 label0:1 label0<2 label0>0", []int{1}},
		{"label0>0 label0<2 label0:1 label0:1", []int{1}},
		{"label0<2 label0<1", []int{0}},
		{"label0>1021 label0>1022 label1:511", []int{1023}},
		{"label1:0", []int{0, 1}},
		{"label0:5 name:Name", []int{5}},
		{"label0:0 label0:5", []int{}},
		{"bogus query", nil},
		{"label1<2 label3:0", []int{0, 1, 2, 3}},
		{"label1>510 label1<52", []int{1022, 1023}},
		{"", allRecords},
		{"missing>", []int{}},
		{"label0>", allRecords},
		{"upload:" + u.ID, allRecords},
		{"upload:none", []int{}},
		{"upload>" + u.ID, []int{}},
		{"upload<" + u.ID, []int{}},
		{"label0:0 upload:" + u.ID, []int{0}},
	}
	for _, test := range tests {
		t.Run("query="+test.q, func(t *testing.T) {
			q := db.Query(test.q)
			if test.want == nil {
				// A malformed query must yield no rows and a non-nil Err.
				if q.Next() {
					t.Fatal("Next() = true, want false")
				}
				if err := q.Err(); err == nil {
					t.Fatal("Err() = nil, want error")
				}
				return
			}
			defer func() {
				t.Logf("q.Debug: %s", q.Debug())
				if err := q.Close(); err != nil {
					t.Errorf("Close: %v", err)
				}
			}()
			var have []int
			// Consume exactly len(test.want) results...
			for i := range test.want {
				if !q.Next() {
					t.Fatalf("#%d: Next() = false", i)
				}
				r := q.Result()
				n, err := strconv.Atoi(r.Labels["label0"])
				if err != nil {
					t.Fatalf("unexpected label0 value %q: %v", r.Labels["label0"], err)
				}
				have = append(have, n)
				if r.NameLabels["name"] != "Name" {
					t.Errorf("result[%d].name = %q, want %q", i, r.NameLabels["name"], "Name")
				}
			}
			// ...then require the iterator to be exhausted.
			for q.Next() {
				r := q.Result()
				t.Errorf("Next() = true, want false (got labels %v)", r.Labels)
			}
			if err := q.Err(); err != nil {
				t.Errorf("Err() = %v, want nil", err)
			}
			// Sort to make the comparison order-independent.
			sort.Ints(have)
			if len(have) == 0 {
				// Normalize nil to the non-nil empty slice used in want.
				have = []int{}
			}
			if !reflect.DeepEqual(have, test.want) {
				t.Errorf("label0[] = %v, want %v", have, test.want)
			}
		})
	}
}
// TestListUploads verifies that ListUploads returns the correct values.
func TestListUploads(t *testing.T) {
	// Pin the clock so upload IDs are the deterministic "19700101.N".
	SetNow(time.Unix(0, 0))
	defer SetNow(time.Time{})
	db, cleanup := dbtest.NewDB(t)
	defer cleanup()
	// Create 10 uploads (IDs 19700101.1 .. 19700101.10); the upload
	// created on iteration i contains i+1 records, so the first one
	// (i == -1) is empty.
	for i := -1; i < 9; i++ {
		u, err := db.NewUpload(context.Background())
		if err != nil {
			t.Fatalf("NewUpload: %v", err)
		}
		for j := 0; j <= i; j++ {
			labels := benchfmt.Labels{
				"key": "value",
				"i":   fmt.Sprintf("%d", i),
				"j":   fmt.Sprintf("%d", j),
			}
			if err := u.InsertRecord(&benchfmt.Result{
				Labels:     labels,
				NameLabels: nil,
				LineNum:    1,
				Content:    fmt.Sprintf("BenchmarkName %d ns/op", j),
			}); err != nil {
				t.Fatalf("InsertRecord: %v", err)
			}
		}
		if err := u.Commit(); err != nil {
			t.Fatalf("Commit: %v", err)
		}
	}
	type result struct {
		count int
		id    string
	}
	tests := []struct {
		query       string
		extraLabels []string
		limit       int
		want        []result
	}{
		// Expected ordering is most-recent upload first; the empty
		// upload 19700101.1 never appears.
		{"", nil, 0, []result{{9, "19700101.10"}, {8, "19700101.9"}, {7, "19700101.8"}, {6, "19700101.7"}, {5, "19700101.6"}, {4, "19700101.5"}, {3, "19700101.4"}, {2, "19700101.3"}, {1, "19700101.2"}}},
		{"", nil, 2, []result{{9, "19700101.10"}, {8, "19700101.9"}}},
		{"j:5", nil, 0, []result{{1, "19700101.10"}, {1, "19700101.9"}, {1, "19700101.8"}, {1, "19700101.7"}}},
		{"i:5", nil, 0, []result{{6, "19700101.7"}}},
		{"i:5", []string{"i", "missing"}, 0, []result{{6, "19700101.7"}}},
		{"not:found", nil, 0, nil},
	}
	for _, test := range tests {
		t.Run(fmt.Sprintf("query=%s/limit=%d", test.query, test.limit), func(t *testing.T) {
			r := db.ListUploads(test.query, test.extraLabels, test.limit)
			defer func() {
				t.Logf("r.Debug: %s", r.Debug())
				r.Close()
			}()
			var have []result
			for r.Next() {
				ui := r.Info()
				res := result{ui.Count, ui.UploadID}
				have = append(have, res)
				for k, v := range ui.LabelValues {
					switch k {
					case "i":
						// Upload N was created on iteration i = N-2, so
						// the "i" label must equal uploadNum-2.
						uploadNum, err := strconv.Atoi(res.id[strings.LastIndex(res.id, ".")+1:])
						if err != nil {
							t.Fatalf("cannot parse upload ID %q", res.id)
						}
						if v != fmt.Sprintf("%d", uploadNum-2) {
							t.Errorf(`i = %q, want "%d"`, v, uploadNum-2)
						}
					default:
						t.Errorf("unexpected label %q", k)
					}
				}
			}
			if err := r.Err(); err != nil {
				t.Errorf("Err() = %v", err)
			}
			if !reflect.DeepEqual(have, test.want) {
				t.Errorf("results = %v, want %v", have, test.want)
			}
		})
	}
}

Просмотреть файл

@ -0,0 +1,56 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cloud && !plan9
// +build cloud,!plan9
package dbtest
import (
"crypto/rand"
"database/sql"
"encoding/base64"
"flag"
"fmt"
"testing"
_ "github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/mysql"
)
// cloud switches the tests from in-memory SQLite to a real Cloud SQL
// database; cloudsql names the Cloud SQL instance used when -cloud is set.
var cloud = flag.Bool("cloud", false, "connect to Cloud SQL database instead of in-memory SQLite")
var cloudsql = flag.String("cloudsql", "golang-org:us-central1:golang-org", "name of Cloud SQL instance to run tests on")
// createEmptyDB makes a new, empty database for the test.
// Without -cloud it returns an in-memory SQLite DSN and no cleanup;
// with -cloud it creates a randomly named MySQL database on the
// configured Cloud SQL instance and returns a cleanup that drops it.
func createEmptyDB(t *testing.T) (driver, dsn string, cleanup func()) {
	if !*cloud {
		return "sqlite3", ":memory:", nil
	}
	// Random suffix keeps concurrent test runs from colliding.
	suffix := make([]byte, 6)
	if _, err := rand.Read(suffix); err != nil {
		t.Fatal(err)
	}
	name := "perfdata-test-" + base64.RawURLEncoding.EncodeToString(suffix)
	prefix := fmt.Sprintf("root:@cloudsql(%s)/", *cloudsql)
	db, err := sql.Open("mysql", prefix)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := db.Exec(fmt.Sprintf("CREATE DATABASE `%s`", name)); err != nil {
		db.Close()
		t.Fatal(err)
	}
	t.Logf("Using database %q", name)
	cleanup = func() {
		if _, err := db.Exec(fmt.Sprintf("DROP DATABASE `%s`", name)); err != nil {
			t.Error(err)
		}
		db.Close()
	}
	return "mysql", prefix + name + "?interpolateParams=true", cleanup
}

Просмотреть файл

@ -0,0 +1,47 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cgo
// +build cgo
package dbtest
import (
"testing"
"golang.org/x/build/perfdata/db"
_ "golang.org/x/build/perfdata/db/sqlite3"
)
// NewDB makes a connection to a testing database, either sqlite3 or
// Cloud SQL depending on the -cloud flag. cleanup must be called when
// done with the testing database, instead of calling db.Close()
func NewDB(t *testing.T) (*db.DB, func()) {
	driverName, dsn, cloudCleanup := createEmptyDB(t)
	// dropDB releases the backing database, if any (nil for sqlite3).
	dropDB := func() {
		if cloudCleanup != nil {
			cloudCleanup()
		}
	}
	d, err := db.OpenSQL(driverName, dsn)
	if err != nil {
		dropDB()
		t.Fatalf("open database: %v", err)
	}
	cleanup := func() {
		dropDB()
		d.Close()
	}
	// Make sure the database really is empty; a leftover row would
	// invalidate tests that assume a fresh database.
	uploads, err := d.CountUploads()
	if err != nil {
		cleanup()
		t.Fatal(err)
	}
	if uploads != 0 {
		cleanup()
		t.Fatalf("found %d row(s) in Uploads, want 0", uploads)
	}
	return d, cleanup
}

Просмотреть файл

@ -0,0 +1,17 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !cloud && !plan9
// +build !cloud,!plan9
package dbtest
import (
"testing"
)
// createEmptyDB makes a new, empty database for the test.
// Without the cloud build tag tests always run against an in-memory
// SQLite database; there is nothing to clean up, so cleanup is nil.
func createEmptyDB(t *testing.T) (driver, dsn string, cleanup func()) {
	return "sqlite3", ":memory:", nil
}

Просмотреть файл

@ -0,0 +1,22 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package db
import (
"database/sql"
"time"
)
// DBSQL exposes the underlying *sql.DB of a DB so tests can inspect
// the raw tables directly (see TestNewUpload).
func DBSQL(db *DB) *sql.DB {
	return db.sql
}
// SetNow overrides the clock used by the db package when generating
// upload IDs. Passing the zero Time restores the real time.Now.
func SetNow(t time.Time) {
	if t.IsZero() {
		now = time.Now
	} else {
		now = func() time.Time { return t }
	}
}

168
perfdata/db/query.go Normal file
Просмотреть файл

@ -0,0 +1,168 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package db
import (
"fmt"
"io"
"strings"
"unicode"
)
// operation is the enum for possible query operations.
type operation rune

// Query operations.
// Only equals, lt, and gt can be specified in a query
// (see sepToOperation); ltgt is created by merge when it combines an
// lt part with a gt part into a range.
// The order of these operations is used by merge.
const (
	equals operation = iota
	ltgt
	lt
	gt
)
// A part is a single query part with a key, operator, and value.
type part struct {
	key      string
	operator operation
	// value and value2 hold the values to compare against.
	// For ltgt, value is the exclusive upper bound and value2 the
	// exclusive lower bound (see merge); other operators use value only.
	value, value2 string
}
// sepToOperation maps runes to operation values.
// Only the three separators a user can write appear here; ltgt is
// produced internally by merge.
var sepToOperation = map[byte]operation{
	':': equals,
	'<': lt,
	'>': gt,
}
// parseWord parses a single query part (as returned by SplitWords,
// with quoting and escaping already removed) into a part struct.
// A word is key<sep>value; the key ends at the first ':', '<', '>',
// whitespace, or uppercase rune.
func parseWord(word string) (part, error) {
	isSep := func(r rune) bool {
		return r == ':' || r == '>' || r == '<' || unicode.IsSpace(r) || unicode.IsUpper(r)
	}
	i := strings.IndexFunc(word, isSep)
	if i < 0 {
		return part{}, fmt.Errorf("query part %q is missing operator", word)
	}
	oper, ok := sepToOperation[word[i]]
	if !ok {
		// The separator was whitespace or an uppercase rune, which
		// cannot start a valid value.
		return part{}, fmt.Errorf("query part %q has invalid key", word)
	}
	return part{key: word[:i], operator: oper, value: word[i+1:]}, nil
}
// merge merges two query parts together into a single query part.
// The keys of the two parts must be equal.
// If the result is a query part that can never match, io.EOF is returned as the error.
func (p part) merge(p2 part) (part, error) {
	if p2.operator < p.operator {
		// Sort the parts so we only need half the table below.
		// (Operator ordering: equals < ltgt < lt < gt.)
		p, p2 = p2, p
	}
	switch p.operator {
	case equals:
		// An equality pins the value: the other part either already
		// admits that value (return p unchanged) or contradicts it (EOF).
		switch p2.operator {
		case equals:
			if p.value == p2.value {
				return p, nil
			}
			return part{}, io.EOF
		case lt:
			if p.value < p2.value {
				return p, nil
			}
			return part{}, io.EOF
		case gt:
			if p.value > p2.value {
				return p, nil
			}
			return part{}, io.EOF
		case ltgt:
			// Inside the exclusive range (p2.value2, p2.value)?
			if p.value < p2.value && p.value > p2.value2 {
				return p, nil
			}
			return part{}, io.EOF
		}
	case ltgt:
		// Intersect the ranges: keep the tighter upper bound (value)
		// and the tighter lower bound (value2), then fall through to
		// the normalization below.
		switch p2.operator {
		case ltgt:
			if p2.value < p.value {
				p.value = p2.value
			}
			if p2.value2 > p.value2 {
				p.value2 = p2.value2
			}
		case lt:
			if p2.value < p.value {
				p.value = p2.value
			}
		case gt:
			if p2.value > p.value2 {
				p.value2 = p2.value
			}
		}
	case lt:
		switch p2.operator {
		case lt:
			// Two upper bounds: keep the smaller one.
			if p2.value < p.value {
				return p2, nil
			}
			return p, nil
		case gt:
			// An upper and a lower bound combine into a range; the
			// normalization below validates it.
			p = part{p.key, ltgt, p.value, p2.value}
		}
	case gt:
		// p2.operator == gt
		// Two lower bounds: keep the larger one.
		if p2.value > p.value {
			return p2, nil
		}
		return p, nil
	}
	// p.operator == ltgt
	// Normalize the range: an empty range (upper <= lower) or an
	// empty-string upper bound can never match.
	if p.value <= p.value2 || p.value == "" {
		return part{}, io.EOF
	}
	// An empty lower bound means "no lower constraint" (cf. the
	// "any value" handling in sql), so degrade to a plain lt.
	if p.value2 == "" {
		return part{p.key, lt, p.value, ""}, nil
	}
	return p, nil
}
// sql returns a SQL expression and a list of arguments for finding records matching p.
// The special key "upload" matches against the Records table directly;
// every other key matches label rows in RecordLabels.
func (p part) sql() (sql string, args []interface{}, err error) {
	// Shared query prefixes; concatenation below happens at compile
	// time, so the final SQL strings are constants.
	const (
		byUpload = "SELECT UploadID, RecordID FROM Records WHERE UploadID"
		byLabel  = "SELECT UploadID, RecordID FROM RecordLabels WHERE Name = ?"
	)
	if p.key == "upload" {
		switch p.operator {
		case equals:
			return byUpload + " = ?", []interface{}{p.value}, nil
		case lt:
			return byUpload + " < ?", []interface{}{p.value}, nil
		case gt:
			return byUpload + " > ?", []interface{}{p.value}, nil
		case ltgt:
			return byUpload + " < ? AND UploadID > ?", []interface{}{p.value, p.value2}, nil
		}
	}
	switch p.operator {
	case equals:
		if p.value == "" {
			// TODO(quentin): Implement support for searching for missing labels.
			return "", nil, fmt.Errorf("missing value for key %q", p.key)
		}
		return byLabel + " AND Value = ?", []interface{}{p.key, p.value}, nil
	case lt:
		return byLabel + " AND Value < ?", []interface{}{p.key, p.value}, nil
	case gt:
		if p.value == "" {
			// Simplify queries for any value.
			return byLabel, []interface{}{p.key}, nil
		}
		return byLabel + " AND Value > ?", []interface{}{p.key, p.value}, nil
	case ltgt:
		return byLabel + " AND Value < ? AND Value > ?", []interface{}{p.key, p.value, p.value2}, nil
	default:
		panic("unknown operator " + string(p.operator))
	}
}

44
perfdata/db/query_test.go Normal file
Просмотреть файл

@ -0,0 +1,44 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package db
import "testing"
// TestParseWord checks parseWord's operator detection and error
// handling, and that merging with a no-op part preserves the result.
func TestParseWord(t *testing.T) {
	for _, tc := range []struct {
		word    string
		want    part
		wantErr bool
	}{
		{"key:value", part{"key", equals, "value", ""}, false},
		{"key>value", part{"key", gt, "value", ""}, false},
		{"key<value", part{"key", lt, "value", ""}, false},
		{"bogus query", part{}, true},
	} {
		t.Run(tc.word, func(t *testing.T) {
			p, err := parseWord(tc.word)
			if tc.wantErr {
				if err == nil {
					t.Fatalf("have %#v, want error", p)
				}
				return
			}
			if err != nil {
				t.Fatalf("have error %v", err)
			}
			if p != tc.want {
				t.Fatalf("parseWord = %#v, want %#v", p, tc.want)
			}
			// key>"" matches any value, so merging it in must not
			// change the part.
			merged, err := p.merge(part{p.key, gt, "", ""})
			if err != nil {
				t.Fatalf("failed to merge with noop: %v", err)
			}
			if merged != tc.want {
				t.Fatalf("merge with noop = %#v, want %#v", merged, tc.want)
			}
		})
	}
}

22
perfdata/db/schema.sql Normal file
Просмотреть файл

@ -0,0 +1,22 @@
-- The intended production Cloud SQL schema. Committed here only as a
-- form of notes (see the actual current schema in
-- db.go:createTables).
-- NOTE(review): the tests use textual upload IDs such as
-- "19700101.1", which does not match this SERIAL column; confirm
-- against db.go:createTables before relying on this file.

-- Uploads assigns an ID to each upload batch.
CREATE TABLE Uploads (
	UploadId SERIAL PRIMARY KEY AUTO_INCREMENT
);

-- Records stores the raw content of each uploaded benchmark record,
-- keyed by (upload, record).
CREATE TABLE Records (
	UploadId BIGINT UNSIGNED,
	RecordId BIGINT UNSIGNED,
	Contents BLOB,
	PRIMARY KEY (UploadId, RecordId),
	FOREIGN KEY (UploadId) REFERENCES Uploads(UploadId)
);

-- RecordLabels stores label name/value pairs attached to records.
-- The prefix index supports lookups by Name and by Name+Value.
CREATE TABLE RecordLabels (
	UploadId BIGINT UNSIGNED,
	RecordId BIGINT UNSIGNED,
	Name VARCHAR(255),
	Value VARCHAR(8192),
	INDEX (Name(100), Value(100)),
	FOREIGN KEY (UploadId, RecordId) REFERENCES Records(UploadId, RecordId)
);

Просмотреть файл

@ -0,0 +1,28 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cgo
// +build cgo
// Package sqlite3 provides the sqlite3 driver for
// x/build/perfdata/db. It must be imported instead of go-sqlite3 to
// ensure foreign keys are properly honored.
package sqlite3
import (
"database/sql"
sqlite3 "github.com/mattn/go-sqlite3"
"golang.org/x/build/perfdata/db"
)
// init registers a hook that runs whenever the db package opens a
// "sqlite3" database. The hook installs a per-connection ConnectHook
// on the driver so that every new SQLite connection enables foreign
// key enforcement, which SQLite leaves off by default.
func init() {
	db.RegisterOpenHook("sqlite3", func(db *sql.DB) error {
		// Note: this *sql.DB parameter shadows the imported db package.
		db.Driver().(*sqlite3.SQLiteDriver).ConnectHook = func(c *sqlite3.SQLiteConn) error {
			_, err := c.Exec("PRAGMA foreign_keys = ON;", nil)
			return err
		}
		return nil
	})
}

98
perfdata/fs/fs.go Normal file
Просмотреть файл

@ -0,0 +1,98 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fs provides a backend-agnostic filesystem layer for storing
// performance results.
package fs
import (
"errors"
"io"
"sort"
"sync"
"golang.org/x/net/context"
)
// An FS stores uploaded benchmark data files.
// Implementations in this tree: MemFS (below), and the gcs and local
// subpackages.
type FS interface {
	// NewWriter returns a Writer for a given file name.
	// When the Writer is closed, the file will be stored with the
	// given metadata and the data written to the writer.
	NewWriter(ctx context.Context, name string, metadata map[string]string) (Writer, error)
}

// A Writer is an io.Writer that can also be closed with an error.
type Writer interface {
	io.WriteCloser
	// CloseWithError cancels the writing of the file, removing
	// any partially written data.
	CloseWithError(error) error
}
// MemFS is an in-memory filesystem implementing the FS interface.
type MemFS struct {
	mu      sync.Mutex // guards content
	content map[string]*memFile
}

// NewMemFS constructs a new, empty MemFS.
func NewMemFS() *MemFS {
	return &MemFS{
		content: make(map[string]*memFile),
	}
}
// NewWriter returns a Writer for a given file name. As a side effect,
// it associates the given metadata with the file.
// The metadata map is copied, so later caller mutations are not seen.
func (fs *MemFS) NewWriter(_ context.Context, name string, metadata map[string]string) (Writer, error) {
	f := &memFile{fs: fs, name: name, metadata: make(map[string]string)}
	for k, v := range metadata {
		f.metadata[k] = v
	}
	return f, nil
}
// Files returns the names of the files written to fs, in sorted order.
// It returns nil when no files have been written.
func (fs *MemFS) Files() []string {
	fs.mu.Lock()
	defer fs.mu.Unlock()
	var names []string
	for name := range fs.content {
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}
// memFile represents a file in a MemFS. While the file is being
// written, fs points to the filesystem. Close writes the file's
// content to fs and sets fs to nil.
type memFile struct {
	fs       *MemFS // nil once the file has been closed or abandoned
	name     string
	metadata map[string]string
	content  []byte
}
// Write appends p to the file's buffered content. It never fails.
func (f *memFile) Write(p []byte) (int, error) {
	f.content = append(f.content, p...)
	return len(p), nil
}
// Close publishes the file's content to its MemFS.
// Closing a file twice (or after CloseWithError) is an error.
func (f *memFile) Close() error {
	fs := f.fs
	if fs == nil {
		return errors.New("already closed")
	}
	f.fs = nil
	fs.mu.Lock()
	defer fs.mu.Unlock()
	fs.content[f.name] = f
	return nil
}
// CloseWithError abandons the file. Content is only published to the
// MemFS in Close, so dropping the fs reference discards the data.
// The error argument is ignored.
func (f *memFile) CloseWithError(error) error {
	f.fs = nil
	return nil
}

34
perfdata/fs/gcs/gcs.go Normal file
Просмотреть файл

@ -0,0 +1,34 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gcs implements the fs.FS interface using Google Cloud Storage.
package gcs
import (
"cloud.google.com/go/storage"
"golang.org/x/build/perfdata/fs"
"golang.org/x/net/context"
)
// impl is an fs.FS backed by Google Cloud Storage.
// The zero value is not usable; construct with NewFS.
type impl struct {
	bucket *storage.BucketHandle
}
// NewFS constructs an FS that writes to the provided bucket.
// On AppEngine, ctx must be a request-derived Context.
func NewFS(ctx context.Context, bucketName string) (fs.FS, error) {
	client, err := storage.NewClient(ctx)
	if err != nil {
		return nil, err
	}
	return &impl{bucket: client.Bucket(bucketName)}, nil
}
// NewWriter returns a Writer backed by the GCS object name in the
// FS's bucket, with the object metadata set from the provided map.
func (fs *impl) NewWriter(ctx context.Context, name string, metadata map[string]string) (fs.Writer, error) {
	w := fs.bucket.Object(name).NewWriter(ctx)
	// TODO(quentin): Do these need "x-goog-meta-" prefixes?
	w.Metadata = metadata
	return w, nil
}

Просмотреть файл

@ -0,0 +1,48 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package local implements the fs.FS interface using local files.
// Metadata is not stored separately; the header of each file should
// contain metadata as written by storage/app.
package local
import (
"os"
"path/filepath"
"golang.org/x/net/context"
"golang.org/x/build/perfdata/fs"
)
// impl is an fs.FS backed by local disk.
type impl struct {
	root string // directory under which all files are created
}

// NewFS constructs an FS that writes to the provided directory.
func NewFS(root string) fs.FS {
	return &impl{root}
}
// NewWriter creates the named file under the FS root, creating parent
// directories as needed. The metadata map is ignored here; per the
// package comment, metadata lives in the file header itself.
func (fs *impl) NewWriter(ctx context.Context, name string, metadata map[string]string) (fs.Writer, error) {
	if err := os.MkdirAll(filepath.Join(fs.root, filepath.Dir(name)), 0777); err != nil {
		return nil, err
	}
	f, err := os.Create(filepath.Join(fs.root, name))
	if err != nil {
		return nil, err
	}
	w := &wrapper{f}
	return w, nil
}
// wrapper adapts *os.File to the fs.Writer interface by adding
// CloseWithError.
type wrapper struct {
	*os.File
}

// CloseWithError closes the file and attempts to unlink it.
// The error argument is ignored; the Close error is also discarded,
// and only the Remove error is reported.
func (w *wrapper) CloseWithError(error) error {
	w.Close()
	return os.Remove(w.Name())
}

Просмотреть файл

@ -0,0 +1,50 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package local
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
"golang.org/x/build/internal/diff"
)
// TestNewWriter verifies that a local FS writer creates the file
// (including its parent directory) under the FS root with exactly the
// bytes written.
func TestNewWriter(t *testing.T) {
	ctx := context.Background()
	dir, err := ioutil.TempDir("", "local_test")
	if err != nil {
		t.Fatalf("TempDir = %v", err)
	}
	defer os.RemoveAll(dir)
	fs := NewFS(dir)
	w, err := fs.NewWriter(ctx, "dir/file", map[string]string{"key": "value", "key2": "value2"})
	if err != nil {
		t.Fatalf("NewWriter = %v", err)
	}
	want := "hello world"
	if _, err := w.Write([]byte(want)); err != nil {
		t.Fatalf("Write = %v", err)
	}
	if err := w.Close(); err != nil {
		t.Fatalf("Close = %v", err)
	}
	// ioutil.ReadFile is deprecated since Go 1.16; os.ReadFile is the
	// identical replacement. (ioutil.TempDir is kept above so the
	// file's io/ioutil import remains used.)
	have, err := os.ReadFile(filepath.Join(dir, "dir/file"))
	if err != nil {
		t.Fatalf("ReadFile = %v", err)
	}
	if d := diff.Diff(string(have), want); d != "" {
		t.Errorf("file contents differ. have (-)/want (+)\n%s", d)
	}
}

Просмотреть файл

@ -0,0 +1,66 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cgo
// +build cgo
// Localperfdata runs an HTTP server for benchmark perfdata.
//
// Usage:
//
// localperfdata [-addr address] [-view_url_base url] [-base_dir ../appengine] [-dsn file.db]
package main
import (
"flag"
"log"
"net/http"
"golang.org/x/build/internal/basedir"
"golang.org/x/build/perfdata/app"
"golang.org/x/build/perfdata/db"
_ "golang.org/x/build/perfdata/db/sqlite3"
"golang.org/x/build/perfdata/fs"
"golang.org/x/build/perfdata/fs/local"
)
// Command-line configuration for the local perfdata server.
var (
	addr        = flag.String("addr", ":8080", "serve HTTP on `address`")
	viewURLBase = flag.String("view_url_base", "", "/upload response with `URL` for viewing")
	dsn         = flag.String("dsn", ":memory:", "sqlite `dsn`")
	data        = flag.String("data", "", "data `directory` (in-memory if empty)")
	// baseDir's default is discovered via basedir.Find and may be
	// empty when the package directory cannot be located; main checks
	// for that case.
	baseDir = flag.String("base_dir", basedir.Find("golang.org/x/build/perfdata/appengine"), "base `directory` for static files")
)
// main wires together the database, the file store and the HTTP app,
// then serves on -addr until the process exits.
func main() {
	flag.Parse()
	if *baseDir == "" {
		// NOTE(review): flag.Usage does not exit, so execution
		// continues with an empty BaseDir — confirm this is intended.
		log.Print("base_dir is required and could not be automatically found")
		flag.Usage()
	}
	d, err := db.OpenSQL("sqlite3", *dsn)
	if err != nil {
		log.Fatalf("open database: %v", err)
	}
	// Default to in-memory storage; use the local directory store
	// when -data is given.
	var store fs.FS = fs.NewMemFS()
	if *data != "" {
		store = local.NewFS(*data)
	}
	a := &app.App{
		DB:          d,
		FS:          store,
		ViewURLBase: *viewURLBase,
		// No authentication: every request is an anonymous user.
		Auth:    func(http.ResponseWriter, *http.Request) (string, error) { return "", nil },
		BaseDir: *baseDir,
	}
	a.RegisterOnMux(http.DefaultServeMux)
	log.Printf("Listening on %s", *addr)
	log.Fatal(http.ListenAndServe(*addr, nil))
}

48
perfdata/query/query.go Normal file
Просмотреть файл

@ -0,0 +1,48 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package query provides tools for parsing a query.
package query
// SplitWords splits q into words using shell syntax (whitespace
// can be escaped with double quotes or with a backslash).
// It returns nil when q contains no words.
func SplitWords(q string) []string {
	var words []string
	cur := make([]byte, 0, len(q))
	// flush emits the accumulated word, if any, and resets it.
	flush := func() {
		if len(cur) > 0 {
			words = append(words, string(cur))
			cur = cur[:0]
		}
	}
	inQuote := false
	for i := 0; i < len(q); i++ {
		switch c := q[i]; {
		case c == '"' && inQuote:
			inQuote = false
		case inQuote:
			// Inside quotes a backslash escapes the next byte.
			if c == '\\' {
				i++
			}
			if i < len(q) {
				cur = append(cur, q[i])
			}
		case c == '"':
			inQuote = true
		case c == ' ', c == '\t':
			flush()
		case c == '\\':
			// Outside quotes a backslash escapes the next byte too.
			i++
			fallthrough
		default:
			if i < len(q) {
				cur = append(cur, q[i])
			}
		}
	}
	flush()
	return words
}

Просмотреть файл

@ -0,0 +1,28 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package query
import (
"reflect"
"testing"
)
// TestSplitQueryWords checks quoting and backslash escaping in SplitWords.
func TestSplitQueryWords(t *testing.T) {
	tests := []struct {
		q    string
		want []string
	}{
		{"hello world", []string{"hello", "world"}},
		{"hello\\ world", []string{"hello world"}},
		{`"key:value two" and\ more`, []string{"key:value two", "and more"}},
		{`one" two"\ three four`, []string{"one two three", "four"}},
		{`"4'7\""`, []string{`4'7"`}},
	}
	for _, tc := range tests {
		if have := SplitWords(tc.q); !reflect.DeepEqual(have, tc.want) {
			t.Errorf("splitQueryWords(%q) = %+v, want %+v", tc.q, have, tc.want)
		}
	}
}