Mirror of https://github.com/golang/tools.git
gopls/internal/regtest/bench: put feature benchmarks in their own file
Purely for consistent organization. No functional changes.

Change-Id: Id26d55d6496523827c154f8a2a17b3660f6081eb
Reviewed-on: https://go-review.googlesource.com/c/tools/+/419982
TryBot-Result: Gopher Robot <gobot@golang.org>
gopls-CI: kokoro <noreply+kokoro@google.com>
Reviewed-by: Peter Weinberger <pjw@google.com>
Run-TryBot: Robert Findley <rfindley@google.com>
Parent: c7f11917cb
Commit: 21861e6be5
@@ -5,19 +5,13 @@
 package bench

 import (
 	"flag"
 	"fmt"
-	"os"
-	"runtime"
-	"runtime/pprof"
 	"testing"

 	"golang.org/x/tools/gopls/internal/hooks"
 	"golang.org/x/tools/internal/lsp/bug"
-	"golang.org/x/tools/internal/lsp/fake"
-
-	"golang.org/x/tools/internal/lsp/protocol"
-	. "golang.org/x/tools/internal/lsp/regtest"
+	. "golang.org/x/tools/internal/lsp/regtest"
 )

 func TestMain(m *testing.M) {
@@ -46,180 +40,3 @@ func benchmarkOptions(dir string) []RunOption {
 func printBenchmarkResults(result testing.BenchmarkResult) {
 	fmt.Printf("BenchmarkStatistics\t%s\t%s\n", result.String(), result.MemString())
 }
-
-var iwlOptions struct {
-	workdir string
-}
-
-func init() {
-	flag.StringVar(&iwlOptions.workdir, "iwl_workdir", "", "if set, run IWL benchmark in this directory")
-}
-
-func TestBenchmarkIWL(t *testing.T) {
-	if iwlOptions.workdir == "" {
-		t.Skip("-iwl_workdir not configured")
-	}
-
-	opts := stressTestOptions(iwlOptions.workdir)
-	// Don't skip hooks, so that we can wait for IWL.
-	opts = append(opts, SkipHooks(false))
-
-	results := testing.Benchmark(func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
-			WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {})
-
-		}
-	})
-
-	printBenchmarkResults(results)
-}
-
-var symbolOptions struct {
-	workdir, query, matcher, style string
-	printResults                   bool
-}
-
-func init() {
-	flag.StringVar(&symbolOptions.workdir, "symbol_workdir", "", "if set, run symbol benchmark in this directory")
-	flag.StringVar(&symbolOptions.query, "symbol_query", "test", "symbol query to use in benchmark")
-	flag.StringVar(&symbolOptions.matcher, "symbol_matcher", "", "symbol matcher to use in benchmark")
-	flag.StringVar(&symbolOptions.style, "symbol_style", "", "symbol style to use in benchmark")
-	flag.BoolVar(&symbolOptions.printResults, "symbol_print_results", false, "whether to print symbol query results")
-}
-
-func TestBenchmarkSymbols(t *testing.T) {
-	if symbolOptions.workdir == "" {
-		t.Skip("-symbol_workdir not configured")
-	}
-
-	opts := benchmarkOptions(symbolOptions.workdir)
-	settings := make(Settings)
-	if symbolOptions.matcher != "" {
-		settings["symbolMatcher"] = symbolOptions.matcher
-	}
-	if symbolOptions.style != "" {
-		settings["symbolStyle"] = symbolOptions.style
-	}
-	opts = append(opts, settings)
-
-	WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {
-		// We can't Await in this test, since we have disabled hooks. Instead, run
-		// one symbol request to completion to ensure all necessary cache entries
-		// are populated.
-		symbols, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{
-			Query: symbolOptions.query,
-		})
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if symbolOptions.printResults {
-			fmt.Println("Results:")
-			for i := 0; i < len(symbols); i++ {
-				fmt.Printf("\t%d. %s (%s)\n", i, symbols[i].Name, symbols[i].ContainerName)
-			}
-		}
-
-		results := testing.Benchmark(func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
-				if _, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{
-					Query: symbolOptions.query,
-				}); err != nil {
-					t.Fatal(err)
-				}
-			}
-		})
-		printBenchmarkResults(results)
-	})
-}
-
-var (
-	benchDir     = flag.String("didchange_dir", "", "If set, run benchmarks in this dir. Must also set didchange_file.")
-	benchFile    = flag.String("didchange_file", "", "The file to modify")
-	benchProfile = flag.String("didchange_cpuprof", "", "file to write cpu profiling data to")
-)
-
-// TestBenchmarkDidChange benchmarks modifications of a single file by making
-// synthetic modifications in a comment. It controls pacing by waiting for the
-// server to actually start processing the didChange notification before
-// proceeding. Notably it does not wait for diagnostics to complete.
-//
-// Run it by passing -didchange_dir and -didchange_file, where -didchange_dir
-// is the path to a workspace root, and -didchange_file is the
-// workspace-relative path to a file to modify. e.g.:
-//
-// go test -run=TestBenchmarkDidChange \
-//   -didchange_dir=path/to/kubernetes \
-//   -didchange_file=pkg/util/hash/hash.go
-func TestBenchmarkDidChange(t *testing.T) {
-	if *benchDir == "" {
-		t.Skip("-didchange_dir is not set")
-	}
-	if *benchFile == "" {
-		t.Fatal("-didchange_file must be set if -didchange_dir is set")
-	}
-
-	opts := benchmarkOptions(*benchDir)
-	WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
-		env.OpenFile(*benchFile)
-		env.Await(env.DoneWithOpen())
-		// Insert the text we'll be modifying at the top of the file.
-		env.EditBuffer(*benchFile, fake.Edit{Text: "// __REGTEST_PLACEHOLDER_0__\n"})
-
-		// Run the profiler after the initial load,
-		// across all benchmark iterations.
-		if *benchProfile != "" {
-			profile, err := os.Create(*benchProfile)
-			if err != nil {
-				t.Fatal(err)
-			}
-			defer profile.Close()
-			if err := pprof.StartCPUProfile(profile); err != nil {
-				t.Fatal(err)
-			}
-			defer pprof.StopCPUProfile()
-		}
-
-		result := testing.Benchmark(func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
-				env.EditBuffer(*benchFile, fake.Edit{
-					Start: fake.Pos{Line: 0, Column: 0},
-					End:   fake.Pos{Line: 1, Column: 0},
-					// Increment
-					Text: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", i+1),
-				})
-				env.Await(StartedChange(uint64(i + 1)))
-			}
-		})
-		printBenchmarkResults(result)
-	})
-}
-
-// TestPrintMemStats measures the memory usage of loading a project.
-// It uses the same -didchange_dir flag as above.
-// Always run it in isolation since it measures global heap usage.
-//
-// Kubernetes example:
-//
-// $ go test -v -run=TestPrintMemStats -didchange_dir=$HOME/w/kubernetes
-// TotalAlloc: 5766 MB
-// HeapAlloc: 1984 MB
-//
-// Both figures exhibit variance of less than 1%.
-func TestPrintMemStats(t *testing.T) {
-	if *benchDir == "" {
-		t.Skip("-didchange_dir is not set")
-	}
-
-	// Load the program...
-	opts := benchmarkOptions(*benchDir)
-	WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
-		// ...and print the memory usage.
-		runtime.GC()
-		runtime.GC()
-		var mem runtime.MemStats
-		runtime.ReadMemStats(&mem)
-		t.Logf("TotalAlloc:\t%d MB", mem.TotalAlloc/1e6)
-		t.Logf("HeapAlloc:\t%d MB", mem.HeapAlloc/1e6)
-	})
-}
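Note: the benchmarks removed here (and re-added in the new files below) all drive testing.Benchmark directly from a Test function and report the result through printBenchmarkResults, which stays behind in this file. A rough standalone sketch of that pattern, using only the standard library (the fmt.Sprintf call is just a placeholder workload, not the real gopls operation):

package main

import (
	"fmt"
	"testing"
)

func main() {
	// testing.Benchmark runs the closure with increasing b.N until the
	// timings stabilize and returns a testing.BenchmarkResult.
	result := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = fmt.Sprintf("iteration %d", i) // placeholder workload
		}
	})
	// Same formatting as printBenchmarkResults: iteration count and ns/op
	// from String(), allocation figures from MemString().
	fmt.Printf("BenchmarkStatistics\t%s\t%s\n", result.String(), result.MemString())
}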
@@ -0,0 +1,79 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"runtime/pprof"
+	"testing"
+
+	"golang.org/x/tools/internal/lsp/fake"
+
+	. "golang.org/x/tools/internal/lsp/regtest"
+)
+
+var (
+	benchDir     = flag.String("didchange_dir", "", "If set, run benchmarks in this dir. Must also set didchange_file.")
+	benchFile    = flag.String("didchange_file", "", "The file to modify")
+	benchProfile = flag.String("didchange_cpuprof", "", "file to write cpu profiling data to")
+)
+
+// TestBenchmarkDidChange benchmarks modifications of a single file by making
+// synthetic modifications in a comment. It controls pacing by waiting for the
+// server to actually start processing the didChange notification before
+// proceeding. Notably it does not wait for diagnostics to complete.
+//
+// Run it by passing -didchange_dir and -didchange_file, where -didchange_dir
+// is the path to a workspace root, and -didchange_file is the
+// workspace-relative path to a file to modify. e.g.:
+//
+// go test -run=TestBenchmarkDidChange \
+//   -didchange_dir=path/to/kubernetes \
+//   -didchange_file=pkg/util/hash/hash.go
+func TestBenchmarkDidChange(t *testing.T) {
+	if *benchDir == "" {
+		t.Skip("-didchange_dir is not set")
+	}
+	if *benchFile == "" {
+		t.Fatal("-didchange_file must be set if -didchange_dir is set")
+	}
+
+	opts := benchmarkOptions(*benchDir)
+	WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
+		env.OpenFile(*benchFile)
+		env.Await(env.DoneWithOpen())
+		// Insert the text we'll be modifying at the top of the file.
+		env.EditBuffer(*benchFile, fake.Edit{Text: "// __REGTEST_PLACEHOLDER_0__\n"})
+
+		// Run the profiler after the initial load,
+		// across all benchmark iterations.
+		if *benchProfile != "" {
+			profile, err := os.Create(*benchProfile)
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer profile.Close()
+			if err := pprof.StartCPUProfile(profile); err != nil {
+				t.Fatal(err)
+			}
+			defer pprof.StopCPUProfile()
+		}
+
+		result := testing.Benchmark(func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				env.EditBuffer(*benchFile, fake.Edit{
+					Start: fake.Pos{Line: 0, Column: 0},
+					End:   fake.Pos{Line: 1, Column: 0},
+					// Increment
+					Text: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", i+1),
+				})
+				env.Await(StartedChange(uint64(i + 1)))
+			}
+		})
+		printBenchmarkResults(result)
+	})
+}
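The -didchange_cpuprof handling above is the standard runtime/pprof idiom: create the output file, start the CPU profile after the expensive initial load, and stop it once the measured loop finishes. A minimal self-contained sketch of the same idiom, with a hypothetical output path and a placeholder workload:

package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("cpu.prof") // hypothetical output path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	// Placeholder workload standing in for the benchmarked edits.
	sum := 0
	for i := 0; i < 50_000_000; i++ {
		sum += i % 7
	}
	_ = sum
}

The resulting profile can then be examined with go tool pprof cpu.prof.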
@@ -0,0 +1,39 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+	"flag"
+	"testing"
+
+	. "golang.org/x/tools/internal/lsp/regtest"
+)
+
+var iwlOptions struct {
+	workdir string
+}
+
+func init() {
+	flag.StringVar(&iwlOptions.workdir, "iwl_workdir", "", "if set, run IWL benchmark in this directory")
+}
+
+func TestBenchmarkIWL(t *testing.T) {
+	if iwlOptions.workdir == "" {
+		t.Skip("-iwl_workdir not configured")
+	}
+
+	opts := stressTestOptions(iwlOptions.workdir)
+	// Don't skip hooks, so that we can wait for IWL.
+	opts = append(opts, SkipHooks(false))
+
+	results := testing.Benchmark(func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {})
+
+		}
+	})
+
+	printBenchmarkResults(results)
+}
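Like the other benchmarks in this package, TestBenchmarkIWL is gated on its flag and skips itself otherwise, so an ordinary go test run is unaffected. A small sketch of that flag-gated convention, using a hypothetical flag and test name rather than the real ones:

package bench_sketch

import (
	"flag"
	"testing"
)

// Hypothetical flag: the test only does work when it is set explicitly.
var sketchWorkdir = flag.String("sketch_workdir", "", "if set, run the expensive benchmark in this directory")

func TestFlagGated(t *testing.T) {
	if *sketchWorkdir == "" {
		t.Skip("-sketch_workdir not set")
	}
	// ... the expensive, workdir-dependent benchmark would run here ...
}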
@@ -0,0 +1,41 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+	"runtime"
+	"testing"
+
+	. "golang.org/x/tools/internal/lsp/regtest"
+)
+
+// TestPrintMemStats measures the memory usage of loading a project.
+// It uses the same -didchange_dir flag as above.
+// Always run it in isolation since it measures global heap usage.
+//
+// Kubernetes example:
+//
+// $ go test -v -run=TestPrintMemStats -didchange_dir=$HOME/w/kubernetes
+// TotalAlloc: 5766 MB
+// HeapAlloc: 1984 MB
+//
+// Both figures exhibit variance of less than 1%.
+func TestPrintMemStats(t *testing.T) {
+	if *benchDir == "" {
+		t.Skip("-didchange_dir is not set")
+	}
+
+	// Load the program...
+	opts := benchmarkOptions(*benchDir)
+	WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
+		// ...and print the memory usage.
+		runtime.GC()
+		runtime.GC()
+		var mem runtime.MemStats
+		runtime.ReadMemStats(&mem)
+		t.Logf("TotalAlloc:\t%d MB", mem.TotalAlloc/1e6)
+		t.Logf("HeapAlloc:\t%d MB", mem.HeapAlloc/1e6)
+	})
+}
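TestPrintMemStats reads process-wide numbers from runtime.MemStats, which is why its comment insists on running it in isolation. A standalone sketch of just the measurement step, using only the standard library:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Two GC cycles let the heap settle before sampling, as in the test above.
	runtime.GC()
	runtime.GC()

	var mem runtime.MemStats
	runtime.ReadMemStats(&mem)
	fmt.Printf("TotalAlloc:\t%d MB\n", mem.TotalAlloc/1e6) // cumulative bytes allocated
	fmt.Printf("HeapAlloc:\t%d MB\n", mem.HeapAlloc/1e6)   // bytes of live heap objects
}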
@@ -0,0 +1,74 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+	"flag"
+	"fmt"
+	"testing"
+
+	"golang.org/x/tools/internal/lsp/protocol"
+
+	. "golang.org/x/tools/internal/lsp/regtest"
+)
+
+var symbolOptions struct {
+	workdir, query, matcher, style string
+	printResults                   bool
+}
+
+func init() {
+	flag.StringVar(&symbolOptions.workdir, "symbol_workdir", "", "if set, run symbol benchmark in this directory")
+	flag.StringVar(&symbolOptions.query, "symbol_query", "test", "symbol query to use in benchmark")
+	flag.StringVar(&symbolOptions.matcher, "symbol_matcher", "", "symbol matcher to use in benchmark")
+	flag.StringVar(&symbolOptions.style, "symbol_style", "", "symbol style to use in benchmark")
+	flag.BoolVar(&symbolOptions.printResults, "symbol_print_results", false, "whether to print symbol query results")
+}
+
+func TestBenchmarkSymbols(t *testing.T) {
+	if symbolOptions.workdir == "" {
+		t.Skip("-symbol_workdir not configured")
+	}
+
+	opts := benchmarkOptions(symbolOptions.workdir)
+	settings := make(Settings)
+	if symbolOptions.matcher != "" {
+		settings["symbolMatcher"] = symbolOptions.matcher
+	}
+	if symbolOptions.style != "" {
+		settings["symbolStyle"] = symbolOptions.style
+	}
+	opts = append(opts, settings)
+
+	WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {
+		// We can't Await in this test, since we have disabled hooks. Instead, run
+		// one symbol request to completion to ensure all necessary cache entries
+		// are populated.
+		symbols, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{
+			Query: symbolOptions.query,
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if symbolOptions.printResults {
+			fmt.Println("Results:")
+			for i := 0; i < len(symbols); i++ {
+				fmt.Printf("\t%d. %s (%s)\n", i, symbols[i].Name, symbols[i].ContainerName)
+			}
+		}
+
+		results := testing.Benchmark(func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				if _, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{
+					Query: symbolOptions.query,
+				}); err != nil {
+					t.Fatal(err)
+				}
+			}
+		})
+		printBenchmarkResults(results)
+	})
+}
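Because hooks are disabled, TestBenchmarkSymbols cannot Await server progress; it issues one warm-up request so caches are populated, and only then measures. A rough sketch of that warm-up-then-measure shape, with the LSP call replaced by a hypothetical query function:

package main

import (
	"fmt"
	"testing"
	"time"
)

func main() {
	// Hypothetical stand-in for env.Editor.Server.Symbol: the first call is
	// assumed to be much slower than the steady state.
	query := func() error {
		time.Sleep(time.Millisecond)
		return nil
	}

	// Warm up once, outside the measured region.
	if err := query(); err != nil {
		panic(err)
	}

	results := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			if err := query(); err != nil {
				b.Fatal(err)
			}
		}
	})
	fmt.Println(results.String())
}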