
Produce Markdown test-run summary for GitHub (#2275)

Take advantage of this [new feature](https://github.blog/2022-05-09-supercharging-github-actions-with-job-summaries/) to make test passes/failures easier to find.
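Job summaries work by appending GitHub-flavored Markdown to the file named by the `GITHUB_STEP_SUMMARY` environment variable; whatever lands in that file is rendered on the workflow run page. Here is a minimal sketch of that mechanism in Go (the `writeSummary` helper and its sample content are illustrative, not part of this change):

```go
package main

import (
    "fmt"
    "os"
)

// writeSummary appends Markdown to the job-summary file that GitHub
// Actions exposes via the GITHUB_STEP_SUMMARY environment variable.
func writeSummary(markdown string) error {
    path := os.Getenv("GITHUB_STEP_SUMMARY")
    if path == "" {
        return fmt.Errorf("not running under GitHub Actions")
    }
    // Append rather than truncate: earlier steps may already have written content.
    f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
    if err != nil {
        return err
    }
    defer f.Close()
    _, err = f.WriteString(markdown)
    return err
}

func main() {
    if err := writeSummary("## Tests\n\n✅ all green\n"); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}
```

The workflow change below uses the shell equivalent: `cat reports/*.md > $GITHUB_STEP_SUMMARY`.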
George Pollard 2022-05-13 11:53:53 +12:00, committed by GitHub
Parent c75139ab33
Commit 03cfbbf342
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 320 additions and 26 deletions

View file

@@ -168,7 +168,7 @@ fi
 write-verbose "Checking for $TOOL_DEST/go-task"
 if should-install "$TOOL_DEST/task"; then
     write-info "Installing go-task"
-    curl -sL "https://github.com/go-task/task/releases/download/v3.7.0/task_linux_amd64.tar.gz" | tar xz -C "$TOOL_DEST" task
+    curl -sL "https://github.com/go-task/task/releases/download/v3.12.1/task_linux_amd64.tar.gz" | tar xz -C "$TOOL_DEST" task
 fi

# Install binaries for envtest

.github/workflows/pr-validation.yml vendored
View file

@@ -96,6 +96,9 @@ jobs:
          container_id=${{steps.devcontainer.outputs.id}}
          docker exec "$container_id" task ci
+          # generate summary file for display in Actions
+          cat reports/*.md > $GITHUB_STEP_SUMMARY

      - name: Build docker image & build configuration YAML
        run: |
          container_id=${{steps.devcontainer.outputs.id}}

Taskfile.yml
View file

@@ -11,11 +11,19 @@ vars:
   CONTROLLER_APP: aso-controller
   CONTROLLER_ROOT: v2/
+  CONTROLLER_OUTPUT:
+    sh: 'realpath v2/config'

   TEST_RESOURCE_PREFIX: asotest
   TEST_LIVE_RESOURCE_PREFIX: asolivetest

   SCRIPTS_ROOT: ./scripts
+  HEADER_FILE:
+    sh: 'realpath v2/boilerplate.go.txt'
+  TEST_OUT:
+    sh: 'realpath reports'

   VERSION:
     # Version tags for the v2 controller must start with "v2", e.g. "v2.0.0-alpha.0".
     sh: "{{.SCRIPTS_ROOT}}/build-version.py v2"
@@ -30,6 +38,8 @@ vars:
   CROSSPLANE_APP: crossplane-gen
   CROSSPLANE_ROOT: ./hack/crossplane/
+  CROSSPLANE_OUTPUT:
+    sh: 'realpath hack/crossplane/config'

   KUBE_RBAC_PROXY: gcr.io/kubebuilder/kube-rbac-proxy
@@ -106,7 +116,12 @@ tasks:
     desc: Run {{.GENERATOR_APP}} unit tests and output coverage.
     dir: '{{.GENERATOR_ROOT}}'
     cmds:
-      - go test ./... -tags=noexit -race -covermode atomic -coverprofile=generator-coverage.out -coverpkg=./... -run '{{default ".*" .TEST_FILTER}}'
+      - defer: # want to run even on failure
+          task: produce-markdown-summary
+          vars:
+            INPUT_FILE: '{{.TEST_OUT}}/generator-unit-tests.json'
+            OUTPUT_FILE: '{{.TEST_OUT}}/generator-unit-tests.md'
+      - go test ./... -tags=noexit -race -covermode atomic -coverprofile=generator-coverage.out -json -coverpkg=./... -run '{{default ".*" .TEST_FILTER}}' > '{{.TEST_OUT}}/generator-unit-tests.json'

   generator:update-golden-tests:
     desc: Update {{.GENERATOR_APP}} golden test outputs.
@@ -206,8 +221,13 @@ tasks:
     dir: "{{.CONTROLLER_ROOT}}"
     deps: [controller:generate-crds]
     cmds:
+      - defer: # want to run even on failure
+          task: produce-markdown-summary
+          vars:
+            INPUT_FILE: '{{.TEST_OUT}}/controller-unit-tests.json'
+            OUTPUT_FILE: '{{.TEST_OUT}}/controller-unit-tests.md'
       # -race fails at the moment in gopter - possibly due to our shared generator variable?
-      - go test -short -tags=noexit -timeout 10m -covermode atomic -coverprofile=controller-coverage.out -coverpkg="./..." -run '{{default ".*" .TEST_FILTER}}' ./...
+      - go test -short -tags=noexit -timeout 10m -covermode atomic -coverprofile=controller-coverage.out -json -coverpkg="./..." -run '{{default ".*" .TEST_FILTER}}' ./... > '{{.TEST_OUT}}/controller-unit-tests.json'

   controller:build:
     desc: Generate the {{.CONTROLLER_APP}} binary.
@@ -279,8 +299,13 @@ tasks:
     dir: "{{.CONTROLLER_ROOT}}"
     deps: [controller:run-kustomize-for-envtest]
     cmds:
+      - defer: # want to run even on failure
+          task: produce-markdown-summary
+          vars:
+            INPUT_FILE: '{{.TEST_OUT}}/controller-integration-tests.json'
+            OUTPUT_FILE: '{{.TEST_OUT}}/controller-integration-tests.md'
       # -race fails at the moment in controller-runtime
-      - go test -covermode atomic -coverprofile=coverage-integration-envtest.out -coverpkg="./..." -timeout 15m -run '{{default ".*" .TEST_FILTER}}' ./internal/controllers
+      - go test -covermode atomic -coverprofile=coverage-integration-envtest.out -coverpkg="./..." -json -timeout 15m -run '{{default ".*" .TEST_FILTER}}' ./internal/controllers > '{{.TEST_OUT}}/controller-integration-tests.json'

   controller:test-integration-envtest-live:
     desc: Run integration tests with envtest against live data and output coverage.
@@ -381,21 +406,16 @@ tasks:
     cmds:
       - mkdir -p v2/api
       - find v2/api -type f -name "zz_generated.*" -delete
-      - if [ -d "{{.OUTPUT}}/crd/bases" ]; then find "{{.OUTPUT}}/crd/bases" -type f -delete; fi
-      - if [ -d "{{.OUTPUT}}/crd/patches" ]; then find "{{.OUTPUT}}/crd/patches" -type f -delete; fi
+      - if [ -d "{{.CONTROLLER_OUTPUT}}/crd/bases" ]; then find "{{.CONTROLLER_OUTPUT}}/crd/bases" -type f -delete; fi
+      - if [ -d "{{.CONTROLLER_OUTPUT}}/crd/patches" ]; then find "{{.CONTROLLER_OUTPUT}}/crd/patches" -type f -delete; fi
       - cd v2/api && controller-gen {{.OBJECT_OPTIONS}} paths=./...
       - cd v2/api && controller-gen {{.CRD_OPTIONS}} {{.WEBHOOK_OPTIONS}} {{.RBAC_OPTIONS}} paths=./...
       - cd v2/api && gofmt -l -s -w . # format all generated code
     vars:
-      HEADER_FILE:
-        sh: 'realpath v2/boilerplate.go.txt #controller:generate-crds'
       OBJECT_OPTIONS: object:headerFile={{.HEADER_FILE}}
-      OUTPUT:
-        sh: 'realpath v2/config #controller:generate-crds'
-      CRD_OPTIONS: crd:crdVersions=v1,allowDangerousTypes=true output:crd:artifacts:config={{.OUTPUT}}/crd/bases
-      WEBHOOK_OPTIONS: webhook output:webhook:artifacts:config={{.OUTPUT}}/webhook
-      RBAC_OPTIONS: rbac:roleName=manager-role output:rbac:artifacts:config={{.OUTPUT}}/rbac
+      CRD_OPTIONS: crd:crdVersions=v1,allowDangerousTypes=true output:crd:artifacts:config={{.CONTROLLER_OUTPUT}}/crd/bases
+      WEBHOOK_OPTIONS: webhook output:webhook:artifacts:config={{.CONTROLLER_OUTPUT}}/webhook
+      RBAC_OPTIONS: rbac:roleName=manager-role output:rbac:artifacts:config={{.CONTROLLER_OUTPUT}}/rbac

   controller:generate-genruntime-deepcopy:
     desc: Run controller-gen to generate {{.CONTROLLER_APP}} CRD files.
@@ -407,8 +427,6 @@ tasks:
       - cd pkg/genruntime && controller-gen {{.OBJECT_OPTIONS}} paths=./...
       - cd pkg/genruntime && gofmt -l -s -w . # We do this because controller-gen can generate these files in a way that isn't gofmt'ed which can break things down the line
     vars:
-      HEADER_FILE:
-        sh: 'realpath boilerplate.go.txt #controller:generate-genruntime-deepcopy'
       OBJECT_OPTIONS: object:headerFile={{.HEADER_FILE}}

   controller:generate-kustomize:
@@ -642,21 +660,16 @@ tasks:
     cmds:
       - mkdir -p ./apis
       - find ./apis -type f -name "zz_generated.*" -delete
-      - if [ -d "{{.OUTPUT}}/crd/bases" ]; then find "{{.OUTPUT}}/crd/bases" -type f -delete; fi
-      - if [ -d "{{.OUTPUT}}/crd/patches" ]; then find "{{.OUTPUT}}/crd/patches" -type f -delete; fi
+      - if [ -d "{{.CROSSPLANE_OUTPUT}}/crd/bases" ]; then find "{{.CROSSPLANE_OUTPUT}}/crd/bases" -type f -delete; fi
+      - if [ -d "{{.CROSSPLANE_OUTPUT}}/crd/patches" ]; then find "{{.CROSSPLANE_OUTPUT}}/crd/patches" -type f -delete; fi
       - cd apis && controller-gen {{.OBJECT_OPTIONS}} paths=./...
       - cd apis && controller-gen {{.CRD_OPTIONS}} {{.WEBHOOK_OPTIONS}} {{.RBAC_OPTIONS}} paths=./...
       - cd apis && gofmt -l -s -w . # format all generated code
     vars:
-      HEADER_FILE:
-        sh: 'realpath ../../v2/boilerplate.go.txt #crossplane:generate-crds'
       OBJECT_OPTIONS: object:headerFile={{.HEADER_FILE}}
-      OUTPUT:
-        sh: 'realpath config #crossplane:generate-crds'
-      CRD_OPTIONS: crd:crdVersions=v1,allowDangerousTypes=true output:crd:artifacts:config={{.OUTPUT}}/crd/bases
-      WEBHOOK_OPTIONS: webhook output:webhook:artifacts:config={{.OUTPUT}}/webhook
-      RBAC_OPTIONS: rbac:roleName=manager-role # output:rbac:artifacts:config={{.OUTPUT}}/rbac # not output currently?
+      CRD_OPTIONS: crd:crdVersions=v1,allowDangerousTypes=true output:crd:artifacts:config={{.CROSSPLANE_OUTPUT}}/crd/bases
+      WEBHOOK_OPTIONS: webhook output:webhook:artifacts:config={{.CROSSPLANE_OUTPUT}}/webhook
+      RBAC_OPTIONS: rbac:roleName=manager-role # output:rbac:artifacts:config={{.CROSSPLANE_OUTPUT}}/rbac # not output currently?

   crossplane:generate-types:
     desc: Run {{.GENERATOR_APP}} to generate input files for controller-gen for {{.CROSSPLANE_APP}}.
@@ -723,3 +736,8 @@ tasks:
     vars:
       CHANGED:
         sh: git status --porcelain
+
+  produce-markdown-summary:
+    desc: Builds a test output summary for GitHub
+    cmds:
+      - cmd: go run v2/tools/mangle-test-json/main.go "{{.INPUT_FILE}}" > "{{.OUTPUT_FILE}}"
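Each of the `go test -json` invocations above emits newline-delimited JSON events: one object per line, with `Time`, `Action`, `Package`, `Test`, and `Output` fields (see `go doc cmd/test2json`). For reference, such a stream can also be consumed incrementally with `json.Decoder`, which accepts back-to-back JSON values. This is a sketch of that alternative, not what the new tool below does (it joins the lines into a single JSON array instead):

```go
package main

import (
    "encoding/json"
    "fmt"
    "io"
    "log"
    "os"
    "time"
)

// event mirrors the records emitted by `go test -json`.
type event struct {
    Time    time.Time
    Action  string // "run", "pause", "cont", "output", "pass", "fail", "skip"
    Package string
    Test    string
    Output  string
}

func main() {
    // json.Decoder reads back-to-back JSON objects, so the
    // newline-delimited stream needs no pre-processing.
    dec := json.NewDecoder(os.Stdin)
    for {
        var e event
        if err := dec.Decode(&e); err == io.EOF {
            break
        } else if err != nil {
            log.Fatal(err)
        }
        if e.Action == "fail" && e.Test != "" {
            fmt.Printf("FAIL %s/%s\n", e.Package, e.Test)
        }
    }
}
```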

v2/tools/mangle-test-json/main.go
View file

@@ -0,0 +1,273 @@
/*
 * Copyright (c) Microsoft Corporation.
 * Licensed under the MIT license.
 */

package main

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "log"
    "os"
    "sort"
    "strings"
    "time"
)
// JSONFormat matches the event records emitted by `go test -json`.
type JSONFormat struct {
    Time    time.Time
    Action  string
    Package string
    Test    string
    Output  string
}

// TestRun is the accumulated result of a single test (or of a whole package,
// in which case Test is empty).
type TestRun struct {
    Action  string
    Package string
    Test    string
    Output  []string
    RunTime time.Duration
}
func main() {
    for _, testOutputFile := range os.Args[1:] {
        fmt.Printf("# `%s`\n\n", testOutputFile)
        byPackage := loadJSON(testOutputFile)

        // Sort tests within each package by name; the package-level entry
        // (empty test name) sorts first. Collect package names for ordering.
        packages := []string{}
        for k, v := range byPackage {
            packages = append(packages, k)
            sort.Slice(v, func(i, j int) bool {
                return v[i].Test < v[j].Test
            })
        }
        sort.Strings(packages)

        printSummary(packages, byPackage)
        printDetails(packages, byPackage)
        printSlowTests(byPackage)
    }
}
func min(i, j int) int {
    if i <= j {
        return i
    }
    return j
}
// actionSymbol maps a test outcome to an emoji for the Markdown summary.
func actionSymbol(d TestRun) string {
    switch d.Action {
    case "pass":
        return "✅"
    case "fail":
        return "❌"
    case "skip":
        return "⏭️"
    default:
        panic(fmt.Sprintf("unhandled action: %s", d.Action))
    }
}
// loadJSON parses a `go test -json` output file and groups test runs by package.
func loadJSON(testOutputFile string) map[string][]TestRun {
    content, err := ioutil.ReadFile(testOutputFile)
    if err != nil {
        log.Fatalf("%v", err)
    }

    // make test output into valid JSON: each line is a separate JSON object,
    // so join the lines with commas and wrap them in an array
    jsonData := "[" + strings.Join(strings.Split(strings.Trim(string(content), " \n\r"), "\n"), ",") + "]"

    data := []JSONFormat{}
    err = json.Unmarshal([]byte(jsonData), &data)
    if err != nil {
        log.Fatalf("%v", err)
    }

    // track when each test started running
    startTimes := make(map[string]time.Time)
    runTimes := make(map[string]time.Duration)
    outputs := make(map[string][]string)

    key := func(d JSONFormat) string {
        return d.Package + "/" + d.Test
    }

    // package → list of tests
    byPackage := make(map[string][]TestRun, len(data))
    for _, d := range data {
        switch d.Action {
        case "run":
            if startTimes[key(d)] != (time.Time{}) {
                panic("run while already running")
            }
            startTimes[key(d)] = d.Time
        case "pause":
            if startTimes[key(d)] == (time.Time{}) {
                panic("pause while not running")
            }
            runTimes[key(d)] += d.Time.Sub(startTimes[key(d)])
            startTimes[key(d)] = time.Time{}
        case "cont":
            // cont while still in running state happens sometimes (???)
            // so don't check
            startTimes[key(d)] = d.Time
        case "output":
            outputs[key(d)] = append(outputs[key(d)], d.Output)
        case "pass", "fail", "skip":
            if d.Test != "" && startTimes[key(d)] == (time.Time{}) {
                panic("finished when not running")
            }
            runTimes[key(d)] += d.Time.Sub(startTimes[key(d)])
            byPackage[d.Package] = append(byPackage[d.Package], TestRun{
                Action:  d.Action,
                Package: d.Package,
                Test:    d.Test,
                Output:  outputs[key(d)],
                // round all runtimes to ms to avoid excessive decimal places
                RunTime: sensitiveRound(runTimes[key(d)]),
            })
        }
    }
    return byPackage
}
func sensitiveRound(d time.Duration) time.Duration {
    if d > time.Minute {
        return d.Round(time.Second)
    }
    return d.Round(time.Millisecond)
}
// printSummary emits a bulleted table of contents of per-package outcomes.
func printSummary(packages []string, byPackage map[string][]TestRun) {
    fmt.Printf("## Package Summary\n\n")

    // output table-of-contents
    for _, pkg := range packages {
        tests := byPackage[pkg]
        if len(tests) == 1 {
            // package-only
            continue
        }

        totalRuntime := time.Duration(0)
        for _, t := range tests[1:] {
            totalRuntime += t.RunTime
        }

        // tests[0] is the package-level entry (empty test name sorts first)
        overallOutcome := actionSymbol(tests[0])
        fmt.Printf("* %s `%s` (runtime %s)\n", overallOutcome, pkg, totalRuntime)
    }

    fmt.Println()
}
var maxOutputLines = 300
// printDetails emits a section for each failed package with per-test output.
func printDetails(packages []string, byPackage map[string][]TestRun) {
    anyFailed := false
    testFailed := func() {
        if !anyFailed {
            anyFailed = true
            fmt.Printf("## Failed Test Details\n\n")
        }
    }

    for _, pkg := range packages {
        tests := byPackage[pkg]
        // check package-level indicator, which will be first ("" test name):
        if tests[0].Action != "fail" {
            continue // no failed tests, skip
        }

        // package name as header
        fmt.Printf("### `%s`\n\n", pkg)
        // Output info on stderr
        fmt.Fprintf(os.Stderr, "Package failed: %s\n", pkg)

        for _, test := range tests[1:] {
            // only printing failed tests
            if test.Action == "fail" {
                testFailed()
                fmt.Printf("#### `%s`\n", test.Test)
                fmt.Printf("Failed in %s:\n", test.RunTime)

                trimmedOutput, output := escapeOutput(test.Output)
                summary := "Test output"
                if trimmedOutput {
                    summary += fmt.Sprintf(" (trimmed to last %d lines) — full details available in log", maxOutputLines)
                }

                fmt.Printf("<details><summary>%s</summary><pre>%s</pre></details>\n\n", summary, output)

                // Output info on stderr, so that test failure isn't silent on console
                // when running `task ci`, and so that full logs are available if they get trimmed
                fmt.Fprintf(os.Stderr, "- Test failed: %s\n", test.Test)
                fmt.Fprintln(os.Stderr, "=== TEST OUTPUT ===")
                for _, outputLine := range test.Output {
                    fmt.Fprint(os.Stderr, outputLine) // note that line already has newline attached
                }
                fmt.Fprintln(os.Stderr, "=== END TEST OUTPUT ===")
            }
        }

        fmt.Println()
    }

    if !anyFailed {
        fmt.Println("**🎉 All tests passed. 🎉**")
    }
}
// escapeOutput HTML-escapes test output for embedding in <pre>, trimming it
// to the last maxOutputLines lines if needed; returns whether it trimmed.
func escapeOutput(outputs []string) (bool, string) {
    trimmed := false
    if len(outputs) > maxOutputLines {
        outputs = outputs[len(outputs)-maxOutputLines:]
        trimmed = true
    }

    result := strings.Builder{}
    for _, output := range outputs {
        s := output
        s = strings.ReplaceAll(s, "&", "&amp;")
        s = strings.ReplaceAll(s, "<", "&lt;")
        s = strings.ReplaceAll(s, "\n", "<br>")
        result.WriteString(s)
    }
    return trimmed, result.String()
}
// printSlowTests emits a table of the ten longest-running tests.
func printSlowTests(byPackage map[string][]TestRun) {
    fmt.Printf("## Longest-running tests\n\n")

    allTests := []TestRun{}
    for _, v := range byPackage {
        for _, t := range v[1:] { // skip "" package test
            allTests = append(allTests, t)
        }
    }

    sort.Slice(allTests, func(i, j int) bool {
        return allTests[i].RunTime > allTests[j].RunTime
    })

    fmt.Println("| Package | Name | Time |")
    fmt.Println("|---------|------|-----:|")
    for i := 0; i < min(10, len(allTests)); i += 1 {
        test := allTests[i]
        fmt.Printf("| `%s` | `%s` | %s |\n", test.Package, test.Test, test.RunTime)
    }
}
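Pieced together from the format strings above, the generated summary looks roughly like this (the package, test name, and timings are illustrative, and the file name follows the Taskfile's `{{.TEST_OUT}}` convention):

```markdown
# `reports/controller-unit-tests.json`

## Package Summary

* ✅ `github.com/Azure/azure-service-operator/v2/internal/controllers` (runtime 1m32s)

**🎉 All tests passed. 🎉**

## Longest-running tests

| Package | Name | Time |
|---------|------|-----:|
| `github.com/Azure/azure-service-operator/v2/internal/controllers` | `Test_Samples_CreationAndDeletion` | 45.2s |
```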