Mirror of https://github.com/Azure/ARO-RP.git

Vendoring

go mod tidy -compat=1.17 && go mod vendor

Parent: 26ef9684eb
Commit: 9e842a962d

go.mod | 6 (2 additions, 4 deletions)

@@ -18,6 +18,7 @@ require (
 	github.com/codahale/etm v0.0.0-20141003032925-c00c9e6fb4c9
 	github.com/containers/image/v5 v5.21.0
 	github.com/coreos/go-oidc v2.2.1+incompatible
+	github.com/coreos/go-semver v0.3.0
 	github.com/coreos/go-systemd/v22 v22.3.2
 	github.com/coreos/ignition/v2 v2.14.0
 	github.com/coreos/stream-metadata-go v0.2.0
@@ -37,7 +38,7 @@ require (
 	github.com/gorilla/sessions v1.2.1
 	github.com/jewzaam/go-cosmosdb v0.0.0-20220315232836-282b67c5b234
 	github.com/jstemmer/go-junit-report v0.9.1
-	github.com/onsi/ginkgo v1.16.5
+	github.com/onsi/ginkgo/v2 v2.1.3
 	github.com/onsi/gomega v1.19.0
 	github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible
 	github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a
@@ -119,7 +120,6 @@ require (
 	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
 	github.com/containers/ocicrypt v1.1.3 // indirect
 	github.com/containers/storage v1.39.0 // indirect
-	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
 	github.com/coreos/ignition v0.35.0 // indirect
 	github.com/coreos/vcontext v0.0.0-20220326205524-7fcaf69e7050 // indirect
@@ -252,7 +252,6 @@ require (
 	github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
 	github.com/nishanths/exhaustive v0.2.3 // indirect
 	github.com/nishanths/predeclared v0.2.1 // indirect
-	github.com/nxadm/tail v1.4.8 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
@@ -334,7 +333,6 @@ require (
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.66.4 // indirect
 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
-	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 	honnef.co/go/tools v0.2.1 // indirect

@@ -0,0 +1,14 @@
# editorconfig.org

root = true

[*]
insert_final_newline = true
charset = utf-8
trim_trailing_whitespace = true
indent_style = tab
indent_size = 8

[*.{md,yml,yaml,json}]
indent_style = space
indent_size = 2

@@ -0,0 +1 @@
* text=auto

@@ -0,0 +1,2 @@
vendor/
/.glide

@@ -0,0 +1,364 @@
# Changelog

## Release 3.2.0 (2020-12-14)

### Added

- #211: Added randInt function (thanks @kochurovro)
- #223: Added fromJson and mustFromJson functions (thanks @mholt)
- #242: Added a bcrypt function (thanks @robbiet480)
- #253: Added randBytes function (thanks @MikaelSmith)
- #254: Added dig function for dicts (thanks @nyarly)
- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton)
- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl)
- #268: Added any and all functions for testing conditions (thanks @phuslu)
- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf
  (thanks @andrewmostello)
- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek)
- #270: Extend certificate functions to handle non-RSA keys + add support for
  ed25519 keys (thanks @misberner)

### Changed

- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer
- Using semver 3.1.1 and mergo 0.3.11

### Fixed

- #249: Fix htmlDateInZone example (thanks @spawnia)

NOTE: The dependency github.com/imdario/mergo reverted the breaking change in
0.3.9 via 0.3.10 release.

## Release 3.1.0 (2020-04-16)

NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9
that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8.

### Added

- #225: Added support for generating htpasswd hash (thanks @rustycl0ck)
- #224: Added duration filter (thanks @frebib)
- #205: Added `seq` function (thanks @thadc23)

### Changed

- #203: Unlambda functions with correct signature (thanks @muesli)
- #236: Updated the license formatting for GitHub display purposes
- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9
  as it causes a breaking change for sprig. That issue is tracked at
  https://github.com/imdario/mergo/issues/139

### Fixed

- #229: Fix `seq` example in docs (thanks @kalmant)

## Release 3.0.2 (2019-12-13)

### Fixed

- #220: Updating to semver v3.0.3 to fix issue with <= ranges
- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya)

## Release 3.0.1 (2019-12-08)

### Fixed

- #212: Updated semver fixing broken constraint checking with ^0.0

## Release 3.0.0 (2019-10-02)

### Added

- #187: Added durationRound function (thanks @yjp20)
- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn)
- #193: Added toRawJson support (thanks @Dean-Coakley)
- #197: Added get support to dicts (thanks @Dean-Coakley)

### Changed

- #186: Moving dependency management to Go modules
- #186: Updated semver to v3. This has changes in the way ^ is handled
- #194: Updated documentation on merging and how it copies. Added example using deepCopy
- #196: trunc now supports negative values (thanks @Dean-Coakley)

## Release 2.22.0 (2019-10-02)

### Added

- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
- #195: Added deepCopy function for use with dicts

### Changed

- Updated merge and mergeOverwrite documentation to explain copying and how to
  use deepCopy with it

## Release 2.21.0 (2019-09-18)

### Added

- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
- #128: Added toDecimal support (thanks @Dean-Coakley)
- #169: Added list concat (thanks @astorath)
- #174: Added deepEqual function (thanks @bonifaido)
- #170: Added url parse and join functions (thanks @astorath)

### Changed

- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify

### Fixed

- #172: Fix semver wildcard example (thanks @piepmatz)
- #175: Fix dateInZone doc example (thanks @s3than)

## Release 2.20.0 (2019-06-18)

### Added

- #164: Adding function to get unix epoch for a time (@mattfarina)
- #166: Adding tests for date_in_zone (@mattfarina)

### Changed

- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan)

### Fixed

## Release 2.19.0 (2019-03-02)

IMPORTANT: This release reverts a change from 2.18.0

In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random.

We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.

### Changed

- Fix substr panic 35fb796 (Alexey igrychev)
- Remove extra period 1eb7729 (Matthew Lorimor)
- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
- README edits/fixes/suggestions 08fe136 (Lauri Apple)

## Release 2.18.0 (2019-02-12)

### Added

- Added mergeOverwrite function
- cryptographic functions that use secure random (see fe1de12)

### Changed

- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
- Handle has for nil list 9c10885 (Daniel Cohen)
- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
- Handle untyped nil values as parameters to string functions 2b2ec8f (Morten Torkildsen)

### Fixed

- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
- Fix substr var names and comments d581f80 (Dean Coakley)
- Fix substr documentation 2737203 (Dean Coakley)

## Release 2.17.1 (2019-01-03)

### Fixed

The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.

## Release 2.17.0 (2019-01-03)

### Added

- adds alder32sum function and test 6908fc2 (marshallford)
- Added kebabcase function ca331a1 (Ilyes512)

### Changed

- Update goutils to 1.1.0 4e1125d (Matt Butcher)

### Fixed

- Fix 'has' documentation e3f2a85 (dean-coakley)
- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
- fixes spelling errors... not sure how that happened 4cf188a (marshallford)

## Release 2.16.0 (2018-08-13)

### Added

- add splitn function fccb0b0 (Helgi Þorbjörnsson)
- Add slice func df28ca7 (gongdo)
- Generate serial number a3bdffd (Cody Coons)
- Extract values of dict with values function df39312 (Lawrence Jones)

### Changed

- Modify panic message for list.slice ae38335 (gongdo)
- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
- Remove duplicated documentation 1d97af1 (Matthew Fisher)
- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)

### Fixed

- Fix file permissions c5f40b5 (gongdo)
- Fix example for buildCustomCert 7779e0d (Tin Lam)

## Release 2.15.0 (2018-04-02)

### Added

- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
- #66: Add ternary function (thanks @binoculars)
- #67: Allow keys function to take multiple dicts (thanks @binoculars)
- #89: Added sha1sum to crypto function (thanks @benkeil)
- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei)
- #92: Add travis testing for go 1.10
- #93: Adding appveyor config for windows testing

### Changed

- #90: Updating to more recent dependencies
- #73: replace satori/go.uuid with google/uuid (thanks @petterw)

### Fixed

- #76: Fixed documentation typos (thanks @Thiht)
- Fixed rounding issue on the `ago` function. Note, this removes support for Go 1.8 and older

## Release 2.14.1 (2017-12-01)

### Fixed

- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
- #61: Removing line with {{ due to blocking github pages generation
- #64: Update the list functions to handle int, string, and other slices for compatibility

## Release 2.14.0 (2017-10-06)

This new version of Sprig adds a set of functions for generating and working with SSL certificates.

- `genCA` generates an SSL Certificate Authority
- `genSelfSignedCert` generates an SSL self-signed certificate
- `genSignedCert` generates an SSL certificate and key based on a given CA

## Release 2.13.0 (2017-09-18)

This release adds new functions, including:

- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
- `floor`, `ceil`, and `round` math functions
- `toDate` converts a string to a date
- `nindent` is just like `indent` but also prepends a new line
- `ago` returns the time from `time.Now`

### Added

- #40: Added basic regex functionality (thanks @alanquillin)
- #41: Added ceil floor and round functions (thanks @alanquillin)
- #48: Added toDate function (thanks @andreynering)
- #50: Added nindent function (thanks @binoculars)
- #46: Added ago function (thanks @slayer)

### Changed

- #51: Updated godocs to include new string functions (thanks @curtisallen)
- #49: Added ability to merge multiple dicts (thanks @binoculars)

## Release 2.12.0 (2017-05-17)

- `snakecase`, `camelcase`, and `shuffle` are three new string functions
- `fail` allows you to bail out of a template render when conditions are not met

## Release 2.11.0 (2017-05-02)

- Added `toJson` and `toPrettyJson`
- Added `merge`
- Refactored documentation

## Release 2.10.0 (2017-03-15)

- Added `semver` and `semverCompare` for Semantic Versions
- `list` replaces `tuple`
- Fixed issue with `join`
- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`

## Release 2.9.0 (2017-02-23)

- Added `splitList` to split a list
- Added crypto functions of `genPrivateKey` and `derivePassword`

## Release 2.8.0 (2016-12-21)

- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)

## Release 2.7.0 (2016-12-01)

- Added `sha256sum` to generate a hash of an input
- Added functions to convert a numeric or string to `int`, `int64`, `float64`

## Release 2.6.0 (2016-10-03)

- Added a `uuidv4` template function for generating UUIDs inside of a template.

## Release 2.5.0 (2016-08-19)

- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0

## Release 2.4.0 (2016-08-16)

- Adds two functions: `until` and `untilStep`

## Release 2.3.0 (2016-06-21)

- cat: Concatenate strings with whitespace separators.
- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
- indent: Indent blocks of text in a way that is sensitive to "\n" characters.

## Release 2.2.0 (2016-04-21)

- Added a `genPrivateKey` function (Thanks @bacongobbler)

## Release 2.1.0 (2016-03-30)

- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.

## Release 2.0.0 (2016-03-29)

Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.

- `min` complements `max` (formerly `biggest`)
- `empty` indicates that a value is the empty value for its type
- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}`
- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
- Date formatters have been added for HTML dates (as used in `date` input fields)
- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).

## Release 1.2.0 (2016-02-01)

- Added quote and squote
- Added b32enc and b32dec
- add now takes varargs
- biggest now takes varargs

## Release 1.1.0 (2015-12-29)

- Added #4: Added contains function. strings.Contains, but with the arguments
  switched to simplify common pipelines. (thanks krancour)
- Added Travis-CI testing support

## Release 1.0.0 (2015-12-23)

- Initial release

@@ -1,6 +1,4 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 Yasuhiro Matsumoto
+Copyright (C) 2013-2020 Masterminds

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -9,13 +7,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

@@ -0,0 +1,73 @@
# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig)

Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
all functions that depend on external (non standard library) or crypto packages
removed.

The reason for this is to make this library more lightweight. Most of these
functions (especially the crypto ones) are not needed in most apps, but cost a lot
in terms of binary size and compilation time.

## Usage

**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for
detailed instructions and code snippets for the >100 template functions available.

**Go developers**: If you'd like to include Slim-Sprig as a library in your program,
our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig).

For standard usage, read on.

### Load the Slim-Sprig library

To load the Slim-Sprig `FuncMap`:

```go
import (
	"html/template"

	"github.com/go-task/slim-sprig"
)

// This example illustrates that the FuncMap *must* be set before the
// templates themselves are loaded.
tpl := template.Must(
	template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html")
)
```

### Calling the functions inside of templates

By convention, all functions are lowercase. This seems to follow the Go
idiom for template functions (as opposed to template methods, which are
TitleCase). For example, this:

```
{{ "hello!" | upper | repeat 5 }}
```

produces this:

```
HELLO!HELLO!HELLO!HELLO!HELLO!
```

## Principles Driving Our Function Selection

We followed these principles to decide which functions to add and how to implement them:

- Use template functions to build layout. The following
  types of operations are within the domain of template functions:
  - Formatting
  - Layout
  - Simple type conversions
  - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
- Template functions should not return errors unless there is no way to print
  a sensible value. For example, converting a string to an integer should not
  produce an error if conversion fails. Instead, it should display a default
  value.
- Simple math is necessary for grid layouts, pagers, and so on. Complex math
  (anything other than arithmetic) should be done outside of templates.
- Template functions only deal with the data passed into them. They never retrieve
  data from a source.
- Finally, do not override core Go template functions.

@@ -0,0 +1,12 @@
# https://taskfile.dev

version: '2'

tasks:
  default:
    cmds:
      - task: test

  test:
    cmds:
      - go test -v .

@@ -0,0 +1,24 @@
package sprig

import (
	"crypto/sha1"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash/adler32"
)

func sha256sum(input string) string {
	hash := sha256.Sum256([]byte(input))
	return hex.EncodeToString(hash[:])
}

func sha1sum(input string) string {
	hash := sha1.Sum([]byte(input))
	return hex.EncodeToString(hash[:])
}

func adler32sum(input string) string {
	hash := adler32.Checksum([]byte(input))
	return fmt.Sprintf("%d", hash)
}
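
These helpers are unexported, but functions.go later in this diff registers them in the template function map as `sha1sum`, `sha256sum`, and `adler32sum`. A minimal usage sketch (illustrative, not part of the commit; only the import path and `TxtFuncMap` come from the vendored sources):

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig"
)

func main() {
	// TxtFuncMap exposes the checksum helpers above to text/template.
	tpl := template.Must(template.New("sums").Funcs(sprig.TxtFuncMap()).Parse(
		`sha1:    {{ "hello" | sha1sum }}
sha256:  {{ "hello" | sha256sum }}
adler32: {{ "hello" | adler32sum }}
`))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```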

@@ -0,0 +1,152 @@
package sprig

import (
	"strconv"
	"time"
)

// Given a format and a date, format the date string.
//
// Date can be a `time.Time` or an `int, int32, int64`.
// In the latter case, it is treated as seconds since UNIX
// epoch.
func date(fmt string, date interface{}) string {
	return dateInZone(fmt, date, "Local")
}

func htmlDate(date interface{}) string {
	return dateInZone("2006-01-02", date, "Local")
}

func htmlDateInZone(date interface{}, zone string) string {
	return dateInZone("2006-01-02", date, zone)
}

func dateInZone(fmt string, date interface{}, zone string) string {
	var t time.Time
	switch date := date.(type) {
	default:
		t = time.Now()
	case time.Time:
		t = date
	case *time.Time:
		t = *date
	case int64:
		t = time.Unix(date, 0)
	case int:
		t = time.Unix(int64(date), 0)
	case int32:
		t = time.Unix(int64(date), 0)
	}

	loc, err := time.LoadLocation(zone)
	if err != nil {
		loc, _ = time.LoadLocation("UTC")
	}

	return t.In(loc).Format(fmt)
}

func dateModify(fmt string, date time.Time) time.Time {
	d, err := time.ParseDuration(fmt)
	if err != nil {
		return date
	}
	return date.Add(d)
}

func mustDateModify(fmt string, date time.Time) (time.Time, error) {
	d, err := time.ParseDuration(fmt)
	if err != nil {
		return time.Time{}, err
	}
	return date.Add(d), nil
}

func dateAgo(date interface{}) string {
	var t time.Time

	switch date := date.(type) {
	default:
		t = time.Now()
	case time.Time:
		t = date
	case int64:
		t = time.Unix(date, 0)
	case int:
		t = time.Unix(int64(date), 0)
	}
	// Drop resolution to seconds
	duration := time.Since(t).Round(time.Second)
	return duration.String()
}

func duration(sec interface{}) string {
	var n int64
	switch value := sec.(type) {
	default:
		n = 0
	case string:
		n, _ = strconv.ParseInt(value, 10, 64)
	case int64:
		n = value
	}
	return (time.Duration(n) * time.Second).String()
}

func durationRound(duration interface{}) string {
	var d time.Duration
	switch duration := duration.(type) {
	default:
		d = 0
	case string:
		d, _ = time.ParseDuration(duration)
	case int64:
		d = time.Duration(duration)
	case time.Time:
		d = time.Since(duration)
	}

	u := uint64(d)
	neg := d < 0
	if neg {
		u = -u
	}

	var (
		year   = uint64(time.Hour) * 24 * 365
		month  = uint64(time.Hour) * 24 * 30
		day    = uint64(time.Hour) * 24
		hour   = uint64(time.Hour)
		minute = uint64(time.Minute)
		second = uint64(time.Second)
	)
	switch {
	case u > year:
		return strconv.FormatUint(u/year, 10) + "y"
	case u > month:
		return strconv.FormatUint(u/month, 10) + "mo"
	case u > day:
		return strconv.FormatUint(u/day, 10) + "d"
	case u > hour:
		return strconv.FormatUint(u/hour, 10) + "h"
	case u > minute:
		return strconv.FormatUint(u/minute, 10) + "m"
	case u > second:
		return strconv.FormatUint(u/second, 10) + "s"
	}
	return "0s"
}

func toDate(fmt, str string) time.Time {
	t, _ := time.ParseInLocation(fmt, str, time.Local)
	return t
}

func mustToDate(fmt, str string) (time.Time, error) {
	return time.ParseInLocation(fmt, str, time.Local)
}

func unixEpoch(date time.Time) string {
	return strconv.FormatInt(date.Unix(), 10)
}
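
The format-first signatures above are deliberate: with the date last, `date` can sit at the end of a pipeline, while `dateInZone` takes the zone after the date and silently falls back to UTC when the zone cannot be loaded. A hedged sketch (the wrapper program is illustrative; the function names are taken from this file):

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig"
)

func main() {
	// "date" takes the format first, so the time can be piped in;
	// "dateInZone" is called directly because its date argument sits
	// between the format and the zone.
	tpl := template.Must(template.New("dates").Funcs(sprig.TxtFuncMap()).Parse(
		`local: {{ now | date "2006-01-02 15:04" }}
utc:   {{ dateInZone "2006-01-02 15:04" now "UTC" }}
`))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```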

@@ -0,0 +1,163 @@
package sprig

import (
	"bytes"
	"encoding/json"
	"math/rand"
	"reflect"
	"strings"
	"time"
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

// dfault checks whether `given` is set, and returns default if not set.
//
// This returns `d` if `given` appears not to be set, and `given` otherwise.
//
// For numeric types 0 is unset.
// For strings, maps, arrays, and slices, len() = 0 is considered unset.
// For bool, false is unset.
// Structs are never considered unset.
//
// For everything else, including pointers, a nil value is unset.
func dfault(d interface{}, given ...interface{}) interface{} {

	if empty(given) || empty(given[0]) {
		return d
	}
	return given[0]
}

// empty returns true if the given value has the zero value for its type.
func empty(given interface{}) bool {
	g := reflect.ValueOf(given)
	if !g.IsValid() {
		return true
	}

	// Basically adapted from text/template.isTrue
	switch g.Kind() {
	default:
		return g.IsNil()
	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
		return g.Len() == 0
	case reflect.Bool:
		return !g.Bool()
	case reflect.Complex64, reflect.Complex128:
		return g.Complex() == 0
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return g.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return g.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return g.Float() == 0
	case reflect.Struct:
		return false
	}
}

// coalesce returns the first non-empty value.
func coalesce(v ...interface{}) interface{} {
	for _, val := range v {
		if !empty(val) {
			return val
		}
	}
	return nil
}

// all returns true if empty(x) is false for all values x in the list.
// If the list is empty, return true.
func all(v ...interface{}) bool {
	for _, val := range v {
		if empty(val) {
			return false
		}
	}
	return true
}

// any returns true if empty(x) is false for any x in the list.
// If the list is empty, return false.
func any(v ...interface{}) bool {
	for _, val := range v {
		if !empty(val) {
			return true
		}
	}
	return false
}

// fromJson decodes JSON into a structured value, ignoring errors.
func fromJson(v string) interface{} {
	output, _ := mustFromJson(v)
	return output
}

// mustFromJson decodes JSON into a structured value, returning errors.
func mustFromJson(v string) (interface{}, error) {
	var output interface{}
	err := json.Unmarshal([]byte(v), &output)
	return output, err
}

// toJson encodes an item into a JSON string
func toJson(v interface{}) string {
	output, _ := json.Marshal(v)
	return string(output)
}

func mustToJson(v interface{}) (string, error) {
	output, err := json.Marshal(v)
	if err != nil {
		return "", err
	}
	return string(output), nil
}

// toPrettyJson encodes an item into a pretty (indented) JSON string
func toPrettyJson(v interface{}) string {
	output, _ := json.MarshalIndent(v, "", "  ")
	return string(output)
}

func mustToPrettyJson(v interface{}) (string, error) {
	output, err := json.MarshalIndent(v, "", "  ")
	if err != nil {
		return "", err
	}
	return string(output), nil
}

// toRawJson encodes an item into a JSON string with no escaping of HTML characters.
func toRawJson(v interface{}) string {
	output, err := mustToRawJson(v)
	if err != nil {
		panic(err)
	}
	return string(output)
}

// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters.
func mustToRawJson(v interface{}) (string, error) {
	buf := new(bytes.Buffer)
	enc := json.NewEncoder(buf)
	enc.SetEscapeHTML(false)
	err := enc.Encode(&v)
	if err != nil {
		return "", err
	}
	return strings.TrimSuffix(buf.String(), "\n"), nil
}

// ternary returns the first value if the last value is true, otherwise returns the second value.
func ternary(vt interface{}, vf interface{}, v bool) interface{} {
	if v {
		return vt
	}

	return vf
}
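
The emptiness rules documented on `dfault` drive `default`, `coalesce`, `all`, and `any` alike: 0, "", false, nil, and zero-length collections all count as unset. A small sketch of the resulting template behavior (illustrative wrapper; function names from the map in functions.go):

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig"
)

func main() {
	// "" is empty, so default substitutes the fallback; coalesce skips
	// nil, "" and 0 and returns the first non-empty argument; ternary
	// picks its first argument when the trailing bool is true.
	tpl := template.Must(template.New("defaults").Funcs(sprig.TxtFuncMap()).Parse(
		`{{ "" | default "fallback" }}
{{ coalesce .Missing "" 0 "first-non-empty" }}
{{ ternary "yes" "no" true }}
`))
	if err := tpl.Execute(os.Stdout, map[string]interface{}{}); err != nil {
		panic(err)
	}
}
```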

@@ -0,0 +1,118 @@
package sprig

func get(d map[string]interface{}, key string) interface{} {
	if val, ok := d[key]; ok {
		return val
	}
	return ""
}

func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} {
	d[key] = value
	return d
}

func unset(d map[string]interface{}, key string) map[string]interface{} {
	delete(d, key)
	return d
}

func hasKey(d map[string]interface{}, key string) bool {
	_, ok := d[key]
	return ok
}

func pluck(key string, d ...map[string]interface{}) []interface{} {
	res := []interface{}{}
	for _, dict := range d {
		if val, ok := dict[key]; ok {
			res = append(res, val)
		}
	}
	return res
}

func keys(dicts ...map[string]interface{}) []string {
	k := []string{}
	for _, dict := range dicts {
		for key := range dict {
			k = append(k, key)
		}
	}
	return k
}

func pick(dict map[string]interface{}, keys ...string) map[string]interface{} {
	res := map[string]interface{}{}
	for _, k := range keys {
		if v, ok := dict[k]; ok {
			res[k] = v
		}
	}
	return res
}

func omit(dict map[string]interface{}, keys ...string) map[string]interface{} {
	res := map[string]interface{}{}

	omit := make(map[string]bool, len(keys))
	for _, k := range keys {
		omit[k] = true
	}

	for k, v := range dict {
		if _, ok := omit[k]; !ok {
			res[k] = v
		}
	}
	return res
}

func dict(v ...interface{}) map[string]interface{} {
	dict := map[string]interface{}{}
	lenv := len(v)
	for i := 0; i < lenv; i += 2 {
		key := strval(v[i])
		if i+1 >= lenv {
			dict[key] = ""
			continue
		}
		dict[key] = v[i+1]
	}
	return dict
}

func values(dict map[string]interface{}) []interface{} {
	values := []interface{}{}
	for _, value := range dict {
		values = append(values, value)
	}

	return values
}

func dig(ps ...interface{}) (interface{}, error) {
	if len(ps) < 3 {
		panic("dig needs at least three arguments")
	}
	dict := ps[len(ps)-1].(map[string]interface{})
	def := ps[len(ps)-2]
	ks := make([]string, len(ps)-2)
	for i := 0; i < len(ks); i++ {
		ks[i] = ps[i].(string)
	}

	return digFromDict(dict, def, ks)
}

func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) {
	k, ns := ks[0], ks[1:len(ks)]
	step, has := dict[k]
	if !has {
		return d, nil
	}
	if len(ns) == 0 {
		return step, nil
	}
	return digFromDict(step.(map[string]interface{}), d, ns)
}
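
From the argument handling in `dig` above, the call order is keys first, then a default, then the dict, with the default returned whenever a key along the path is missing. A usage sketch (illustrative wrapper; behavior follows the source above):

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig"
)

func main() {
	data := map[string]interface{}{
		"cfg": map[string]interface{}{
			"db": map[string]interface{}{"host": "localhost"},
		},
	}
	// dig walks cfg["db"]["host"]; the second lookup misses "port"
	// and falls back to the supplied default, 5432.
	tpl := template.Must(template.New("dicts").Funcs(sprig.TxtFuncMap()).Parse(
		`host: {{ dig "db" "host" "unknown" .cfg }}
port: {{ dig "db" "port" 5432 .cfg }}
`))
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```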

@@ -0,0 +1,19 @@
/*
Package sprig provides template functions for Go.

This package contains a number of utility functions for working with data
inside of Go `html/template` and `text/template` files.

To add these functions, use the `template.Funcs()` method:

	t := template.New("foo").Funcs(sprig.FuncMap())

Note that you should add the function map before you parse any template files.

In several cases, Sprig reverses the order of arguments from the way they
appear in the standard library. This is to make it easier to pipe
arguments into functions.

See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions.
*/
package sprig
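
A short sketch of the reversed-argument convention the package comment describes (illustrative, not from the commit): the "subject" string comes last, so it can be piped from stage to stage.

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig"
)

func main() {
	// strings.Trim(s, cutset) becomes {{ s | trimAll cutset }} and
	// strings.Contains(s, substr) becomes {{ s | contains substr }}:
	// the piped value is appended as the final argument. Prints "true".
	tpl := template.Must(template.New("pipes").Funcs(sprig.TxtFuncMap()).Parse(
		`{{ "$5.00" | trimAll "$" | contains "5" }}`))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```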

@@ -0,0 +1,317 @@
package sprig

import (
	"errors"
	"html/template"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"strconv"
	"strings"
	ttemplate "text/template"
	"time"
)

// FuncMap produces the function map.
//
// Use this to pass the functions into the template engine:
//
//	tpl := template.New("foo").Funcs(sprig.FuncMap())
//
func FuncMap() template.FuncMap {
	return HtmlFuncMap()
}

// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
func HermeticTxtFuncMap() ttemplate.FuncMap {
	r := TxtFuncMap()
	for _, name := range nonhermeticFunctions {
		delete(r, name)
	}
	return r
}

// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions.
func HermeticHtmlFuncMap() template.FuncMap {
	r := HtmlFuncMap()
	for _, name := range nonhermeticFunctions {
		delete(r, name)
	}
	return r
}

// TxtFuncMap returns a 'text/template'.FuncMap
func TxtFuncMap() ttemplate.FuncMap {
	return ttemplate.FuncMap(GenericFuncMap())
}

// HtmlFuncMap returns an 'html/template'.Funcmap
func HtmlFuncMap() template.FuncMap {
	return template.FuncMap(GenericFuncMap())
}

// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}.
func GenericFuncMap() map[string]interface{} {
	gfm := make(map[string]interface{}, len(genericMap))
	for k, v := range genericMap {
		gfm[k] = v
	}
	return gfm
}

// These functions are not guaranteed to evaluate to the same result for given input, because they
// refer to the environment or global state.
var nonhermeticFunctions = []string{
	// Date functions
	"date",
	"date_in_zone",
	"date_modify",
	"now",
	"htmlDate",
	"htmlDateInZone",
	"dateInZone",
	"dateModify",

	// Strings
	"randAlphaNum",
	"randAlpha",
	"randAscii",
	"randNumeric",
	"randBytes",
	"uuidv4",

	// OS
	"env",
	"expandenv",

	// Network
	"getHostByName",
}

var genericMap = map[string]interface{}{
	"hello": func() string { return "Hello!" },

	// Date functions
	"ago":              dateAgo,
	"date":             date,
	"date_in_zone":     dateInZone,
	"date_modify":      dateModify,
	"dateInZone":       dateInZone,
	"dateModify":       dateModify,
	"duration":         duration,
	"durationRound":    durationRound,
	"htmlDate":         htmlDate,
	"htmlDateInZone":   htmlDateInZone,
	"must_date_modify": mustDateModify,
	"mustDateModify":   mustDateModify,
	"mustToDate":       mustToDate,
	"now":              time.Now,
	"toDate":           toDate,
	"unixEpoch":        unixEpoch,

	// Strings
	"trunc":  trunc,
	"trim":   strings.TrimSpace,
	"upper":  strings.ToUpper,
	"lower":  strings.ToLower,
	"title":  strings.Title,
	"substr": substring,
	// Switch order so that "foo" | repeat 5
	"repeat": func(count int, str string) string { return strings.Repeat(str, count) },
	// Deprecated: Use trimAll.
	"trimall": func(a, b string) string { return strings.Trim(b, a) },
	// Switch order so that "$foo" | trimall "$"
	"trimAll":    func(a, b string) string { return strings.Trim(b, a) },
	"trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) },
	"trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) },
	// Switch order so that "foobar" | contains "foo"
	"contains":   func(substr string, str string) bool { return strings.Contains(str, substr) },
	"hasPrefix":  func(substr string, str string) bool { return strings.HasPrefix(str, substr) },
	"hasSuffix":  func(substr string, str string) bool { return strings.HasSuffix(str, substr) },
	"quote":      quote,
	"squote":     squote,
	"cat":        cat,
	"indent":     indent,
	"nindent":    nindent,
	"replace":    replace,
	"plural":     plural,
	"sha1sum":    sha1sum,
	"sha256sum":  sha256sum,
	"adler32sum": adler32sum,
	"toString":   strval,

	// Wrap Atoi to stop errors.
	"atoi":      func(a string) int { i, _ := strconv.Atoi(a); return i },
	"int64":     toInt64,
	"int":       toInt,
	"float64":   toFloat64,
	"seq":       seq,
	"toDecimal": toDecimal,

	//"gt": func(a, b int) bool {return a > b},
	//"gte": func(a, b int) bool {return a >= b},
	//"lt": func(a, b int) bool {return a < b},
	//"lte": func(a, b int) bool {return a <= b},

	// split "/" foo/bar returns map[int]string{0: foo, 1: bar}
	"split":     split,
	"splitList": func(sep, orig string) []string { return strings.Split(orig, sep) },
	// splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu}
	"splitn":    splitn,
	"toStrings": strslice,

	"until":     until,
	"untilStep": untilStep,

	// VERY basic arithmetic.
	"add1": func(i interface{}) int64 { return toInt64(i) + 1 },
	"add": func(i ...interface{}) int64 {
		var a int64 = 0
		for _, b := range i {
			a += toInt64(b)
		}
		return a
	},
	"sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) },
	"div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) },
	"mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) },
	"mul": func(a interface{}, v ...interface{}) int64 {
		val := toInt64(a)
		for _, b := range v {
			val = val * toInt64(b)
		}
		return val
	},
	"randInt": func(min, max int) int { return rand.Intn(max-min) + min },
	"biggest": max,
	"max":     max,
	"min":     min,
	"maxf":    maxf,
	"minf":    minf,
	"ceil":    ceil,
	"floor":   floor,
	"round":   round,

	// string slices. Note that we reverse the order b/c that's better
	// for template processing.
	"join":      join,
	"sortAlpha": sortAlpha,

	// Defaults
	"default":          dfault,
	"empty":            empty,
	"coalesce":         coalesce,
	"all":              all,
	"any":              any,
	"compact":          compact,
	"mustCompact":      mustCompact,
	"fromJson":         fromJson,
	"toJson":           toJson,
	"toPrettyJson":     toPrettyJson,
	"toRawJson":        toRawJson,
	"mustFromJson":     mustFromJson,
	"mustToJson":       mustToJson,
	"mustToPrettyJson": mustToPrettyJson,
	"mustToRawJson":    mustToRawJson,
	"ternary":          ternary,

	// Reflection
	"typeOf":     typeOf,
	"typeIs":     typeIs,
	"typeIsLike": typeIsLike,
	"kindOf":     kindOf,
	"kindIs":     kindIs,
	"deepEqual":  reflect.DeepEqual,

	// OS:
	"env":       os.Getenv,
	"expandenv": os.ExpandEnv,

	// Network:
	"getHostByName": getHostByName,

	// Paths:
	"base":  path.Base,
	"dir":   path.Dir,
	"clean": path.Clean,
	"ext":   path.Ext,
	"isAbs": path.IsAbs,

	// Filepaths:
	"osBase":  filepath.Base,
	"osClean": filepath.Clean,
	"osDir":   filepath.Dir,
	"osExt":   filepath.Ext,
	"osIsAbs": filepath.IsAbs,

	// Encoding:
	"b64enc": base64encode,
	"b64dec": base64decode,
	"b32enc": base32encode,
	"b32dec": base32decode,

	// Data Structures:
	"tuple":  list, // FIXME: with the addition of append/prepend these are no longer immutable.
	"list":   list,
	"dict":   dict,
	"get":    get,
	"set":    set,
	"unset":  unset,
	"hasKey": hasKey,
	"pluck":  pluck,
	"keys":   keys,
	"pick":   pick,
	"omit":   omit,
	"values": values,

	"append": push, "push": push,
	"mustAppend": mustPush, "mustPush": mustPush,
	"prepend":     prepend,
	"mustPrepend": mustPrepend,
	"first":       first,
	"mustFirst":   mustFirst,
	"rest":        rest,
	"mustRest":    mustRest,
	"last":        last,
	"mustLast":    mustLast,
	"initial":     initial,
	"mustInitial": mustInitial,
	"reverse":     reverse,
	"mustReverse": mustReverse,
	"uniq":        uniq,
	"mustUniq":    mustUniq,
	"without":     without,
	"mustWithout": mustWithout,
	"has":         has,
	"mustHas":     mustHas,
	"slice":       slice,
	"mustSlice":   mustSlice,
	"concat":      concat,
	"dig":         dig,
	"chunk":       chunk,
	"mustChunk":   mustChunk,

	// Flow Control:
	"fail": func(msg string) (string, error) { return "", errors.New(msg) },

	// Regex
	"regexMatch":                 regexMatch,
	"mustRegexMatch":             mustRegexMatch,
	"regexFindAll":               regexFindAll,
	"mustRegexFindAll":           mustRegexFindAll,
	"regexFind":                  regexFind,
	"mustRegexFind":              mustRegexFind,
	"regexReplaceAll":            regexReplaceAll,
	"mustRegexReplaceAll":        mustRegexReplaceAll,
	"regexReplaceAllLiteral":     regexReplaceAllLiteral,
	"mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral,
	"regexSplit":                 regexSplit,
	"mustRegexSplit":             mustRegexSplit,
	"regexQuoteMeta":             regexQuoteMeta,

	// URLs:
	"urlParse": urlParse,
	"urlJoin":  urlJoin,
}
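
The hermetic variants above simply delete the entries listed in nonhermeticFunctions from a fresh copy of the map. A quick illustrative check (not from the commit) that environment-dependent functions such as "now" survive in the full map but not the hermetic one:

```go
package main

import (
	"fmt"

	sprig "github.com/go-task/slim-sprig"
)

func main() {
	full := sprig.TxtFuncMap()
	hermetic := sprig.HermeticTxtFuncMap()
	_, inFull := full["now"]
	_, inHermetic := hermetic["now"]
	fmt.Println(inFull, inHermetic) // true false
}
```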
@ -0,0 +1,464 @@
package sprig

import (
	"fmt"
	"math"
	"reflect"
	"sort"
)

// Reflection is used in these functions so that slices and arrays of strings,
// ints, and other types not implementing []interface{} can be worked with.
// For example, this is useful if you need to work on the output of regexs.

func list(v ...interface{}) []interface{} {
	return v
}

func push(list interface{}, v interface{}) []interface{} {
	l, err := mustPush(list, v)
	if err != nil {
		panic(err)
	}

	return l
}

func mustPush(list interface{}, v interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		nl := make([]interface{}, l)
		for i := 0; i < l; i++ {
			nl[i] = l2.Index(i).Interface()
		}

		return append(nl, v), nil

	default:
		return nil, fmt.Errorf("Cannot push on type %s", tp)
	}
}

func prepend(list interface{}, v interface{}) []interface{} {
	l, err := mustPrepend(list, v)
	if err != nil {
		panic(err)
	}

	return l
}

func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) {
	//return append([]interface{}{v}, list...)

	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		nl := make([]interface{}, l)
		for i := 0; i < l; i++ {
			nl[i] = l2.Index(i).Interface()
		}

		return append([]interface{}{v}, nl...), nil

	default:
		return nil, fmt.Errorf("Cannot prepend on type %s", tp)
	}
}

func chunk(size int, list interface{}) [][]interface{} {
	l, err := mustChunk(size, list)
	if err != nil {
		panic(err)
	}

	return l
}

func mustChunk(size int, list interface{}) ([][]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()

		cs := int(math.Floor(float64(l-1)/float64(size)) + 1)
		nl := make([][]interface{}, cs)

		for i := 0; i < cs; i++ {
			clen := size
			if i == cs-1 {
				clen = int(math.Floor(math.Mod(float64(l), float64(size))))
				if clen == 0 {
					clen = size
				}
			}

			nl[i] = make([]interface{}, clen)

			for j := 0; j < clen; j++ {
				ix := i*size + j
				nl[i][j] = l2.Index(ix).Interface()
			}
		}

		return nl, nil

	default:
		return nil, fmt.Errorf("Cannot chunk type %s", tp)
	}
}

func last(list interface{}) interface{} {
	l, err := mustLast(list)
	if err != nil {
		panic(err)
	}

	return l
}

func mustLast(list interface{}) (interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		if l == 0 {
			return nil, nil
		}

		return l2.Index(l - 1).Interface(), nil
	default:
		return nil, fmt.Errorf("Cannot find last on type %s", tp)
	}
}

func first(list interface{}) interface{} {
	l, err := mustFirst(list)
	if err != nil {
		panic(err)
	}

	return l
}

func mustFirst(list interface{}) (interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		if l == 0 {
			return nil, nil
		}

		return l2.Index(0).Interface(), nil
	default:
		return nil, fmt.Errorf("Cannot find first on type %s", tp)
	}
}

func rest(list interface{}) []interface{} {
	l, err := mustRest(list)
	if err != nil {
		panic(err)
	}

	return l
}

func mustRest(list interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		if l == 0 {
			return nil, nil
		}

		nl := make([]interface{}, l-1)
		for i := 1; i < l; i++ {
			nl[i-1] = l2.Index(i).Interface()
		}

		return nl, nil
	default:
		return nil, fmt.Errorf("Cannot find rest on type %s", tp)
	}
}

func initial(list interface{}) []interface{} {
	l, err := mustInitial(list)
	if err != nil {
		panic(err)
	}

	return l
}

func mustInitial(list interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		if l == 0 {
			return nil, nil
		}

		nl := make([]interface{}, l-1)
		for i := 0; i < l-1; i++ {
			nl[i] = l2.Index(i).Interface()
		}

		return nl, nil
	default:
		return nil, fmt.Errorf("Cannot find initial on type %s", tp)
	}
}

func sortAlpha(list interface{}) []string {
	k := reflect.Indirect(reflect.ValueOf(list)).Kind()
	switch k {
	case reflect.Slice, reflect.Array:
		a := strslice(list)
		s := sort.StringSlice(a)
		s.Sort()
		return s
	}
	return []string{strval(list)}
}

func reverse(v interface{}) []interface{} {
	l, err := mustReverse(v)
	if err != nil {
		panic(err)
	}

	return l
}

func mustReverse(v interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(v).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(v)

		l := l2.Len()
		// We do not sort in place because the incoming array should not be altered.
		nl := make([]interface{}, l)
		for i := 0; i < l; i++ {
			nl[l-i-1] = l2.Index(i).Interface()
		}

		return nl, nil
	default:
		return nil, fmt.Errorf("Cannot find reverse on type %s", tp)
	}
}

func compact(list interface{}) []interface{} {
	l, err := mustCompact(list)
	if err != nil {
		panic(err)
	}

	return l
}

func mustCompact(list interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		nl := []interface{}{}
		var item interface{}
		for i := 0; i < l; i++ {
			item = l2.Index(i).Interface()
			if !empty(item) {
				nl = append(nl, item)
			}
		}

		return nl, nil
	default:
		return nil, fmt.Errorf("Cannot compact on type %s", tp)
	}
}

func uniq(list interface{}) []interface{} {
	l, err := mustUniq(list)
	if err != nil {
		panic(err)
	}

	return l
}

func mustUniq(list interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		dest := []interface{}{}
		var item interface{}
		for i := 0; i < l; i++ {
			item = l2.Index(i).Interface()
			if !inList(dest, item) {
				dest = append(dest, item)
			}
		}

		return dest, nil
	default:
		return nil, fmt.Errorf("Cannot find uniq on type %s", tp)
	}
}

func inList(haystack []interface{}, needle interface{}) bool {
	for _, h := range haystack {
		if reflect.DeepEqual(needle, h) {
			return true
		}
	}
	return false
}

func without(list interface{}, omit ...interface{}) []interface{} {
	l, err := mustWithout(list, omit...)
	if err != nil {
		panic(err)
	}

	return l
}

func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		res := []interface{}{}
		var item interface{}
		for i := 0; i < l; i++ {
			item = l2.Index(i).Interface()
			if !inList(omit, item) {
				res = append(res, item)
			}
		}

		return res, nil
	default:
		return nil, fmt.Errorf("Cannot find without on type %s", tp)
	}
}

func has(needle interface{}, haystack interface{}) bool {
	l, err := mustHas(needle, haystack)
	if err != nil {
		panic(err)
	}

	return l
}

func mustHas(needle interface{}, haystack interface{}) (bool, error) {
	if haystack == nil {
		return false, nil
	}
	tp := reflect.TypeOf(haystack).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(haystack)
		var item interface{}
		l := l2.Len()
		for i := 0; i < l; i++ {
			item = l2.Index(i).Interface()
			if reflect.DeepEqual(needle, item) {
				return true, nil
			}
		}

		return false, nil
	default:
		return false, fmt.Errorf("Cannot find has on type %s", tp)
	}
}

// $list := [1, 2, 3, 4, 5]
// slice $list     -> list[0:5] = list[:]
// slice $list 0 3 -> list[0:3] = list[:3]
// slice $list 3 5 -> list[3:5]
// slice $list 3   -> list[3:5] = list[3:]
func slice(list interface{}, indices ...interface{}) interface{} {
	l, err := mustSlice(list, indices...)
	if err != nil {
		panic(err)
	}

	return l
}

func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		if l == 0 {
			return nil, nil
		}

		var start, end int
		if len(indices) > 0 {
			start = toInt(indices[0])
		}
		if len(indices) < 2 {
			end = l
		} else {
			end = toInt(indices[1])
		}

		return l2.Slice(start, end).Interface(), nil
	default:
		return nil, fmt.Errorf("list should be type of slice or array but %s", tp)
	}
}

func concat(lists ...interface{}) interface{} {
	var res []interface{}
	for _, list := range lists {
		tp := reflect.TypeOf(list).Kind()
		switch tp {
		case reflect.Slice, reflect.Array:
			l2 := reflect.ValueOf(list)
			for i := 0; i < l2.Len(); i++ {
				res = append(res, l2.Index(i).Interface())
			}
		default:
			panic(fmt.Sprintf("Cannot concat type %s as list", tp))
		}
	}
	return res
}
@ -0,0 +1,12 @@
package sprig

import (
	"math/rand"
	"net"
)

func getHostByName(name string) string {
	addrs, _ := net.LookupHost(name)
	//TODO: add error handling when release v3 comes out
	return addrs[rand.Intn(len(addrs))]
}
@ -0,0 +1,228 @@
package sprig

import (
	"fmt"
	"math"
	"reflect"
	"strconv"
	"strings"
)

// toFloat64 converts 64-bit floats
func toFloat64(v interface{}) float64 {
	if str, ok := v.(string); ok {
		iv, err := strconv.ParseFloat(str, 64)
		if err != nil {
			return 0
		}
		return iv
	}

	val := reflect.Indirect(reflect.ValueOf(v))
	switch val.Kind() {
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return float64(val.Int())
	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return float64(val.Uint())
	case reflect.Uint, reflect.Uint64:
		return float64(val.Uint())
	case reflect.Float32, reflect.Float64:
		return val.Float()
	case reflect.Bool:
		if val.Bool() {
			return 1
		}
		return 0
	default:
		return 0
	}
}

func toInt(v interface{}) int {
	//It's not optimal. But I don't want to duplicate toInt64 code.
	return int(toInt64(v))
}

// toInt64 converts integer types to 64-bit integers
func toInt64(v interface{}) int64 {
	if str, ok := v.(string); ok {
		iv, err := strconv.ParseInt(str, 10, 64)
		if err != nil {
			return 0
		}
		return iv
	}

	val := reflect.Indirect(reflect.ValueOf(v))
	switch val.Kind() {
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return val.Int()
	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return int64(val.Uint())
	case reflect.Uint, reflect.Uint64:
		tv := val.Uint()
		if tv <= math.MaxInt64 {
			return int64(tv)
		}
		// TODO: What is the sensible thing to do here?
		return math.MaxInt64
	case reflect.Float32, reflect.Float64:
		return int64(val.Float())
	case reflect.Bool:
		if val.Bool() {
			return 1
		}
		return 0
	default:
		return 0
	}
}

func max(a interface{}, i ...interface{}) int64 {
	aa := toInt64(a)
	for _, b := range i {
		bb := toInt64(b)
		if bb > aa {
			aa = bb
		}
	}
	return aa
}

func maxf(a interface{}, i ...interface{}) float64 {
	aa := toFloat64(a)
	for _, b := range i {
		bb := toFloat64(b)
		aa = math.Max(aa, bb)
	}
	return aa
}

func min(a interface{}, i ...interface{}) int64 {
	aa := toInt64(a)
	for _, b := range i {
		bb := toInt64(b)
		if bb < aa {
			aa = bb
		}
	}
	return aa
}

func minf(a interface{}, i ...interface{}) float64 {
	aa := toFloat64(a)
	for _, b := range i {
		bb := toFloat64(b)
		aa = math.Min(aa, bb)
	}
	return aa
}

func until(count int) []int {
	step := 1
	if count < 0 {
		step = -1
	}
	return untilStep(0, count, step)
}

func untilStep(start, stop, step int) []int {
	v := []int{}

	if stop < start {
		if step >= 0 {
			return v
		}
		for i := start; i > stop; i += step {
			v = append(v, i)
		}
		return v
	}

	if step <= 0 {
		return v
	}
	for i := start; i < stop; i += step {
		v = append(v, i)
	}
	return v
}

func floor(a interface{}) float64 {
	aa := toFloat64(a)
	return math.Floor(aa)
}

func ceil(a interface{}) float64 {
	aa := toFloat64(a)
	return math.Ceil(aa)
}

func round(a interface{}, p int, rOpt ...float64) float64 {
	roundOn := .5
	if len(rOpt) > 0 {
		roundOn = rOpt[0]
	}
	val := toFloat64(a)
	places := toFloat64(p)

	var round float64
	pow := math.Pow(10, places)
	digit := pow * val
	_, div := math.Modf(digit)
	if div >= roundOn {
		round = math.Ceil(digit)
	} else {
		round = math.Floor(digit)
	}
	return round / pow
}

// converts unix octal to decimal
func toDecimal(v interface{}) int64 {
	result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
	if err != nil {
		return 0
	}
	return result
}

func seq(params ...int) string {
	increment := 1
	switch len(params) {
	case 0:
		return ""
	case 1:
		start := 1
		end := params[0]
		if end < start {
			increment = -1
		}
		return intArrayToString(untilStep(start, end+increment, increment), " ")
	case 3:
		start := params[0]
		end := params[2]
		step := params[1]
		if end < start {
			increment = -1
			if step > 0 {
				return ""
			}
		}
		return intArrayToString(untilStep(start, end+increment, step), " ")
	case 2:
		start := params[0]
		end := params[1]
		step := 1
		if end < start {
			step = -1
		}
		return intArrayToString(untilStep(start, end+step, step), " ")
	default:
		return ""
	}
}

func intArrayToString(slice []int, delimeter string) string {
	return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]")
}
@ -0,0 +1,28 @@
package sprig

import (
	"fmt"
	"reflect"
)

// typeIs returns true if the src is the type named in target.
func typeIs(target string, src interface{}) bool {
	return target == typeOf(src)
}

func typeIsLike(target string, src interface{}) bool {
	t := typeOf(src)
	return target == t || "*"+target == t
}

func typeOf(src interface{}) string {
	return fmt.Sprintf("%T", src)
}

func kindIs(target string, src interface{}) bool {
	return target == kindOf(src)
}

func kindOf(src interface{}) string {
	return reflect.ValueOf(src).Kind().String()
}
@ -0,0 +1,83 @@
package sprig

import (
	"regexp"
)

func regexMatch(regex string, s string) bool {
	match, _ := regexp.MatchString(regex, s)
	return match
}

func mustRegexMatch(regex string, s string) (bool, error) {
	return regexp.MatchString(regex, s)
}

func regexFindAll(regex string, s string, n int) []string {
	r := regexp.MustCompile(regex)
	return r.FindAllString(s, n)
}

func mustRegexFindAll(regex string, s string, n int) ([]string, error) {
	r, err := regexp.Compile(regex)
	if err != nil {
		return []string{}, err
	}
	return r.FindAllString(s, n), nil
}

func regexFind(regex string, s string) string {
	r := regexp.MustCompile(regex)
	return r.FindString(s)
}

func mustRegexFind(regex string, s string) (string, error) {
	r, err := regexp.Compile(regex)
	if err != nil {
		return "", err
	}
	return r.FindString(s), nil
}

func regexReplaceAll(regex string, s string, repl string) string {
	r := regexp.MustCompile(regex)
	return r.ReplaceAllString(s, repl)
}

func mustRegexReplaceAll(regex string, s string, repl string) (string, error) {
	r, err := regexp.Compile(regex)
	if err != nil {
		return "", err
	}
	return r.ReplaceAllString(s, repl), nil
}

func regexReplaceAllLiteral(regex string, s string, repl string) string {
	r := regexp.MustCompile(regex)
	return r.ReplaceAllLiteralString(s, repl)
}

func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) {
	r, err := regexp.Compile(regex)
	if err != nil {
		return "", err
	}
	return r.ReplaceAllLiteralString(s, repl), nil
}

func regexSplit(regex string, s string, n int) []string {
	r := regexp.MustCompile(regex)
	return r.Split(s, n)
}

func mustRegexSplit(regex string, s string, n int) ([]string, error) {
	r, err := regexp.Compile(regex)
	if err != nil {
		return []string{}, err
	}
	return r.Split(s, n), nil
}

func regexQuoteMeta(s string) string {
	return regexp.QuoteMeta(s)
}
@ -0,0 +1,189 @@
package sprig

import (
	"encoding/base32"
	"encoding/base64"
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

func base64encode(v string) string {
	return base64.StdEncoding.EncodeToString([]byte(v))
}

func base64decode(v string) string {
	data, err := base64.StdEncoding.DecodeString(v)
	if err != nil {
		return err.Error()
	}
	return string(data)
}

func base32encode(v string) string {
	return base32.StdEncoding.EncodeToString([]byte(v))
}

func base32decode(v string) string {
	data, err := base32.StdEncoding.DecodeString(v)
	if err != nil {
		return err.Error()
	}
	return string(data)
}

func quote(str ...interface{}) string {
	out := make([]string, 0, len(str))
	for _, s := range str {
		if s != nil {
			out = append(out, fmt.Sprintf("%q", strval(s)))
		}
	}
	return strings.Join(out, " ")
}

func squote(str ...interface{}) string {
	out := make([]string, 0, len(str))
	for _, s := range str {
		if s != nil {
			out = append(out, fmt.Sprintf("'%v'", s))
		}
	}
	return strings.Join(out, " ")
}

func cat(v ...interface{}) string {
	v = removeNilElements(v)
	r := strings.TrimSpace(strings.Repeat("%v ", len(v)))
	return fmt.Sprintf(r, v...)
}

func indent(spaces int, v string) string {
	pad := strings.Repeat(" ", spaces)
	return pad + strings.Replace(v, "\n", "\n"+pad, -1)
}

func nindent(spaces int, v string) string {
	return "\n" + indent(spaces, v)
}

func replace(old, new, src string) string {
	return strings.Replace(src, old, new, -1)
}

func plural(one, many string, count int) string {
	if count == 1 {
		return one
	}
	return many
}

func strslice(v interface{}) []string {
	switch v := v.(type) {
	case []string:
		return v
	case []interface{}:
		b := make([]string, 0, len(v))
		for _, s := range v {
			if s != nil {
				b = append(b, strval(s))
			}
		}
		return b
	default:
		val := reflect.ValueOf(v)
		switch val.Kind() {
		case reflect.Array, reflect.Slice:
			l := val.Len()
			b := make([]string, 0, l)
			for i := 0; i < l; i++ {
				value := val.Index(i).Interface()
				if value != nil {
					b = append(b, strval(value))
				}
			}
			return b
		default:
			if v == nil {
				return []string{}
			}

			return []string{strval(v)}
		}
	}
}

func removeNilElements(v []interface{}) []interface{} {
	newSlice := make([]interface{}, 0, len(v))
	for _, i := range v {
		if i != nil {
			newSlice = append(newSlice, i)
		}
	}
	return newSlice
}

func strval(v interface{}) string {
	switch v := v.(type) {
	case string:
		return v
	case []byte:
		return string(v)
	case error:
		return v.Error()
	case fmt.Stringer:
		return v.String()
	default:
		return fmt.Sprintf("%v", v)
	}
}

func trunc(c int, s string) string {
	if c < 0 && len(s)+c > 0 {
		return s[len(s)+c:]
	}
	if c >= 0 && len(s) > c {
		return s[:c]
	}
	return s
}

func join(sep string, v interface{}) string {
	return strings.Join(strslice(v), sep)
}

func split(sep, orig string) map[string]string {
	parts := strings.Split(orig, sep)
	res := make(map[string]string, len(parts))
	for i, v := range parts {
		res["_"+strconv.Itoa(i)] = v
	}
	return res
}

func splitn(sep string, n int, orig string) map[string]string {
	parts := strings.SplitN(orig, sep, n)
	res := make(map[string]string, len(parts))
	for i, v := range parts {
		res["_"+strconv.Itoa(i)] = v
	}
	return res
}

// substring creates a substring of the given string.
//
// If start is < 0, this calls string[:end].
//
// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:]
//
// Otherwise, this calls string[start, end].
func substring(start, end int, s string) string {
	if start < 0 {
		return s[:end]
	}
	if end < 0 || end > len(s) {
		return s[start:]
	}
	return s[start:end]
}
@ -0,0 +1,66 @@
package sprig

import (
	"fmt"
	"net/url"
	"reflect"
)

func dictGetOrEmpty(dict map[string]interface{}, key string) string {
	value, ok := dict[key]
	if !ok {
		return ""
	}
	tp := reflect.TypeOf(value).Kind()
	if tp != reflect.String {
		panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String()))
	}
	return reflect.ValueOf(value).String()
}

// parses given URL to return dict object
func urlParse(v string) map[string]interface{} {
	dict := map[string]interface{}{}
	parsedURL, err := url.Parse(v)
	if err != nil {
		panic(fmt.Sprintf("unable to parse url: %s", err))
	}
	dict["scheme"] = parsedURL.Scheme
	dict["host"] = parsedURL.Host
	dict["hostname"] = parsedURL.Hostname()
	dict["path"] = parsedURL.Path
	dict["query"] = parsedURL.RawQuery
	dict["opaque"] = parsedURL.Opaque
	dict["fragment"] = parsedURL.Fragment
	if parsedURL.User != nil {
		dict["userinfo"] = parsedURL.User.String()
	} else {
		dict["userinfo"] = ""
	}

	return dict
}

// join given dict to URL string
func urlJoin(d map[string]interface{}) string {
	resURL := url.URL{
		Scheme:   dictGetOrEmpty(d, "scheme"),
		Host:     dictGetOrEmpty(d, "host"),
		Path:     dictGetOrEmpty(d, "path"),
		RawQuery: dictGetOrEmpty(d, "query"),
		Opaque:   dictGetOrEmpty(d, "opaque"),
		Fragment: dictGetOrEmpty(d, "fragment"),
	}
	userinfo := dictGetOrEmpty(d, "userinfo")
	var user *url.Userinfo
	if userinfo != "" {
		tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo))
		if err != nil {
			panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err))
		}
		user = tempURL.User
	}

	resURL.User = user
	return resURL.String()
}
@ -0,0 +1,7 @@
# This is the official list of pprof authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
Google Inc.
@ -0,0 +1,16 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>
Raul Silvera <rsilvera@google.com>
Tipp Moseley <tipp@google.com>
Hyoun Kyu Cho <netforce@google.com>
Martin Spier <spiermar@gmail.com>
Taco de Wolff <tacodewolff@gmail.com>
Andrew Hunter <andrewhhunter@gmail.com>
@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -0,0 +1,567 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package profile

import (
	"errors"
	"sort"
)

func (p *Profile) decoder() []decoder {
	return profileDecoder
}

// preEncode populates the unexported fields to be used by encode
// (with suffix X) from the corresponding exported fields. The
// exported fields are cleared up to facilitate testing.
func (p *Profile) preEncode() {
	strings := make(map[string]int)
	addString(strings, "")

	for _, st := range p.SampleType {
		st.typeX = addString(strings, st.Type)
		st.unitX = addString(strings, st.Unit)
	}

	for _, s := range p.Sample {
		s.labelX = nil
		var keys []string
		for k := range s.Label {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			vs := s.Label[k]
			for _, v := range vs {
				s.labelX = append(s.labelX,
					label{
						keyX: addString(strings, k),
						strX: addString(strings, v),
					},
				)
			}
		}
		var numKeys []string
		for k := range s.NumLabel {
			numKeys = append(numKeys, k)
		}
		sort.Strings(numKeys)
		for _, k := range numKeys {
			keyX := addString(strings, k)
			vs := s.NumLabel[k]
			units := s.NumUnit[k]
			for i, v := range vs {
				var unitX int64
				if len(units) != 0 {
					unitX = addString(strings, units[i])
				}
				s.labelX = append(s.labelX,
					label{
						keyX:  keyX,
						numX:  v,
						unitX: unitX,
					},
				)
			}
		}
		s.locationIDX = make([]uint64, len(s.Location))
		for i, loc := range s.Location {
			s.locationIDX[i] = loc.ID
		}
	}

	for _, m := range p.Mapping {
		m.fileX = addString(strings, m.File)
		m.buildIDX = addString(strings, m.BuildID)
	}

	for _, l := range p.Location {
		for i, ln := range l.Line {
			if ln.Function != nil {
				l.Line[i].functionIDX = ln.Function.ID
			} else {
				l.Line[i].functionIDX = 0
			}
		}
		if l.Mapping != nil {
			l.mappingIDX = l.Mapping.ID
		} else {
			l.mappingIDX = 0
		}
	}
	for _, f := range p.Function {
		f.nameX = addString(strings, f.Name)
		f.systemNameX = addString(strings, f.SystemName)
		f.filenameX = addString(strings, f.Filename)
	}

	p.dropFramesX = addString(strings, p.DropFrames)
	p.keepFramesX = addString(strings, p.KeepFrames)

	if pt := p.PeriodType; pt != nil {
		pt.typeX = addString(strings, pt.Type)
		pt.unitX = addString(strings, pt.Unit)
	}

	p.commentX = nil
	for _, c := range p.Comments {
		p.commentX = append(p.commentX, addString(strings, c))
	}

	p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)

	p.stringTable = make([]string, len(strings))
	for s, i := range strings {
		p.stringTable[i] = s
	}
}

func (p *Profile) encode(b *buffer) {
	for _, x := range p.SampleType {
		encodeMessage(b, 1, x)
	}
	for _, x := range p.Sample {
		encodeMessage(b, 2, x)
	}
	for _, x := range p.Mapping {
		encodeMessage(b, 3, x)
	}
	for _, x := range p.Location {
		encodeMessage(b, 4, x)
	}
	for _, x := range p.Function {
		encodeMessage(b, 5, x)
	}
	encodeStrings(b, 6, p.stringTable)
	encodeInt64Opt(b, 7, p.dropFramesX)
	encodeInt64Opt(b, 8, p.keepFramesX)
	encodeInt64Opt(b, 9, p.TimeNanos)
	encodeInt64Opt(b, 10, p.DurationNanos)
	if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
		encodeMessage(b, 11, p.PeriodType)
	}
	encodeInt64Opt(b, 12, p.Period)
	encodeInt64s(b, 13, p.commentX)
	encodeInt64(b, 14, p.defaultSampleTypeX)
}

var profileDecoder = []decoder{
	nil, // 0
	// repeated ValueType sample_type = 1
	func(b *buffer, m message) error {
		x := new(ValueType)
		pp := m.(*Profile)
		pp.SampleType = append(pp.SampleType, x)
		return decodeMessage(b, x)
	},
	// repeated Sample sample = 2
	func(b *buffer, m message) error {
		x := new(Sample)
		pp := m.(*Profile)
		pp.Sample = append(pp.Sample, x)
		return decodeMessage(b, x)
	},
	// repeated Mapping mapping = 3
	func(b *buffer, m message) error {
		x := new(Mapping)
		pp := m.(*Profile)
		pp.Mapping = append(pp.Mapping, x)
		return decodeMessage(b, x)
	},
	// repeated Location location = 4
	func(b *buffer, m message) error {
		x := new(Location)
		x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
		pp := m.(*Profile)
		pp.Location = append(pp.Location, x)
		err := decodeMessage(b, x)
		var tmp []Line
		x.Line = append(tmp, x.Line...) // Shrink to allocated size
		return err
	},
	// repeated Function function = 5
	func(b *buffer, m message) error {
		x := new(Function)
		pp := m.(*Profile)
		pp.Function = append(pp.Function, x)
		return decodeMessage(b, x)
	},
	// repeated string string_table = 6
	func(b *buffer, m message) error {
		err := decodeStrings(b, &m.(*Profile).stringTable)
		if err != nil {
			return err
		}
		if m.(*Profile).stringTable[0] != "" {
			return errors.New("string_table[0] must be ''")
		}
		return nil
	},
	// int64 drop_frames = 7
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
	// int64 keep_frames = 8
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
	// int64 time_nanos = 9
	func(b *buffer, m message) error {
		if m.(*Profile).TimeNanos != 0 {
			return errConcatProfile
		}
		return decodeInt64(b, &m.(*Profile).TimeNanos)
	},
	// int64 duration_nanos = 10
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
	// ValueType period_type = 11
	func(b *buffer, m message) error {
		x := new(ValueType)
		pp := m.(*Profile)
		pp.PeriodType = x
		return decodeMessage(b, x)
	},
	// int64 period = 12
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
	// repeated int64 comment = 13
	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
	// int64 defaultSampleType = 14
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
}

// postDecode takes the unexported fields populated by decode (with
// suffix X) and populates the corresponding exported fields.
// The unexported fields are cleared up to facilitate testing.
func (p *Profile) postDecode() error {
	var err error
	mappings := make(map[uint64]*Mapping, len(p.Mapping))
	mappingIds := make([]*Mapping, len(p.Mapping)+1)
	for _, m := range p.Mapping {
		m.File, err = getString(p.stringTable, &m.fileX, err)
		m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
		if m.ID < uint64(len(mappingIds)) {
			mappingIds[m.ID] = m
		} else {
			mappings[m.ID] = m
		}
	}

	functions := make(map[uint64]*Function, len(p.Function))
	functionIds := make([]*Function, len(p.Function)+1)
	for _, f := range p.Function {
		f.Name, err = getString(p.stringTable, &f.nameX, err)
		f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
		f.Filename, err = getString(p.stringTable, &f.filenameX, err)
		if f.ID < uint64(len(functionIds)) {
			functionIds[f.ID] = f
		} else {
			functions[f.ID] = f
		}
	}

	locations := make(map[uint64]*Location, len(p.Location))
	locationIds := make([]*Location, len(p.Location)+1)
	for _, l := range p.Location {
		if id := l.mappingIDX; id < uint64(len(mappingIds)) {
			l.Mapping = mappingIds[id]
		} else {
			l.Mapping = mappings[id]
		}
		l.mappingIDX = 0
		for i, ln := range l.Line {
			if id := ln.functionIDX; id != 0 {
				l.Line[i].functionIDX = 0
				if id < uint64(len(functionIds)) {
					l.Line[i].Function = functionIds[id]
				} else {
					l.Line[i].Function = functions[id]
				}
			}
		}
		if l.ID < uint64(len(locationIds)) {
			locationIds[l.ID] = l
		} else {
			locations[l.ID] = l
		}
	}

	for _, st := range p.SampleType {
		st.Type, err = getString(p.stringTable, &st.typeX, err)
		st.Unit, err = getString(p.stringTable, &st.unitX, err)
	}

	for _, s := range p.Sample {
		labels := make(map[string][]string, len(s.labelX))
		numLabels := make(map[string][]int64, len(s.labelX))
		numUnits := make(map[string][]string, len(s.labelX))
		for _, l := range s.labelX {
			var key, value string
			key, err = getString(p.stringTable, &l.keyX, err)
			if l.strX != 0 {
				value, err = getString(p.stringTable, &l.strX, err)
				labels[key] = append(labels[key], value)
			} else if l.numX != 0 || l.unitX != 0 {
				numValues := numLabels[key]
				units := numUnits[key]
				if l.unitX != 0 {
					var unit string
					unit, err = getString(p.stringTable, &l.unitX, err)
					units = padStringArray(units, len(numValues))
					numUnits[key] = append(units, unit)
				}
				numLabels[key] = append(numLabels[key], l.numX)
			}
		}
		if len(labels) > 0 {
			s.Label = labels
		}
		if len(numLabels) > 0 {
			s.NumLabel = numLabels
			for key, units := range numUnits {
				if len(units) > 0 {
					numUnits[key] = padStringArray(units, len(numLabels[key]))
				}
			}
			s.NumUnit = numUnits
		}
		s.Location = make([]*Location, len(s.locationIDX))
		for i, lid := range s.locationIDX {
			if lid < uint64(len(locationIds)) {
				s.Location[i] = locationIds[lid]
			} else {
				s.Location[i] = locations[lid]
			}
		}
		s.locationIDX = nil
	}

	p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
	p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)

	if pt := p.PeriodType; pt == nil {
		p.PeriodType = &ValueType{}
	}

	if pt := p.PeriodType; pt != nil {
		pt.Type, err = getString(p.stringTable, &pt.typeX, err)
		pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
	}

	for _, i := range p.commentX {
		var c string
		c, err = getString(p.stringTable, &i, err)
		p.Comments = append(p.Comments, c)
	}

	p.commentX = nil
	p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
	p.stringTable = nil
	return err
}

// padStringArray pads arr with enough empty strings to make arr
// length l when arr's length is less than l.
func padStringArray(arr []string, l int) []string {
	if l <= len(arr) {
		return arr
	}
	return append(arr, make([]string, l-len(arr))...)
}

func (p *ValueType) decoder() []decoder {
	return valueTypeDecoder
}

func (p *ValueType) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.typeX)
	encodeInt64Opt(b, 2, p.unitX)
}

var valueTypeDecoder = []decoder{
	nil, // 0
	// optional int64 type = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
	// optional int64 unit = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
}

func (p *Sample) decoder() []decoder {
	return sampleDecoder
}

func (p *Sample) encode(b *buffer) {
	encodeUint64s(b, 1, p.locationIDX)
	encodeInt64s(b, 2, p.Value)
	for _, x := range p.labelX {
		encodeMessage(b, 3, x)
	}
}

var sampleDecoder = []decoder{
	nil, // 0
	// repeated uint64 location = 1
	func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
	// repeated int64 value = 2
	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
	// repeated Label label = 3
	func(b *buffer, m message) error {
		s := m.(*Sample)
		n := len(s.labelX)
		s.labelX = append(s.labelX, label{})
		return decodeMessage(b, &s.labelX[n])
	},
}

func (p label) decoder() []decoder {
	return labelDecoder
}

func (p label) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.keyX)
	encodeInt64Opt(b, 2, p.strX)
	encodeInt64Opt(b, 3, p.numX)
	encodeInt64Opt(b, 4, p.unitX)
}

var labelDecoder = []decoder{
	nil, // 0
	// optional int64 key = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
	// optional int64 str = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
	// optional int64 num = 3
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
	// optional int64 num = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
}

func (p *Mapping) decoder() []decoder {
	return mappingDecoder
}

func (p *Mapping) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeUint64Opt(b, 2, p.Start)
	encodeUint64Opt(b, 3, p.Limit)
	encodeUint64Opt(b, 4, p.Offset)
	encodeInt64Opt(b, 5, p.fileX)
	encodeInt64Opt(b, 6, p.buildIDX)
	encodeBoolOpt(b, 7, p.HasFunctions)
	encodeBoolOpt(b, 8, p.HasFilenames)
	encodeBoolOpt(b, 9, p.HasLineNumbers)
	encodeBoolOpt(b, 10, p.HasInlineFrames)
}

var mappingDecoder = []decoder{
	nil, // 0
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) },            // optional uint64 id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) },         // optional uint64 memory_offset = 2
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) },         // optional uint64 memory_limit = 3
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) },        // optional uint64 file_offset = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) },          // optional int64 filename = 5
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) },       // optional int64 build_id = 6
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) },    // optional bool has_functions = 7
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) },    // optional bool has_filenames = 8
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) },  // optional bool has_line_numbers = 9
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
}

func (p *Location) decoder() []decoder {
|
||||||
|
return locationDecoder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Location) encode(b *buffer) {
|
||||||
|
encodeUint64Opt(b, 1, p.ID)
|
||||||
|
encodeUint64Opt(b, 2, p.mappingIDX)
|
||||||
|
encodeUint64Opt(b, 3, p.Address)
|
||||||
|
for i := range p.Line {
|
||||||
|
encodeMessage(b, 4, &p.Line[i])
|
||||||
|
}
|
||||||
|
encodeBoolOpt(b, 5, p.IsFolded)
|
||||||
|
}
|
||||||
|
|
||||||
|
var locationDecoder = []decoder{
|
||||||
|
nil, // 0
|
||||||
|
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1;
|
||||||
|
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
|
||||||
|
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3;
|
||||||
|
func(b *buffer, m message) error { // repeated Line line = 4
|
||||||
|
pp := m.(*Location)
|
||||||
|
n := len(pp.Line)
|
||||||
|
pp.Line = append(pp.Line, Line{})
|
||||||
|
return decodeMessage(b, &pp.Line[n])
|
||||||
|
},
|
||||||
|
func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Line) decoder() []decoder {
|
||||||
|
return lineDecoder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Line) encode(b *buffer) {
|
||||||
|
encodeUint64Opt(b, 1, p.functionIDX)
|
||||||
|
encodeInt64Opt(b, 2, p.Line)
|
||||||
|
}
|
||||||
|
|
||||||
|
var lineDecoder = []decoder{
|
||||||
|
nil, // 0
|
||||||
|
// optional uint64 function_id = 1
|
||||||
|
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
|
||||||
|
// optional int64 line = 2
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Function) decoder() []decoder {
|
||||||
|
return functionDecoder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Function) encode(b *buffer) {
|
||||||
|
encodeUint64Opt(b, 1, p.ID)
|
||||||
|
encodeInt64Opt(b, 2, p.nameX)
|
||||||
|
encodeInt64Opt(b, 3, p.systemNameX)
|
||||||
|
encodeInt64Opt(b, 4, p.filenameX)
|
||||||
|
encodeInt64Opt(b, 5, p.StartLine)
|
||||||
|
}
|
||||||
|
|
||||||
|
var functionDecoder = []decoder{
|
||||||
|
nil, // 0
|
||||||
|
// optional uint64 id = 1
|
||||||
|
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
|
||||||
|
// optional int64 function_name = 2
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
|
||||||
|
// optional int64 function_system_name = 3
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
|
||||||
|
// repeated int64 filename = 4
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
|
||||||
|
// optional int64 start_line = 5
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
|
||||||
|
}
|
||||||
|
|
||||||
|
func addString(strings map[string]int, s string) int64 {
|
||||||
|
i, ok := strings[s]
|
||||||
|
if !ok {
|
||||||
|
i = len(strings)
|
||||||
|
strings[s] = i
|
||||||
|
}
|
||||||
|
return int64(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getString(strings []string, strng *int64, err error) (string, error) {
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
s := int(*strng)
|
||||||
|
if s < 0 || s >= len(strings) {
|
||||||
|
return "", errMalformed
|
||||||
|
}
|
||||||
|
*strng = 0
|
||||||
|
return strings[s], nil
|
||||||
|
}
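
// Editorial note: the two helpers above implement the profile string table.
// A minimal round-trip sketch (illustrative only, not part of the vendored
// file); the table is assumed to be seeded with the mandatory empty string
// at index 0:
//
//	tbl := map[string]int{"": 0}
//	i := addString(tbl, "cpu") // i == 1; a repeat call reuses the entry
//
//	strs := []string{"", "cpu"}
//	idx := int64(i)
//	s, err := getString(strs, &idx, nil) // s == "cpu", err == nil, idx zeroed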
@ -0,0 +1,270 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package profile

// Implements methods to filter samples from profiles.

import "regexp"

// FilterSamplesByName filters the samples in a profile and only keeps
// samples where at least one frame matches focus but none match ignore.
// Returns true if the corresponding regexp matched at least one sample.
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
	focusOrIgnore := make(map[uint64]bool)
	hidden := make(map[uint64]bool)
	for _, l := range p.Location {
		if ignore != nil && l.matchesName(ignore) {
			im = true
			focusOrIgnore[l.ID] = false
		} else if focus == nil || l.matchesName(focus) {
			fm = true
			focusOrIgnore[l.ID] = true
		}

		if hide != nil && l.matchesName(hide) {
			hm = true
			l.Line = l.unmatchedLines(hide)
			if len(l.Line) == 0 {
				hidden[l.ID] = true
			}
		}
		if show != nil {
			l.Line = l.matchedLines(show)
			if len(l.Line) == 0 {
				hidden[l.ID] = true
			} else {
				hnm = true
			}
		}
	}

	s := make([]*Sample, 0, len(p.Sample))
	for _, sample := range p.Sample {
		if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
			if len(hidden) > 0 {
				var locs []*Location
				for _, loc := range sample.Location {
					if !hidden[loc.ID] {
						locs = append(locs, loc)
					}
				}
				if len(locs) == 0 {
					// Remove sample with no locations (by not adding it to s).
					continue
				}
				sample.Location = locs
			}
			s = append(s, sample)
		}
	}
	p.Sample = s

	return
}
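
// Editorial sketch (not part of the vendored file): a typical use of
// FilterSamplesByName, keeping samples that pass through mallocgc while
// hiding runtime-internal frames; p is assumed to be a *Profile from Parse:
//
//	focus := regexp.MustCompile(`mallocgc`)
//	hide := regexp.MustCompile(`^runtime\.`)
//	fm, _, hm, _ := p.FilterSamplesByName(focus, nil, hide, nil)
//	// fm: some frame matched focus; hm: hide trimmed at least one frame.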

// ShowFrom drops all stack frames above the highest matching frame and returns
// whether a match was found. If showFrom is nil it returns false and does not
// modify the profile.
//
// Example: consider a sample with frames [A, B, C, B], where A is the root.
// ShowFrom(nil) returns false and has frames [A, B, C, B].
// ShowFrom(A) returns true and has frames [A, B, C, B].
// ShowFrom(B) returns true and has frames [B, C, B].
// ShowFrom(C) returns true and has frames [C, B].
// ShowFrom(D) returns false and drops the sample because no frames remain.
func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
	if showFrom == nil {
		return false
	}
	// showFromLocs stores location IDs that matched ShowFrom.
	showFromLocs := make(map[uint64]bool)
	// Apply to locations.
	for _, loc := range p.Location {
		if filterShowFromLocation(loc, showFrom) {
			showFromLocs[loc.ID] = true
			matched = true
		}
	}
	// For all samples, strip locations after the highest matching one.
	s := make([]*Sample, 0, len(p.Sample))
	for _, sample := range p.Sample {
		for i := len(sample.Location) - 1; i >= 0; i-- {
			if showFromLocs[sample.Location[i].ID] {
				sample.Location = sample.Location[:i+1]
				s = append(s, sample)
				break
			}
		}
	}
	p.Sample = s
	return matched
}
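
// Editorial sketch (illustrative): trimming every stack to start at the
// highest main.main frame, dropping samples that never reach it:
//
//	matched := p.ShowFrom(regexp.MustCompile(`^main\.main$`))
//	// matched is false only if no sample contained a main.main frame.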

// filterShowFromLocation tests a showFrom regex against a location, removes
// lines after the last match and returns whether a match was found. If the
// mapping is matched, then all lines are kept.
func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool {
	if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) {
		return true
	}
	if i := loc.lastMatchedLineIndex(showFrom); i >= 0 {
		loc.Line = loc.Line[:i+1]
		return true
	}
	return false
}

// lastMatchedLineIndex returns the index of the last line that matches a regex,
// or -1 if no match is found.
func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int {
	for i := len(loc.Line) - 1; i >= 0; i-- {
		if fn := loc.Line[i].Function; fn != nil {
			if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
				return i
			}
		}
	}
	return -1
}

// FilterTagsByName filters the tags in a profile and only keeps
// tags that match show and not hide.
func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) {
	matchRemove := func(name string) bool {
		matchShow := show == nil || show.MatchString(name)
		matchHide := hide != nil && hide.MatchString(name)

		if matchShow {
			sm = true
		}
		if matchHide {
			hm = true
		}
		return !matchShow || matchHide
	}
	for _, s := range p.Sample {
		for lab := range s.Label {
			if matchRemove(lab) {
				delete(s.Label, lab)
			}
		}
		for lab := range s.NumLabel {
			if matchRemove(lab) {
				delete(s.NumLabel, lab)
			}
		}
	}
	return
}

// matchesName returns whether the location matches the regular
// expression. It checks any available function names, file names, and
// mapping object filename.
func (loc *Location) matchesName(re *regexp.Regexp) bool {
	for _, ln := range loc.Line {
		if fn := ln.Function; fn != nil {
			if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
				return true
			}
		}
	}
	if m := loc.Mapping; m != nil && re.MatchString(m.File) {
		return true
	}
	return false
}

// unmatchedLines returns the lines in the location that do not match
// the regular expression.
func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
	if m := loc.Mapping; m != nil && re.MatchString(m.File) {
		return nil
	}
	var lines []Line
	for _, ln := range loc.Line {
		if fn := ln.Function; fn != nil {
			if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
				continue
			}
		}
		lines = append(lines, ln)
	}
	return lines
}

// matchedLines returns the lines in the location that match
// the regular expression.
func (loc *Location) matchedLines(re *regexp.Regexp) []Line {
	if m := loc.Mapping; m != nil && re.MatchString(m.File) {
		return loc.Line
	}
	var lines []Line
	for _, ln := range loc.Line {
		if fn := ln.Function; fn != nil {
			if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) {
				continue
			}
		}
		lines = append(lines, ln)
	}
	return lines
}

// focusedAndNotIgnored looks up a slice of ids against a map of
// focused/ignored locations. The map only contains locations that are
// explicitly focused or ignored. Returns whether there is at least
// one focused location but no ignored locations.
func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
	var f bool
	for _, loc := range locs {
		if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
			if focus {
				// Found focused location. Must keep searching in case there
				// is an ignored one as well.
				f = true
			} else {
				// Found ignored location. Can return false right away.
				return false
			}
		}
	}
	return f
}

// TagMatch selects tags for filtering.
type TagMatch func(s *Sample) bool

// FilterSamplesByTag removes all samples from the profile, except
// those for which focus matches and ignore does not.
func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
	samples := make([]*Sample, 0, len(p.Sample))
	for _, s := range p.Sample {
		focused, ignored := true, false
		if focus != nil {
			focused = focus(s)
		}
		if ignore != nil {
			ignored = ignore(s)
		}
		fm = fm || focused
		im = im || ignored
		if focused && !ignored {
			samples = append(samples, s)
		}
	}
	p.Sample = samples
	return
}
@ -0,0 +1,64 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package profile

import (
	"fmt"
	"strconv"
	"strings"
)

// SampleIndexByName returns the appropriate index for a value of sample index.
// If numeric, it returns the number, otherwise it looks up the text in the
// profile sample types.
func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) {
	if sampleIndex == "" {
		if dst := p.DefaultSampleType; dst != "" {
			for i, t := range sampleTypes(p) {
				if t == dst {
					return i, nil
				}
			}
		}
		// By default select the last sample value.
		return len(p.SampleType) - 1, nil
	}
	if i, err := strconv.Atoi(sampleIndex); err == nil {
		if i < 0 || i >= len(p.SampleType) {
			return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1)
		}
		return i, nil
	}

	// Remove the inuse_ prefix to support legacy pprof options
	// "inuse_space" and "inuse_objects" for profiles containing types
	// "space" and "objects".
	noInuse := strings.TrimPrefix(sampleIndex, "inuse_")
	for i, t := range p.SampleType {
		if t.Type == sampleIndex || t.Type == noInuse {
			return i, nil
		}
	}

	return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p))
}
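
// Editorial sketch (illustrative): resolving sample indexes for a heap
// profile assumed to have sample types [inuse_objects, inuse_space]:
//
//	i, _ := p.SampleIndexByName("inuse_space") // i == 1 (name lookup)
//	j, _ := p.SampleIndexByName("0")           // j == 0 (numeric selection)
//	k, _ := p.SampleIndexByName("")            // k == 1 (defaults to last type)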

func sampleTypes(p *Profile) []string {
	types := make([]string, len(p.SampleType))
	for i, t := range p.SampleType {
		types[i] = t.Type
	}
	return types
}
315
vendor/github.com/google/pprof/profile/legacy_java_profile.go
generated
vendored
Normal file
@ -0,0 +1,315 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file implements parsers to convert java legacy profiles into
// the profile.proto format.

package profile

import (
	"bytes"
	"fmt"
	"io"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
)

var (
	attributeRx            = regexp.MustCompile(`([\w ]+)=([\w ]+)`)
	javaSampleRx           = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`)
	javaLocationRx         = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`)
	javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`)
	javaLocationPathRx     = regexp.MustCompile(`^(.*)\s+\((.*)\)$`)
)
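
// Editorial note (illustrative, not part of the vendored file): a profilez
// sample line such as
//
//	5 1234 @ 0x00012345 0x00054321
//
// is split by javaSampleRx into the two leading decimal fields ("5", "1234")
// and the hex stack; javaLocationRx and javaLocationFileLineRx then match
// location lines of the (hypothetical) form
//
//	0x00012345 com.example.Foo.bar (Foo.java:123)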

// javaCPUProfile returns a new Profile from profilez data.
// b is the profile bytes after the header, period is the profiling
// period, and parse is a function to parse 8-byte chunks from the
// profile in its native endianness.
func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
	p := &Profile{
		Period:     period * 1000,
		PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
		SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}},
	}
	var err error
	var locs map[uint64]*Location
	if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil {
		return nil, err
	}

	if err = parseJavaLocations(b, locs, p); err != nil {
		return nil, err
	}

	// Strip out addresses for better merge.
	if err = p.Aggregate(true, true, true, true, false); err != nil {
		return nil, err
	}

	return p, nil
}

// parseJavaProfile returns a new profile from heapz or contentionz
// data. b is the profile bytes after the header.
func parseJavaProfile(b []byte) (*Profile, error) {
	h := bytes.SplitAfterN(b, []byte("\n"), 2)
	if len(h) < 2 {
		return nil, errUnrecognized
	}

	p := &Profile{
		PeriodType: &ValueType{},
	}
	header := string(bytes.TrimSpace(h[0]))

	var err error
	var pType string
	switch header {
	case "--- heapz 1 ---":
		pType = "heap"
	case "--- contentionz 1 ---":
		pType = "contention"
	default:
		return nil, errUnrecognized
	}

	if b, err = parseJavaHeader(pType, h[1], p); err != nil {
		return nil, err
	}
	var locs map[uint64]*Location
	if b, locs, err = parseJavaSamples(pType, b, p); err != nil {
		return nil, err
	}
	if err = parseJavaLocations(b, locs, p); err != nil {
		return nil, err
	}

	// Strip out addresses for better merge.
	if err = p.Aggregate(true, true, true, true, false); err != nil {
		return nil, err
	}

	return p, nil
}

// parseJavaHeader parses the attribute section on a java profile and
// populates a profile. Returns the remainder of the buffer after all
// attributes.
func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) {
	nextNewLine := bytes.IndexByte(b, byte('\n'))
	for nextNewLine != -1 {
		line := string(bytes.TrimSpace(b[0:nextNewLine]))
		if line != "" {
			h := attributeRx.FindStringSubmatch(line)
			if h == nil {
				// Not a valid attribute, exit.
				return b, nil
			}

			attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
			var err error
			switch pType + "/" + attribute {
			case "heap/format", "cpu/format", "contention/format":
				if value != "java" {
					return nil, errUnrecognized
				}
			case "heap/resolution":
				p.SampleType = []*ValueType{
					{Type: "inuse_objects", Unit: "count"},
					{Type: "inuse_space", Unit: value},
				}
			case "contention/resolution":
				p.SampleType = []*ValueType{
					{Type: "contentions", Unit: "count"},
					{Type: "delay", Unit: value},
				}
			case "contention/sampling period":
				p.PeriodType = &ValueType{
					Type: "contentions", Unit: "count",
				}
				if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
				}
			case "contention/ms since reset":
				millis, err := strconv.ParseInt(value, 0, 64)
				if err != nil {
					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
				}
				p.DurationNanos = millis * 1000 * 1000
			default:
				return nil, errUnrecognized
			}
		}
		// Grab next line.
		b = b[nextNewLine+1:]
		nextNewLine = bytes.IndexByte(b, byte('\n'))
	}
	return b, nil
}

// parseJavaSamples parses the samples from a java profile and
// populates the Samples in a profile. Returns the remainder of the
// buffer after the samples.
func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
	nextNewLine := bytes.IndexByte(b, byte('\n'))
	locs := make(map[uint64]*Location)
	for nextNewLine != -1 {
		line := string(bytes.TrimSpace(b[0:nextNewLine]))
		if line != "" {
			sample := javaSampleRx.FindStringSubmatch(line)
			if sample == nil {
				// Not a valid sample, exit.
				return b, locs, nil
			}

			// Java profiles have data/fields inverted compared to other
			// profile types.
			var err error
			value1, value2, value3 := sample[2], sample[1], sample[3]
			addrs, err := parseHexAddresses(value3)
			if err != nil {
				return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
			}

			var sloc []*Location
			for _, addr := range addrs {
				loc := locs[addr]
				if loc == nil {
					loc = &Location{
						Address: addr,
					}
					p.Location = append(p.Location, loc)
					locs[addr] = loc
				}
				sloc = append(sloc, loc)
			}
			s := &Sample{
				Value:    make([]int64, 2),
				Location: sloc,
			}

			if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
			}
			if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
			}

			switch pType {
			case "heap":
				const javaHeapzSamplingRate = 524288 // 512K
				if s.Value[0] == 0 {
					return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
				}
				s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
				s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
			case "contention":
				if period := p.Period; period != 0 {
					s.Value[0] = s.Value[0] * p.Period
					s.Value[1] = s.Value[1] * p.Period
				}
			}
			p.Sample = append(p.Sample, s)
		}
		// Grab next line.
		b = b[nextNewLine+1:]
		nextNewLine = bytes.IndexByte(b, byte('\n'))
	}
	return b, locs, nil
}

// parseJavaLocations parses the location information in a java
// profile and populates the Locations in a profile. It uses the
// location addresses from the profile as the IDs of the locations.
func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error {
	r := bytes.NewBuffer(b)
	fns := make(map[string]*Function)
	for {
		line, err := r.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				return err
			}
			if line == "" {
				break
			}
		}

		if line = strings.TrimSpace(line); line == "" {
			continue
		}

		jloc := javaLocationRx.FindStringSubmatch(line)
		if len(jloc) != 3 {
			continue
		}
		addr, err := strconv.ParseUint(jloc[1], 16, 64)
		if err != nil {
			return fmt.Errorf("parsing sample %s: %v", line, err)
		}
		loc := locs[addr]
		if loc == nil {
			// Unused/unseen
			continue
		}
		var lineFunc, lineFile string
		var lineNo int64

		if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 {
			// Found a line of the form: "function (file:line)"
			lineFunc, lineFile = fileLine[1], fileLine[2]
			if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 {
				lineNo = n
			}
		} else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 {
			// If there's not a file:line, it's a shared library path.
			// The path isn't interesting, so just give the .so.
			lineFunc, lineFile = filePath[1], filepath.Base(filePath[2])
		} else if strings.Contains(jloc[2], "generated stub/JIT") {
			lineFunc = "STUB"
		} else {
			// Treat whole line as the function name. This is used by the
			// java agent for internal states such as "GC" or "VM".
			lineFunc = jloc[2]
		}
		fn := fns[lineFunc]

		if fn == nil {
			fn = &Function{
				Name:       lineFunc,
				SystemName: lineFunc,
				Filename:   lineFile,
			}
			fns[lineFunc] = fn
			p.Function = append(p.Function, fn)
		}
		loc.Line = []Line{
			{
				Function: fn,
				Line:     lineNo,
			},
		}
		loc.Address = 0
	}

	p.remapLocationIDs()
	p.remapFunctionIDs()
	p.remapMappingIDs()

	return nil
}
1225
vendor/github.com/google/pprof/profile/legacy_profile.go
generated
vendored
Normal file
File diff not shown because it is too large
@ -0,0 +1,481 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package profile

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// Compact performs garbage collection on a profile to remove any
// unreferenced fields. This is useful to reduce the size of a profile
// after samples or locations have been removed.
func (p *Profile) Compact() *Profile {
	p, _ = Merge([]*Profile{p})
	return p
}

// Merge merges all the profiles in srcs into a single Profile.
// Returns a new profile independent of the input profiles. The merged
// profile is compacted to eliminate unused samples, locations,
// functions and mappings. Profiles must have identical profile sample
// and period types or the merge will fail. profile.Period of the
// resulting profile will be the maximum of all profiles, and
// profile.TimeNanos will be the earliest nonzero one. Merges are
// associative with the caveat of the first profile having some
// specialization in how headers are combined. There may be other
// subtleties now or in the future regarding associativity.
func Merge(srcs []*Profile) (*Profile, error) {
	if len(srcs) == 0 {
		return nil, fmt.Errorf("no profiles to merge")
	}
	p, err := combineHeaders(srcs)
	if err != nil {
		return nil, err
	}

	pm := &profileMerger{
		p:         p,
		samples:   make(map[sampleKey]*Sample, len(srcs[0].Sample)),
		locations: make(map[locationKey]*Location, len(srcs[0].Location)),
		functions: make(map[functionKey]*Function, len(srcs[0].Function)),
		mappings:  make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
	}

	for _, src := range srcs {
		// Clear the profile-specific hash tables.
		pm.locationsByID = make(map[uint64]*Location, len(src.Location))
		pm.functionsByID = make(map[uint64]*Function, len(src.Function))
		pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))

		if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
			// The Mapping list has the property that the first mapping
			// represents the main binary. Take the first Mapping we see,
			// otherwise the operations below will add mappings in an
			// arbitrary order.
			pm.mapMapping(src.Mapping[0])
		}

		for _, s := range src.Sample {
			if !isZeroSample(s) {
				pm.mapSample(s)
			}
		}
	}

	for _, s := range p.Sample {
		if isZeroSample(s) {
			// If there are any zero samples, re-merge the profile to GC
			// them.
			return Merge([]*Profile{p})
		}
	}

	return p, nil
}
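
// Editorial sketch (illustrative): merging two compatible parsed profiles
// and compacting one after filtering; p1 and p2 are assumed to share sample
// and period types:
//
//	merged, err := Merge([]*Profile{p1, p2})
//	if err != nil {
//		// profiles were incompatible
//	}
//	p1 = p1.Compact() // drop entities orphaned by earlier filtering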

// Normalize normalizes the source profile by multiplying each value in profile by the
// ratio of the sum of the base profile's values of that sample type to the sum of the
// source profile's values of that sample type.
func (p *Profile) Normalize(pb *Profile) error {
	if err := p.compatible(pb); err != nil {
		return err
	}

	baseVals := make([]int64, len(p.SampleType))
	for _, s := range pb.Sample {
		for i, v := range s.Value {
			baseVals[i] += v
		}
	}

	srcVals := make([]int64, len(p.SampleType))
	for _, s := range p.Sample {
		for i, v := range s.Value {
			srcVals[i] += v
		}
	}

	normScale := make([]float64, len(baseVals))
	for i := range baseVals {
		if srcVals[i] == 0 {
			normScale[i] = 0.0
		} else {
			normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
		}
	}
	p.ScaleN(normScale)
	return nil
}
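
// Editorial example of the scaling above: if the base profile's values for a
// sample type sum to 200 and the source profile's sum to 50, every source
// value of that type is multiplied by 200/50 = 4, so a sample value of 3
// becomes 12. A source sum of zero scales that type to zero.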

func isZeroSample(s *Sample) bool {
	for _, v := range s.Value {
		if v != 0 {
			return false
		}
	}
	return true
}

type profileMerger struct {
	p *Profile

	// Memoization tables within a profile.
	locationsByID map[uint64]*Location
	functionsByID map[uint64]*Function
	mappingsByID  map[uint64]mapInfo

	// Memoization tables for profile entities.
	samples   map[sampleKey]*Sample
	locations map[locationKey]*Location
	functions map[functionKey]*Function
	mappings  map[mappingKey]*Mapping
}

type mapInfo struct {
	m      *Mapping
	offset int64
}

func (pm *profileMerger) mapSample(src *Sample) *Sample {
	s := &Sample{
		Location: make([]*Location, len(src.Location)),
		Value:    make([]int64, len(src.Value)),
		Label:    make(map[string][]string, len(src.Label)),
		NumLabel: make(map[string][]int64, len(src.NumLabel)),
		NumUnit:  make(map[string][]string, len(src.NumLabel)),
	}
	for i, l := range src.Location {
		s.Location[i] = pm.mapLocation(l)
	}
	for k, v := range src.Label {
		vv := make([]string, len(v))
		copy(vv, v)
		s.Label[k] = vv
	}
	for k, v := range src.NumLabel {
		u := src.NumUnit[k]
		vv := make([]int64, len(v))
		uu := make([]string, len(u))
		copy(vv, v)
		copy(uu, u)
		s.NumLabel[k] = vv
		s.NumUnit[k] = uu
	}
	// Check memoization table. Must be done on the remapped location to
	// account for the remapped mapping. Add current values to the
	// existing sample.
	k := s.key()
	if ss, ok := pm.samples[k]; ok {
		for i, v := range src.Value {
			ss.Value[i] += v
		}
		return ss
	}
	copy(s.Value, src.Value)
	pm.samples[k] = s
	pm.p.Sample = append(pm.p.Sample, s)
	return s
}

// key generates sampleKey to be used as a key for maps.
func (sample *Sample) key() sampleKey {
	ids := make([]string, len(sample.Location))
	for i, l := range sample.Location {
		ids[i] = strconv.FormatUint(l.ID, 16)
	}

	labels := make([]string, 0, len(sample.Label))
	for k, v := range sample.Label {
		labels = append(labels, fmt.Sprintf("%q%q", k, v))
	}
	sort.Strings(labels)

	numlabels := make([]string, 0, len(sample.NumLabel))
	for k, v := range sample.NumLabel {
		numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
	}
	sort.Strings(numlabels)

	return sampleKey{
		strings.Join(ids, "|"),
		strings.Join(labels, ""),
		strings.Join(numlabels, ""),
	}
}

type sampleKey struct {
	locations string
	labels    string
	numlabels string
}

func (pm *profileMerger) mapLocation(src *Location) *Location {
	if src == nil {
		return nil
	}

	if l, ok := pm.locationsByID[src.ID]; ok {
		return l
	}

	mi := pm.mapMapping(src.Mapping)
	l := &Location{
		ID:       uint64(len(pm.p.Location) + 1),
		Mapping:  mi.m,
		Address:  uint64(int64(src.Address) + mi.offset),
		Line:     make([]Line, len(src.Line)),
		IsFolded: src.IsFolded,
	}
	for i, ln := range src.Line {
		l.Line[i] = pm.mapLine(ln)
	}
	// Check memoization table. Must be done on the remapped location to
	// account for the remapped mapping ID.
	k := l.key()
	if ll, ok := pm.locations[k]; ok {
		pm.locationsByID[src.ID] = ll
		return ll
	}
	pm.locationsByID[src.ID] = l
	pm.locations[k] = l
	pm.p.Location = append(pm.p.Location, l)
	return l
}

// key generates locationKey to be used as a key for maps.
func (l *Location) key() locationKey {
	key := locationKey{
		addr:     l.Address,
		isFolded: l.IsFolded,
	}
	if l.Mapping != nil {
		// Normalizes address to handle address space randomization.
		key.addr -= l.Mapping.Start
		key.mappingID = l.Mapping.ID
	}
	lines := make([]string, len(l.Line)*2)
	for i, line := range l.Line {
		if line.Function != nil {
			lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
		}
		lines[i*2+1] = strconv.FormatInt(line.Line, 16)
	}
	key.lines = strings.Join(lines, "|")
	return key
}

type locationKey struct {
	addr, mappingID uint64
	lines           string
	isFolded        bool
}

func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
	if src == nil {
		return mapInfo{}
	}

	if mi, ok := pm.mappingsByID[src.ID]; ok {
		return mi
	}

	// Check memoization tables.
	mk := src.key()
	if m, ok := pm.mappings[mk]; ok {
		mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
		pm.mappingsByID[src.ID] = mi
		return mi
	}
	m := &Mapping{
		ID:              uint64(len(pm.p.Mapping) + 1),
		Start:           src.Start,
		Limit:           src.Limit,
		Offset:          src.Offset,
		File:            src.File,
		BuildID:         src.BuildID,
		HasFunctions:    src.HasFunctions,
		HasFilenames:    src.HasFilenames,
		HasLineNumbers:  src.HasLineNumbers,
		HasInlineFrames: src.HasInlineFrames,
	}
	pm.p.Mapping = append(pm.p.Mapping, m)

	// Update memoization tables.
	pm.mappings[mk] = m
	mi := mapInfo{m, 0}
	pm.mappingsByID[src.ID] = mi
	return mi
}

// key generates encoded strings of Mapping to be used as a key for
// maps.
func (m *Mapping) key() mappingKey {
	// Normalize addresses to handle address space randomization.
	// Round up to next 4K boundary to avoid minor discrepancies.
	const mapsizeRounding = 0x1000

	size := m.Limit - m.Start
	size = size + mapsizeRounding - 1
	size = size - (size % mapsizeRounding)
	key := mappingKey{
		size:   size,
		offset: m.Offset,
	}

	switch {
	case m.BuildID != "":
		key.buildIDOrFile = m.BuildID
	case m.File != "":
		key.buildIDOrFile = m.File
	default:
		// A mapping containing neither build ID nor file name is a fake mapping. A
		// key with empty buildIDOrFile is used for fake mappings so that they are
		// treated as the same mapping during merging.
	}
	return key
}
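
// Editorial example of the rounding above: a mapping spanning 0x1234 bytes
// is keyed with size 0x2000 (0x1234 + 0xfff = 0x2233; 0x2233 - 0x2233%0x1000
// = 0x2000), so mappings that differ only below page granularity share a key.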

type mappingKey struct {
	size, offset  uint64
	buildIDOrFile string
}

func (pm *profileMerger) mapLine(src Line) Line {
	ln := Line{
		Function: pm.mapFunction(src.Function),
		Line:     src.Line,
	}
	return ln
}

func (pm *profileMerger) mapFunction(src *Function) *Function {
	if src == nil {
		return nil
	}
	if f, ok := pm.functionsByID[src.ID]; ok {
		return f
	}
	k := src.key()
	if f, ok := pm.functions[k]; ok {
		pm.functionsByID[src.ID] = f
		return f
	}
	f := &Function{
		ID:         uint64(len(pm.p.Function) + 1),
		Name:       src.Name,
		SystemName: src.SystemName,
		Filename:   src.Filename,
		StartLine:  src.StartLine,
	}
	pm.functions[k] = f
	pm.functionsByID[src.ID] = f
	pm.p.Function = append(pm.p.Function, f)
	return f
}

// key generates a struct to be used as a key for maps.
func (f *Function) key() functionKey {
	return functionKey{
		f.StartLine,
		f.Name,
		f.SystemName,
		f.Filename,
	}
}

type functionKey struct {
	startLine                  int64
	name, systemName, fileName string
}

// combineHeaders checks that all profiles can be merged and returns
// their combined profile.
func combineHeaders(srcs []*Profile) (*Profile, error) {
	for _, s := range srcs[1:] {
		if err := srcs[0].compatible(s); err != nil {
			return nil, err
		}
	}

	var timeNanos, durationNanos, period int64
	var comments []string
	seenComments := map[string]bool{}
	var defaultSampleType string
	for _, s := range srcs {
		if timeNanos == 0 || s.TimeNanos < timeNanos {
			timeNanos = s.TimeNanos
		}
		durationNanos += s.DurationNanos
		if period == 0 || period < s.Period {
			period = s.Period
		}
		for _, c := range s.Comments {
			if seen := seenComments[c]; !seen {
				comments = append(comments, c)
				seenComments[c] = true
			}
		}
		if defaultSampleType == "" {
			defaultSampleType = s.DefaultSampleType
		}
	}

	p := &Profile{
		SampleType: make([]*ValueType, len(srcs[0].SampleType)),

		DropFrames: srcs[0].DropFrames,
		KeepFrames: srcs[0].KeepFrames,

		TimeNanos:     timeNanos,
		DurationNanos: durationNanos,
		PeriodType:    srcs[0].PeriodType,
		Period:        period,

		Comments:          comments,
		DefaultSampleType: defaultSampleType,
	}
	copy(p.SampleType, srcs[0].SampleType)
	return p, nil
}

// compatible determines if two profiles can be compared/merged.
// It returns nil if the profiles are compatible; otherwise an error with
// details on the incompatibility.
func (p *Profile) compatible(pb *Profile) error {
	if !equalValueType(p.PeriodType, pb.PeriodType) {
		return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
	}

	if len(p.SampleType) != len(pb.SampleType) {
		return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
	}

	for i := range p.SampleType {
		if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
			return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
		}
	}
	return nil
}

// equalValueType returns true if the two value types are semantically
// equal. It ignores the internal fields used during encode/decode.
func equalValueType(st1, st2 *ValueType) bool {
	return st1.Type == st2.Type && st1.Unit == st2.Unit
}
@ -0,0 +1,805 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package profile provides a representation of profile.proto and
// methods to encode/decode profiles in this format.
package profile

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"path/filepath"
	"regexp"
	"sort"
	"strings"
	"sync"
	"time"
)

// Profile is an in-memory representation of profile.proto.
type Profile struct {
	SampleType        []*ValueType
	DefaultSampleType string
	Sample            []*Sample
	Mapping           []*Mapping
	Location          []*Location
	Function          []*Function
	Comments          []string

	DropFrames string
	KeepFrames string

	TimeNanos     int64
	DurationNanos int64
	PeriodType    *ValueType
	Period        int64

	// The following fields are modified during encoding and copying,
	// so are protected by a Mutex.
	encodeMu sync.Mutex

	commentX           []int64
	dropFramesX        int64
	keepFramesX        int64
	stringTable        []string
	defaultSampleTypeX int64
}

// ValueType corresponds to Profile.ValueType.
type ValueType struct {
	Type string // cpu, wall, inuse_space, etc
	Unit string // seconds, nanoseconds, bytes, etc

	typeX int64
	unitX int64
}

// Sample corresponds to Profile.Sample.
type Sample struct {
	Location []*Location
	Value    []int64
	Label    map[string][]string
	NumLabel map[string][]int64
	NumUnit  map[string][]string

	locationIDX []uint64
	labelX      []label
}

// label corresponds to Profile.Label.
type label struct {
	keyX int64
	// Exactly one of the two following values must be set.
	strX int64
	numX int64 // Integer value for this label
	// unitX can be set only if numX has a value.
	unitX int64
}

// Mapping corresponds to Profile.Mapping.
type Mapping struct {
	ID              uint64
	Start           uint64
	Limit           uint64
	Offset          uint64
	File            string
	BuildID         string
	HasFunctions    bool
	HasFilenames    bool
	HasLineNumbers  bool
	HasInlineFrames bool

	fileX    int64
	buildIDX int64
}

// Location corresponds to Profile.Location.
type Location struct {
	ID       uint64
	Mapping  *Mapping
	Address  uint64
	Line     []Line
	IsFolded bool

	mappingIDX uint64
}

// Line corresponds to Profile.Line.
type Line struct {
	Function *Function
	Line     int64

	functionIDX uint64
}

// Function corresponds to Profile.Function.
type Function struct {
	ID         uint64
	Name       string
	SystemName string
	Filename   string
	StartLine  int64

	nameX       int64
	systemNameX int64
	filenameX   int64
}

// Parse parses a profile and checks for its validity. The input
// may be a gzip-compressed encoded protobuf or one of many legacy
// profile formats which may be unsupported in the future.
func Parse(r io.Reader) (*Profile, error) {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	return ParseData(data)
}

// ParseData parses a profile from a buffer and checks for its
// validity.
func ParseData(data []byte) (*Profile, error) {
	var p *Profile
	var err error
	if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
		gz, err := gzip.NewReader(bytes.NewBuffer(data))
		if err == nil {
			data, err = ioutil.ReadAll(gz)
		}
		if err != nil {
			return nil, fmt.Errorf("decompressing profile: %v", err)
		}
	}
	if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile {
		p, err = parseLegacy(data)
	}

	if err != nil {
		return nil, fmt.Errorf("parsing profile: %v", err)
	}

	if err := p.CheckValid(); err != nil {
		return nil, fmt.Errorf("malformed profile: %v", err)
	}
	return p, nil
}
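
// Editorial sketch (illustrative): the typical entry point, reading a
// possibly gzip-compressed profile from disk; the path is hypothetical:
//
//	f, err := os.Open("cpu.pprof")
//	if err != nil {
//		// handle open error
//	}
//	defer f.Close()
//	p, err := Parse(f) // transparently handles gzip and legacy formats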
|
||||||
|
|
||||||
|
var errUnrecognized = fmt.Errorf("unrecognized profile format")
|
||||||
|
var errMalformed = fmt.Errorf("malformed profile format")
|
||||||
|
var errNoData = fmt.Errorf("empty input file")
|
||||||
|
var errConcatProfile = fmt.Errorf("concatenated profiles detected")
|
||||||
|
|
||||||
|
func parseLegacy(data []byte) (*Profile, error) {
|
||||||
|
parsers := []func([]byte) (*Profile, error){
|
||||||
|
parseCPU,
|
||||||
|
parseHeap,
|
||||||
|
parseGoCount, // goroutine, threadcreate
|
||||||
|
parseThread,
|
||||||
|
parseContention,
|
||||||
|
parseJavaProfile,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, parser := range parsers {
|
||||||
|
p, err := parser(data)
|
||||||
|
if err == nil {
|
||||||
|
p.addLegacyFrameInfo()
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
if err != errUnrecognized {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, errUnrecognized
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseUncompressed parses an uncompressed protobuf into a profile.
|
||||||
|
func ParseUncompressed(data []byte) (*Profile, error) {
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nil, errNoData
|
||||||
|
}
|
||||||
|
p := &Profile{}
|
||||||
|
if err := unmarshal(data, p); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := p.postDecode(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return p, nil
|
||||||
|
}

var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)

// massageMappings applies heuristic-based changes to the profile
// mappings to account for quirks of some environments.
func (p *Profile) massageMappings() {
	// Merge adjacent regions with matching names, checking that the offsets match
	if len(p.Mapping) > 1 {
		mappings := []*Mapping{p.Mapping[0]}
		for _, m := range p.Mapping[1:] {
			lm := mappings[len(mappings)-1]
			if adjacent(lm, m) {
				lm.Limit = m.Limit
				if m.File != "" {
					lm.File = m.File
				}
				if m.BuildID != "" {
					lm.BuildID = m.BuildID
				}
				p.updateLocationMapping(m, lm)
				continue
			}
			mappings = append(mappings, m)
		}
		p.Mapping = mappings
	}

	// Use heuristics to identify main binary and move it to the top of the list of mappings
	for i, m := range p.Mapping {
		file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
		if len(file) == 0 {
			continue
		}
		if len(libRx.FindStringSubmatch(file)) > 0 {
			continue
		}
		if file[0] == '[' {
			continue
		}
		// Swap what we guess is main to position 0.
		p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
		break
	}

	// Keep the mapping IDs neatly sorted
	for i, m := range p.Mapping {
		m.ID = uint64(i + 1)
	}
}

// adjacent returns whether two mapping entries represent the same
// mapping that has been split into two. It checks that their addresses
// are adjacent and that their offsets match, when offsets are available.
func adjacent(m1, m2 *Mapping) bool {
	if m1.File != "" && m2.File != "" {
		if m1.File != m2.File {
			return false
		}
	}
	if m1.BuildID != "" && m2.BuildID != "" {
		if m1.BuildID != m2.BuildID {
			return false
		}
	}
	if m1.Limit != m2.Start {
		return false
	}
	if m1.Offset != 0 && m2.Offset != 0 {
		offset := m1.Offset + (m1.Limit - m1.Start)
		if offset != m2.Offset {
			return false
		}
	}
	return true
}

func (p *Profile) updateLocationMapping(from, to *Mapping) {
	for _, l := range p.Location {
		if l.Mapping == from {
			l.Mapping = to
		}
	}
}

func serialize(p *Profile) []byte {
	p.encodeMu.Lock()
	p.preEncode()
	b := marshal(p)
	p.encodeMu.Unlock()
	return b
}

// Write writes the profile as a gzip-compressed marshaled protobuf.
func (p *Profile) Write(w io.Writer) error {
	zw := gzip.NewWriter(w)
	defer zw.Close()
	_, err := zw.Write(serialize(p))
	return err
}

// WriteUncompressed writes the profile as a marshaled protobuf.
func (p *Profile) WriteUncompressed(w io.Writer) error {
	_, err := w.Write(serialize(p))
	return err
}
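
// Illustrative sketch (not part of the vendored source): Parse and Write
// round-trip a profile, so a common pattern is to read a profile, adjust it,
// and write it back out gzip-compressed. The file names are hypothetical.
//
//	in, err := os.Open("in.pprof")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer in.Close()
//	p, err := profile.Parse(in)
//	if err != nil {
//		log.Fatal(err)
//	}
//	out, err := os.Create("out.pprof")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer out.Close()
//	if err := p.Write(out); err != nil {
//		log.Fatal(err)
//	}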

// CheckValid tests whether the profile is valid. Checks include, but are
// not limited to:
// - len(Profile.Sample[n].value) == len(Profile.value_unit)
// - Sample.id has a corresponding Profile.Location
func (p *Profile) CheckValid() error {
	// Check that sample values are consistent
	sampleLen := len(p.SampleType)
	if sampleLen == 0 && len(p.Sample) != 0 {
		return fmt.Errorf("missing sample type information")
	}
	for _, s := range p.Sample {
		if s == nil {
			return fmt.Errorf("profile has nil sample")
		}
		if len(s.Value) != sampleLen {
			return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
		}
		for _, l := range s.Location {
			if l == nil {
				return fmt.Errorf("sample has nil location")
			}
		}
	}

	// Check that all mappings/locations/functions are in the tables
	// Check that there are no duplicate ids
	mappings := make(map[uint64]*Mapping, len(p.Mapping))
	for _, m := range p.Mapping {
		if m == nil {
			return fmt.Errorf("profile has nil mapping")
		}
		if m.ID == 0 {
			return fmt.Errorf("found mapping with reserved ID=0")
		}
		if mappings[m.ID] != nil {
			return fmt.Errorf("multiple mappings with same id: %d", m.ID)
		}
		mappings[m.ID] = m
	}
	functions := make(map[uint64]*Function, len(p.Function))
	for _, f := range p.Function {
		if f == nil {
			return fmt.Errorf("profile has nil function")
		}
		if f.ID == 0 {
			return fmt.Errorf("found function with reserved ID=0")
		}
		if functions[f.ID] != nil {
			return fmt.Errorf("multiple functions with same id: %d", f.ID)
		}
		functions[f.ID] = f
	}
	locations := make(map[uint64]*Location, len(p.Location))
	for _, l := range p.Location {
		if l == nil {
			return fmt.Errorf("profile has nil location")
		}
		if l.ID == 0 {
			return fmt.Errorf("found location with reserved id=0")
		}
		if locations[l.ID] != nil {
			return fmt.Errorf("multiple locations with same id: %d", l.ID)
		}
		locations[l.ID] = l
		if m := l.Mapping; m != nil {
			if m.ID == 0 || mappings[m.ID] != m {
				return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
			}
		}
		for _, ln := range l.Line {
			f := ln.Function
			if f == nil {
				return fmt.Errorf("location id: %d has a line with nil function", l.ID)
			}
			if f.ID == 0 || functions[f.ID] != f {
				return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
			}
		}
	}
	return nil
}

// Aggregate merges the locations in the profile into equivalence
// classes preserving the request attributes. It also updates the
// samples to point to the merged locations.
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
	for _, m := range p.Mapping {
		m.HasInlineFrames = m.HasInlineFrames && inlineFrame
		m.HasFunctions = m.HasFunctions && function
		m.HasFilenames = m.HasFilenames && filename
		m.HasLineNumbers = m.HasLineNumbers && linenumber
	}

	// Aggregate functions
	if !function || !filename {
		for _, f := range p.Function {
			if !function {
				f.Name = ""
				f.SystemName = ""
			}
			if !filename {
				f.Filename = ""
			}
		}
	}

	// Aggregate locations
	if !inlineFrame || !address || !linenumber {
		for _, l := range p.Location {
			if !inlineFrame && len(l.Line) > 1 {
				l.Line = l.Line[len(l.Line)-1:]
			}
			if !linenumber {
				for i := range l.Line {
					l.Line[i].Line = 0
				}
			}
			if !address {
				l.Address = 0
			}
		}
	}

	return p.CheckValid()
}
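
// Illustrative sketch (not part of the vendored source): aggregating away
// addresses, line numbers and inline frames collapses samples that differ
// only in those attributes, which is useful before comparing profiles.
//
//	// Keep function and file names; drop inline frames, line numbers
//	// and addresses.
//	if err := p.Aggregate(false, true, true, false, false); err != nil {
//		log.Fatal(err)
//	}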

// NumLabelUnits returns a map of numeric label keys to the units
// associated with those keys and a map of those keys to any units
// that were encountered but not used.
// The unit for a given key is the first unit encountered for that key. If
// multiple units are encountered for values paired with a particular key,
// the first unit encountered is used and all other units are returned in
// sorted order in the map of ignored units.
// If no units are encountered for a particular key, the unit is inferred
// from the key.
func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) {
	numLabelUnits := map[string]string{}
	ignoredUnits := map[string]map[string]bool{}
	encounteredKeys := map[string]bool{}

	// Determine units based on numeric tags for each sample.
	for _, s := range p.Sample {
		for k := range s.NumLabel {
			encounteredKeys[k] = true
			for _, unit := range s.NumUnit[k] {
				if unit == "" {
					continue
				}
				if wantUnit, ok := numLabelUnits[k]; !ok {
					numLabelUnits[k] = unit
				} else if wantUnit != unit {
					if v, ok := ignoredUnits[k]; ok {
						v[unit] = true
					} else {
						ignoredUnits[k] = map[string]bool{unit: true}
					}
				}
			}
		}
	}
	// Infer units for keys without any units associated with
	// numeric tag values.
	for key := range encounteredKeys {
		unit := numLabelUnits[key]
		if unit == "" {
			switch key {
			case "alignment", "request":
				numLabelUnits[key] = "bytes"
			default:
				numLabelUnits[key] = key
			}
		}
	}

	// Copy ignored units into a more readable format
	unitsIgnored := make(map[string][]string, len(ignoredUnits))
	for key, values := range ignoredUnits {
		units := make([]string, len(values))
		i := 0
		for unit := range values {
			units[i] = unit
			i++
		}
		sort.Strings(units)
		unitsIgnored[key] = units
	}

	return numLabelUnits, unitsIgnored
}

// String dumps a text representation of a profile. Intended mainly
// for debugging purposes.
func (p *Profile) String() string {
	ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location))
	for _, c := range p.Comments {
		ss = append(ss, "Comment: "+c)
	}
	if pt := p.PeriodType; pt != nil {
		ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
	}
	ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
	if p.TimeNanos != 0 {
		ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
	}
	if p.DurationNanos != 0 {
		ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos)))
	}

	ss = append(ss, "Samples:")
	var sh1 string
	for _, s := range p.SampleType {
		dflt := ""
		if s.Type == p.DefaultSampleType {
			dflt = "[dflt]"
		}
		sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt)
	}
	ss = append(ss, strings.TrimSpace(sh1))
	for _, s := range p.Sample {
		ss = append(ss, s.string())
	}

	ss = append(ss, "Locations")
	for _, l := range p.Location {
		ss = append(ss, l.string())
	}

	ss = append(ss, "Mappings")
	for _, m := range p.Mapping {
		ss = append(ss, m.string())
	}

	return strings.Join(ss, "\n") + "\n"
}

// string dumps a text representation of a mapping. Intended mainly
// for debugging purposes.
func (m *Mapping) string() string {
	bits := ""
	if m.HasFunctions {
		bits = bits + "[FN]"
	}
	if m.HasFilenames {
		bits = bits + "[FL]"
	}
	if m.HasLineNumbers {
		bits = bits + "[LN]"
	}
	if m.HasInlineFrames {
		bits = bits + "[IN]"
	}
	return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
		m.ID,
		m.Start, m.Limit, m.Offset,
		m.File,
		m.BuildID,
		bits)
}

// string dumps a text representation of a location. Intended mainly
// for debugging purposes.
func (l *Location) string() string {
	ss := []string{}
	locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
	if m := l.Mapping; m != nil {
		locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
	}
	if l.IsFolded {
		locStr = locStr + "[F] "
	}
	if len(l.Line) == 0 {
		ss = append(ss, locStr)
	}
	for li := range l.Line {
		lnStr := "??"
		if fn := l.Line[li].Function; fn != nil {
			lnStr = fmt.Sprintf("%s %s:%d s=%d",
				fn.Name,
				fn.Filename,
				l.Line[li].Line,
				fn.StartLine)
			if fn.Name != fn.SystemName {
				lnStr = lnStr + "(" + fn.SystemName + ")"
			}
		}
		ss = append(ss, locStr+lnStr)
		// Do not print location details past the first line
		locStr = "             "
	}
	return strings.Join(ss, "\n")
}

// string dumps a text representation of a sample. Intended mainly
// for debugging purposes.
func (s *Sample) string() string {
	ss := []string{}
	var sv string
	for _, v := range s.Value {
		sv = fmt.Sprintf("%s %10d", sv, v)
	}
	sv = sv + ": "
	for _, l := range s.Location {
		sv = sv + fmt.Sprintf("%d ", l.ID)
	}
	ss = append(ss, sv)
	const labelHeader = "                "
	if len(s.Label) > 0 {
		ss = append(ss, labelHeader+labelsToString(s.Label))
	}
	if len(s.NumLabel) > 0 {
		ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit))
	}
	return strings.Join(ss, "\n")
}

// labelsToString returns a string representation of a
// map representing labels.
func labelsToString(labels map[string][]string) string {
	ls := []string{}
	for k, v := range labels {
		ls = append(ls, fmt.Sprintf("%s:%v", k, v))
	}
	sort.Strings(ls)
	return strings.Join(ls, " ")
}

// numLabelsToString returns a string representation of a map
// representing numeric labels.
func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string {
	ls := []string{}
	for k, v := range numLabels {
		units := numUnits[k]
		var labelString string
		if len(units) == len(v) {
			values := make([]string, len(v))
			for i, vv := range v {
				values[i] = fmt.Sprintf("%d %s", vv, units[i])
			}
			labelString = fmt.Sprintf("%s:%v", k, values)
		} else {
			labelString = fmt.Sprintf("%s:%v", k, v)
		}
		ls = append(ls, labelString)
	}
	sort.Strings(ls)
	return strings.Join(ls, " ")
}

// SetLabel sets the specified key to the specified value for all samples in the
// profile.
func (p *Profile) SetLabel(key string, value []string) {
	for _, sample := range p.Sample {
		if sample.Label == nil {
			sample.Label = map[string][]string{key: value}
		} else {
			sample.Label[key] = value
		}
	}
}

// RemoveLabel removes all labels associated with the specified key for all
// samples in the profile.
func (p *Profile) RemoveLabel(key string) {
	for _, sample := range p.Sample {
		delete(sample.Label, key)
	}
}

// HasLabel returns true if a sample has a label with indicated key and value.
func (s *Sample) HasLabel(key, value string) bool {
	for _, v := range s.Label[key] {
		if v == value {
			return true
		}
	}
	return false
}

// DiffBaseSample returns true if a sample belongs to the diff base and false
// otherwise.
func (s *Sample) DiffBaseSample() bool {
	return s.HasLabel("pprof::base", "true")
}
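
// Illustrative sketch (not part of the vendored source): SetLabel and
// HasLabel operate on string label maps, so tagging every sample and later
// testing for the tag looks like this. The key "source" and value "staging"
// are hypothetical examples.
//
//	p.SetLabel("source", []string{"staging"})
//	for _, s := range p.Sample {
//		if s.HasLabel("source", "staging") {
//			// process the tagged sample
//		}
//	}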

// Scale multiplies all sample values in a profile by a constant and keeps
// only samples that have at least one non-zero value.
func (p *Profile) Scale(ratio float64) {
	if ratio == 1 {
		return
	}
	ratios := make([]float64, len(p.SampleType))
	for i := range p.SampleType {
		ratios[i] = ratio
	}
	p.ScaleN(ratios)
}

// ScaleN multiplies the sample values in a profile by a per-sample-type
// ratio and keeps only samples that have at least one non-zero value.
func (p *Profile) ScaleN(ratios []float64) error {
	if len(p.SampleType) != len(ratios) {
		return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
	}
	allOnes := true
	for _, r := range ratios {
		if r != 1 {
			allOnes = false
			break
		}
	}
	if allOnes {
		return nil
	}
	fillIdx := 0
	for _, s := range p.Sample {
		keepSample := false
		for i, v := range s.Value {
			if ratios[i] != 1 {
				val := int64(math.Round(float64(v) * ratios[i]))
				s.Value[i] = val
				keepSample = keepSample || val != 0
			}
		}
		if keepSample {
			p.Sample[fillIdx] = s
			fillIdx++
		}
	}
	p.Sample = p.Sample[:fillIdx]
	return nil
}
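
// Illustrative sketch (not part of the vendored source): Scale is the common
// entry point, e.g. to normalize a profile collected over 10 seconds down to
// a per-second rate:
//
//	p.Scale(1.0 / 10.0)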

// HasFunctions determines if all locations in this profile have
// symbolized function information.
func (p *Profile) HasFunctions() bool {
	for _, l := range p.Location {
		if l.Mapping != nil && !l.Mapping.HasFunctions {
			return false
		}
	}
	return true
}

// HasFileLines determines if all locations in this profile have
// symbolized file and line number information.
func (p *Profile) HasFileLines() bool {
	for _, l := range p.Location {
		if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
			return false
		}
	}
	return true
}

// Unsymbolizable returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are
// "[vdso]", "[vsyscall]" and some others; see the code.
func (m *Mapping) Unsymbolizable() bool {
	name := filepath.Base(m.File)
	return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
}

// Copy makes a fully independent copy of a profile.
func (p *Profile) Copy() *Profile {
	pp := &Profile{}
	if err := unmarshal(serialize(p), pp); err != nil {
		panic(err)
	}
	if err := pp.postDecode(); err != nil {
		panic(err)
	}

	return pp
}

@ -0,0 +1,370 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file is a simple protocol buffer encoder and decoder.
// The format is described at
// https://developers.google.com/protocol-buffers/docs/encoding
//
// A protocol message must implement the message interface:
//   decoder() []decoder
//   encode(*buffer)
//
// The decoder method returns a slice indexed by field number that gives the
// function to decode that field.
// The encode method encodes its receiver into the given buffer.
//
// The two methods are simple enough to be implemented by hand rather than
// by using a protocol compiler.
//
// See profile.go for examples of messages implementing this interface.
//
// There is no support for groups, message sets, or "has" bits.

package profile

import (
	"errors"
	"fmt"
)

type buffer struct {
	field int // field tag
	typ   int // proto wire type code for field
	u64   uint64
	data  []byte
	tmp   [16]byte
}

type decoder func(*buffer, message) error

type message interface {
	decoder() []decoder
	encode(*buffer)
}

func marshal(m message) []byte {
	var b buffer
	m.encode(&b)
	return b.data
}
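
// Illustrative sketch (not part of the vendored source): a minimal message
// with two fields shows the shape of the interface. Field numbers index the
// decoder slice, so entry 0 is unused. The type and field names here are
// hypothetical.
//
//	type point struct {
//		x     int64  // field 1
//		label string // field 2
//	}
//
//	func (p *point) decoder() []decoder {
//		return []decoder{
//			nil, // field 0 is never used
//			func(b *buffer, m message) error { return decodeInt64(b, &m.(*point).x) },
//			func(b *buffer, m message) error { return decodeString(b, &m.(*point).label) },
//		}
//	}
//
//	func (p *point) encode(b *buffer) {
//		encodeInt64Opt(b, 1, p.x)
//		encodeString(b, 2, p.label)
//	}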

func encodeVarint(b *buffer, x uint64) {
	for x >= 128 {
		b.data = append(b.data, byte(x)|0x80)
		x >>= 7
	}
	b.data = append(b.data, byte(x))
}
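
// Worked example (added for clarity, not part of the vendored source):
// varints store 7 bits per byte, least-significant group first, with the
// high bit as a continuation flag. Encoding 300 (binary 100101100):
//
//	low 7 bits:  0101100 -> emitted as 0xAC (continuation bit set)
//	next 7 bits: 0000010 -> emitted as 0x02 (no continuation)
//
// so encodeVarint appends the two bytes 0xAC 0x02, matching the example in
// the protocol buffer encoding documentation linked above.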

func encodeLength(b *buffer, tag int, len int) {
	encodeVarint(b, uint64(tag)<<3|2)
	encodeVarint(b, uint64(len))
}

func encodeUint64(b *buffer, tag int, x uint64) {
	// append varint to b.data
	encodeVarint(b, uint64(tag)<<3)
	encodeVarint(b, x)
}

func encodeUint64s(b *buffer, tag int, x []uint64) {
	if len(x) > 2 {
		// Use packed encoding: encode the values first, then append the
		// length header and rotate it into place before the values via b.tmp.
		n1 := len(b.data)
		for _, u := range x {
			encodeVarint(b, u)
		}
		n2 := len(b.data)
		encodeLength(b, tag, n2-n1)
		n3 := len(b.data)
		copy(b.tmp[:], b.data[n2:n3])
		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
		copy(b.data[n1:], b.tmp[:n3-n2])
		return
	}
	for _, u := range x {
		encodeUint64(b, tag, u)
	}
}

func encodeUint64Opt(b *buffer, tag int, x uint64) {
	if x == 0 {
		return
	}
	encodeUint64(b, tag, x)
}

func encodeInt64(b *buffer, tag int, x int64) {
	u := uint64(x)
	encodeUint64(b, tag, u)
}

func encodeInt64s(b *buffer, tag int, x []int64) {
	if len(x) > 2 {
		// Use packed encoding
		n1 := len(b.data)
		for _, u := range x {
			encodeVarint(b, uint64(u))
		}
		n2 := len(b.data)
		encodeLength(b, tag, n2-n1)
		n3 := len(b.data)
		copy(b.tmp[:], b.data[n2:n3])
		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
		copy(b.data[n1:], b.tmp[:n3-n2])
		return
	}
	for _, u := range x {
		encodeInt64(b, tag, u)
	}
}

func encodeInt64Opt(b *buffer, tag int, x int64) {
	if x == 0 {
		return
	}
	encodeInt64(b, tag, x)
}

func encodeString(b *buffer, tag int, x string) {
	encodeLength(b, tag, len(x))
	b.data = append(b.data, x...)
}

func encodeStrings(b *buffer, tag int, x []string) {
	for _, s := range x {
		encodeString(b, tag, s)
	}
}

func encodeBool(b *buffer, tag int, x bool) {
	if x {
		encodeUint64(b, tag, 1)
	} else {
		encodeUint64(b, tag, 0)
	}
}

func encodeBoolOpt(b *buffer, tag int, x bool) {
	if x {
		encodeBool(b, tag, x)
	}
}

func encodeMessage(b *buffer, tag int, m message) {
	// Encode the message body first, then rotate the length header into
	// place before it (same technique as encodeUint64s).
	n1 := len(b.data)
	m.encode(b)
	n2 := len(b.data)
	encodeLength(b, tag, n2-n1)
	n3 := len(b.data)
	copy(b.tmp[:], b.data[n2:n3])
	copy(b.data[n1+(n3-n2):], b.data[n1:n2])
	copy(b.data[n1:], b.tmp[:n3-n2])
}

func unmarshal(data []byte, m message) (err error) {
	b := buffer{data: data, typ: 2}
	return decodeMessage(&b, m)
}

func le64(p []byte) uint64 {
	return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
}

func le32(p []byte) uint32 {
	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
}

func decodeVarint(data []byte) (uint64, []byte, error) {
	var u uint64
	for i := 0; ; i++ {
		if i >= 10 || i >= len(data) {
			return 0, nil, errors.New("bad varint")
		}
		u |= uint64(data[i]&0x7F) << uint(7*i)
		if data[i]&0x80 == 0 {
			return u, data[i+1:], nil
		}
	}
}

func decodeField(b *buffer, data []byte) ([]byte, error) {
	x, data, err := decodeVarint(data)
	if err != nil {
		return nil, err
	}
	b.field = int(x >> 3)
	b.typ = int(x & 7)
	b.data = nil
	b.u64 = 0
	switch b.typ {
	case 0:
		b.u64, data, err = decodeVarint(data)
		if err != nil {
			return nil, err
		}
	case 1:
		if len(data) < 8 {
			return nil, errors.New("not enough data")
		}
		b.u64 = le64(data[:8])
		data = data[8:]
	case 2:
		var n uint64
		n, data, err = decodeVarint(data)
		if err != nil {
			return nil, err
		}
		if n > uint64(len(data)) {
			return nil, errors.New("too much data")
		}
		b.data = data[:n]
		data = data[n:]
	case 5:
		if len(data) < 4 {
			return nil, errors.New("not enough data")
		}
		b.u64 = uint64(le32(data[:4]))
		data = data[4:]
	default:
		return nil, fmt.Errorf("unknown wire type: %d", b.typ)
	}

	return data, nil
}

func checkType(b *buffer, typ int) error {
	if b.typ != typ {
		return errors.New("type mismatch")
	}
	return nil
}

func decodeMessage(b *buffer, m message) error {
	if err := checkType(b, 2); err != nil {
		return err
	}
	dec := m.decoder()
	data := b.data
	for len(data) > 0 {
		// pull varint field# + type
		var err error
		data, err = decodeField(b, data)
		if err != nil {
			return err
		}
		if b.field >= len(dec) || dec[b.field] == nil {
			continue
		}
		if err := dec[b.field](b, m); err != nil {
			return err
		}
	}
	return nil
}

func decodeInt64(b *buffer, x *int64) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	*x = int64(b.u64)
	return nil
}

func decodeInt64s(b *buffer, x *[]int64) error {
	if b.typ == 2 {
		// Packed encoding
		data := b.data
		tmp := make([]int64, 0, len(data)) // Maximally sized
		for len(data) > 0 {
			var u uint64
			var err error

			if u, data, err = decodeVarint(data); err != nil {
				return err
			}
			tmp = append(tmp, int64(u))
		}
		*x = append(*x, tmp...)
		return nil
	}
	var i int64
	if err := decodeInt64(b, &i); err != nil {
		return err
	}
	*x = append(*x, i)
	return nil
}

func decodeUint64(b *buffer, x *uint64) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	*x = b.u64
	return nil
}

func decodeUint64s(b *buffer, x *[]uint64) error {
	if b.typ == 2 {
		data := b.data
		// Packed encoding
		tmp := make([]uint64, 0, len(data)) // Maximally sized
		for len(data) > 0 {
			var u uint64
			var err error

			if u, data, err = decodeVarint(data); err != nil {
				return err
			}
			tmp = append(tmp, u)
		}
		*x = append(*x, tmp...)
		return nil
	}
	var u uint64
	if err := decodeUint64(b, &u); err != nil {
		return err
	}
	*x = append(*x, u)
	return nil
}

func decodeString(b *buffer, x *string) error {
	if err := checkType(b, 2); err != nil {
		return err
	}
	*x = string(b.data)
	return nil
}

func decodeStrings(b *buffer, x *[]string) error {
	var s string
	if err := decodeString(b, &s); err != nil {
		return err
	}
	*x = append(*x, s)
	return nil
}

func decodeBool(b *buffer, x *bool) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	if int64(b.u64) == 0 {
		*x = false
	} else {
		*x = true
	}
	return nil
}

@ -0,0 +1,178 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Implements methods to remove frames from profiles.

package profile

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	reservedNames = []string{"(anonymous namespace)", "operator()"}
	bracketRx     = func() *regexp.Regexp {
		var quotedNames []string
		for _, name := range append(reservedNames, "(") {
			quotedNames = append(quotedNames, regexp.QuoteMeta(name))
		}
		return regexp.MustCompile(strings.Join(quotedNames, "|"))
	}()
)

// simplifyFunc does some primitive simplification of function names.
func simplifyFunc(f string) string {
	// Account for leading '.' on the PPC ELF v1 ABI.
	funcName := strings.TrimPrefix(f, ".")
	// Account for unsimplified names -- try to remove the argument list by trimming
	// starting from the first '(', but skipping reserved names that have '('.
	for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
		foundReserved := false
		for _, res := range reservedNames {
			if funcName[ind[0]:ind[1]] == res {
				foundReserved = true
				break
			}
		}
		if !foundReserved {
			funcName = funcName[:ind[0]]
			break
		}
	}
	return funcName
}
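
// Worked example (added for clarity, not part of the vendored source):
// simplifyFunc trims from the first '(' that is not part of a reserved name
// like "operator()", so a C++ signature loses its argument list and the PPC
// ELF v1 leading dot is dropped:
//
//	simplifyFunc("ns::Foo::Bar(int, char const*)") // -> "ns::Foo::Bar"
//	simplifyFunc(".Baz(double)")                   // -> "Baz"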

// Prune removes all nodes beneath a node matching dropRx, and not
// matching keepRx. If the root node of a Sample matches, the sample
// will have an empty stack.
func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
	prune := make(map[uint64]bool)
	pruneBeneath := make(map[uint64]bool)

	for _, loc := range p.Location {
		var i int
		for i = len(loc.Line) - 1; i >= 0; i-- {
			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
				funcName := simplifyFunc(fn.Name)
				if dropRx.MatchString(funcName) {
					if keepRx == nil || !keepRx.MatchString(funcName) {
						break
					}
				}
			}
		}

		if i >= 0 {
			// Found matching entry to prune.
			pruneBeneath[loc.ID] = true

			// Remove the matching location.
			if i == len(loc.Line)-1 {
				// Matched the top entry: prune the whole location.
				prune[loc.ID] = true
			} else {
				loc.Line = loc.Line[i+1:]
			}
		}
	}

	// Prune locs from each Sample
	for _, sample := range p.Sample {
		// Scan from the root to the leaves to find the prune location.
		// Do not prune frames before the first user frame, to avoid
		// pruning everything.
		foundUser := false
		for i := len(sample.Location) - 1; i >= 0; i-- {
			id := sample.Location[i].ID
			if !prune[id] && !pruneBeneath[id] {
				foundUser = true
				continue
			}
			if !foundUser {
				continue
			}
			if prune[id] {
				sample.Location = sample.Location[i+1:]
				break
			}
			if pruneBeneath[id] {
				sample.Location = sample.Location[i:]
				break
			}
		}
	}
}
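
// Illustrative sketch (not part of the vendored source): dropping runtime
// frames while protecting one by name. The patterns are hypothetical and
// would normally come from the profile's DropFrames/KeepFrames strings, as
// RemoveUninteresting below does.
//
//	drop := regexp.MustCompile(`^runtime\..*$`)
//	keep := regexp.MustCompile(`^runtime\.main$`)
//	p.Prune(drop, keep)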

// RemoveUninteresting prunes and elides profiles using built-in
// tables of uninteresting function names.
func (p *Profile) RemoveUninteresting() error {
	var keep, drop *regexp.Regexp
	var err error

	if p.DropFrames != "" {
		if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
			return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
		}
		if p.KeepFrames != "" {
			if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
				return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
			}
		}
		p.Prune(drop, keep)
	}
	return nil
}

// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself.
//
// Please see the example below to understand this method as well as
// the difference from the Prune method.
//
// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline.
//
// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A.
// Prune(A, nil) returns [B,C,B,D] by removing A itself.
//
// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom.
// Prune(B, nil) returns [D] because a matching node is found by scanning from the root.
func (p *Profile) PruneFrom(dropRx *regexp.Regexp) {
	pruneBeneath := make(map[uint64]bool)

	for _, loc := range p.Location {
		for i := 0; i < len(loc.Line); i++ {
			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
				funcName := simplifyFunc(fn.Name)
				if dropRx.MatchString(funcName) {
					// Found matching entry to prune.
					pruneBeneath[loc.ID] = true
					loc.Line = loc.Line[i:]
					break
				}
			}
		}
	}

	// Prune locs from each Sample
	for _, sample := range p.Sample {
		// Scan from the bottom leaf to the root to find the prune location.
		for i, loc := range sample.Location {
			if pruneBeneath[loc.ID] {
				sample.Location = sample.Location[i:]
				break
			}
		}
	}
}

@ -1,3 +0,0 @@
.idea/
.test/
examples/_*
@ -1,56 +0,0 @@
# Version v1.4.7-v1.4.8
* Documentation updates.
* Small linter cleanups.
* Added example in test.

# Version v1.4.6
* Document the usage of Cleanup when re-reading a file (thanks to @lesovsky, issue #18).
* Add example directories with example and tests for issues.

# Version v1.4.4-v1.4.5
* Fix of checksum problem because of forced tag. No changes to the code.

# Version v1.4.1
* Incorporated PR 162 by Mohammed902: "Simplify non-Windows build tag".

# Version v1.4.0
* Incorporated PR 9 by mschneider82: "Added seekinfo to Tail".

# Version v1.3.1
* Incorporated PR 7: "Fix deadlock when stopping on non-empty file/buffer",
fixes upstream issue 93.

# Version v1.3.0
* Incorporated changes of unmerged upstream PR 149 by mezzi: "added line num
to Line struct".

# Version v1.2.1
* Incorporated changes of unmerged upstream PR 128 by jadekler: "Compile-able
code in readme".
* Incorporated changes of unmerged upstream PR 130 by fgeller: "small change
to comment wording".
* Incorporated changes of unmerged upstream PR 133 by sm3142: "removed
spurious newlines from log messages".

# Version v1.2.0
* Incorporated changes of unmerged upstream PR 126 by Code-Hex: "Solved the
problem for never return the last line if it's not followed by a newline".
* Incorporated changes of unmerged upstream PR 131 by StoicPerlman: "Remove
deprecated os.SEEK consts". The changes bumped the minimal supported Go
release to 1.9.

# Version v1.1.0
* Migration to go modules.
* Release of the master branch of the dormant upstream, because it contains
fixes and improvements not present in the tagged release.
@ -1,19 +0,0 @@
FROM golang

RUN mkdir -p $GOPATH/src/github.com/nxadm/tail/
ADD . $GOPATH/src/github.com/nxadm/tail/

# expecting to fetch dependencies successfully.
RUN go get -v github.com/nxadm/tail

# expecting to run the test successfully.
RUN go test -v github.com/nxadm/tail

# expecting to install successfully.
RUN go install -v github.com/nxadm/tail
RUN go install -v github.com/nxadm/tail/cmd/gotail

RUN $GOPATH/bin/gotail -h || true

ENV PATH $GOPATH/bin:$PATH
CMD ["gotail"]
@ -1,44 +0,0 @@
![ci](https://github.com/nxadm/tail/workflows/ci/badge.svg)[![Go Reference](https://pkg.go.dev/badge/github.com/nxadm/tail.svg)](https://pkg.go.dev/github.com/nxadm/tail)

# tail functionality in Go

nxadm/tail provides a Go library that emulates the features of the BSD `tail`
program. The library comes with full support for truncation/move detection as
it is designed to work with log rotation tools. The library works on all
operating systems supported by Go, including POSIX systems like Linux and
*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported.

A simple example:

```Go
// Create a tail
t, err := tail.TailFile(
	"/var/log/nginx.log", tail.Config{Follow: true, ReOpen: true})
if err != nil {
	panic(err)
}

// Print the text of each received line
for line := range t.Lines {
	fmt.Println(line.Text)
}
```

See [API documentation](https://pkg.go.dev/github.com/nxadm/tail).

## Installing

    go get github.com/nxadm/tail/...

## History

This project is an active, drop-in replacement for the
[abandoned](https://en.wikipedia.org/wiki/HPE_Helion) Go tail library at
[hpcloud](https://github.com/hpcloud/tail). Next to
[addressing open issues/PRs of the original project](https://github.com/nxadm/tail/issues/6),
nxadm/tail continues the development by keeping up to date with the Go toolchain
(e.g. go modules) and dependencies, completing the documentation, adding features
and fixing bugs.

## Examples
Examples, e.g. used to debug an issue, are kept in the [examples directory](/examples).
@ -1,7 +0,0 @@
Copyright (C) 2013 99designs

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -1,97 +0,0 @@
// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
package ratelimiter

import (
	"time"
)

type LeakyBucket struct {
	Size         uint16
	Fill         float64
	LeakInterval time.Duration // time.Duration for 1 unit of size to leak
	Lastupdate   time.Time
	Now          func() time.Time
}

func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
	bucket := LeakyBucket{
		Size:         size,
		Fill:         0,
		LeakInterval: leakInterval,
		Now:          time.Now,
		Lastupdate:   time.Now(),
	}

	return &bucket
}

func (b *LeakyBucket) updateFill() {
	now := b.Now()
	if b.Fill > 0 {
		elapsed := now.Sub(b.Lastupdate)

		b.Fill -= float64(elapsed) / float64(b.LeakInterval)
		if b.Fill < 0 {
			b.Fill = 0
		}
	}
	b.Lastupdate = now
}

func (b *LeakyBucket) Pour(amount uint16) bool {
	b.updateFill()

	var newfill float64 = b.Fill + float64(amount)

	if newfill > float64(b.Size) {
		return false
	}

	b.Fill = newfill

	return true
}
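
// Illustrative sketch (not part of the vendored source): a bucket of size 10
// that leaks one unit per second admits a request when Pour returns true and
// rejects it when the bucket would overflow.
//
//	b := ratelimiter.NewLeakyBucket(10, time.Second)
//	if b.Pour(1) {
//		// handle the request
//	} else {
//		// rate limited: the bucket is full
//	}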

// The time at which this bucket will be completely drained
func (b *LeakyBucket) DrainedAt() time.Time {
	return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
}

// The duration until this bucket is completely drained
func (b *LeakyBucket) TimeToDrain() time.Duration {
	return b.DrainedAt().Sub(b.Now())
}

func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
	return b.Now().Sub(b.Lastupdate)
}

type LeakyBucketSer struct {
	Size         uint16
	Fill         float64
	LeakInterval time.Duration // time.Duration for 1 unit of size to leak
	Lastupdate   time.Time
}

func (b *LeakyBucket) Serialise() *LeakyBucketSer {
	bucket := LeakyBucketSer{
		Size:         b.Size,
		Fill:         b.Fill,
		LeakInterval: b.LeakInterval,
		Lastupdate:   b.Lastupdate,
	}

	return &bucket
}

func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
	bucket := LeakyBucket{
		Size:         b.Size,
		Fill:         b.Fill,
		LeakInterval: b.LeakInterval,
		Lastupdate:   b.Lastupdate,
		Now:          time.Now,
	}

	return &bucket
}
@ -1,60 +0,0 @@
package ratelimiter

import (
	"errors"
	"time"
)

const (
	GC_SIZE   int           = 100
	GC_PERIOD time.Duration = 60 * time.Second
)

type Memory struct {
	store           map[string]LeakyBucket
	lastGCCollected time.Time
}

func NewMemory() *Memory {
	m := new(Memory)
	m.store = make(map[string]LeakyBucket)
	m.lastGCCollected = time.Now()
	return m
}

func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {

	bucket, ok := m.store[key]
	if !ok {
		return nil, errors.New("miss")
	}

	return &bucket, nil
}

func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {

	if len(m.store) > GC_SIZE {
		m.GarbageCollect()
	}

	m.store[key] = bucket

	return nil
}

func (m *Memory) GarbageCollect() {
	now := time.Now()

	// rate limit GC to once per minute
	if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() {
		for key, bucket := range m.store {
			// if the bucket is drained, then GC
			if bucket.DrainedAt().Unix() < now.Unix() {
				delete(m.store, key)
			}
		}

		m.lastGCCollected = now
	}
}
@ -1,6 +0,0 @@
package ratelimiter

type Storage interface {
	GetBucketFor(string) (*LeakyBucket, error)
	SetBucketFor(string, LeakyBucket) error
}
@ -1,455 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

// nxadm/tail provides a Go library that emulates the features of the BSD `tail`
// program. The library comes with full support for truncation/move detection as
// it is designed to work with log rotation tools. The library works on all
// operating systems supported by Go, including POSIX systems like Linux and
// *BSD, and MS Windows. Go 1.9 is the oldest compiler release supported.
package tail

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/nxadm/tail/ratelimiter"
	"github.com/nxadm/tail/util"
	"github.com/nxadm/tail/watch"
	"gopkg.in/tomb.v1"
)

var (
	// ErrStop is returned when the tail of a file has been marked to be stopped.
	ErrStop = errors.New("tail should now stop")
)

type Line struct {
	Text     string    // The contents of the file
	Num      int       // The line number
	SeekInfo SeekInfo  // SeekInfo
	Time     time.Time // Present time
	Err      error     // Error from tail
}

// Deprecated: this function is no longer used internally and it has little or no
// use in the API. As such, it will be removed from the API in a future major
// release.
//
// NewLine returns a pointer to a Line struct.
func NewLine(text string, lineNum int) *Line {
	return &Line{text, lineNum, SeekInfo{}, time.Now(), nil}
}

// SeekInfo represents arguments to io.Seek. See: https://golang.org/pkg/io/#SectionReader.Seek
type SeekInfo struct {
	Offset int64
	Whence int
}

type logger interface {
	Fatal(v ...interface{})
	Fatalf(format string, v ...interface{})
	Fatalln(v ...interface{})
	Panic(v ...interface{})
	Panicf(format string, v ...interface{})
	Panicln(v ...interface{})
	Print(v ...interface{})
	Printf(format string, v ...interface{})
	Println(v ...interface{})
}

// Config is used to specify how a file must be tailed.
type Config struct {
	// File-specific
	Location  *SeekInfo // Tail from this location. If nil, start at the beginning of the file
	ReOpen    bool      // Reopen recreated files (tail -F)
	MustExist bool      // Fail early if the file does not exist
	Poll      bool      // Poll for file changes instead of using the default inotify
	Pipe      bool      // The file is a named pipe (mkfifo)

	// Generic IO
	Follow      bool // Continue looking for new lines (tail -f)
	MaxLineSize int  // If non-zero, split longer lines into multiple lines

	// Optionally, use a ratelimiter (e.g. created by the ratelimiter/NewLeakyBucket function)
	RateLimiter *ratelimiter.LeakyBucket

	// Optionally use a Logger. When nil, the Logger is set to tail.DefaultLogger.
	// To disable logging, set it to tail.DiscardingLogger.
	Logger logger
}

type Tail struct {
	Filename string     // The filename
	Lines    chan *Line // A consumable channel of *Line
	Config              // Tail.Configuration

	file    *os.File
	reader  *bufio.Reader
	lineNum int

	watcher watch.FileWatcher
	changes *watch.FileChanges

	tomb.Tomb // provides: Done, Kill, Dying

	lk sync.Mutex
}

var (
	// DefaultLogger logs to os.Stderr and it is used when Config.Logger == nil
	DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
	// DiscardingLogger can be used to disable logging output
	DiscardingLogger = log.New(ioutil.Discard, "", 0)
)

// TailFile begins tailing the file and returns a pointer to a Tail struct
// and an error. An output stream is made available via the Tail.Lines
// channel (e.g. to be looped and printed). To handle errors during tailing,
// after finishing reading from the Lines channel, invoke the `Wait` or `Err`
// method on the returned *Tail.
func TailFile(filename string, config Config) (*Tail, error) {
	if config.ReOpen && !config.Follow {
		util.Fatal("cannot set ReOpen without Follow.")
	}

	t := &Tail{
		Filename: filename,
		Lines:    make(chan *Line),
		Config:   config,
	}

	// when Logger was not specified in config, use default logger
	if t.Logger == nil {
		t.Logger = DefaultLogger
	}

	if t.Poll {
		t.watcher = watch.NewPollingFileWatcher(filename)
	} else {
		t.watcher = watch.NewInotifyFileWatcher(filename)
	}

	if t.MustExist {
		var err error
		t.file, err = OpenFile(t.Filename)
		if err != nil {
			return nil, err
		}
	}

	go t.tailFileSync()

	return t, nil
}
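
// Illustrative sketch (not part of the vendored source): the usage pattern
// from the package README plus the error handling the doc comment above
// recommends, checking Wait after the Lines channel is drained. The log path
// is a hypothetical example.
//
//	t, err := tail.TailFile("/var/log/app.log", tail.Config{Follow: true, ReOpen: true})
//	if err != nil {
//		panic(err)
//	}
//	for line := range t.Lines {
//		fmt.Println(line.Text)
//	}
//	if err := t.Wait(); err != nil {
//		log.Println(err)
//	}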

// Tell returns the file's current position, like stdio's ftell(), and an error.
// Beware that this value may not be completely accurate because one line from
// the chan(tail.Lines) may have been read already.
func (tail *Tail) Tell() (offset int64, err error) {
	if tail.file == nil {
		return
	}
	offset, err = tail.file.Seek(0, io.SeekCurrent)
	if err != nil {
		return
	}

	tail.lk.Lock()
	defer tail.lk.Unlock()
	if tail.reader == nil {
		return
	}

	offset -= int64(tail.reader.Buffered())
	return
}

// Stop stops the tailing activity.
func (tail *Tail) Stop() error {
	tail.Kill(nil)
	return tail.Wait()
}

// StopAtEOF stops tailing as soon as the end of the file is reached. The
// function returns an error.
func (tail *Tail) StopAtEOF() error {
	tail.Kill(errStopAtEOF)
	return tail.Wait()
}

var errStopAtEOF = errors.New("tail: stop at eof")

func (tail *Tail) close() {
	close(tail.Lines)
	tail.closeFile()
}

func (tail *Tail) closeFile() {
	if tail.file != nil {
		tail.file.Close()
		tail.file = nil
	}
}

func (tail *Tail) reopen() error {
	tail.closeFile()
	tail.lineNum = 0
	for {
		var err error
		tail.file, err = OpenFile(tail.Filename)
		if err != nil {
			if os.IsNotExist(err) {
				tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
				if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
					if err == tomb.ErrDying {
						return err
					}
					return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
				}
				continue
			}
			return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
		}
		break
	}
	return nil
}
|
|
||||||
|
|
||||||

func (tail *Tail) readLine() (string, error) {
	tail.lk.Lock()
	line, err := tail.reader.ReadString('\n')
	tail.lk.Unlock()
	if err != nil {
		// Note ReadString "returns the data read before the error" in
		// case of an error, including EOF, so we return it as is. The
		// caller is expected to process it if err is EOF.
		return line, err
	}

	line = strings.TrimRight(line, "\n")

	return line, err
}

func (tail *Tail) tailFileSync() {
	defer tail.Done()
	defer tail.close()

	if !tail.MustExist {
		// deferred first open.
		err := tail.reopen()
		if err != nil {
			if err != tomb.ErrDying {
				tail.Kill(err)
			}
			return
		}
	}

	// Seek to requested location on first open of the file.
	if tail.Location != nil {
		_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
		if err != nil {
			tail.Killf("Seek error on %s: %s", tail.Filename, err)
			return
		}
	}

	tail.openReader()

	// Read line by line.
	for {
		// do not seek in named pipes
		if !tail.Pipe {
			// grab the position in case we need to back up in the event of a half-line
			if _, err := tail.Tell(); err != nil {
				tail.Kill(err)
				return
			}
		}

		line, err := tail.readLine()

		// Process `line` even if err is EOF.
		if err == nil {
			cooloff := !tail.sendLine(line)
			if cooloff {
				// Wait a second before seeking till the end of
				// file when rate limit is reached.
				msg := "Too much log activity; waiting a second before resuming tailing"
				offset, _ := tail.Tell()
				tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)}
				select {
				case <-time.After(time.Second):
				case <-tail.Dying():
					return
				}
				if err := tail.seekEnd(); err != nil {
					tail.Kill(err)
					return
				}
			}
		} else if err == io.EOF {
			if !tail.Follow {
				if line != "" {
					tail.sendLine(line)
				}
				return
			}

			if tail.Follow && line != "" {
				tail.sendLine(line)
				if err := tail.seekEnd(); err != nil {
					tail.Kill(err)
					return
				}
			}

			// When EOF is reached, wait for more data to become
			// available. Wait strategy is based on the `tail.watcher`
			// implementation (inotify or polling).
			err := tail.waitForChanges()
			if err != nil {
				if err != ErrStop {
					tail.Kill(err)
				}
				return
			}
		} else {
			// non-EOF error
			tail.Killf("Error reading %s: %s", tail.Filename, err)
			return
		}

		select {
		case <-tail.Dying():
			if tail.Err() == errStopAtEOF {
				continue
			}
			return
		default:
		}
	}
}

// waitForChanges waits until the file has been appended to, deleted,
// moved, or truncated. When moved or deleted, the file will be reopened
// if ReOpen is true. Truncated files are always reopened.
func (tail *Tail) waitForChanges() error {
	if tail.changes == nil {
		pos, err := tail.file.Seek(0, io.SeekCurrent)
		if err != nil {
			return err
		}
		tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
		if err != nil {
			return err
		}
	}

	select {
	case <-tail.changes.Modified:
		return nil
	case <-tail.changes.Deleted:
		tail.changes = nil
		if tail.ReOpen {
			// XXX: we must not log from a library.
			tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
			if err := tail.reopen(); err != nil {
				return err
			}
			tail.Logger.Printf("Successfully reopened %s", tail.Filename)
			tail.openReader()
			return nil
		}
		tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
		return ErrStop
	case <-tail.changes.Truncated:
		// Always reopen truncated files (Follow is true)
		tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
		if err := tail.reopen(); err != nil {
			return err
		}
		tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
		tail.openReader()
		return nil
	case <-tail.Dying():
		return ErrStop
	}
}

func (tail *Tail) openReader() {
	tail.lk.Lock()
	if tail.MaxLineSize > 0 {
		// add 2 to account for newline characters
		tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
	} else {
		tail.reader = bufio.NewReader(tail.file)
	}
	tail.lk.Unlock()
}

func (tail *Tail) seekEnd() error {
	return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd})
}

func (tail *Tail) seekTo(pos SeekInfo) error {
	_, err := tail.file.Seek(pos.Offset, pos.Whence)
	if err != nil {
		return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
	}
	// Reset the read buffer whenever the file is re-seeked.
	tail.reader.Reset(tail.file)
	return nil
}

// sendLine sends the line(s) to the Lines channel, splitting longer lines
// if necessary. It returns false if the rate limit is reached.
func (tail *Tail) sendLine(line string) bool {
	now := time.Now()
	lines := []string{line}

	// Split longer lines
	if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
		lines = util.PartitionString(line, tail.MaxLineSize)
	}

	for _, line := range lines {
		tail.lineNum++
		offset, _ := tail.Tell()
		select {
		case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}:
		case <-tail.Dying():
			return true
		}
	}

	if tail.Config.RateLimiter != nil {
		ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
		if !ok {
			tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.", tail.Filename)
			return false
		}
	}

	return true
}
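Config.RateLimiter is a leaky-bucket limiter from the ratelimiter subpackage; a hedged sketch of wiring one up, assuming its NewLeakyBucket(size, leakInterval) constructor (the log path is hypothetical):

package main

import (
	"log"
	"time"

	"github.com/nxadm/tail"
	"github.com/nxadm/tail/ratelimiter"
)

func main() {
	// Allow bursts of up to 64 lines, leaking one token every 100ms.
	// When the bucket overflows, sendLine returns false and the tail
	// backs off for a second, then seeks to the end of the file.
	t, err := tail.TailFile("/var/log/app.log", tail.Config{
		Follow:      true,
		RateLimiter: ratelimiter.NewLeakyBucket(64, 100*time.Millisecond),
	})
	if err != nil {
		log.Fatal(err)
	}
	for line := range t.Lines {
		log.Println(line.Text)
	}
}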

// Cleanup removes inotify watches added by the tail package. This function
// is meant to be invoked from a process's exit handler. The Linux kernel may
// not automatically remove inotify watches after the process exits.
// If you plan to re-read a file, don't call Cleanup in between.
func (tail *Tail) Cleanup() {
	watch.Cleanup(tail.Filename)
}
@@ -1,17 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// +build !windows

package tail

import (
	"os"
)

// Deprecated: this function is only useful internally and, as such,
// it will be removed from the API in a future major release.
//
// OpenFile proxies an os.Open call for a file so it can be correctly tailed
// on POSIX and non-POSIX OSes like MS Windows.
func OpenFile(name string) (file *os.File, err error) {
	return os.Open(name)
}
@@ -1,19 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// +build windows

package tail

import (
	"os"

	"github.com/nxadm/tail/winfile"
)

// Deprecated: this function is only useful internally and, as such,
// it will be removed from the API in a future major release.
//
// OpenFile proxies an os.Open call for a file so it can be correctly tailed
// on POSIX and non-POSIX OSes like MS Windows.
func OpenFile(name string) (file *os.File, err error) {
	return winfile.OpenFile(name, os.O_RDONLY, 0)
}
@@ -1,49 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package util

import (
	"fmt"
	"log"
	"os"
	"runtime/debug"
)

type Logger struct {
	*log.Logger
}

var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}

// Fatal is like panic except it displays only the current goroutine's stack.
func Fatal(format string, v ...interface{}) {
	// https://github.com/nxadm/log/blob/master/log.go#L45
	LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
	os.Exit(1)
}

// PartitionString partitions the string into chunks of the given size,
// with the last chunk of variable size.
func PartitionString(s string, chunkSize int) []string {
	if chunkSize <= 0 {
		panic("invalid chunkSize")
	}
	length := len(s)
	chunks := 1 + length/chunkSize
	start := 0
	end := chunkSize
	parts := make([]string, 0, chunks)
	for {
		if end > length {
			end = length
		}
		parts = append(parts, s[start:end])
		if end == length {
			break
		}
		start, end = end, end+chunkSize
	}
	return parts
}
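A quick worked example of PartitionString's chunking, with the expected result in a comment:

package main

import (
	"fmt"

	"github.com/nxadm/tail/util"
)

func main() {
	parts := util.PartitionString("abcdefgh", 3)
	fmt.Println(parts) // [abc def gh] — the last chunk absorbs the remainder
}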
@@ -1,37 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
package watch

type FileChanges struct {
	Modified  chan bool // Channel to get notified of modifications
	Truncated chan bool // Channel to get notified of truncations
	Deleted   chan bool // Channel to get notified of deletions/renames
}

func NewFileChanges() *FileChanges {
	return &FileChanges{
		make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)}
}

func (fc *FileChanges) NotifyModified() {
	sendOnlyIfEmpty(fc.Modified)
}

func (fc *FileChanges) NotifyTruncated() {
	sendOnlyIfEmpty(fc.Truncated)
}

func (fc *FileChanges) NotifyDeleted() {
	sendOnlyIfEmpty(fc.Deleted)
}

// sendOnlyIfEmpty sends on a bool channel only if the channel has no
// backlog to be read by other goroutines. This concurrency pattern
// can be used to notify other goroutines if and only if they are
// looking for it (i.e., subsequent notifications can be compressed
// into one).
func sendOnlyIfEmpty(ch chan bool) {
	select {
	case ch <- true:
	default:
	}
}
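A minimal, self-contained demonstration of the sendOnlyIfEmpty pattern: with a buffered channel of capacity 1, rapid successive notifications coalesce into a single pending wakeup for the receiver.

package main

import "fmt"

func notify(ch chan bool) {
	select {
	case ch <- true: // deliver only if nobody has a wakeup queued
	default: // drop: the receiver already has one pending
	}
}

func main() {
	ch := make(chan bool, 1)
	notify(ch)
	notify(ch) // coalesced
	notify(ch) // coalesced
	fmt.Println(len(ch)) // 1 — three notifications compressed into one
}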
@@ -1,136 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/nxadm/tail/util"

	"github.com/fsnotify/fsnotify"
	"gopkg.in/tomb.v1"
)

// InotifyFileWatcher uses inotify to monitor file changes.
type InotifyFileWatcher struct {
	Filename string
	Size     int64
}

func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
	fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
	return fw
}

func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
	err := WatchCreate(fw.Filename)
	if err != nil {
		return err
	}
	defer RemoveWatchCreate(fw.Filename)

	// Do a real check now as the file might have been created before
	// calling `WatchCreate` above.
	if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
		// file exists, or stat returned an error.
		return err
	}

	events := Events(fw.Filename)

	for {
		select {
		case evt, ok := <-events:
			if !ok {
				return fmt.Errorf("inotify watcher has been closed")
			}
			evtName, err := filepath.Abs(evt.Name)
			if err != nil {
				return err
			}
			fwFilename, err := filepath.Abs(fw.Filename)
			if err != nil {
				return err
			}
			if evtName == fwFilename {
				return nil
			}
		case <-t.Dying():
			return tomb.ErrDying
		}
	}
	panic("unreachable")
}

func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
	err := Watch(fw.Filename)
	if err != nil {
		return nil, err
	}

	changes := NewFileChanges()
	fw.Size = pos

	go func() {

		events := Events(fw.Filename)

		for {
			prevSize := fw.Size

			var evt fsnotify.Event
			var ok bool

			select {
			case evt, ok = <-events:
				if !ok {
					RemoveWatch(fw.Filename)
					return
				}
			case <-t.Dying():
				RemoveWatch(fw.Filename)
				return
			}

			switch {
			case evt.Op&fsnotify.Remove == fsnotify.Remove:
				fallthrough

			case evt.Op&fsnotify.Rename == fsnotify.Rename:
				RemoveWatch(fw.Filename)
				changes.NotifyDeleted()
				return

			// With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod)
			case evt.Op&fsnotify.Chmod == fsnotify.Chmod:
				fallthrough

			case evt.Op&fsnotify.Write == fsnotify.Write:
				fi, err := os.Stat(fw.Filename)
				if err != nil {
					if os.IsNotExist(err) {
						RemoveWatch(fw.Filename)
						changes.NotifyDeleted()
						return
					}
					// XXX: report this error back to the user
					util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
				}
				fw.Size = fi.Size()

				if prevSize > 0 && prevSize > fw.Size {
					changes.NotifyTruncated()
				} else {
					changes.NotifyModified()
				}
				prevSize = fw.Size
			}
		}
	}()

	return changes, nil
}
@@ -1,249 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import (
	"log"
	"os"
	"path/filepath"
	"sync"
	"syscall"

	"github.com/nxadm/tail/util"

	"github.com/fsnotify/fsnotify"
)

type InotifyTracker struct {
	mux       sync.Mutex
	watcher   *fsnotify.Watcher
	chans     map[string]chan fsnotify.Event
	done      map[string]chan bool
	watchNums map[string]int
	watch     chan *watchInfo
	remove    chan *watchInfo
	error     chan error
}

type watchInfo struct {
	op    fsnotify.Op
	fname string
}

func (this *watchInfo) isCreate() bool {
	return this.op == fsnotify.Create
}

var (
	// globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
	shared *InotifyTracker

	// these are used to ensure the shared InotifyTracker is run exactly once
	once  = sync.Once{}
	goRun = func() {
		shared = &InotifyTracker{
			mux:       sync.Mutex{},
			chans:     make(map[string]chan fsnotify.Event),
			done:      make(map[string]chan bool),
			watchNums: make(map[string]int),
			watch:     make(chan *watchInfo),
			remove:    make(chan *watchInfo),
			error:     make(chan error),
		}
		go shared.run()
	}

	logger = log.New(os.Stderr, "", log.LstdFlags)
)
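The once/goRun pair above implements a lazy singleton: no matter how many goroutines race into watch() or remove() below, the shared tracker is constructed and its run loop started exactly once. A minimal standalone demonstration of the pattern:

package main

import (
	"fmt"
	"sync"
)

var (
	initCount int
	once      sync.Once
)

// get performs initialization exactly once, no matter how many
// goroutines race to call it; later callers block until it is done.
func get() int {
	once.Do(func() { initCount++ })
	return initCount
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); get() }()
	}
	wg.Wait()
	fmt.Println(initCount) // 1 — ten racing callers, one initialization
}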

// Watch signals the run goroutine to begin watching the input filename
func Watch(fname string) error {
	return watch(&watchInfo{
		fname: fname,
	})
}

// WatchCreate signals the run goroutine to begin watching for the creation
// of the input filename. If you call WatchCreate, don't call Cleanup; call
// RemoveWatchCreate instead.
func WatchCreate(fname string) error {
	return watch(&watchInfo{
		op:    fsnotify.Create,
		fname: fname,
	})
}

func watch(winfo *watchInfo) error {
	// start running the shared InotifyTracker if not already running
	once.Do(goRun)

	winfo.fname = filepath.Clean(winfo.fname)
	shared.watch <- winfo
	return <-shared.error
}

// RemoveWatch signals the run goroutine to remove the watch for the input filename
func RemoveWatch(fname string) error {
	return remove(&watchInfo{
		fname: fname,
	})
}

// RemoveWatchCreate signals the run goroutine to remove the creation watch
// for the input filename
func RemoveWatchCreate(fname string) error {
	return remove(&watchInfo{
		op:    fsnotify.Create,
		fname: fname,
	})
}

func remove(winfo *watchInfo) error {
	// start running the shared InotifyTracker if not already running
	once.Do(goRun)

	winfo.fname = filepath.Clean(winfo.fname)
	shared.mux.Lock()
	done := shared.done[winfo.fname]
	if done != nil {
		delete(shared.done, winfo.fname)
		close(done)
	}
	shared.mux.Unlock()

	shared.remove <- winfo
	return <-shared.error
}

// Events returns a channel to which FileEvents corresponding to the input filename
// will be sent. This channel will be closed when removeWatch is called on this
// filename.
func Events(fname string) <-chan fsnotify.Event {
	shared.mux.Lock()
	defer shared.mux.Unlock()

	return shared.chans[fname]
}

// Cleanup removes the watch for the input filename if necessary.
func Cleanup(fname string) error {
	return RemoveWatch(fname)
}

// addWatch calls fsnotify's Add for the input filename, creating
// a new Watcher if the previous Watcher was closed.
func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
	shared.mux.Lock()
	defer shared.mux.Unlock()

	if shared.chans[winfo.fname] == nil {
		shared.chans[winfo.fname] = make(chan fsnotify.Event)
	}
	if shared.done[winfo.fname] == nil {
		shared.done[winfo.fname] = make(chan bool)
	}

	fname := winfo.fname
	if winfo.isCreate() {
		// Watch for new files to be created in the parent directory.
		fname = filepath.Dir(fname)
	}

	var err error
	// only add to the inotify watch if not already watched
	if shared.watchNums[fname] == 0 {
		err = shared.watcher.Add(fname)
	}
	if err == nil {
		shared.watchNums[fname]++
	}
	return err
}

// removeWatch calls fsnotify's Remove for the input filename and closes the
// corresponding events channel.
func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error {
	shared.mux.Lock()

	ch := shared.chans[winfo.fname]
	if ch != nil {
		delete(shared.chans, winfo.fname)
		close(ch)
	}

	fname := winfo.fname
	if winfo.isCreate() {
		// Watch for new files to be created in the parent directory.
		fname = filepath.Dir(fname)
	}
	shared.watchNums[fname]--
	watchNum := shared.watchNums[fname]
	if watchNum == 0 {
		delete(shared.watchNums, fname)
	}
	shared.mux.Unlock()

	var err error
	// If we were the last ones to watch this file, unsubscribe from inotify.
	// This needs to happen after releasing the lock because fsnotify waits
	// synchronously for the kernel to acknowledge the removal of the watch
	// for this file, which causes us to deadlock if we still held the lock.
	if watchNum == 0 {
		err = shared.watcher.Remove(fname)
	}

	return err
}

// sendEvent sends the input event to the appropriate Tail.
func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
	name := filepath.Clean(event.Name)

	shared.mux.Lock()
	ch := shared.chans[name]
	done := shared.done[name]
	shared.mux.Unlock()

	if ch != nil && done != nil {
		select {
		case ch <- event:
		case <-done:
		}
	}
}

// run starts the goroutine in which the shared struct reads events from its
// Watcher's Event channel and sends the events to the appropriate Tail.
func (shared *InotifyTracker) run() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		util.Fatal("failed to create Watcher")
	}
	shared.watcher = watcher

	for {
		select {
		case winfo := <-shared.watch:
			shared.error <- shared.addWatch(winfo)

		case winfo := <-shared.remove:
			shared.error <- shared.removeWatch(winfo)

		case event, open := <-shared.watcher.Events:
			if !open {
				return
			}
			shared.sendEvent(event)

		case err, open := <-shared.watcher.Errors:
			if !open {
				return
			} else if err != nil {
				sysErr, ok := err.(*os.SyscallError)
				if !ok || sysErr.Err != syscall.EINTR {
					logger.Printf("Error in Watcher Error channel: %s", err)
				}
			}
		}
	}
}
@@ -1,119 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import (
	"os"
	"runtime"
	"time"

	"github.com/nxadm/tail/util"
	"gopkg.in/tomb.v1"
)

// PollingFileWatcher polls the file for changes.
type PollingFileWatcher struct {
	Filename string
	Size     int64
}

func NewPollingFileWatcher(filename string) *PollingFileWatcher {
	fw := &PollingFileWatcher{filename, 0}
	return fw
}

var POLL_DURATION time.Duration

func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
	for {
		if _, err := os.Stat(fw.Filename); err == nil {
			return nil
		} else if !os.IsNotExist(err) {
			return err
		}
		select {
		case <-time.After(POLL_DURATION):
			continue
		case <-t.Dying():
			return tomb.ErrDying
		}
	}
	panic("unreachable")
}

func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
	origFi, err := os.Stat(fw.Filename)
	if err != nil {
		return nil, err
	}

	changes := NewFileChanges()
	var prevModTime time.Time

	// XXX: use tomb.Tomb to cleanly manage these goroutines. replace
	// the fatal (below) with tomb's Kill.

	fw.Size = pos

	go func() {
		prevSize := fw.Size
		for {
			select {
			case <-t.Dying():
				return
			default:
			}

			time.Sleep(POLL_DURATION)
			fi, err := os.Stat(fw.Filename)
			if err != nil {
				// Windows cannot delete a file if a handle is still open (tail keeps one open),
				// so it gives access denied to anything trying to read it until all handles are released.
				if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
					// File does not exist (has been deleted).
					changes.NotifyDeleted()
					return
				}

				// XXX: report this error back to the user
				util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
			}

			// File got moved/renamed?
			if !os.SameFile(origFi, fi) {
				changes.NotifyDeleted()
				return
			}

			// File got truncated?
			fw.Size = fi.Size()
			if prevSize > 0 && prevSize > fw.Size {
				changes.NotifyTruncated()
				prevSize = fw.Size
				continue
			}
			// File got bigger?
			if prevSize > 0 && prevSize < fw.Size {
				changes.NotifyModified()
				prevSize = fw.Size
				continue
			}
			prevSize = fw.Size

			// File was appended to (changed)?
			modTime := fi.ModTime()
			if modTime != prevModTime {
				prevModTime = modTime
				changes.NotifyModified()
			}
		}
	}()

	return changes, nil
}

func init() {
	POLL_DURATION = 250 * time.Millisecond
}
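POLL_DURATION is an exported package-level variable, so the 250ms default can be tuned. A hedged sketch; since running watchers read the variable without synchronization, it should be set before any polling watcher starts:

package main

import (
	"time"

	"github.com/nxadm/tail/watch"
)

func init() {
	// Halve the default 250ms poll interval; set before any
	// tail.TailFile call that uses Config{Poll: true}.
	watch.POLL_DURATION = 125 * time.Millisecond
}

func main() { /* start tails here */ }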
@@ -1,21 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import "gopkg.in/tomb.v1"

// FileWatcher monitors file-level events.
type FileWatcher interface {
	// BlockUntilExists blocks until the file comes into existence.
	BlockUntilExists(*tomb.Tomb) error

	// ChangeEvents reports on changes to a file, be it modification,
	// deletion, renames or truncations. The returned FileChanges group
	// of channels will be closed, and thus become unusable, after a
	// deletion or truncation event.
	// In order to properly report truncations, ChangeEvents requires
	// the caller to pass their current offset in the file.
	ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
}
@@ -1,93 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// +build windows

package winfile

import (
	"os"
	"syscall"
	"unsafe"
)

// The issue is also described here:
// https://codereview.appspot.com/8203043/

// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
	if len(path) == 0 {
		return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
	}
	pathp, err := syscall.UTF16PtrFromString(path)
	if err != nil {
		return syscall.InvalidHandle, err
	}
	var access uint32
	switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
	case syscall.O_RDONLY:
		access = syscall.GENERIC_READ
	case syscall.O_WRONLY:
		access = syscall.GENERIC_WRITE
	case syscall.O_RDWR:
		access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
	}
	if mode&syscall.O_CREAT != 0 {
		access |= syscall.GENERIC_WRITE
	}
	if mode&syscall.O_APPEND != 0 {
		access &^= syscall.GENERIC_WRITE
		access |= syscall.FILE_APPEND_DATA
	}
	sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
	var sa *syscall.SecurityAttributes
	if mode&syscall.O_CLOEXEC == 0 {
		sa = makeInheritSa()
	}
	var createmode uint32
	switch {
	case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
		createmode = syscall.CREATE_NEW
	case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
		createmode = syscall.CREATE_ALWAYS
	case mode&syscall.O_CREAT == syscall.O_CREAT:
		createmode = syscall.OPEN_ALWAYS
	case mode&syscall.O_TRUNC == syscall.O_TRUNC:
		createmode = syscall.TRUNCATE_EXISTING
	default:
		createmode = syscall.OPEN_EXISTING
	}
	h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
	return h, e
}

// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
func makeInheritSa() *syscall.SecurityAttributes {
	var sa syscall.SecurityAttributes
	sa.Length = uint32(unsafe.Sizeof(sa))
	sa.InheritHandle = 1
	return &sa
}

// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
	r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
	if e != nil {
		return nil, e
	}
	return os.NewFile(uintptr(r), name), nil
}

// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
func syscallMode(i os.FileMode) (o uint32) {
	o |= uint32(i.Perm())
	if i&os.ModeSetuid != 0 {
		o |= syscall.S_ISUID
	}
	if i&os.ModeSetgid != 0 {
		o |= syscall.S_ISGID
	}
	if i&os.ModeSticky != 0 {
		o |= syscall.S_ISVTX
	}
	// No mapping for Go's ModeTemporary (plan9 only).
	return
}
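What distinguishes this from a plain os.Open is the share mode: FILE_SHARE_READ|WRITE|DELETE lets other processes rotate or delete the log while the tail keeps its handle. A hedged, Windows-only usage sketch (the path is hypothetical):

// +build windows

package main

import (
	"log"
	"os"

	"github.com/nxadm/tail/winfile"
)

func main() {
	// Unlike plain os.Open, the handle is opened with
	// FILE_SHARE_READ|WRITE|DELETE, so writers can rotate or delete
	// the file while we hold it.
	f, err := winfile.OpenFile(`C:\logs\app.log`, os.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
}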
@@ -1,24 +0,0 @@
language: go
go:
  - tip
  - 1.16.x
  - 1.15.x

cache:
  directories:
    - $GOPATH/pkg/mod

# allow internal package imports, necessary for forked repositories
go_import_path: github.com/onsi/ginkgo

install:
  - GO111MODULE="off" go get -v -t ./...
  - GO111MODULE="off" go get golang.org/x/tools/cmd/cover
  - GO111MODULE="off" go get github.com/onsi/gomega
  - GO111MODULE="off" go install github.com/onsi/ginkgo/ginkgo
  - export PATH=$GOPATH/bin:$PATH

script:
  - GO111MODULE="on" go mod tidy && git diff --exit-code go.mod go.sum
  - go vet
  - ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
@@ -1,169 +0,0 @@
![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png)

[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster)

Jump to the [docs](https://onsi.github.io/ginkgo/) | [中文文档](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!

If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).

# Ginkgo 2.0 Release Candidate is available!

An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [ver2](https://github.com/onsi/ginkgo/tree/ver2) branch and a changelog and migration guide is being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md). Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion.

As described in the [changelog](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported, with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711).

Please start exploring and using the V2 release! To get started follow the [Using the Release Candidate](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta) directions in the migration guide.

## TLDR
Ginkgo builds on Go's `testing` package, allowing expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style tests.
It is typically (and optionally) paired with the [Gomega](https://github.com/onsi/gomega) matcher library.

```go
Describe("the strings package", func() {
  Context("strings.Contains()", func() {
    When("the string contains the substring in the middle", func() {
      It("returns `true`", func() {
        Expect(strings.Contains("Ginkgo is awesome", "is")).To(BeTrue())
      })
    })
  })
})
```

## Feature List

- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite)

- Ginkgo allows you to write tests in Go using expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style:
    - Nestable [`Describe`, `Context` and `When` container blocks](https://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
    - [`BeforeEach` and `AfterEach` blocks](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
    - [`It` and `Specify` blocks](https://onsi.github.io/ginkgo/#individual-specs-it) that hold your assertions
    - [`JustBeforeEach` blocks](https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
    - [`BeforeSuite` and `AfterSuite` blocks](https://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.

- A comprehensive test runner that lets you:
    - Mark specs as [pending](https://onsi.github.io/ginkgo/#pending-specs)
    - [Focus](https://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
    - Run your tests in [random order](https://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
    - Break up your test suite into parallel processes for straightforward [test parallelization](https://onsi.github.io/ginkgo/#parallel-specs)

- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](https://onsi.github.io/ginkgo/#running-tests) and [generating](https://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
    - `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in realtime
    - `ginkgo -cover` runs your tests using Go's code coverage tool
    - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
    - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
    - `ginkgo -r` runs all test suites under the current directory
    - `ginkgo -v` prints out identifying information for each test just before it runs

    And much more: run `ginkgo help` for details!

    The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`

- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!

- Built-in support for testing [asynchronicity](https://onsi.github.io/ginkgo/#asynchronous-tests)

- Built-in support for [benchmarking](https://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.

- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.

- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`.

- [Ginkgo tools for VSCode](https://marketplace.visualstudio.com/items?itemName=joselitofilho.ginkgotestexplorer): just use VSCode's extension installer to install `ginkgoTestExplorer`.

- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](https://onsi.github.io/ginkgo/#third-party-integrations) for details.

- A modular architecture that lets you easily:
    - Write [custom reporters](https://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](https://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
    - [Adapt an existing matcher library (or write your own!)](https://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo

## [Gomega](https://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library

Ginkgo is best paired with Gomega. Learn more about Gomega [here](https://onsi.github.io/gomega/)

## [Agouti](https://github.com/sclevine/agouti): A Go Acceptance Testing Framework

Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](https://agouti.org)

## Getting Started

You'll need the Go command-line tools. Follow the [installation instructions](https://golang.org/doc/install) if you don't have them installed.

### Global installation
To install the Ginkgo command line interface:
```bash
go get -u github.com/onsi/ginkgo/ginkgo
```
Note that this will install it to `$GOBIN`, which will need to be in the `$PATH` (or equivalent). Run `go help install` for more information.

### Go module ["tools package"](https://github.com/golang/go/issues/25922):
Create (or update) a file called `tools/tools.go` with the following contents:
```go
// +build tools

package tools

import (
	_ "github.com/onsi/ginkgo/ginkgo"
)

// This file imports packages that are used when running go generate, or used
// during the development process but not otherwise depended on by built code.
```
The Ginkgo command can then be run via `go run github.com/onsi/ginkgo/ginkgo`.
This approach allows the version of Ginkgo to be maintained under source control for reproducible results,
and is well suited to automated test pipelines.

### Bootstrapping
```bash
cd path/to/package/you/want/to/test

ginkgo bootstrap # set up a new ginkgo suite
ginkgo generate  # will create a sample test file. edit this file and add your tests then...

go test # to run your tests

ginkgo  # also runs your tests
```

## I'm new to Go: What are my testing options?

Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.

With that said, it's great to know what your options are :)

### What Go gives you out of the box

Testing is a first-class citizen in Go; however, Go's built-in testing primitives are somewhat limited: the [testing](https://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.

### Matcher libraries for Go's XUnit style tests

A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:

- [testify](https://github.com/stretchr/testify)
- [gocheck](https://labix.org/gocheck)

You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](https://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)

### BDD style testing frameworks

There are a handful of BDD-style testing frameworks written for Go. Here are a few:

- [Ginkgo](https://github.com/onsi/ginkgo) ;)
- [GoConvey](https://github.com/smartystreets/goconvey)
- [Goblin](https://github.com/franela/goblin)
- [Mao](https://github.com/azer/mao)
- [Zen](https://github.com/pranavraja/zen)

Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries.

Go explore!

## License

Ginkgo is MIT-Licensed

## Contributing

See [CONTRIBUTING.md](CONTRIBUTING.md)
@@ -1,232 +0,0 @@
/*
Ginkgo accepts a number of configuration options.

These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli)

You can also learn more via

	ginkgo help

or (I kid you not):

	go test -asdf
*/
package config

import (
	"flag"
	"time"

	"fmt"
)

const VERSION = "1.16.5"

type GinkgoConfigType struct {
	RandomSeed         int64
	RandomizeAllSpecs  bool
	RegexScansFilePath bool
	FocusStrings       []string
	SkipStrings        []string
	SkipMeasurements   bool
	FailOnPending      bool
	FailFast           bool
	FlakeAttempts      int
	EmitSpecProgress   bool
	DryRun             bool
	DebugParallel      bool

	ParallelNode  int
	ParallelTotal int
	SyncHost      string
	StreamHost    string
}

var GinkgoConfig = GinkgoConfigType{}

type DefaultReporterConfigType struct {
	NoColor           bool
	SlowSpecThreshold float64
	NoisyPendings     bool
	NoisySkippings    bool
	Succinct          bool
	Verbose           bool
	FullTrace         bool
	ReportPassed      bool
	ReportFile        string
}

var DefaultReporterConfig = DefaultReporterConfigType{}

func processPrefix(prefix string) string {
	if prefix != "" {
		prefix += "."
	}
	return prefix
}

type flagFunc func(string)

func (f flagFunc) String() string     { return "" }
func (f flagFunc) Set(s string) error { f(s); return nil }

func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
	prefix = processPrefix(prefix)
	flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
	flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.")
	flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
	flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
	flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")

	flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")

	flagSet.Var(flagFunc(flagFocus), prefix+"focus", "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed.")
	flagSet.Var(flagFunc(flagSkip), prefix+"skip", "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed.")

	flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).")

	flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.")

	flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")

	flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.")

	if includeParallelFlags {
		flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
		flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
		flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
		flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
	}

	flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
	flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
	flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
	flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
	flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter prints out all specs as they begin.")
	flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
	flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
	flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.")
	flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.")
}
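Flags hangs Ginkgo's whole option surface off any flag.FlagSet under a chosen prefix; the registered names come out as --PREFIX.name. A hedged sketch (the "myprefix" namespace and suite name are made up for illustration):

package main

import (
	"flag"
	"os"

	"github.com/onsi/ginkgo/config"
)

func main() {
	fs := flag.NewFlagSet("mysuite", flag.ExitOnError)
	// Register all Ginkgo flags (e.g. --myprefix.seed, --myprefix.focus)
	// without the parallel-worker flags.
	config.Flags(fs, "myprefix", false)
	fs.Parse(os.Args[1:])
}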
func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
|
|
||||||
prefix = processPrefix(prefix)
|
|
||||||
result := make([]string, 0)
|
	if ginkgo.RandomSeed > 0 {
		result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
	}

	if ginkgo.RandomizeAllSpecs {
		result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
	}

	if ginkgo.SkipMeasurements {
		result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
	}

	if ginkgo.FailOnPending {
		result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
	}

	if ginkgo.FailFast {
		result = append(result, fmt.Sprintf("--%sfailFast", prefix))
	}

	if ginkgo.DryRun {
		result = append(result, fmt.Sprintf("--%sdryRun", prefix))
	}

	for _, s := range ginkgo.FocusStrings {
		result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, s))
	}

	for _, s := range ginkgo.SkipStrings {
		result = append(result, fmt.Sprintf("--%sskip=%s", prefix, s))
	}

	if ginkgo.FlakeAttempts > 1 {
		result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts))
	}

	if ginkgo.EmitSpecProgress {
		result = append(result, fmt.Sprintf("--%sprogress", prefix))
	}

	if ginkgo.DebugParallel {
		result = append(result, fmt.Sprintf("--%sdebug", prefix))
	}

	if ginkgo.ParallelNode != 0 {
		result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
	}

	if ginkgo.ParallelTotal != 0 {
		result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
	}

	if ginkgo.StreamHost != "" {
		result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
	}

	if ginkgo.SyncHost != "" {
		result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
	}

	if ginkgo.RegexScansFilePath {
		result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix))
	}

	if reporter.NoColor {
		result = append(result, fmt.Sprintf("--%snoColor", prefix))
	}

	if reporter.SlowSpecThreshold > 0 {
		result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
	}

	if !reporter.NoisyPendings {
		result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
	}

	if !reporter.NoisySkippings {
		result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
	}

	if reporter.Verbose {
		result = append(result, fmt.Sprintf("--%sv", prefix))
	}

	if reporter.Succinct {
		result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
	}

	if reporter.FullTrace {
		result = append(result, fmt.Sprintf("--%strace", prefix))
	}

	if reporter.ReportPassed {
		result = append(result, fmt.Sprintf("--%sreportPassed", prefix))
	}

	if reporter.ReportFile != "" {
		result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile))
	}

	return result
}

// flagFocus implements the -focus flag.
func flagFocus(arg string) {
	if arg != "" {
		GinkgoConfig.FocusStrings = append(GinkgoConfig.FocusStrings, arg)
	}
}

// flagSkip implements the -skip flag.
func flagSkip(arg string) {
	if arg != "" {
		GinkgoConfig.SkipStrings = append(GinkgoConfig.SkipStrings, arg)
	}
}
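For context, this appears to be the tail of ginkgo v1's config.BuildFlagArgs, which round-trips the in-memory config back into CLI flags (only non-default values are emitted). A minimal sketch of how a caller could use it, assuming the v1 config package API:

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/config"
)

func main() {
	// Mutate the in-memory config, then serialize it back into CLI flags.
	config.GinkgoConfig.RandomSeed = 4242
	config.GinkgoConfig.FocusStrings = []string{"Cluster"}
	config.DefaultReporterConfig.Verbose = true

	args := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	// Includes --ginkgo.seed=4242, --ginkgo.focus=Cluster and --ginkgo.v among
	// the emitted flags, plus whatever other fields differ from their defaults.
	fmt.Println(args)
}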
@@ -1,681 +0,0 @@
/*
Ginkgo is a BDD-style testing framework for Golang

The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/

Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)

Ginkgo on Github: http://github.com/onsi/ginkgo

Ginkgo is MIT-Licensed
*/
package ginkgo

import (
	"flag"
	"fmt"
	"io"
	"net/http"
	"os"
	"reflect"
	"strings"
	"time"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/global"
	"github.com/onsi/ginkgo/internal/remote"
	"github.com/onsi/ginkgo/internal/testingtproxy"
	"github.com/onsi/ginkgo/internal/writer"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/reporters/stenographer"
	colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
	"github.com/onsi/ginkgo/types"
)

var deprecationTracker = types.NewDeprecationTracker()

const GINKGO_VERSION = config.VERSION
const GINKGO_PANIC = `
Your test failed.
Ginkgo panics to prevent subsequent assertions from running.
Normally Ginkgo rescues this panic so you shouldn't see it.

But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
To circumvent this, you should call

	defer GinkgoRecover()

at the top of the goroutine that caused this panic.
`

func init() {
	config.Flags(flag.CommandLine, "ginkgo", true)
	GinkgoWriter = writer.New(os.Stdout)
}

//GinkgoWriter implements an io.Writer
//When running in verbose mode any writes to GinkgoWriter will be immediately printed
//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
//only if the current test fails.
var GinkgoWriter io.Writer

//The interface by which Ginkgo receives *testing.T
type GinkgoTestingT interface {
	Fail()
}

//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is
//useful for seeding your own pseudorandom number generators (PRNGs) to ensure
//consistent executions from run to run, where your tests contain variability (for
//example, when selecting random test data).
func GinkgoRandomSeed() int64 {
	return config.GinkgoConfig.RandomSeed
}

//GinkgoParallelNode is deprecated, use GinkgoParallelProcess instead
func GinkgoParallelNode() int {
	deprecationTracker.TrackDeprecation(types.Deprecations.ParallelNode(), codelocation.New(1))
	return GinkgoParallelProcess()
}

//GinkgoParallelProcess returns the parallel process number for the current ginkgo process
//The process number is 1-indexed
func GinkgoParallelProcess() int {
	return config.GinkgoConfig.ParallelNode
}

//Some matcher libraries or legacy codebases require a *testing.T
//GinkgoT implements an interface analogous to *testing.T and can be used if
//the library in question accepts *testing.T through an interface
//
//For example, with testify:
//	assert.Equal(GinkgoT(), 123, 123, "they should be equal")
//
//Or with gomock:
//	gomock.NewController(GinkgoT())
//
//GinkgoT() takes an optional offset argument that can be used to get the
//correct line number associated with the failure.
func GinkgoT(optionalOffset ...int) GinkgoTInterface {
	offset := 3
	if len(optionalOffset) > 0 {
		offset = optionalOffset[0]
	}
	failedFunc := func() bool {
		return CurrentGinkgoTestDescription().Failed
	}
	nameFunc := func() string {
		return CurrentGinkgoTestDescription().FullTestText
	}
	return testingtproxy.New(GinkgoWriter, Fail, Skip, failedFunc, nameFunc, offset)
}

//The interface returned by GinkgoT(). This covers most of the methods
//in the testing package's T.
type GinkgoTInterface interface {
	Cleanup(func())
	Setenv(key, value string)
	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Fail()
	FailNow()
	Failed() bool
	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Helper()
	Log(args ...interface{})
	Logf(format string, args ...interface{})
	Name() string
	Parallel()
	Skip(args ...interface{})
	SkipNow()
	Skipf(format string, args ...interface{})
	Skipped() bool
	TempDir() string
}

//Custom Ginkgo test reporters must implement the Reporter interface.
//
//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
//and a SpecSummary just before a spec begins and just after a spec ends
type Reporter reporters.Reporter

//Asynchronous specs are given a channel of the Done type. You must close or write to the channel
//to tell Ginkgo that your async test is done.
type Done chan<- interface{}

//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
//	FullTestText: a concatenation of ComponentTexts and the TestText
//	ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
//	TestText: the text in the actual It or Measure node
//	IsMeasurement: true if the current test is a measurement
//	FileName: the name of the file containing the current test
//	LineNumber: the line number for the current test
//	Failed: if the current test has failed, this will be true (useful in an AfterEach)
type GinkgoTestDescription struct {
	FullTestText   string
	ComponentTexts []string
	TestText       string

	IsMeasurement bool

	FileName   string
	LineNumber int

	Failed   bool
	Duration time.Duration
}

//CurrentGinkgoTestDescription returns information about the current running test.
func CurrentGinkgoTestDescription() GinkgoTestDescription {
	summary, ok := global.Suite.CurrentRunningSpecSummary()
	if !ok {
		return GinkgoTestDescription{}
	}

	subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]

	return GinkgoTestDescription{
		ComponentTexts: summary.ComponentTexts[1:],
		FullTestText:   strings.Join(summary.ComponentTexts[1:], " "),
		TestText:       summary.ComponentTexts[len(summary.ComponentTexts)-1],
		IsMeasurement:  summary.IsMeasurement,
		FileName:       subjectCodeLocation.FileName,
		LineNumber:     subjectCodeLocation.LineNumber,
		Failed:         summary.HasFailureState(),
		Duration:       summary.RunTime,
	}
}
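A common use of CurrentGinkgoTestDescription is dumping diagnostics only for failed specs from an AfterEach. A minimal sketch, where the log-collection helper is hypothetical:

var _ = AfterEach(func() {
	desc := CurrentGinkgoTestDescription()
	if desc.Failed {
		fmt.Fprintf(GinkgoWriter, "spec %q failed after %s, collecting logs\n", desc.FullTestText, desc.Duration)
		collectClusterLogs() // hypothetical helper; any diagnostic dump works here
	}
})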
//Measurement tests receive a Benchmarker.
//
//You use the Time() function to time how long the passed in body function takes to run
//You use the RecordValue() function to track arbitrary numerical measurements.
//The RecordValueWithPrecision() function can be used alternatively to provide the unit
//and resolution of the numeric measurement.
//The optional info argument is passed to the test reporter and can be used to
//provide the measurement data to a custom reporter with context.
//
//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
type Benchmarker interface {
	Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
	RecordValue(name string, value float64, info ...interface{})
	RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
}

//RunSpecs is the entry point for the Ginkgo test runner.
//You must call this within a Golang testing TestX(t *testing.T) function.
//
//To bootstrap a test suite you can use the Ginkgo CLI:
//
//	ginkgo bootstrap
func RunSpecs(t GinkgoTestingT, description string) bool {
	specReporters := []Reporter{buildDefaultReporter()}
	if config.DefaultReporterConfig.ReportFile != "" {
		reportFile := config.DefaultReporterConfig.ReportFile
		specReporters[0] = reporters.NewJUnitReporter(reportFile)
		specReporters = append(specReporters, buildDefaultReporter())
	}
	return runSpecsWithCustomReporters(t, description, specReporters)
}

//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
//RunSpecs() with this method.
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
	deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
	specReporters = append(specReporters, buildDefaultReporter())
	return runSpecsWithCustomReporters(t, description, specReporters)
}

//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
//RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter
func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
	deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
	return runSpecsWithCustomReporters(t, description, specReporters)
}

func runSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
	writer := GinkgoWriter.(*writer.Writer)
	writer.SetStream(config.DefaultReporterConfig.Verbose)
	reporters := make([]reporters.Reporter, len(specReporters))
	for i, reporter := range specReporters {
		reporters[i] = reporter
	}
	passed, hasFocusedTests := global.Suite.Run(t, description, reporters, writer, config.GinkgoConfig)

	if deprecationTracker.DidTrackDeprecations() {
		fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport())
	}

	if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
		fmt.Println("PASS | FOCUSED")
		os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
	}
	return passed
}

func buildDefaultReporter() Reporter {
	remoteReportingServer := config.GinkgoConfig.StreamHost
	if remoteReportingServer == "" {
		stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
		return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
	} else {
		debugFile := ""
		if config.GinkgoConfig.DebugParallel {
			debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode)
		}
		return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile)
	}
}

//Skip notifies Ginkgo that the current spec was skipped.
func Skip(message string, callerSkip ...int) {
	skip := 0
	if len(callerSkip) > 0 {
		skip = callerSkip[0]
	}

	global.Failer.Skip(message, codelocation.New(skip+1))
	panic(GINKGO_PANIC)
}

//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
func Fail(message string, callerSkip ...int) {
	skip := 0
	if len(callerSkip) > 0 {
		skip = callerSkip[0]
	}

	global.Failer.Fail(message, codelocation.New(skip+1))
	panic(GINKGO_PANIC)
}

//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
//calls out to Gomega
//
//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
//further assertions from running. This panic must be recovered. Ginkgo does this for you
//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
//
//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
func GinkgoRecover() {
	e := recover()
	if e != nil {
		global.Failer.Panic(codelocation.New(1), e)
	}
}
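To make the GinkgoRecover contract concrete, a minimal sketch of the goroutine pattern the comment above describes, assuming Gomega for the assertion and a hypothetical doWork helper:

It("asserts from a spawned goroutine", func() {
	done := make(chan struct{})
	go func() {
		defer GinkgoRecover() // rescues the panic that Fail/Expect raises on this goroutine
		defer close(done)
		Expect(doWork()).To(Succeed()) // doWork is a hypothetical helper under test
	}()
	<-done // keep the spec alive until the goroutine has asserted
})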
//Describe blocks allow you to organize your specs. A Describe block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts and Whens.
func Describe(text string, body func()) bool {
	global.Suite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
	return true
}

//You can focus the tests within a describe block using FDescribe
func FDescribe(text string, body func()) bool {
	global.Suite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
	return true
}

//You can mark the tests within a describe block as pending using PDescribe
func PDescribe(text string, body func()) bool {
	global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
	return true
}

//You can mark the tests within a describe block as pending using XDescribe
func XDescribe(text string, body func()) bool {
	global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
	return true
}

//Context blocks allow you to organize your specs. A Context block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts and Whens.
func Context(text string, body func()) bool {
	global.Suite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
	return true
}

//You can focus the tests within a describe block using FContext
func FContext(text string, body func()) bool {
	global.Suite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
	return true
}

//You can mark the tests within a describe block as pending using PContext
func PContext(text string, body func()) bool {
	global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
	return true
}

//You can mark the tests within a describe block as pending using XContext
func XContext(text string, body func()) bool {
	global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
	return true
}

//When blocks allow you to organize your specs. A When block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts and Whens.
func When(text string, body func()) bool {
	global.Suite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1))
	return true
}

//You can focus the tests within a describe block using FWhen
func FWhen(text string, body func()) bool {
	global.Suite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1))
	return true
}

//You can mark the tests within a describe block as pending using PWhen
func PWhen(text string, body func()) bool {
	global.Suite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
	return true
}

//You can mark the tests within a describe block as pending using XWhen
func XWhen(text string, body func()) bool {
	global.Suite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
	return true
}

//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
//within an It block.
//
//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
func It(text string, body interface{}, timeout ...float64) bool {
	validateBodyFunc(body, codelocation.New(1))
	global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
	return true
}
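A minimal sketch of the async It form described above: the body takes a Done channel and the optional trailing float is the timeout in seconds (slowOperation is a hypothetical function under test).

It("finishes the slow operation within five seconds", func(done Done) {
	go func() {
		defer GinkgoRecover()
		slowOperation() // hypothetical function under test
		close(done)     // signal Ginkgo that the async spec is finished
	}()
}, 5.0)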
//You can focus individual Its using FIt
func FIt(text string, body interface{}, timeout ...float64) bool {
	validateBodyFunc(body, codelocation.New(1))
	global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
	return true
}

//You can mark Its as pending using PIt
func PIt(text string, _ ...interface{}) bool {
	global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
	return true
}

//You can mark Its as pending using XIt
func XIt(text string, _ ...interface{}) bool {
	global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
	return true
}

//Specify blocks are aliases for It blocks and allow for more natural wording in situations
//in which "It" does not fit into a natural sentence flow. All the same protocols that apply
//to It blocks apply to Specify blocks.
func Specify(text string, body interface{}, timeout ...float64) bool {
	validateBodyFunc(body, codelocation.New(1))
	global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
	return true
}

//You can focus individual Specifys using FSpecify
func FSpecify(text string, body interface{}, timeout ...float64) bool {
	validateBodyFunc(body, codelocation.New(1))
	global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
	return true
}

//You can mark Specifys as pending using PSpecify
func PSpecify(text string, is ...interface{}) bool {
	global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
	return true
}

//You can mark Specifys as pending using XSpecify
func XSpecify(text string, is ...interface{}) bool {
	global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
	return true
}

//By allows you to better document large Its.
//
//Generally you should try to keep your Its short and to the point. This is not always possible, however,
//especially in the context of integration tests that capture a particular workflow.
//
//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...)
//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
func By(text string, callbacks ...func()) {
	preamble := "\x1b[1mSTEP\x1b[0m"
	if config.DefaultReporterConfig.NoColor {
		preamble = "STEP"
	}
	fmt.Fprintln(GinkgoWriter, preamble+": "+text)
	if len(callbacks) == 1 {
		callbacks[0]()
	}
	if len(callbacks) > 1 {
		panic("just one callback per By, please")
	}
}
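A short sketch of By in a workflow-style It, showing both the bare and callback forms (the account/login helpers are hypothetical; Gomega assumed):

It("registers and signs in a user", func() {
	By("creating the account")
	user := createAccount("jane@example.com") // hypothetical helper

	By("signing in", func() {
		Expect(login(user)).To(Succeed()) // hypothetical helper
	})
})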
//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
//and accumulate metrics provided to the Benchmarker by the body function.
//
//The body function must have the signature:
//	func(b Benchmarker)
func Measure(text string, body interface{}, samples int) bool {
	deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
	global.Suite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
	return true
}
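A minimal sketch of a Measure block wired to the Benchmarker interface above (deprecated in v2 in favor of gmeasure); sortSlice and testData are hypothetical:

var _ = Measure("sorting is fast enough", func(b Benchmarker) {
	runtime := b.Time("sort", func() {
		sortSlice(testData) // hypothetical function and fixture
	})
	Expect(runtime.Seconds()).To(BeNumerically("<", 0.2)) // Gomega assumed
	b.RecordValue("items sorted", float64(len(testData)))
}, 10) // run the body 10 times and aggregate the metrics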
//You can focus individual Measures using FMeasure
func FMeasure(text string, body interface{}, samples int) bool {
	deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
	global.Suite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
	return true
}

//You can mark Measurements as pending using PMeasure
func PMeasure(text string, _ ...interface{}) bool {
	deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
	global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
	return true
}

//You can mark Measurements as pending using XMeasure
func XMeasure(text string, _ ...interface{}) bool {
	deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
	global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
	return true
}

//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
//parallel node process will call BeforeSuite.
//
//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
//
//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
func BeforeSuite(body interface{}, timeout ...float64) bool {
	validateBodyFunc(body, codelocation.New(1))
	global.Suite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
	return true
}

//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
//
//When running in parallel, each parallel node process will call AfterSuite.
//
//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
//
//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
func AfterSuite(body interface{}, timeout ...float64) bool {
	validateBodyFunc(body, codelocation.New(1))
	global.Suite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
	return true
}

//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that
//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait
//until that node is done before running.
//
//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
//to the second function (on all the other nodes).
//
//The functions have the following signatures. The first function (which only runs on node 1) has the signature:
//
//	func() []byte
//
//or, to run asynchronously:
//
//	func(done Done) []byte
//
//The byte array returned by the first function is then passed to the second function, which has the signature:
//
//	func(data []byte)
//
//or, to run asynchronously:
//
//	func(data []byte, done Done)
//
//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
//
//	var dbClient db.Client
//	var dbRunner db.Runner
//
//	var _ = SynchronizedBeforeSuite(func() []byte {
//		dbRunner = db.NewRunner()
//		err := dbRunner.Start()
//		Ω(err).ShouldNot(HaveOccurred())
//		return []byte(dbRunner.URL)
//	}, func(data []byte) {
//		dbClient = db.NewClient()
//		err := dbClient.Connect(string(data))
//		Ω(err).ShouldNot(HaveOccurred())
//	})
func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
	global.Suite.SetSynchronizedBeforeSuiteNode(
		node1Body,
		allNodesBody,
		codelocation.New(1),
		parseTimeout(timeout...),
	)
	return true
}

//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
//external singleton resources shared across nodes when running tests in parallel.
//
//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1
//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until
//all other nodes are finished.
//
//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
//
//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database
//only after all nodes have finished:
//
//	var _ = SynchronizedAfterSuite(func() {
//		dbClient.Cleanup()
//	}, func() {
//		dbRunner.Stop()
//	})
func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
	global.Suite.SetSynchronizedAfterSuiteNode(
		allNodesBody,
		node1Body,
		codelocation.New(1),
		parseTimeout(timeout...),
	)
	return true
}

//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested
//Describe and Context blocks the outermost BeforeEach blocks are run first.
//
//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
//a Done channel
func BeforeEach(body interface{}, timeout ...float64) bool {
	validateBodyFunc(body, codelocation.New(1))
	global.Suite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
	return true
}

//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details,
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
//
//Like It blocks, JustBeforeEach blocks can be made asynchronous by providing a body function that accepts
//a Done channel
func JustBeforeEach(body interface{}, timeout ...float64) bool {
	validateBodyFunc(body, codelocation.New(1))
	global.Suite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
	return true
}

//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. For more details,
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
//
//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts
//a Done channel
func JustAfterEach(body interface{}, timeout ...float64) bool {
	validateBodyFunc(body, codelocation.New(1))
	global.Suite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
	return true
}

//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
//Describe and Context blocks the innermost AfterEach blocks are run first.
//
//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
//a Done channel
func AfterEach(body interface{}, timeout ...float64) bool {
	validateBodyFunc(body, codelocation.New(1))
	global.Suite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
	return true
}

func validateBodyFunc(body interface{}, cl types.CodeLocation) {
	t := reflect.TypeOf(body)
	if t.Kind() != reflect.Func {
		return
	}

	if t.NumOut() > 0 {
		return
	}

	if t.NumIn() == 0 {
		return
	}

	if t.In(0) == reflect.TypeOf(make(Done)) {
		deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl)
	}
}

func parseTimeout(timeout ...float64) time.Duration {
	if len(timeout) == 0 {
		return global.DefaultTimeout
	} else {
		return time.Duration(timeout[0] * float64(time.Second))
	}
}
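Worth noting: the trailing float timeouts throughout this file are seconds, converted here into a time.Duration; with no argument the global default (one second, per global.DefaultTimeout below) applies. A tiny sketch of the two forms:

It("waits with the default timeout", func(done Done) { close(done) })      // parseTimeout() -> global.DefaultTimeout (1s)
It("waits up to 2.5 seconds", func(done Done) { close(done) }, 2.5)        // parseTimeout(2.5) -> 2500ms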
48	vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go (generated, vendored)
@@ -1,48 +0,0 @@
package codelocation

import (
	"regexp"
	"runtime"
	"runtime/debug"
	"strings"

	"github.com/onsi/ginkgo/types"
)

func New(skip int) types.CodeLocation {
	_, file, line, _ := runtime.Caller(skip + 1)
	stackTrace := PruneStack(string(debug.Stack()), skip+1)
	return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
}

// PruneStack removes references to functions that are internal to Ginkgo
// and the Go runtime from a stack string and a certain number of stack entries
// at the beginning of the stack. The stack string has the format
// as returned by runtime/debug.Stack. The leading goroutine information is
// optional and always removed if present. Beware that runtime/debug.Stack
// adds itself as first entry, so typically skip must be >= 1 to remove that
// entry.
func PruneStack(fullStackTrace string, skip int) string {
	stack := strings.Split(fullStackTrace, "\n")
	// Ensure that the even entries are the method names and
	// the odd entries the source code information.
	if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
		// Ignore "goroutine 29 [running]:" line.
		stack = stack[1:]
	}
	// The "+1" is for skipping over the initial entry, which is
	// runtime/debug.Stack() itself.
	if len(stack) > 2*(skip+1) {
		stack = stack[2*(skip+1):]
	}
	prunedStack := []string{}
	re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
	for i := 0; i < len(stack)/2; i++ {
		// We filter out based on the source code file name.
		if !re.Match([]byte(stack[i*2+1])) {
			prunedStack = append(prunedStack, stack[i*2])
			prunedStack = append(prunedStack, stack[i*2+1])
		}
	}
	return strings.Join(prunedStack, "\n")
}
151	vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go (generated, vendored)
@@ -1,151 +0,0 @@
package containernode

import (
	"math/rand"
	"sort"

	"github.com/onsi/ginkgo/internal/leafnodes"
	"github.com/onsi/ginkgo/types"
)

type subjectOrContainerNode struct {
	containerNode *ContainerNode
	subjectNode   leafnodes.SubjectNode
}

func (n subjectOrContainerNode) text() string {
	if n.containerNode != nil {
		return n.containerNode.Text()
	} else {
		return n.subjectNode.Text()
	}
}

type CollatedNodes struct {
	Containers []*ContainerNode
	Subject    leafnodes.SubjectNode
}

type ContainerNode struct {
	text         string
	flag         types.FlagType
	codeLocation types.CodeLocation

	setupNodes               []leafnodes.BasicNode
	subjectAndContainerNodes []subjectOrContainerNode
}

func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
	return &ContainerNode{
		text:         text,
		flag:         flag,
		codeLocation: codeLocation,
	}
}

func (container *ContainerNode) Shuffle(r *rand.Rand) {
	sort.Sort(container)
	permutation := r.Perm(len(container.subjectAndContainerNodes))
	shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
	for i, j := range permutation {
		shuffledNodes[i] = container.subjectAndContainerNodes[j]
	}
	container.subjectAndContainerNodes = shuffledNodes
}

func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
	if node.flag == types.FlagTypePending {
		return false
	}

	shouldUnfocus := false
	for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
		if subjectOrContainerNode.containerNode != nil {
			shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
		} else {
			shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
		}
	}

	if shouldUnfocus {
		if node.flag == types.FlagTypeFocused {
			node.flag = types.FlagTypeNone
		}
		return true
	}

	return node.flag == types.FlagTypeFocused
}

func (node *ContainerNode) Collate() []CollatedNodes {
	return node.collate([]*ContainerNode{})
}

func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
	collated := make([]CollatedNodes, 0)

	containers := make([]*ContainerNode, len(enclosingContainers))
	copy(containers, enclosingContainers)
	containers = append(containers, node)

	for _, subjectOrContainer := range node.subjectAndContainerNodes {
		if subjectOrContainer.containerNode != nil {
			collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
		} else {
			collated = append(collated, CollatedNodes{
				Containers: containers,
				Subject:    subjectOrContainer.subjectNode,
			})
		}
	}

	return collated
}

func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
	node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
}

func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
	node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
}

func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
	node.setupNodes = append(node.setupNodes, setupNode)
}

func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
	nodes := []leafnodes.BasicNode{}
	for _, setupNode := range node.setupNodes {
		if setupNode.Type() == nodeType {
			nodes = append(nodes, setupNode)
		}
	}
	return nodes
}

func (node *ContainerNode) Text() string {
	return node.text
}

func (node *ContainerNode) CodeLocation() types.CodeLocation {
	return node.codeLocation
}

func (node *ContainerNode) Flag() types.FlagType {
	return node.flag
}

//sort.Interface

func (node *ContainerNode) Len() int {
	return len(node.subjectAndContainerNodes)
}

func (node *ContainerNode) Less(i, j int) bool {
	return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
}

func (node *ContainerNode) Swap(i, j int) {
	node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
}
@@ -1,22 +0,0 @@
package global

import (
	"time"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/internal/suite"
)

const DefaultTimeout = time.Duration(1 * time.Second)

var Suite *suite.Suite
var Failer *failer.Failer

func init() {
	InitializeGlobals()
}

func InitializeGlobals() {
	Failer = failer.New()
	Suite = suite.New(Failer)
}
@@ -1,103 +0,0 @@
package leafnodes

import (
	"math"
	"sync"
	"time"

	"github.com/onsi/ginkgo/types"
)

type benchmarker struct {
	mu           sync.Mutex
	measurements map[string]*types.SpecMeasurement
	orderCounter int
}

func newBenchmarker() *benchmarker {
	return &benchmarker{
		measurements: make(map[string]*types.SpecMeasurement),
	}
}

func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
	t := time.Now()
	body()
	elapsedTime = time.Since(t)

	b.mu.Lock()
	defer b.mu.Unlock()
	measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...)
	measurement.Results = append(measurement.Results, elapsedTime.Seconds())

	return
}

func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
	b.mu.Lock()
	defer b.mu.Unlock()
	measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
	measurement.Results = append(measurement.Results, value)
}

func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) {
	b.mu.Lock()
	defer b.mu.Unlock()
	measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
	measurement.Results = append(measurement.Results, value)
}

func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement {
	measurement, ok := b.measurements[name]
	if !ok {
		var computedInfo interface{}
		if len(info) > 0 {
			computedInfo = info[0]
		}
		measurement = &types.SpecMeasurement{
			Name:          name,
			Info:          computedInfo,
			Order:         b.orderCounter,
			SmallestLabel: smallestLabel,
			LargestLabel:  largestLabel,
			AverageLabel:  averageLabel,
			Units:         units,
			Precision:     precision,
			Results:       make([]float64, 0),
		}
		b.measurements[name] = measurement
		b.orderCounter++
	}

	return measurement
}

func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
	b.mu.Lock()
	defer b.mu.Unlock()
	for _, measurement := range b.measurements {
		measurement.Smallest = math.MaxFloat64
		measurement.Largest = -math.MaxFloat64
		sum := float64(0)
		sumOfSquares := float64(0)

		for _, result := range measurement.Results {
			if result > measurement.Largest {
				measurement.Largest = result
			}
			if result < measurement.Smallest {
				measurement.Smallest = result
			}
			sum += result
			sumOfSquares += result * result
		}

		n := float64(len(measurement.Results))
		measurement.Average = sum / n
		measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
	}

	return b.measurements
}
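The StdDeviation line above computes the population standard deviation via the identity σ² = E[x²] − (E[x])², accumulating only a sum and a sum of squares in one pass. A small worked example for Results = {1, 2, 3}:

σ = sqrt(Σxᵢ²/n − (Σxᵢ/n)²)
  = sqrt(14/3 − (6/3)²) = sqrt(2/3) ≈ 0.8165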
@@ -1,19 +0,0 @@
package leafnodes

import (
	"github.com/onsi/ginkgo/types"
)

type BasicNode interface {
	Type() types.SpecComponentType
	Run() (types.SpecState, types.SpecFailure)
	CodeLocation() types.CodeLocation
}

type SubjectNode interface {
	BasicNode

	Text() string
	Flag() types.FlagType
	Samples() int
}
@@ -1,47 +0,0 @@
package leafnodes

import (
	"time"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type ItNode struct {
	runner *runner

	flag types.FlagType
	text string
}

func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
	return &ItNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
		flag:   flag,
		text:   text,
	}
}

func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
	return node.runner.run()
}

func (node *ItNode) Type() types.SpecComponentType {
	return types.SpecComponentTypeIt
}

func (node *ItNode) Text() string {
	return node.text
}

func (node *ItNode) Flag() types.FlagType {
	return node.flag
}

func (node *ItNode) CodeLocation() types.CodeLocation {
	return node.runner.codeLocation
}

func (node *ItNode) Samples() int {
	return 1
}
@@ -1,62 +0,0 @@
package leafnodes

import (
	"reflect"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type MeasureNode struct {
	runner *runner

	text        string
	flag        types.FlagType
	samples     int
	benchmarker *benchmarker
}

func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
	benchmarker := newBenchmarker()

	wrappedBody := func() {
		reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
	}

	return &MeasureNode{
		runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),

		text:        text,
		flag:        flag,
		samples:     samples,
		benchmarker: benchmarker,
	}
}

func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
	return node.runner.run()
}

func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
	return node.benchmarker.measurementsReport()
}

func (node *MeasureNode) Type() types.SpecComponentType {
	return types.SpecComponentTypeMeasure
}

func (node *MeasureNode) Text() string {
	return node.text
}

func (node *MeasureNode) Flag() types.FlagType {
	return node.flag
}

func (node *MeasureNode) CodeLocation() types.CodeLocation {
	return node.runner.codeLocation
}

func (node *MeasureNode) Samples() int {
	return node.samples
}
@@ -1,117 +0,0 @@
package leafnodes

import (
	"fmt"
	"reflect"
	"time"

	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type runner struct {
	isAsync          bool
	asyncFunc        func(chan<- interface{})
	syncFunc         func()
	codeLocation     types.CodeLocation
	timeoutThreshold time.Duration
	nodeType         types.SpecComponentType
	componentIndex   int
	failer           *failer.Failer
}

func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
	bodyType := reflect.TypeOf(body)
	if bodyType.Kind() != reflect.Func {
		panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
	}

	runner := &runner{
		codeLocation:     codeLocation,
		timeoutThreshold: timeout,
		failer:           failer,
		nodeType:         nodeType,
		componentIndex:   componentIndex,
	}

	switch bodyType.NumIn() {
	case 0:
		runner.syncFunc = body.(func())
		return runner
	case 1:
		if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
			panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
		}

		wrappedBody := func(done chan<- interface{}) {
			bodyValue := reflect.ValueOf(body)
			bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
		}

		runner.isAsync = true
		runner.asyncFunc = wrappedBody
		return runner
	}

	panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
}

func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
	if r.isAsync {
		return r.runAsync()
	} else {
		return r.runSync()
	}
}

func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
	done := make(chan interface{}, 1)

	go func() {
		finished := false

		defer func() {
			if e := recover(); e != nil || !finished {
				r.failer.Panic(codelocation.New(2), e)
				select {
				case <-done:
					break
				default:
					close(done)
				}
			}
		}()

		r.asyncFunc(done)
		finished = true
	}()

	// If this goroutine gets no CPU time before the select block,
	// the <-done case may complete even if the test took longer than the timeoutThreshold.
	// This can cause flaky behaviour, but we haven't seen it in the wild.
	select {
	case <-done:
	case <-time.After(r.timeoutThreshold):
		r.failer.Timeout(r.codeLocation)
	}

	failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
	return
}

func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
	finished := false

	defer func() {
		if e := recover(); e != nil || !finished {
			r.failer.Panic(codelocation.New(2), e)
		}

		failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
	}()

	r.syncFunc()
	finished = true

	return
}
@@ -1,48 +0,0 @@
package leafnodes

import (
	"time"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type SetupNode struct {
	runner *runner
}

func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
	return node.runner.run()
}

func (node *SetupNode) Type() types.SpecComponentType {
	return node.runner.nodeType
}

func (node *SetupNode) CodeLocation() types.CodeLocation {
	return node.runner.codeLocation
}

func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
	}
}

func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
	}
}

func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
	}
}

func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex),
	}
}
|
|
|
@ -1,55 +0,0 @@
package leafnodes

import (
	"time"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type SuiteNode interface {
	Run(parallelNode int, parallelTotal int, syncHost string) bool
	Passed() bool
	Summary() *types.SetupSummary
}

type simpleSuiteNode struct {
	runner  *runner
	outcome types.SpecState
	failure types.SpecFailure
	runTime time.Duration
}

func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
	t := time.Now()
	node.outcome, node.failure = node.runner.run()
	node.runTime = time.Since(t)

	return node.outcome == types.SpecStatePassed
}

func (node *simpleSuiteNode) Passed() bool {
	return node.outcome == types.SpecStatePassed
}

func (node *simpleSuiteNode) Summary() *types.SetupSummary {
	return &types.SetupSummary{
		ComponentType: node.runner.nodeType,
		CodeLocation:  node.runner.codeLocation,
		State:         node.outcome,
		RunTime:       node.runTime,
		Failure:       node.failure,
	}
}

func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	return &simpleSuiteNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
	}
}

func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	return &simpleSuiteNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
	}
}
90
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
generated
vendored
@ -1,90 +0,0 @@
package leafnodes

import (
	"encoding/json"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type synchronizedAfterSuiteNode struct {
	runnerA *runner
	runnerB *runner

	outcome types.SpecState
	failure types.SpecFailure
	runTime time.Duration
}

func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	return &synchronizedAfterSuiteNode{
		runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
		runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
	}
}

func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
	node.outcome, node.failure = node.runnerA.run()

	if parallelNode == 1 {
		if parallelTotal > 1 {
			node.waitUntilOtherNodesAreDone(syncHost)
		}

		outcome, failure := node.runnerB.run()

		if node.outcome == types.SpecStatePassed {
			node.outcome, node.failure = outcome, failure
		}
	}

	return node.outcome == types.SpecStatePassed
}

func (node *synchronizedAfterSuiteNode) Passed() bool {
	return node.outcome == types.SpecStatePassed
}

func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
	return &types.SetupSummary{
		ComponentType: node.runnerA.nodeType,
		CodeLocation:  node.runnerA.codeLocation,
		State:         node.outcome,
		RunTime:       node.runTime,
		Failure:       node.failure,
	}
}

func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
	for {
		if node.canRun(syncHost) {
			return
		}

		time.Sleep(50 * time.Millisecond)
	}
}

func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
	resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
	if err != nil || resp.StatusCode != http.StatusOK {
		return false
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return false
	}
	resp.Body.Close()

	afterSuiteData := types.RemoteAfterSuiteData{}
	err = json.Unmarshal(body, &afterSuiteData)
	if err != nil {
		return false
	}

	return afterSuiteData.CanRun
}
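Aside: waitUntilOtherNodesAreDone above is a plain HTTP polling loop — GET a JSON flag, sleep, retry. A minimal self-contained sketch of the same loop against a fake sync server (the endpoint path matches the vendored code; everything else is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

// canRun mirrors the polling check above: GET a JSON document and report
// whether its CanRun flag is set.
func canRun(syncHost string) bool {
	resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
	if err != nil || resp.StatusCode != http.StatusOK {
		return false
	}
	defer resp.Body.Close()

	var data struct{ CanRun bool }
	return json.NewDecoder(resp.Body).Decode(&data) == nil && data.CanRun
}

func main() {
	// A fake sync server that says "go ahead" immediately.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		json.NewEncoder(w).Encode(map[string]bool{"CanRun": true})
	}))
	defer server.Close()

	for !canRun(server.URL) {
		time.Sleep(50 * time.Millisecond) // same backoff as the vendored loop
	}
	fmt.Println("all other nodes are done")
}
```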
181
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
generated
vendored
@ -1,181 +0,0 @@
package leafnodes

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"
	"reflect"
	"time"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type synchronizedBeforeSuiteNode struct {
	runnerA *runner
	runnerB *runner

	data []byte

	outcome types.SpecState
	failure types.SpecFailure
	runTime time.Duration
}

func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	node := &synchronizedBeforeSuiteNode{}

	node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
	node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)

	return node
}

func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
	t := time.Now()
	defer func() {
		node.runTime = time.Since(t)
	}()

	if parallelNode == 1 {
		node.outcome, node.failure = node.runA(parallelTotal, syncHost)
	} else {
		node.outcome, node.failure = node.waitForA(syncHost)
	}

	if node.outcome != types.SpecStatePassed {
		return false
	}
	node.outcome, node.failure = node.runnerB.run()

	return node.outcome == types.SpecStatePassed
}

func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
	outcome, failure := node.runnerA.run()

	if parallelTotal > 1 {
		state := types.RemoteBeforeSuiteStatePassed
		if outcome != types.SpecStatePassed {
			state = types.RemoteBeforeSuiteStateFailed
		}
		json := (types.RemoteBeforeSuiteData{
			Data:  node.data,
			State: state,
		}).ToJSON()
		http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
	}

	return outcome, failure
}

func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
	failure := func(message string) types.SpecFailure {
		return types.SpecFailure{
			Message:               message,
			Location:              node.runnerA.codeLocation,
			ComponentType:         node.runnerA.nodeType,
			ComponentIndex:        node.runnerA.componentIndex,
			ComponentCodeLocation: node.runnerA.codeLocation,
		}
	}
	for {
		resp, err := http.Get(syncHost + "/BeforeSuiteState")
		if err != nil || resp.StatusCode != http.StatusOK {
			return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
		}

		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
		}
		resp.Body.Close()

		beforeSuiteData := types.RemoteBeforeSuiteData{}
		err = json.Unmarshal(body, &beforeSuiteData)
		if err != nil {
			return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
		}

		switch beforeSuiteData.State {
		case types.RemoteBeforeSuiteStatePassed:
			node.data = beforeSuiteData.Data
			return types.SpecStatePassed, types.SpecFailure{}
		case types.RemoteBeforeSuiteStateFailed:
			return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
		case types.RemoteBeforeSuiteStateDisappeared:
			return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
		}

		time.Sleep(50 * time.Millisecond)
	}
}

func (node *synchronizedBeforeSuiteNode) Passed() bool {
	return node.outcome == types.SpecStatePassed
}

func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
	return &types.SetupSummary{
		ComponentType: node.runnerA.nodeType,
		CodeLocation:  node.runnerA.codeLocation,
		State:         node.outcome,
		RunTime:       node.runTime,
		Failure:       node.failure,
	}
}

func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
	typeA := reflect.TypeOf(bodyA)
	if typeA.Kind() != reflect.Func {
		panic("SynchronizedBeforeSuite expects a function as its first argument")
	}

	takesNothing := typeA.NumIn() == 0
	takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
	returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8

	if !((takesNothing || takesADoneChannel) && returnsBytes) {
		panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
	}

	if takesADoneChannel {
		return func(done chan<- interface{}) {
			out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
			node.data = out[0].Interface().([]byte)
		}
	}

	return func() {
		out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
		node.data = out[0].Interface().([]byte)
	}
}

func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
	typeB := reflect.TypeOf(bodyB)
	if typeB.Kind() != reflect.Func {
		panic("SynchronizedBeforeSuite expects a function as its second argument")
	}

	returnsNothing := typeB.NumOut() == 0
	takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
	takesBytesAndDone := typeB.NumIn() == 2 &&
		typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
		typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface

	if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
		panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
	}

	if takesBytesAndDone {
		return func(done chan<- interface{}) {
			reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
		}
	}

	return func() {
		reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
	}
}
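Aside: wrapA validates its argument's signature with the reflect package before wrapping it. A tiny runnable sketch of the same reflection check (the helper name is illustrative):

```go
package main

import (
	"fmt"
	"reflect"
)

// returnsBytes performs the same reflection test wrapA applies to its
// first argument: a func whose single result is []byte.
func returnsBytes(fn interface{}) bool {
	t := reflect.TypeOf(fn)
	if t == nil || t.Kind() != reflect.Func {
		return false
	}
	return t.NumOut() == 1 &&
		t.Out(0).Kind() == reflect.Slice &&
		t.Out(0).Elem().Kind() == reflect.Uint8
}

func main() {
	fmt.Println(returnsBytes(func() []byte { return nil }))          // true
	fmt.Println(returnsBytes(func() (int, error) { return 0, nil })) // false
}
```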
@ -1,249 +0,0 @@
/*

Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:

	ginkgo -nodes=N

where N is the number of nodes you desire.
*/
package remote

import (
	"time"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/reporters/stenographer"
	"github.com/onsi/ginkgo/types"
)

type configAndSuite struct {
	config  config.GinkgoConfigType
	summary *types.SuiteSummary
}

type Aggregator struct {
	nodeCount    int
	config       config.DefaultReporterConfigType
	stenographer stenographer.Stenographer
	result       chan bool

	suiteBeginnings           chan configAndSuite
	aggregatedSuiteBeginnings []configAndSuite

	beforeSuites           chan *types.SetupSummary
	aggregatedBeforeSuites []*types.SetupSummary

	afterSuites           chan *types.SetupSummary
	aggregatedAfterSuites []*types.SetupSummary

	specCompletions chan *types.SpecSummary
	completedSpecs  []*types.SpecSummary

	suiteEndings           chan *types.SuiteSummary
	aggregatedSuiteEndings []*types.SuiteSummary
	specs                  []*types.SpecSummary

	startTime time.Time
}

func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
	aggregator := &Aggregator{
		nodeCount:    nodeCount,
		result:       result,
		config:       config,
		stenographer: stenographer,

		suiteBeginnings: make(chan configAndSuite),
		beforeSuites:    make(chan *types.SetupSummary),
		afterSuites:     make(chan *types.SetupSummary),
		specCompletions: make(chan *types.SpecSummary),
		suiteEndings:    make(chan *types.SuiteSummary),
	}

	go aggregator.mux()

	return aggregator
}

func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	aggregator.suiteBeginnings <- configAndSuite{config, summary}
}

func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	aggregator.beforeSuites <- setupSummary
}

func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	aggregator.afterSuites <- setupSummary
}

func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
	//noop
}

func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
	aggregator.specCompletions <- specSummary
}

func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	aggregator.suiteEndings <- summary
}

func (aggregator *Aggregator) mux() {
loop:
	for {
		select {
		case configAndSuite := <-aggregator.suiteBeginnings:
			aggregator.registerSuiteBeginning(configAndSuite)
		case setupSummary := <-aggregator.beforeSuites:
			aggregator.registerBeforeSuite(setupSummary)
		case setupSummary := <-aggregator.afterSuites:
			aggregator.registerAfterSuite(setupSummary)
		case specSummary := <-aggregator.specCompletions:
			aggregator.registerSpecCompletion(specSummary)
		case suite := <-aggregator.suiteEndings:
			finished, passed := aggregator.registerSuiteEnding(suite)
			if finished {
				aggregator.result <- passed
				break loop
			}
		}
	}
}

func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
	aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)

	if len(aggregator.aggregatedSuiteBeginnings) == 1 {
		aggregator.startTime = time.Now()
	}

	if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
		return
	}

	aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)

	totalNumberOfSpecs := 0
	if len(aggregator.aggregatedSuiteBeginnings) > 0 {
		totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
	}

	aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
	aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
	aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
	aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
	aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
	aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
	aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
	aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
	aggregator.specs = append(aggregator.specs, specSummary)
	aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) flushCompletedSpecs() {
	if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
		return
	}

	for _, setupSummary := range aggregator.aggregatedBeforeSuites {
		aggregator.announceBeforeSuite(setupSummary)
	}

	for _, specSummary := range aggregator.completedSpecs {
		aggregator.announceSpec(specSummary)
	}

	for _, setupSummary := range aggregator.aggregatedAfterSuites {
		aggregator.announceAfterSuite(setupSummary)
	}

	aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
	aggregator.completedSpecs = []*types.SpecSummary{}
	aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
}

func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
	aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
	if setupSummary.State != types.SpecStatePassed {
		aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	}
}

func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
	aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
	if setupSummary.State != types.SpecStatePassed {
		aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	}
}

func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
	if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
		aggregator.stenographer.AnnounceSpecWillRun(specSummary)
	}

	aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)

	switch specSummary.State {
	case types.SpecStatePassed:
		if specSummary.IsMeasurement {
			aggregator.stenographer.AnnounceSuccessfulMeasurement(specSummary, aggregator.config.Succinct)
		} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
			aggregator.stenographer.AnnounceSuccessfulSlowSpec(specSummary, aggregator.config.Succinct)
		} else {
			aggregator.stenographer.AnnounceSuccessfulSpec(specSummary)
		}

	case types.SpecStatePending:
		aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
	case types.SpecStateSkipped:
		aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
	case types.SpecStateTimedOut:
		aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	case types.SpecStatePanicked:
		aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	case types.SpecStateFailed:
		aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	}
}

func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
	aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
	if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
		return false, false
	}

	aggregatedSuiteSummary := &types.SuiteSummary{}
	aggregatedSuiteSummary.SuiteSucceeded = true

	for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
		if !suiteSummary.SuiteSucceeded {
			aggregatedSuiteSummary.SuiteSucceeded = false
		}

		aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
		aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
		aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
		aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
		aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
		aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
		aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs
	}

	aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)

	aggregator.stenographer.SummarizeFailures(aggregator.specs)
	aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)

	return true, aggregatedSuiteSummary.SuiteSucceeded
}
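Aside: the Aggregator's mux loop is a fan-in — events from several channels are funneled through one goroutine, so the internal slices need no locking. A stripped-down sketch of the same design (event types simplified to strings; names are illustrative):

```go
package main

import "fmt"

// mux funnels events from several channels through a single goroutine,
// the same single-consumer trick the Aggregator uses.
func mux(specs <-chan string, suiteEnd <-chan bool, result chan<- bool) {
	for {
		select {
		case name := <-specs:
			fmt.Println("spec completed:", name)
		case passed := <-suiteEnd:
			result <- passed
			return
		}
	}
}

func main() {
	specs, suiteEnd, result := make(chan string), make(chan bool), make(chan bool)
	go mux(specs, suiteEnd, result)

	specs <- "creates a cluster"
	suiteEnd <- true
	fmt.Println("suite passed:", <-result)
}
```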
147
vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
generated
vendored
@ -1,147 +0,0 @@
package remote

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"

	"github.com/onsi/ginkgo/internal/writer"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/reporters/stenographer"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)

//An interface to net/http's client to allow the injection of fakes under test
type Poster interface {
	Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
}

/*
The ForwardingReporter is a Ginkgo reporter that forwards information to
a Ginkgo remote server.

When streaming parallel test output, this reporter is automatically installed by Ginkgo.

This is accomplished by passing the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`; the Ginkgo test runner
detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
in place of Ginkgo's DefaultReporter.
*/

type ForwardingReporter struct {
	serverHost        string
	poster            Poster
	outputInterceptor OutputInterceptor
	debugMode         bool
	debugFile         *os.File
	nestedReporter    *reporters.DefaultReporter
}

func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter {
	reporter := &ForwardingReporter{
		serverHost:        serverHost,
		poster:            poster,
		outputInterceptor: outputInterceptor,
	}

	if debugFile != "" {
		var err error
		reporter.debugMode = true
		reporter.debugFile, err = os.Create(debugFile)
		if err != nil {
			fmt.Println(err.Error())
			os.Exit(1)
		}

		if !config.Verbose {
			//if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication.
			ginkgoWriter.AndRedirectTo(reporter.debugFile)
		}
		outputInterceptor.StreamTo(reporter.debugFile) //This is not working

		stenographer := stenographer.New(false, true, reporter.debugFile)
		config.Succinct = false
		config.Verbose = true
		config.FullTrace = true
		reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer)
	}

	return reporter
}

func (reporter *ForwardingReporter) post(path string, data interface{}) {
	encoded, _ := json.Marshal(data)
	buffer := bytes.NewBuffer(encoded)
	reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
}

func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
	data := struct {
		Config  config.GinkgoConfigType `json:"config"`
		Summary *types.SuiteSummary     `json:"suite-summary"`
	}{
		conf,
		summary,
	}

	reporter.outputInterceptor.StartInterceptingOutput()
	if reporter.debugMode {
		reporter.nestedReporter.SpecSuiteWillBegin(conf, summary)
		reporter.debugFile.Sync()
	}
	reporter.post("/SpecSuiteWillBegin", data)
}

func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
	reporter.outputInterceptor.StartInterceptingOutput()
	setupSummary.CapturedOutput = output
	if reporter.debugMode {
		reporter.nestedReporter.BeforeSuiteDidRun(setupSummary)
		reporter.debugFile.Sync()
	}
	reporter.post("/BeforeSuiteDidRun", setupSummary)
}

func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
	if reporter.debugMode {
		reporter.nestedReporter.SpecWillRun(specSummary)
		reporter.debugFile.Sync()
	}
	reporter.post("/SpecWillRun", specSummary)
}

func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
	reporter.outputInterceptor.StartInterceptingOutput()
	specSummary.CapturedOutput = output
	if reporter.debugMode {
		reporter.nestedReporter.SpecDidComplete(specSummary)
		reporter.debugFile.Sync()
	}
	reporter.post("/SpecDidComplete", specSummary)
}

func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
	reporter.outputInterceptor.StartInterceptingOutput()
	setupSummary.CapturedOutput = output
	if reporter.debugMode {
		reporter.nestedReporter.AfterSuiteDidRun(setupSummary)
		reporter.debugFile.Sync()
	}
	reporter.post("/AfterSuiteDidRun", setupSummary)
}

func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	reporter.outputInterceptor.StopInterceptingAndReturnOutput()
	if reporter.debugMode {
		reporter.nestedReporter.SpecSuiteDidEnd(summary)
		reporter.debugFile.Sync()
	}
	reporter.post("/SpecSuiteDidEnd", summary)
}
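Aside: the reporter's post helper is a fire-and-forget JSON POST. A minimal sketch of that helper against a fake server (names are illustrative, not Ginkgo API):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// post mirrors the ForwardingReporter's helper: marshal a payload and
// POST it to serverHost+path, ignoring errors as the original does.
func post(serverHost, path string, data interface{}) {
	encoded, _ := json.Marshal(data)
	http.Post(serverHost+path, "application/json", bytes.NewBuffer(encoded))
}

func main() {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var body map[string]string
		json.NewDecoder(r.Body).Decode(&body)
		fmt.Println(r.URL.Path, body["event"])
	}))
	defer server.Close()

	post(server.URL, "/SpecDidComplete", map[string]string{"event": "spec-1 passed"})
}
```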
@ -1,13 +0,0 @@
package remote

import "os"

/*
The OutputInterceptor is used by the ForwardingReporter to
intercept and capture all stdout and stderr output during a test run.
*/
type OutputInterceptor interface {
	StartInterceptingOutput() error
	StopInterceptingAndReturnOutput() (string, error)
	StreamTo(*os.File)
}
82
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
generated
vendored
@ -1,82 +0,0 @@
// +build freebsd openbsd netbsd dragonfly darwin linux solaris

package remote

import (
	"errors"
	"io/ioutil"
	"os"

	"github.com/nxadm/tail"
	"golang.org/x/sys/unix"
)

func NewOutputInterceptor() OutputInterceptor {
	return &outputInterceptor{}
}

type outputInterceptor struct {
	redirectFile *os.File
	streamTarget *os.File
	intercepting bool
	tailer       *tail.Tail
	doneTailing  chan bool
}

func (interceptor *outputInterceptor) StartInterceptingOutput() error {
	if interceptor.intercepting {
		return errors.New("Already intercepting output!")
	}
	interceptor.intercepting = true

	var err error

	interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
	if err != nil {
		return err
	}

	// This might call Dup3 if the dup2 syscall is not available, e.g. on
	// linux/arm64 or linux/riscv64
	unix.Dup2(int(interceptor.redirectFile.Fd()), 1)
	unix.Dup2(int(interceptor.redirectFile.Fd()), 2)

	if interceptor.streamTarget != nil {
		interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
		interceptor.doneTailing = make(chan bool)

		go func() {
			for line := range interceptor.tailer.Lines {
				interceptor.streamTarget.Write([]byte(line.Text + "\n"))
			}
			close(interceptor.doneTailing)
		}()
	}

	return nil
}

func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
	if !interceptor.intercepting {
		return "", errors.New("Not intercepting output!")
	}

	interceptor.redirectFile.Close()
	output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
	os.Remove(interceptor.redirectFile.Name())

	interceptor.intercepting = false

	if interceptor.streamTarget != nil {
		interceptor.tailer.Stop()
		interceptor.tailer.Cleanup()
		<-interceptor.doneTailing
		interceptor.streamTarget.Sync()
	}

	return string(output), err
}

func (interceptor *outputInterceptor) StreamTo(out *os.File) {
	interceptor.streamTarget = out
}
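Aside: the interceptor captures output by pointing file descriptors 1 and 2 at a temp file with dup2. A bare-bones sketch of the same trick, assuming golang.org/x/sys/unix is available (error handling trimmed; this is a sketch, not the interceptor itself):

```go
//go:build linux || darwin

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	saved, _ := unix.Dup(1) // keep the real stdout so we can restore it

	f, _ := os.CreateTemp("", "ginkgo-output")
	unix.Dup2(int(f.Fd()), 1) // fd 1 now points at the temp file

	fmt.Println("this goes to the temp file, not the terminal")

	unix.Dup2(saved, 1) // restore stdout
	f.Close()

	data, _ := os.ReadFile(f.Name())
	os.Remove(f.Name())
	fmt.Printf("intercepted: %q\n", string(data))
}
```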
36
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
generated
vendored
@ -1,36 +0,0 @@
// +build windows

package remote

import (
	"errors"
	"os"
)

func NewOutputInterceptor() OutputInterceptor {
	return &outputInterceptor{}
}

type outputInterceptor struct {
	intercepting bool
}

func (interceptor *outputInterceptor) StartInterceptingOutput() error {
	if interceptor.intercepting {
		return errors.New("Already intercepting output!")
	}
	interceptor.intercepting = true

	// not working on windows...

	return nil
}

func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
	// not working on windows...
	interceptor.intercepting = false

	return "", nil
}

func (interceptor *outputInterceptor) StreamTo(*os.File) {}
@ -1,224 +0,0 @@
/*

The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).

*/

package remote

import (
	"encoding/json"
	"io/ioutil"
	"net"
	"net/http"
	"sync"

	"github.com/onsi/ginkgo/internal/spec_iterator"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/types"
)

/*
Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
It then forwards that communication to attached reporters.
*/
type Server struct {
	listener        net.Listener
	reporters       []reporters.Reporter
	alives          []func() bool
	lock            *sync.Mutex
	beforeSuiteData types.RemoteBeforeSuiteData
	parallelTotal   int
	counter         int
}

//Create a new server, automatically selecting a port
func NewServer(parallelTotal int) (*Server, error) {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, err
	}
	return &Server{
		listener:        listener,
		lock:            &sync.Mutex{},
		alives:          make([]func() bool, parallelTotal),
		beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
		parallelTotal:   parallelTotal,
	}, nil
}

//Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *Server) Start() {
	httpServer := &http.Server{}
	mux := http.NewServeMux()
	httpServer.Handler = mux

	//streaming endpoints
	mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
	mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
	mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
	mux.HandleFunc("/SpecWillRun", server.specWillRun)
	mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
	mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)

	//synchronization endpoints
	mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
	mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
	mux.HandleFunc("/counter", server.handleCounter)
	mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility

	go httpServer.Serve(server.listener)
}

//Stop the server
func (server *Server) Close() {
	server.listener.Close()
}

//The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *Server) Address() string {
	return "http://" + server.listener.Addr().String()
}

//
// Streaming Endpoints
//

//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
func (server *Server) readAll(request *http.Request) []byte {
	defer request.Body.Close()
	body, _ := ioutil.ReadAll(request.Body)
	return body
}

func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
	server.reporters = reporters
}

func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)

	var data struct {
		Config  config.GinkgoConfigType `json:"config"`
		Summary *types.SuiteSummary     `json:"suite-summary"`
	}

	json.Unmarshal(body, &data)

	for _, reporter := range server.reporters {
		reporter.SpecSuiteWillBegin(data.Config, data.Summary)
	}
}

func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)
	var setupSummary *types.SetupSummary
	json.Unmarshal(body, &setupSummary)

	for _, reporter := range server.reporters {
		reporter.BeforeSuiteDidRun(setupSummary)
	}
}

func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)
	var setupSummary *types.SetupSummary
	json.Unmarshal(body, &setupSummary)

	for _, reporter := range server.reporters {
		reporter.AfterSuiteDidRun(setupSummary)
	}
}

func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)
	var specSummary *types.SpecSummary
	json.Unmarshal(body, &specSummary)

	for _, reporter := range server.reporters {
		reporter.SpecWillRun(specSummary)
	}
}

func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)
	var specSummary *types.SpecSummary
	json.Unmarshal(body, &specSummary)

	for _, reporter := range server.reporters {
		reporter.SpecDidComplete(specSummary)
	}
}

func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)
	var suiteSummary *types.SuiteSummary
	json.Unmarshal(body, &suiteSummary)

	for _, reporter := range server.reporters {
		reporter.SpecSuiteDidEnd(suiteSummary)
	}
}

//
// Synchronization Endpoints
//

func (server *Server) RegisterAlive(node int, alive func() bool) {
	server.lock.Lock()
	defer server.lock.Unlock()
	server.alives[node-1] = alive
}

func (server *Server) nodeIsAlive(node int) bool {
	server.lock.Lock()
	defer server.lock.Unlock()
	alive := server.alives[node-1]
	if alive == nil {
		return true
	}
	return alive()
}

func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
	if request.Method == "POST" {
		dec := json.NewDecoder(request.Body)
		dec.Decode(&(server.beforeSuiteData))
	} else {
		beforeSuiteData := server.beforeSuiteData
		if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
			beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
		}
		enc := json.NewEncoder(writer)
		enc.Encode(beforeSuiteData)
	}
}

func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
	afterSuiteData := types.RemoteAfterSuiteData{
		CanRun: true,
	}
	for i := 2; i <= server.parallelTotal; i++ {
		afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
	}

	enc := json.NewEncoder(writer)
	enc.Encode(afterSuiteData)
}

func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
	c := spec_iterator.Counter{}
	server.lock.Lock()
	c.Index = server.counter
	server.counter++
	server.lock.Unlock()

	json.NewEncoder(writer).Encode(c)
}

func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
	writer.Write([]byte(""))
}
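Aside: NewServer listens on "127.0.0.1:0" so the OS picks a free port, and Address() hands the resulting URL to clients. A minimal runnable sketch of the same setup with one synchronization endpoint:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Port 0 asks the OS for any free port; the listener knows which one.
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/RemoteAfterSuiteData", func(w http.ResponseWriter, r *http.Request) {
		json.NewEncoder(w).Encode(map[string]bool{"CanRun": true})
	})
	go http.Serve(listener, mux)

	resp, err := http.Get("http://" + listener.Addr().String() + "/RemoteAfterSuiteData")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var data map[string]bool
	json.NewDecoder(resp.Body).Decode(&data)
	fmt.Println("CanRun:", data["CanRun"])
}
```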
@ -1,247 +0,0 @@
package spec

import (
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/onsi/ginkgo/internal/containernode"
	"github.com/onsi/ginkgo/internal/leafnodes"
	"github.com/onsi/ginkgo/types"
)

type Spec struct {
	subject          leafnodes.SubjectNode
	focused          bool
	announceProgress bool

	containers []*containernode.ContainerNode

	state            types.SpecState
	runTime          time.Duration
	startTime        time.Time
	failure          types.SpecFailure
	previousFailures bool

	stateMutex *sync.Mutex
}

func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
	spec := &Spec{
		subject:          subject,
		containers:       containers,
		focused:          subject.Flag() == types.FlagTypeFocused,
		announceProgress: announceProgress,
		stateMutex:       &sync.Mutex{},
	}

	spec.processFlag(subject.Flag())
	for i := len(containers) - 1; i >= 0; i-- {
		spec.processFlag(containers[i].Flag())
	}

	return spec
}

func (spec *Spec) processFlag(flag types.FlagType) {
	if flag == types.FlagTypeFocused {
		spec.focused = true
	} else if flag == types.FlagTypePending {
		spec.setState(types.SpecStatePending)
	}
}

func (spec *Spec) Skip() {
	spec.setState(types.SpecStateSkipped)
}

func (spec *Spec) Failed() bool {
	return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut
}

func (spec *Spec) Passed() bool {
	return spec.getState() == types.SpecStatePassed
}

func (spec *Spec) Flaked() bool {
	return spec.getState() == types.SpecStatePassed && spec.previousFailures
}

func (spec *Spec) Pending() bool {
	return spec.getState() == types.SpecStatePending
}

func (spec *Spec) Skipped() bool {
	return spec.getState() == types.SpecStateSkipped
}

func (spec *Spec) Focused() bool {
	return spec.focused
}

func (spec *Spec) IsMeasurement() bool {
	return spec.subject.Type() == types.SpecComponentTypeMeasure
}

func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
	componentTexts := make([]string, len(spec.containers)+1)
	componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)

	for i, container := range spec.containers {
		componentTexts[i] = container.Text()
		componentCodeLocations[i] = container.CodeLocation()
	}

	componentTexts[len(spec.containers)] = spec.subject.Text()
	componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()

	runTime := spec.runTime
	if runTime == 0 && !spec.startTime.IsZero() {
		runTime = time.Since(spec.startTime)
	}

	return &types.SpecSummary{
		IsMeasurement:          spec.IsMeasurement(),
		NumberOfSamples:        spec.subject.Samples(),
		ComponentTexts:         componentTexts,
		ComponentCodeLocations: componentCodeLocations,
		State:                  spec.getState(),
		RunTime:                runTime,
		Failure:                spec.failure,
		Measurements:           spec.measurementsReport(),
		SuiteID:                suiteID,
	}
}

func (spec *Spec) ConcatenatedString() string {
	s := ""
	for _, container := range spec.containers {
		s += container.Text() + " "
	}

	return s + spec.subject.Text()
}

func (spec *Spec) Run(writer io.Writer) {
	if spec.getState() == types.SpecStateFailed {
		spec.previousFailures = true
	}

	spec.startTime = time.Now()
	defer func() {
		spec.runTime = time.Since(spec.startTime)
	}()

	for sample := 0; sample < spec.subject.Samples(); sample++ {
		spec.runSample(sample, writer)

		if spec.getState() != types.SpecStatePassed {
			return
		}
	}
}

func (spec *Spec) getState() types.SpecState {
	spec.stateMutex.Lock()
	defer spec.stateMutex.Unlock()
	return spec.state
}

func (spec *Spec) setState(state types.SpecState) {
	spec.stateMutex.Lock()
	defer spec.stateMutex.Unlock()
	spec.state = state
}

func (spec *Spec) runSample(sample int, writer io.Writer) {
	spec.setState(types.SpecStatePassed)
	spec.failure = types.SpecFailure{}
	innerMostContainerIndexToUnwind := -1

	defer func() {
		for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
			container := spec.containers[i]
			for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) {
				spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach)
				justAfterEachState, justAfterEachFailure := justAfterEach.Run()
				if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
					spec.state = justAfterEachState
					spec.failure = justAfterEachFailure
				}
			}
		}

		for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
			container := spec.containers[i]
			for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
				spec.announceSetupNode(writer, "AfterEach", container, afterEach)
				afterEachState, afterEachFailure := afterEach.Run()
				if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed {
					spec.setState(afterEachState)
					spec.failure = afterEachFailure
				}
			}
		}
	}()

	for i, container := range spec.containers {
		innerMostContainerIndexToUnwind = i
		for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
			spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
			s, f := beforeEach.Run()
			spec.failure = f
			spec.setState(s)
			if spec.getState() != types.SpecStatePassed {
				return
			}
		}
	}

	for _, container := range spec.containers {
		for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
			spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
			s, f := justBeforeEach.Run()
			spec.failure = f
			spec.setState(s)
			if spec.getState() != types.SpecStatePassed {
				return
			}
		}
	}

	spec.announceSubject(writer, spec.subject)
	s, f := spec.subject.Run()
	spec.failure = f
	spec.setState(s)
}

func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
	if spec.announceProgress {
		s := fmt.Sprintf("[%s] %s\n  %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
		writer.Write([]byte(s))
	}
}

func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
	if spec.announceProgress {
		nodeType := ""
		switch subject.Type() {
		case types.SpecComponentTypeIt:
			nodeType = "It"
		case types.SpecComponentTypeMeasure:
			nodeType = "Measure"
		}
		s := fmt.Sprintf("[%s] %s\n  %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
		writer.Write([]byte(s))
	}
}

func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
	if !spec.IsMeasurement() || spec.Failed() {
		return map[string]*types.SpecMeasurement{}
	}

	return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
}
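Aside: runSample's deferred loop is the interesting part — it tracks how many containers were entered and runs teardown hooks innermost-first for exactly those containers, even when a setup hook fails partway through. A compact runnable model of that unwinding (all names illustrative):

```go
package main

import "fmt"

// runSample models the vendored code's unwinding: setup hooks run
// outermost first, and the deferred loop runs teardown hooks for every
// container that was entered, innermost first, even if setup stopped early.
func runSample(setups, teardowns []func() bool) {
	entered := -1
	defer func() {
		for i := entered; i >= 0; i-- {
			teardowns[i]()
		}
	}()

	for i := range setups {
		entered = i
		if !setups[i]() {
			return // a failed setup still unwinds the entered containers
		}
	}
}

func main() {
	ok := func() bool { fmt.Println("setup ok"); return true }
	bad := func() bool { fmt.Println("setup failed"); return false }
	td := func(n int) func() bool {
		return func() bool { fmt.Println("teardown", n); return true }
	}
	// Prints: setup ok, setup failed, teardown 1, teardown 0.
	runSample([]func() bool{ok, bad}, []func() bool{td(0), td(1)})
}
```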
@@ -1,144 +0,0 @@
package spec

import (
    "math/rand"
    "regexp"
    "sort"
    "strings"
)

type Specs struct {
    specs []*Spec
    names []string

    hasProgrammaticFocus bool
    RegexScansFilePath   bool
}

func NewSpecs(specs []*Spec) *Specs {
    names := make([]string, len(specs))
    for i, spec := range specs {
        names[i] = spec.ConcatenatedString()
    }
    return &Specs{
        specs: specs,
        names: names,
    }
}

func (e *Specs) Specs() []*Spec {
    return e.specs
}

func (e *Specs) HasProgrammaticFocus() bool {
    return e.hasProgrammaticFocus
}

func (e *Specs) Shuffle(r *rand.Rand) {
    sort.Sort(e)
    permutation := r.Perm(len(e.specs))
    shuffledSpecs := make([]*Spec, len(e.specs))
    names := make([]string, len(e.specs))
    for i, j := range permutation {
        shuffledSpecs[i] = e.specs[j]
        names[i] = e.names[j]
    }
    e.specs = shuffledSpecs
    e.names = names
}

func (e *Specs) ApplyFocus(description string, focus, skip []string) {
    if len(focus)+len(skip) == 0 {
        e.applyProgrammaticFocus()
    } else {
        e.applyRegExpFocusAndSkip(description, focus, skip)
    }
}

func (e *Specs) applyProgrammaticFocus() {
    e.hasProgrammaticFocus = false
    for _, spec := range e.specs {
        if spec.Focused() && !spec.Pending() {
            e.hasProgrammaticFocus = true
            break
        }
    }

    if e.hasProgrammaticFocus {
        for _, spec := range e.specs {
            if !spec.Focused() {
                spec.Skip()
            }
        }
    }
}

// toMatch returns a byte[] to be used by regex matchers. When adding new behaviours to the matching function,
// this is the place which we append to.
func (e *Specs) toMatch(description string, i int) []byte {
    if i > len(e.names) {
        return nil
    }
    if e.RegexScansFilePath {
        return []byte(
            description + " " +
                e.names[i] + " " +
                e.specs[i].subject.CodeLocation().FileName)
    } else {
        return []byte(
            description + " " +
                e.names[i])
    }
}

func (e *Specs) applyRegExpFocusAndSkip(description string, focus, skip []string) {
    var focusFilter, skipFilter *regexp.Regexp
    if len(focus) > 0 {
        focusFilter = regexp.MustCompile(strings.Join(focus, "|"))
    }
    if len(skip) > 0 {
        skipFilter = regexp.MustCompile(strings.Join(skip, "|"))
    }

    for i, spec := range e.specs {
        matchesFocus := true
        matchesSkip := false

        toMatch := e.toMatch(description, i)

        if focusFilter != nil {
            matchesFocus = focusFilter.Match(toMatch)
        }

        if skipFilter != nil {
            matchesSkip = skipFilter.Match(toMatch)
        }

        if !matchesFocus || matchesSkip {
            spec.Skip()
        }
    }
}

func (e *Specs) SkipMeasurements() {
    for _, spec := range e.specs {
        if spec.IsMeasurement() {
            spec.Skip()
        }
    }
}

//sort.Interface

func (e *Specs) Len() int {
    return len(e.specs)
}

func (e *Specs) Less(i, j int) bool {
    return e.names[i] < e.names[j]
}

func (e *Specs) Swap(i, j int) {
    e.names[i], e.names[j] = e.names[j], e.names[i]
    e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
}
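
Note: the focus/skip filtering above compiles each flag list into a single alternation regexp and matches it against "description + spec string" (plus the spec's file path when RegexScansFilePath is set). A minimal self-contained sketch of that matching rule, with hypothetical spec strings; this is an illustration, not part of the vendored code:

package main

import (
    "fmt"
    "regexp"
    "strings"
)

// shouldRun mirrors applyRegExpFocusAndSkip above: a spec runs only if it
// matches the combined focus pattern (when one is given) and does not match
// the combined skip pattern.
func shouldRun(toMatch string, focus, skip []string) bool {
    matchesFocus := true
    if len(focus) > 0 {
        matchesFocus = regexp.MustCompile(strings.Join(focus, "|")).MatchString(toMatch)
    }
    matchesSkip := false
    if len(skip) > 0 {
        matchesSkip = regexp.MustCompile(strings.Join(skip, "|")).MatchString(toMatch)
    }
    return matchesFocus && !matchesSkip
}

func main() {
    fmt.Println(shouldRun("E2E Suite Cluster creates a cluster", []string{"Cluster"}, nil))                 // true
    fmt.Println(shouldRun("E2E Suite Cluster creates a cluster", []string{"Cluster"}, []string{"creates"})) // false
}
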
55 vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go generated vendored
@@ -1,55 +0,0 @@
package spec_iterator

func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
    if length == 0 {
        return 0, 0
    }

    // We have more nodes than tests. Trivial case.
    if parallelTotal >= length {
        if parallelNode > length {
            return 0, 0
        } else {
            return parallelNode - 1, 1
        }
    }

    // This is the minimum amount of tests that a node will be required to run
    minTestsPerNode := length / parallelTotal

    // This is the maximum amount of tests that a node will be required to run
    // The algorithm guarantees that this would be equal to at least the minimum amount
    // and at most one more
    maxTestsPerNode := minTestsPerNode
    if length%parallelTotal != 0 {
        maxTestsPerNode++
    }

    // Number of nodes that will have to run the maximum amount of tests per node
    numMaxLoadNodes := length % parallelTotal

    // Number of nodes that precede the current node and will have to run the maximum amount of tests per node
    var numPrecedingMaxLoadNodes int
    if parallelNode > numMaxLoadNodes {
        numPrecedingMaxLoadNodes = numMaxLoadNodes
    } else {
        numPrecedingMaxLoadNodes = parallelNode - 1
    }

    // Number of nodes that precede the current node and will have to run the minimum amount of tests per node
    var numPrecedingMinLoadNodes int
    if parallelNode <= numMaxLoadNodes {
        numPrecedingMinLoadNodes = 0
    } else {
        numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
    }

    // Evaluate the test start index and number of tests to run
    startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
    if parallelNode > numMaxLoadNodes {
        count = minTestsPerNode
    } else {
        count = maxTestsPerNode
    }
    return
}
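
A quick worked example of the sharding arithmetic above, written as an Example test that assumes it sits alongside ParallelizedIndexRange in the same package: with 10 specs over 3 nodes, 10%3 = 1 node carries the maximum load of 4 specs and the remaining nodes carry 3.

package spec_iterator

import "fmt"

// ExampleParallelizedIndexRange prints the range each node receives for
// length=10, parallelTotal=3.
func ExampleParallelizedIndexRange() {
    for node := 1; node <= 3; node++ {
        start, count := ParallelizedIndexRange(10, 3, node)
        fmt.Printf("node %d: start=%d count=%d\n", node, start, count)
    }
    // Output:
    // node 1: start=0 count=4
    // node 2: start=4 count=3
    // node 3: start=7 count=3
}
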
59 vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go generated vendored
@@ -1,59 +0,0 @@
package spec_iterator

import (
    "encoding/json"
    "fmt"
    "net/http"

    "github.com/onsi/ginkgo/internal/spec"
)

type ParallelIterator struct {
    specs  []*spec.Spec
    host   string
    client *http.Client
}

func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
    return &ParallelIterator{
        specs:  specs,
        host:   host,
        client: &http.Client{},
    }
}

func (s *ParallelIterator) Next() (*spec.Spec, error) {
    resp, err := s.client.Get(s.host + "/counter")
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
    }

    var counter Counter
    err = json.NewDecoder(resp.Body).Decode(&counter)
    if err != nil {
        return nil, err
    }

    if counter.Index >= len(s.specs) {
        return nil, ErrClosed
    }

    return s.specs[counter.Index], nil
}

func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
    return len(s.specs)
}

func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
    return -1, false
}

func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
    return -1, false
}
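
For context, Next() above pulls the next global spec index by GETting host + "/counter" and decoding the body into the Counter struct ({"index": N}); once the index runs past the spec list it returns ErrClosed. A hypothetical minimal counter endpoint that would satisfy this contract (a sketch, not the real ginkgo sync server):

package main

import (
    "fmt"
    "log"
    "net/http"
    "sync/atomic"
)

func main() {
    var counter int64 = -1
    http.HandleFunc("/counter", func(w http.ResponseWriter, r *http.Request) {
        // Each request hands out the next spec index; clients stop once the
        // index exceeds their spec list (they then return ErrClosed).
        index := atomic.AddInt64(&counter, 1)
        fmt.Fprintf(w, `{"index": %d}`, index)
    })
    log.Fatal(http.ListenAndServe(":8080", nil))
}
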
45 vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go generated vendored
@@ -1,45 +0,0 @@
package spec_iterator

import (
    "github.com/onsi/ginkgo/internal/spec"
)

type SerialIterator struct {
    specs []*spec.Spec
    index int
}

func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
    return &SerialIterator{
        specs: specs,
        index: 0,
    }
}

func (s *SerialIterator) Next() (*spec.Spec, error) {
    if s.index >= len(s.specs) {
        return nil, ErrClosed
    }

    spec := s.specs[s.index]
    s.index += 1
    return spec, nil
}

func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
    return len(s.specs)
}

func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
    return len(s.specs), true
}

func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
    count := 0
    for _, s := range s.specs {
        if !s.Skipped() && !s.Pending() {
            count += 1
        }
    }
    return count, true
}
47 vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go generated vendored
@@ -1,47 +0,0 @@
package spec_iterator

import "github.com/onsi/ginkgo/internal/spec"

type ShardedParallelIterator struct {
    specs    []*spec.Spec
    index    int
    maxIndex int
}

func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
    startIndex, count := ParallelizedIndexRange(len(specs), total, node)

    return &ShardedParallelIterator{
        specs:    specs,
        index:    startIndex,
        maxIndex: startIndex + count,
    }
}

func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
    if s.index >= s.maxIndex {
        return nil, ErrClosed
    }

    spec := s.specs[s.index]
    s.index += 1
    return spec, nil
}

func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
    return len(s.specs)
}

func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
    return s.maxIndex - s.index, true
}

func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
    count := 0
    for i := s.index; i < s.maxIndex; i += 1 {
        if !s.specs[i].Skipped() && !s.specs[i].Pending() {
            count += 1
        }
    }
    return count, true
}
20 vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go generated vendored
@@ -1,20 +0,0 @@
package spec_iterator

import (
    "errors"

    "github.com/onsi/ginkgo/internal/spec"
)

var ErrClosed = errors.New("no more specs to run")

type SpecIterator interface {
    Next() (*spec.Spec, error)
    NumberOfSpecsPriorToIteration() int
    NumberOfSpecsToProcessIfKnown() (int, bool)
    NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
}

type Counter struct {
    Index int `json:"index"`
}
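
All iterators above signal exhaustion with the ErrClosed sentinel rather than a boolean, so every consumer drains them with the same loop (the spec runner below does exactly this). A self-contained sketch of that contract, using a simplified string iterator in place of *spec.Spec:

package main

import (
    "errors"
    "fmt"
)

var errClosed = errors.New("no more specs to run")

// sliceIterator stands in for SerialIterator: Next returns errClosed once the
// underlying slice is exhausted, mirroring the SpecIterator contract.
type sliceIterator struct {
    items []string
    index int
}

func (s *sliceIterator) Next() (string, error) {
    if s.index >= len(s.items) {
        return "", errClosed
    }
    item := s.items[s.index]
    s.index++
    return item, nil
}

func main() {
    it := &sliceIterator{items: []string{"spec A", "spec B"}}
    for {
        item, err := it.Next()
        if err == errClosed {
            break // clean end of iteration
        }
        if err != nil {
            fmt.Println("iteration failed:", err)
            break
        }
        fmt.Println("running", item)
    }
}
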
@@ -1,15 +0,0 @@
package specrunner

import (
    "crypto/rand"
    "fmt"
)

func randomID() string {
    b := make([]byte, 8)
    _, err := rand.Read(b)
    if err != nil {
        return ""
    }
    return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
}
@@ -1,411 +0,0 @@
package specrunner

import (
    "fmt"
    "os"
    "os/signal"
    "sync"
    "syscall"

    "github.com/onsi/ginkgo/internal/spec_iterator"

    "github.com/onsi/ginkgo/config"
    "github.com/onsi/ginkgo/internal/leafnodes"
    "github.com/onsi/ginkgo/internal/spec"
    Writer "github.com/onsi/ginkgo/internal/writer"
    "github.com/onsi/ginkgo/reporters"
    "github.com/onsi/ginkgo/types"

    "time"
)

type SpecRunner struct {
    description     string
    beforeSuiteNode leafnodes.SuiteNode
    iterator        spec_iterator.SpecIterator
    afterSuiteNode  leafnodes.SuiteNode
    reporters       []reporters.Reporter
    startTime       time.Time
    suiteID         string
    runningSpec     *spec.Spec
    writer          Writer.WriterInterface
    config          config.GinkgoConfigType
    interrupted     bool
    processedSpecs  []*spec.Spec
    lock            *sync.Mutex
}

func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
    return &SpecRunner{
        description:     description,
        beforeSuiteNode: beforeSuiteNode,
        iterator:        iterator,
        afterSuiteNode:  afterSuiteNode,
        reporters:       reporters,
        writer:          writer,
        config:          config,
        suiteID:         randomID(),
        lock:            &sync.Mutex{},
    }
}

func (runner *SpecRunner) Run() bool {
    if runner.config.DryRun {
        runner.performDryRun()
        return true
    }

    runner.reportSuiteWillBegin()
    signalRegistered := make(chan struct{})
    go runner.registerForInterrupts(signalRegistered)
    <-signalRegistered

    suitePassed := runner.runBeforeSuite()

    if suitePassed {
        suitePassed = runner.runSpecs()
    }

    runner.blockForeverIfInterrupted()

    suitePassed = runner.runAfterSuite() && suitePassed

    runner.reportSuiteDidEnd(suitePassed)

    return suitePassed
}

func (runner *SpecRunner) performDryRun() {
    runner.reportSuiteWillBegin()

    if runner.beforeSuiteNode != nil {
        summary := runner.beforeSuiteNode.Summary()
        summary.State = types.SpecStatePassed
        runner.reportBeforeSuite(summary)
    }

    for {
        spec, err := runner.iterator.Next()
        if err == spec_iterator.ErrClosed {
            break
        }
        if err != nil {
            fmt.Println("failed to iterate over tests:\n" + err.Error())
            break
        }

        runner.processedSpecs = append(runner.processedSpecs, spec)

        summary := spec.Summary(runner.suiteID)
        runner.reportSpecWillRun(summary)
        if summary.State == types.SpecStateInvalid {
            summary.State = types.SpecStatePassed
        }
        runner.reportSpecDidComplete(summary, false)
    }

    if runner.afterSuiteNode != nil {
        summary := runner.afterSuiteNode.Summary()
        summary.State = types.SpecStatePassed
        runner.reportAfterSuite(summary)
    }

    runner.reportSuiteDidEnd(true)
}

func (runner *SpecRunner) runBeforeSuite() bool {
    if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
        return true
    }

    runner.writer.Truncate()
    conf := runner.config
    passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
    if !passed {
        runner.writer.DumpOut()
    }
    runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
    return passed
}

func (runner *SpecRunner) runAfterSuite() bool {
    if runner.afterSuiteNode == nil {
        return true
    }

    runner.writer.Truncate()
    conf := runner.config
    passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
    if !passed {
        runner.writer.DumpOut()
    }
    runner.reportAfterSuite(runner.afterSuiteNode.Summary())
    return passed
}

func (runner *SpecRunner) runSpecs() bool {
    suiteFailed := false
    skipRemainingSpecs := false
    for {
        spec, err := runner.iterator.Next()
        if err == spec_iterator.ErrClosed {
            break
        }
        if err != nil {
            fmt.Println("failed to iterate over tests:\n" + err.Error())
            suiteFailed = true
            break
        }

        runner.processedSpecs = append(runner.processedSpecs, spec)

        if runner.wasInterrupted() {
            break
        }
        if skipRemainingSpecs {
            spec.Skip()
        }

        if !spec.Skipped() && !spec.Pending() {
            if passed := runner.runSpec(spec); !passed {
                suiteFailed = true
            }
        } else if spec.Pending() && runner.config.FailOnPending {
            runner.reportSpecWillRun(spec.Summary(runner.suiteID))
            suiteFailed = true
            runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
        } else {
            runner.reportSpecWillRun(spec.Summary(runner.suiteID))
            runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
        }

        if spec.Failed() && runner.config.FailFast {
            skipRemainingSpecs = true
        }
    }

    return !suiteFailed
}

func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) {
    maxAttempts := 1
    if runner.config.FlakeAttempts > 0 {
        // uninitialized configs count as 1
        maxAttempts = runner.config.FlakeAttempts
    }

    for i := 0; i < maxAttempts; i++ {
        runner.reportSpecWillRun(spec.Summary(runner.suiteID))
        runner.runningSpec = spec
        spec.Run(runner.writer)
        runner.runningSpec = nil
        runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
        if !spec.Failed() {
            return true
        }
    }
    return false
}

func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
    if runner.runningSpec == nil {
        return nil, false
    }

    return runner.runningSpec.Summary(runner.suiteID), true
}

func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) {
    c := make(chan os.Signal, 1)
    signal.Notify(c, os.Interrupt, syscall.SIGTERM)
    close(signalRegistered)

    <-c
    signal.Stop(c)
    runner.markInterrupted()
    go runner.registerForHardInterrupts()
    runner.writer.DumpOutWithHeader(`
Received interrupt.  Emitting contents of GinkgoWriter...
---------------------------------------------------------
`)
    if runner.afterSuiteNode != nil {
        fmt.Fprint(os.Stderr, `
---------------------------------------------------------
Received interrupt.  Running AfterSuite...
^C again to terminate immediately
`)
        runner.runAfterSuite()
    }
    runner.reportSuiteDidEnd(false)
    os.Exit(1)
}

func (runner *SpecRunner) registerForHardInterrupts() {
    c := make(chan os.Signal, 1)
    signal.Notify(c, os.Interrupt, syscall.SIGTERM)

    <-c
    fmt.Fprintln(os.Stderr, "\nReceived second interrupt.  Shutting down.")
    os.Exit(1)
}

func (runner *SpecRunner) blockForeverIfInterrupted() {
    runner.lock.Lock()
    interrupted := runner.interrupted
    runner.lock.Unlock()

    if interrupted {
        select {}
    }
}

func (runner *SpecRunner) markInterrupted() {
    runner.lock.Lock()
    defer runner.lock.Unlock()
    runner.interrupted = true
}

func (runner *SpecRunner) wasInterrupted() bool {
    runner.lock.Lock()
    defer runner.lock.Unlock()
    return runner.interrupted
}

func (runner *SpecRunner) reportSuiteWillBegin() {
    runner.startTime = time.Now()
    summary := runner.suiteWillBeginSummary()
    for _, reporter := range runner.reporters {
        reporter.SpecSuiteWillBegin(runner.config, summary)
    }
}

func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
    for _, reporter := range runner.reporters {
        reporter.BeforeSuiteDidRun(summary)
    }
}

func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
    for _, reporter := range runner.reporters {
        reporter.AfterSuiteDidRun(summary)
    }
}

func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
    runner.writer.Truncate()

    for _, reporter := range runner.reporters {
        reporter.SpecWillRun(summary)
    }
}

func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
    if len(summary.CapturedOutput) == 0 {
        summary.CapturedOutput = string(runner.writer.Bytes())
    }
    for i := len(runner.reporters) - 1; i >= 1; i-- {
        runner.reporters[i].SpecDidComplete(summary)
    }

    if failed {
        runner.writer.DumpOut()
    }

    runner.reporters[0].SpecDidComplete(summary)
}

func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
    summary := runner.suiteDidEndSummary(success)
    summary.RunTime = time.Since(runner.startTime)
    for _, reporter := range runner.reporters {
        reporter.SpecSuiteDidEnd(summary)
    }
}

func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) {
    count = 0

    for _, spec := range runner.processedSpecs {
        if filter(spec) {
            count++
        }
    }

    return count
}

func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary {
    numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
        return !ex.Skipped() && !ex.Pending()
    })

    numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
        return ex.Pending()
    })

    numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
        return ex.Skipped()
    })

    numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
        return ex.Passed()
    })

    numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
        return ex.Flaked()
    })

    numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
        return ex.Failed()
    })

    if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
        var known bool
        numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
        if !known {
            numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration()
        }
        numberOfFailedSpecs = numberOfSpecsThatWillBeRun
    }

    return &types.SuiteSummary{
        SuiteDescription: runner.description,
        SuiteSucceeded:   success,
        SuiteID:          runner.suiteID,

        NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
        NumberOfTotalSpecs:                 len(runner.processedSpecs),
        NumberOfSpecsThatWillBeRun:         numberOfSpecsThatWillBeRun,
        NumberOfPendingSpecs:               numberOfPendingSpecs,
        NumberOfSkippedSpecs:               numberOfSkippedSpecs,
        NumberOfPassedSpecs:                numberOfPassedSpecs,
        NumberOfFailedSpecs:                numberOfFailedSpecs,
        NumberOfFlakedSpecs:                numberOfFlakedSpecs,
    }
}

func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary {
    numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown()
    if !known {
        numTotal = -1
    }

    numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
    if !known {
        numToRun = -1
    }

    return &types.SuiteSummary{
        SuiteDescription: runner.description,
        SuiteID:          runner.suiteID,

        NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
        NumberOfTotalSpecs:                 numTotal,
        NumberOfSpecsThatWillBeRun:         numToRun,
        NumberOfPendingSpecs:               -1,
        NumberOfSkippedSpecs:               -1,
        NumberOfPassedSpecs:                -1,
        NumberOfFailedSpecs:                -1,
        NumberOfFlakedSpecs:                -1,
    }
}
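
Worth noting from runSpec above: with FlakeAttempts > 0 a failing spec is re-run until it passes or the attempts are exhausted, and every attempt is reported. A stripped-down sketch of that retry rule, using a hypothetical attempt function in place of spec.Run:

package main

import "fmt"

// runWithFlakeAttempts mirrors SpecRunner.runSpec: retry up to maxAttempts
// times, stopping early on the first passing attempt.
func runWithFlakeAttempts(maxAttempts int, attempt func() bool) bool {
    if maxAttempts < 1 {
        maxAttempts = 1 // uninitialized configs count as 1
    }
    for i := 0; i < maxAttempts; i++ {
        if attempt() {
            return true
        }
    }
    return false
}

func main() {
    tries := 0
    passed := runWithFlakeAttempts(3, func() bool {
        tries++
        return tries == 2 // hypothetical spec that flakes once, then passes
    })
    fmt.Printf("passed=%v after %d attempts\n", passed, tries)
}
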
@@ -1,227 +0,0 @@
package suite

import (
    "math/rand"
    "net/http"
    "time"

    "github.com/onsi/ginkgo/internal/spec_iterator"

    "github.com/onsi/ginkgo/config"
    "github.com/onsi/ginkgo/internal/containernode"
    "github.com/onsi/ginkgo/internal/failer"
    "github.com/onsi/ginkgo/internal/leafnodes"
    "github.com/onsi/ginkgo/internal/spec"
    "github.com/onsi/ginkgo/internal/specrunner"
    "github.com/onsi/ginkgo/internal/writer"
    "github.com/onsi/ginkgo/reporters"
    "github.com/onsi/ginkgo/types"
)

type ginkgoTestingT interface {
    Fail()
}

type deferredContainerNode struct {
    text         string
    body         func()
    flag         types.FlagType
    codeLocation types.CodeLocation
}

type Suite struct {
    topLevelContainer *containernode.ContainerNode
    currentContainer  *containernode.ContainerNode

    deferredContainerNodes []deferredContainerNode

    containerIndex      int
    beforeSuiteNode     leafnodes.SuiteNode
    afterSuiteNode      leafnodes.SuiteNode
    runner              *specrunner.SpecRunner
    failer              *failer.Failer
    running             bool
    expandTopLevelNodes bool
}

func New(failer *failer.Failer) *Suite {
    topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})

    return &Suite{
        topLevelContainer:      topLevelContainer,
        currentContainer:       topLevelContainer,
        failer:                 failer,
        containerIndex:         1,
        deferredContainerNodes: []deferredContainerNode{},
    }
}

func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
    if config.ParallelTotal < 1 {
        panic("ginkgo.parallel.total must be >= 1")
    }

    if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
        panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
    }

    suite.expandTopLevelNodes = true
    for _, deferredNode := range suite.deferredContainerNodes {
        suite.PushContainerNode(deferredNode.text, deferredNode.body, deferredNode.flag, deferredNode.codeLocation)
    }

    r := rand.New(rand.NewSource(config.RandomSeed))
    suite.topLevelContainer.Shuffle(r)
    iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config)
    suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config)

    suite.running = true
    success := suite.runner.Run()
    if !success {
        t.Fail()
    }
    return success, hasProgrammaticFocus
}

func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) {
    specsSlice := []*spec.Spec{}
    suite.topLevelContainer.BackPropagateProgrammaticFocus()
    for _, collatedNodes := range suite.topLevelContainer.Collate() {
        specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
    }

    specs := spec.NewSpecs(specsSlice)
    specs.RegexScansFilePath = config.RegexScansFilePath

    if config.RandomizeAllSpecs {
        specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
    }

    specs.ApplyFocus(description, config.FocusStrings, config.SkipStrings)

    if config.SkipMeasurements {
        specs.SkipMeasurements()
    }

    var iterator spec_iterator.SpecIterator

    if config.ParallelTotal > 1 {
        iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost)
        resp, err := http.Get(config.SyncHost + "/has-counter")
        if err != nil || resp.StatusCode != http.StatusOK {
            iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode)
        }
    } else {
        iterator = spec_iterator.NewSerialIterator(specs.Specs())
    }

    return iterator, specs.HasProgrammaticFocus()
}

func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
    if !suite.running {
        return nil, false
    }
    return suite.runner.CurrentSpecSummary()
}

func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
    if suite.beforeSuiteNode != nil {
        panic("You may only call BeforeSuite once!")
    }
    suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
}

func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
    if suite.afterSuiteNode != nil {
        panic("You may only call AfterSuite once!")
    }
    suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
}

func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
    if suite.beforeSuiteNode != nil {
        panic("You may only call BeforeSuite once!")
    }
    suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
}

func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
    if suite.afterSuiteNode != nil {
        panic("You may only call AfterSuite once!")
    }
    suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
}

func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
    /*
        We defer walking the container nodes (which immediately evaluates the `body` function)
        until `RunSpecs` is called.  We do this by storing off the deferred container nodes.  Then, when
        `RunSpecs` is called we actually go through and add the container nodes to the test structure.

        This allows us to defer calling all the `body` functions until _after_ the top level functions
        have been walked, _after_ func init()s have been called, and _after_ `go test` has called `flag.Parse()`.

        This allows users to load up configuration information in the `TestX` go test hook just before `RunSpecs`
        is invoked and solves issues like #693 and makes the lifecycle easier to reason about.

    */
    if !suite.expandTopLevelNodes {
        suite.deferredContainerNodes = append(suite.deferredContainerNodes, deferredContainerNode{text, body, flag, codeLocation})
        return
    }

    container := containernode.New(text, flag, codeLocation)
    suite.currentContainer.PushContainerNode(container)

    previousContainer := suite.currentContainer
    suite.currentContainer = container
    suite.containerIndex++

    body()

    suite.containerIndex--
    suite.currentContainer = previousContainer
}

func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
    if suite.running {
        suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation)
    }
    suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
    if suite.running {
        suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation)
    }
    suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
    if suite.running {
        suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation)
    }
    suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
    if suite.running {
        suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation)
    }
    suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
    if suite.running {
        suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation)
    }
    suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
    if suite.running {
        suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation)
    }
    suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}
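
The long comment in PushContainerNode is the user-visible contract: top-level Describe bodies are only evaluated once RunSpecs runs, after func init()s and flag.Parse(). A hedged sketch of the pattern that enables, using the ginkgo v1 API with illustrative names (CLUSTER and clusterName are hypothetical):

package example_test

import (
    "os"
    "testing"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

// Set in TestExample below; safe to read inside the Describe body because
// ginkgo v1 defers walking top-level containers until RunSpecs is called.
var clusterName string

var _ = Describe("Cluster", func() {
    // This body runs during RunSpecs, i.e. after TestExample set clusterName.
    name := clusterName
    It("knows its target cluster", func() {
        Expect(name).NotTo(BeEmpty())
    })
})

func TestExample(t *testing.T) {
    RegisterFailHandler(Fail)
    clusterName = os.Getenv("CLUSTER") // read before any Describe body is evaluated
    RunSpecs(t, "Example Suite")
}
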
@@ -1,36 +0,0 @@
package writer

type FakeGinkgoWriter struct {
    EventStream []string
}

func NewFake() *FakeGinkgoWriter {
    return &FakeGinkgoWriter{
        EventStream: []string{},
    }
}

func (writer *FakeGinkgoWriter) AddEvent(event string) {
    writer.EventStream = append(writer.EventStream, event)
}

func (writer *FakeGinkgoWriter) Truncate() {
    writer.EventStream = append(writer.EventStream, "TRUNCATE")
}

func (writer *FakeGinkgoWriter) DumpOut() {
    writer.EventStream = append(writer.EventStream, "DUMP")
}

func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
    writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
}

func (writer *FakeGinkgoWriter) Bytes() []byte {
    writer.EventStream = append(writer.EventStream, "BYTES")
    return nil
}

func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
    return 0, nil
}
@@ -1,89 +0,0 @@
package writer

import (
    "bytes"
    "io"
    "sync"
)

type WriterInterface interface {
    io.Writer

    Truncate()
    DumpOut()
    DumpOutWithHeader(header string)
    Bytes() []byte
}

type Writer struct {
    buffer     *bytes.Buffer
    outWriter  io.Writer
    lock       *sync.Mutex
    stream     bool
    redirector io.Writer
}

func New(outWriter io.Writer) *Writer {
    return &Writer{
        buffer:    &bytes.Buffer{},
        lock:      &sync.Mutex{},
        outWriter: outWriter,
        stream:    true,
    }
}

func (w *Writer) AndRedirectTo(writer io.Writer) {
    w.redirector = writer
}

func (w *Writer) SetStream(stream bool) {
    w.lock.Lock()
    defer w.lock.Unlock()
    w.stream = stream
}

func (w *Writer) Write(b []byte) (n int, err error) {
    w.lock.Lock()
    defer w.lock.Unlock()

    n, err = w.buffer.Write(b)
    if w.redirector != nil {
        w.redirector.Write(b)
    }
    if w.stream {
        return w.outWriter.Write(b)
    }
    return n, err
}

func (w *Writer) Truncate() {
    w.lock.Lock()
    defer w.lock.Unlock()
    w.buffer.Reset()
}

func (w *Writer) DumpOut() {
    w.lock.Lock()
    defer w.lock.Unlock()
    if !w.stream {
        w.buffer.WriteTo(w.outWriter)
    }
}

func (w *Writer) Bytes() []byte {
    w.lock.Lock()
    defer w.lock.Unlock()
    b := w.buffer.Bytes()
    copied := make([]byte, len(b))
    copy(copied, b)
    return copied
}

func (w *Writer) DumpOutWithHeader(header string) {
    w.lock.Lock()
    defer w.lock.Unlock()
    if !w.stream && w.buffer.Len() > 0 {
        w.outWriter.Write([]byte(header))
        w.buffer.WriteTo(w.outWriter)
    }
}
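
The Writer above runs in one of two modes: when stream is true, writes pass straight through to outWriter; when false, they accumulate in the buffer until DumpOut/DumpOutWithHeader flush it, which the spec runner only does for failing specs. A self-contained sketch of the buffered mode (illustrative, without the locking and redirection of the real type):

package main

import (
    "bytes"
    "fmt"
    "io"
    "os"
)

// bufferedWriter mimics ginkgo's Writer in non-streaming mode: writes are
// held in a buffer and only surfaced when the caller decides to flush.
type bufferedWriter struct {
    buffer bytes.Buffer
    out    io.Writer
}

func (w *bufferedWriter) Write(b []byte) (int, error) { return w.buffer.Write(b) }
func (w *bufferedWriter) Truncate()                   { w.buffer.Reset() }
func (w *bufferedWriter) DumpOut()                    { w.buffer.WriteTo(w.out) }

func main() {
    w := &bufferedWriter{out: os.Stdout}
    fmt.Fprintln(w, "debug output from a passing spec")
    w.Truncate() // spec passed: discard its output

    fmt.Fprintln(w, "debug output from a failing spec")
    w.DumpOut() // spec failed: now the buffered output is shown
}
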
@@ -1,87 +0,0 @@
/*
Ginkgo's Default Reporter

A number of command line flags are available to tweak Ginkgo's default output.

These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
*/
package reporters

import (
    "github.com/onsi/ginkgo/config"
    "github.com/onsi/ginkgo/reporters/stenographer"
    "github.com/onsi/ginkgo/types"
)

type DefaultReporter struct {
    config        config.DefaultReporterConfigType
    stenographer  stenographer.Stenographer
    specSummaries []*types.SpecSummary
}

func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
    return &DefaultReporter{
        config:       config,
        stenographer: stenographer,
    }
}

func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
    reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
    if config.ParallelTotal > 1 {
        reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct)
    } else {
        reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
    }
}

func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
    if setupSummary.State != types.SpecStatePassed {
        reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
    }
}

func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
    if setupSummary.State != types.SpecStatePassed {
        reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
    }
}

func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
    if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
        reporter.stenographer.AnnounceSpecWillRun(specSummary)
    }
}

func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
    switch specSummary.State {
    case types.SpecStatePassed:
        if specSummary.IsMeasurement {
            reporter.stenographer.AnnounceSuccessfulMeasurement(specSummary, reporter.config.Succinct)
        } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
            reporter.stenographer.AnnounceSuccessfulSlowSpec(specSummary, reporter.config.Succinct)
        } else {
            reporter.stenographer.AnnounceSuccessfulSpec(specSummary)
            if reporter.config.ReportPassed {
                reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
            }
        }
    case types.SpecStatePending:
        reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
    case types.SpecStateSkipped:
        reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace)
    case types.SpecStateTimedOut:
        reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
    case types.SpecStatePanicked:
        reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
    case types.SpecStateFailed:
        reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
    }

    reporter.specSummaries = append(reporter.specSummaries, specSummary)
}

func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
    reporter.stenographer.SummarizeFailures(reporter.specSummaries)
    reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
}
@@ -1,59 +0,0 @@
package reporters

import (
    "github.com/onsi/ginkgo/config"
    "github.com/onsi/ginkgo/types"
)

//FakeReporter is useful for testing purposes
type FakeReporter struct {
    Config config.GinkgoConfigType

    BeginSummary         *types.SuiteSummary
    BeforeSuiteSummary   *types.SetupSummary
    SpecWillRunSummaries []*types.SpecSummary
    SpecSummaries        []*types.SpecSummary
    AfterSuiteSummary    *types.SetupSummary
    EndSummary           *types.SuiteSummary

    SpecWillRunStub     func(specSummary *types.SpecSummary)
    SpecDidCompleteStub func(specSummary *types.SpecSummary)
}

func NewFakeReporter() *FakeReporter {
    return &FakeReporter{
        SpecWillRunSummaries: make([]*types.SpecSummary, 0),
        SpecSummaries:        make([]*types.SpecSummary, 0),
    }
}

func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
    fakeR.Config = config
    fakeR.BeginSummary = summary
}

func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
    fakeR.BeforeSuiteSummary = setupSummary
}

func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
    if fakeR.SpecWillRunStub != nil {
        fakeR.SpecWillRunStub(specSummary)
    }
    fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
}

func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
    if fakeR.SpecDidCompleteStub != nil {
        fakeR.SpecDidCompleteStub(specSummary)
    }
    fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
}

func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
    fakeR.AfterSuiteSummary = setupSummary
}

func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
    fakeR.EndSummary = summary
}
@@ -1,178 +0,0 @@
/*

JUnit XML Reporter for Ginkgo

For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output

*/

package reporters

import (
    "encoding/xml"
    "fmt"
    "math"
    "os"
    "path/filepath"
    "strings"

    "github.com/onsi/ginkgo/config"
    "github.com/onsi/ginkgo/types"
)

type JUnitTestSuite struct {
    XMLName   xml.Name        `xml:"testsuite"`
    TestCases []JUnitTestCase `xml:"testcase"`
    Name      string          `xml:"name,attr"`
    Tests     int             `xml:"tests,attr"`
    Failures  int             `xml:"failures,attr"`
    Errors    int             `xml:"errors,attr"`
    Time      float64         `xml:"time,attr"`
}

type JUnitTestCase struct {
    Name           string               `xml:"name,attr"`
    ClassName      string               `xml:"classname,attr"`
    FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
    Skipped        *JUnitSkipped        `xml:"skipped,omitempty"`
    Time           float64              `xml:"time,attr"`
    SystemOut      string               `xml:"system-out,omitempty"`
}

type JUnitFailureMessage struct {
    Type    string `xml:"type,attr"`
    Message string `xml:",chardata"`
}

type JUnitSkipped struct {
    Message string `xml:",chardata"`
}

type JUnitReporter struct {
    suite          JUnitTestSuite
    filename       string
    testSuiteName  string
    ReporterConfig config.DefaultReporterConfigType
}

//NewJUnitReporter creates a new JUnit XML reporter.  The XML will be stored in the passed in filename.
func NewJUnitReporter(filename string) *JUnitReporter {
    return &JUnitReporter{
        filename: filename,
    }
}

func (reporter *JUnitReporter) SpecSuiteWillBegin(ginkgoConfig config.GinkgoConfigType, summary *types.SuiteSummary) {
    reporter.suite = JUnitTestSuite{
        Name:      summary.SuiteDescription,
        TestCases: []JUnitTestCase{},
    }
    reporter.testSuiteName = summary.SuiteDescription
    reporter.ReporterConfig = config.DefaultReporterConfig
}

func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
}

func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
    reporter.handleSetupSummary("BeforeSuite", setupSummary)
}

func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
    reporter.handleSetupSummary("AfterSuite", setupSummary)
}

func failureMessage(failure types.SpecFailure) string {
    return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String())
}

func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
    if setupSummary.State != types.SpecStatePassed {
        testCase := JUnitTestCase{
            Name:      name,
            ClassName: reporter.testSuiteName,
        }

        testCase.FailureMessage = &JUnitFailureMessage{
            Type:    reporter.failureTypeForState(setupSummary.State),
            Message: failureMessage(setupSummary.Failure),
        }
        testCase.SystemOut = setupSummary.CapturedOutput
        testCase.Time = setupSummary.RunTime.Seconds()
        reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
    }
}

func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
    testCase := JUnitTestCase{
        Name:      strings.Join(specSummary.ComponentTexts[1:], " "),
        ClassName: reporter.testSuiteName,
    }
    if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
        testCase.SystemOut = specSummary.CapturedOutput
    }
    if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
        testCase.FailureMessage = &JUnitFailureMessage{
            Type:    reporter.failureTypeForState(specSummary.State),
            Message: failureMessage(specSummary.Failure),
        }
        if specSummary.State == types.SpecStatePanicked {
            testCase.FailureMessage.Message += fmt.Sprintf("\n\nPanic: %s\n\nFull stack:\n%s",
                specSummary.Failure.ForwardedPanic,
                specSummary.Failure.Location.FullStackTrace)
        }
        testCase.SystemOut = specSummary.CapturedOutput
    }
    if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
        testCase.Skipped = &JUnitSkipped{}
        if specSummary.Failure.Message != "" {
            testCase.Skipped.Message = failureMessage(specSummary.Failure)
        }
    }
    testCase.Time = specSummary.RunTime.Seconds()
    reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
}

func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
    reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun
    reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000
    reporter.suite.Failures = summary.NumberOfFailedSpecs
    reporter.suite.Errors = 0
    if reporter.ReporterConfig.ReportFile != "" {
        reporter.filename = reporter.ReporterConfig.ReportFile
        fmt.Printf("\nJUnit path was configured: %s\n", reporter.filename)
    }
    filePath, _ := filepath.Abs(reporter.filename)
    dirPath := filepath.Dir(filePath)
    err := os.MkdirAll(dirPath, os.ModePerm)
    if err != nil {
        fmt.Printf("\nFailed to create JUnit directory: %s\n\t%s", filePath, err.Error())
    }
    file, err := os.Create(filePath)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Failed to create JUnit report file: %s\n\t%s", filePath, err.Error())
    }
    defer file.Close()
    file.WriteString(xml.Header)
    encoder := xml.NewEncoder(file)
    encoder.Indent("  ", "    ")
    err = encoder.Encode(reporter.suite)
    if err == nil {
        fmt.Fprintf(os.Stdout, "\nJUnit report was created: %s\n", filePath)
    } else {
        fmt.Fprintf(os.Stderr, "\nFailed to generate JUnit report data:\n\t%s", err.Error())
    }
}

func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
    switch state {
    case types.SpecStateFailed:
        return "Failure"
    case types.SpecStateTimedOut:
        return "Timeout"
    case types.SpecStatePanicked:
        return "Panic"
    default:
        return ""
    }
}
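
For reference, this reporter was attached through ginkgo v1's custom-reporter entry point; ginkgo/v2 (which this commit vendors) produces JUnit output through its own reporters package instead. The v1 wiring being retired looked roughly like this (suite name and output path are illustrative):

package e2e_test

import (
    "testing"

    . "github.com/onsi/ginkgo"
    "github.com/onsi/ginkgo/reporters"
    . "github.com/onsi/gomega"
)

func TestE2E(t *testing.T) {
    RegisterFailHandler(Fail)
    // RunSpecsWithDefaultAndCustomReporters runs the default console reporter
    // plus any extras; here the JUnit reporter writes junit_e2e.xml.
    junitReporter := reporters.NewJUnitReporter("junit_e2e.xml")
    RunSpecsWithDefaultAndCustomReporters(t, "E2E Suite", []Reporter{junitReporter})
}
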
@@ -1,15 +0,0 @@
package reporters

import (
    "github.com/onsi/ginkgo/config"
    "github.com/onsi/ginkgo/types"
)

type Reporter interface {
    SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
    BeforeSuiteDidRun(setupSummary *types.SetupSummary)
    SpecWillRun(specSummary *types.SpecSummary)
    SpecDidComplete(specSummary *types.SpecSummary)
    AfterSuiteDidRun(setupSummary *types.SetupSummary)
    SpecSuiteDidEnd(summary *types.SuiteSummary)
}
64 vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go generated vendored
@@ -1,64 +0,0 @@
package stenographer

import (
    "fmt"
    "strings"
)

func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
    var out string

    if len(args) > 0 {
        out = fmt.Sprintf(format, args...)
    } else {
        out = format
    }

    if s.color {
        return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
    } else {
        return out
    }
}

func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
    fmt.Fprintln(s.w, text)
    fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text)))
}

func (s *consoleStenographer) printNewLine() {
    fmt.Fprintln(s.w, "")
}

func (s *consoleStenographer) printDelimiter() {
    fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
}

func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
    fmt.Fprint(s.w, s.indent(indentation, format, args...))
}

func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
    fmt.Fprintln(s.w, s.indent(indentation, format, args...))
}

func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
    var text string

    if len(args) > 0 {
        text = fmt.Sprintf(format, args...)
    } else {
        text = format
    }

    stringArray := strings.Split(text, "\n")
    padding := ""
    if indentation >= 0 {
        padding = strings.Repeat(" ", indentation)
    }
    for i, s := range stringArray {
        stringArray[i] = fmt.Sprintf("%s%s", padding, s)
    }

    return strings.Join(stringArray, "\n")
}
142 vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go generated vendored
@@ -1,142 +0,0 @@
package stenographer

import (
    "sync"

    "github.com/onsi/ginkgo/types"
)

func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
    return FakeStenographerCall{
        Method: method,
        Args:   args,
    }
}

type FakeStenographer struct {
    calls []FakeStenographerCall
    lock  *sync.Mutex
}

type FakeStenographerCall struct {
    Method string
    Args   []interface{}
}

func NewFakeStenographer() *FakeStenographer {
    stenographer := &FakeStenographer{
        lock: &sync.Mutex{},
    }
    stenographer.Reset()
    return stenographer
}

func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
    stenographer.lock.Lock()
    defer stenographer.lock.Unlock()

    return stenographer.calls
}

func (stenographer *FakeStenographer) Reset() {
    stenographer.lock.Lock()
    defer stenographer.lock.Unlock()

    stenographer.calls = make([]FakeStenographerCall, 0)
}

func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
    stenographer.lock.Lock()
    defer stenographer.lock.Unlock()

    results := make([]FakeStenographerCall, 0)
    for _, call := range stenographer.calls {
        if call.Method == method {
            results = append(results, call)
        }
    }

    return results
}

func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
    stenographer.lock.Lock()
    defer stenographer.lock.Unlock()

    stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
}

func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
    stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
}

func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
    stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
}

func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
    stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct)
}

func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
    stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
}

func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
    stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct)
}

func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
    stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
}

func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
    stenographer.registerCall("AnnounceSpecWillRun", spec)
}

func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
|
||||||
stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
|
||||||
stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
|
|
||||||
}
|
|
||||||
func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
|
|
||||||
stenographer.registerCall("AnnounceCapturedOutput", output)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (stenographer *FakeStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
|
|
||||||
stenographer.registerCall("AnnounceSuccessfulSpec", spec)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (stenographer *FakeStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
|
|
||||||
stenographer.registerCall("AnnounceSuccessfulSlowSpec", spec, succinct)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (stenographer *FakeStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
|
|
||||||
stenographer.registerCall("AnnounceSuccessfulMeasurement", spec, succinct)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
|
|
||||||
stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
|
||||||
stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
|
||||||
stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
|
||||||
stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
|
||||||
stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
|
|
||||||
stenographer.registerCall("SummarizeFailures", summaries)
|
|
||||||
}
|
|
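Because every method funnels through `registerCall`, a test can drive code that talks to a `Stenographer` and then assert on the recorded calls. A sketch against the v1 package removed here (test name and values are illustrative):

```go
package stenographer_test

import (
	"testing"

	"github.com/onsi/ginkgo/reporters/stenographer"
)

func TestFakeStenographerRecordsCalls(t *testing.T) {
	fake := stenographer.NewFakeStenographer()

	// Exercise the interface; the fake records the method name and args.
	fake.AnnounceTotalNumberOfSpecs(12, false)

	calls := fake.CallsTo("AnnounceTotalNumberOfSpecs")
	if len(calls) != 1 {
		t.Fatalf("expected one recorded call, got %d", len(calls))
	}
	if calls[0].Args[0] != 12 || calls[0].Args[1] != false {
		t.Fatalf("unexpected recorded args: %v", calls[0].Args)
	}
}
```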
572 vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go (generated, vendored)
@@ -1,572 +0,0 @@
/*
The stenographer is used by Ginkgo's reporters to generate output.

Move along, nothing to see here.
*/

package stenographer

import (
	"fmt"
	"io"
	"runtime"
	"strings"

	"github.com/onsi/ginkgo/types"
)

const defaultStyle = "\x1b[0m"
const boldStyle = "\x1b[1m"
const redColor = "\x1b[91m"
const greenColor = "\x1b[32m"
const yellowColor = "\x1b[33m"
const cyanColor = "\x1b[36m"
const grayColor = "\x1b[90m"
const lightGrayColor = "\x1b[37m"

type cursorStateType int

const (
	cursorStateTop cursorStateType = iota
	cursorStateStreaming
	cursorStateMidBlock
	cursorStateEndBlock
)

type Stenographer interface {
	AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
	AnnounceAggregatedParallelRun(nodes int, succinct bool)
	AnnounceParallelRun(node int, nodes int, succinct bool)
	AnnounceTotalNumberOfSpecs(total int, succinct bool)
	AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
	AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)

	AnnounceSpecWillRun(spec *types.SpecSummary)
	AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
	AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)

	AnnounceCapturedOutput(output string)

	AnnounceSuccessfulSpec(spec *types.SpecSummary)
	AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool)
	AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool)

	AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
	AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)

	AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
	AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
	AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)

	SummarizeFailures(summaries []*types.SpecSummary)
}

func New(color bool, enableFlakes bool, writer io.Writer) Stenographer {
	denoter := "•"
	if runtime.GOOS == "windows" {
		denoter = "+"
	}
	return &consoleStenographer{
		color:        color,
		denoter:      denoter,
		cursorState:  cursorStateTop,
		enableFlakes: enableFlakes,
		w:            writer,
	}
}

type consoleStenographer struct {
	color        bool
	denoter      string
	cursorState  cursorStateType
	enableFlakes bool
	w            io.Writer
}

var alternatingColors = []string{defaultStyle, grayColor}

func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
	if succinct {
		s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
		return
	}
	s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
	s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
	if randomizingAll {
		s.print(0, " - Will randomize all specs")
	}
	s.printNewLine()
}

func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
	if succinct {
		s.print(0, "- node #%d ", node)
		return
	}
	s.println(0,
		"Parallel test node %s/%s.",
		s.colorize(boldStyle, "%d", node),
		s.colorize(boldStyle, "%d", nodes),
	)
	s.printNewLine()
}

func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
	if succinct {
		s.print(0, "- %d nodes ", nodes)
		return
	}
	s.println(0,
		"Running in parallel across %s nodes",
		s.colorize(boldStyle, "%d", nodes),
	)
	s.printNewLine()
}

func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
	if succinct {
		s.print(0, "- %d/%d specs ", specsToRun, total)
		s.stream()
		return
	}
	s.println(0,
		"Will run %s of %s specs",
		s.colorize(boldStyle, "%d", specsToRun),
		s.colorize(boldStyle, "%d", total),
	)

	s.printNewLine()
}

func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
	if succinct {
		s.print(0, "- %d specs ", total)
		s.stream()
		return
	}
	s.println(0,
		"Will run %s specs",
		s.colorize(boldStyle, "%d", total),
	)

	s.printNewLine()
}

func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
	if succinct && summary.SuiteSucceeded {
		s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
		return
	}
	s.printNewLine()
	color := greenColor
	if !summary.SuiteSucceeded {
		color = redColor
	}
	s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))

	status := ""
	if summary.SuiteSucceeded {
		status = s.colorize(boldStyle+greenColor, "SUCCESS!")
	} else {
		status = s.colorize(boldStyle+redColor, "FAIL!")
	}

	flakes := ""
	if s.enableFlakes {
		flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs)
	}

	s.print(0,
		"%s -- %s | %s | %s | %s\n",
		status,
		s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
		s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes,
		s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
		s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
	)
}

func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
	s.startBlock()
	for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
		s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
	}

	indentation := 0
	if len(spec.ComponentTexts) > 2 {
		indentation = 1
		s.printNewLine()
	}
	index := len(spec.ComponentTexts) - 1
	s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
	s.printNewLine()
	s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
	s.printNewLine()
	s.midBlock()
}

func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
	s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
}

func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
	s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
}

func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
	s.startBlock()
	var message string
	switch summary.State {
	case types.SpecStateFailed:
		message = "Failure"
	case types.SpecStatePanicked:
		message = "Panic"
	case types.SpecStateTimedOut:
		message = "Timeout"
	}

	s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))

	indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)

	s.printNewLine()
	s.printFailure(indentation, summary.State, summary.Failure, fullTrace)

	s.endBlock()
}

func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
	if output == "" {
		return
	}

	s.startBlock()
	s.println(0, output)
	s.midBlock()
}

func (s *consoleStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
	s.print(0, s.colorize(greenColor, s.denoter))
	s.stream()
}

func (s *consoleStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
	s.printBlockWithMessage(
		s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
		"",
		spec,
		succinct,
	)
}

func (s *consoleStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
	s.printBlockWithMessage(
		s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
		s.measurementReport(spec, succinct),
		spec,
		succinct,
	)
}

func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
	if noisy {
		s.printBlockWithMessage(
			s.colorize(yellowColor, "P [PENDING]"),
			"",
			spec,
			false,
		)
	} else {
		s.print(0, s.colorize(yellowColor, "P"))
		s.stream()
	}
}

func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	// Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
	if succinct || spec.Failure == (types.SpecFailure{}) {
		s.print(0, s.colorize(cyanColor, "S"))
		s.stream()
	} else {
		s.startBlock()
		s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))

		indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)

		s.printNewLine()
		s.printSkip(indentation, spec.Failure)
		s.endBlock()
	}
}

func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace)
}

func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace)
}

func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace)
}

func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
	failingSpecs := []*types.SpecSummary{}

	for _, summary := range summaries {
		if summary.HasFailureState() {
			failingSpecs = append(failingSpecs, summary)
		}
	}

	if len(failingSpecs) == 0 {
		return
	}

	s.printNewLine()
	s.printNewLine()
	plural := "s"
	if len(failingSpecs) == 1 {
		plural = ""
	}
	s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
	for _, summary := range failingSpecs {
		s.printNewLine()
		if summary.HasFailureState() {
			if summary.TimedOut() {
				s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
			} else if summary.Panicked() {
				s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
			} else if summary.Failed() {
				s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
			}
			s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
			s.printNewLine()
			s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
		}
	}
}

func (s *consoleStenographer) startBlock() {
	if s.cursorState == cursorStateStreaming {
		s.printNewLine()
		s.printDelimiter()
	} else if s.cursorState == cursorStateMidBlock {
		s.printNewLine()
	}
}

func (s *consoleStenographer) midBlock() {
	s.cursorState = cursorStateMidBlock
}

func (s *consoleStenographer) endBlock() {
	s.printDelimiter()
	s.cursorState = cursorStateEndBlock
}

func (s *consoleStenographer) stream() {
	s.cursorState = cursorStateStreaming
}

func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
	s.startBlock()
	s.println(0, header)

	indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)

	if message != "" {
		s.printNewLine()
		s.println(indentation, message)
	}

	s.endBlock()
}

func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
	s.startBlock()
	s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))

	indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)

	s.printNewLine()
	s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
	s.endBlock()
}

func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
	switch failedComponentType {
	case types.SpecComponentTypeBeforeSuite:
		return " in Suite Setup (BeforeSuite)"
	case types.SpecComponentTypeAfterSuite:
		return " in Suite Teardown (AfterSuite)"
	case types.SpecComponentTypeBeforeEach:
		return " in Spec Setup (BeforeEach)"
	case types.SpecComponentTypeJustBeforeEach:
		return " in Spec Setup (JustBeforeEach)"
	case types.SpecComponentTypeAfterEach:
		return " in Spec Teardown (AfterEach)"
	}

	return ""
}

func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
	s.println(indentation, s.colorize(cyanColor, spec.Message))
	s.printNewLine()
	s.println(indentation, spec.Location.String())
}

func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
	if state == types.SpecStatePanicked {
		s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
		s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
		s.println(indentation, failure.Location.String())
		s.printNewLine()
		s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
		s.println(indentation, failure.Location.FullStackTrace)
	} else {
		s.println(indentation, s.colorize(redColor, failure.Message))
		s.printNewLine()
		s.println(indentation, failure.Location.String())
		if fullTrace {
			s.printNewLine()
			s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
			s.println(indentation, failure.Location.FullStackTrace)
		}
	}
}

func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
	startIndex := 1
	indentation := 0

	if len(componentTexts) == 1 {
		startIndex = 0
	}

	for i := startIndex; i < len(componentTexts); i++ {
		if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
			color := redColor
			if state == types.SpecStateSkipped {
				color = cyanColor
			}
			blockType := ""
			switch failedComponentType {
			case types.SpecComponentTypeBeforeSuite:
				blockType = "BeforeSuite"
			case types.SpecComponentTypeAfterSuite:
				blockType = "AfterSuite"
			case types.SpecComponentTypeBeforeEach:
				blockType = "BeforeEach"
			case types.SpecComponentTypeJustBeforeEach:
				blockType = "JustBeforeEach"
			case types.SpecComponentTypeAfterEach:
				blockType = "AfterEach"
			case types.SpecComponentTypeIt:
				blockType = "It"
			case types.SpecComponentTypeMeasure:
				blockType = "Measurement"
			}
			if succinct {
				s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
			} else {
				s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
				s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
			}
		} else {
			if succinct {
				s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
			} else {
				s.println(indentation, componentTexts[i])
				s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
			}
		}
		indentation++
	}

	return indentation
}

func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
	indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)

	if succinct {
		if len(componentTexts) > 0 {
			s.printNewLine()
			s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
		}
		s.printNewLine()
		indentation = 1
	} else {
		indentation--
	}

	return indentation
}

func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
	orderedKeys := make([]string, len(measurements))
	for key, measurement := range measurements {
		orderedKeys[measurement.Order] = key
	}
	return orderedKeys
}

func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
	if len(spec.Measurements) == 0 {
		return "Found no measurements"
	}

	message := []string{}
	orderedKeys := s.orderedMeasurementKeys(spec.Measurements)

	if succinct {
		message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
		for _, key := range orderedKeys {
			measurement := spec.Measurements[key]
			message = append(message, fmt.Sprintf("  %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
				s.colorize(boldStyle, "%s", measurement.Name),
				measurement.SmallestLabel,
				s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
				measurement.Units,
				measurement.AverageLabel,
				s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
				measurement.Units,
				s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
				measurement.Units,
				measurement.LargestLabel,
				s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
				measurement.Units,
			))
		}
	} else {
		message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
		for _, key := range orderedKeys {
			measurement := spec.Measurements[key]
			info := ""
			if measurement.Info != nil {
				message = append(message, fmt.Sprintf("%v", measurement.Info))
			}

			message = append(message, fmt.Sprintf("%s:\n%s  %s: %s%s\n  %s: %s%s\n  %s: %s%s ± %s%s",
				s.colorize(boldStyle, "%s", measurement.Name),
				info,
				measurement.SmallestLabel,
				s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
				measurement.Units,
				measurement.LargestLabel,
				s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
				measurement.Units,
				measurement.AverageLabel,
				s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
				measurement.Units,
				s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
				measurement.Units,
			))
		}
	}

	return strings.Join(message, "\n")
}
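Taken together, the interface gives v1 reporters a small vocabulary: announce the suite, stream per-spec progress, then summarize. A usage sketch against the constructor removed above (the summary field values are illustrative, not real results):

```go
package main

import (
	"os"
	"time"

	"github.com/onsi/ginkgo/reporters/stenographer"
	"github.com/onsi/ginkgo/types"
)

func main() {
	// color on, flake reporting off, write to stdout
	s := stenographer.New(true, false, os.Stdout)

	s.AnnounceSuite("Integration Suite", 1234, false, false)
	s.AnnounceTotalNumberOfSpecs(8, false)

	// Normally ginkgo builds this summary; the fields match those the
	// stenographer reads in AnnounceSpecRunCompletion.
	s.AnnounceSpecRunCompletion(&types.SuiteSummary{
		SuiteSucceeded:             true,
		NumberOfTotalSpecs:         8,
		NumberOfSpecsThatWillBeRun: 8,
		NumberOfPassedSpecs:        8,
		RunTime:                    3 * time.Second,
	}, false)
}
```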
43 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md (generated, vendored)
@@ -1,43 +0,0 @@
# go-colorable

Colorable writer for Windows.

For example, most logger packages don't show colors on Windows. (I know we can do it with ansicon, but I don't want to.)
This package makes it possible to handle ANSI color escape sequences on Windows.

## Too Bad!

![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)

## So Good!

![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)

## Usage

```go
logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
logrus.SetOutput(colorable.NewColorableStdout())

logrus.Info("succeeded")
logrus.Warn("not correct")
logrus.Error("something error")
logrus.Fatal("panic")
```

The above code also compiles on non-Windows OSes.

## Installation

```
$ go get github.com/mattn/go-colorable
```

# License

MIT

# Author

Yasuhiro Matsumoto (a.k.a mattn)
24 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go (generated, vendored)
@@ -1,24 +0,0 @@
// +build !windows

package colorable

import (
	"io"
	"os"
)

func NewColorable(file *os.File) io.Writer {
	if file == nil {
		panic("nil passed instead of *os.File to NewColorable()")
	}

	return file
}

func NewColorableStdout() io.Writer {
	return os.Stdout
}

func NewColorableStderr() io.Writer {
	return os.Stderr
}
57 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go (generated, vendored)
@@ -1,57 +0,0 @@
package colorable

import (
	"bytes"
	"fmt"
	"io"
)

type NonColorable struct {
	out     io.Writer
	lastbuf bytes.Buffer
}

func NewNonColorable(w io.Writer) io.Writer {
	return &NonColorable{out: w}
}

func (w *NonColorable) Write(data []byte) (n int, err error) {
	er := bytes.NewBuffer(data)
loop:
	for {
		c1, _, err := er.ReadRune()
		if err != nil {
			break loop
		}
		if c1 != 0x1b {
			fmt.Fprint(w.out, string(c1))
			continue
		}
		c2, _, err := er.ReadRune()
		if err != nil {
			w.lastbuf.WriteRune(c1)
			break loop
		}
		if c2 != 0x5b {
			w.lastbuf.WriteRune(c1)
			w.lastbuf.WriteRune(c2)
			continue
		}

		var buf bytes.Buffer
		for {
			c, _, err := er.ReadRune()
			if err != nil {
				w.lastbuf.WriteRune(c1)
				w.lastbuf.WriteRune(c2)
				w.lastbuf.Write(buf.Bytes())
				break loop
			}
			if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
				break
			}
			buf.Write([]byte(string(c)))
		}
	}
	return len(data) - w.lastbuf.Len(), nil
}
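The `Write` loop above consumes `ESC [` sequences up to the terminating letter (or `@`) and forwards everything else rune by rune. The upstream `github.com/mattn/go-colorable` module, from which this vendored copy derives, exposes the same constructor; a small usage sketch:

```go
package main

import (
	"fmt"
	"os"

	colorable "github.com/mattn/go-colorable"
)

func main() {
	// NewNonColorable wraps a writer and strips ANSI escape sequences
	// before writing through.
	w := colorable.NewNonColorable(os.Stdout)

	// The color codes are consumed; only "green plain" reaches stdout.
	fmt.Fprintln(w, "\x1b[32mgreen\x1b[0m plain")
}
```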
9 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE (generated, vendored)
@@ -1,9 +0,0 @@
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>

MIT License (Expat)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
37 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md (generated, vendored)
@@ -1,37 +0,0 @@
# go-isatty

isatty for golang

## Usage

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	if isatty.IsTerminal(os.Stdout.Fd()) {
		fmt.Println("Is Terminal")
	} else {
		fmt.Println("Is Not Terminal")
	}
}
```

## Installation

```
$ go get github.com/mattn/go-isatty
```

# License

MIT

# Author

Yasuhiro Matsumoto (a.k.a mattn)
2 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go (generated, vendored)
@@ -1,2 +0,0 @@
// Package isatty implements interface to isatty
package isatty
9 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go (generated, vendored)
@@ -1,9 +0,0 @@
// +build appengine

package isatty

// IsTerminal returns true if the file descriptor is a terminal, which
// is always false on appengine classic, a sandboxed PaaS.
func IsTerminal(fd uintptr) bool {
	return false
}
18 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go (generated, vendored)
@@ -1,18 +0,0 @@
// +build darwin freebsd openbsd netbsd
// +build !appengine

package isatty

import (
	"syscall"
	"unsafe"
)

const ioctlReadTermios = syscall.TIOCGETA

// IsTerminal returns true if the file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
	var termios syscall.Termios
	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
	return err == 0
}
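The BSD implementation issues a `TIOCGETA` termios ioctl and treats success as "is a terminal". Modern code gets the identical check from `golang.org/x/term` without per-OS build tags; a sketch under the assumption that x/term is available (it is not necessarily a dependency of this repo):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	// term.IsTerminal performs the same termios ioctl under the hood.
	if term.IsTerminal(int(os.Stdout.Fd())) {
		fmt.Println("Is Terminal")
	} else {
		fmt.Println("Is Not Terminal")
	}
}
```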
Some files were not shown because too many files have changed in this diff.