Mirror of https://github.com/microsoft/LaBench.git
First Open Source version
Commit c40cd76ecf
@@ -0,0 +1,13 @@
out/
/Run*/
/debug
/labench.exe
/labench
*.help
*.yaml

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
@@ -0,0 +1,8 @@
// Place your settings in this file to overwrite default and user settings.
{
    "go.formatOnSave": true,
    "go.buildOnSave": true,
    "go.formatTool": "goreturns",
    "go.lintTool": "gometalinter",
    "go.lintOnSave": true
}
@@ -0,0 +1,3 @@
set GOARCH=amd64
set GOOS=linux
go build %*
@@ -0,0 +1,3 @@
set GOARCH=amd64
set GOOS=windows
go build %*
@@ -0,0 +1,21 @@
MIT License

Copyright (c) Microsoft Corporation. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
@@ -0,0 +1,21 @@
# NOTICES AND INFORMATION
Do Not Translate or Localize

This software incorporates material from third parties. Microsoft makes certain
open source code available at http://3rdpartysource.microsoft.com, or you may
send a check or money order for US $5.00, including the product name, the open
source component name, and version number, to:

Source Code Compliance Team
Microsoft Corporation
One Microsoft Way
Redmond, WA 98052
USA

Notwithstanding any other terms, you may reverse engineer this software to the
extent required to debug changes to any libraries licensed under the GNU Lesser
General Public License.

## bench ([github.com/tylertreat/bench](https://github.com/tylertreat/bench))

**Open Source License/Copyright Notice:** Apache 2.0 License [bench/LICENSE](bench/LICENSE)
@@ -0,0 +1,35 @@
## Introduction

LaBench (for LAtency BENCHmark) is a tool that measures latency percentiles of HTTP GET or POST requests under a very even and steady load.

The main feature and distinction of this tool is that (unlike many other benchmarking tools) it dictates the request rate to the server and tries to maintain that rate very evenly, even when the server is experiencing slowdowns and hiccups, while other tools usually back off and let the server recover (see the [Coordinated Omission Problem](https://groups.google.com/forum/#!msg/mechanical-sympathy/icNZJejUHfE/BfDekfBEs_sJ) for more details).

The main difference from the [wrk2](https://github.com/giltene/wrk2) tool is the very even load generated by LaBench.
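To make the target rate concrete, the tool derives a fixed tick interval of one second divided by the request rate and hands out one request per tick (this mirrors the `expectedInterval` calculation in `bench/bench.go` later in this commit). A quick sketch of the arithmetic for a 200 req/s run:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	requestRatePerSec := 200
	// One tick per request: 1 s / 200 = 5 ms between sends.
	interval := time.Duration(float64(time.Second) / float64(requestRatePerSec))
	fmt.Println(interval) // 5ms
}
```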
## Quick-Start Guide

1. Copy or compile the LaBench binary (there are both Windows and Linux executables). The Windows version has a more precise clock.
2. Modify `labench.yaml` to meet your needs; the most basic parameters should be self-explanatory. For the full list of supported parameters see `full_config.yaml`.
3. Run the benchmark by simply running `labench` (you can also specify a .yaml file on the command line; `labench.yaml` is used by default). A minimal config and invocation are shown right after this list.
4. **BEFORE looking at the latency results**, check the following things in the tool output:
   1. *TimelyTicks percentage*. If it's less than, say, 99.9%, then you need to increase the number of Clients in the yaml config. It's very realistic to keep it at 100%.
   2. *TimelySends percentage*. If it's less than, say, 99.9%, then you need a beefier machine to run the test. It's very realistic to keep it at 100%.
   3. The number of errors returned by the server (non-200 responses). A small percentage is OK, but errors are not accounted for in the latency results.
   4. The throughput reported in the last line. It should be close to the RequestRatePerSec value in your .yaml config.
5. **If ANY of the above is not satisfied**, then the run was not valid and there is no point in looking at the latency results, so fix the problem and re-run.
6. The measurement results (latency percentiles) are placed in the `out/res.hgrm` file. You can open it in Excel or plot it at [http://hdrhistogram.github.io/HdrHistogram/plotFiles.html](http://hdrhistogram.github.io/HdrHistogram/plotFiles.html).
7. Note that the plotted results have a logarithmic X axis (i.e. the distance between 99% and 99.9% is the same as the distance between 99.9% and 99.99%).
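The `labench.yaml` shipped in this commit is already a minimal working config (it targets a placeholder URL at 2 requests per second for 3 seconds):

```yaml
RequestRatePerSec: 2
Duration: 3s
Request:
  URL: http://example.com
```

Run `labench` from the directory containing it (or `labench myconfig.yaml` for a non-default file name, where `myconfig.yaml` is whatever you named your config) and look for `out/res.hgrm` once it finishes.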

# Contributing

This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.microsoft.com.

When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
@@ -0,0 +1,202 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
@@ -0,0 +1,5 @@
# Heavily modified version of [github.com/tylertreat/bench](https://github.com/tylertreat/bench)

Bench is a generic latency benchmarking library. It's generic in the sense that it exposes a simple interface (`Requester`) which can be implemented for various systems under test. Several [example Requesters](https://github.com/tylertreat/bench/tree/master/requester) are provided out of the box.

Bench works by attempting to issue a fixed rate of requests per second and measuring the latency of each request issued synchronously. Latencies are captured using [HDR Histogram](https://github.com/codahale/hdrhistogram), which observes the complete latency distribution and attempts to correct for [Coordinated Omission](https://groups.google.com/forum/#!msg/mechanical-sympathy/icNZJejUHfE/BfDekfBEs_sJ). It provides facilities to generate output which can be [plotted](http://hdrhistogram.github.io/HdrHistogram/plotFiles.html) to produce latency-by-percentile graphs. A usage sketch of the `Requester` interface follows below.
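A minimal sketch of a custom `Requester` plugged into `NewBenchmark` (signatures follow `bench.go` in this commit; the `labench/bench` import path assumes this repo's module layout, and the simulated 1 ms request is a placeholder for a real call):

```go
package main

import (
	"fmt"
	"time"

	"labench/bench"
)

// sleepRequester stands in for a real system under test.
type sleepRequester struct{}

func (r *sleepRequester) Setup() error    { return nil }
func (r *sleepRequester) Teardown() error { return nil }
func (r *sleepRequester) Request() error {
	time.Sleep(1 * time.Millisecond) // replace with a real request
	return nil
}

// sleepFactory hands a Requester to each benchmark connection.
type sleepFactory struct{}

func (f *sleepFactory) GetRequester(number uint64) bench.Requester { return &sleepRequester{} }

func main() {
	// 100 req/s for 10 s across 16 connections, no base latency subtracted.
	b := bench.NewBenchmark(&sleepFactory{}, 100, 16, 10*time.Second, 0)
	summary, err := b.Run(false /* outputJson */, false /* forceTightTicker */)
	if err != nil {
		panic(err)
	}
	fmt.Println(summary)
}
```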
|
@@ -0,0 +1,390 @@
|
|||
/*
|
||||
Package bench provides a generic framework for performing latency benchmarks.
|
||||
*/
|
||||
package bench
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/codahale/hdrhistogram"
|
||||
)
|
||||
|
||||
const (
|
||||
minRecordableLatencyNS = 1000000
|
||||
maxRecordableLatencyNS = 100000000000
|
||||
sigFigs = 5
|
||||
)
|
||||
|
||||
// RequesterFactory creates new Requesters.
|
||||
type RequesterFactory interface {
|
||||
// GetRequester returns a new Requester, called for each Benchmark
|
||||
// connection.
|
||||
GetRequester(number uint64) Requester
|
||||
}
|
||||
|
||||
// Requester synchronously issues requests for a particular system under test.
|
||||
type Requester interface {
|
||||
// Setup prepares the Requester for benchmarking.
|
||||
Setup() error
|
||||
|
||||
// Request performs a synchronous request to the system under test.
|
||||
Request() error
|
||||
|
||||
// Teardown is called upon benchmark completion.
|
||||
Teardown() error
|
||||
}
|
||||
|
||||
// Benchmark performs a system benchmark by attempting to issue requests at a
|
||||
// specified rate and capturing the latency distribution. The request rate is
|
||||
// divided across the number of configured connections.
|
||||
type Benchmark struct {
|
||||
connections uint64
|
||||
requestRate float64
|
||||
duration time.Duration
|
||||
baseLatency time.Duration
|
||||
expectedInterval time.Duration
|
||||
successHistogram *hdrhistogram.Histogram
|
||||
successTotal uint64
|
||||
errorTotal uint64
|
||||
avgRequestTime float64
|
||||
elapsed time.Duration
|
||||
factory RequesterFactory
|
||||
timelyTicks uint64
|
||||
missedTicks uint64
|
||||
timelySends uint64
|
||||
lateSends uint64
|
||||
errors map[string]int
|
||||
}
|
||||
|
||||
// NewBenchmark creates a Benchmark which runs a system benchmark using the
|
||||
// given RequesterFactory. The requestRate argument specifies the number of
|
||||
// requests per second to issue; it must be positive. Ticks are generated at
// that rate and shared by all connections, so the connections argument only
// controls how many workers are available to pick them up (a zero value is
// treated as 1). The duration argument specifies how long to run the
// benchmark, and baseLatency is subtracted from every recorded latency.
|
||||
func NewBenchmark(factory RequesterFactory, requestRate, connections uint64, duration time.Duration, baseLatency time.Duration) *Benchmark {
|
||||
|
||||
if connections == 0 {
|
||||
connections = 1
|
||||
}
|
||||
|
||||
if requestRate <= 0 {
|
||||
log.Panicln("RequestRate must be positive")
|
||||
}
|
||||
|
||||
return &Benchmark{
|
||||
connections: connections,
|
||||
requestRate: float64(requestRate),
|
||||
duration: duration,
|
||||
baseLatency: baseLatency,
|
||||
expectedInterval: time.Duration(float64(time.Second) / float64(requestRate)),
|
||||
successHistogram: hdrhistogram.New(minRecordableLatencyNS, maxRecordableLatencyNS, sigFigs),
|
||||
factory: factory,
|
||||
errors: make(map[string]int)}
|
||||
}
|
||||
|
||||
// Run the benchmark and return a summary of the results. An error is returned
|
||||
// if something went wrong along the way.
|
||||
func (b *Benchmark) Run(outputJson bool, forceTightTicker bool) (*Summary, error) {
|
||||
var (
|
||||
ticker = make(chan time.Time)
|
||||
results = make(chan int64, 100)
|
||||
errors = make(chan error, 100)
|
||||
done = make(chan struct{})
|
||||
stopCollector = make(chan struct{})
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
|
||||
// Prepare connection benchmarks
|
||||
wg.Add(int(b.connections))
|
||||
for i := uint64(0); i < b.connections; i++ {
|
||||
i := i
|
||||
go func() {
|
||||
b.worker(b.factory.GetRequester(i), ticker, results, errors)
|
||||
// log.Printf("Worker %d done\n", i)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
// Prepare ticker
|
||||
go b.tickerFunc(done, ticker, forceTightTicker)
|
||||
|
||||
// Prepare results collector
|
||||
go func() {
|
||||
b.collectorFunc(stopCollector, results, errors)
|
||||
// log.Println("Collector done")
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// Wait for completion of workers
|
||||
wg.Wait()
|
||||
// log.Println("Workers have finished")
|
||||
|
||||
wg.Add(1)
|
||||
close(stopCollector)
|
||||
wg.Wait()
|
||||
|
||||
// log.Println("Collector has finished")
|
||||
|
||||
fmt.Printf("Ticks=%d, TimelyTicks = %d, MissedTicks = %d, %.2f%% good\n", b.timelyTicks+b.missedTicks, b.timelyTicks, b.missedTicks, float64(b.timelyTicks)*100/float64(b.timelyTicks+b.missedTicks))
|
||||
fmt.Printf("Sends=%d, TimelySends = %d, LateSends = %d, %.2f%% good\n", b.timelySends+b.lateSends, b.timelySends, b.lateSends, float64(b.timelySends)*100/float64(b.timelySends+b.lateSends))
|
||||
|
||||
if len(b.errors) > 0 {
|
||||
fmt.Println()
|
||||
fmt.Println("Errors:")
|
||||
for etext, count := range b.errors {
|
||||
fmt.Println(count, "=", etext)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
summary := b.summarize(outputJson)
|
||||
return summary, nil
|
||||
}
|
||||
|
||||
func (b *Benchmark) collectorFunc(doneCh <-chan struct{}, results <-chan int64, errors <-chan error) {
|
||||
var (
|
||||
baseLatency = b.baseLatency.Nanoseconds()
|
||||
successTotal int64
|
||||
avgRequestTime float64 // Average latency for processing requests
|
||||
)
|
||||
for {
|
||||
select {
|
||||
case sample := <-results:
|
||||
successTotal++
|
||||
maybePanic(b.successHistogram.RecordValue(sample - baseLatency))
|
||||
avgRequestTime = (avgRequestTime*float64(successTotal-1) + float64(sample/1e6)) / float64(successTotal)
|
||||
case err := <-errors:
|
||||
b.errors[err.Error()]++
|
||||
case <-doneCh:
|
||||
b.avgRequestTime = avgRequestTime
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func detectOsTimerResolution() time.Duration {
|
||||
bestTimerRes := time.Hour
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
start := time.Now()
|
||||
var timerRes time.Duration
|
||||
for {
|
||||
timerRes = time.Since(start)
|
||||
if timerRes > 0 {
|
||||
if timerRes < bestTimerRes {
|
||||
bestTimerRes = timerRes
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return bestTimerRes
|
||||
}
|
||||
|
||||
func (b *Benchmark) tickerFunc(doneCh chan<- struct{}, outCh chan<- time.Time, forceTightTicker bool) {
|
||||
timerRes := detectOsTimerResolution()
|
||||
fmt.Printf("ExpectedInterval = %v, Detected OS timer resolution = %v\n", b.expectedInterval, timerRes)
|
||||
if timerRes*3 > b.expectedInterval {
|
||||
fmt.Println("WARNING! Detected OS timer resolution may not be sufficient for desired request rate")
|
||||
}
|
||||
|
||||
// let other go routines to start running
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
|
||||
if !forceTightTicker && b.expectedInterval >= 7*timerRes {
|
||||
fmt.Println("Using sleeping ticker")
|
||||
b.sleepingTicker(doneCh, outCh)
|
||||
} else {
|
||||
fmt.Println("Using tight ticker")
|
||||
b.tightTicker(doneCh, outCh)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Benchmark) tightTicker(doneCh chan<- struct{}, outCh chan<- time.Time) {
|
||||
start := time.Now()
|
||||
lastTick := start
|
||||
|
||||
var (
|
||||
timelyTicks uint64
|
||||
missedTicks uint64
|
||||
)
|
||||
|
||||
expectedInterval := b.expectedInterval
|
||||
duration := b.duration
|
||||
|
||||
for {
|
||||
var thisTick time.Time
|
||||
|
||||
for {
|
||||
thisTick = time.Now()
|
||||
if thisTick.Sub(lastTick) >= expectedInterval {
|
||||
lastTick = lastTick.Add(expectedInterval)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case outCh <- thisTick:
|
||||
timelyTicks++
|
||||
default:
|
||||
missedTicks++
|
||||
}
|
||||
|
||||
if thisTick.Sub(start) > duration {
|
||||
// log.Println("Signaling DONE")
|
||||
close(outCh)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
close(doneCh)
|
||||
b.elapsed = time.Since(start)
|
||||
|
||||
b.timelyTicks = timelyTicks
|
||||
b.missedTicks = missedTicks
|
||||
}
|
||||
|
||||
func (b *Benchmark) sleepingTicker(doneCh chan<- struct{}, outCh chan<- time.Time) {
|
||||
completion := time.After(b.duration)
|
||||
|
||||
inCh := time.Tick(b.expectedInterval)
|
||||
|
||||
start := time.Now()
|
||||
|
||||
var (
|
||||
timelyTicks uint64
|
||||
missedTicks uint64
|
||||
)
|
||||
|
||||
// initial tick
|
||||
outCh <- start
|
||||
timelyTicks++
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case t := <-inCh:
|
||||
select {
|
||||
case outCh <- t:
|
||||
timelyTicks++
|
||||
default:
|
||||
missedTicks++
|
||||
}
|
||||
|
||||
case <-completion:
|
||||
// log.Println("Signaling DONE")
|
||||
close(outCh)
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
close(doneCh)
|
||||
b.elapsed = time.Since(start)
|
||||
|
||||
b.timelyTicks = timelyTicks
|
||||
b.missedTicks = missedTicks
|
||||
}
|
||||
|
||||
func maybePanic(err error) {
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Benchmark) worker(requester Requester, ticker <-chan time.Time, results chan<- int64, errors chan<- error) {
|
||||
maybePanic(requester.Setup())
|
||||
|
||||
// initialized to 0 by default
|
||||
var (
|
||||
lateSends uint64
|
||||
timelySends uint64
|
||||
errorTotal uint64
|
||||
successTotal uint64
|
||||
)
|
||||
|
||||
for tick := range ticker {
|
||||
before := time.Now()
|
||||
if before.Sub(tick) >= b.expectedInterval {
|
||||
lateSends++
|
||||
} else {
|
||||
timelySends++
|
||||
}
|
||||
|
||||
err := requester.Request()
|
||||
latency := time.Since(before).Nanoseconds()
|
||||
if err != nil {
|
||||
errorTotal++
|
||||
errors <- err
|
||||
} else {
|
||||
// On Linux, sometimes time interval measurement comes back negative, report it as 0
|
||||
if latency < 0 {
|
||||
latency = 0
|
||||
}
|
||||
results <- latency
|
||||
successTotal++
|
||||
}
|
||||
}
|
||||
|
||||
atomic.AddUint64(&b.lateSends, lateSends)
|
||||
atomic.AddUint64(&b.timelySends, timelySends)
|
||||
atomic.AddUint64(&b.errorTotal, errorTotal)
|
||||
atomic.AddUint64(&b.successTotal, successTotal)
|
||||
|
||||
err := requester.Teardown()
|
||||
if err != nil {
|
||||
log.Println("Failure in Teardown:", err)
|
||||
}
|
||||
}
|
||||
|
||||
// summarize returns a Summary of the last benchmark run.
|
||||
func (b *Benchmark) summarize(outputJson bool) *Summary {
|
||||
|
||||
//Checks the list of target errors against the errors found during benchmarking
|
||||
formattedErrors := make(map[string]int)
|
||||
r := regexp.MustCompile(`Expected \d+-response, but got (\d+)`)
|
||||
|
||||
//For every error that was found during benchmarking
|
||||
for errorText, count := range b.errors {
|
||||
|
||||
//Use regex to extract error code
|
||||
errorCodeMatches := r.FindStringSubmatch(errorText)
|
||||
|
||||
// If the regex extracted an errorCode then use the errorCode as the key
|
||||
if len(errorCodeMatches) > 1 {
|
||||
|
||||
//Set the error count
|
||||
errorCode := errorCodeMatches[1]
|
||||
formattedErrors[errorCode] = count
|
||||
|
||||
} else {
|
||||
//If the error doesnt have an errorCode then use the full text as the key
|
||||
formattedErrors[errorText] = count
|
||||
}
|
||||
}
|
||||
|
||||
return &Summary{
|
||||
SuccessTotal: b.successTotal,
|
||||
ErrorTotal: b.errorTotal,
|
||||
TimeElapsed: b.elapsed,
|
||||
SuccessHistogram: hdrhistogram.Import(b.successHistogram.Export()),
|
||||
Throughput: float64(b.successTotal+b.errorTotal) / b.elapsed.Seconds(),
|
||||
AvgRequestTime: b.avgRequestTime,
|
||||
RequestRate: b.requestRate,
|
||||
Connections: b.connections,
|
||||
Errors: formattedErrors,
|
||||
TicksTimely: b.timelyTicks,
|
||||
TicksTimelyRatio: float64(b.timelyTicks) * 100 / float64(b.timelyTicks+b.missedTicks),
|
||||
SendsTimely: b.timelySends,
|
||||
SendsTimelyRatio: float64(b.timelySends) * 100 / float64(b.timelySends+b.lateSends),
|
||||
OutputJson: outputJson,
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,7 @@
module labench/bench

require (
	github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd
	github.com/mattn/go-runewidth v0.0.4 // indirect
	github.com/olekukonko/tablewriter v0.0.1
)
@@ -0,0 +1,6 @@
|
|||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
|
||||
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
|
@@ -0,0 +1,13 @@
|
|||
package bench
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
ntdll := syscall.MustLoadDLL("ntdll.dll")
|
||||
setTimerResolution := ntdll.MustFindProc("NtSetTimerResolution")
|
||||
var prevRes int
|
||||
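// NtSetTimerResolution takes the desired resolution in units of 100 ns,
// so 5000 here requests a 0.5 ms timer; prevRes receives the previous resolution.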
setTimerResolution.Call(5000, 1, uintptr(unsafe.Pointer(&prevRes)))
|
||||
}
|
|
@@ -0,0 +1,103 @@
|
|||
package bench
|
||||
|
||||
// Percentiles is a list of percentiles to include in a latency distribution,
|
||||
// e.g. 10.0, 50.0, 99.0, 99.99, etc.
|
||||
type Percentiles []float64
|
||||
|
||||
// Logarithmic percentile scale.
|
||||
var Logarithmic = Percentiles{
|
||||
0.0,
|
||||
10.0,
|
||||
20.0,
|
||||
30.0,
|
||||
40.0,
|
||||
50.0,
|
||||
55.0,
|
||||
60.0,
|
||||
65.0,
|
||||
70.0,
|
||||
75.0,
|
||||
77.5,
|
||||
80.0,
|
||||
82.5,
|
||||
85.0,
|
||||
87.5,
|
||||
88.75,
|
||||
90.0,
|
||||
91.25,
|
||||
92.5,
|
||||
93.75,
|
||||
94.375,
|
||||
95.0,
|
||||
95.625,
|
||||
96.25,
|
||||
96.875,
|
||||
97.1875,
|
||||
97.5,
|
||||
97.8125,
|
||||
98.125,
|
||||
98.4375,
|
||||
98.5938,
|
||||
98.75,
|
||||
98.9062,
|
||||
99.0625,
|
||||
99.2188,
|
||||
99.2969,
|
||||
99.375,
|
||||
99.4531,
|
||||
99.5313,
|
||||
99.6094,
|
||||
99.6484,
|
||||
99.6875,
|
||||
99.7266,
|
||||
99.7656,
|
||||
99.8047,
|
||||
99.8242,
|
||||
99.8437,
|
||||
99.8633,
|
||||
99.8828,
|
||||
99.9023,
|
||||
99.9121,
|
||||
99.9219,
|
||||
99.9316,
|
||||
99.9414,
|
||||
99.9512,
|
||||
99.9561,
|
||||
99.9609,
|
||||
99.9658,
|
||||
99.9707,
|
||||
99.9756,
|
||||
99.978,
|
||||
99.9805,
|
||||
99.9829,
|
||||
99.9854,
|
||||
99.9878,
|
||||
99.989,
|
||||
99.9902,
|
||||
99.9915,
|
||||
99.9927,
|
||||
99.9939,
|
||||
99.9945,
|
||||
99.9951,
|
||||
99.9957,
|
||||
99.9963,
|
||||
99.9969,
|
||||
99.9973,
|
||||
99.9976,
|
||||
99.9979,
|
||||
99.9982,
|
||||
99.9985,
|
||||
99.9986,
|
||||
99.9988,
|
||||
99.9989,
|
||||
99.9991,
|
||||
99.9992,
|
||||
99.9993,
|
||||
99.9994,
|
||||
99.9995,
|
||||
99.9996,
|
||||
99.9997,
|
||||
99.9998,
|
||||
99.9999,
|
||||
100.0,
|
||||
}
|
|
@@ -0,0 +1,166 @@
|
|||
package bench
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/codahale/hdrhistogram"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
// Summary contains the results of a Benchmark run.
|
||||
type Summary struct {
|
||||
Connections uint64
|
||||
RequestRate float64
|
||||
SuccessTotal uint64
|
||||
ErrorTotal uint64
|
||||
TimeElapsed time.Duration
|
||||
SuccessHistogram *hdrhistogram.Histogram
|
||||
Throughput float64
|
||||
AvgRequestTime float64
|
||||
Errors map[string]int
|
||||
TicksTimely uint64
|
||||
TicksTimelyRatio float64
|
||||
SendsTimely uint64
|
||||
SendsTimelyRatio float64
|
||||
OutputJson bool
|
||||
}
|
||||
|
||||
// Struct and functions for sorting errors
|
||||
type Error struct {
|
||||
ErrorCode string
|
||||
Count int
|
||||
}
|
||||
type ErrorList []Error
|
||||
|
||||
func (p ErrorList) Len() int { return len(p) }
|
||||
func (p ErrorList) Less(i, j int) bool { return p[i].Count < p[j].Count }
|
||||
func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
// String returns a stringified version of the Summary.
|
||||
func (s *Summary) String() string {
|
||||
requestTotal := s.SuccessTotal + s.ErrorTotal
|
||||
successRate := 0.
|
||||
if requestTotal > 0 {
|
||||
successRate = float64(s.SuccessTotal) / float64(requestTotal) * 100
|
||||
}
|
||||
|
||||
var outputBuffer bytes.Buffer
|
||||
|
||||
fmt.Fprintf(&outputBuffer,
|
||||
"\n{SuccessRate: %.2f%%, Throughput: %.2f req/s, AvgRequestTime: %.2f ms, Connections: %d, RequestRate: %.0f, RequestTotal: %d, SuccessTotal: %d, ErrorTotal: %d, TimeElapsed: %s}\n",
|
||||
successRate, s.Throughput, s.AvgRequestTime, s.Connections, s.RequestRate, requestTotal, s.SuccessTotal, s.ErrorTotal, s.TimeElapsed)
|
||||
|
||||
if s.OutputJson {
|
||||
// Serializing Summary object into JSON
|
||||
jsonString, err := json.Marshal(s)
|
||||
outputBuffer.WriteString("\nJson Output: ")
|
||||
outputBuffer.WriteString(string(jsonString) + "\n")
|
||||
if err != nil {
|
||||
outputBuffer.WriteString("Error creating Json\n")
|
||||
}
|
||||
}
|
||||
|
||||
metricsTable := tablewriter.NewWriter(&outputBuffer)
|
||||
metricsTable.SetHeader([]string{"Metric", "Absolute", "Percentage %"})
|
||||
|
||||
//Printing metric data as a table
|
||||
metricsTable.Append([]string{"Total Requests", strconv.FormatUint(requestTotal, 10), ""})
|
||||
metricsTable.Append([]string{"Successful Requests", strconv.FormatUint(s.SuccessTotal, 10), strconv.FormatFloat(successRate, 'f', 2, 64)})
|
||||
metricsTable.Append([]string{"Failed Requests", strconv.FormatUint(s.ErrorTotal, 10), strconv.FormatFloat(100-successRate, 'f', 2, 64)})
|
||||
metricsTable.Append([]string{"Time Elapsed (sec)", strconv.FormatFloat(s.TimeElapsed.Seconds(), 'f', 2, 64), ""})
|
||||
metricsTable.Append([]string{"Request Rate (req/sec)", strconv.FormatFloat(s.RequestRate, 'f', 2, 64), ""})
|
||||
metricsTable.Append([]string{"Throughput (req/sec)", strconv.FormatFloat(s.Throughput, 'f', 2, 64), ""})
|
||||
metricsTable.Append([]string{"AvgRequestTime (ms)", strconv.FormatFloat(s.AvgRequestTime, 'f', 2, 64), ""})
|
||||
metricsTable.Append([]string{"Timely Ticks", strconv.FormatUint(s.TicksTimely, 10), strconv.FormatFloat(s.TicksTimelyRatio, 'f', 2, 64)})
|
||||
metricsTable.Append([]string{"Timely Sends", strconv.FormatUint(s.SendsTimely, 10), strconv.FormatFloat(s.SendsTimelyRatio, 'f', 2, 64)})
|
||||
|
||||
//Printing error results as a table
|
||||
//Laying out headers and values
|
||||
errorTable := tablewriter.NewWriter(&outputBuffer)
|
||||
errorTable.SetHeader([]string{"Error", "Absolute", "Percentage %"})
|
||||
|
||||
//Sorting errors by highest count
|
||||
el := make(ErrorList, len(s.Errors))
|
||||
i := 0
|
||||
for code, count := range s.Errors {
|
||||
el[i] = Error{code, count}
|
||||
i++
|
||||
}
|
||||
sort.Sort(sort.Reverse(el)) //Sort in descending order
|
||||
|
||||
//Loop through each Error and print count
|
||||
for _, err := range el {
|
||||
percentage := float64(err.Count) / float64(requestTotal) * 100
|
||||
errorTable.Append([]string{err.ErrorCode, strconv.Itoa(err.Count), strconv.FormatFloat(percentage, 'f', 2, 64)})
|
||||
}
|
||||
|
||||
outputBuffer.WriteString("\n")
|
||||
metricsTable.Render()
|
||||
|
||||
if el.Len() > 0 {
|
||||
outputBuffer.WriteString("\n")
|
||||
errorTable.Render()
|
||||
}
|
||||
|
||||
return outputBuffer.String()
|
||||
}
|
||||
|
||||
// GenerateLatencyDistribution generates a text file containing the specified
|
||||
// latency distribution in a format plottable by
|
||||
// http://hdrhistogram.github.io/HdrHistogram/plotFiles.html. Percentiles is a
|
||||
// list of percentiles to include, e.g. 10.0, 50.0, 99.0, 99.99, etc. If
|
||||
// percentiles is nil, it defaults to a logarithmic percentile scale. If a
|
||||
// request rate was specified for the benchmark, this will also generate an
|
||||
// uncorrected distribution file which does not account for coordinated
|
||||
// omission.
|
||||
func (s *Summary) GenerateLatencyDistribution(percentiles Percentiles, file string) error {
|
||||
return generateLatencyDistribution(s.SuccessHistogram, nil, s.RequestRate, percentiles, file)
|
||||
}
|
||||
|
||||
func generateLatencyDistribution(histogram, unHistogram *hdrhistogram.Histogram, requestRate float64, percentiles Percentiles, file string) error {
|
||||
if percentiles == nil {
|
||||
percentiles = Logarithmic
|
||||
}
|
||||
f, err := os.Create(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
f.WriteString("Value Percentile TotalCount 1/(1-Percentile)\n\n")
|
||||
for _, percentile := range percentiles {
|
||||
value := float64(histogram.ValueAtQuantile(percentile)) / 1000000
|
||||
_, err := f.WriteString(fmt.Sprintf("%f %f %d %f\n",
|
||||
value, percentile/100, 0, 1/(1-(percentile/100))))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Generate uncorrected distribution.
|
||||
if requestRate > 0 && unHistogram != nil {
|
||||
f, err := os.Create(file + ".uncorrected")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
f.WriteString("Value Percentile TotalCount 1/(1-Percentile)\n\n")
|
||||
for _, percentile := range percentiles {
|
||||
value := float64(unHistogram.ValueAtQuantile(percentile)) / 1000000
|
||||
_, err := f.WriteString(fmt.Sprintf("%f %f %d %f\n",
|
||||
value, percentile/100, 0, 1/(1-(percentile/100))))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
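For reference, GenerateLatencyDistribution writes one line per percentile in the format above, so `out/res.hgrm` looks roughly like this (latencies are in milliseconds; the values below are purely illustrative):

Value Percentile TotalCount 1/(1-Percentile)

12.400000 0.500000 0 2.000000
18.700000 0.990000 0 100.000000
25.100000 0.999000 0 1000.000000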
@@ -0,0 +1,70 @@
# Target RPS (requests per second)
RequestRatePerSec: 200

# Number of clients used to send requests. It should be sufficiently large to make sure requests are still sent on schedule even when the server is slow.
# Defaults to: RequestRatePerSec * RequestTimeout + 20%, which guarantees there is always a client available to send a request
Clients: 1000

# How long to run the test
Duration: 10s

# BaseLatency is simply a number (in ms) that is subtracted from every latency measurement.
# It helps the output graph show just the variability of the overhead
BaseLatency: 10

# Timeout of an individual HTTP request, defaults to 10s
RequestTimeout: 5s

# By default a new TCP connection is created for every request,
# but if this is set to true, then connections will be long-lived and reused
ReuseConnections: true

# When RPS is high and ReuseConnections is false, the machine running the benchmark can run out of TCP ports for outbound connections
# Setting DontLinger to true makes ports from closed sockets available right away
DontLinger: true

# Produce JSON with the results of the run, defaults to false
OutputJSON: true

# If the timer-resolution logic that picks between the sleeping and tight ticker does not work, the tight ticker can be forced by setting this to true.
# The tight ticker is very precise, but it takes an entire CPU core.
# The sleeping ticker uses the OS thread sleep API, but if the OS sleeping precision is not sufficient, there will be a lot of missed ticks (low TimelyTicks).
TightTicker: true

Request:
  # HTTPMethod defaults to GET if Body (below) is not present and to POST otherwise, but can be specified explicitly
  HTTPMethod: POST

  # ExpectedHTTPStatusCode defaults to 200
  ExpectedHTTPStatusCode: 202

  # The URL and URLs settings are mutually exclusive
  # If URL is specified, then it's simply used
  # If URLs is specified, then the list of URLs is used in round-robin fashion, evenly distributing requests across them
  URL: https://my.server/services/e0cb/execute?api-version=2.0&details=true
  URLs:
    - https://my.server1/services/e0cb/execute?api-version=2.0&details=true
    - https://my.server2/services/e0cb/execute?api-version=2.0&details=true

  # Hosts can be used with the URL param above (and not with URLs). If Hosts is specified, then the host part of URL is ignored (it can be anything) and the Hosts are substituted
  # in round-robin fashion, evenly distributing requests across them
  Hosts:
    - my.server1
    - my.server2

  # Any HTTP headers; the $APIKEY syntax expands the environment variable
  Headers:
    Authorization: Bearer $APIKEY
    Content-Type: application/json

  # POST request body
  # For a binary body see https://yaml.org/type/binary.html
  Body: |-
    {
      "Inputs": {
        "input1": {
          "ColumnNames": ["Measured"],
          "Values": [["200"]]
        }
      }
    }
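As a worked example of the Clients default described above (mirroring the calculation in `main.go` from this commit): if Clients were omitted with `RequestRatePerSec: 200` and `RequestTimeout: 5s`, the tool would use 200 × 5 = 1000 clients plus 20%, i.e. 1200.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// Default Clients = RequestRatePerSec * ceil(RequestTimeout in seconds), plus 20%.
	rate, timeout := uint64(200), 5*time.Second
	clients := rate * uint64(math.Ceil(timeout.Seconds()))
	clients += clients / 5
	fmt.Println(clients) // 1200
}
```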
@@ -0,0 +1,9 @@
module labench

require (
	golang.org/x/net v0.0.0-20190522155817-f3200d17e092
	gopkg.in/yaml.v2 v2.2.2
	labench/bench v0.0.0
)

replace labench/bench => ./bench
@@ -0,0 +1,16 @@
|
|||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
|
||||
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
@@ -0,0 +1,4 @@
RequestRatePerSec: 2
Duration: 3s
Request:
  URL: http://example.com
@@ -0,0 +1,116 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"labench/bench"
|
||||
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type benchParams struct {
|
||||
RequestRatePerSec uint64 `yaml:"RequestRatePerSec"`
|
||||
Clients uint64 `yaml:"Clients"`
|
||||
Duration time.Duration `yaml:"Duration"`
|
||||
BaseLatency time.Duration `yaml:"BaseLatency"`
|
||||
RequestTimeout time.Duration `yaml:"RequestTimeout"`
|
||||
ReuseConnections bool `yaml:"ReuseConnections"`
|
||||
DontLinger bool `yaml:"DontLinger"`
|
||||
OutputJSON bool `yaml:"OutputJSON"`
|
||||
TightTicker bool `yaml:"TightTicker"`
|
||||
}
|
||||
|
||||
type config struct {
|
||||
Params benchParams `yaml:",inline"`
|
||||
Protocol string `yaml:"Protocol"`
|
||||
Request WebRequesterFactory `yaml:"Request"`
|
||||
}
|
||||
|
||||
func maybePanic(err error) {
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func assert(cond bool, err string) {
|
||||
if !cond {
|
||||
log.Panic(errors.New(err))
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
configFile := "labench.yaml"
|
||||
if len(os.Args) > 1 {
|
||||
assert(len(os.Args) == 2, fmt.Sprintf("Usage: %s [config.yaml]\n\tThe default config file name is: %s", os.Args[0], configFile))
|
||||
configFile = os.Args[1]
|
||||
}
|
||||
|
||||
configBytes, err := ioutil.ReadFile(configFile)
|
||||
maybePanic(err)
|
||||
|
||||
var conf config
|
||||
err = yaml.Unmarshal(configBytes, &conf)
|
||||
maybePanic(err)
|
||||
|
||||
// fmt.Printf("%+v\n", conf)
|
||||
fmt.Println("timeStart =", time.Now().UTC().Add(-5*time.Second).Truncate(time.Second))
|
||||
|
||||
if conf.Request.ExpectedHTTPStatusCode == 0 {
|
||||
conf.Request.ExpectedHTTPStatusCode = 200
|
||||
}
|
||||
|
||||
if conf.Request.HTTPMethod == "" {
|
||||
if conf.Request.Body == "" {
|
||||
conf.Request.HTTPMethod = http.MethodGet
|
||||
} else {
|
||||
conf.Request.HTTPMethod = http.MethodPost
|
||||
}
|
||||
}
|
||||
|
||||
if conf.Protocol == "" {
|
||||
conf.Protocol = "HTTP/1.1"
|
||||
}
|
||||
|
||||
fmt.Println("Protocol:", conf.Protocol)
|
||||
|
||||
// Apply the RequestTimeout default before it is passed to the HTTP client below.
if conf.Params.RequestTimeout == 0 {
conf.Params.RequestTimeout = 10 * time.Second
}

switch conf.Protocol {
case "HTTP/2":
initHTTP2Client(conf.Params.RequestTimeout, conf.Params.DontLinger)

default:
initHTTPClient(conf.Params.ReuseConnections, conf.Params.RequestTimeout, conf.Params.DontLinger)
}
|
||||
|
||||
if conf.Params.Clients == 0 {
|
||||
clients := conf.Params.RequestRatePerSec * uint64(math.Ceil(conf.Params.RequestTimeout.Seconds()))
|
||||
clients += clients / 5 // add 20%
|
||||
conf.Params.Clients = clients
|
||||
fmt.Println("Clients:", clients)
|
||||
}
|
||||
|
||||
benchmark := bench.NewBenchmark(&conf.Request, conf.Params.RequestRatePerSec, conf.Params.Clients, conf.Params.Duration, conf.Params.BaseLatency)
|
||||
summary, err := benchmark.Run(conf.Params.OutputJSON, conf.Params.TightTicker)
|
||||
maybePanic(err)
|
||||
|
||||
fmt.Println("timeEnd =", time.Now().UTC().Add(5*time.Second).Round(time.Second))
|
||||
|
||||
fmt.Println(summary)
|
||||
|
||||
err = os.MkdirAll("out", os.ModeDir|os.ModePerm)
|
||||
maybePanic(err)
|
||||
|
||||
err = summary.GenerateLatencyDistribution(bench.Logarithmic, path.Join("out", "res.hgrm"))
|
||||
maybePanic(err)
|
||||
}
|
|
@@ -0,0 +1,187 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
|
||||
"time"
|
||||
|
||||
"labench/bench"
|
||||
)
|
||||
|
||||
var (
|
||||
httpClient *http.Client
|
||||
defaultDialer *net.Dialer
|
||||
noLinger bool
|
||||
)
|
||||
|
||||
func noLingerDialer(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||
con, err := defaultDialer.DialContext(ctx, network, addr)
|
||||
if err == nil && con != nil && noLinger {
|
||||
maybePanic(con.(*net.TCPConn).SetLinger(0))
|
||||
}
|
||||
return con, err
|
||||
}
|
||||
|
||||
func initHTTPClient(reuseConnections bool, requestTimeout time.Duration, dontLinger bool) {
|
||||
defaultDialer = &net.Dialer{
|
||||
Timeout: requestTimeout,
|
||||
// Disable TCP keepalives as we are sending data very actively anyway.
|
||||
// Should not be confused with HTTP keep alive.
|
||||
KeepAlive: 0,
|
||||
}
|
||||
|
||||
httpClient = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: noLingerDialer,
|
||||
DisableKeepAlives: !reuseConnections,
|
||||
MaxIdleConns: 0,
|
||||
MaxIdleConnsPerHost: 0,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
ResponseHeaderTimeout: requestTimeout,
|
||||
TLSHandshakeTimeout: requestTimeout,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
},
|
||||
Timeout: requestTimeout}
|
||||
|
||||
noLinger = dontLinger
|
||||
}
|
||||
|
||||
func initHTTP2Client(requestTimeout time.Duration, dontLinger bool) {
|
||||
defaultDialer = &net.Dialer{
|
||||
Timeout: requestTimeout,
|
||||
// Disable TCP keepalives as we are sending data very actively anyway.
|
||||
// Should not be confused with HTTP keep alive.
|
||||
KeepAlive: 0,
|
||||
}
|
||||
|
||||
httpClient = &http.Client{
|
||||
Transport: &http2.Transport{
|
||||
AllowHTTP: true,
|
||||
DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
|
||||
con, err := defaultDialer.Dial(network, addr)
|
||||
if err == nil && con != nil && noLinger {
|
||||
maybePanic(con.(*net.TCPConn).SetLinger(0))
|
||||
}
|
||||
return con, err
|
||||
},
|
||||
},
|
||||
Timeout: requestTimeout}
|
||||
|
||||
noLinger = dontLinger
|
||||
}
|
||||
|
||||
// WebRequesterFactory implements RequesterFactory by creating a Requester
|
||||
// which makes GET requests to the provided URL.
|
||||
type WebRequesterFactory struct {
|
||||
URL string `yaml:"URL"`
|
||||
URLs []string `yaml:"URLs"`
|
||||
Hosts []string `yaml:"Hosts"`
|
||||
Headers map[string]string `yaml:"Headers"`
|
||||
Body string `yaml:"Body"`
|
||||
ExpectedHTTPStatusCode int `yaml:"ExpectedHTTPStatusCode"`
|
||||
HTTPMethod string `yaml:"HTTPMethod"`
|
||||
|
||||
expandedHeaders map[string][]string
|
||||
}
|
||||
|
||||
// GetRequester returns a new Requester, called for each Benchmark connection.
|
||||
func (w *WebRequesterFactory) GetRequester(uint64) bench.Requester {
|
||||
// if len(w.expandedHeaders) != len(w.Headers) {
|
||||
if w.expandedHeaders == nil {
|
||||
expandedHeaders := make(map[string][]string)
|
||||
for key, val := range w.Headers {
|
||||
expandedHeaders[key] = []string{os.ExpandEnv(val)}
|
||||
}
|
||||
w.expandedHeaders = expandedHeaders
|
||||
}
|
||||
|
||||
return &webRequester{w.URL, w.URLs, w.Hosts, w.expandedHeaders, w.Body, w.ExpectedHTTPStatusCode, w.HTTPMethod}
|
||||
}
|
||||
|
||||
// webRequester implements Requester by making a GET request to the provided
|
||||
// URL.
|
||||
type webRequester struct {
|
||||
url string
|
||||
urls []string
|
||||
hosts []string
|
||||
headers map[string][]string
|
||||
body string
|
||||
expectedReturnCode int
|
||||
httpMethod string
|
||||
}
|
||||
|
||||
var nextHostOrURL int32 = -1
|
||||
|
||||
// Setup prepares the Requester for benchmarking.
|
||||
func (w *webRequester) Setup() error { return nil }
|
||||
|
||||
// Request performs a synchronous request to the system under test.
|
||||
func (w *webRequester) Request() error {
|
||||
var reqURL string
|
||||
if w.urls != nil {
|
||||
h := atomic.AddInt32(&nextHostOrURL, 1)
|
||||
reqURL = w.urls[h%int32(len(w.urls))]
|
||||
} else if w.hosts != nil {
|
||||
parsedURL, err := url.Parse(w.url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h := atomic.AddInt32(&nextHostOrURL, 1)
|
||||
parsedURL.Host = w.hosts[h%int32(len(w.hosts))]
|
||||
reqURL = parsedURL.String()
|
||||
} else {
|
||||
reqURL = w.url
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(w.httpMethod, reqURL, strings.NewReader(w.body))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.Header = w.headers
|
||||
resp, err := httpClient.Do(req)
|
||||
|
||||
/* to look at the response body
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(resp.Body)
|
||||
s := buf.String()
|
||||
_ = s
|
||||
*/
|
||||
|
||||
// #nosec
|
||||
if resp != nil && resp.Body != nil {
|
||||
_, _ = io.Copy(ioutil.Discard, resp.Body)
|
||||
_ = resp.Body.Close()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp == nil {
|
||||
return errors.New("Nil response")
|
||||
}
|
||||
|
||||
if resp.StatusCode != w.expectedReturnCode {
|
||||
return fmt.Errorf("Expected %v got %v", w.expectedReturnCode, resp.StatusCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Teardown is called upon benchmark completion.
|
||||
func (w *webRequester) Teardown() error { return nil }
|