using reflectionfunction, cleanups, results in milliseconds
This commit is contained in:
Parent: 39e39bb440
Commit: cd1dc96cc3
@@ -1,3 +1,3 @@
-*.Dockerfile
-Run.ps1
+*.dockerfile
+run.ps1
 results.json
@@ -2,7 +2,7 @@
 
   <PropertyGroup>
     <OutputType>Exe</OutputType>
-    <TargetFramework>netcoreapp3.0</TargetFramework>
+    <TargetFramework>netcoreapp3.1</TargetFramework>
    <StartupObject>run.php</StartupObject>
   </PropertyGroup>
 
@@ -7,4 +7,4 @@ RUN echo "{ \"msbuild-sdks\": { \"Peachpie.NET.Sdk\": \"0.9.981\" } }" > global.
 RUN dotnet restore
 RUN dotnet build -c Release
 
-ENTRYPOINT ["dotnet", "run", "-c", "Release"]
+ENTRYPOINT ["dotnet", "run", "--no-build", "-c", "Release"]
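Since the image already compiles the project during `docker build` (the `dotnet build -c Release` step above), adding `--no-build` makes `dotnet run` skip its implicit build step and launch the precompiled benchmarks as soon as the container starts.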
@@ -1,96 +1,124 @@
 <?php
 
-// Arguments: <benchmarkFilter> <iterationCount>
+// Arguments: <benchmarkFilter> <iterations>
 // Example: "functions/*" 1000000
 
-const EMPTY_BENCHMARK = 'emptyBenchmark';
-
-const CHUNK_COUNT = 100;
-const WARMUP_CHUNK_COUNT = 20;
-
-$benchmarkFilter = $argv[1];
-$iterationCount = $argv[2];
-runAll($benchmarkFilter, $iterationCount);
-
-function runAll(string $benchmarkFilter, int $iterationCount) {
-    // Include all the files with benchmarks according to the filter
-    foreach (glob($benchmarkFilter, GLOB_BRACE) as $file) {
-        if (pathinfo($file, PATHINFO_EXTENSION) == "php") {
-            require_once $file;
-        }
-    }
-
-    // Find all the benchmarking functions in the included files
-    $benchmarks = [];
-    foreach (get_defined_functions()['user'] as $fnName) {
-        $fn = new ReflectionFunction($fnName);
-
-        // From this file, include only EMPTY_BENCHMARK
-        if ($fn->getFileName() == __FILE__ && $fn->getName() != EMPTY_BENCHMARK) {
-            continue;
-        }
-
-        // Exclude helper functions starting with _
-        if ($fn->getShortName()[0] == '_') {
-            continue;
-        }
-
-        $benchmarks[] = $fn->getName();
-    }
-
-    // Warm-up opcache, JIT, call-sites etc.
-    foreach ($benchmarks as $benchmark) {
-        runSingle($benchmark, 1);
-    }
-
-    // Run benchmark themselves and gather the rough results
-    $results = [];
-    foreach ($benchmarks as $benchmark) {
-        // Perform all the measurements in chunks
-        $chunkIterationCount = $iterationCount / CHUNK_COUNT; // TODO: Consider finding the iteration count dynamically
-        $chunkAvgs = [];
-        for ($i = 0; $i < WARMUP_CHUNK_COUNT + CHUNK_COUNT; $i++) {
-            $chunkAvgs[] = runSingle($benchmark, $chunkIterationCount) / $chunkIterationCount;
-        }
-
-        // Remove warmup data and find average time
-        $chunkAvgs = array_slice($chunkAvgs, WARMUP_CHUNK_COUNT);
-        $avg = array_sum($chunkAvgs) / count($chunkAvgs);
-
-        $results[$benchmark] = [
-            'iterations' => $iterationCount,
-            'rough_avg_ns' => $avg,
-            'rough_std_dev_ns' => getStandardDeviation($chunkAvgs, $avg)
-        ];
-    }
-
-    // Clean up the results by calculating the overhead for test runs and deducing it
-    $overheadAvg = $results[EMPTY_BENCHMARK]['rough_avg_ns'];
-    foreach ($benchmarks as $benchmark) {
-        $results[$benchmark]['clean_avg_ns'] = $results[$benchmark]['rough_avg_ns'] - $overheadAvg;
-    }
-
-    echo json_encode($results);
-}
-
-function runSingle(string $benchmark, int $iterationCount) : float {
-    $start = hrtime(true);
-
-    // Perform the operation repeatively, measuring the total time of the whole batch
-    for ($i = 0; $i < $iterationCount; $i++) {
-        $benchmark();
-    }
-
-    return hrtime(true) - $start;
-}
-
-function getStandardDeviation(array $values, float $avg) : float {
-    $variance = 0.0;
-    foreach ($values as $value) {
-        $variance += ($value - $avg) * ($value - $avg);
-    }
-
-    return sqrt($variance / count($values));
-}
-
-function emptyBenchmark() {}
+class Runner
+{
+    const CHUNK = 100;
+    const EMPTY_FUNCTION = "empty_func";
+
+    static function run(string $benchmarkFilter, int $iterations): array
+    {
+        $benchmarks = self::collectBechmarks($benchmarkFilter);
+        $results = [];
+
+        // run benchmarks, collect results
+        foreach ($benchmarks as $benchmark) {
+
+            // warmup
+            $time = self::runSingle($benchmark, self::CHUNK);
+
+            //
+            $time = self::runSingle($benchmark, $iterations);
+            $times = [];
+
+            $blocks = $iterations / self::CHUNK;
+            for ($i = 0; $i < $blocks; $i++)
+            {
+                $times[] = self::runSingle($benchmark, self::CHUNK) * $blocks;
+            }
+
+            sort($times);
+
+            $results[$benchmark->getName()] =
+            [
+                "time" => $time,
+                //"avg" => $time / $iterations,
+                "min" => $times[0],
+                "max" => end($times),
+                "med" => $times[count($times) / 2],
+            ];
+        }
+
+        // Clean up the results by calculating the overhead for test runs and deducing it
+        $empty = $results[self::EMPTY_FUNCTION]["time"];
+        foreach ($results as $name => $r)
+        {
+            $results[$name] =
+            [
+                "iterations" => $iterations,
+                "time_ms" => (int)(($r["time"] - $empty) / 1000000),
+                //"avg_ms" => (int)(($r["time"] - $empty) / $iterations / 1000000 ),
+                "min_ms" => (int)(($r["min"] - $empty) / 1000000 ),
+                "max_ms" => (int)(($r["max"] - $empty) / 1000000 ),
+                "med_ms" => (int)(($r["med"] - $empty) / 1000000 ),
+            ];
+        }
+
+        //
+        return $results;
+    }
+
+    static function collectBechmarks(string $benchmarkFilter)
+    {
+        // Include all the files with benchmarks according to the filter
+        $files = [];
+        foreach (glob($benchmarkFilter, GLOB_BRACE) as $file)
+        {
+            if (pathinfo($file, PATHINFO_EXTENSION) == "php")
+            {
+                if (false !== require_once $file)
+                {
+                    $files[] = realpath($file);
+                }
+            }
+        }
+
+        // Find all the benchmarking functions in the included files
+        $benchmarks = [
+            new ReflectionFunction(self::EMPTY_FUNCTION)
+        ];
+
+        foreach (get_defined_functions()['user'] as $fnName)
+        {
+            $fn = new ReflectionFunction($fnName);
+
+            if (in_array($fn->getFileName(), $files) && $fn->getShortName()[0] != '_')
+            {
+                $benchmarks[] = $fn;
+            }
+        }
+
+        //
+        return $benchmarks;
+    }
+
+    private static function runSingle(ReflectionFunction $benchmark, int $iterations): int {
+        $start = hrtime(true);
+
+        // Perform the operation repeatively,
+        // measuring the total time of the whole batch
+        for ($i = 0; $i < $iterations; $i++) {
+            $benchmark->invoke(null);
+        }
+
+        return hrtime(true) - $start;
+    }
+
+    private static function getStandardDeviation($values, $avg) {
+        $variance = 0.0;
+        foreach ($values as $value) {
+            $variance += ($value - $avg) * ($value - $avg);
+        }
+
+        return sqrt($variance / count($values));
+    }
+}
+
+function empty_func()
+{
+}
+
+//
+echo json_encode(Runner::run(@$argv[1] ?? "*/*", @$argv[2] ?? 100000));
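For reference on the units: `hrtime(true)` returns a monotonic clock reading in nanoseconds, so `runSingle()` yields the total nanoseconds spent on a batch of calls. `run()` first times one `CHUNK` of calls as warmup, then the full `$iterations` batch (`time`), then `$blocks = $iterations / CHUNK` separate chunks; each chunk time is multiplied by `$blocks` to extrapolate it to a full-run equivalent before the sorted list is reduced to min, median and max. The `empty_func` batch time is then subtracted from every statistic as call overhead, and dividing by 1,000,000 converts nanoseconds to the whole milliseconds named in the commit title. A sketch of that arithmetic with made-up numbers (all values here are hypothetical):

<?php
// Hypothetical figures for one benchmark, assuming the defaults
// iterations = 100000 and Runner::CHUNK = 100.
$blocks  = 100000 / 100;         // 1000 chunks per benchmark
$chunkNs = 2500000;              // assumed duration of one 100-call chunk: 2.5 ms in ns
$scaled  = $chunkNs * $blocks;   // extrapolated full-run equivalent: 2500000000 ns
$emptyNs = 100000000;            // assumed empty_func() batch time: 0.1 s in ns
echo (int)(($scaled - $emptyNs) / 1000000); // prints 2400, i.e. 2400 ms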
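To illustrate the discovery protocol: `collectBechmarks()` globs the filter (default `*/*`), includes every matching `.php` file, and wraps each user-defined function from those files in a `ReflectionFunction`, skipping helpers whose short name starts with an underscore; `empty_func` from run.php itself is prepended explicitly, since run.php is never in the globbed file list. A benchmark file this harness would pick up might look as follows (file and function names are hypothetical, not part of this commit):

<?php
// functions/concat.php (hypothetical benchmark file)

// Leading underscore: treated as a helper, never timed.
function _input() {
    return str_repeat("a", 16);
}

// Discovered via get_defined_functions() and timed by Runner::run().
function concat_short_strings() {
    $s = _input() . _input();
}

After building, it could be run with, for example, `dotnet run --no-build -c Release -- "functions/*" 1000000`, which prints the per-function statistics as JSON.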