Mirror of https://github.com/AvaloniaUI/angle.git
Add cpu time measurement to perf tests
Bug: angleproject:6667
Change-Id: I16570a7b51d363dd1c0f35789b985520cb76d488
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/3270601
Commit-Queue: Lingfeng Yang <lfy@google.com>
Reviewed-by: Tim Van Patten <timvp@google.com>
Reviewed-by: Jamie Madill <jmadill@chromium.org>
Reviewed-by: Charlie Lao <cclao@google.com>
This commit is contained in:
Parent
30c03b56b5
Commit
e9f9fa17da
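The change introduces a process-CPU-time clock next to the existing wall clock in common/system_utils.h and exposes both through util/Timer. A minimal sketch of how the two entry points are meant to be used together; the main() and busy loop below are illustrative only, while the two functions and the include path are taken from the diff:

#include <cstdio>

#include "common/system_utils.h"

int main()
{
    // Sample both clocks around a workload. CPU time excludes time the
    // process spends descheduled or blocked, so for a single-threaded busy
    // loop it should track, and not meaningfully exceed, the wall time.
    double cpuStart  = angle::GetCurrentProcessCpuTime();
    double wallStart = angle::GetCurrentSystemTime();

    volatile double sink = 0.0;
    for (int i = 0; i < 10000000; ++i)
    {
        sink += i * 0.5;  // busy work so the CPU clock visibly advances
    }

    double wallEnd = angle::GetCurrentSystemTime();
    double cpuEnd  = angle::GetCurrentProcessCpuTime();

    printf("wall: %.3f ms, cpu: %.3f ms\n", (wallEnd - wallStart) * 1e3,
           (cpuEnd - cpuStart) * 1e3);
    return 0;
}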
@@ -234,7 +234,7 @@ int SampleApplication::run()
     while (mRunning)
     {
-        double elapsedTime = mTimer.getElapsedTime();
+        double elapsedTime = mTimer.getElapsedWallClockTime();
         double deltaTime = elapsedTime - prevTime;

         step(static_cast<float>(deltaTime), elapsedTime);

@@ -277,7 +277,7 @@ int SampleApplication::run()
         if (mFrameCount % 100 == 0)
         {
             printf("Rate: %0.2lf frames / second\n",
-                   static_cast<double>(mFrameCount) / mTimer.getElapsedTime());
+                   static_cast<double>(mFrameCount) / mTimer.getElapsedWallClockTime());
         }
     }

@@ -195,8 +195,8 @@ void main()
         mOrigTimer.stop();
         // This timer indicates draw time for an already-created texture resident on the GPU,
         // which needs no updates. It will be faster than the other draws.
-        std::cout << "Original texture draw: " << mOrigTimer.getElapsedTime() * 1000 << "msec"
-                  << std::endl;
+        std::cout << "Original texture draw: " << mOrigTimer.getElapsedWallClockTime() * 1000
+                  << "msec" << std::endl;

         glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

@@ -212,9 +212,9 @@ void main()
         // causing the underlying resource to be allocated, and then resized, requiring resource
         // reallocation and related overhead.
         std::cout << "Resized texture definition: "
-                  << mResizeDefineTimer.getElapsedTime() * 1000 << "msec" << std::endl;
-        std::cout << "Resized texture draw: " << mResizeDrawTimer.getElapsedTime() * 1000
-                  << "msec" << std::endl;
+                  << mResizeDefineTimer.getElapsedWallClockTime() * 1000 << "msec" << std::endl;
+        std::cout << "Resized texture draw: "
+                  << mResizeDrawTimer.getElapsedWallClockTime() * 1000 << "msec" << std::endl;

         glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

@@ -231,9 +231,9 @@ void main()
         // is no already-resident texture object to manage. This draw is expected to be faster
         // than the resized texture draw.
         std::cout << "Newly created texture definition: "
-                  << mNewTexDefineTimer.getElapsedTime() * 1000 << "msec" << std::endl;
-        std::cout << "Newly created texture draw: " << mNewTexDrawTimer.getElapsedTime() * 1000
-                  << "msec" << std::endl;
+                  << mNewTexDefineTimer.getElapsedWallClockTime() * 1000 << "msec" << std::endl;
+        std::cout << "Newly created texture draw: "
+                  << mNewTexDrawTimer.getElapsedWallClockTime() * 1000 << "msec" << std::endl;
     }

     if (mFrameCount == 5)

@@ -198,8 +198,9 @@ void main()
         }
         mFanTimer.stop();

-        mFanTotalTime += static_cast<unsigned int>(
-            mFanTimer.getElapsedTime() * 1000);  // convert from usec to msec when accumulating
+        mFanTotalTime +=
+            static_cast<unsigned int>(mFanTimer.getElapsedWallClockTime() *
+                                      1000);  // convert from usec to msec when accumulating

         // Clear to eliminate driver-side gains from occlusion
         glClear(GL_COLOR_BUFFER_BIT);

@@ -214,8 +215,9 @@ void main()
         }
         mTriTimer.stop();

-        mTriTotalTime += static_cast<unsigned int>(
-            mTriTimer.getElapsedTime() * 1000);  // convert from usec to msec when accumulating
+        mTriTotalTime +=
+            static_cast<unsigned int>(mTriTimer.getElapsedWallClockTime() *
+                                      1000);  // convert from usec to msec when accumulating

         mFrameCount++;
     }

@@ -41,6 +41,8 @@ std::string ConcatenatePath(std::string first, std::string second);

 // Get absolute time in seconds. Use this function to get an absolute time with an unknown origin.
 double GetCurrentSystemTime();
+// Get CPU time for current process in seconds.
+double GetCurrentProcessCpuTime();

 // Run an application and get the output. Gets a nullptr-terminated set of args to execute the
 // application with, and returns the stdout and stderr outputs as well as the exit code.

@@ -12,6 +12,12 @@
 #include <iostream>

 #include <dlfcn.h>
+#ifdef ANGLE_PLATFORM_FUCHSIA
+#    include <zircon/process.h>
+#    include <zircon/syscalls.h>
+#else
+#    include <sys/resource.h>
+#endif
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/wait.h>

@@ -199,4 +205,26 @@ std::string GetRootDirectory()
 {
     return "/";
 }

+double GetCurrentProcessCpuTime()
+{
+#ifdef ANGLE_PLATFORM_FUCHSIA
+    static zx_handle_t me = zx_process_self();
+    zx_info_task_runtime_t task_runtime;
+    zx_object_get_info(me, ZX_INFO_TASK_RUNTIME, &task_runtime, sizeof(task_runtime), nullptr,
+                       nullptr);
+    return static_cast<double>(task_runtime.cpu_time) * 1e-9;
+#else
+    // We could also have used /proc/stat, but that requires us to read the
+    // filesystem and convert from jiffies. /proc/stat also relies on jiffies
+    // (lower resolution) while getrusage can potentially use a sched_clock()
+    // underneath that has higher resolution.
+    struct rusage usage;
+    getrusage(RUSAGE_SELF, &usage);
+    double userTime = usage.ru_utime.tv_sec + usage.ru_utime.tv_usec * 1e-6;
+    double systemTime = usage.ru_stime.tv_sec + usage.ru_stime.tv_usec * 1e-6;
+    return userTime + systemTime;
+#endif
+}
+
 }  // namespace angle

@@ -9,6 +9,9 @@
 #include "gtest/gtest.h"

 #include "common/system_utils.h"
+#include "util/test_utils.h"
+
+#include <vector>

 using namespace angle;

@@ -57,6 +60,38 @@ TEST(SystemUtils, Environment)
     EXPECT_EQ("", readback);
 }

+// Test CPU time measurement with a small operation
+// (pretty much the measurement itself)
+TEST(SystemUtils, CpuTimeSmallOp)
+{
+    double cpuTimeStart = GetCurrentProcessCpuTime();
+    double cpuTimeEnd = GetCurrentProcessCpuTime();
+    EXPECT_GE(cpuTimeEnd, cpuTimeStart);
+}
+
+// Test CPU time measurement with a sleepy operation
+TEST(SystemUtils, CpuTimeSleepy)
+{
+    double wallTimeStart = GetCurrentSystemTime();
+    double cpuTimeStart = GetCurrentProcessCpuTime();
+    angle::Sleep(1);
+    double cpuTimeEnd = GetCurrentProcessCpuTime();
+    double wallTimeEnd = GetCurrentSystemTime();
+    EXPECT_GE(cpuTimeEnd, cpuTimeStart);
+    EXPECT_GE(wallTimeEnd - wallTimeStart, cpuTimeEnd - cpuTimeStart);
+}
+
+// Test CPU time measurement with a heavy operation
+TEST(SystemUtils, CpuTimeHeavyOp)
+{
+    constexpr size_t bufferSize = 1048576;
+    std::vector<uint8_t> buffer(bufferSize, 1);
+    double cpuTimeStart = GetCurrentProcessCpuTime();
+    memset(buffer.data(), 0, bufferSize);
+    double cpuTimeEnd = GetCurrentProcessCpuTime();
+    EXPECT_GE(cpuTimeEnd, cpuTimeStart);
+}
+
 #if defined(ANGLE_PLATFORM_POSIX)
 TEST(SystemUtils, ConcatenatePathSimple)
 {

@@ -83,6 +83,37 @@ double GetCurrentSystemTime()
     return static_cast<double>(curTime.QuadPart) / frequency.QuadPart;
 }

+double GetCurrentProcessCpuTime()
+{
+    FILETIME creationTime = {};
+    FILETIME exitTime = {};
+    FILETIME kernelTime = {};
+    FILETIME userTime = {};
+
+    // Note this will not give accurate results if the current thread is
+    // scheduled for less than the tick rate, which is often 15 ms. In that
+    // case, GetProcessTimes will not return different values, making it
+    // possible to end up with 0 ms for a process that takes 93% of a core
+    // (14/15 ms)! An alternative is QueryProcessCycleTime but there is no
+    // simple way to convert cycles back to seconds, and on top of that, it's
+    // not supported pre-Windows Vista.
+
+    // Returns 100-ns intervals, so we want to divide by 1e7 to get seconds
+    GetProcessTimes(GetCurrentProcess(), &creationTime, &exitTime, &kernelTime, &userTime);
+
+    ULARGE_INTEGER kernelInt64;
+    kernelInt64.LowPart = kernelTime.dwLowDateTime;
+    kernelInt64.HighPart = kernelTime.dwHighDateTime;
+    double systemTimeSeconds = static_cast<double>(kernelInt64.QuadPart) * 1e-7;
+
+    ULARGE_INTEGER userInt64;
+    userInt64.LowPart = userTime.dwLowDateTime;
+    userInt64.HighPart = userTime.dwHighDateTime;
+    double userTimeSeconds = static_cast<double>(userInt64.QuadPart) * 1e-7;
+
+    return systemTimeSeconds + userTimeSeconds;
+}
+
 bool IsDirectory(const char *filename)
 {
     WIN32_FILE_ATTRIBUTE_DATA fileInformation;

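As the comment in the Windows implementation above warns, GetProcessTimes only advances with the scheduler tick (often about 15 ms), so a workload shorter than one tick can read back as zero CPU time. A small illustrative sketch, not part of the commit, that makes the granularity visible (include path and namespace assumed as elsewhere in the tree):

#include <cstdio>

#include "common/system_utils.h"

int main()
{
    double cpuStart = angle::GetCurrentProcessCpuTime();

    // A workload far shorter than the ~15 ms tick.
    volatile double sink = 0.0;
    for (int i = 0; i < 100000; ++i)
    {
        sink += i;
    }

    double cpuDeltaMs = (angle::GetCurrentProcessCpuTime() - cpuStart) * 1e3;

    // On Windows this often prints 0.000 because GetProcessTimes has not
    // ticked yet; getrusage on POSIX typically resolves finer intervals.
    printf("cpu delta: %.3f ms\n", cpuDeltaMs);
    return 0;
}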
@@ -264,6 +264,7 @@ ANGLEPerfTest::ANGLEPerfTest(const std::string &name,
     }
     mReporter = std::make_unique<perf_test::PerfResultReporter>(mName + mBackend, mStory);
     mReporter->RegisterImportantMetric(".wall_time", units);
+    mReporter->RegisterImportantMetric(".cpu_time", units);
     mReporter->RegisterImportantMetric(".gpu_time", units);
     mReporter->RegisterFyiMetric(".trial_steps", "count");
     mReporter->RegisterFyiMetric(".total_steps", "count");

@@ -299,7 +300,7 @@ void ANGLEPerfTest::run()
         printResults();
         if (gVerboseLogging)
         {
-            double trialTime = mTimer.getElapsedTime();
+            double trialTime = mTimer.getElapsedWallClockTime();
             printf("Trial %d time: %.2lf seconds.\n", trial + 1, trialTime);

             double secondsPerStep = trialTime / static_cast<double>(mTrialNumStepsPerformed);

@@ -354,7 +355,7 @@ void ANGLEPerfTest::doRunLoop(double maxRunTime, int maxStepsToRun, RunLoopPolic
        }
        mRunning = false;
    }
-    else if (mTimer.getElapsedTime() > maxRunTime)
+    else if (mTimer.getElapsedWallClockTime() > maxRunTime)
    {
        mRunning = false;
    }

@@ -389,18 +390,22 @@ void ANGLEPerfTest::TearDown() {}

 double ANGLEPerfTest::printResults()
 {
-    double elapsedTimeSeconds[2] = {
-        mTimer.getElapsedTime(),
+    // Get cpu time first, so it doesn't include the cpu time of getting the
+    // wall time. We want cpu time to be more accurate.
+    double elapsedTimeSeconds[3] = {
+        mTimer.getElapsedCpuTime(),
+        mTimer.getElapsedWallClockTime(),
         mGPUTimeNs * 1e-9,
     };

-    const char *clockNames[2] = {
+    const char *clockNames[3] = {
+        ".cpu_time",
         ".wall_time",
         ".gpu_time",
     };

     // If measured gpu time is non-zero, print that too.
-    size_t clocksToOutput = mGPUTimeNs > 0 ? 2 : 1;
+    size_t clocksToOutput = mGPUTimeNs > 0 ? 3 : 2;

     double retValue = 0.0;
     for (size_t i = 0; i < clocksToOutput; ++i)

@@ -475,7 +480,7 @@ void ANGLEPerfTest::calibrateStepsToRun(RunLoopPolicy policy)
     // Run initially for "gCalibrationTimeSeconds" using the run loop policy.
     doRunLoop(gCalibrationTimeSeconds, std::numeric_limits<int>::max(), policy);

-    double elapsedTime = mTimer.getElapsedTime();
+    double elapsedTime = mTimer.getElapsedWallClockTime();
     int stepsPerformed = mTrialNumStepsPerformed;

     double scale = gCalibrationTimeSeconds / elapsedTime;

@@ -504,7 +509,7 @@ void ANGLEPerfTest::calibrateStepsToRun(RunLoopPolicy policy)
     doRunLoop(gMaxTrialTimeSeconds, stepsToRun, RunLoopPolicy::RunContinuously);

     // Compute mean of the calibration results.
-    double sampleElapsedTime = mTimer.getElapsedTime();
+    double sampleElapsedTime = mTimer.getElapsedWallClockTime();
     int sampleStepsPerformed = mTrialNumStepsPerformed;

     if (gVerboseLogging)

@@ -815,7 +820,7 @@ void ANGLERenderTest::SetUp()
                   RunLoopPolicy::FinishEveryStep);
         if (gVerboseLogging)
         {
-            printf("Warm-up loop took %.2lf seconds.\n", mTimer.getElapsedTime());
+            printf("Warm-up loop took %.2lf seconds.\n", mTimer.getElapsedWallClockTime());
         }
     }

@@ -29,7 +29,7 @@ struct Captures final : private angle::NonCopyable
 double CapturePlatform_currentTime(angle::PlatformMethods *platformMethods)
 {
     Captures *captures = static_cast<Captures *>(platformMethods->context);
-    return captures->timer.getElapsedTime();
+    return captures->timer.getElapsedWallClockTime();
 }

 void CapturePlatform_histogramCustomCounts(angle::PlatformMethods *platformMethods,

@@ -276,7 +276,7 @@ void InstancingPerfBenchmark::drawBenchmark()
     // Animatino makes the test more interesting visually, but also eats up many CPU cycles.
     if (params.animationEnabled)
     {
-        float time = static_cast<float>(mTimer.getElapsedTime());
+        float time = static_cast<float>(mTimer.getElapsedWallClockTime());

         for (size_t pointIndex = 0; pointIndex < mTranslateData.size(); ++pointIndex)
         {

@@ -429,7 +429,7 @@ void UpdateCurrentTestResult(const testing::TestResult &resultIn, TestResults *r
         resultOut.type = TestResultType::Pass;
     }

-    resultOut.elapsedTimeSeconds = resultsOut->currentTestTimer.getElapsedTime();
+    resultOut.elapsedTimeSeconds = resultsOut->currentTestTimer.getElapsedWallClockTime();
 }

 TestIdentifier GetTestIdentifier(const testing::TestInfo &testInfo)

@@ -1363,7 +1363,7 @@ void TestSuite::onCrashOrTimeout(TestResultType crashOrTimeout)
     {
         TestResult &result = mTestResults.results[mTestResults.currentTest];
         result.type = crashOrTimeout;
-        result.elapsedTimeSeconds = mTestResults.currentTestTimer.getElapsedTime();
+        result.elapsedTimeSeconds = mTestResults.currentTestTimer.getElapsedWallClockTime();
     }

     if (mResultsFile.empty())

@@ -1795,7 +1795,7 @@ int TestSuite::run()
             {
                 messageTimer.start();
             }
-            else if (messageTimer.getElapsedTime() > kIdleMessageTimeout)
+            else if (messageTimer.getElapsedWallClockTime() > kIdleMessageTimeout)
             {
                 const ProcessInfo &processInfo = mCurrentProcesses[0];
                 double processTime = processInfo.process->getElapsedTimeSeconds();

@@ -1831,7 +1831,7 @@ int TestSuite::run()
     }

     totalRunTime.stop();
-    printf("Tests completed in %lf seconds\n", totalRunTime.getElapsedTime());
+    printf("Tests completed in %lf seconds\n", totalRunTime.getElapsedWallClockTime());

     return printFailuresAndReturnCount() == 0 ? 0 : 1;
 }

@@ -1884,7 +1884,7 @@ void TestSuite::startWatchdog()
     {
         {
             std::lock_guard<std::mutex> guard(mTestResults.currentTestMutex);
-            if (mTestResults.currentTestTimer.getElapsedTime() >
+            if (mTestResults.currentTestTimer.getElapsedWallClockTime() >
                 mTestResults.currentTestTimeout)
             {
                 break;

@@ -14,17 +14,19 @@ Timer::Timer() : mRunning(false), mStartTime(0), mStopTime(0) {}

 void Timer::start()
 {
-    mStartTime = angle::GetCurrentSystemTime();
-    mRunning = true;
+    mStartTime = angle::GetCurrentSystemTime();
+    mStartCpuTime = angle::GetCurrentProcessCpuTime();
+    mRunning = true;
 }

 void Timer::stop()
 {
-    mStopTime = angle::GetCurrentSystemTime();
-    mRunning = false;
+    mStopTime = angle::GetCurrentSystemTime();
+    mStopCpuTime = angle::GetCurrentProcessCpuTime();
+    mRunning = false;
 }

-double Timer::getElapsedTime() const
+double Timer::getElapsedWallClockTime() const
 {
     double endTime;
     if (mRunning)

@@ -38,3 +40,18 @@ double Timer::getElapsedTime() const

     return endTime - mStartTime;
 }
+
+double Timer::getElapsedCpuTime() const
+{
+    double endTime;
+    if (mRunning)
+    {
+        endTime = angle::GetCurrentProcessCpuTime();
+    }
+    else
+    {
+        endTime = mStopCpuTime;
+    }
+
+    return endTime - mStartCpuTime;
+}

util/Timer.h
@@ -13,17 +13,20 @@ class Timer final
     Timer();
     ~Timer() {}

-    // Use start() and stop() to record the duration and use getElapsedTime() to query that
-    // duration. If getElapsedTime() is called in between, it will report the elapsed time since
-    // start().
+    // Use start() and stop() to record the duration and use getElapsedWallClockTime() to query that
+    // duration. If getElapsedWallClockTime() is called in between, it will report the elapsed time
+    // since start().
     void start();
     void stop();
-    double getElapsedTime() const;
+    double getElapsedWallClockTime() const;
+    double getElapsedCpuTime() const;

   private:
     bool mRunning;
     double mStartTime;
     double mStopTime;
+    double mStartCpuTime;
+    double mStopCpuTime;
 };

 #endif  // SAMPLE_UTIL_TIMER_H

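A minimal usage sketch against the Timer interface as declared above; the buffer sum is a stand-in workload and not taken from the commit:

#include <cstdio>
#include <vector>

#include "util/Timer.h"

int main()
{
    Timer timer;
    timer.start();

    // Stand-in workload: touch a buffer so both clocks advance.
    std::vector<int> data(1 << 20, 1);
    long long sum = 0;
    for (int v : data)
    {
        sum += v;
    }

    timer.stop();

    // Wall time includes any time spent descheduled; CPU time does not.
    printf("wall: %.3f ms, cpu: %.3f ms (sum=%lld)\n",
           timer.getElapsedWallClockTime() * 1e3, timer.getElapsedCpuTime() * 1e3, sum);
    return 0;
}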
@@ -68,7 +68,7 @@ class Process : angle::NonCopyable
     virtual bool kill() = 0;
     virtual int getExitCode() = 0;

-    double getElapsedTimeSeconds() const { return mTimer.getElapsedTime(); }
+    double getElapsedTimeSeconds() const { return mTimer.getElapsedWallClockTime(); }
     const std::string &getStdout() const { return mStdout; }
     const std::string &getStderr() const { return mStderr; }

@@ -62,7 +62,7 @@ TEST(TestUtils, Sleep)
     timer.stop();

     // Use a slightly fuzzy range
-    EXPECT_GT(timer.getElapsedTime(), 0.48);
+    EXPECT_GT(timer.getElapsedWallClockTime(), 0.48);
 }

 constexpr uint32_t kMaxPath = 1000;

@@ -206,7 +206,7 @@ TEST(TestUtils, MAYBE_RunAppAsync)

     Timer timer;
     timer.start();
-    while (!process->finished() && timer.getElapsedTime() < kTimeout)
+    while (!process->finished() && timer.getElapsedWallClockTime() < kTimeout)
     {
         angle::Sleep(1);
     }

@@ -238,7 +238,7 @@ TEST(TestUtils, MAYBE_RunAppAsyncRedirectStderrToStdout)

     Timer timer;
     timer.start();
-    while (!process->finished() && timer.getElapsedTime() < kTimeout)
+    while (!process->finished() && timer.getElapsedWallClockTime() < kTimeout)
     {
         angle::Sleep(1);
     }

@@ -619,7 +619,7 @@ bool Win32Window::takeScreenshot(uint8_t *pixelData)
     // for a while before issuing screenshot if window was just made visible.
     {
         static const double WAIT_WINDOW_VISIBLE_MS = 0.5;  // Half a second for the animation
-        double timeSinceVisible = mSetVisibleTimer.getElapsedTime();
+        double timeSinceVisible = mSetVisibleTimer.getElapsedWallClockTime();

         if (timeSinceVisible < WAIT_WINDOW_VISIBLE_MS)
         {

@@ -441,7 +441,8 @@ bool X11Window::resize(int width, int height)
     // Wait until the window has actually been resized so that the code calling resize
     // can assume the window has been resized.
    const double kResizeWaitDelay = 0.2;
-    while ((mHeight != height || mWidth != width) && timer.getElapsedTime() < kResizeWaitDelay)
+    while ((mHeight != height || mWidth != width) &&
+           timer.getElapsedWallClockTime() < kResizeWaitDelay)
     {
         messageLoop();
         angle::Sleep(10);