Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1627563 - Replace MOZ_MUST_USE with [[nodiscard]] in mozglue/baseprofiler. r=canaltinova
Also move MOZ_MUST_USE before function declarations' specifiers and return type: while clang and gcc's `__attribute__((warn_unused_result))` can appear before, between, or after function specifiers and return types, the `[[nodiscard]]` attribute must precede the function specifiers. Also removed a few unneeded `#include "mozilla/Attributes.h"`.

Differential Revision: https://phabricator.services.mozilla.com/D69755

--HG--
extra : moz-landing-system : lando
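To illustrate the placement rule described above, here is a minimal standalone sketch (not part of the patch; the `Parse*` names are hypothetical):

// GCC/Clang accept the GNU attribute before, between, or after the
// function specifiers and return type:
__attribute__((warn_unused_result)) static int ParseA();
static __attribute__((warn_unused_result)) int ParseB();
static int __attribute__((warn_unused_result)) ParseC();

// C++17 [[nodiscard]] must precede the function specifiers:
[[nodiscard]] static int ParseD();    // OK
// static [[nodiscard]] int ParseE(); // ill-formed: an attribute list
//                                    // cannot appear between decl-specifiers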
Parent: aef12bf890
Commit: ca9fa3fe8f
@@ -32,7 +32,6 @@
 # include "platform.h"
 
 # include "mozilla/Atomics.h"
-# include "mozilla/Attributes.h"
 # include "mozilla/DebugOnly.h"
 # include "mozilla/EndianUtils.h"
 

@@ -270,14 +270,14 @@ class UniqueStacks {
   UniqueStacks();
 
   // Return a StackKey for aFrame as the stack's root frame (no prefix).
-  MOZ_MUST_USE StackKey BeginStack(const FrameKey& aFrame);
+  [[nodiscard]] StackKey BeginStack(const FrameKey& aFrame);
 
   // Return a new StackKey that is obtained by appending aFrame to aStack.
-  MOZ_MUST_USE StackKey AppendFrame(const StackKey& aStack,
-                                    const FrameKey& aFrame);
+  [[nodiscard]] StackKey AppendFrame(const StackKey& aStack,
+                                     const FrameKey& aFrame);
 
-  MOZ_MUST_USE uint32_t GetOrAddFrameIndex(const FrameKey& aFrame);
-  MOZ_MUST_USE uint32_t GetOrAddStackIndex(const StackKey& aStack);
+  [[nodiscard]] uint32_t GetOrAddFrameIndex(const FrameKey& aFrame);
+  [[nodiscard]] uint32_t GetOrAddStackIndex(const StackKey& aStack);
 
   void SpliceFrameTableElements(SpliceableJSONWriter& aWriter);
   void SpliceStackTableElements(SpliceableJSONWriter& aWriter);

@@ -598,7 +598,7 @@ class ActivePS {
                          aFilterCount, aDuration);
   }
 
-  static MOZ_MUST_USE SamplerThread* Destroy(PSLockRef aLock) {
+  [[nodiscard]] static SamplerThread* Destroy(PSLockRef aLock) {
     MOZ_ASSERT(sInstance);
     auto samplerThread = sInstance->mSamplerThread;
     delete sInstance;

@@ -2947,7 +2947,7 @@ void profiler_ensure_started(PowerOfTwo32 aCapacity, double aInterval,
   }
 }
 
-static MOZ_MUST_USE SamplerThread* locked_profiler_stop(PSLockRef aLock) {
+[[nodiscard]] static SamplerThread* locked_profiler_stop(PSLockRef aLock) {
   LOG("locked_profiler_stop");
 
   MOZ_RELEASE_ASSERT(CorePS::Exists() && ActivePS::Exists(aLock));

@@ -14,7 +14,6 @@
 #endif
 
 #include "mozilla/Atomics.h"
-#include "mozilla/Attributes.h"
 #include "mozilla/Maybe.h"
 #include "mozilla/ProfileBufferEntrySerialization.h"
 #include "mozilla/RefPtr.h"

@@ -78,14 +78,14 @@ class ProfileBufferChunk {
   // Hint about the size of the metadata (public and private headers).
   // `Create()` below takes the minimum *buffer* size, so the minimum total
   // Chunk size is at least `SizeofChunkMetadata() + aMinBufferBytes`.
-  static constexpr MOZ_MUST_USE Length SizeofChunkMetadata() {
+  [[nodiscard]] static constexpr Length SizeofChunkMetadata() {
     return static_cast<Length>(sizeof(InternalHeader));
   }
 
   // Allocate space for a chunk with a given minimum size, and construct it.
   // The actual size may be higher, to match the actual space taken in the
   // memory pool.
-  static MOZ_MUST_USE UniquePtr<ProfileBufferChunk> Create(
+  [[nodiscard]] static UniquePtr<ProfileBufferChunk> Create(
       Length aMinBufferBytes) {
     // We need at least one byte, to cover the always-present `mBuffer` byte.
     aMinBufferBytes = std::max(aMinBufferBytes, Length(1));

@@ -148,7 +148,7 @@ class ProfileBufferChunk {
 
   // Must be called with the first block tail (may be empty), which will be
   // skipped if the reader starts with this ProfileBufferChunk.
-  MOZ_MUST_USE SpanOfBytes ReserveInitialBlockAsTail(Length aTailSize) {
+  [[nodiscard]] SpanOfBytes ReserveInitialBlockAsTail(Length aTailSize) {
 #ifdef DEBUG
     MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::Created ||
                mInternalHeader.mState == InternalHeader::State::Recycled);

@@ -167,7 +167,7 @@ class ProfileBufferChunk {
   // Reserve a block of up to `aBlockSize` bytes, and return a Span to it, and
   // its starting index. The actual size may be smaller, if the block cannot fit
   // in the remaining space.
-  MOZ_MUST_USE ReserveReturn ReserveBlock(Length aBlockSize) {
+  [[nodiscard]] ReserveReturn ReserveBlock(Length aBlockSize) {
     MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::InUse);
     MOZ_ASSERT(RangeStart() != 0,
               "Expected valid range start before first Reserve()");

@@ -265,53 +265,55 @@ class ProfileBufferChunk {
     const int mPADDING = 0;
   };
 
-  MOZ_MUST_USE const Header& ChunkHeader() const {
+  [[nodiscard]] const Header& ChunkHeader() const {
     return mInternalHeader.mHeader;
   }
 
-  MOZ_MUST_USE Length BufferBytes() const { return ChunkHeader().mBufferBytes; }
+  [[nodiscard]] Length BufferBytes() const {
+    return ChunkHeader().mBufferBytes;
+  }
 
   // Total size of the chunk (buffer + header).
-  MOZ_MUST_USE Length ChunkBytes() const {
+  [[nodiscard]] Length ChunkBytes() const {
     return static_cast<Length>(sizeof(InternalHeader)) + BufferBytes();
   }
 
   // Size of external resources, in this case all the following chunks.
-  MOZ_MUST_USE size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
+  [[nodiscard]] size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
     const ProfileBufferChunk* const next = GetNext();
     return next ? next->SizeOfIncludingThis(aMallocSizeOf) : 0;
   }
 
   // Size of this chunk and all following ones.
-  MOZ_MUST_USE size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+  [[nodiscard]] size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
     // Just in case `aMallocSizeOf` falls back on just `sizeof`, make sure we
     // account for at least the actual Chunk requested allocation size.
     return std::max<size_t>(aMallocSizeOf(this), ChunkBytes()) +
            SizeOfExcludingThis(aMallocSizeOf);
   }
 
-  MOZ_MUST_USE Length RemainingBytes() const {
+  [[nodiscard]] Length RemainingBytes() const {
     return BufferBytes() - OffsetPastLastBlock();
   }
 
-  MOZ_MUST_USE Length OffsetFirstBlock() const {
+  [[nodiscard]] Length OffsetFirstBlock() const {
     return ChunkHeader().mOffsetFirstBlock;
   }
 
-  MOZ_MUST_USE Length OffsetPastLastBlock() const {
+  [[nodiscard]] Length OffsetPastLastBlock() const {
     return ChunkHeader().mOffsetPastLastBlock;
   }
 
-  MOZ_MUST_USE Length BlockCount() const { return ChunkHeader().mBlockCount; }
+  [[nodiscard]] Length BlockCount() const { return ChunkHeader().mBlockCount; }
 
-  MOZ_MUST_USE int ProcessId() const { return ChunkHeader().mProcessId; }
+  [[nodiscard]] int ProcessId() const { return ChunkHeader().mProcessId; }
 
   void SetProcessId(int aProcessId) {
     mInternalHeader.mHeader.mProcessId = aProcessId;
   }
 
   // Global range index at the start of this Chunk.
-  MOZ_MUST_USE ProfileBufferIndex RangeStart() const {
+  [[nodiscard]] ProfileBufferIndex RangeStart() const {
     return ChunkHeader().mRangeStart;
   }
 

@@ -321,23 +323,23 @@ class ProfileBufferChunk {
 
   // Get a read-only Span to the buffer. It is up to the caller to decypher the
   // contents, based on known offsets and the internal block structure.
-  MOZ_MUST_USE Span<const Byte> BufferSpan() const {
+  [[nodiscard]] Span<const Byte> BufferSpan() const {
     return Span<const Byte>(&mBuffer, BufferBytes());
   }
 
-  MOZ_MUST_USE Byte ByteAt(Length aOffset) const {
+  [[nodiscard]] Byte ByteAt(Length aOffset) const {
     MOZ_ASSERT(aOffset < OffsetPastLastBlock());
     return *(&mBuffer + aOffset);
   }
 
-  MOZ_MUST_USE ProfileBufferChunk* GetNext() {
+  [[nodiscard]] ProfileBufferChunk* GetNext() {
     return mInternalHeader.mNext.get();
   }
-  MOZ_MUST_USE const ProfileBufferChunk* GetNext() const {
+  [[nodiscard]] const ProfileBufferChunk* GetNext() const {
     return mInternalHeader.mNext.get();
   }
 
-  MOZ_MUST_USE UniquePtr<ProfileBufferChunk> ReleaseNext() {
+  [[nodiscard]] UniquePtr<ProfileBufferChunk> ReleaseNext() {
     return std::move(mInternalHeader.mNext);
   }
 

@@ -350,7 +352,7 @@ class ProfileBufferChunk {
   }
 
   // Find the last chunk in this chain (it may be `this`).
-  MOZ_MUST_USE ProfileBufferChunk* Last() {
+  [[nodiscard]] ProfileBufferChunk* Last() {
     ProfileBufferChunk* chunk = this;
     for (;;) {
       ProfileBufferChunk* next = chunk->GetNext();

@@ -360,7 +362,7 @@ class ProfileBufferChunk {
       chunk = next;
     }
   }
-  MOZ_MUST_USE const ProfileBufferChunk* Last() const {
+  [[nodiscard]] const ProfileBufferChunk* Last() const {
     const ProfileBufferChunk* chunk = this;
     for (;;) {
       const ProfileBufferChunk* next = chunk->GetNext();

@@ -379,7 +381,7 @@ class ProfileBufferChunk {
   }
 
   // Join two possibly-null chunk lists.
-  static MOZ_MUST_USE UniquePtr<ProfileBufferChunk> Join(
+  [[nodiscard]] static UniquePtr<ProfileBufferChunk> Join(
       UniquePtr<ProfileBufferChunk>&& aFirst,
       UniquePtr<ProfileBufferChunk>&& aLast) {
     if (aFirst) {

@@ -36,13 +36,13 @@ class ProfileBufferChunkManager {
 #endif
 
   // Estimated maximum buffer size.
-  virtual MOZ_MUST_USE size_t MaxTotalSize() const = 0;
+  [[nodiscard]] virtual size_t MaxTotalSize() const = 0;
 
   // Create or recycle a chunk right now. May return null in case of allocation
   // failure.
   // Note that the chunk-destroyed callback may be invoked during this call;
   // user should be careful with reentrancy issues.
-  virtual MOZ_MUST_USE UniquePtr<ProfileBufferChunk> GetChunk() = 0;
+  [[nodiscard]] virtual UniquePtr<ProfileBufferChunk> GetChunk() = 0;
 
   // `aChunkReceiver` may be called with a new or recycled chunk, or nullptr.
   // (See `FulfillChunkRequests()` regarding when the callback may happen.)

@@ -76,13 +76,13 @@ class ProfileBufferChunkManager {
           aChunkDestroyedCallback) = 0;
 
   // Give away all released chunks that have not yet been destroyed.
-  virtual MOZ_MUST_USE UniquePtr<ProfileBufferChunk>
+  [[nodiscard]] virtual UniquePtr<ProfileBufferChunk>
   GetExtantReleasedChunks() = 0;
 
   // Let a callback see all released chunks that have not yet been destroyed, if
   // any. Return whatever the callback returns.
   template <typename Callback>
-  MOZ_MUST_USE auto PeekExtantReleasedChunks(Callback&& aCallback) {
+  [[nodiscard]] auto PeekExtantReleasedChunks(Callback&& aCallback) {
     const ProfileBufferChunk* chunks = PeekExtantReleasedChunksAndLock();
     auto unlock =
         MakeScopeExit([&]() { UnlockAfterPeekExtantReleasedChunks(); });

@@ -92,10 +92,10 @@ class ProfileBufferChunkManager {
   // Chunks that were still unreleased will never be released.
   virtual void ForgetUnreleasedChunks() = 0;
 
-  virtual MOZ_MUST_USE size_t
-  SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const = 0;
-  virtual MOZ_MUST_USE size_t
-  SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const = 0;
+  [[nodiscard]] virtual size_t SizeOfExcludingThis(
+      MallocSizeOf aMallocSizeOf) const = 0;
+  [[nodiscard]] virtual size_t SizeOfIncludingThis(
+      MallocSizeOf aMallocSizeOf) const = 0;
 
  protected:
   // Derived classes to implement `PeekExtantReleasedChunks` through these:

@@ -66,11 +66,11 @@ class ProfileBufferChunkManagerSingle final : public ProfileBufferChunkManager {
     }
   }
 
-  MOZ_MUST_USE size_t MaxTotalSize() const final { return mBufferBytes; }
+  [[nodiscard]] size_t MaxTotalSize() const final { return mBufferBytes; }
 
   // One of `GetChunk` and `RequestChunk` will only work the very first time (if
   // there's even a chunk).
-  MOZ_MUST_USE UniquePtr<ProfileBufferChunk> GetChunk() final {
+  [[nodiscard]] UniquePtr<ProfileBufferChunk> GetChunk() final {
     MOZ_ASSERT(mUser, "Not registered yet");
     return std::move(mInitialChunk);
   }

@@ -103,7 +103,7 @@ class ProfileBufferChunkManagerSingle final : public ProfileBufferChunkManager {
     mChunkDestroyedCallback = std::move(aChunkDestroyedCallback);
   }
 
-  MOZ_MUST_USE UniquePtr<ProfileBufferChunk> GetExtantReleasedChunks() final {
+  [[nodiscard]] UniquePtr<ProfileBufferChunk> GetExtantReleasedChunks() final {
     MOZ_ASSERT(mUser, "Not registered yet");
     return std::move(mReleasedChunk);
   }

@@ -112,8 +112,8 @@ class ProfileBufferChunkManagerSingle final : public ProfileBufferChunkManager {
     MOZ_ASSERT(mUser, "Not registered yet");
   }
 
-  MOZ_MUST_USE size_t
-  SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const final {
+  [[nodiscard]] size_t SizeOfExcludingThis(
+      MallocSizeOf aMallocSizeOf) const final {
     MOZ_ASSERT(mUser, "Not registered yet");
     size_t size = 0;
     if (mInitialChunk) {

@@ -126,8 +126,8 @@ class ProfileBufferChunkManagerSingle final : public ProfileBufferChunkManager {
     return size;
   }
 
-  MOZ_MUST_USE size_t
-  SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const final {
+  [[nodiscard]] size_t SizeOfIncludingThis(
+      MallocSizeOf aMallocSizeOf) const final {
     MOZ_ASSERT(mUser, "Not registered yet");
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }

@@ -39,12 +39,12 @@ class ProfileBufferChunkManagerWithLocalLimit final
       : mMaxTotalBytes(aMaxTotalBytes),
         mChunkMinBufferBytes(aChunkMinBufferBytes) {}
 
-  MOZ_MUST_USE size_t MaxTotalSize() const final {
+  [[nodiscard]] size_t MaxTotalSize() const final {
     // `mMaxTotalBytes` is `const` so there is no need to lock the mutex.
     return mMaxTotalBytes;
   }
 
-  MOZ_MUST_USE UniquePtr<ProfileBufferChunk> GetChunk() final {
+  [[nodiscard]] UniquePtr<ProfileBufferChunk> GetChunk() final {
     AUTO_PROFILER_STATS(Local_GetChunk);
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
     return GetChunk(lock);

@@ -123,7 +123,7 @@ class ProfileBufferChunkManagerWithLocalLimit final
     mChunkDestroyedCallback = std::move(aChunkDestroyedCallback);
   }
 
-  MOZ_MUST_USE UniquePtr<ProfileBufferChunk> GetExtantReleasedChunks() final {
+  [[nodiscard]] UniquePtr<ProfileBufferChunk> GetExtantReleasedChunks() final {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
     MOZ_ASSERT(mUser, "Not registered yet");
     mReleasedBytes = 0;

@@ -136,14 +136,14 @@ class ProfileBufferChunkManagerWithLocalLimit final
     mUnreleasedBytes = 0;
   }
 
-  MOZ_MUST_USE size_t
-  SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const final {
+  [[nodiscard]] size_t SizeOfExcludingThis(
+      MallocSizeOf aMallocSizeOf) const final {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
     return SizeOfExcludingThis(aMallocSizeOf, lock);
   }
 
-  MOZ_MUST_USE size_t
-  SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const final {
+  [[nodiscard]] size_t SizeOfIncludingThis(
+      MallocSizeOf aMallocSizeOf) const final {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
     MOZ_ASSERT(mUser, "Not registered yet");
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf, lock);

@@ -158,7 +158,7 @@ class ProfileBufferChunkManagerWithLocalLimit final
   void UnlockAfterPeekExtantReleasedChunks() final { mMutex.Unlock(); }
 
  private:
-  MOZ_MUST_USE UniquePtr<ProfileBufferChunk> GetChunk(
+  [[nodiscard]] UniquePtr<ProfileBufferChunk> GetChunk(
       const baseprofiler::detail::BaseProfilerAutoLock&) {
     MOZ_ASSERT(mUser, "Not registered yet");
     UniquePtr<ProfileBufferChunk> chunk;

@@ -209,9 +209,9 @@ class ProfileBufferChunkManagerWithLocalLimit final
     return chunk;
   }
 
-  MOZ_MUST_USE size_t
-  SizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
-                      const baseprofiler::detail::BaseProfilerAutoLock&) const {
+  [[nodiscard]] size_t SizeOfExcludingThis(
+      MallocSizeOf aMallocSizeOf,
+      const baseprofiler::detail::BaseProfilerAutoLock&) const {
     MOZ_ASSERT(mUser, "Not registered yet");
     // Note: Missing size of std::function external resources (if any).
     return mReleasedChunks ? mReleasedChunks->SizeOfIncludingThis(aMallocSizeOf)

@@ -96,7 +96,7 @@ class ProfileBufferEntryReader {
 
   // Don't =default moving, as it doesn't bring any benefit in this class.
 
-  MOZ_MUST_USE Length RemainingBytes() const {
+  [[nodiscard]] Length RemainingBytes() const {
     return mCurrentSpan.LengthBytes() + mNextSpanOrEmpty.LengthBytes();
   }
 

@@ -111,18 +111,18 @@ class ProfileBufferEntryReader {
     }
   }
 
-  MOZ_MUST_USE ProfileBufferBlockIndex CurrentBlockIndex() const {
+  [[nodiscard]] ProfileBufferBlockIndex CurrentBlockIndex() const {
     return mCurrentBlockIndex;
   }
 
-  MOZ_MUST_USE ProfileBufferBlockIndex NextBlockIndex() const {
+  [[nodiscard]] ProfileBufferBlockIndex NextBlockIndex() const {
     return mNextBlockIndex;
   }
 
   // Create a reader of size zero, pointing at aOffset past the current position
   // of this Reader, so it can be used as end iterator.
-  MOZ_MUST_USE ProfileBufferEntryReader
-  EmptyIteratorAtOffset(Length aOffset) const {
+  [[nodiscard]] ProfileBufferEntryReader EmptyIteratorAtOffset(
+      Length aOffset) const {
     MOZ_RELEASE_ASSERT(aOffset <= RemainingBytes());
     if (MOZ_LIKELY(aOffset < mCurrentSpan.LengthBytes())) {
       // aOffset is before the end of mCurrentSpan.

@@ -144,7 +144,7 @@ class ProfileBufferEntryReader {
     using reference = const Byte&;
     using iterator_category = std::input_iterator_tag;
 
-    MOZ_MUST_USE const Byte& operator*() {
+    [[nodiscard]] const Byte& operator*() {
       // Assume the caller will read from the returned reference (and not just
       // take the address).
       MOZ_RELEASE_ASSERT(mCurrentSpan.LengthBytes() >= 1);

@@ -192,16 +192,16 @@ class ProfileBufferEntryReader {
     return *this;
   }
 
-  MOZ_MUST_USE bool operator==(const ProfileBufferEntryReader& aOther) const {
+  [[nodiscard]] bool operator==(const ProfileBufferEntryReader& aOther) const {
     return mCurrentSpan.Elements() == aOther.mCurrentSpan.Elements();
   }
-  MOZ_MUST_USE bool operator!=(const ProfileBufferEntryReader& aOther) const {
+  [[nodiscard]] bool operator!=(const ProfileBufferEntryReader& aOther) const {
     return mCurrentSpan.Elements() != aOther.mCurrentSpan.Elements();
   }
 
   // Read an unsigned LEB128 number and move iterator ahead.
   template <typename T>
-  MOZ_MUST_USE T ReadULEB128() {
+  [[nodiscard]] T ReadULEB128() {
     return ::mozilla::ReadULEB128<T>(*this);
   }
 

@@ -253,7 +253,7 @@ class ProfileBufferEntryReader {
 
   // Read data as an object and move iterator ahead.
   template <typename T>
-  MOZ_MUST_USE T ReadObject() {
+  [[nodiscard]] T ReadObject() {
     T ob = Deserializer<T>::Read(*this);
     return ob;
   }

@@ -361,15 +361,15 @@ class ProfileBufferEntryWriter {
     MOZ_RELEASE_ASSERT(!mCurrentSpan.IsEmpty() || mNextSpanOrEmpty.IsEmpty());
   }
 
-  MOZ_MUST_USE Length RemainingBytes() const {
+  [[nodiscard]] Length RemainingBytes() const {
     return mCurrentSpan.LengthBytes() + mNextSpanOrEmpty.LengthBytes();
   }
 
-  MOZ_MUST_USE ProfileBufferBlockIndex CurrentBlockIndex() const {
+  [[nodiscard]] ProfileBufferBlockIndex CurrentBlockIndex() const {
     return mCurrentBlockIndex;
   }
 
-  MOZ_MUST_USE ProfileBufferBlockIndex NextBlockIndex() const {
+  [[nodiscard]] ProfileBufferBlockIndex NextBlockIndex() const {
     return mNextBlockIndex;
   }
 

@@ -381,7 +381,7 @@ class ProfileBufferEntryWriter {
     using reference = Byte&;
     using iterator_category = std::output_iterator_tag;
 
-    MOZ_MUST_USE Byte& operator*() {
+    [[nodiscard]] Byte& operator*() {
      MOZ_RELEASE_ASSERT(RemainingBytes() >= 1);
      return *(
          (MOZ_LIKELY(!mCurrentSpan.IsEmpty()) ? mCurrentSpan : mNextSpanOrEmpty)

@@ -427,7 +427,7 @@ class ProfileBufferEntryWriter {
 
   // Number of bytes needed to represent `aValue` in unsigned LEB128.
   template <typename T>
-  static MOZ_MUST_USE unsigned ULEB128Size(T aValue) {
+  [[nodiscard]] static unsigned ULEB128Size(T aValue) {
     return ::mozilla::ULEB128Size(aValue);
   }
 

@@ -439,7 +439,7 @@ class ProfileBufferEntryWriter {
 
   // Number of bytes needed to serialize objects.
   template <typename... Ts>
-  static MOZ_MUST_USE Length SumBytes(const Ts&... aTs) {
+  [[nodiscard]] static Length SumBytes(const Ts&... aTs) {
     return (0 + ... + Serializer<Ts>::Bytes(aTs));
   }
 

@@ -43,35 +43,35 @@ class ProfileBufferBlockIndex {
 
   // Comparison operators. Default `ProfileBufferBlockIndex{}` value is always
   // the lowest.
-  MOZ_MUST_USE bool operator==(const ProfileBufferBlockIndex& aRhs) const {
+  [[nodiscard]] bool operator==(const ProfileBufferBlockIndex& aRhs) const {
     return mBlockIndex == aRhs.mBlockIndex;
   }
-  MOZ_MUST_USE bool operator!=(const ProfileBufferBlockIndex& aRhs) const {
+  [[nodiscard]] bool operator!=(const ProfileBufferBlockIndex& aRhs) const {
     return mBlockIndex != aRhs.mBlockIndex;
   }
-  MOZ_MUST_USE bool operator<(const ProfileBufferBlockIndex& aRhs) const {
+  [[nodiscard]] bool operator<(const ProfileBufferBlockIndex& aRhs) const {
     return mBlockIndex < aRhs.mBlockIndex;
   }
-  MOZ_MUST_USE bool operator<=(const ProfileBufferBlockIndex& aRhs) const {
+  [[nodiscard]] bool operator<=(const ProfileBufferBlockIndex& aRhs) const {
     return mBlockIndex <= aRhs.mBlockIndex;
   }
-  MOZ_MUST_USE bool operator>(const ProfileBufferBlockIndex& aRhs) const {
+  [[nodiscard]] bool operator>(const ProfileBufferBlockIndex& aRhs) const {
     return mBlockIndex > aRhs.mBlockIndex;
   }
-  MOZ_MUST_USE bool operator>=(const ProfileBufferBlockIndex& aRhs) const {
+  [[nodiscard]] bool operator>=(const ProfileBufferBlockIndex& aRhs) const {
     return mBlockIndex >= aRhs.mBlockIndex;
   }
 
   // Explicit conversion to ProfileBufferIndex, mostly used by internal Profile
   // buffer code.
-  MOZ_MUST_USE ProfileBufferIndex ConvertToProfileBufferIndex() const {
+  [[nodiscard]] ProfileBufferIndex ConvertToProfileBufferIndex() const {
     return mBlockIndex;
   }
 
   // Explicit creation from ProfileBufferIndex, mostly used by internal
   // Profile buffer code.
-  static MOZ_MUST_USE ProfileBufferBlockIndex
-  CreateFromProfileBufferIndex(ProfileBufferIndex aIndex) {
+  [[nodiscard]] static ProfileBufferBlockIndex CreateFromProfileBufferIndex(
+      ProfileBufferIndex aIndex) {
     return ProfileBufferBlockIndex(aIndex);
   }
 

@@ -161,7 +161,7 @@ class ULEB128Reader {
 
   // Feed a byte into the parser.
   // Returns true if this was the last byte.
-  constexpr MOZ_MUST_USE bool FeedByteIsComplete(unsigned aByte) {
+  [[nodiscard]] constexpr bool FeedByteIsComplete(unsigned aByte) {
     MOZ_ASSERT(!IsComplete());
     // Extract the 7 bits of value, and shift them in place into the value.
     mValue |= static_cast<T>(aByte & 0x7fu) << mShift;

@@ -185,11 +185,11 @@ class ULEB128Reader {
     mShift = 0;
   }
 
-  constexpr MOZ_MUST_USE bool IsComplete() const {
+  [[nodiscard]] constexpr bool IsComplete() const {
     return mShift == mCompleteShift;
   }
 
-  constexpr MOZ_MUST_USE T Value() const {
+  [[nodiscard]] constexpr T Value() const {
     MOZ_ASSERT(IsComplete());
    return mValue;
  }
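For context, a minimal sketch of what both the old macro and the new attribute achieve (an assumed example, not from the patch; `ComputeChecksum` is hypothetical): the compiler warns when a `[[nodiscard]]` function's return value is silently dropped.

#include <cstdio>

// Hypothetical function following the pattern used throughout this patch.
[[nodiscard]] static int ComputeChecksum(int aValue) { return aValue * 31; }

int main() {
  // ComputeChecksum(42);              // would warn: ignoring return value
  int checksum = ComputeChecksum(42);  // OK: result is consumed
  (void)ComputeChecksum(7);            // OK: discard made explicit
  std::printf("%d\n", checksum);
  return 0;
}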