Bug 1459225 - Baldr: fix race in tier-2 initialization (r=bbouvier)

--HG--
extra : rebase_source : bad94fd55da9cbe9ef1d1a7493cd2e91cbd466b1
Luke Wagner 2018-05-10 18:48:56 -05:00
Parent 4139925b80
Commit e8b42871cc
6 changed files with 284 additions and 312 deletions
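Context, condensed from the comments added in this patch: RegisterCodeSegment() inserts a CodeSegment into a process-wide map, which immediately makes it reachable from other threads executing the containing module. Before this patch, back-pointers such as CodeSegment::codeTier_ and CodeTier::code_ were wired up after construction (via initCodeTier()/initCode()), so a racing thread could observe a partially initialized structure. The rework below makes registration the final, failable step of a single initialize(). A condensed sketch of the resulting protocol (simplified, not verbatim from the patch):

    bool
    CodeSegment::initialize(const CodeTier& codeTier)
    {
        codeTier_ = &codeTier;            // wire every field first
        if (!RegisterCodeSegment(this))   // publication point: other threads
            return false;                 // can reach 'this' from here on
        unregisterOnDestroy_ = true;      // read only by ~CodeSegment, which
        return true;                      // cannot race, so this is safe
    }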

View file

@@ -40,18 +40,9 @@ using mozilla::BinarySearch;
using mozilla::MakeEnumeratedRange;
using mozilla::PodAssign;
bool
CodeSegment::registerInProcessMap()
{
if (!RegisterCodeSegment(this))
return false;
registered_ = true;
return true;
}
CodeSegment::~CodeSegment()
{
if (registered_)
if (unregisterOnDestroy_)
UnregisterCodeSegment(this);
}
@@ -97,6 +88,26 @@ CodeSegment::AllocateCodeBytes(uint32_t codeLength)
return UniqueCodeBytes((uint8_t*)p, FreeCode(roundedCodeLength));
}
bool
CodeSegment::initialize(const CodeTier& codeTier)
{
MOZ_ASSERT(!initialized());
codeTier_ = &codeTier;
MOZ_ASSERT(initialized());
// In the case of tiering, RegisterCodeSegment() immediately makes this code
// segment live to access from other threads executing the containing
// module. So only call once the CodeSegment is fully initialized.
if (!RegisterCodeSegment(this))
return false;
// This bool is only used by the destructor which cannot be called racily
// and so it is not a problem to mutate it after RegisterCodeSegment().
MOZ_ASSERT(!unregisterOnDestroy_);
unregisterOnDestroy_ = true;
return true;
}
const Code&
CodeSegment::code() const
{
@@ -107,7 +118,7 @@ CodeSegment::code() const
void
CodeSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const
{
*code += RoundupCodeLength(length_);
*code += RoundupCodeLength(length());
}
void
@@ -261,13 +272,20 @@ SendCodeRangesToProfiler(const ModuleSegment& ms, const Bytes& bytecode, const M
}
}
ModuleSegment::ModuleSegment(Tier tier,
UniqueCodeBytes codeBytes,
uint32_t codeLength,
const LinkDataTier& linkData)
: CodeSegment(Move(codeBytes), codeLength, CodeSegment::Kind::Module),
tier_(tier),
outOfBoundsCode_(base() + linkData.outOfBoundsOffset),
unalignedAccessCode_(base() + linkData.unalignedAccessOffset),
trapCode_(base() + linkData.trapOffset)
{
}
/* static */ UniqueModuleSegment
ModuleSegment::create(Tier tier,
MacroAssembler& masm,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata,
const CodeRangeVector& codeRanges)
ModuleSegment::create(Tier tier, MacroAssembler& masm, const LinkDataTier& linkData)
{
uint32_t codeLength = masm.bytesNeeded();
@@ -278,16 +296,11 @@ ModuleSegment::create(Tier tier,
// We'll flush the icache after static linking, in initialize().
masm.executableCopy(codeBytes.get(), /* flushICache = */ false);
return create(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata, codeRanges);
return js::MakeUnique<ModuleSegment>(tier, Move(codeBytes), codeLength, linkData);
}
/* static */ UniqueModuleSegment
ModuleSegment::create(Tier tier,
const Bytes& unlinkedBytes,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata,
const CodeRangeVector& codeRanges)
ModuleSegment::create(Tier tier, const Bytes& unlinkedBytes, const LinkDataTier& linkData)
{
uint32_t codeLength = unlinkedBytes.length();
@@ -297,69 +310,35 @@ ModuleSegment::create(Tier tier,
memcpy(codeBytes.get(), unlinkedBytes.begin(), codeLength);
return create(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata, codeRanges);
}
/* static */ UniqueModuleSegment
ModuleSegment::create(Tier tier,
UniqueCodeBytes codeBytes,
uint32_t codeLength,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata,
const CodeRangeVector& codeRanges)
{
// These should always exist and should never be first in the code segment.
auto ms = js::MakeUnique<ModuleSegment>();
if (!ms)
return nullptr;
if (!ms->initialize(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata, codeRanges))
return nullptr;
return UniqueModuleSegment(ms.release());
return js::MakeUnique<ModuleSegment>(tier, Move(codeBytes), codeLength, linkData);
}
bool
ModuleSegment::initialize(Tier tier,
UniqueCodeBytes codeBytes,
uint32_t codeLength,
ModuleSegment::initialize(const CodeTier& codeTier,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata,
const CodeRangeVector& codeRanges)
const MetadataTier& metadataTier)
{
MOZ_ASSERT(!bytes_);
tier_ = tier;
bytes_ = Move(codeBytes);
length_ = codeLength;
outOfBoundsCode_ = bytes_.get() + linkData.outOfBoundsOffset;
unalignedAccessCode_ = bytes_.get() + linkData.unalignedAccessOffset;
trapCode_ = bytes_.get() + linkData.trapOffset;
if (!StaticallyLink(*this, linkData))
return false;
ExecutableAllocator::cacheFlush(bytes_.get(), RoundupCodeLength(codeLength));
ExecutableAllocator::cacheFlush(base(), RoundupCodeLength(length()));
// Reprotect the whole region to avoid having separate RW and RX mappings.
if (!ExecutableAllocator::makeExecutable(bytes_.get(), RoundupCodeLength(codeLength)))
if (!ExecutableAllocator::makeExecutable(base(), RoundupCodeLength(length())))
return false;
if (!registerInProcessMap())
return false;
SendCodeRangesToProfiler(*this, bytecode.bytes, metadata, metadataTier.codeRanges);
SendCodeRangesToProfiler(*this, bytecode.bytes, metadata, codeRanges);
return true;
// See comments in CodeSegment::initialize() for why this must be last.
return CodeSegment::initialize(codeTier);
}
size_t
ModuleSegment::serializedSize() const
{
return sizeof(uint32_t) + length_;
return sizeof(uint32_t) + length();
}
void
@@ -374,17 +353,16 @@ ModuleSegment::serialize(uint8_t* cursor, const LinkDataTier& linkData) const
{
MOZ_ASSERT(tier() == Tier::Serialized);
cursor = WriteScalar<uint32_t>(cursor, length_);
uint8_t* base = cursor;
cursor = WriteBytes(cursor, bytes_.get(), length_);
StaticallyUnlink(base, linkData);
cursor = WriteScalar<uint32_t>(cursor, length());
uint8_t* serializedBase = cursor;
cursor = WriteBytes(cursor, base(), length());
StaticallyUnlink(serializedBase, linkData);
return cursor;
}
const uint8_t*
ModuleSegment::deserialize(const uint8_t* cursor, const ShareableBytes& bytecode,
const LinkDataTier& linkData, const Metadata& metadata,
const CodeRangeVector& codeRanges)
/* static */ const uint8_t*
ModuleSegment::deserialize(const uint8_t* cursor, const LinkDataTier& linkData,
UniqueModuleSegment* segment)
{
uint32_t length;
cursor = ReadScalar<uint32_t>(cursor, &length);
@@ -399,7 +377,8 @@ ModuleSegment::deserialize(const uint8_t* cursor, const ShareableBytes& bytecode
if (!cursor)
return nullptr;
if (!initialize(Tier::Serialized, Move(bytes), length, bytecode, linkData, metadata, codeRanges))
*segment = js::MakeUnique<ModuleSegment>(Tier::Serialized, Move(bytes), length, linkData);
if (!*segment)
return nullptr;
return cursor;
@@ -560,17 +539,6 @@ MetadataTier::deserialize(const uint8_t* cursor)
return cursor;
}
bool
LazyStubSegment::initialize(UniqueCodeBytes codeBytes, size_t length)
{
MOZ_ASSERT(bytes_ == nullptr);
bytes_ = Move(codeBytes);
length_ = length;
return registerInProcessMap();
}
UniqueLazyStubSegment
LazyStubSegment::create(const CodeTier& codeTier, size_t length)
{
@@ -578,9 +546,10 @@ LazyStubSegment::create(const CodeTier& codeTier, size_t length)
if (!codeBytes)
return nullptr;
auto segment = js::MakeUnique<LazyStubSegment>(codeTier);
if (!segment || !segment->initialize(Move(codeBytes), length))
auto segment = js::MakeUnique<LazyStubSegment>(Move(codeBytes), length);
if (!segment || !segment->initialize(codeTier))
return nullptr;
return segment;
}
@@ -588,8 +557,8 @@ bool
LazyStubSegment::hasSpace(size_t bytes) const
{
MOZ_ASSERT(bytes % MPROTECT_PAGE_SIZE == 0);
return bytes <= length_ &&
usedBytes_ <= length_ - bytes;
return bytes <= length() &&
usedBytes_ <= length() - bytes;
}
bool
@@ -1021,24 +990,25 @@ CodeTier::serialize(uint8_t* cursor, const LinkDataTier& linkData) const
return cursor;
}
const uint8_t*
CodeTier::deserialize(const uint8_t* cursor, const SharedBytes& bytecode, Metadata& metadata,
const LinkDataTier& linkData)
/* static */ const uint8_t*
CodeTier::deserialize(const uint8_t* cursor, const LinkDataTier& linkData,
UniqueCodeTier* codeTier)
{
metadata_ = js::MakeUnique<MetadataTier>(Tier::Serialized);
if (!metadata_)
auto metadata = js::MakeUnique<MetadataTier>(Tier::Serialized);
if (!metadata)
return nullptr;
cursor = metadata_->deserialize(cursor);
cursor = metadata->deserialize(cursor);
if (!cursor)
return nullptr;
auto segment = Move(js::MakeUnique<ModuleSegment>());
if (!segment)
return nullptr;
cursor = segment->deserialize(cursor, *bytecode, linkData, metadata, metadata_->codeRanges);
UniqueModuleSegment segment;
cursor = ModuleSegment::deserialize(cursor, linkData, &segment);
if (!cursor)
return nullptr;
segment_ = takeOwnership(Move(segment));
*codeTier = js::MakeUnique<CodeTier>(Move(metadata), Move(segment));
if (!*codeTier)
return nullptr;
return cursor;
}
@@ -1108,25 +1078,38 @@ JumpTables::init(CompileMode mode, const ModuleSegment& ms, const CodeRangeVecto
return true;
}
Code::Code(UniqueCodeTier codeTier, const Metadata& metadata, JumpTables&& maybeJumpTables)
: tier1_(takeOwnership(Move(codeTier))),
Code::Code(UniqueCodeTier tier1, const Metadata& metadata, JumpTables&& maybeJumpTables)
: tier1_(Move(tier1)),
metadata_(&metadata),
profilingLabels_(mutexid::WasmCodeProfilingLabels, CacheableCharsVector()),
jumpTables_(Move(maybeJumpTables))
{}
bool
Code::initialize(const ShareableBytes& bytecode, const LinkDataTier& linkData)
{
MOZ_ASSERT(!initialized());
if (!tier1_->initialize(*this, bytecode, linkData, *metadata_))
return false;
MOZ_ASSERT(initialized());
return true;
}
Code::Code()
: profilingLabels_(mutexid::WasmCodeProfilingLabels, CacheableCharsVector())
{
}
void
Code::setTier2(UniqueCodeTier tier2) const
bool
Code::setTier2(UniqueCodeTier tier2, const ShareableBytes& bytecode,
const LinkDataTier& linkData) const
{
MOZ_RELEASE_ASSERT(!hasTier2());
MOZ_RELEASE_ASSERT(tier2->tier() == Tier::Ion && tier1_->tier() == Tier::Baseline);
tier2_ = takeOwnership(Move(tier2));
if (!tier2->initialize(*this, bytecode, linkData, *metadata_))
return false;
tier2_ = Move(tier2);
return true;
}
void
@@ -1135,6 +1118,7 @@ Code::commitTier2() const
MOZ_RELEASE_ASSERT(!hasTier2());
MOZ_RELEASE_ASSERT(tier2_.get());
hasTier2_ = true;
MOZ_ASSERT(hasTier2());
}
uint32_t
@@ -1180,14 +1164,20 @@ Code::codeTier(Tier tier) const
{
switch (tier) {
case Tier::Baseline:
if (tier1_->tier() == Tier::Baseline)
if (tier1_->tier() == Tier::Baseline) {
MOZ_ASSERT(tier1_->initialized());
return *tier1_;
}
MOZ_CRASH("No code segment at this tier");
case Tier::Ion:
if (tier1_->tier() == Tier::Ion)
if (tier1_->tier() == Tier::Ion) {
MOZ_ASSERT(tier1_->initialized());
return *tier1_;
if (hasTier2())
}
if (tier2_) {
MOZ_ASSERT(tier2_->initialized());
return *tier2_;
}
MOZ_CRASH("No code segment at this tier");
default:
MOZ_CRASH();
@@ -1370,6 +1360,25 @@ Code::addSizeOfMiscIfNotSeen(MallocSizeOf mallocSizeOf,
codeTier(t).addSizeOfMisc(mallocSizeOf, code, data);
}
bool
CodeTier::initialize(const Code& code,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata)
{
MOZ_ASSERT(!initialized());
code_ = &code;
MOZ_ASSERT(lazyStubs_.lock()->empty());
// See comments in CodeSegment::initialize() for why this must be last.
if (!segment_->initialize(*this, bytecode, linkData, metadata, *metadata_))
return false;
MOZ_ASSERT(initialized());
return true;
}
size_t
Code::serializedSize() const
{
@@ -1378,35 +1387,39 @@ Code::serializedSize() const
}
uint8_t*
Code::serialize(uint8_t* cursor, const LinkDataTier& linkDataTier) const
Code::serialize(uint8_t* cursor, const LinkData& linkData) const
{
MOZ_RELEASE_ASSERT(!metadata().debugEnabled);
cursor = metadata().serialize(cursor);
cursor = codeTier(Tier::Serialized).serialize(cursor, linkDataTier);
cursor = codeTier(Tier::Serialized).serialize(cursor, linkData.tier(Tier::Serialized));
return cursor;
}
const uint8_t*
Code::deserialize(const uint8_t* cursor, const SharedBytes& bytecode,
const LinkDataTier& linkDataTier, Metadata& metadata)
/* static */ const uint8_t*
Code::deserialize(const uint8_t* cursor,
const ShareableBytes& bytecode,
const LinkData& linkData,
Metadata& metadata,
SharedCode* out)
{
cursor = metadata.deserialize(cursor);
if (!cursor)
return nullptr;
auto codeTier = js::MakeUnique<CodeTier>(Tier::Serialized);
if (!codeTier)
return nullptr;
cursor = codeTier->deserialize(cursor, bytecode, metadata, linkDataTier);
UniqueCodeTier codeTier;
cursor = CodeTier::deserialize(cursor, linkData.tier(Tier::Serialized), &codeTier);
if (!cursor)
return nullptr;
tier1_ = takeOwnership(Move(codeTier));
metadata_ = &metadata;
if (!jumpTables_.init(CompileMode::Once, tier1_->segment(), tier1_->metadata().codeRanges))
JumpTables jumpTables;
if (!jumpTables.init(CompileMode::Once, codeTier->segment(), codeTier->metadata().codeRanges))
return nullptr;
MutableCode code = js_new<Code>(Move(codeTier), metadata, Move(jumpTables));
if (!code || !code->initialize(bytecode, linkData.tier(Tier::Serialized)))
return nullptr;
*out = code;
return cursor;
}

View file

@@ -34,6 +34,7 @@ namespace wasm {
struct LinkDataTier;
struct MetadataTier;
struct Metadata;
class LinkData;
// ShareableBytes is a reference-counted Vector of bytes.
@@ -81,30 +82,30 @@ class CodeSegment
protected:
static UniqueCodeBytes AllocateCodeBytes(uint32_t codeLength);
UniqueCodeBytes bytes_;
uint32_t length_;
// A back reference to the owning code.
const CodeTier* codeTier_;
enum class Kind {
LazyStubs,
Module
} kind_;
};
bool registerInProcessMap();
private:
bool registered_;
public:
explicit CodeSegment(Kind kind = Kind::Module)
: length_(UINT32_MAX),
codeTier_(nullptr),
CodeSegment(UniqueCodeBytes bytes, uint32_t length, Kind kind)
: bytes_(Move(bytes)),
length_(length),
kind_(kind),
registered_(false)
codeTier_(nullptr),
unregisterOnDestroy_(false)
{}
bool initialize(const CodeTier& codeTier);
private:
const UniqueCodeBytes bytes_;
const uint32_t length_;
const Kind kind_;
const CodeTier* codeTier_;
bool unregisterOnDestroy_;
public:
bool initialized() const { return !!codeTier_; }
~CodeSegment();
bool isLazyStubs() const { return kind_ == Kind::LazyStubs; }
@@ -125,11 +126,7 @@ class CodeSegment
return pc >= base() && pc < (base() + length_);
}
void initCodeTier(const CodeTier* codeTier) {
MOZ_ASSERT(!codeTier_);
codeTier_ = codeTier;
}
const CodeTier& codeTier() const { return *codeTier_; }
const CodeTier& codeTier() const { MOZ_ASSERT(initialized()); return *codeTier_; }
const Code& code() const;
void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const;
@@ -138,61 +135,37 @@ class CodeSegment
// A wasm ModuleSegment owns the allocated executable code for a wasm module.
typedef UniquePtr<ModuleSegment> UniqueModuleSegment;
typedef UniquePtr<const ModuleSegment> UniqueConstModuleSegment;
class ModuleSegment : public CodeSegment
{
Tier tier_;
const Tier tier_;
uint8_t* const outOfBoundsCode_;
uint8_t* const unalignedAccessCode_;
uint8_t* const trapCode_;
// These are pointers into code for stubs used for signal-handler
// control-flow transfer.
uint8_t* outOfBoundsCode_;
uint8_t* unalignedAccessCode_;
uint8_t* trapCode_;
bool initialize(Tier tier,
UniqueCodeBytes bytes,
uint32_t codeLength,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata,
const CodeRangeVector& codeRanges);
static UniqueModuleSegment create(Tier tier,
UniqueCodeBytes bytes,
uint32_t codeLength,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata,
const CodeRangeVector& codeRanges);
public:
ModuleSegment(const ModuleSegment&) = delete;
void operator=(const ModuleSegment&) = delete;
ModuleSegment()
: CodeSegment(),
tier_(Tier(-1)),
outOfBoundsCode_(nullptr),
unalignedAccessCode_(nullptr),
trapCode_(nullptr)
{}
ModuleSegment(Tier tier,
UniqueCodeBytes codeBytes,
uint32_t codeLength,
const LinkDataTier& linkData);
static UniqueModuleSegment create(Tier tier,
jit::MacroAssembler& masm,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata,
const CodeRangeVector& codeRanges);
const LinkDataTier& linkData);
static UniqueModuleSegment create(Tier tier,
const Bytes& unlinkedBytes,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata,
const CodeRangeVector& codeRanges);
const LinkDataTier& linkData);
bool initialize(const CodeTier& codeTier,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata,
const MetadataTier& metadataTier);
Tier tier() const { return tier_; }
// Pointers to stubs to which PC is redirected from the signal-handler.
uint8_t* outOfBoundsCode() const { return outOfBoundsCode_; }
uint8_t* unalignedAccessCode() const { return unalignedAccessCode_; }
uint8_t* trapCode() const { return trapCode_; }
@@ -200,10 +173,9 @@ class ModuleSegment : public CodeSegment
// Structured clone support:
size_t serializedSize() const;
uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkDataTier) const;
const uint8_t* deserialize(const uint8_t* cursor, const ShareableBytes& bytecode,
const LinkDataTier& linkDataTier, const Metadata& metadata,
const CodeRangeVector& codeRanges);
uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkData) const;
static const uint8_t* deserialize(const uint8_t* cursor, const LinkDataTier& linkData,
UniqueModuleSegment* segment);
const CodeRange* lookupRange(const void* pc) const;
@@ -527,6 +499,9 @@ using UniqueMetadataTier = UniquePtr<MetadataTier>;
// isn't (64KiB), a given stub segment can contain entry stubs of many
// functions.
using UniqueLazyStubSegment = UniquePtr<LazyStubSegment>;
using LazyStubSegmentVector = Vector<UniqueLazyStubSegment, 0, SystemAllocPolicy>;
class LazyStubSegment : public CodeSegment
{
CodeRangeVector codeRanges_;
@@ -534,17 +509,14 @@ class LazyStubSegment : public CodeSegment
static constexpr size_t MPROTECT_PAGE_SIZE = 4 * 1024;
bool initialize(UniqueCodeBytes codeBytes, size_t length);
public:
explicit LazyStubSegment(const CodeTier& codeTier)
: CodeSegment(CodeSegment::Kind::LazyStubs),
LazyStubSegment(UniqueCodeBytes bytes, size_t length)
: CodeSegment(Move(bytes), length, CodeSegment::Kind::LazyStubs),
usedBytes_(0)
{
initCodeTier(&codeTier);
}
{}
static UniqueLazyStubSegment create(const CodeTier& codeTier, size_t codeLength);
static UniquePtr<LazyStubSegment> create(const CodeTier& codeTier, size_t length);
static size_t AlignBytesNeeded(size_t bytes) { return AlignBytes(bytes, MPROTECT_PAGE_SIZE); }
bool hasSpace(size_t bytes) const;
@@ -558,9 +530,6 @@ class LazyStubSegment : public CodeSegment
void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const;
};
using UniqueLazyStubSegment = UniquePtr<LazyStubSegment>;
using LazyStubSegmentVector = Vector<UniqueLazyStubSegment, 0, SystemAllocPolicy>;
// LazyFuncExport helps to efficiently lookup a CodeRange from a given function
// index. It is inserted in a vector sorted by function index, to perform
// binary search on it later.
@@ -623,23 +592,20 @@ class LazyStubTier
// CodeTier contains all the data related to a given compilation tier. It is
// built during module generation and then immutably stored in a Code.
typedef UniquePtr<CodeTier> UniqueCodeTier;
typedef UniquePtr<const CodeTier> UniqueConstCodeTier;
class CodeTier
{
const Tier tier_;
const Code* code_;
const Code* code_;
// Serialized information.
UniqueMetadataTier metadata_;
UniqueConstModuleSegment segment_;
const UniqueMetadataTier metadata_;
const UniqueModuleSegment segment_;
// Lazy stubs, not serialized.
ExclusiveData<LazyStubTier> lazyStubs_;
UniqueConstModuleSegment takeOwnership(UniqueModuleSegment segment) const {
segment->initCodeTier(this);
return UniqueConstModuleSegment(segment.release());
}
static const MutexId& mutexForTier(Tier tier) {
if (tier == Tier::Baseline)
return mutexid::WasmLazyStubsTier1;
@@ -648,45 +614,35 @@ class CodeTier
}
public:
explicit CodeTier(Tier tier)
: tier_(tier),
code_(nullptr),
metadata_(nullptr),
segment_(nullptr),
lazyStubs_(mutexForTier(tier))
{}
CodeTier(Tier tier, UniqueMetadataTier metadata, UniqueModuleSegment segment)
: tier_(tier),
code_(nullptr),
CodeTier(UniqueMetadataTier metadata, UniqueModuleSegment segment)
: code_(nullptr),
metadata_(Move(metadata)),
segment_(takeOwnership(Move(segment))),
lazyStubs_(mutexForTier(tier))
segment_(Move(segment)),
lazyStubs_(mutexForTier(segment_->tier()))
{}
void initCode(const Code* code) {
MOZ_ASSERT(!code_);
code_ = code;
}
bool initialized() const { return !!code_ && segment_->initialized(); }
Tier tier() const { return tier_; }
bool initialize(const Code& code,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata);
Tier tier() const { return segment_->tier(); }
const ExclusiveData<LazyStubTier>& lazyStubs() const { return lazyStubs_; }
const MetadataTier& metadata() const { return *metadata_.get(); }
const ModuleSegment& segment() const { return *segment_.get(); }
const Code& code() const { return *code_; }
const Code& code() const { MOZ_ASSERT(initialized()); return *code_; }
const CodeRange* lookupRange(const void* pc) const;
size_t serializedSize() const;
uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkData) const;
const uint8_t* deserialize(const uint8_t* cursor, const SharedBytes& bytecode,
Metadata& metadata, const LinkDataTier& linkData);
static const uint8_t* deserialize(const uint8_t* cursor, const LinkDataTier& linkData,
UniqueCodeTier* codeTier);
void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const;
};
typedef UniquePtr<CodeTier> UniqueCodeTier;
typedef UniquePtr<const CodeTier> UniqueConstCodeTier;
// Jump tables to take tiering into account, when calling either from wasm to
// wasm (through rabaldr) or from jit to wasm (jit entry).
@@ -743,23 +699,23 @@ class JumpTables
//
// profilingLabels_ is lazily initialized, but behind a lock.
typedef RefPtr<const Code> SharedCode;
typedef RefPtr<Code> MutableCode;
class Code : public ShareableBase<Code>
{
UniqueConstCodeTier tier1_;
UniqueCodeTier tier1_;
mutable UniqueConstCodeTier tier2_; // Access only when hasTier2() is true
mutable Atomic<bool> hasTier2_;
SharedMetadata metadata_;
ExclusiveData<CacheableCharsVector> profilingLabels_;
JumpTables jumpTables_;
UniqueConstCodeTier takeOwnership(UniqueCodeTier codeTier) const {
codeTier->initCode(this);
return UniqueConstCodeTier(codeTier.release());
}
public:
Code();
Code(UniqueCodeTier tier, const Metadata& metadata, JumpTables&& maybeJumpTables);
Code(UniqueCodeTier tier1, const Metadata& metadata, JumpTables&& maybeJumpTables);
bool initialized() const { return tier1_->initialized(); }
bool initialize(const ShareableBytes& bytecode, const LinkDataTier& linkData);
void setTieringEntry(size_t i, void* target) const { jumpTables_.setTieringEntry(i, target); }
void** tieringJumpTable() const { return jumpTables_.tiering(); }
@@ -768,7 +724,8 @@ class Code : public ShareableBase<Code>
void** getAddressOfJitEntry(size_t i) const { return jumpTables_.getAddressOfJitEntry(i); }
uint32_t getFuncIndex(JSFunction* fun) const;
void setTier2(UniqueCodeTier tier2) const;
bool setTier2(UniqueCodeTier tier2, const ShareableBytes& bytecode,
const LinkDataTier& linkData) const;
void commitTier2() const;
bool hasTier2() const { return hasTier2_; }
@@ -814,14 +771,14 @@ class Code : public ShareableBase<Code>
// machine code and other parts.
size_t serializedSize() const;
uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkDataTier) const;
const uint8_t* deserialize(const uint8_t* cursor, const SharedBytes& bytecode,
const LinkDataTier& linkDataTier, Metadata& metadata);
uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
static const uint8_t* deserialize(const uint8_t* cursor,
const ShareableBytes& bytecode,
const LinkData& linkData,
Metadata& metadata,
SharedCode* code);
};
typedef RefPtr<const Code> SharedCode;
typedef RefPtr<Code> MutableCode;
} // namespace wasm
} // namespace js
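
The header-side counterpart of the same idea: everything a concurrently-executing reader could touch is either const and fixed at construction, or written in initialize() before RegisterCodeSegment() publishes the segment. Condensed from the CodeSegment class above (comments added here):

    const UniqueCodeBytes bytes_;   // fixed at construction
    const uint32_t length_;
    const Kind kind_;
    const CodeTier* codeTier_;      // null until initialize(); written
                                    // before RegisterCodeSegment(this)
    bool unregisterOnDestroy_;      // only read by the destructor, which
                                    // cannot race, so no synchronization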

View file

@@ -943,8 +943,7 @@ ModuleGenerator::finish(const ShareableBytes& bytecode)
if (!finishMetadata(bytecode))
return nullptr;
return ModuleSegment::create(tier(), masm_, bytecode, *linkDataTier_, *metadata_,
metadataTier_->codeRanges);
return ModuleSegment::create(tier(), masm_, *linkDataTier_);
}
SharedModule
@@ -972,12 +971,12 @@ ModuleGenerator::finishModule(const ShareableBytes& bytecode)
return nullptr;
}
auto codeTier = js::MakeUnique<CodeTier>(tier(), Move(metadataTier_), Move(moduleSegment));
auto codeTier = js::MakeUnique<CodeTier>(Move(metadataTier_), Move(moduleSegment));
if (!codeTier)
return nullptr;
SharedCode code = js_new<Code>(Move(codeTier), *metadata_, Move(jumpTables));
if (!code)
MutableCode code = js_new<Code>(Move(codeTier), *metadata_, Move(jumpTables));
if (!code || !code->initialize(bytecode, *linkDataTier_))
return nullptr;
SharedModule module(js_new<Module>(Move(assumptions_),
@@ -1012,7 +1011,7 @@ ModuleGenerator::finishTier2(Module& module)
if (!moduleSegment)
return false;
auto tier2 = js::MakeUnique<CodeTier>(tier(), Move(metadataTier_), Move(moduleSegment));
auto tier2 = js::MakeUnique<CodeTier>(Move(metadataTier_), Move(moduleSegment));
if (!tier2)
return false;

View file

@@ -110,26 +110,26 @@ LinkDataTier::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
}
void
LinkData::setTier2(UniqueLinkDataTier linkData) const
LinkData::setTier2(UniqueLinkDataTier tier) const
{
MOZ_RELEASE_ASSERT(linkData->tier == Tier::Ion && linkData1_->tier == Tier::Baseline);
MOZ_RELEASE_ASSERT(!linkData2_.get());
linkData2_ = Move(linkData);
MOZ_RELEASE_ASSERT(tier->tier == Tier::Ion && tier1_->tier == Tier::Baseline);
MOZ_RELEASE_ASSERT(!tier2_.get());
tier2_ = Move(tier);
}
const LinkDataTier&
LinkData::linkData(Tier tier) const
LinkData::tier(Tier tier) const
{
switch (tier) {
case Tier::Baseline:
if (linkData1_->tier == Tier::Baseline)
return *linkData1_;
if (tier1_->tier == Tier::Baseline)
return *tier1_;
MOZ_CRASH("No linkData at this tier");
case Tier::Ion:
if (linkData1_->tier == Tier::Ion)
return *linkData1_;
if (linkData2_)
return *linkData2_;
if (tier1_->tier == Tier::Ion)
return *tier1_;
if (tier2_)
return *tier2_;
MOZ_CRASH("No linkData at this tier");
default:
MOZ_CRASH();
@@ -139,24 +139,24 @@ LinkData::linkData(Tier tier) const
size_t
LinkData::serializedSize() const
{
return linkData(Tier::Serialized).serializedSize();
return tier(Tier::Serialized).serializedSize();
}
uint8_t*
LinkData::serialize(uint8_t* cursor) const
{
cursor = linkData(Tier::Serialized).serialize(cursor);
cursor = tier(Tier::Serialized).serialize(cursor);
return cursor;
}
const uint8_t*
LinkData::deserialize(const uint8_t* cursor)
{
MOZ_ASSERT(!linkData1_);
linkData1_ = js::MakeUnique<LinkDataTier>(Tier::Serialized);
if (!linkData1_)
MOZ_ASSERT(!tier1_);
tier1_ = js::MakeUnique<LinkDataTier>(Tier::Serialized);
if (!tier1_)
return nullptr;
cursor = linkData1_->deserialize(cursor);
cursor = tier1_->deserialize(cursor);
return cursor;
}
@@ -164,9 +164,9 @@ size_t
LinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
size_t sum = 0;
sum += linkData1_->sizeOfExcludingThis(mallocSizeOf);
if (linkData2_)
sum += linkData2_->sizeOfExcludingThis(mallocSizeOf);
sum += tier1_->sizeOfExcludingThis(mallocSizeOf);
if (tier2_)
sum += tier2_->sizeOfExcludingThis(mallocSizeOf);
return sum;
}
@@ -243,10 +243,23 @@ Module::notifyCompilationListeners()
}
bool
Module::finishTier2(UniqueLinkDataTier linkData2, UniqueCodeTier tier2, ModuleEnvironment* env2)
Module::finishTier2(UniqueLinkDataTier linkData2, UniqueCodeTier tier2Arg, ModuleEnvironment* env2)
{
MOZ_ASSERT(code().bestTier() == Tier::Baseline && tier2->tier() == Tier::Ion);
MOZ_ASSERT(code().bestTier() == Tier::Baseline && tier2Arg->tier() == Tier::Ion);
// Install the data in the data structures. They will not be visible
// until commitTier2().
if (!code().setTier2(Move(tier2Arg), *bytecode_, *linkData2))
return false;
linkData().setTier2(Move(linkData2));
for (uint32_t i = 0; i < elemSegments_.length(); i++)
elemSegments_[i].setTier2(Move(env2->elemSegments[i].elemCodeRangeIndices(Tier::Ion)));
// Before we can make tier-2 live, we need to compile tier2 versions of any
// extant tier1 lazy stubs (otherwise, tiering would break the assumption
// that any extant exported wasm function has had a lazy entry stub already
// compiled for it).
{
// We need to prevent new tier1 stubs generation until we've committed
// the newer tier2 stubs, otherwise we might not generate one tier2
@@ -255,7 +268,7 @@ Module::finishTier2(UniqueLinkDataTier linkData2, UniqueCodeTier tier2, ModuleEn
const MetadataTier& metadataTier1 = metadata(Tier::Baseline);
auto stubs1 = code().codeTier(Tier::Baseline).lazyStubs().lock();
auto stubs2 = tier2->lazyStubs().lock();
auto stubs2 = code().codeTier(Tier::Ion).lazyStubs().lock();
MOZ_ASSERT(stubs2->empty());
@@ -272,23 +285,15 @@ }
}
HasGcTypes gcTypesEnabled = code().metadata().temporaryHasGcTypes;
const CodeTier& tier2 = code().codeTier(Tier::Ion);
Maybe<size_t> stub2Index;
if (!stubs2->createTier2(gcTypesEnabled, funcExportIndices, *tier2, &stub2Index))
if (!stubs2->createTier2(gcTypesEnabled, funcExportIndices, tier2, &stub2Index))
return false;
// Install the data in the data structures. They will not be visible
// yet.
// Now that we can't fail or otherwise abort tier2, make it live.
MOZ_ASSERT(!code().hasTier2());
linkData().setTier2(Move(linkData2));
code().setTier2(Move(tier2));
for (uint32_t i = 0; i < elemSegments_.length(); i++)
elemSegments_[i].setTier2(Move(env2->elemSegments[i].elemCodeRangeIndices(Tier::Ion)));
// Now that all the code and metadata is valid, make tier 2 code
// visible and unblock anyone waiting on it.
code().commitTier2();
// Now tier2 is committed and we can update jump tables entries to
@@ -418,7 +423,7 @@ Module::compiledSerialize(uint8_t* compiledBegin, size_t compiledSize) const
cursor = SerializeVector(cursor, exports_);
cursor = SerializePodVector(cursor, dataSegments_);
cursor = SerializeVector(cursor, elemSegments_);
cursor = code_->serialize(cursor, linkData_.linkData(Tier::Serialized));
cursor = code_->serialize(cursor, linkData_);
MOZ_RELEASE_ASSERT(cursor == compiledBegin + compiledSize);
}
@@ -481,8 +486,8 @@ Module::deserialize(const uint8_t* bytecodeBegin, size_t bytecodeSize,
if (!cursor)
return nullptr;
MutableCode code = js_new<Code>();
cursor = code->deserialize(cursor, bytecode, linkData.linkData(Tier::Serialized), *metadata);
SharedCode code;
cursor = Code::deserialize(cursor, *bytecode, linkData, *metadata, &code);
if (!cursor)
return nullptr;
@@ -1245,12 +1250,7 @@ Module::instantiate(JSContext* cx,
// may patch the pre-linked code at any time.
if (!codeIsBusy_.compareExchange(false, true)) {
Tier tier = Tier::Baseline;
auto segment = ModuleSegment::create(tier,
*unlinkedCodeForDebugging_,
*bytecode_,
linkData(tier),
metadata(),
metadata(tier).codeRanges);
auto segment = ModuleSegment::create(tier, *unlinkedCodeForDebugging_, linkData(tier));
if (!segment) {
ReportOutOfMemory(cx);
return false;
@@ -1260,7 +1260,7 @@ Module::instantiate(JSContext* cx,
if (!metadataTier || !metadataTier->clone(metadata(tier)))
return false;
auto codeTier = js::MakeUnique<CodeTier>(tier, Move(metadataTier), Move(segment));
auto codeTier = js::MakeUnique<CodeTier>(Move(metadataTier), Move(segment));
if (!codeTier)
return false;
@@ -1268,11 +1268,13 @@ Module::instantiate(JSContext* cx,
if (!jumpTables.init(CompileMode::Once, codeTier->segment(), metadata(tier).codeRanges))
return false;
code = js_new<Code>(Move(codeTier), metadata(), Move(jumpTables));
if (!code) {
MutableCode debugCode = js_new<Code>(Move(codeTier), metadata(), Move(jumpTables));
if (!debugCode || !debugCode->initialize(*bytecode_, linkData(tier))) {
ReportOutOfMemory(cx);
return false;
}
code = debugCode;
}
}
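
The tier-2 commit protocol in Module::finishTier2 above, reduced to its three steps (a sketch of the control flow, not new code):

    // 1. Install the tier-2 objects. initialize() registers the new segment,
    //    but wasm execution still selects tier 1: hasTier2_ is still false.
    if (!code().setTier2(Move(tier2Arg), *bytecode_, *linkData2))
        return false;
    linkData().setTier2(Move(linkData2));
    // 2. Under the tier-1 lazy-stub lock, pre-build a tier-2 entry stub for
    //    every exported function that already has a tier-1 one.
    // 3. Only once nothing can fail anymore, flip the Atomic<bool>:
    code().commitTier2();   // tier 2 becomes visible to bestTier() etc.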

View file

@@ -82,15 +82,15 @@ typedef UniquePtr<LinkDataTier> UniqueLinkDataTier;
class LinkData
{
UniqueLinkDataTier linkData1_; // Always present
mutable UniqueLinkDataTier linkData2_; // Access only if hasTier2() is true
UniqueLinkDataTier tier1_; // Always present
mutable UniqueLinkDataTier tier2_; // Access only if hasTier2() is true
public:
LinkData() {}
explicit LinkData(UniqueLinkDataTier linkData) : linkData1_(Move(linkData)) {}
explicit LinkData(UniqueLinkDataTier tier) : tier1_(Move(tier)) {}
void setTier2(UniqueLinkDataTier linkData) const;
const LinkDataTier& linkData(Tier tier) const;
const LinkDataTier& tier(Tier tier) const;
WASM_DECLARE_SERIALIZABLE(LinkData)
};
@@ -192,7 +192,7 @@ class Module : public JS::WasmModule
const Metadata& metadata() const { return code_->metadata(); }
const MetadataTier& metadata(Tier t) const { return code_->metadata(t); }
const LinkData& linkData() const { return linkData_; }
const LinkDataTier& linkData(Tier t) const { return linkData_.linkData(t); }
const LinkDataTier& linkData(Tier t) const { return linkData_.tier(t); }
const ImportVector& imports() const { return imports_; }
const ExportVector& exports() const { return exports_; }
const ShareableBytes& bytecode() const { return *bytecode_; }

View file

@@ -217,6 +217,7 @@ static ProcessCodeSegmentMap processCodeSegmentMap;
bool
wasm::RegisterCodeSegment(const CodeSegment* cs)
{
MOZ_ASSERT(cs->codeTier().code().initialized());
return processCodeSegmentMap.insert(cs);
}