Mirror of https://github.com/mozilla/gecko-dev.git

Bug 1276028 - Baldr: split out CodeSegment and Metadata from Module (r=bbouvier)

MozReview-Commit-ID: AnITPZYpgp1
--HG--
extra : rebase_source : d3164565407ff3ddcb7da967ba32319f318c4f5d

This commit is contained in:
Parent: 57323900e3
Commit: e0962c91f1
@@ -296,19 +296,28 @@ class AsmJSExport
 typedef Vector<AsmJSExport, 0, SystemAllocPolicy> AsmJSExportVector;
 
-// Holds the trivially-memcpy()able, serializable portion of AsmJSModuleData.
-struct AsmJSModuleCacheablePod
+enum class CacheResult
+{
+    Hit,
+    Miss
+};
+
+// Holds the immutable guts of an AsmJSModule.
+//
+// AsmJSMetadata is built incrementally by ModuleValidator and then shared
+// immutably between AsmJSModules.
+
+struct AsmJSMetadataCacheablePod
 {
     uint32_t minHeapLength;
     uint32_t numFFIs;
     uint32_t srcLength;
     uint32_t srcLengthWithRightBrace;
 
+    AsmJSMetadataCacheablePod() { PodZero(this); }
 };
 
-// Holds the immutable guts of an AsmJSModule. This struct is mutably built up
-// by ModuleValidator and then handed over to the AsmJSModule constructor in
-// finish().
-struct AsmJSModuleData : AsmJSModuleCacheablePod
+struct AsmJSMetadata : RefCounted<AsmJSMetadata>, AsmJSMetadataCacheablePod
 {
     AsmJSGlobalVector globals;
     AsmJSImportVector imports;

@@ -317,6 +326,8 @@ struct AsmJSModuleData : AsmJSModuleCacheablePod
     PropertyName* importArgumentName;
     PropertyName* bufferArgumentName;
 
+    CacheResult cacheResult;
+
     // These values are not serialized since they are relative to the
     // containing script which can be different between serialization and
     // deserialization contexts. Thus, they must be set explicitly using the

@@ -327,19 +338,18 @@ struct AsmJSModuleData : AsmJSModuleCacheablePod
     bool strict;
     ScriptSourceHolder scriptSource;
 
-    AsmJSModuleData()
+    AsmJSMetadata()
       : globalArgumentName(nullptr),
        importArgumentName(nullptr),
        bufferArgumentName(nullptr),
+       cacheResult(CacheResult::Miss),
        srcStart(0),
        srcBodyStart(0),
        strict(false)
-    {
-        PodZero(&pod());
-    }
+    {}
 
-    AsmJSModuleCacheablePod& pod() { return *this; }
-    const AsmJSModuleCacheablePod& pod() const { return *this; }
+    AsmJSMetadataCacheablePod& pod() { return *this; }
+    const AsmJSMetadataCacheablePod& pod() const { return *this; }
 
     void trace(JSTracer* trc) const {
         for (const AsmJSGlobal& global : globals)
@@ -349,42 +359,41 @@ struct AsmJSModuleData : AsmJSModuleCacheablePod
         TraceNameField(trc, &bufferArgumentName, "asm.js buffer argument name");
     }
 
-    WASM_DECLARE_SERIALIZABLE(AsmJSModuleData)
+    WASM_DECLARE_SERIALIZABLE(AsmJSMetadata)
 };
 
-typedef UniquePtr<AsmJSModuleData> UniqueAsmJSModuleData;
+typedef RefPtr<AsmJSMetadata> MutableAsmJSMetadata;
+typedef RefPtr<const AsmJSMetadata> SharedAsmJSMetadata;
 
 // An AsmJSModule is-a Module with the extra persistent state necessary to
 // represent a compiled asm.js module.
 class js::AsmJSModule final : public Module
 {
-    typedef UniquePtr<const AsmJSModuleData> UniqueConstAsmJSModuleData;
-    typedef UniquePtr<const StaticLinkData> UniqueConstStaticLinkData;
-
-    const UniqueConstStaticLinkData link_;
-    const UniqueExportMap exportMap_;
-    const UniqueConstAsmJSModuleData module_;
+    const SharedStaticLinkData staticLinkData_;
+    const SharedExportMap exportMap_;
+    const SharedAsmJSMetadata asmJSMetadata_;
 
   public:
-    AsmJSModule(UniqueModuleData base,
-                UniqueStaticLinkData link,
-                UniqueExportMap exportMap,
-                UniqueAsmJSModuleData module)
-      : Module(Move(base)),
-        link_(Move(link)),
-        exportMap_(Move(exportMap)),
-        module_(Move(module))
+    AsmJSModule(UniqueCodeSegment code,
+                const Metadata& metadata,
+                const StaticLinkData& staticLinkData,
+                const ExportMap& exportMap,
+                const AsmJSMetadata& asmJSMetadata)
+      : Module(Move(code), metadata),
+        staticLinkData_(&staticLinkData),
+        exportMap_(&exportMap),
+        asmJSMetadata_(&asmJSMetadata)
     {}
 
    virtual void trace(JSTracer* trc) override {
        Module::trace(trc);
-       module_->trace(trc);
+       asmJSMetadata_->trace(trc);
    }
    virtual void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, size_t* data) override {
        Module::addSizeOfMisc(mallocSizeOf, code, data);
-       *data += mallocSizeOf(link_.get()) + link_->sizeOfExcludingThis(mallocSizeOf);
+       *data += mallocSizeOf(staticLinkData_.get()) + staticLinkData_->sizeOfExcludingThis(mallocSizeOf);
        *data += mallocSizeOf(exportMap_.get()) + exportMap_->sizeOfExcludingThis(mallocSizeOf);
-       *data += mallocSizeOf(module_.get()) + module_->sizeOfExcludingThis(mallocSizeOf);
+       *data += mallocSizeOf(asmJSMetadata_.get()) + asmJSMetadata_->sizeOfExcludingThis(mallocSizeOf);
    }
    virtual bool mutedErrors() const override {
        return scriptSource()->mutedErrors();
@@ -396,16 +405,17 @@ class js::AsmJSModule final : public Module
        return scriptSource();
    }
 
-   uint32_t minHeapLength() const { return module_->minHeapLength; }
-   uint32_t numFFIs() const { return module_->numFFIs; }
-   bool strict() const { return module_->strict; }
-   ScriptSource* scriptSource() const { return module_->scriptSource.get(); }
-   const AsmJSGlobalVector& asmJSGlobals() const { return module_->globals; }
-   const AsmJSImportVector& asmJSImports() const { return module_->imports; }
-   const AsmJSExportVector& asmJSExports() const { return module_->exports; }
-   PropertyName* globalArgumentName() const { return module_->globalArgumentName; }
-   PropertyName* importArgumentName() const { return module_->importArgumentName; }
-   PropertyName* bufferArgumentName() const { return module_->bufferArgumentName; }
+   uint32_t minHeapLength() const { return asmJSMetadata_->minHeapLength; }
+   uint32_t numFFIs() const { return asmJSMetadata_->numFFIs; }
+   bool strict() const { return asmJSMetadata_->strict; }
+   ScriptSource* scriptSource() const { return asmJSMetadata_->scriptSource.get(); }
+   const AsmJSGlobalVector& asmJSGlobals() const { return asmJSMetadata_->globals; }
+   const AsmJSImportVector& asmJSImports() const { return asmJSMetadata_->imports; }
+   const AsmJSExportVector& asmJSExports() const { return asmJSMetadata_->exports; }
+   PropertyName* globalArgumentName() const { return asmJSMetadata_->globalArgumentName; }
+   PropertyName* importArgumentName() const { return asmJSMetadata_->importArgumentName; }
+   PropertyName* bufferArgumentName() const { return asmJSMetadata_->bufferArgumentName; }
+   bool loadedFromCache() const { return asmJSMetadata_->cacheResult == CacheResult::Hit; }
 
    // srcStart() refers to the offset in the ScriptSource to the beginning of
    // the asm.js module function. If the function has been created with the

@@ -413,23 +423,23 @@ class js::AsmJSModule final : public Module
    // source. Otherwise, it will be the opening parenthesis of the arguments
    // list.
    uint32_t srcStart() const {
-       return module_->srcStart;
+       return asmJSMetadata_->srcStart;
    }
    uint32_t srcEndBeforeCurly() const {
-       return module_->srcStart + module_->srcLength;
+       return asmJSMetadata_->srcStart + asmJSMetadata_->srcLength;
    }
    uint32_t srcEndAfterCurly() const {
-       return module_->srcStart + module_->srcLengthWithRightBrace;
+       return asmJSMetadata_->srcStart + asmJSMetadata_->srcLengthWithRightBrace;
    }
 
    // srcBodyStart() refers to the offset in the ScriptSource to the end
    // of the 'use asm' string-literal token.
    uint32_t srcBodyStart() const {
-       return module_->srcBodyStart;
+       return asmJSMetadata_->srcBodyStart;
    }
 
    bool staticallyLink(ExclusiveContext* cx) {
-       return Module::staticallyLink(cx, *link_);
+       return Module::staticallyLink(cx, *staticLinkData_);
    }
    bool dynamicallyLink(JSContext* cx,
                         Handle<WasmModuleObject*> moduleObj,
@@ -1685,7 +1695,7 @@ class MOZ_STACK_CLASS ModuleValidator
 
    // State used to build the AsmJSModule in finish():
    ModuleGenerator mg_;
-   UniqueAsmJSModuleData module_;
+   MutableAsmJSMetadata asmJSMetadata_;
 
    // Error reporting:
    UniqueChars errorString_;

@@ -1776,15 +1786,15 @@ class MOZ_STACK_CLASS ModuleValidator
    }
 
    bool init() {
-       module_ = cx_->make_unique<AsmJSModuleData>();
-       if (!module_)
+       asmJSMetadata_ = cx_->new_<AsmJSMetadata>();
+       if (!asmJSMetadata_)
            return false;
 
-       module_->minHeapLength = RoundUpToNextValidAsmJSHeapLength(0);
-       module_->srcStart = moduleFunctionNode_->pn_body->pn_pos.begin;
-       module_->srcBodyStart = parser_.tokenStream.currentToken().pos.end;
-       module_->strict = parser_.pc->sc->strict() && !parser_.pc->sc->hasExplicitUseStrict();
-       module_->scriptSource.reset(parser_.ss);
+       asmJSMetadata_->minHeapLength = RoundUpToNextValidAsmJSHeapLength(0);
+       asmJSMetadata_->srcStart = moduleFunctionNode_->pn_body->pn_pos.begin;
+       asmJSMetadata_->srcBodyStart = parser_.tokenStream.currentToken().pos.end;
+       asmJSMetadata_->strict = parser_.pc->sc->strict() && !parser_.pc->sc->hasExplicitUseStrict();
+       asmJSMetadata_->scriptSource.reset(parser_.ss);
 
        if (!globalMap_.init() || !sigMap_.init() || !importMap_.init())
            return false;
@@ -1871,23 +1881,23 @@ class MOZ_STACK_CLASS ModuleValidator
        if (!mg_.init(Move(genData), Move(filename)))
            return false;
 
-       mg_.bumpMinHeapLength(module_->minHeapLength);
+       mg_.bumpMinHeapLength(asmJSMetadata_->minHeapLength);
 
        return true;
    }
 
    ExclusiveContext* cx() const { return cx_; }
    PropertyName* moduleFunctionName() const { return moduleFunctionName_; }
-   PropertyName* globalArgumentName() const { return module_->globalArgumentName; }
-   PropertyName* importArgumentName() const { return module_->importArgumentName; }
-   PropertyName* bufferArgumentName() const { return module_->bufferArgumentName; }
+   PropertyName* globalArgumentName() const { return asmJSMetadata_->globalArgumentName; }
+   PropertyName* importArgumentName() const { return asmJSMetadata_->importArgumentName; }
+   PropertyName* bufferArgumentName() const { return asmJSMetadata_->bufferArgumentName; }
    ModuleGenerator& mg() { return mg_; }
    AsmJSParser& parser() const { return parser_; }
    TokenStream& tokenStream() const { return parser_.tokenStream; }
    RootedFunction& dummyFunction() { return dummyFunction_; }
    bool supportsSimd() const { return cx_->jitSupportsSimd(); }
    bool atomicsPresent() const { return atomicsPresent_; }
-   uint32_t minHeapLength() const { return module_->minHeapLength; }
+   uint32_t minHeapLength() const { return asmJSMetadata_->minHeapLength; }
 
    void initModuleFunctionName(PropertyName* name) {
        MOZ_ASSERT(!moduleFunctionName_);

@@ -1895,15 +1905,15 @@ class MOZ_STACK_CLASS ModuleValidator
    }
    void initGlobalArgumentName(PropertyName* n) {
        MOZ_ASSERT(n->isTenured());
-       module_->globalArgumentName = n;
+       asmJSMetadata_->globalArgumentName = n;
    }
    void initImportArgumentName(PropertyName* n) {
        MOZ_ASSERT(n->isTenured());
-       module_->importArgumentName = n;
+       asmJSMetadata_->importArgumentName = n;
    }
    void initBufferArgumentName(PropertyName* n) {
        MOZ_ASSERT(n->isTenured());
-       module_->bufferArgumentName = n;
+       asmJSMetadata_->bufferArgumentName = n;
    }
    bool addGlobalVarInit(PropertyName* var, const NumLit& lit, Type type, bool isConst)
    {
@@ -1929,7 +1939,7 @@ class MOZ_STACK_CLASS ModuleValidator
        g.pod.u.var.initKind_ = AsmJSGlobal::InitConstant;
        g.pod.u.var.u.val_ = lit.value();
        g.pod.u.var.globalDataOffset_ = mg_.global(index).globalDataOffset;
-       return module_->globals.append(g);
+       return asmJSMetadata_->globals.append(g);
    }
    bool addGlobalVarImport(PropertyName* var, PropertyName* field, Type type, bool isConst) {
        MOZ_ASSERT(type.isGlobalVarType());

@@ -1952,7 +1962,7 @@ class MOZ_STACK_CLASS ModuleValidator
        g.pod.u.var.initKind_ = AsmJSGlobal::InitImport;
        g.pod.u.var.u.importType_ = valType;
        g.pod.u.var.globalDataOffset_ = mg_.global(index).globalDataOffset;
-       return module_->globals.append(g);
+       return asmJSMetadata_->globals.append(g);
    }
    bool addArrayView(PropertyName* var, Scalar::Type vt, PropertyName* maybeField) {
        if (!arrayViews_.append(ArrayView(var, vt)))

@@ -1967,7 +1977,7 @@ class MOZ_STACK_CLASS ModuleValidator
 
        AsmJSGlobal g(AsmJSGlobal::ArrayView, maybeField);
        g.pod.u.viewType_ = vt;
-       return module_->globals.append(g);
+       return asmJSMetadata_->globals.append(g);
    }
    bool addMathBuiltinFunction(PropertyName* var, AsmJSMathBuiltinFunction func,
                                PropertyName* field)

@@ -1981,7 +1991,7 @@ class MOZ_STACK_CLASS ModuleValidator
 
        AsmJSGlobal g(AsmJSGlobal::MathBuiltinFunction, field);
        g.pod.u.mathBuiltinFunc_ = func;
-       return module_->globals.append(g);
+       return asmJSMetadata_->globals.append(g);
    }
  private:
    bool addGlobalDoubleConstant(PropertyName* var, double constant) {

@@ -2000,7 +2010,7 @@ class MOZ_STACK_CLASS ModuleValidator
        AsmJSGlobal g(AsmJSGlobal::Constant, field);
        g.pod.u.constant.value_ = constant;
        g.pod.u.constant.kind_ = AsmJSGlobal::MathConstant;
-       return module_->globals.append(g);
+       return asmJSMetadata_->globals.append(g);
    }
    bool addGlobalConstant(PropertyName* var, double constant, PropertyName* field) {
        if (!addGlobalDoubleConstant(var, constant))

@@ -2009,7 +2019,7 @@ class MOZ_STACK_CLASS ModuleValidator
        AsmJSGlobal g(AsmJSGlobal::Constant, field);
        g.pod.u.constant.value_ = constant;
        g.pod.u.constant.kind_ = AsmJSGlobal::GlobalConstant;
-       return module_->globals.append(g);
+       return asmJSMetadata_->globals.append(g);
    }
    bool addAtomicsBuiltinFunction(PropertyName* var, AsmJSAtomicsBuiltinFunction func,
                                   PropertyName* field)

@@ -2025,7 +2035,7 @@ class MOZ_STACK_CLASS ModuleValidator
 
        AsmJSGlobal g(AsmJSGlobal::AtomicsBuiltinFunction, field);
        g.pod.u.atomicsBuiltinFunc_ = func;
-       return module_->globals.append(g);
+       return asmJSMetadata_->globals.append(g);
    }
    bool addSimdCtor(PropertyName* var, SimdType type, PropertyName* field) {
        Global* global = validationLifo_.new_<Global>(Global::SimdCtor);

@@ -2037,7 +2047,7 @@ class MOZ_STACK_CLASS ModuleValidator
 
        AsmJSGlobal g(AsmJSGlobal::SimdCtor, field);
        g.pod.u.simdCtorType_ = type;
-       return module_->globals.append(g);
+       return asmJSMetadata_->globals.append(g);
    }
    bool addSimdOperation(PropertyName* var, SimdType type, SimdOperation op, PropertyName* opName)
    {

@@ -2052,7 +2062,7 @@ class MOZ_STACK_CLASS ModuleValidator
        AsmJSGlobal g(AsmJSGlobal::SimdOp, opName);
        g.pod.u.simdOp.type_ = type;
        g.pod.u.simdOp.which_ = op;
-       return module_->globals.append(g);
+       return asmJSMetadata_->globals.append(g);
    }
    bool addArrayViewCtor(PropertyName* var, Scalar::Type vt, PropertyName* field) {
        Global* global = validationLifo_.new_<Global>(Global::ArrayViewCtor);
@@ -2064,12 +2074,12 @@ class MOZ_STACK_CLASS ModuleValidator
 
        AsmJSGlobal g(AsmJSGlobal::ArrayViewCtor, field);
        g.pod.u.viewType_ = vt;
-       return module_->globals.append(g);
+       return asmJSMetadata_->globals.append(g);
    }
    bool addFFI(PropertyName* var, PropertyName* field) {
-       if (module_->numFFIs == UINT32_MAX)
+       if (asmJSMetadata_->numFFIs == UINT32_MAX)
            return false;
-       uint32_t ffiIndex = module_->numFFIs++;
+       uint32_t ffiIndex = asmJSMetadata_->numFFIs++;
 
        Global* global = validationLifo_.new_<Global>(Global::FFI);
        if (!global)

@@ -2080,7 +2090,7 @@ class MOZ_STACK_CLASS ModuleValidator
 
        AsmJSGlobal g(AsmJSGlobal::FFI, field);
        g.pod.u.ffiIndex_ = ffiIndex;
-       return module_->globals.append(g);
+       return asmJSMetadata_->globals.append(g);
    }
    bool addExportField(ParseNode* pn, const Func& func, PropertyName* maybeFieldName) {
        // Record the field name of this export.

@@ -2100,10 +2110,10 @@ class MOZ_STACK_CLASS ModuleValidator
 
        // The exported function might have already been exported in which case
        // the index will refer into the range of AsmJSExports.
-       MOZ_ASSERT(exportIndex <= module_->exports.length());
-       return exportIndex < module_->exports.length() ||
-              module_->exports.emplaceBack(func.srcBegin() - module_->srcStart,
-                                           func.srcEnd() - module_->srcStart);
+       MOZ_ASSERT(exportIndex <= asmJSMetadata_->exports.length());
+       return exportIndex < asmJSMetadata_->exports.length() ||
+              asmJSMetadata_->exports.emplaceBack(func.srcBegin() - asmJSMetadata_->srcStart,
+                                                  func.srcEnd() - asmJSMetadata_->srcStart);
    }
    bool addFunction(PropertyName* name, uint32_t firstUse, Sig&& sig, Func** func) {
        uint32_t sigIndex;
@@ -2155,10 +2165,10 @@ class MOZ_STACK_CLASS ModuleValidator
            *importIndex = p->value();
            return true;
        }
-       *importIndex = module_->imports.length();
+       *importIndex = asmJSMetadata_->imports.length();
        if (*importIndex >= MaxImports)
            return failCurrentOffset("too many imports");
-       if (!module_->imports.emplaceBack(ffiIndex))
+       if (!asmJSMetadata_->imports.emplaceBack(ffiIndex))
            return false;
        uint32_t sigIndex;
        if (!declareSig(Move(sig), &sigIndex))

@@ -2174,8 +2184,8 @@ class MOZ_STACK_CLASS ModuleValidator
        if (len > uint64_t(INT32_MAX) + 1)
            return false;
        len = RoundUpToNextValidAsmJSHeapLength(len);
-       if (len > module_->minHeapLength) {
-           module_->minHeapLength = len;
+       if (len > asmJSMetadata_->minHeapLength) {
+           asmJSMetadata_->minHeapLength = len;
            mg_.bumpMinHeapLength(len);
        }
        return true;
@@ -2320,24 +2330,31 @@ class MOZ_STACK_CLASS ModuleValidator
        }
 
        uint32_t endBeforeCurly = tokenStream().currentToken().pos.end;
-       module_->srcLength = endBeforeCurly - module_->srcStart;
+       asmJSMetadata_->srcLength = endBeforeCurly - asmJSMetadata_->srcStart;
 
        TokenPos pos;
        JS_ALWAYS_TRUE(tokenStream().peekTokenPos(&pos, TokenStream::Operand));
        uint32_t endAfterCurly = pos.end;
-       module_->srcLengthWithRightBrace = endAfterCurly - module_->srcStart;
+       asmJSMetadata_->srcLengthWithRightBrace = endAfterCurly - asmJSMetadata_->srcStart;
 
-       UniqueModuleData base;
-       UniqueStaticLinkData link;
-       UniqueExportMap exportMap;
-       if (!mg_.finish(Move(funcNames), &base, &link, &exportMap, slowFuncs))
+       UniqueCodeSegment code;
+       SharedMetadata metadata;
+       SharedStaticLinkData staticLinkData;
+       SharedExportMap exportMap;
+       if (!mg_.finish(Move(funcNames), &code, &metadata, &staticLinkData, &exportMap, slowFuncs))
            return false;
 
        moduleObj.set(WasmModuleObject::create(cx_));
        if (!moduleObj)
            return false;
 
-       return moduleObj->init(js_new<AsmJSModule>(Move(base), Move(link), Move(exportMap), Move(module_)));
+       auto* module = js_new<AsmJSModule>(Move(code), *metadata, *staticLinkData, *exportMap,
+                                          *asmJSMetadata_);
+       if (!module)
+           return false;
+
+       moduleObj->init(*module);
+       return true;
    }
 };
@@ -8049,15 +8066,8 @@ AsmJSGlobal::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
    return cursor;
 }
 
-bool
-AsmJSGlobal::clone(JSContext* cx, AsmJSGlobal* out) const
-{
-    *out = *this;
-    return true;
-}
-
 size_t
-AsmJSModuleData::serializedSize() const
+AsmJSMetadata::serializedSize() const
 {
    return sizeof(pod()) +
           SerializedVectorSize(globals) +

@@ -8069,7 +8079,7 @@ AsmJSModuleData::serializedSize() const
 }
 
 uint8_t*
-AsmJSModuleData::serialize(uint8_t* cursor) const
+AsmJSMetadata::serialize(uint8_t* cursor) const
 {
    cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
    cursor = SerializeVector(cursor, globals);

@@ -8082,7 +8092,7 @@ AsmJSModuleData::serialize(uint8_t* cursor) const
 }
 
 const uint8_t*
-AsmJSModuleData::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+AsmJSMetadata::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
 {
    (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
    (cursor = DeserializeVector(cx, cursor, &globals)) &&

@@ -8091,27 +8101,12 @@ AsmJSModuleData::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
    (cursor = DeserializeName(cx, cursor, &globalArgumentName)) &&
    (cursor = DeserializeName(cx, cursor, &importArgumentName)) &&
    (cursor = DeserializeName(cx, cursor, &bufferArgumentName));
+   cacheResult = CacheResult::Hit;
    return cursor;
 }
 
-bool
-AsmJSModuleData::clone(JSContext* cx, AsmJSModuleData* out) const
-{
-    out->pod() = pod();
-    out->globalArgumentName = globalArgumentName;
-    out->importArgumentName = importArgumentName;
-    out->bufferArgumentName = bufferArgumentName;
-    out->srcStart = srcStart;
-    out->srcBodyStart = srcBodyStart;
-    out->strict = strict;
-    out->scriptSource.reset(scriptSource.get());
-    return CloneVector(cx, globals, &out->globals) &&
-           ClonePodVector(cx, imports, &out->imports) &&
-           ClonePodVector(cx, exports, &out->exports);
-}
-
 size_t
-AsmJSModuleData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+AsmJSMetadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
    return globals.sizeOfExcludingThis(mallocSizeOf) +
           imports.sizeOfExcludingThis(mallocSizeOf) +
@@ -8121,19 +8116,21 @@ AsmJSModuleData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 size_t
 AsmJSModule::serializedSize() const
 {
-    return base().serializedSize() +
-           link_->serializedSize() +
+    return codeSegment().serializedSize() +
+           metadata().serializedSize() +
+           staticLinkData_->serializedSize() +
            exportMap_->serializedSize() +
-           module_->serializedSize();
+           asmJSMetadata_->serializedSize();
 }
 
 uint8_t*
 AsmJSModule::serialize(uint8_t* cursor) const
 {
-    cursor = base().serialize(cursor);
-    cursor = link_->serialize(cursor);
+    cursor = codeSegment().serialize(cursor);
+    cursor = metadata().serialize(cursor);
+    cursor = staticLinkData_->serialize(cursor);
     cursor = exportMap_->serialize(cursor);
-    cursor = module_->serialize(cursor);
+    cursor = asmJSMetadata_->serialize(cursor);
     return cursor;
 }
@@ -8149,46 +8146,53 @@ AsmJSModule::deserialize(ExclusiveContext* cx, const uint8_t* cursor, AsmJSParse
    // Vectors so, for simplicity, inhibit GC of the atoms zone.
    AutoKeepAtoms aka(cx->perThreadData);
 
-   UniqueModuleData base = cx->make_unique<ModuleData>();
-   if (!base)
+   UniqueCodeSegment code = MakeUnique<CodeSegment>();
+   if (!code)
        return nullptr;
-   cursor = base->deserialize(cx, cursor);
+   cursor = code->deserialize(cx, cursor);
    if (!cursor)
        return nullptr;
 
-   MOZ_ASSERT(!base->loadedFromCache);
-   base->loadedFromCache = true;
-
-   UniqueStaticLinkData link = cx->make_unique<StaticLinkData>();
-   if (!link)
+   MutableMetadata metadata = js_new<Metadata>();
+   if (!metadata)
        return nullptr;
-   cursor = link->deserialize(cx, cursor);
+   cursor = metadata->deserialize(cx, cursor);
    if (!cursor)
        return nullptr;
 
-   UniqueExportMap exportMap = cx->make_unique<ExportMap>();
+   MutableStaticLinkData staticLinkData = cx->new_<StaticLinkData>();
+   if (!staticLinkData)
+       return nullptr;
+   cursor = staticLinkData->deserialize(cx, cursor);
+   if (!cursor)
+       return nullptr;
+
+   MutableExportMap exportMap = cx->new_<ExportMap>();
    if (!exportMap)
        return nullptr;
    cursor = exportMap->deserialize(cx, cursor);
    if (!cursor)
        return nullptr;
 
-   UniqueAsmJSModuleData module = cx->make_unique<AsmJSModuleData>();
-   if (!module)
+   MutableAsmJSMetadata asmJSMetadata = cx->new_<AsmJSMetadata>();
+   if (!asmJSMetadata)
        return nullptr;
-   cursor = module->deserialize(cx, cursor);
+   cursor = asmJSMetadata->deserialize(cx, cursor);
    if (!cursor)
        return nullptr;
 
-   // See AsmJSModuleData comment as well as ModuleValidator::init().
-   module->srcStart = parser.pc->maybeFunction->pn_body->pn_pos.begin;
-   module->srcBodyStart = parser.tokenStream.currentToken().pos.end;
-   module->strict = parser.pc->sc->strict() && !parser.pc->sc->hasExplicitUseStrict();
-   module->scriptSource.reset(parser.ss);
+   // See AsmJSMetadata comment as well as ModuleValidator::init().
+   asmJSMetadata->srcStart = parser.pc->maybeFunction->pn_body->pn_pos.begin;
+   asmJSMetadata->srcBodyStart = parser.tokenStream.currentToken().pos.end;
+   asmJSMetadata->strict = parser.pc->sc->strict() && !parser.pc->sc->hasExplicitUseStrict();
+   asmJSMetadata->scriptSource.reset(parser.ss);
 
-   if (!moduleObj->init(js_new<AsmJSModule>(Move(base), Move(link), Move(exportMap), Move(module))))
+   auto* module = js_new<AsmJSModule>(Move(code), *metadata, *staticLinkData, *exportMap,
+                                      *asmJSMetadata);
+   if (!module)
        return nullptr;
 
+   moduleObj->init(*module);
    return cursor;
 }
@@ -8202,26 +8206,18 @@ AsmJSModule::clone(JSContext* cx, MutableHandle<WasmModuleObject*> moduleObj) co
    // Prevent any GC that may move the temporarily-unrooted atoms being cloned.
    AutoKeepAtoms aka(cx->perThreadData);
 
-   UniqueModuleData base = cx->make_unique<ModuleData>();
-   if (!base || !this->base().clone(cx, base.get()))
+   UniqueCodeSegment code = CodeSegment::clone(cx, codeSegment());
+   if (!code)
        return false;
 
-   UniqueStaticLinkData link = cx->make_unique<StaticLinkData>();
-   if (!link || !link_->clone(cx, link.get()))
+   auto* module = js_new<AsmJSModule>(Move(code), metadata(), *staticLinkData_, *exportMap_,
+                                      *asmJSMetadata_);
+   if (!module)
        return false;
 
-   UniqueExportMap exportMap = cx->make_unique<ExportMap>();
-   if (!exportMap || !exportMap_->clone(cx, exportMap.get()))
-       return false;
+   moduleObj->init(*module);
 
-   UniqueAsmJSModuleData module = cx->make_unique<AsmJSModuleData>();
-   if (!module || !module_->clone(cx, module.get()))
-       return false;
-
-   if (!moduleObj->init(js_new<AsmJSModule>(Move(base), Move(link), Move(exportMap), Move(module))))
-       return false;
-
-   return Module::clone(cx, *link_, &moduleObj->module());
+   return Module::clone(cx, *staticLinkData_, &moduleObj->module());
 }
 
 namespace {
@@ -8782,7 +8778,7 @@ js::IsAsmJSModuleLoadedFromCache(JSContext* cx, unsigned argc, Value* vp)
        return false;
    }
 
-   bool loadedFromCache = AsmJSModuleToModuleObject(fun)->module().loadedFromCache();
+   bool loadedFromCache = AsmJSModuleToModuleObject(fun)->module().asAsmJS().loadedFromCache();
 
    args.rval().set(BooleanValue(loadedFromCache));
    return true;
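Note on the pattern running through the hunks above: the old code deep-cloned AsmJSModuleData through per-type clone() methods, while the new code shares immutable, refcounted metadata (MutableAsmJSMetadata while building, SharedAsmJSMetadata afterwards). A minimal self-contained sketch of that ownership model, using std::shared_ptr as a stand-in for the real RefPtr/RefCounted machinery (all names here are illustrative, not SpiderMonkey's):

    #include <cstdint>
    #include <memory>   // std::shared_ptr stands in for RefPtr in this sketch

    struct MetadataSketch {
        uint32_t minHeapLength = 0;   // illustrative field
    };

    using MutableMetadataSketch = std::shared_ptr<MetadataSketch>;       // builder's handle
    using SharedMetadataSketch  = std::shared_ptr<const MetadataSketch>; // modules' handle

    SharedMetadataSketch BuildMetadata() {
        MutableMetadataSketch m = std::make_shared<MetadataSketch>();
        m->minHeapLength = 4096;   // mutate freely while validating/generating...
        return m;                  // ...then only const references escape
    }

    int main() {
        SharedMetadataSketch a = BuildMetadata();
        SharedMetadataSketch b = a;   // "cloning" is now just a refcount bump
        return (a->minHeapLength == b->minHeapLength) ? 0 : 1;
    }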
@@ -1102,7 +1102,7 @@ DecodeNameSection(JSContext* cx, Decoder& d, CacheableCharsVector* funcNames)
 
 static bool
 DecodeModule(JSContext* cx, UniqueChars file, const uint8_t* bytes, uint32_t length,
-             ImportNameVector* importNames, UniqueExportMap* exportMap,
+             ImportNameVector* importNames, SharedExportMap* exportMap,
              MutableHandle<ArrayBufferObject*> heap, MutableHandle<WasmModuleObject*> moduleObj)
 {
    Decoder d(bytes, bytes + length);

@@ -1155,20 +1155,24 @@ DecodeModule(JSContext* cx, UniqueChars file, const uint8_t* bytes, uint32_t len
            return Fail(cx, d, "failed to skip unknown section at end");
    }
 
-   UniqueModuleData module;
-   UniqueStaticLinkData staticLink;
+   UniqueCodeSegment code;
+   SharedMetadata metadata;
+   SharedStaticLinkData staticLinkData;
    SlowFunctionVector slowFuncs(cx);
-   if (!mg.finish(Move(funcNames), &module, &staticLink, exportMap, &slowFuncs))
+   if (!mg.finish(Move(funcNames), &code, &metadata, &staticLinkData, exportMap, &slowFuncs))
        return false;
 
    moduleObj.set(WasmModuleObject::create(cx));
    if (!moduleObj)
        return false;
 
-   if (!moduleObj->init(cx->new_<Module>(Move(module))))
+   auto module = cx->new_<Module>(Move(code), *metadata);
+   if (!module)
        return false;
 
-   return moduleObj->module().staticallyLink(cx, *staticLink);
+   moduleObj->init(*module);
+
+   return moduleObj->module().staticallyLink(cx, *staticLinkData);
 }
 
 /*****************************************************************************/

@@ -1295,7 +1299,7 @@ wasm::Eval(JSContext* cx, Handle<TypedArrayObject*> code, HandleObject importObj
        return false;
 
    ImportNameVector importNames;
-   UniqueExportMap exportMap;
+   SharedExportMap exportMap;
    Rooted<ArrayBufferObject*> heap(cx);
    Rooted<WasmModuleObject*> moduleObj(cx);
@@ -0,0 +1,381 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 *
 * Copyright 2016 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "asmjs/WasmCode.h"

#include "mozilla/Atomics.h"

#include "asmjs/WasmSerialize.h"
#include "jit/ExecutableAllocator.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::Atomic;

// Limit the number of concurrent wasm code allocations per process. Note that
// on Linux, the real maximum is ~32k, as each module requires 2 maps (RW/RX),
// and the kernel's default max_map_count is ~65k.
//
// Note: this can be removed once writable/non-executable global data stops
// being stored in the code segment.
static Atomic<uint32_t> wasmCodeAllocations(0);
static const uint32_t MaxWasmCodeAllocations = 16384;

static uint8_t*
AllocateCodeSegment(ExclusiveContext* cx, uint32_t totalLength)
{
    if (wasmCodeAllocations >= MaxWasmCodeAllocations)
        return nullptr;

    // Allocate RW memory. DynamicallyLinkModule will reprotect the code as RX.
    unsigned permissions =
        ExecutableAllocator::initialProtectionFlags(ExecutableAllocator::Writable);

    void* p = AllocateExecutableMemory(nullptr, totalLength, permissions,
                                       "wasm-code-segment", gc::SystemPageSize());
    if (!p) {
        ReportOutOfMemory(cx);
        return nullptr;
    }

    wasmCodeAllocations++;
    return (uint8_t*)p;
}

/* static */ UniqueCodeSegment
CodeSegment::allocate(ExclusiveContext* cx, uint32_t codeLength, uint32_t globalDataLength)
{
    UniqueCodeSegment code = cx->make_unique<CodeSegment>();
    if (!code)
        return nullptr;

    uint8_t* bytes = AllocateCodeSegment(cx, codeLength + globalDataLength);
    if (!bytes)
        return nullptr;

    code->bytes_ = bytes;
    code->codeLength_ = codeLength;
    code->globalDataLength_ = globalDataLength;
    return code;
}

/* static */ UniqueCodeSegment
CodeSegment::clone(ExclusiveContext* cx, const CodeSegment& src)
{
    UniqueCodeSegment dst = allocate(cx, src.codeLength_, src.globalDataLength_);
    if (!dst)
        return nullptr;

    memcpy(dst->code(), src.code(), src.codeLength());
    return dst;
}

CodeSegment::~CodeSegment()
{
    if (!bytes_) {
        MOZ_ASSERT(!totalLength());
        return;
    }

    MOZ_ASSERT(wasmCodeAllocations > 0);
    wasmCodeAllocations--;

    MOZ_ASSERT(totalLength() > 0);
    DeallocateExecutableMemory(bytes_, totalLength(), gc::SystemPageSize());
}

size_t
CodeSegment::serializedSize() const
{
    return sizeof(uint32_t) +
           sizeof(uint32_t) +
           codeLength_;
}

uint8_t*
CodeSegment::serialize(uint8_t* cursor) const
{
    cursor = WriteScalar<uint32_t>(cursor, codeLength_);
    cursor = WriteScalar<uint32_t>(cursor, globalDataLength_);
    cursor = WriteBytes(cursor, bytes_, codeLength_);
    return cursor;
}

const uint8_t*
CodeSegment::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
    cursor = ReadScalar<uint32_t>(cursor, &codeLength_);
    cursor = ReadScalar<uint32_t>(cursor, &globalDataLength_);

    bytes_ = AllocateCodeSegment(cx, codeLength_ + globalDataLength_);
    if (!bytes_)
        return nullptr;

    cursor = ReadBytes(cursor, bytes_, codeLength_);
    return cursor;
}

static size_t
SerializedSigSize(const Sig& sig)
{
    return sizeof(ExprType) +
           SerializedPodVectorSize(sig.args());
}

static uint8_t*
SerializeSig(uint8_t* cursor, const Sig& sig)
{
    cursor = WriteScalar<ExprType>(cursor, sig.ret());
    cursor = SerializePodVector(cursor, sig.args());
    return cursor;
}

static const uint8_t*
DeserializeSig(ExclusiveContext* cx, const uint8_t* cursor, Sig* sig)
{
    ExprType ret;
    cursor = ReadScalar<ExprType>(cursor, &ret);

    ValTypeVector args;
    cursor = DeserializePodVector(cx, cursor, &args);
    if (!cursor)
        return nullptr;

    *sig = Sig(Move(args), ret);
    return cursor;
}

static size_t
SizeOfSigExcludingThis(const Sig& sig, MallocSizeOf mallocSizeOf)
{
    return sig.args().sizeOfExcludingThis(mallocSizeOf);
}

size_t
Export::serializedSize() const
{
    return SerializedSigSize(sig_) +
           sizeof(pod);
}

uint8_t*
Export::serialize(uint8_t* cursor) const
{
    cursor = SerializeSig(cursor, sig_);
    cursor = WriteBytes(cursor, &pod, sizeof(pod));
    return cursor;
}

const uint8_t*
Export::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
    (cursor = DeserializeSig(cx, cursor, &sig_)) &&
    (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
    return cursor;
}

size_t
Export::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
    return SizeOfSigExcludingThis(sig_, mallocSizeOf);
}

size_t
Import::serializedSize() const
{
    return SerializedSigSize(sig_) +
           sizeof(pod);
}

uint8_t*
Import::serialize(uint8_t* cursor) const
{
    cursor = SerializeSig(cursor, sig_);
    cursor = WriteBytes(cursor, &pod, sizeof(pod));
    return cursor;
}

const uint8_t*
Import::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
    (cursor = DeserializeSig(cx, cursor, &sig_)) &&
    (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
    return cursor;
}

size_t
Import::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
    return SizeOfSigExcludingThis(sig_, mallocSizeOf);
}

CodeRange::CodeRange(Kind kind, Offsets offsets)
  : begin_(offsets.begin),
    profilingReturn_(0),
    end_(offsets.end),
    funcIndex_(0),
    funcLineOrBytecode_(0),
    funcBeginToTableEntry_(0),
    funcBeginToTableProfilingJump_(0),
    funcBeginToNonProfilingEntry_(0),
    funcProfilingJumpToProfilingReturn_(0),
    funcProfilingEpilogueToProfilingReturn_(0),
    kind_(kind)
{
    MOZ_ASSERT(begin_ <= end_);
    MOZ_ASSERT(kind_ == Entry || kind_ == Inline || kind_ == CallThunk);
}

CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
  : begin_(offsets.begin),
    profilingReturn_(offsets.profilingReturn),
    end_(offsets.end),
    funcIndex_(0),
    funcLineOrBytecode_(0),
    funcBeginToTableEntry_(0),
    funcBeginToTableProfilingJump_(0),
    funcBeginToNonProfilingEntry_(0),
    funcProfilingJumpToProfilingReturn_(0),
    funcProfilingEpilogueToProfilingReturn_(0),
    kind_(kind)
{
    MOZ_ASSERT(begin_ < profilingReturn_);
    MOZ_ASSERT(profilingReturn_ < end_);
    MOZ_ASSERT(kind_ == ImportJitExit || kind_ == ImportInterpExit);
}

CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
  : begin_(offsets.begin),
    profilingReturn_(offsets.profilingReturn),
    end_(offsets.end),
    funcIndex_(funcIndex),
    funcLineOrBytecode_(funcLineOrBytecode),
    funcBeginToTableEntry_(offsets.tableEntry - begin_),
    funcBeginToTableProfilingJump_(offsets.tableProfilingJump - begin_),
    funcBeginToNonProfilingEntry_(offsets.nonProfilingEntry - begin_),
    funcProfilingJumpToProfilingReturn_(profilingReturn_ - offsets.profilingJump),
    funcProfilingEpilogueToProfilingReturn_(profilingReturn_ - offsets.profilingEpilogue),
    kind_(Function)
{
    MOZ_ASSERT(begin_ < profilingReturn_);
    MOZ_ASSERT(profilingReturn_ < end_);
    MOZ_ASSERT(funcBeginToTableEntry_ == offsets.tableEntry - begin_);
    MOZ_ASSERT(funcBeginToTableProfilingJump_ == offsets.tableProfilingJump - begin_);
    MOZ_ASSERT(funcBeginToNonProfilingEntry_ == offsets.nonProfilingEntry - begin_);
    MOZ_ASSERT(funcProfilingJumpToProfilingReturn_ == profilingReturn_ - offsets.profilingJump);
    MOZ_ASSERT(funcProfilingEpilogueToProfilingReturn_ == profilingReturn_ - offsets.profilingEpilogue);
}

static size_t
NullableStringLength(const char* chars)
{
    return chars ? strlen(chars) : 0;
}

size_t
CacheableChars::serializedSize() const
{
    return sizeof(uint32_t) + NullableStringLength(get());
}

uint8_t*
CacheableChars::serialize(uint8_t* cursor) const
{
    uint32_t length = NullableStringLength(get());
    cursor = WriteBytes(cursor, &length, sizeof(uint32_t));
    cursor = WriteBytes(cursor, get(), length);
    return cursor;
}

const uint8_t*
CacheableChars::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
    uint32_t length;
    cursor = ReadBytes(cursor, &length, sizeof(uint32_t));

    reset(cx->pod_calloc<char>(length + 1));
    if (!get())
        return nullptr;

    cursor = ReadBytes(cursor, get(), length);
    return cursor;
}

size_t
CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
    return mallocSizeOf(get());
}

size_t
Metadata::serializedSize() const
{
    return sizeof(pod()) +
           SerializedVectorSize(imports) +
           SerializedVectorSize(exports) +
           SerializedPodVectorSize(heapAccesses) +
           SerializedPodVectorSize(codeRanges) +
           SerializedPodVectorSize(callSites) +
           SerializedPodVectorSize(callThunks) +
           SerializedVectorSize(prettyFuncNames) +
           filename.serializedSize();
}

uint8_t*
Metadata::serialize(uint8_t* cursor) const
{
    cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
    cursor = SerializeVector(cursor, imports);
    cursor = SerializeVector(cursor, exports);
    cursor = SerializePodVector(cursor, heapAccesses);
    cursor = SerializePodVector(cursor, codeRanges);
    cursor = SerializePodVector(cursor, callSites);
    cursor = SerializePodVector(cursor, callThunks);
    cursor = SerializeVector(cursor, prettyFuncNames);
    cursor = filename.serialize(cursor);
    return cursor;
}

/* static */ const uint8_t*
Metadata::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
    (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
    (cursor = DeserializeVector(cx, cursor, &imports)) &&
    (cursor = DeserializeVector(cx, cursor, &exports)) &&
    (cursor = DeserializePodVector(cx, cursor, &heapAccesses)) &&
    (cursor = DeserializePodVector(cx, cursor, &codeRanges)) &&
    (cursor = DeserializePodVector(cx, cursor, &callSites)) &&
    (cursor = DeserializePodVector(cx, cursor, &callThunks)) &&
    (cursor = DeserializeVector(cx, cursor, &prettyFuncNames)) &&
    (cursor = filename.deserialize(cx, cursor));
    return cursor;
}

size_t
Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
    return SizeOfVectorExcludingThis(imports, mallocSizeOf) +
           SizeOfVectorExcludingThis(exports, mallocSizeOf) +
           heapAccesses.sizeOfExcludingThis(mallocSizeOf) +
           codeRanges.sizeOfExcludingThis(mallocSizeOf) +
           callSites.sizeOfExcludingThis(mallocSizeOf) +
           callThunks.sizeOfExcludingThis(mallocSizeOf) +
           SizeOfVectorExcludingThis(prettyFuncNames, mallocSizeOf) +
           filename.sizeOfExcludingThis(mallocSizeOf);
}
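All the serialize/deserialize methods in this new file follow one cursor convention: serializedSize() sizes a flat buffer, serialize() writes and returns the advanced write cursor, and deserialize() reads and returns the advanced read cursor (nullptr on failure). A self-contained sketch of that pattern, not the real WasmSerialize helpers:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct Pod { uint32_t codeLength; uint32_t globalDataLength; };

    static size_t SerializedSize(const Pod& p) { return sizeof(p); }

    static uint8_t* Serialize(uint8_t* cursor, const Pod& p) {
        memcpy(cursor, &p, sizeof(p));
        return cursor + sizeof(p);          // caller chains the advanced cursor
    }

    static const uint8_t* Deserialize(const uint8_t* cursor, Pod* p) {
        memcpy(p, cursor, sizeof(*p));
        return cursor + sizeof(*p);
    }

    int main() {
        Pod in{64, 16}, out{};
        std::vector<uint8_t> buf(SerializedSize(in));
        Serialize(buf.data(), in);
        Deserialize(buf.data(), &out);
        return (out.codeLength == 64 && out.globalDataLength == 16) ? 0 : 1;
    }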
@@ -0,0 +1,342 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 *
 * Copyright 2016 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef wasm_code_h
#define wasm_code_h

#include "asmjs/WasmTypes.h"

namespace js {
namespace wasm {

// A wasm CodeSegment owns the allocated executable code for a wasm module.
// A CodeSegment passed to the Module constructor must be allocated via allocate.

class CodeSegment;
typedef UniquePtr<CodeSegment> UniqueCodeSegment;

class CodeSegment
{
    uint8_t* bytes_;
    uint32_t codeLength_;
    uint32_t globalDataLength_;

    CodeSegment(const CodeSegment&) = delete;
    void operator=(const CodeSegment&) = delete;

  public:
    static UniqueCodeSegment allocate(ExclusiveContext* cx, uint32_t codeLength, uint32_t dataLength);
    static UniqueCodeSegment clone(ExclusiveContext* cx, const CodeSegment& code);
    CodeSegment() : bytes_(nullptr), codeLength_(0), globalDataLength_(0) {}
    ~CodeSegment();

    uint8_t* code() const { return bytes_; }
    uint8_t* globalData() const { return bytes_ + codeLength_; }
    uint32_t codeLength() const { return codeLength_; }
    uint32_t globalDataLength() const { return globalDataLength_; }
    uint32_t totalLength() const { return codeLength_ + globalDataLength_; }

    WASM_DECLARE_SERIALIZABLE(CodeSegment)
};

// An Export represents a single function inside a wasm Module that has been
// exported one or more times.

class Export
{
    Sig sig_;
    struct CacheablePod {
        uint32_t stubOffset_;
    } pod;

  public:
    Export() = default;
    explicit Export(Sig&& sig)
      : sig_(Move(sig))
    {
        pod.stubOffset_ = UINT32_MAX;
    }
    void initStubOffset(uint32_t stubOffset) {
        MOZ_ASSERT(pod.stubOffset_ == UINT32_MAX);
        pod.stubOffset_ = stubOffset;
    }

    uint32_t stubOffset() const {
        return pod.stubOffset_;
    }
    const Sig& sig() const {
        return sig_;
    }

    WASM_DECLARE_SERIALIZABLE(Export)
};

typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;

// An Import describes a wasm module import. Currently, only functions can be
// imported in wasm. A function import includes the signature used within the
// module to call it.

class Import
{
    Sig sig_;
    struct CacheablePod {
        uint32_t exitGlobalDataOffset_;
        uint32_t interpExitCodeOffset_;
        uint32_t jitExitCodeOffset_;
    } pod;

  public:
    Import() = default;
    Import(Sig&& sig, uint32_t exitGlobalDataOffset)
      : sig_(Move(sig))
    {
        pod.exitGlobalDataOffset_ = exitGlobalDataOffset;
        pod.interpExitCodeOffset_ = 0;
        pod.jitExitCodeOffset_ = 0;
    }

    void initInterpExitOffset(uint32_t off) {
        MOZ_ASSERT(!pod.interpExitCodeOffset_);
        pod.interpExitCodeOffset_ = off;
    }
    void initJitExitOffset(uint32_t off) {
        MOZ_ASSERT(!pod.jitExitCodeOffset_);
        pod.jitExitCodeOffset_ = off;
    }

    const Sig& sig() const {
        return sig_;
    }
    uint32_t exitGlobalDataOffset() const {
        return pod.exitGlobalDataOffset_;
    }
    uint32_t interpExitCodeOffset() const {
        return pod.interpExitCodeOffset_;
    }
    uint32_t jitExitCodeOffset() const {
        return pod.jitExitCodeOffset_;
    }

    WASM_DECLARE_SERIALIZABLE(Import)
};

typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;

// A CodeRange describes a single contiguous range of code within a wasm
// module's code segment. A CodeRange describes what the code does and, for
// function bodies, the name and source coordinates of the function.

class CodeRange
{
  public:
    enum Kind { Function, Entry, ImportJitExit, ImportInterpExit, Inline, CallThunk };

  private:
    // All fields are treated as cacheable POD:
    uint32_t begin_;
    uint32_t profilingReturn_;
    uint32_t end_;
    uint32_t funcIndex_;
    uint32_t funcLineOrBytecode_;
    uint8_t funcBeginToTableEntry_;
    uint8_t funcBeginToTableProfilingJump_;
    uint8_t funcBeginToNonProfilingEntry_;
    uint8_t funcProfilingJumpToProfilingReturn_;
    uint8_t funcProfilingEpilogueToProfilingReturn_;
    Kind kind_ : 8;

  public:
    CodeRange() = default;
    CodeRange(Kind kind, Offsets offsets);
    CodeRange(Kind kind, ProfilingOffsets offsets);
    CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);

    // All CodeRanges have a begin and end.

    uint32_t begin() const {
        return begin_;
    }
    uint32_t end() const {
        return end_;
    }

    // Other fields are only available for certain CodeRange::Kinds.

    Kind kind() const {
        return kind_;
    }

    bool isFunction() const {
        return kind() == Function;
    }
    bool isImportExit() const {
        return kind() == ImportJitExit || kind() == ImportInterpExit;
    }
    bool isInline() const {
        return kind() == Inline;
    }

    // Every CodeRange except entry and inline stubs has a profiling return
    // which is used for asynchronous profiling to determine the frame pointer.

    uint32_t profilingReturn() const {
        MOZ_ASSERT(isFunction() || isImportExit());
        return profilingReturn_;
    }

    // Functions have offsets which allow patching to selectively execute
    // profiling prologues/epilogues.

    uint32_t funcProfilingEntry() const {
        MOZ_ASSERT(isFunction());
        return begin();
    }
    uint32_t funcTableEntry() const {
        MOZ_ASSERT(isFunction());
        return begin_ + funcBeginToTableEntry_;
    }
    uint32_t funcTableProfilingJump() const {
        MOZ_ASSERT(isFunction());
        return begin_ + funcBeginToTableProfilingJump_;
    }
    uint32_t funcNonProfilingEntry() const {
        MOZ_ASSERT(isFunction());
        return begin_ + funcBeginToNonProfilingEntry_;
    }
    uint32_t funcProfilingJump() const {
        MOZ_ASSERT(isFunction());
        return profilingReturn_ - funcProfilingJumpToProfilingReturn_;
    }
    uint32_t funcProfilingEpilogue() const {
        MOZ_ASSERT(isFunction());
        return profilingReturn_ - funcProfilingEpilogueToProfilingReturn_;
    }
    uint32_t funcIndex() const {
        MOZ_ASSERT(isFunction());
        return funcIndex_;
    }
    uint32_t funcLineOrBytecode() const {
        MOZ_ASSERT(isFunction());
        return funcLineOrBytecode_;
    }

    // A sorted array of CodeRanges can be looked up via BinarySearch and PC.

    struct PC {
        size_t offset;
        explicit PC(size_t offset) : offset(offset) {}
        bool operator==(const CodeRange& rhs) const {
            return offset >= rhs.begin() && offset < rhs.end();
        }
        bool operator<(const CodeRange& rhs) const {
            return offset < rhs.begin();
        }
    };
};

WASM_DECLARE_POD_VECTOR(CodeRange, CodeRangeVector)

// A CallThunk describes the offset and target of thunks so that they may be
// patched at runtime when profiling is toggled. Thunks are emitted to connect
// callsites that are too far away from callees to fit in a single call
// instruction's relative offset.

struct CallThunk
{
    uint32_t offset;
    union {
        uint32_t funcIndex;
        uint32_t codeRangeIndex;
    } u;

    CallThunk(uint32_t offset, uint32_t funcIndex) : offset(offset) { u.funcIndex = funcIndex; }
    CallThunk() = default;
};

WASM_DECLARE_POD_VECTOR(CallThunk, CallThunkVector)

// CacheableChars is used to cacheably store UniqueChars.

struct CacheableChars : UniqueChars
{
    CacheableChars() = default;
    explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
    MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs) : UniqueChars(Move(rhs)) {}
    WASM_DECLARE_SERIALIZABLE(CacheableChars)
};

typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;

// A wasm module can either use no heap, an unshared heap (ArrayBuffer) or a
// shared heap (SharedArrayBuffer).

enum class HeapUsage
{
    None = false,
    Unshared = 1,
    Shared = 2
};

static inline bool
UsesHeap(HeapUsage heapUsage)
{
    return bool(heapUsage);
}

// Metadata holds all the data that is needed to describe compiled wasm code
// at runtime (as opposed to data that is only used to statically link or
// instantiate a module).
//
// Metadata is built incrementally by ModuleGenerator and then shared immutably
// between modules.

struct MetadataCacheablePod
{
    uint32_t functionLength;
    ModuleKind kind;
    HeapUsage heapUsage;
    CompileArgs compileArgs;

    MetadataCacheablePod() { mozilla::PodZero(this); }
};

struct Metadata : RefCounted<Metadata>, MetadataCacheablePod
{
    MetadataCacheablePod& pod() { return *this; }
    const MetadataCacheablePod& pod() const { return *this; }

    ImportVector imports;
    ExportVector exports;
    HeapAccessVector heapAccesses;
    CodeRangeVector codeRanges;
    CallSiteVector callSites;
    CallThunkVector callThunks;
    CacheableCharsVector prettyFuncNames;
    CacheableChars filename;

    WASM_DECLARE_SERIALIZABLE(Metadata);
};

typedef RefPtr<Metadata> MutableMetadata;
typedef RefPtr<const Metadata> SharedMetadata;

} // namespace wasm
} // namespace js

#endif // wasm_code_h
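The CodeRange::PC comparator in the header above exists so a PC offset can be binary-searched against a begin()-sorted CodeRangeVector: "equal" means the offset falls inside a range, "less" means it precedes the range entirely. A plain-C++ sketch of that lookup (the real code uses mozilla's binary-search helpers, whose exact API is not shown in this patch):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Range { uint32_t begin, end; };   // stand-in for wasm::CodeRange

    // Binary search mirroring PC's operators: returns the range containing
    // offset, or nullptr if the offset falls in no range.
    static const Range* Lookup(const std::vector<Range>& sorted, size_t offset) {
        size_t lo = 0, hi = sorted.size();
        while (lo < hi) {
            size_t mid = lo + (hi - lo) / 2;
            if (offset < sorted[mid].begin)
                hi = mid;                     // PC::operator< case
            else if (offset >= sorted[mid].end)
                lo = mid + 1;
            else
                return &sorted[mid];          // PC::operator== case
        }
        return nullptr;
    }

    int main() {
        std::vector<Range> ranges{{0, 16}, {16, 40}, {40, 64}};
        return Lookup(ranges, 20) == &ranges[1] ? 0 : 1;
    }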
@@ -40,6 +40,7 @@ static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
 ModuleGenerator::ModuleGenerator(ExclusiveContext* cx)
   : cx_(cx),
     jcx_(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread())),
+    globalDataLength_(InitialGlobalDataBytes),
     slowFuncs_(cx),
     numSigs_(0),
     lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),

@@ -115,17 +116,16 @@ ModuleGenerator::init(UniqueModuleGeneratorData shared, UniqueChars filename)
    if (!funcIndexToExport_.init())
        return false;
 
-   module_ = MakeUnique<ModuleData>();
-   if (!module_)
+   metadata_ = js_new<Metadata>();
+   if (!metadata_)
        return false;
 
-   module_->globalBytes = InitialGlobalDataBytes;
-   module_->compileArgs = shared->args;
-   module_->kind = shared->kind;
-   module_->heapUsage = HeapUsage::None;
-   module_->filename = Move(filename);
+   metadata_->compileArgs = shared->args;
+   metadata_->kind = shared->kind;
+   metadata_->heapUsage = HeapUsage::None;
+   metadata_->filename = Move(filename);
 
-   exportMap_ = MakeUnique<ExportMap>();
+   exportMap_ = js_new<ExportMap>();
    if (!exportMap_)
        return false;

@@ -136,24 +136,24 @@ ModuleGenerator::init(UniqueModuleGeneratorData shared, UniqueChars filename)
    // module is generated. For wasm, the Vectors are correctly-sized and
    // already initialized.
 
-   if (module_->kind == ModuleKind::Wasm) {
+   if (metadata_->kind == ModuleKind::Wasm) {
        numSigs_ = shared_->sigs.length();
-       module_->globalBytes = AlignBytes(module_->globalBytes, sizeof(void*));
+       globalDataLength_ = AlignBytes(globalDataLength_, sizeof(void*));
 
        for (ImportModuleGeneratorData& import : shared_->imports) {
            MOZ_ASSERT(!import.globalDataOffset);
-           import.globalDataOffset = module_->globalBytes;
-           module_->globalBytes += Module::SizeOfImportExit;
+           import.globalDataOffset = globalDataLength_;
+           globalDataLength_ += Module::SizeOfImportExit;
            if (!addImport(*import.sig, import.globalDataOffset))
                return false;
        }
 
-       MOZ_ASSERT(module_->globalBytes % sizeof(void*) == 0);
+       MOZ_ASSERT(globalDataLength_ % sizeof(void*) == 0);
        MOZ_ASSERT(shared_->asmJSSigToTable.empty());
        MOZ_ASSERT(shared_->wasmTable.numElems == shared_->wasmTable.elemFuncIndices.length());
        MOZ_ASSERT(!shared_->wasmTable.globalDataOffset);
-       shared_->wasmTable.globalDataOffset = module_->globalBytes;
-       module_->globalBytes += shared_->wasmTable.numElems * sizeof(void*);
+       shared_->wasmTable.globalDataOffset = globalDataLength_;
+       globalDataLength_ += shared_->wasmTable.numElems * sizeof(void*);
    }
 
    return true;
@@ -199,7 +199,7 @@ const CodeRange&
ModuleGenerator::funcCodeRange(uint32_t funcIndex) const
{
    MOZ_ASSERT(funcIsDefined(funcIndex));
    const CodeRange& cr = module_->codeRanges[funcIndexToCodeRange_[funcIndex]];
    const CodeRange& cr = metadata_->codeRanges[funcIndexToCodeRange_[funcIndex]];
    MOZ_ASSERT(cr.isFunction());
    return cr;
}
@@ -251,9 +251,9 @@ ModuleGenerator::convertOutOfRangeBranchesToThunks()
            return false;
        offsets.end = masm_.currentOffset();

        if (!module_->codeRanges.emplaceBack(CodeRange::CallThunk, offsets))
        if (!metadata_->codeRanges.emplaceBack(CodeRange::CallThunk, offsets))
            return false;
        if (!module_->callThunks.emplaceBack(thunkOffset, cs.targetIndex()))
        if (!metadata_->callThunks.emplaceBack(thunkOffset, cs.targetIndex()))
            return false;
        if (!alreadyThunked.add(p, cs.targetIndex(), offsets.begin))
            return false;
@@ -282,7 +282,7 @@ ModuleGenerator::convertOutOfRangeBranchesToThunks()
            return false;
        offsets.end = masm_.currentOffset();

        if (!module_->codeRanges.emplaceBack(CodeRange::Inline, offsets))
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, offsets))
            return false;
        if (!jumpThunks_[target].append(thunkOffset))
            return false;
@@ -316,8 +316,8 @@ ModuleGenerator::finishTask(IonCompileTask* task)
    results.offsets().offsetBy(offsetInWhole);

    // Add the CodeRange for this function.
    uint32_t funcCodeRangeIndex = module_->codeRanges.length();
    if (!module_->codeRanges.emplaceBack(func.index(), func.lineOrBytecode(), results.offsets()))
    uint32_t funcCodeRangeIndex = metadata_->codeRanges.length();
    if (!metadata_->codeRanges.emplaceBack(func.index(), func.lineOrBytecode(), results.offsets()))
        return false;

    // Maintain a mapping from function index to CodeRange index.
@@ -369,7 +369,7 @@ ModuleGenerator::finishCodegen(StaticLinkData* link)
        return false;
    for (uint32_t i = 0; i < numExports(); i++) {
        uint32_t target = exportMap_->exportFuncIndices[i];
        const Sig& sig = module_->exports[i].sig();
        const Sig& sig = metadata_->exports[i].sig();
        entries[i] = GenerateEntry(masm, target, sig, usesHeap());
    }

@@ -378,8 +378,8 @@ ModuleGenerator::finishCodegen(StaticLinkData* link)
    if (!jitExits.resize(numImports()))
        return false;
    for (uint32_t i = 0; i < numImports(); i++) {
        interpExits[i] = GenerateInterpExit(masm, module_->imports[i], i);
        jitExits[i] = GenerateJitExit(masm, module_->imports[i], usesHeap());
        interpExits[i] = GenerateInterpExit(masm, metadata_->imports[i], i);
        jitExits[i] = GenerateJitExit(masm, metadata_->imports[i], usesHeap());
    }

    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit))
@@ -396,31 +396,31 @@ ModuleGenerator::finishCodegen(StaticLinkData* link)

    for (uint32_t i = 0; i < numExports(); i++) {
        entries[i].offsetBy(offsetInWhole);
        module_->exports[i].initStubOffset(entries[i].begin);
        if (!module_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
        metadata_->exports[i].initStubOffset(entries[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
            return false;
    }

    for (uint32_t i = 0; i < numImports(); i++) {
        interpExits[i].offsetBy(offsetInWhole);
        module_->imports[i].initInterpExitOffset(interpExits[i].begin);
        if (!module_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
        metadata_->imports[i].initInterpExitOffset(interpExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
            return false;

        jitExits[i].offsetBy(offsetInWhole);
        module_->imports[i].initJitExitOffset(jitExits[i].begin);
        if (!module_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
        metadata_->imports[i].initJitExitOffset(jitExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
            return false;
    }

    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        jumpTargets[target].offsetBy(offsetInWhole);
        if (!module_->codeRanges.emplaceBack(CodeRange::Inline, jumpTargets[target]))
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, jumpTargets[target]))
            return false;
    }

    interruptExit.offsetBy(offsetInWhole);
    if (!module_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
        return false;

    // Fill in StaticLinkData with the offsets of these stubs.
@@ -436,7 +436,7 @@ ModuleGenerator::finishCodegen(StaticLinkData* link)

    // Now that all thunks have been generated, patch all the thunks.

    for (CallThunk& callThunk : module_->callThunks) {
    for (CallThunk& callThunk : metadata_->callThunks) {
        uint32_t funcIndex = callThunk.u.funcIndex;
        callThunk.u.codeRangeIndex = funcIndexToCodeRange_[funcIndex];
        masm_.patchThunk(callThunk.offset, funcCodeRange(funcIndex).funcNonProfilingEntry());
@@ -454,7 +454,7 @@ ModuleGenerator::finishCodegen(StaticLinkData* link)
}

bool
ModuleGenerator::finishStaticLinkData(uint8_t* code, uint32_t codeBytes, StaticLinkData* link)
ModuleGenerator::finishStaticLinkData(uint8_t* code, uint32_t codeLength, StaticLinkData* link)
{
    // Add links to absolute addresses identified symbolically.
    StaticLinkData::SymbolicLinkArray& symbolicLinks = link->symbolicLinks;
@@ -486,7 +486,7 @@ ModuleGenerator::finishStaticLinkData(uint8_t* code, uint32_t codeBytes, StaticL
        AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
        StaticLinkData::InternalLink inLink(StaticLinkData::InternalLink::RawPointer);
        inLink.patchAtOffset = masm_.labelToPatchOffset(a.patchAt);
        inLink.targetOffset = codeBytes + a.globalDataOffset;
        inLink.targetOffset = codeLength + a.globalDataOffset;
        if (!link->internalLinks.append(inLink))
            return false;
    }
@@ -495,7 +495,7 @@ ModuleGenerator::finishStaticLinkData(uint8_t* code, uint32_t codeBytes, StaticL
#if defined(JS_CODEGEN_X64)
    // Global data accesses on x64 use rip-relative addressing and thus do
    // not need patching after deserialization.
    uint8_t* globalData = code + codeBytes;
    uint8_t* globalData = code + codeLength;
    for (size_t i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) {
        AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
        masm_.patchAsmJSGlobalAccess(a.patchAt, code, globalData, a.globalDataOffset);
@@ -541,23 +541,20 @@ ModuleGenerator::addImport(const Sig& sig, uint32_t globalDataOffset)
    if (!copy.clone(sig))
        return false;

    return module_->imports.emplaceBack(Move(copy), globalDataOffset);
    return metadata_->imports.emplaceBack(Move(copy), globalDataOffset);
}

bool
ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset)
{
    uint32_t globalBytes = module_->globalBytes;

    uint32_t pad = ComputeByteAlignment(globalBytes, align);
    if (UINT32_MAX - globalBytes < pad + bytes)
    uint32_t pad = ComputeByteAlignment(globalDataLength_, align);
    if (UINT32_MAX - globalDataLength_ < pad + bytes)
        return false;

    globalBytes += pad;
    *globalDataOffset = globalBytes;
    globalBytes += bytes;
    globalDataLength_ += pad;
    *globalDataOffset = globalDataLength_;
    globalDataLength_ += bytes;

    module_->globalBytes = globalBytes;
    return true;
}

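The pad-then-bump arithmetic in allocateGlobalBytes is easy to get wrong, so here is a self-contained sketch of the same bump-allocator logic. computeByteAlignment is a local stand-in for the jit helper, assumed to return the padding needed to reach the next multiple of a power-of-two align:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Bytes of padding needed to round `length` up to a power-of-two `align`.
static uint32_t computeByteAlignment(uint32_t length, uint32_t align)
{
    assert((align & (align - 1)) == 0);   // power of two
    return (align - (length % align)) % align;
}

// Mirrors ModuleGenerator::allocateGlobalBytes: pad, check overflow, bump.
static bool allocateGlobalBytes(uint32_t* globalDataLength, uint32_t bytes,
                                uint32_t align, uint32_t* offset)
{
    uint32_t pad = computeByteAlignment(*globalDataLength, align);
    if (UINT32_MAX - *globalDataLength < pad + bytes)
        return false;                     // would overflow uint32_t
    *globalDataLength += pad;
    *offset = *globalDataLength;          // allocation starts aligned
    *globalDataLength += bytes;
    return true;
}

int main()
{
    uint32_t length = 13, off = 0;
    allocateGlobalBytes(&length, 8, sizeof(void*), &off);
    printf("offset=%u length=%u\n", off, length);  // offset=16 length=24 on 64-bit
}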
@@ -600,15 +597,15 @@ ModuleGenerator::allocateGlobal(ValType type, bool isConst, uint32_t* index)
void
ModuleGenerator::initHeapUsage(HeapUsage heapUsage, uint32_t minHeapLength)
{
    MOZ_ASSERT(module_->heapUsage == HeapUsage::None);
    module_->heapUsage = heapUsage;
    MOZ_ASSERT(metadata_->heapUsage == HeapUsage::None);
    metadata_->heapUsage = heapUsage;
    shared_->minHeapLength = minHeapLength;
}

bool
ModuleGenerator::usesHeap() const
{
    return UsesHeap(module_->heapUsage);
    return UsesHeap(metadata_->heapUsage);
}

void
@@ -663,7 +660,7 @@ ModuleGenerator::initImport(uint32_t importIndex, uint32_t sigIndex)
    if (!allocateGlobalBytes(Module::SizeOfImportExit, sizeof(void*), &globalDataOffset))
        return false;

    MOZ_ASSERT(importIndex == module_->imports.length());
    MOZ_ASSERT(importIndex == metadata_->imports.length());
    if (!addImport(sig(sigIndex), globalDataOffset))
        return false;

@@ -677,7 +674,7 @@ ModuleGenerator::initImport(uint32_t importIndex, uint32_t sigIndex)
uint32_t
ModuleGenerator::numImports() const
{
    return module_->imports.length();
    return metadata_->imports.length();
}

const ImportModuleGeneratorData&
@@ -700,7 +697,7 @@ ModuleGenerator::declareExport(UniqueChars fieldName, uint32_t funcIndex, uint32
        return exportMap_->fieldsToExports.append(p->value());
    }

    uint32_t newExportIndex = module_->exports.length();
    uint32_t newExportIndex = metadata_->exports.length();
    MOZ_ASSERT(newExportIndex < MaxExports);

    if (exportIndex)
@@ -710,7 +707,7 @@ ModuleGenerator::declareExport(UniqueChars fieldName, uint32_t funcIndex, uint32
    if (!copy.clone(funcSig(funcIndex)))
        return false;

    return module_->exports.append(Move(copy)) &&
    return metadata_->exports.append(Move(copy)) &&
           funcIndexToExport_.add(p, funcIndex, newExportIndex) &&
           exportMap_->fieldsToExports.append(newExportIndex) &&
           exportMap_->exportFuncIndices.append(funcIndex);
@@ -719,7 +716,7 @@ ModuleGenerator::declareExport(UniqueChars fieldName, uint32_t funcIndex, uint32
uint32_t
ModuleGenerator::numExports() const
{
    return module_->exports.length();
    return metadata_->exports.length();
}

bool
@@ -846,7 +843,7 @@ ModuleGenerator::finishFuncDefs()
    for (uint32_t funcIndex = 0; funcIndex < funcIndexToCodeRange_.length(); funcIndex++)
        MOZ_ASSERT(funcIsDefined(funcIndex));

    module_->functionBytes = masm_.size();
    metadata_->functionLength = masm_.size();
    finishedFuncDefs_ = true;
    return true;
}
@@ -884,41 +881,42 @@ ModuleGenerator::initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncInd

bool
ModuleGenerator::finish(CacheableCharsVector&& prettyFuncNames,
                        UniqueModuleData* module,
                        UniqueStaticLinkData* linkData,
                        UniqueExportMap* exportMap,
                        UniqueCodeSegment* codeSegment,
                        SharedMetadata* metadata,
                        SharedStaticLinkData* staticLinkDataOut,
                        SharedExportMap* exportMap,
                        SlowFunctionVector* slowFuncs)
{
    MOZ_ASSERT(!activeFunc_);
    MOZ_ASSERT(finishedFuncDefs_);

    UniqueStaticLinkData link = MakeUnique<StaticLinkData>();
    if (!link)
    MutableStaticLinkData staticLinkData = js_new<StaticLinkData>();
    if (!staticLinkData)
        return false;

    if (!finishCodegen(link.get()))
    if (!finishCodegen(staticLinkData.get()))
        return false;

    module_->prettyFuncNames = Move(prettyFuncNames);
    metadata_->prettyFuncNames = Move(prettyFuncNames);

    // Start global data on a new page so JIT code may be given independent
    // protection flags. Note assumption that global data starts right after
    // code below.
    module_->codeBytes = AlignBytes(masm_.bytesNeeded(), gc::SystemPageSize());
    uint32_t codeLength = AlignBytes(masm_.bytesNeeded(), gc::SystemPageSize());

    // Inflate the global bytes up to page size so that the total bytes are a
    // multiple of the page size (as required by the allocator functions).
    module_->globalBytes = AlignBytes(module_->globalBytes, gc::SystemPageSize());
    globalDataLength_ = AlignBytes(globalDataLength_, gc::SystemPageSize());

    // Allocate the code (guarded by a UniquePtr until it is given to the Module).
    module_->code = AllocateCode(cx_, module_->totalBytes());
    if (!module_->code)
    UniqueCodeSegment cs = CodeSegment::allocate(cx_, codeLength, globalDataLength_);
    if (!cs)
        return false;

    // Delay flushing until Module::dynamicallyLink. The flush-inhibited range
    // is set by executableCopy.
    AutoFlushICache afc("ModuleGenerator::finish", /* inhibit = */ true);
    masm_.executableCopy(module_->code.get());
    masm_.executableCopy(cs->code());

    // c.f. JitCode::copyFrom
    MOZ_ASSERT(masm_.jumpRelocationTableBytes() == 0);
@@ -928,25 +926,26 @@ ModuleGenerator::finish(CacheableCharsVector&& prettyFuncNames,

    // Convert the CallSiteAndTargetVector (needed during generation) to a
    // CallSiteVector (what is stored in the Module).
    if (!module_->callSites.appendAll(masm_.callSites()))
    if (!metadata_->callSites.appendAll(masm_.callSites()))
        return false;

    // The MacroAssembler has accumulated all the heap accesses during codegen.
    module_->heapAccesses = masm_.extractHeapAccesses();
    metadata_->heapAccesses = masm_.extractHeapAccesses();

    if (!finishStaticLinkData(module_->code.get(), module_->codeBytes, link.get()))
    if (!finishStaticLinkData(cs->code(), cs->codeLength(), staticLinkData.get()))
        return false;

    // These Vectors can get large and the excess capacity can be significant,
    // so realloc them down to size.
    module_->heapAccesses.podResizeToFit();
    module_->codeRanges.podResizeToFit();
    module_->callSites.podResizeToFit();
    module_->callThunks.podResizeToFit();
    metadata_->heapAccesses.podResizeToFit();
    metadata_->codeRanges.podResizeToFit();
    metadata_->callSites.podResizeToFit();
    metadata_->callThunks.podResizeToFit();

    *module = Move(module_);
    *linkData = Move(link);
    *exportMap = Move(exportMap_);
    *codeSegment = Move(cs);
    *metadata = metadata_.forget();
    *staticLinkDataOut = staticLinkData.forget();
    *exportMap = exportMap_.forget();
    *slowFuncs = Move(slowFuncs_);
    return true;
}
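The two AlignBytes calls in finish() give the allocation its final shape: machine code rounded up to whole pages, immediately followed by the global data region, itself padded to a page multiple so the code pages can later be flipped from RW to RX independently of the data. A toy version of that layout calculation, with the page size hard-coded to 4096 for illustration (the real code queries gc::SystemPageSize()):

#include <cstdint>
#include <cstdio>

// Round `bytes` up to the next multiple of `align`.
static uint32_t alignBytes(uint32_t bytes, uint32_t align)
{
    return (bytes + align - 1) / align * align;
}

int main()
{
    const uint32_t pageSize = 4096;   // stand-in for gc::SystemPageSize()
    uint32_t masmBytes = 9000;        // raw machine-code size
    uint32_t globalData = 130;        // accumulated globalDataLength_

    uint32_t codeLength = alignBytes(masmBytes, pageSize);     // 12288
    uint32_t globalLength = alignBytes(globalData, pageSize);  // 4096
    uint32_t total = codeLength + globalLength;                // 16384

    printf("code @ [0, %u), globals @ [%u, %u)\n",
           codeLength, codeLength, total);
}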
@@ -119,8 +119,9 @@ class MOZ_STACK_CLASS ModuleGenerator
    jit::JitContext jcx_;

    // Data handed back to the caller in finish()
    UniqueModuleData module_;
    UniqueExportMap exportMap_;
    uint32_t globalDataLength_;
    MutableMetadata metadata_;
    MutableExportMap exportMap_;
    SlowFunctionVector slowFuncs_;

    // Data scoped to the ModuleGenerator's lifetime
@@ -152,7 +153,7 @@ class MOZ_STACK_CLASS ModuleGenerator
    MOZ_MUST_USE bool convertOutOfRangeBranchesToThunks();
    MOZ_MUST_USE bool finishTask(IonCompileTask* task);
    MOZ_MUST_USE bool finishCodegen(StaticLinkData* link);
    MOZ_MUST_USE bool finishStaticLinkData(uint8_t* code, uint32_t codeBytes, StaticLinkData* link);
    MOZ_MUST_USE bool finishStaticLinkData(uint8_t* code, uint32_t codeLength, StaticLinkData* link);
    MOZ_MUST_USE bool addImport(const Sig& sig, uint32_t globalDataOffset);
    MOZ_MUST_USE bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOff);

@@ -162,8 +163,8 @@ class MOZ_STACK_CLASS ModuleGenerator

    MOZ_MUST_USE bool init(UniqueModuleGeneratorData shared, UniqueChars filename);

    bool isAsmJS() const { return module_->kind == ModuleKind::AsmJS; }
    CompileArgs args() const { return module_->compileArgs; }
    bool isAsmJS() const { return metadata_->kind == ModuleKind::AsmJS; }
    CompileArgs args() const { return metadata_->compileArgs; }
    jit::MacroAssembler& masm() { return masm_; }

    // Heap usage:
@@ -207,13 +208,14 @@ class MOZ_STACK_CLASS ModuleGenerator
    void initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices);
    void bumpMinHeapLength(uint32_t newMinHeapLength);

    // Return a ModuleData object which may be used to construct a Module, the
    // Return a Metadata object which may be used to construct a Module, the
    // StaticLinkData required to call Module::staticallyLink, and the list of
    // functions that took a long time to compile.
    MOZ_MUST_USE bool finish(CacheableCharsVector&& prettyFuncNames,
                             UniqueModuleData* module,
                             UniqueStaticLinkData* staticLinkData,
                             UniqueExportMap* exportMap,
                             UniqueCodeSegment* codeSegment,
                             SharedMetadata* metadata,
                             SharedStaticLinkData* staticLinkData,
                             SharedExportMap* exportMap,
                             SlowFunctionVector* slowFuncs);
};

@@ -34,7 +34,6 @@
# include "jit/PerfSpewer.h"
#endif
#include "jit/BaselineJIT.h"
#include "jit/ExecutableAllocator.h"
#include "jit/JitCommon.h"
#include "js/MemoryMetrics.h"
#include "vm/StringBuffer.h"
@@ -51,7 +50,6 @@
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::Atomic;
using mozilla::BinarySearch;
using mozilla::MakeEnumeratedRange;
using mozilla::PodCopy;
@@ -59,40 +57,6 @@ using mozilla::PodZero;
using mozilla::Swap;
using JS::GenericNaN;

// Limit the number of concurrent wasm code allocations per process. Note that
// on Linux, the real maximum is ~32k, as each module requires 2 maps (RW/RX),
// and the kernel's default max_map_count is ~65k.
static Atomic<uint32_t> wasmCodeAllocations(0);
static const uint32_t MaxWasmCodeAllocations = 16384;

UniqueCodePtr
wasm::AllocateCode(ExclusiveContext* cx, size_t bytes)
{
    // Allocate RW memory. DynamicallyLinkModule will reprotect the code as RX.
    unsigned permissions =
        ExecutableAllocator::initialProtectionFlags(ExecutableAllocator::Writable);

    void* p = nullptr;
    if (wasmCodeAllocations++ < MaxWasmCodeAllocations)
        p = AllocateExecutableMemory(nullptr, bytes, permissions, "asm-js-code", gc::SystemPageSize());
    if (!p) {
        wasmCodeAllocations--;
        ReportOutOfMemory(cx);
    }

    return UniqueCodePtr((uint8_t*)p, CodeDeleter(bytes));
}

void
CodeDeleter::operator()(uint8_t* p)
{
    MOZ_ASSERT(wasmCodeAllocations > 0);
    wasmCodeAllocations--;

    MOZ_ASSERT(bytes_ != 0);
    DeallocateExecutableMemory(p, bytes_, gc::SystemPageSize());
}

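The UniqueCodePtr/CodeDeleter pair deleted here (CodeSegment now plays this role) is an instance of a broadly useful idiom: a smart pointer whose deleter carries the allocation size it must pass back to the allocator. A portable sketch of the idiom, with malloc/free standing in for the executable-memory allocator:

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <memory>

// A stateful deleter: remembers how many bytes the pointer owns, the way
// CodeDeleter remembered the size to pass to DeallocateExecutableMemory.
class SizedDeleter
{
    size_t bytes_ = 0;
  public:
    SizedDeleter() = default;
    explicit SizedDeleter(size_t bytes) : bytes_(bytes) {}
    void operator()(uint8_t* p) const {
        printf("freeing %zu bytes\n", bytes_);
        free(p);
    }
};

using UniqueBuffer = std::unique_ptr<uint8_t, SizedDeleter>;

static UniqueBuffer allocateBuffer(size_t bytes)
{
    return UniqueBuffer(static_cast<uint8_t*>(malloc(bytes)),
                        SizedDeleter(bytes));
}

int main()
{
    UniqueBuffer buf = allocateBuffer(4096);
    buf.get()[0] = 0xC3;   // use the memory
}                          // deleter runs here with the recorded size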
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
// On MIPS, CodeLabels are instruction immediates so InternalLinks only
// patch instruction immediates.
@@ -149,16 +113,6 @@ StaticLinkData::SymbolicLinkArray::deserialize(ExclusiveContext* cx, const uint8
    return cursor;
}

bool
StaticLinkData::SymbolicLinkArray::clone(JSContext* cx, SymbolicLinkArray* out) const
{
    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
        if (!ClonePodVector(cx, (*this)[imm], &(*out)[imm]))
            return false;
    }
    return true;
}

size_t
StaticLinkData::SymbolicLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
@@ -191,13 +145,6 @@ StaticLinkData::FuncPtrTable::deserialize(ExclusiveContext* cx, const uint8_t* c
    return cursor;
}

bool
StaticLinkData::FuncPtrTable::clone(JSContext* cx, FuncPtrTable* out) const
{
    out->globalDataOffset = globalDataOffset;
    return ClonePodVector(cx, elemOffsets, &out->elemOffsets);
}

size_t
StaticLinkData::FuncPtrTable::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
@@ -233,15 +180,6 @@ StaticLinkData::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
    return cursor;
}

bool
StaticLinkData::clone(JSContext* cx, StaticLinkData* out) const
{
    out->pod = pod;
    return ClonePodVector(cx, internalLinks, &out->internalLinks) &&
           symbolicLinks.clone(cx, &out->symbolicLinks) &&
           CloneVector(cx, funcPtrTables, &out->funcPtrTables);
}

size_t
StaticLinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
@@ -250,227 +188,6 @@ StaticLinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
           SizeOfVectorExcludingThis(funcPtrTables, mallocSizeOf);
}

static size_t
SerializedSigSize(const Sig& sig)
{
    return sizeof(ExprType) +
           SerializedPodVectorSize(sig.args());
}

static uint8_t*
SerializeSig(uint8_t* cursor, const Sig& sig)
{
    cursor = WriteScalar<ExprType>(cursor, sig.ret());
    cursor = SerializePodVector(cursor, sig.args());
    return cursor;
}

static const uint8_t*
DeserializeSig(ExclusiveContext* cx, const uint8_t* cursor, Sig* sig)
{
    ExprType ret;
    cursor = ReadScalar<ExprType>(cursor, &ret);

    ValTypeVector args;
    cursor = DeserializePodVector(cx, cursor, &args);
    if (!cursor)
        return nullptr;

    *sig = Sig(Move(args), ret);
    return cursor;
}

static size_t
SizeOfSigExcludingThis(const Sig& sig, MallocSizeOf mallocSizeOf)
{
    return sig.args().sizeOfExcludingThis(mallocSizeOf);
}
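These helpers follow the three-function cursor contract used by every serializable type in this patch: serializedSize computes an exact byte count, serialize advances a uint8_t* cursor as it writes, and deserialize walks a const cursor, returning null on failure. A dependency-free sketch of that contract for a toy payload (the names are illustrative, not the real Wasm serialization helpers):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

struct Payload
{
    int32_t ret;
    std::vector<int32_t> args;

    // Exact number of bytes serialize() will write.
    size_t serializedSize() const {
        return sizeof(ret) + sizeof(uint32_t) + args.size() * sizeof(int32_t);
    }
    // Write fields at the cursor, returning the advanced cursor.
    uint8_t* serialize(uint8_t* cursor) const {
        memcpy(cursor, &ret, sizeof(ret));
        cursor += sizeof(ret);
        uint32_t n = uint32_t(args.size());
        memcpy(cursor, &n, sizeof(n));
        cursor += sizeof(n);
        memcpy(cursor, args.data(), n * sizeof(int32_t));
        return cursor + n * sizeof(int32_t);
    }
    // Read fields back, returning the advanced cursor.
    const uint8_t* deserialize(const uint8_t* cursor) {
        memcpy(&ret, cursor, sizeof(ret));
        cursor += sizeof(ret);
        uint32_t n;
        memcpy(&n, cursor, sizeof(n));
        cursor += sizeof(n);
        args.resize(n);
        memcpy(args.data(), cursor, n * sizeof(int32_t));
        return cursor + n * sizeof(int32_t);
    }
};

int main()
{
    Payload in{7, {1, 2, 3}};
    Payload out{};
    std::vector<uint8_t> buf(in.serializedSize());

    assert(in.serialize(buf.data()) == buf.data() + buf.size());
    assert(out.deserialize(buf.data()) == buf.data() + buf.size());
    assert(out.ret == 7 && out.args == in.args);
}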
size_t
Export::serializedSize() const
{
    return SerializedSigSize(sig_) +
           sizeof(pod);
}

uint8_t*
Export::serialize(uint8_t* cursor) const
{
    cursor = SerializeSig(cursor, sig_);
    cursor = WriteBytes(cursor, &pod, sizeof(pod));
    return cursor;
}

const uint8_t*
Export::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
    (cursor = DeserializeSig(cx, cursor, &sig_)) &&
    (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
    return cursor;
}

bool
Export::clone(JSContext* cx, Export* out) const
{
    out->pod = pod;
    return out->sig_.clone(sig_);
}

size_t
Export::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
    return SizeOfSigExcludingThis(sig_, mallocSizeOf);
}

size_t
Import::serializedSize() const
{
    return SerializedSigSize(sig_) +
           sizeof(pod);
}

uint8_t*
Import::serialize(uint8_t* cursor) const
{
    cursor = SerializeSig(cursor, sig_);
    cursor = WriteBytes(cursor, &pod, sizeof(pod));
    return cursor;
}

const uint8_t*
Import::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
    (cursor = DeserializeSig(cx, cursor, &sig_)) &&
    (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
    return cursor;
}

bool
Import::clone(JSContext* cx, Import* out) const
{
    out->pod = pod;
    return out->sig_.clone(sig_);
}

size_t
Import::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
    return SizeOfSigExcludingThis(sig_, mallocSizeOf);
}

CodeRange::CodeRange(Kind kind, Offsets offsets)
  : begin_(offsets.begin),
    profilingReturn_(0),
    end_(offsets.end),
    funcIndex_(0),
    funcLineOrBytecode_(0),
    funcBeginToTableEntry_(0),
    funcBeginToTableProfilingJump_(0),
    funcBeginToNonProfilingEntry_(0),
    funcProfilingJumpToProfilingReturn_(0),
    funcProfilingEpilogueToProfilingReturn_(0),
    kind_(kind)
{
    MOZ_ASSERT(begin_ <= end_);
    MOZ_ASSERT(kind_ == Entry || kind_ == Inline || kind_ == CallThunk);
}

CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
  : begin_(offsets.begin),
    profilingReturn_(offsets.profilingReturn),
    end_(offsets.end),
    funcIndex_(0),
    funcLineOrBytecode_(0),
    funcBeginToTableEntry_(0),
    funcBeginToTableProfilingJump_(0),
    funcBeginToNonProfilingEntry_(0),
    funcProfilingJumpToProfilingReturn_(0),
    funcProfilingEpilogueToProfilingReturn_(0),
    kind_(kind)
{
    MOZ_ASSERT(begin_ < profilingReturn_);
    MOZ_ASSERT(profilingReturn_ < end_);
    MOZ_ASSERT(kind_ == ImportJitExit || kind_ == ImportInterpExit);
}

CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
  : begin_(offsets.begin),
    profilingReturn_(offsets.profilingReturn),
    end_(offsets.end),
    funcIndex_(funcIndex),
    funcLineOrBytecode_(funcLineOrBytecode),
    funcBeginToTableEntry_(offsets.tableEntry - begin_),
    funcBeginToTableProfilingJump_(offsets.tableProfilingJump - begin_),
    funcBeginToNonProfilingEntry_(offsets.nonProfilingEntry - begin_),
    funcProfilingJumpToProfilingReturn_(profilingReturn_ - offsets.profilingJump),
    funcProfilingEpilogueToProfilingReturn_(profilingReturn_ - offsets.profilingEpilogue),
    kind_(Function)
{
    MOZ_ASSERT(begin_ < profilingReturn_);
    MOZ_ASSERT(profilingReturn_ < end_);
    MOZ_ASSERT(funcBeginToTableEntry_ == offsets.tableEntry - begin_);
    MOZ_ASSERT(funcBeginToTableProfilingJump_ == offsets.tableProfilingJump - begin_);
    MOZ_ASSERT(funcBeginToNonProfilingEntry_ == offsets.nonProfilingEntry - begin_);
    MOZ_ASSERT(funcProfilingJumpToProfilingReturn_ == profilingReturn_ - offsets.profilingJump);
    MOZ_ASSERT(funcProfilingEpilogueToProfilingReturn_ == profilingReturn_ - offsets.profilingEpilogue);
}
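Note how the function-body constructor above stores its prologue/epilogue offsets as uint8_t deltas from begin_ or profilingReturn_ rather than as absolute uint32_t fields, and how the trailing assertions re-derive each delta to catch truncation. A compact sketch of that pack-then-verify pattern:

#include <cassert>
#include <cstdint>

// Store a small offset relative to a base, asserting it fits in 8 bits --
// the same compression CodeRange applies to its prologue/epilogue offsets.
struct PackedRange
{
    uint32_t begin;
    uint8_t beginToEntry;   // entry == begin + beginToEntry

    PackedRange(uint32_t begin, uint32_t entry)
      : begin(begin),
        beginToEntry(static_cast<uint8_t>(entry - begin))
    {
        // Round-trip check: fails if the delta didn't fit in uint8_t.
        assert(this->begin + beginToEntry == entry);
    }

    uint32_t entry() const { return begin + beginToEntry; }
};

int main()
{
    PackedRange r(1000, 1017);      // delta 17 fits
    assert(r.entry() == 1017);
    // PackedRange bad(1000, 2000); // delta 1000 would trip the assert
}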
static size_t
NullableStringLength(const char* chars)
{
    return chars ? strlen(chars) : 0;
}

size_t
CacheableChars::serializedSize() const
{
    return sizeof(uint32_t) + NullableStringLength(get());
}

uint8_t*
CacheableChars::serialize(uint8_t* cursor) const
{
    uint32_t length = NullableStringLength(get());
    cursor = WriteBytes(cursor, &length, sizeof(uint32_t));
    cursor = WriteBytes(cursor, get(), length);
    return cursor;
}

const uint8_t*
CacheableChars::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
    uint32_t length;
    cursor = ReadBytes(cursor, &length, sizeof(uint32_t));

    reset(cx->pod_calloc<char>(length + 1));
    if (!get())
        return nullptr;

    cursor = ReadBytes(cursor, get(), length);
    return cursor;
}

bool
CacheableChars::clone(JSContext* cx, CacheableChars* out) const
{
    uint32_t length = NullableStringLength(get());

    UniqueChars chars(cx->pod_calloc<char>(length + 1));
    if (!chars)
        return false;

    PodCopy(chars.get(), get(), length);

    *out = Move(chars);
    return true;
}

size_t
CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
    return mallocSizeOf(get());
}

size_t
ExportMap::serializedSize() const
{
@@ -497,14 +214,6 @@ ExportMap::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
    return cursor;
}

bool
ExportMap::clone(JSContext* cx, ExportMap* map) const
{
    return CloneVector(cx, fieldNames, &map->fieldNames) &&
           ClonePodVector(cx, fieldsToExports, &map->fieldsToExports) &&
           ClonePodVector(cx, exportFuncIndices, &map->exportFuncIndices);
}

size_t
ExportMap::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
@@ -513,92 +222,6 @@ ExportMap::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
           exportFuncIndices.sizeOfExcludingThis(mallocSizeOf);
}

size_t
ModuleData::serializedSize() const
{
    return sizeof(pod()) +
           codeBytes +
           SerializedVectorSize(imports) +
           SerializedVectorSize(exports) +
           SerializedPodVectorSize(heapAccesses) +
           SerializedPodVectorSize(codeRanges) +
           SerializedPodVectorSize(callSites) +
           SerializedPodVectorSize(callThunks) +
           SerializedVectorSize(prettyFuncNames) +
           filename.serializedSize();
}

uint8_t*
ModuleData::serialize(uint8_t* cursor) const
{
    cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
    cursor = WriteBytes(cursor, code.get(), codeBytes);
    cursor = SerializeVector(cursor, imports);
    cursor = SerializeVector(cursor, exports);
    cursor = SerializePodVector(cursor, heapAccesses);
    cursor = SerializePodVector(cursor, codeRanges);
    cursor = SerializePodVector(cursor, callSites);
    cursor = SerializePodVector(cursor, callThunks);
    cursor = SerializeVector(cursor, prettyFuncNames);
    cursor = filename.serialize(cursor);
    return cursor;
}

/* static */ const uint8_t*
ModuleData::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
    cursor = ReadBytes(cursor, &pod(), sizeof(pod()));

    code = AllocateCode(cx, totalBytes());
    if (!code)
        return nullptr;
    cursor = ReadBytes(cursor, code.get(), codeBytes);

    (cursor = DeserializeVector(cx, cursor, &imports)) &&
    (cursor = DeserializeVector(cx, cursor, &exports)) &&
    (cursor = DeserializePodVector(cx, cursor, &heapAccesses)) &&
    (cursor = DeserializePodVector(cx, cursor, &codeRanges)) &&
    (cursor = DeserializePodVector(cx, cursor, &callSites)) &&
    (cursor = DeserializePodVector(cx, cursor, &callThunks)) &&
    (cursor = DeserializeVector(cx, cursor, &prettyFuncNames)) &&
    (cursor = filename.deserialize(cx, cursor));
    return cursor;
}

bool
ModuleData::clone(JSContext* cx, ModuleData* out) const
{
    out->pod() = pod();

    out->code = AllocateCode(cx, totalBytes());
    if (!out->code)
        return false;
    memcpy(out->code.get(), code.get(), codeBytes);

    return CloneVector(cx, imports, &out->imports) &&
           CloneVector(cx, exports, &out->exports) &&
           ClonePodVector(cx, heapAccesses, &out->heapAccesses) &&
           ClonePodVector(cx, codeRanges, &out->codeRanges) &&
           ClonePodVector(cx, callSites, &out->callSites) &&
           ClonePodVector(cx, callThunks, &out->callThunks) &&
           CloneVector(cx, prettyFuncNames, &out->prettyFuncNames) &&
           filename.clone(cx, &out->filename);
}

size_t
ModuleData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
    // Module::addSizeOfMisc takes care of code and global memory.
    return SizeOfVectorExcludingThis(imports, mallocSizeOf) +
           SizeOfVectorExcludingThis(exports, mallocSizeOf) +
           heapAccesses.sizeOfExcludingThis(mallocSizeOf) +
           codeRanges.sizeOfExcludingThis(mallocSizeOf) +
           callSites.sizeOfExcludingThis(mallocSizeOf) +
           callThunks.sizeOfExcludingThis(mallocSizeOf) +
           SizeOfVectorExcludingThis(prettyFuncNames, mallocSizeOf) +
           filename.sizeOfExcludingThis(mallocSizeOf);
}

uint8_t*
Module::rawHeapPtr() const
{
@@ -634,7 +257,7 @@ Module::specializeToHeap(ArrayBufferObjectMaybeShared* heap)
    // i.e. ptr > heapLength - data-type-byte-size - offset. data-type-byte-size
    // and offset are already included in the addend so we
    // just have to add the heap length here.
    for (const HeapAccess& access : module_->heapAccesses) {
    for (const HeapAccess& access : metadata_->heapAccesses) {
        if (access.hasLengthCheck())
            X86Encoding::AddInt32(access.patchLengthAt(code()), heapLength);
        void* addr = access.patchHeapPtrImmAt(code());
@@ -650,14 +273,14 @@ Module::specializeToHeap(ArrayBufferObjectMaybeShared* heap)
    // checks at the right places. All accesses that have been recorded are the
    // only ones that need bound checks (see also
    // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap)
    for (const HeapAccess& access : module_->heapAccesses) {
    for (const HeapAccess& access : metadata_->heapAccesses) {
        // See comment above for x86 codegen.
        if (access.hasLengthCheck())
            X86Encoding::AddInt32(access.patchLengthAt(code()), heapLength);
    }
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
      defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    for (const HeapAccess& access : module_->heapAccesses)
    for (const HeapAccess& access : metadata_->heapAccesses)
        Assembler::UpdateBoundsCheck(heapLength, (Instruction*)(access.insnOffset() + code()));
#endif

@@ -677,8 +300,8 @@ Module::despecializeFromHeap(ArrayBufferObjectMaybeShared* heap)
#if defined(JS_CODEGEN_X86)
    uint32_t heapLength = heap->byteLength();
    uint8_t* ptrBase = heap->dataPointerEither().unwrap(/*safe - used for value*/);
    for (unsigned i = 0; i < module_->heapAccesses.length(); i++) {
        const HeapAccess& access = module_->heapAccesses[i];
    for (unsigned i = 0; i < metadata_->heapAccesses.length(); i++) {
        const HeapAccess& access = metadata_->heapAccesses[i];
        if (access.hasLengthCheck())
            X86Encoding::AddInt32(access.patchLengthAt(code()), -heapLength);
        void* addr = access.patchHeapPtrImmAt(code());
@@ -688,8 +311,8 @@ Module::despecializeFromHeap(ArrayBufferObjectMaybeShared* heap)
    }
#elif defined(JS_CODEGEN_X64)
    uint32_t heapLength = heap->byteLength();
    for (unsigned i = 0; i < module_->heapAccesses.length(); i++) {
        const HeapAccess& access = module_->heapAccesses[i];
    for (unsigned i = 0; i < metadata_->heapAccesses.length(); i++) {
        const HeapAccess& access = metadata_->heapAccesses[i];
        if (access.hasLengthCheck())
            X86Encoding::AddInt32(access.patchLengthAt(code()), -heapLength);
    }
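specializeToHeap adds heapLength to each patched length-check immediate and despecializeFromHeap subtracts the same value, so a module can later be re-specialized to a different buffer. The patch arithmetic round-trips exactly; a tiny simulation, with addInt32At as a local stand-in for X86Encoding::AddInt32:

#include <cassert>
#include <cstdint>
#include <cstring>

// Add a signed delta to a 32-bit immediate embedded in a code buffer, the
// way X86Encoding::AddInt32 patches a length check in place.
static void addInt32At(uint8_t* code, size_t offset, int32_t delta)
{
    int32_t imm;
    memcpy(&imm, code + offset, sizeof(imm));
    imm += delta;
    memcpy(code + offset, &imm, sizeof(imm));
}

int main()
{
    uint8_t code[16] = {};
    const size_t patchOffset = 4;       // where the immediate lives
    const int32_t baseline = -24;       // addend baked in at compile time

    memcpy(code + patchOffset, &baseline, sizeof(baseline));

    int32_t heapLength = 65536;
    addInt32At(code, patchOffset, heapLength);    // specialize
    addInt32At(code, patchOffset, -heapLength);   // despecialize

    int32_t imm;
    memcpy(&imm, code + patchOffset, sizeof(imm));
    assert(imm == baseline);            // round-trips exactly
}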
@@ -712,7 +335,7 @@ Module::sendCodeRangesToProfiler(JSContext* cx)
    if (!enabled)
        return true;

    for (const CodeRange& codeRange : module_->codeRanges) {
    for (const CodeRange& codeRange : metadata_->codeRanges) {
        if (!codeRange.isFunction())
            continue;

@@ -732,7 +355,7 @@ Module::sendCodeRangesToProfiler(JSContext* cx)

#ifdef JS_ION_PERF
        if (PerfFuncEnabled()) {
            const char* file = module_->filename.get();
            const char* file = metadata_->filename.get();
            unsigned line = codeRange.funcLineOrBytecode();
            unsigned column = 0;
            writePerfSpewerAsmJSFunctionMap(start, size, file, line, column, name);
@@ -775,7 +398,7 @@ Module::setProfilingEnabled(JSContext* cx, bool enabled)
    // do it now since, once we start sampling, we'll be in a signal-handling
    // context where we cannot malloc.
    if (enabled) {
        for (const CodeRange& codeRange : module_->codeRanges) {
        for (const CodeRange& codeRange : metadata_->codeRanges) {
            if (!codeRange.isFunction())
                continue;

@@ -786,7 +409,7 @@ Module::setProfilingEnabled(JSContext* cx, bool enabled)

            UniqueChars label(JS_smprintf("%s (%s:%u)",
                                          funcName,
                                          module_->filename.get(),
                                          metadata_->filename.get(),
                                          codeRange.funcLineOrBytecode()));
            if (!label) {
                ReportOutOfMemory(cx);
@@ -805,17 +428,17 @@ Module::setProfilingEnabled(JSContext* cx, bool enabled)

    // Patch callsites and returns to execute profiling prologues/epilogues.
    {
        AutoWritableJitCode awjc(cx->runtime(), code(), codeBytes());
        AutoWritableJitCode awjc(cx->runtime(), code(), codeLength());
        AutoFlushICache afc("Module::setProfilingEnabled");
        AutoFlushICache::setRange(uintptr_t(code()), codeBytes());
        AutoFlushICache::setRange(uintptr_t(code()), codeLength());

        for (const CallSite& callSite : module_->callSites)
        for (const CallSite& callSite : metadata_->callSites)
            ToggleProfiling(*this, callSite, enabled);

        for (const CallThunk& callThunk : module_->callThunks)
        for (const CallThunk& callThunk : metadata_->callThunks)
            ToggleProfiling(*this, callThunk, enabled);

        for (const CodeRange& codeRange : module_->codeRanges)
        for (const CodeRange& codeRange : metadata_->codeRanges)
            ToggleProfiling(*this, codeRange, enabled);
    }

@@ -853,14 +476,17 @@ Module::clone(JSContext* cx, const StaticLinkData& link, Module* out) const
{
    MOZ_ASSERT(dynamicallyLinked_);

    // The out->module_ field was already cloned and initialized when 'out' was
    // The out->metadata_ field was already cloned and initialized when 'out' was
    // constructed. This function should clone the rest.
    MOZ_ASSERT(out->module_);
    MOZ_ASSERT(out->metadata_);

    // Copy the profiling state over too since the cloned machine code
    // implicitly brings the profiling mode.
    out->profilingEnabled_ = profilingEnabled_;

    if (!CloneVector(cx, funcLabels_, &out->funcLabels_))
        return false;
    for (const CacheableChars& label : funcLabels_) {
        if (!out->funcLabels_.emplaceBack(DuplicateString(label.get())))
            return false;
    }

#ifdef DEBUG
    // Put the symbolic links back to -1 so PatchDataWithValueCheck assertions
@@ -869,9 +495,9 @@ Module::clone(JSContext* cx, const StaticLinkData& link, Module* out) const
        void* callee = AddressOf(imm, cx);
        const Uint32Vector& offsets = link.symbolicLinks[imm];
        for (uint32_t offset : offsets) {
            jit::Assembler::PatchDataWithValueCheck(jit::CodeLocationLabel(out->code() + offset),
                                                    jit::PatchedImmPtr((void*)-1),
                                                    jit::PatchedImmPtr(callee));
            Assembler::PatchDataWithValueCheck(CodeLocationLabel(out->code() + offset),
                                               PatchedImmPtr((void*)-1),
                                               PatchedImmPtr(callee));
        }
    }
#endif
@@ -884,9 +510,9 @@ Module::clone(JSContext* cx, const StaticLinkData& link, Module* out) const
    return true;
}


Module::Module(UniqueModuleData module)
  : module_(Move(module)),
Module::Module(UniqueCodeSegment codeSegment, const Metadata& metadata)
  : codeSegment_(Move(codeSegment)),
    metadata_(&metadata),
    staticallyLinked_(false),
    interrupt_(nullptr),
    outOfBounds_(nullptr),
@@ -898,7 +524,7 @@ Module::Module(UniqueModuleData module)

#ifdef DEBUG
    uint32_t lastEnd = 0;
    for (const CodeRange& cr : module_->codeRanges) {
    for (const CodeRange& cr : metadata_->codeRanges) {
        MOZ_ASSERT(cr.begin() >= lastEnd);
        lastEnd = cr.end();
    }
@@ -935,11 +561,11 @@ Module::readBarrier()
/* virtual */ void
Module::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, size_t* data)
{
    *code += codeBytes();
    *code += codeSegment_->codeLength();
    *data += mallocSizeOf(this) +
             globalBytes() +
             mallocSizeOf(module_.get()) +
             module_->sizeOfExcludingThis(mallocSizeOf) +
             codeSegment_->globalDataLength() +
             mallocSizeOf(metadata_.get()) +
             metadata_->sizeOfExcludingThis(mallocSizeOf) +
             source_.sizeOfExcludingThis(mallocSizeOf) +
             funcPtrTables_.sizeOfExcludingThis(mallocSizeOf) +
             SizeOfVectorExcludingThis(funcLabels_, mallocSizeOf);
@@ -964,13 +590,13 @@ Module::displayURL() const
bool
Module::containsFunctionPC(void* pc) const
{
    return pc >= code() && pc < (code() + module_->functionBytes);
    return pc >= code() && pc < (code() + metadata_->functionLength);
}

bool
Module::containsCodePC(void* pc) const
{
    return pc >= code() && pc < (code() + codeBytes());
    return pc >= code() && pc < (code() + codeLength());
}

struct CallSiteRetAddrOffset
@@ -987,13 +613,13 @@ Module::lookupCallSite(void* returnAddress) const
{
    uint32_t target = ((uint8_t*)returnAddress) - code();
    size_t lowerBound = 0;
    size_t upperBound = module_->callSites.length();
    size_t upperBound = metadata_->callSites.length();

    size_t match;
    if (!BinarySearch(CallSiteRetAddrOffset(module_->callSites), lowerBound, upperBound, target, &match))
    if (!BinarySearch(CallSiteRetAddrOffset(metadata_->callSites), lowerBound, upperBound, target, &match))
        return nullptr;

    return &module_->callSites[match];
    return &metadata_->callSites[match];
}

const CodeRange*
@@ -1001,13 +627,13 @@ Module::lookupCodeRange(void* pc) const
{
    CodeRange::PC target((uint8_t*)pc - code());
    size_t lowerBound = 0;
    size_t upperBound = module_->codeRanges.length();
    size_t upperBound = metadata_->codeRanges.length();

    size_t match;
    if (!BinarySearch(module_->codeRanges, lowerBound, upperBound, target, &match))
    if (!BinarySearch(metadata_->codeRanges, lowerBound, upperBound, target, &match))
        return nullptr;

    return &module_->codeRanges[match];
    return &metadata_->codeRanges[match];
}

struct HeapAccessOffset
@@ -1026,13 +652,13 @@ Module::lookupHeapAccess(void* pc) const

    uint32_t target = ((uint8_t*)pc) - code();
    size_t lowerBound = 0;
    size_t upperBound = module_->heapAccesses.length();
    size_t upperBound = metadata_->heapAccesses.length();

    size_t match;
    if (!BinarySearch(HeapAccessOffset(module_->heapAccesses), lowerBound, upperBound, target, &match))
    if (!BinarySearch(HeapAccessOffset(metadata_->heapAccesses), lowerBound, upperBound, target, &match))
        return nullptr;

    return &module_->heapAccesses[match];
    return &metadata_->heapAccesses[match];
}

bool
@@ -1047,7 +673,7 @@ Module::staticallyLink(ExclusiveContext* cx, const StaticLinkData& linkData)
    JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
    MOZ_ASSERT(IsCompilingAsmJS());
    AutoFlushICache afc("Module::staticallyLink", /* inhibit = */ true);
    AutoFlushICache::setRange(uintptr_t(code()), codeBytes());
    AutoFlushICache::setRange(uintptr_t(code()), codeLength());

    interrupt_ = code() + linkData.pod.interruptOffset;
    outOfBounds_ = code() + linkData.pod.outOfBoundsOffset;
@@ -1226,7 +852,7 @@ Module::dynamicallyLink(JSContext* cx,
    JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
    MOZ_ASSERT(IsCompilingAsmJS());
    AutoFlushICache afc("Module::dynamicallyLink");
    AutoFlushICache::setRange(uintptr_t(code()), codeBytes());
    AutoFlushICache::setRange(uintptr_t(code()), codeLength());

    // Initialize imports with actual imported values.
    MOZ_ASSERT(importArgs.length() == imports().length());
@@ -1242,8 +868,8 @@ Module::dynamicallyLink(JSContext* cx,
    if (usesHeap())
        specializeToHeap(heap);

    // See AllocateCode comment above.
    if (!ExecutableAllocator::makeExecutable(code(), codeBytes())) {
    // See CodeSegment::allocate comment above.
    if (!ExecutableAllocator::makeExecutable(code(), codeLength())) {
        ReportOutOfMemory(cx);
        return false;
    }
@@ -1701,9 +1327,9 @@ Module::callImport_f64(int32_t importIndex, int32_t argc, uint64_t* argv)
const char*
Module::maybePrettyFuncName(uint32_t funcIndex) const
{
    if (funcIndex >= module_->prettyFuncNames.length())
    if (funcIndex >= metadata_->prettyFuncNames.length())
        return nullptr;
    return module_->prettyFuncNames[funcIndex].get();
    return metadata_->prettyFuncNames[funcIndex].get();
}

const char*
@@ -1833,16 +1459,13 @@ WasmModuleObject::create(ExclusiveContext* cx)
    return &obj->as<WasmModuleObject>();
}

bool
WasmModuleObject::init(Module* module)
void
WasmModuleObject::init(Module& module)
{
    MOZ_ASSERT(is<WasmModuleObject>());
    MOZ_ASSERT(!hasModule());
    if (!module)
        return false;
    module->setOwner(this);
    setReservedSlot(MODULE_SLOT, PrivateValue(module));
    return true;
    module.setOwner(this);
    setReservedSlot(MODULE_SLOT, PrivateValue(&module));
}

Module&
@@ -21,7 +21,7 @@

#include "mozilla/LinkedList.h"

#include "asmjs/WasmTypes.h"
#include "asmjs/WasmCode.h"
#include "gc/Barrier.h"
#include "vm/MallocProvider.h"
#include "vm/NativeObject.h"
@@ -37,8 +37,11 @@ namespace wasm {

// The StaticLinkData contains all the metadata necessary to perform
// Module::staticallyLink but is not necessary afterwards.
//
// StaticLinkData is built incrementally by ModuleGenerator and then shared
// immutably between modules.

struct StaticLinkData
struct StaticLinkData : RefCounted<StaticLinkData>
{
    struct InternalLink {
        enum Kind {
@@ -84,244 +87,8 @@ struct StaticLinkData
    WASM_DECLARE_SERIALIZABLE(StaticLinkData)
};

typedef UniquePtr<StaticLinkData> UniqueStaticLinkData;

// An Export represents a single function inside a wasm Module that has been
// exported one or more times.

class Export
{
    Sig sig_;
    struct CacheablePod {
        uint32_t stubOffset_;
    } pod;

  public:
    Export() = default;
    explicit Export(Sig&& sig)
      : sig_(Move(sig))
    {
        pod.stubOffset_ = UINT32_MAX;
    }
    Export(Export&& rhs)
      : sig_(Move(rhs.sig_)),
        pod(rhs.pod)
    {}

    void initStubOffset(uint32_t stubOffset) {
        MOZ_ASSERT(pod.stubOffset_ == UINT32_MAX);
        pod.stubOffset_ = stubOffset;
    }

    uint32_t stubOffset() const {
        return pod.stubOffset_;
    }
    const Sig& sig() const {
        return sig_;
    }

    WASM_DECLARE_SERIALIZABLE(Export)
};

typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;

// An Import describes a wasm module import. Currently, only functions can be
// imported in wasm. A function import includes the signature used within the
// module to call it.

class Import
{
    Sig sig_;
    struct CacheablePod {
        uint32_t exitGlobalDataOffset_;
        uint32_t interpExitCodeOffset_;
        uint32_t jitExitCodeOffset_;
    } pod;

  public:
    Import() {}
    Import(Import&& rhs) : sig_(Move(rhs.sig_)), pod(rhs.pod) {}
    Import(Sig&& sig, uint32_t exitGlobalDataOffset)
      : sig_(Move(sig))
    {
        pod.exitGlobalDataOffset_ = exitGlobalDataOffset;
        pod.interpExitCodeOffset_ = 0;
        pod.jitExitCodeOffset_ = 0;
    }

    void initInterpExitOffset(uint32_t off) {
        MOZ_ASSERT(!pod.interpExitCodeOffset_);
        pod.interpExitCodeOffset_ = off;
    }
    void initJitExitOffset(uint32_t off) {
        MOZ_ASSERT(!pod.jitExitCodeOffset_);
        pod.jitExitCodeOffset_ = off;
    }

    const Sig& sig() const {
        return sig_;
    }
    uint32_t exitGlobalDataOffset() const {
        return pod.exitGlobalDataOffset_;
    }
    uint32_t interpExitCodeOffset() const {
        return pod.interpExitCodeOffset_;
    }
    uint32_t jitExitCodeOffset() const {
        return pod.jitExitCodeOffset_;
    }

    WASM_DECLARE_SERIALIZABLE(Import)
};

typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;

// A CodeRange describes a single contiguous range of code within a wasm
// module's code segment. A CodeRange describes what the code does and, for
// function bodies, the name and source coordinates of the function.

class CodeRange
{
  public:
    enum Kind { Function, Entry, ImportJitExit, ImportInterpExit, Inline, CallThunk };

  private:
    // All fields are treated as cacheable POD:
    uint32_t begin_;
    uint32_t profilingReturn_;
    uint32_t end_;
    uint32_t funcIndex_;
    uint32_t funcLineOrBytecode_;
    uint8_t funcBeginToTableEntry_;
    uint8_t funcBeginToTableProfilingJump_;
    uint8_t funcBeginToNonProfilingEntry_;
    uint8_t funcProfilingJumpToProfilingReturn_;
    uint8_t funcProfilingEpilogueToProfilingReturn_;
    Kind kind_ : 8;

  public:
    CodeRange() = default;
    CodeRange(Kind kind, Offsets offsets);
    CodeRange(Kind kind, ProfilingOffsets offsets);
    CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);

    // All CodeRanges have a begin and end.

    uint32_t begin() const {
        return begin_;
    }
    uint32_t end() const {
        return end_;
    }

    // Other fields are only available for certain CodeRange::Kinds.

    Kind kind() const {
        return kind_;
    }

    bool isFunction() const {
        return kind() == Function;
    }
    bool isImportExit() const {
        return kind() == ImportJitExit || kind() == ImportInterpExit;
    }
    bool isInline() const {
        return kind() == Inline;
    }

    // Every CodeRange except entry and inline stubs has a profiling return
    // which is used for asynchronous profiling to determine the frame pointer.

    uint32_t profilingReturn() const {
        MOZ_ASSERT(isFunction() || isImportExit());
        return profilingReturn_;
    }

    // Functions have offsets which allow patching to selectively execute
    // profiling prologues/epilogues.

    uint32_t funcProfilingEntry() const {
        MOZ_ASSERT(isFunction());
        return begin();
    }
    uint32_t funcTableEntry() const {
        MOZ_ASSERT(isFunction());
        return begin_ + funcBeginToTableEntry_;
    }
    uint32_t funcTableProfilingJump() const {
        MOZ_ASSERT(isFunction());
        return begin_ + funcBeginToTableProfilingJump_;
    }
    uint32_t funcNonProfilingEntry() const {
        MOZ_ASSERT(isFunction());
        return begin_ + funcBeginToNonProfilingEntry_;
    }
    uint32_t funcProfilingJump() const {
        MOZ_ASSERT(isFunction());
        return profilingReturn_ - funcProfilingJumpToProfilingReturn_;
    }
    uint32_t funcProfilingEpilogue() const {
        MOZ_ASSERT(isFunction());
        return profilingReturn_ - funcProfilingEpilogueToProfilingReturn_;
    }
    uint32_t funcIndex() const {
        MOZ_ASSERT(isFunction());
        return funcIndex_;
    }
    uint32_t funcLineOrBytecode() const {
        MOZ_ASSERT(isFunction());
        return funcLineOrBytecode_;
    }

    // A sorted array of CodeRanges can be looked up via BinarySearch and PC.

    struct PC {
        size_t offset;
        explicit PC(size_t offset) : offset(offset) {}
        bool operator==(const CodeRange& rhs) const {
            return offset >= rhs.begin() && offset < rhs.end();
        }
        bool operator<(const CodeRange& rhs) const {
            return offset < rhs.begin();
        }
    };
};

WASM_DECLARE_POD_VECTOR(CodeRange, CodeRangeVector)
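The PC helper gives mozilla::BinarySearch the asymmetric comparisons it needs to locate, within a sorted non-overlapping CodeRangeVector, the range containing an arbitrary code offset: operator== means "inside this range" while operator< means "wholly before it". A sketch of an equivalent lookup written against std::upper_bound:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Range { uint32_t begin, end; };   // [begin, end), sorted, non-overlapping

// Mirrors Module::lookupCodeRange: binary-search the sorted ranges for the
// one containing `offset`, or return nullptr if it falls in a gap.
static const Range* lookup(const std::vector<Range>& ranges, uint32_t offset)
{
    // First range whose begin is > offset...
    auto it = std::upper_bound(ranges.begin(), ranges.end(), offset,
                               [](uint32_t off, const Range& r) {
                                   return off < r.begin;
                               });
    if (it == ranges.begin())
        return nullptr;                   // offset precedes every range
    --it;                                 // ...so this one may contain it
    return offset < it->end ? &*it : nullptr;
}

int main()
{
    std::vector<Range> ranges = {{0, 64}, {64, 200}, {256, 300}};
    assert(lookup(ranges, 70)->begin == 64);
    assert(lookup(ranges, 210) == nullptr);   // gap between 200 and 256
    assert(lookup(ranges, 256)->end == 300);
}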
// A CallThunk describes the offset and target of thunks so that they may be
// patched at runtime when profiling is toggled. Thunks are emitted to connect
// callsites that are too far away from callees to fit in a single call
// instruction's relative offset.

struct CallThunk
{
    uint32_t offset;
    union {
        uint32_t funcIndex;
        uint32_t codeRangeIndex;
    } u;

    CallThunk(uint32_t offset, uint32_t funcIndex) : offset(offset) { u.funcIndex = funcIndex; }
    CallThunk() = default;
};

WASM_DECLARE_POD_VECTOR(CallThunk, CallThunkVector)
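The union makes CallThunk's second field two-phase: while functions are still being compiled a thunk records its target's function index, and once every function has a CodeRange (see the patch loop in ModuleGenerator::finishCodegen earlier in this commit) the same storage is overwritten with the resolved code-range index. A sketch of that in-place resolve step:

#include <cassert>
#include <cstdint>
#include <vector>

struct Thunk
{
    uint32_t offset;
    union { uint32_t funcIndex; uint32_t codeRangeIndex; } u;
};

int main()
{
    // funcIndexToCodeRange[f] maps function f to its CodeRange slot.
    std::vector<uint32_t> funcIndexToCodeRange = {3, 0, 5};

    std::vector<Thunk> thunks = {{64, {2}}, {128, {0}}};   // u.funcIndex set

    // Mirrors ModuleGenerator::finishCodegen: replace the provisional
    // function index with the final code-range index in place.
    for (Thunk& t : thunks)
        t.u.codeRangeIndex = funcIndexToCodeRange[t.u.funcIndex];

    assert(thunks[0].u.codeRangeIndex == 5);
    assert(thunks[1].u.codeRangeIndex == 3);
}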
// CacheableChars is used to cacheably store UniqueChars.

struct CacheableChars : UniqueChars
{
    CacheableChars() = default;
    explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
    MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs) : UniqueChars(Move(rhs)) {}
    CacheableChars(CacheableChars&& rhs) : UniqueChars(Move(rhs)) {}
    void operator=(CacheableChars&& rhs) { UniqueChars::operator=(Move(rhs)); }
    WASM_DECLARE_SERIALIZABLE(CacheableChars)
};

typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
typedef RefPtr<StaticLinkData> MutableStaticLinkData;
typedef RefPtr<const StaticLinkData> SharedStaticLinkData;

// The ExportMap describes how Exports are mapped to the fields of the export
// object. This allows a single Export to be used in multiple fields.
@@ -332,10 +99,13 @@ typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
// ExportMap's exportFuncIndices vector).
// Lastly, the 'exportFuncIndices' vector provides, for each exported function,
// the internal index of the function.
//
// The ExportMap is built incrementally by ModuleGenerator and then shared
// immutably between modules.

static const uint32_t MemoryExport = UINT32_MAX;

struct ExportMap
struct ExportMap : RefCounted<ExportMap>
{
    CacheableCharsVector fieldNames;
    Uint32Vector fieldsToExports;
@@ -344,80 +114,8 @@ struct ExportMap
    WASM_DECLARE_SERIALIZABLE(ExportMap)
};
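The double indirection documented above (field name to export index to function index) is what lets several export-object fields alias a single Export. A small sketch of the lookup path, with plain std::vectors standing in for the ExportMap members:

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

int main()
{
    // Two fields, "run" and "main", alias the same single Export (index 0),
    // which in turn refers to internal function index 17.
    std::vector<std::string> fieldNames        = {"run", "main"};
    std::vector<uint32_t>    fieldsToExports   = {0, 0};
    std::vector<uint32_t>    exportFuncIndices = {17};

    for (size_t field = 0; field < fieldNames.size(); field++) {
        uint32_t exportIndex = fieldsToExports[field];
        uint32_t funcIndex = exportFuncIndices[exportIndex];
        assert(funcIndex == 17);
    }
}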
typedef UniquePtr<ExportMap> UniqueExportMap;
|
||||
|
||||
// A UniqueCodePtr owns allocated executable code. Code passed to the Module
|
||||
// constructor must be allocated via AllocateCode.
|
||||
|
||||
class CodeDeleter
|
||||
{
|
||||
uint32_t bytes_;
|
||||
public:
|
||||
CodeDeleter() : bytes_(0) {}
|
||||
explicit CodeDeleter(uint32_t bytes) : bytes_(bytes) {}
|
||||
void operator()(uint8_t* p);
|
||||
};
|
||||
typedef UniquePtr<uint8_t, CodeDeleter> UniqueCodePtr;
|
||||
|
||||
UniqueCodePtr
|
||||
AllocateCode(ExclusiveContext* cx, size_t bytes);

// A wasm module can either use no heap, an unshared heap (ArrayBuffer) or a
// shared heap (SharedArrayBuffer).

enum class HeapUsage
{
    None = false,
    Unshared = 1,
    Shared = 2
};

static inline bool
UsesHeap(HeapUsage heapUsage)
{
    return bool(heapUsage);
}
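
// Illustrative sketch, not part of this patch: classifying a candidate heap
// buffer. UsesHeap relies on None being 0 so any other value converts to
// true; the helper and its use of is<SharedArrayBufferObject> are assumptions.
static HeapUsage
ClassifyHeap(JSObject* buffer)
{
    if (!buffer)
        return HeapUsage::None;
    return buffer->is<SharedArrayBufferObject>() ? HeapUsage::Shared : HeapUsage::Unshared;
}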

// ModuleCacheablePod holds the trivially-memcpy()able serializable portion of
// ModuleData.

struct ModuleCacheablePod
{
    uint32_t functionBytes;
    uint32_t codeBytes;
    uint32_t globalBytes;
    ModuleKind kind;
    HeapUsage heapUsage;
    CompileArgs compileArgs;

    uint32_t totalBytes() const { return codeBytes + globalBytes; }
};
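
// Illustrative sketch, not part of this patch: the layout these fields imply.
// The module owns one contiguous allocation of totalBytes(): machine code in
// [0, codeBytes) and global data in [codeBytes, codeBytes + globalBytes),
// which is what Module's globalData() accessor (code() + codeBytes) assumes.
static void
CheckLayout(const uint8_t* base, const ModuleCacheablePod& pod)
{
    const uint8_t* globalData = base + pod.codeBytes;
    MOZ_ASSERT(globalData + pod.globalBytes == base + pod.totalBytes());
    MOZ_ASSERT(pod.functionBytes <= pod.codeBytes);  // function bodies are a prefix of the code
}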

// ModuleData holds the guts of a Module. ModuleData is mutably built up by
// ModuleGenerator and then handed over to the Module constructor in finish(),
// where it is stored immutably.

struct ModuleData : ModuleCacheablePod
{
    ModuleData() : loadedFromCache(false) { mozilla::PodZero(&pod()); }
    ModuleCacheablePod& pod() { return *this; }
    const ModuleCacheablePod& pod() const { return *this; }

    UniqueCodePtr code;
    ImportVector imports;
    ExportVector exports;
    HeapAccessVector heapAccesses;
    CodeRangeVector codeRanges;
    CallSiteVector callSites;
    CallThunkVector callThunks;
    CacheableCharsVector prettyFuncNames;
    CacheableChars filename;
    bool loadedFromCache;

    WASM_DECLARE_SERIALIZABLE(ModuleData)
};

typedef UniquePtr<ModuleData> UniqueModuleData;
typedef RefPtr<ExportMap> MutableExportMap;
typedef RefPtr<const ExportMap> SharedExportMap;

// Module represents a compiled WebAssembly module which lives until the last
// reference to any exported functions is dropped. Modules must be wrapped by a

@@ -441,7 +139,6 @@ typedef UniquePtr<ModuleData> UniqueModuleData;

class Module : public mozilla::LinkedListElement<Module>
{
    typedef UniquePtr<const ModuleData> UniqueConstModuleData;
    struct ImportExit {
        void* code;
        jit::BaselineScript* baselineScript;
@@ -467,7 +164,8 @@ class Module : public mozilla::LinkedListElement<Module>
    typedef GCPtr<WasmModuleObject*> ModuleObjectPtr;

    // Initialized when constructed:
    const UniqueConstModuleData module_;
    const UniqueCodeSegment codeSegment_;
    const SharedMetadata metadata_;

    // Initialized during staticallyLink:
    bool staticallyLinked_;

@@ -509,7 +207,8 @@ class Module : public mozilla::LinkedListElement<Module>
    friend void* wasm::AddressOf(SymbolicAddress, ExclusiveContext*);

  protected:
    const ModuleData& base() const { return *module_; }
    const CodeSegment& codeSegment() const { return *codeSegment_; }
    const Metadata& metadata() const { return *metadata_; }
    MOZ_MUST_USE bool clone(JSContext* cx, const StaticLinkData& link, Module* clone) const;

  public:

@@ -517,7 +216,7 @@ class Module : public mozilla::LinkedListElement<Module>
    static const unsigned OffsetOfImportExitFun = offsetof(ImportExit, fun);
    static const unsigned SizeOfEntryArg = sizeof(EntryArg);

    explicit Module(UniqueModuleData module);
    explicit Module(UniqueCodeSegment codeSegment, const Metadata& metadata);
    virtual ~Module();
    virtual void trace(JSTracer* trc);
    virtual void readBarrier();

@@ -528,19 +227,18 @@ class Module : public mozilla::LinkedListElement<Module>

    void setSource(Bytes&& source) { source_ = Move(source); }

    uint8_t* code() const { return module_->code.get(); }
    uint32_t codeBytes() const { return module_->codeBytes; }
    uint8_t* globalData() const { return code() + module_->codeBytes; }
    uint32_t globalBytes() const { return module_->globalBytes; }
    HeapUsage heapUsage() const { return module_->heapUsage; }
    bool usesHeap() const { return UsesHeap(module_->heapUsage); }
    bool hasSharedHeap() const { return module_->heapUsage == HeapUsage::Shared; }
    CompileArgs compileArgs() const { return module_->compileArgs; }
    const ImportVector& imports() const { return module_->imports; }
    const ExportVector& exports() const { return module_->exports; }
    const CodeRangeVector& codeRanges() const { return module_->codeRanges; }
    const char* filename() const { return module_->filename.get(); }
    bool loadedFromCache() const { return module_->loadedFromCache; }
    uint8_t* code() const { return codeSegment_->code(); }
    uint32_t codeLength() const { return codeSegment_->codeLength(); }
    uint8_t* globalData() const { return codeSegment_->globalData(); }
    uint32_t globalDataLength() const { return codeSegment_->globalDataLength(); }
    HeapUsage heapUsage() const { return metadata_->heapUsage; }
    bool usesHeap() const { return UsesHeap(metadata_->heapUsage); }
    bool hasSharedHeap() const { return metadata_->heapUsage == HeapUsage::Shared; }
    CompileArgs compileArgs() const { return metadata_->compileArgs; }
    const ImportVector& imports() const { return metadata_->imports; }
    const ExportVector& exports() const { return metadata_->exports; }
    const CodeRangeVector& codeRanges() const { return metadata_->codeRanges; }
    const char* filename() const { return metadata_->filename.get(); }
    bool staticallyLinked() const { return staticallyLinked_; }
    bool dynamicallyLinked() const { return dynamicallyLinked_; }

@@ -549,14 +247,14 @@ class Module : public mozilla::LinkedListElement<Module>
    // semantics. The asAsmJS() member may be used as a checked downcast when
    // isAsmJS() is true.

    bool isAsmJS() const { return module_->kind == ModuleKind::AsmJS; }
    bool isAsmJS() const { return metadata_->kind == ModuleKind::AsmJS; }
    AsmJSModule& asAsmJS() { MOZ_ASSERT(isAsmJS()); return *(AsmJSModule*)this; }
    const AsmJSModule& asAsmJS() const { MOZ_ASSERT(isAsmJS()); return *(const AsmJSModule*)this; }
    virtual bool mutedErrors() const;
    virtual const char16_t* displayURL() const;
    virtual ScriptSource* maybeScriptSource() const { return nullptr; }

    // The range [0, functionBytes) is a subrange of [0, codeBytes) that
    // The range [0, functionLength) is a subrange of [0, codeLength) that
    // contains only function body code, not the stub code. This distinction is
    // used by the async interrupt handler to only interrupt when the pc is in
    // function code which, in turn, simplifies reasoning about how stubs
@@ -666,7 +364,7 @@ class WasmModuleObject : public NativeObject
  public:
    static const unsigned RESERVED_SLOTS = 1;
    static WasmModuleObject* create(ExclusiveContext* cx);
    MOZ_MUST_USE bool init(wasm::Module* module);
    void init(wasm::Module& module);
    wasm::Module& module() const;
    void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code, size_t* data);
    static const Class class_;

@@ -164,20 +164,6 @@ DeserializeVector(ExclusiveContext* cx, const uint8_t* cursor,
    return cursor;
}

template <class T, size_t N>
static inline MOZ_MUST_USE bool
CloneVector(JSContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
            mozilla::Vector<T, N, SystemAllocPolicy>* out)
{
    if (!out->resize(in.length()))
        return false;
    for (size_t i = 0; i < in.length(); i++) {
        if (!in[i].clone(cx, &(*out)[i]))
            return false;
    }
    return true;
}

template <class T, size_t N>
static inline size_t
SizeOfVectorExcludingThis(const mozilla::Vector<T, N, SystemAllocPolicy>& vec,
@@ -219,17 +205,6 @@ DeserializePodVector(ExclusiveContext* cx, const uint8_t* cursor,
    return cursor;
}

template <class T, size_t N>
static inline MOZ_MUST_USE bool
ClonePodVector(JSContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
               mozilla::Vector<T, N, SystemAllocPolicy>* out)
{
    if (!out->resize(in.length()))
        return false;
    mozilla::PodCopy(out->begin(), in.begin(), in.length());
    return true;
}

static inline MOZ_MUST_USE bool
GetCPUID(uint32_t* cpuId)
{

@@ -23,6 +23,8 @@
#include "mozilla/HashFunctions.h"
#include "mozilla/Maybe.h"
#include "mozilla/Move.h"
#include "mozilla/RefCounted.h"
#include "mozilla/RefPtr.h"

#include "NamespaceImports.h"

@@ -44,6 +46,7 @@ using mozilla::EnumeratedArray;
using mozilla::Maybe;
using mozilla::Move;
using mozilla::MallocSizeOf;
using mozilla::RefCounted;

typedef Vector<uint32_t, 0, SystemAllocPolicy> Uint32Vector;
@@ -57,17 +60,16 @@ template <> struct IsPod<js::wasm::Type> : TrueType {};
    } namespace js { namespace wasm {                                         \
    typedef Vector<Type, 0, SystemAllocPolicy> VectorName;

// A wasm Module and everything it contains must support serialization,
// deserialization and cloning. Some data can be simply copied as raw bytes and,
// A wasm Module and everything it contains must support serialization and
// deserialization. Some data can be simply copied as raw bytes and,
// as a convention, is stored in an inline CacheablePod struct. Everything else
// should implement the below methods which are called recursively by the
// containing Module. See comments for these methods in wasm::Module.
// containing Module.

#define WASM_DECLARE_SERIALIZABLE(Type)                                      \
    size_t serializedSize() const;                                           \
    uint8_t* serialize(uint8_t* cursor) const;                               \
    const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor); \
    MOZ_MUST_USE bool clone(JSContext* cx, Type* out) const;                 \
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;

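// Illustrative sketch, not part of this patch: how a hypothetical type with a
// CacheablePod core and one out-of-line vector might define the methods that
// WASM_DECLARE_SERIALIZABLE declares. SerializedPodVectorSize and
// SerializePodVector are assumed cursor-style helpers paired with the
// DeserializePodVector seen earlier; clone() and sizeOfExcludingThis() are
// omitted for brevity.
struct DemoPod { uint32_t count; };
struct DemoData : DemoPod
{
    Uint32Vector offsets;
    DemoPod& pod() { return *this; }
    const DemoPod& pod() const { return *this; }
    WASM_DECLARE_SERIALIZABLE(DemoData)
};

size_t
DemoData::serializedSize() const
{
    return sizeof(pod()) + SerializedPodVectorSize(offsets);
}

uint8_t*
DemoData::serialize(uint8_t* cursor) const
{
    memcpy(cursor, &pod(), sizeof(pod()));       // raw-copy the pod core first
    cursor += sizeof(pod());
    return SerializePodVector(cursor, offsets);  // then each out-of-line field
}

const uint8_t*
DemoData::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
{
    memcpy(&pod(), cursor, sizeof(pod()));
    cursor += sizeof(pod());
    return DeserializePodVector(cx, cursor, &offsets);
}
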
// ValType/ExprType utilities
@@ -160,6 +160,7 @@ UNIFIED_SOURCES += [
    'asmjs/WasmBinaryToAST.cpp',
    'asmjs/WasmBinaryToExperimentalText.cpp',
    'asmjs/WasmBinaryToText.cpp',
    'asmjs/WasmCode.cpp',
    'asmjs/WasmFrameIterator.cpp',
    'asmjs/WasmGenerator.cpp',
    'asmjs/WasmIonCompile.cpp',