Bug 1235631 - Odin: remove change-heap support (r=bbouvier)

--HG--
extra : commitid : 4s3rKVYz3Wr
extra : rebase_source : fb76a99fa15b5814b024926ccffc80bb0777ecba
Luke Wagner 2015-12-30 12:32:47 -06:00
Parent add7a54a34
Commit 67ecabbdda
25 changed files: 187 additions and 1721 deletions

View file

@ -132,7 +132,7 @@ class js::AsmJSModule
{
public:
enum Which { Variable, FFI, ArrayView, ArrayViewCtor, MathBuiltinFunction,
AtomicsBuiltinFunction, Constant, SimdCtor, SimdOperation, ByteLength };
AtomicsBuiltinFunction, Constant, SimdCtor, SimdOperation };
enum VarInitKind { InitConstant, InitImport };
enum ConstantKind { GlobalConstant, MathConstant };
@ -301,21 +301,19 @@ class js::AsmJSModule
PropertyName* name_;
PropertyName* maybeFieldName_;
struct CacheablePod {
uint32_t wasmIndex_;
uint32_t startOffsetInModule_; // Store module-start-relative offsets
uint32_t endOffsetInModule_; // so preserved by serialization.
} pod;
public:
Export() {}
Export(PropertyName* name, PropertyName* maybeFieldName, uint32_t wasmIndex,
Export(PropertyName* name, PropertyName* maybeFieldName,
uint32_t startOffsetInModule, uint32_t endOffsetInModule)
: name_(name),
maybeFieldName_(maybeFieldName)
{
MOZ_ASSERT(name_->isTenured());
MOZ_ASSERT_IF(maybeFieldName_, maybeFieldName_->isTenured());
pod.wasmIndex_ = wasmIndex;
pod.startOffsetInModule_ = startOffsetInModule;
pod.endOffsetInModule_ = endOffsetInModule;
}
@ -338,14 +336,6 @@ class js::AsmJSModule
uint32_t endOffsetInModule() const {
return pod.endOffsetInModule_;
}
static const uint32_t ChangeHeap = UINT32_MAX;
bool isChangeHeap() const {
return pod.wasmIndex_ == ChangeHeap;
}
uint32_t wasmIndex() const {
MOZ_ASSERT(!isChangeHeap());
return pod.wasmIndex_;
}
WASM_DECLARE_SERIALIZABLE(Export)
};
@ -359,15 +349,12 @@ class js::AsmJSModule
wasm::UniqueStaticLinkData linkData_;
struct CacheablePod {
uint32_t minHeapLength_;
uint32_t maxHeapLength_;
uint32_t heapLengthMask_;
uint32_t numFFIs_;
uint32_t srcLength_;
uint32_t srcLengthWithRightBrace_;
bool strict_;
bool hasArrayView_;
bool isSharedView_;
bool hasFixedMinHeapLength_;
} pod;
const ScriptSourceHolder scriptSource_;
const uint32_t srcStart_;
@ -391,7 +378,6 @@ class js::AsmJSModule
{
mozilla::PodZero(&pod);
pod.minHeapLength_ = RoundUpToNextValidAsmJSHeapLength(0);
pod.maxHeapLength_ = 0x80000000;
pod.strict_ = strict;
MOZ_ASSERT(srcStart_ <= srcBodyStart_);
@ -446,13 +432,6 @@ class js::AsmJSModule
uint32_t minHeapLength() const {
return pod.minHeapLength_;
}
uint32_t maxHeapLength() const {
return pod.maxHeapLength_;
}
uint32_t heapLengthMask() const {
MOZ_ASSERT(pod.hasFixedMinHeapLength_);
return pod.heapLengthMask_;
}
void initGlobalArgumentName(PropertyName* n) {
MOZ_ASSERT(!isFinished());
@ -520,11 +499,6 @@ class js::AsmJSModule
g.pod.u.viewType_ = vt;
return globals_.append(g);
}
bool addByteLength() {
MOZ_ASSERT(!isFinished());
Global g(Global::ByteLength, nullptr);
return globals_.append(g);
}
bool addMathBuiltinFunction(AsmJSMathBuiltinFunction func, PropertyName* field) {
MOZ_ASSERT(!isFinished());
Global g(Global::MathBuiltinFunction, field);
@ -569,30 +543,16 @@ class js::AsmJSModule
MOZ_ASSERT(imports_.length() == importIndex);
return imports_.emplaceBack(ffiIndex);
}
bool addExport(PropertyName* name, PropertyName* maybeFieldName, uint32_t wasmIndex,
uint32_t funcSrcBegin, uint32_t funcSrcEnd)
{
// NB: funcSrcBegin/funcSrcEnd are given relative to the ScriptSource
// (the entire file) and ExportedFunctions store offsets relative to
// the beginning of the module (so that they are caching-invariant).
bool addExport(PropertyName* name, PropertyName* maybeFieldName, uint32_t begin, uint32_t end) {
// The begin/end offsets are given relative to the ScriptSource (the
// entire file) and ExportedFunctions store offsets relative to the
// beginning of the module (so that they are caching-invariant).
MOZ_ASSERT(!isFinished());
MOZ_ASSERT(srcStart_ < funcSrcBegin);
MOZ_ASSERT(funcSrcBegin < funcSrcEnd);
return exports_.emplaceBack(name, maybeFieldName, wasmIndex,
funcSrcBegin - srcStart_, funcSrcEnd - srcStart_);
}
bool addChangeHeap(uint32_t mask, uint32_t min, uint32_t max) {
MOZ_ASSERT(!isFinished());
MOZ_ASSERT(!pod.hasFixedMinHeapLength_);
MOZ_ASSERT(IsValidAsmJSHeapLength(mask + 1));
MOZ_ASSERT(min >= RoundUpToNextValidAsmJSHeapLength(0));
MOZ_ASSERT(max <= pod.maxHeapLength_);
MOZ_ASSERT(min <= max);
pod.heapLengthMask_ = mask;
pod.minHeapLength_ = min;
pod.maxHeapLength_ = max;
pod.hasFixedMinHeapLength_ = true;
return true;
MOZ_ASSERT(srcStart_ < begin);
MOZ_ASSERT(begin < end);
uint32_t startOffsetInModule = begin - srcStart_;
uint32_t endOffsetInModule = end - srcStart_;
return exports_.emplaceBack(name, maybeFieldName, startOffsetInModule, endOffsetInModule);
}
const GlobalVector& globals() const {
@ -615,16 +575,11 @@ class js::AsmJSModule
bool isSharedView() const {
return pod.isSharedView_;
}
bool tryRequireHeapLengthToBeAtLeast(uint32_t len) {
void requireHeapLengthToBeAtLeast(uint32_t len) {
MOZ_ASSERT(!isFinished());
if (pod.hasFixedMinHeapLength_ && len > pod.minHeapLength_)
return false;
if (len > pod.maxHeapLength_)
return false;
len = RoundUpToNextValidAsmJSHeapLength(len);
if (len > pod.minHeapLength_)
pod.minHeapLength_ = len;
return true;
}
/*************************************************************************/
@ -1720,9 +1675,7 @@ class MOZ_STACK_CLASS ModuleValidator
MathBuiltinFunction,
AtomicsBuiltinFunction,
SimdCtor,
SimdOperation,
ByteLength,
ChangeHeap
SimdOperation
};
private:
@ -1746,10 +1699,6 @@ class MOZ_STACK_CLASS ModuleValidator
AsmJSSimdType type_;
AsmJSSimdOperation which_;
} simdOp;
struct {
uint32_t srcBegin_;
uint32_t srcEnd_;
} changeHeap;
} u;
friend class ModuleValidator;
@ -1827,14 +1776,6 @@ class MOZ_STACK_CLASS ModuleValidator
MOZ_ASSERT(which_ == SimdOperation);
return u.simdOp.type_;
}
uint32_t changeHeapSrcBegin() const {
MOZ_ASSERT(which_ == ChangeHeap);
return u.changeHeap.srcBegin_;
}
uint32_t changeHeapSrcEnd() const {
MOZ_ASSERT(which_ == ChangeHeap);
return u.changeHeap.srcEnd_;
}
};
struct MathBuiltin
@ -1931,8 +1872,6 @@ class MOZ_STACK_CLASS ModuleValidator
uint32_t errorOffset_;
bool errorOverRecursed_;
bool canValidateChangeHeap_;
bool hasChangeHeap_;
bool supportsSimd_;
bool atomicsPresent_;
@ -1955,8 +1894,6 @@ class MOZ_STACK_CLASS ModuleValidator
errorString_(nullptr),
errorOffset_(UINT32_MAX),
errorOverRecursed_(false),
canValidateChangeHeap_(false),
hasChangeHeap_(false),
supportsSimd_(cx->jitSupportsSimd()),
atomicsPresent_(false)
{
@ -2089,8 +2026,12 @@ class MOZ_STACK_CLASS ModuleValidator
JS_ALWAYS_TRUE(tokenStream().peekTokenPos(&pos, TokenStream::Operand));
uint32_t endAfterCurly = pos.end;
auto usesHeap = Module::HeapBool(module_->hasArrayView());
auto sharedHeap = Module::SharedBool(module_->isSharedView());
HeapUsage heapUsage = module_->hasArrayView()
? module_->isSharedView()
? HeapUsage::Shared
: HeapUsage::Unshared
: HeapUsage::None;
auto mutedErrors = Module::MutedBool(parser_.ss->mutedErrors());
CacheableChars filename = make_string_copy(parser_.ss->filename());
@ -2107,8 +2048,7 @@ class MOZ_STACK_CLASS ModuleValidator
}
UniqueStaticLinkData linkData;
Module* wasm = mg_.finish(usesHeap, sharedHeap, mutedErrors,
Move(filename), Move(displayURL),
Module* wasm = mg_.finish(heapUsage, mutedErrors, Move(filename), Move(displayURL),
&linkData, slowFuncs);
if (!wasm)
return false;
@ -2219,23 +2159,6 @@ class MOZ_STACK_CLASS ModuleValidator
return globals_.putNew(var, global) &&
module().addSimdOperation(type, op, opName);
}
bool addByteLength(PropertyName* name) {
canValidateChangeHeap_ = true;
Global* global = validationLifo_.new_<Global>(Global::ByteLength);
return global &&
globals_.putNew(name, global) &&
module().addByteLength();
}
bool addChangeHeap(PropertyName* name, ParseNode* fn, uint32_t mask, uint32_t min, uint32_t max) {
hasChangeHeap_ = true;
Global* global = validationLifo_.new_<Global>(Global::ChangeHeap);
if (!global)
return false;
global->u.changeHeap.srcBegin_ = fn->pn_pos.begin;
global->u.changeHeap.srcEnd_ = fn->pn_pos.end;
return globals_.putNew(name, global) &&
module().addChangeHeap(mask, min, max);
}
bool addArrayViewCtor(PropertyName* var, Scalar::Type vt, PropertyName* field) {
Global* global = validationLifo_.new_<Global>(Global::ArrayViewCtor);
if (!global)
@ -2259,17 +2182,8 @@ class MOZ_STACK_CLASS ModuleValidator
if (!args.appendAll(func.sig().args()))
return false;
MallocSig sig(Move(args), func.sig().ret());
uint32_t wasmIndex;
if (!mg_.declareExport(Move(sig), func.index(), &wasmIndex))
return false;
if (wasmIndex == AsmJSModule::Export::ChangeHeap)
return fail(pn, "too many exports");
return module().addExport(func.name(), maybeFieldName, wasmIndex,
func.srcBegin(), func.srcEnd());
}
bool addChangeHeapExport(PropertyName* name, const Global& g, PropertyName* maybeFieldName) {
return module().addExport(name, maybeFieldName, AsmJSModule::Export::ChangeHeap,
g.changeHeapSrcBegin(), g.changeHeapSrcEnd());
return mg_.declareExport(Move(sig), func.index()) &&
module().addExport(func.name(), maybeFieldName, func.srcBegin(), func.srcEnd());
}
private:
const LifoSig* getLifoSig(const LifoSig& sig) {
@ -2338,19 +2252,13 @@ class MOZ_STACK_CLASS ModuleValidator
module().addImport(ffiIndex, *importIndex);
}
bool tryOnceToValidateChangeHeap() {
bool ret = canValidateChangeHeap_;
canValidateChangeHeap_ = false;
return ret;
}
bool hasChangeHeap() const {
return hasChangeHeap_;
}
bool tryRequireHeapLengthToBeAtLeast(uint32_t len) {
return module().tryRequireHeapLengthToBeAtLeast(len);
}
uint32_t minHeapLength() const {
return module().minHeapLength();
bool tryConstantAccess(uint64_t start, uint64_t width) {
MOZ_ASSERT(UINT64_MAX - start > width);
uint64_t end = start + width;
if (end > uint64_t(INT32_MAX) + 1)
return false;
module().requireHeapLengthToBeAtLeast(end);
return true;
}
bool usesSharedMemory() const {
@ -2822,8 +2730,6 @@ class MOZ_STACK_CLASS FunctionValidator
LocalMap locals_;
LabelMap labels_;
unsigned heapExpressionDepth_;
bool hasAlreadyReturned_;
ExprType ret_;
@ -2833,7 +2739,6 @@ class MOZ_STACK_CLASS FunctionValidator
fn_(fn),
locals_(m.cx()),
labels_(m.cx()),
heapExpressionDepth_(0),
hasAlreadyReturned_(false)
{}
@ -2887,19 +2792,6 @@ class MOZ_STACK_CLASS FunctionValidator
return funcIR().addVariable(init.value());
}
/*************************************************************************/
void enterHeapExpression() {
heapExpressionDepth_++;
}
void leaveHeapExpression() {
MOZ_ASSERT(heapExpressionDepth_ > 0);
heapExpressionDepth_--;
}
bool canCall() const {
return heapExpressionDepth_ == 0 || !m_.hasChangeHeap();
}
/****************************** For consistency of returns in a function */
bool hasAlreadyReturned() const {
@ -3471,8 +3363,6 @@ CheckGlobalDotImport(ModuleValidator& m, PropertyName* varName, ParseNode* initN
return m.addGlobalConstant(varName, GenericNaN(), field);
if (field == m.cx()->names().Infinity)
return m.addGlobalConstant(varName, PositiveInfinity<double>(), field);
if (field == m.cx()->names().byteLength)
return m.addByteLength(varName);
Scalar::Type type;
if (IsArrayViewCtorName(m, field, &type))
@ -3783,8 +3673,6 @@ CheckVarRef(FunctionValidator& f, ParseNode* varRef, Type* type)
case ModuleValidator::Global::ArrayViewCtor:
case ModuleValidator::Global::SimdCtor:
case ModuleValidator::Global::SimdOperation:
case ModuleValidator::Global::ByteLength:
case ModuleValidator::Global::ChangeHeap:
return f.failName(varRef, "'%s' may not be accessed by ordinary expressions", name);
}
return true;
@ -3819,7 +3707,7 @@ FoldMaskedArrayIndex(FunctionValidator& f, ParseNode** indexExpr, int32_t* mask,
// constraint. The unsigned maximum of a masked index is the mask
// itself, so check that the mask is not negative and compare the mask
// to the known minimum heap length.
if (int32_t(mask2) >= 0 && mask2 < f.m().minHeapLength())
if (int32_t(mask2) >= 0 && mask2 < f.m().module().minHeapLength())
*needsBoundsCheck = NO_BOUNDS_CHECK;
*mask &= mask2;
*indexExpr = indexNode;
@ -3849,16 +3737,9 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr
uint32_t index;
if (IsLiteralOrConstInt(f, indexExpr, &index)) {
uint64_t byteOffset = uint64_t(index) << TypedArrayShift(*viewType);
if (byteOffset > INT32_MAX)
if (!f.m().tryConstantAccess(byteOffset, TypedArrayElemSize(*viewType)))
return f.fail(indexExpr, "constant index out of range");
unsigned elementSize = TypedArrayElemSize(*viewType);
if (!f.m().tryRequireHeapLengthToBeAtLeast(byteOffset + elementSize)) {
return f.failf(indexExpr, "constant index outside heap size range declared by the "
"change-heap function (0x%x - 0x%x)",
f.m().minHeapLength(), f.m().module().maxHeapLength());
}
*mask = NoMask;
*needsBoundsCheck = NO_BOUNDS_CHECK;
f.writeInt32Lit(byteOffset);
@ -3886,14 +3767,10 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr
if (pointerNode->isKind(PNK_BITAND))
FoldMaskedArrayIndex(f, &pointerNode, mask, needsBoundsCheck);
f.enterHeapExpression();
Type pointerType;
if (!CheckExpr(f, pointerNode, &pointerType))
return false;
f.leaveHeapExpression();
if (!pointerType.isIntish())
return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
} else {
@ -3909,14 +3786,10 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr
if (pointerNode->isKind(PNK_BITAND))
folded = FoldMaskedArrayIndex(f, &pointerNode, mask, needsBoundsCheck);
f.enterHeapExpression();
Type pointerType;
if (!CheckExpr(f, pointerNode, &pointerType))
return false;
f.leaveHeapExpression();
if (folded) {
if (!pointerType.isIntish())
return f.failf(pointerNode, "%s is not a subtype of intish", pointerType.toChars());
@ -4010,14 +3883,10 @@ CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type
if (!CheckAndPrepareArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), &viewType, &needsBoundsCheck, &mask))
return false;
f.enterHeapExpression();
Type rhsType;
if (!CheckExpr(f, rhs, &rhsType))
return false;
f.leaveHeapExpression();
switch (viewType) {
case Scalar::Int8:
case Scalar::Int16:
@ -4657,11 +4526,6 @@ static bool
CheckInternalCall(FunctionValidator& f, ParseNode* callNode, PropertyName* calleeName,
ExprType ret, Type* type)
{
if (!f.canCall()) {
return f.fail(callNode, "call expressions may not be nested inside heap expressions "
"when the module contains a change-heap function");
}
switch (ret) {
case ExprType::Void: f.writeOp(Stmt::CallInternal); break;
case ExprType::I32: f.writeOp(I32::CallInternal); break;
@ -4728,11 +4592,6 @@ CheckFuncPtrTableAgainstExisting(ModuleValidator& m, ParseNode* usepn, PropertyN
static bool
CheckFuncPtrCall(FunctionValidator& f, ParseNode* callNode, ExprType ret, Type* type)
{
if (!f.canCall()) {
return f.fail(callNode, "function-pointer call expressions may not be nested inside heap "
"expressions when the module contains a change-heap function");
}
ParseNode* callee = CallCallee(callNode);
ParseNode* tableNode = ElemBase(callee);
ParseNode* indexExpr = ElemIndex(callee);
@ -4813,11 +4672,6 @@ static bool
CheckFFICall(FunctionValidator& f, ParseNode* callNode, unsigned ffiIndex, ExprType ret,
Type* type)
{
if (!f.canCall()) {
return f.fail(callNode, "FFI call expressions may not be nested inside heap "
"expressions when the module contains a change-heap function");
}
PropertyName* calleeName = CallCallee(callNode)->name();
if (ret == ExprType::F32)
@ -5504,30 +5358,21 @@ CheckSimdLoadStoreArgs(FunctionValidator& f, ParseNode* call, AsmJSSimdType opTy
ParseNode* indexExpr = NextNode(view);
uint32_t indexLit;
if (IsLiteralOrConstInt(f, indexExpr, &indexLit)) {
if (indexLit > INT32_MAX)
if (!f.m().tryConstantAccess(indexLit, Simd128DataSize))
return f.fail(indexExpr, "constant index out of range");
if (!f.m().tryRequireHeapLengthToBeAtLeast(indexLit + Simd128DataSize)) {
return f.failf(indexExpr, "constant index outside heap size range declared by the "
"change-heap function (0x%x - 0x%x)",
f.m().minHeapLength(), f.m().module().maxHeapLength());
}
*needsBoundsCheck = NO_BOUNDS_CHECK;
f.writeInt32Lit(indexLit);
return true;
}
f.enterHeapExpression();
Type indexType;
if (!CheckExpr(f, indexExpr, &indexType))
return false;
if (!indexType.isIntish())
return f.failf(indexExpr, "%s is not a subtype of intish", indexType.toChars());
f.leaveHeapExpression();
return true;
}
@ -5953,8 +5798,6 @@ CheckCoercedCall(FunctionValidator& f, ParseNode* call, ExprType ret, Type* type
case ModuleValidator::Global::FuncPtrTable:
case ModuleValidator::Global::ArrayView:
case ModuleValidator::Global::ArrayViewCtor:
case ModuleValidator::Global::ByteLength:
case ModuleValidator::Global::ChangeHeap:
return f.failName(callee, "'%s' is not callable function", callee->name());
case ModuleValidator::Global::SimdCtor:
case ModuleValidator::Global::SimdOperation:
@ -7020,225 +6863,6 @@ CheckStatement(FunctionValidator& f, ParseNode* stmt)
return f.fail(stmt, "unexpected statement kind");
}
static bool
CheckByteLengthCall(ModuleValidator& m, ParseNode* pn, PropertyName* newBufferName)
{
if (!pn->isKind(PNK_CALL) || !CallCallee(pn)->isKind(PNK_NAME))
return m.fail(pn, "expecting call to imported byteLength");
const ModuleValidator::Global* global = m.lookupGlobal(CallCallee(pn)->name());
if (!global || global->which() != ModuleValidator::Global::ByteLength)
return m.fail(pn, "expecting call to imported byteLength");
if (CallArgListLength(pn) != 1 || !IsUseOfName(CallArgList(pn), newBufferName))
return m.failName(pn, "expecting %s as argument to byteLength call", newBufferName);
return true;
}
static bool
CheckHeapLengthCondition(ModuleValidator& m, ParseNode* cond, PropertyName* newBufferName,
uint32_t* mask, uint32_t* minLength, uint32_t* maxLength)
{
if (!cond->isKind(PNK_OR) || !AndOrLeft(cond)->isKind(PNK_OR))
return m.fail(cond, "expecting byteLength & K || byteLength <= L || byteLength > M");
ParseNode* cond1 = AndOrLeft(AndOrLeft(cond));
ParseNode* cond2 = AndOrRight(AndOrLeft(cond));
ParseNode* cond3 = AndOrRight(cond);
if (!cond1->isKind(PNK_BITAND))
return m.fail(cond1, "expecting byteLength & K");
if (!CheckByteLengthCall(m, BitwiseLeft(cond1), newBufferName))
return false;
ParseNode* maskNode = BitwiseRight(cond1);
if (!IsLiteralInt(m, maskNode, mask))
return m.fail(maskNode, "expecting integer literal mask");
if (*mask == UINT32_MAX)
return m.fail(maskNode, "invalid mask value");
if ((*mask & 0xffffff) != 0xffffff)
return m.fail(maskNode, "mask value must have the bits 0xffffff set");
if (!cond2->isKind(PNK_LE))
return m.fail(cond2, "expecting byteLength <= L");
if (!CheckByteLengthCall(m, RelationalLeft(cond2), newBufferName))
return false;
ParseNode* minLengthNode = RelationalRight(cond2);
uint32_t minLengthExclusive;
if (!IsLiteralInt(m, minLengthNode, &minLengthExclusive))
return m.fail(minLengthNode, "expecting integer literal");
if (minLengthExclusive < 0xffffff || minLengthExclusive == UINT32_MAX)
return m.fail(minLengthNode, "literal must be >= 0xffffff and < 0xffffffff");
// Add one to convert from exclusive (the branch rejects if ==) to inclusive.
*minLength = minLengthExclusive + 1;
if (!cond3->isKind(PNK_GT))
return m.fail(cond3, "expecting byteLength > M");
if (!CheckByteLengthCall(m, RelationalLeft(cond3), newBufferName))
return false;
ParseNode* maxLengthNode = RelationalRight(cond3);
if (!IsLiteralInt(m, maxLengthNode, maxLength))
return m.fail(maxLengthNode, "expecting integer literal");
if (*maxLength > 0x80000000)
return m.fail(maxLengthNode, "literal must be <= 0x80000000");
if (*maxLength < *minLength)
return m.fail(maxLengthNode, "maximum length must be greater or equal to minimum length");
return true;
}
static bool
CheckReturnBoolLiteral(ModuleValidator& m, ParseNode* stmt, bool retval)
{
if (stmt->isKind(PNK_STATEMENTLIST)) {
ParseNode* next = SkipEmptyStatements(ListHead(stmt));
if (!next)
return m.fail(stmt, "expected return statement");
stmt = next;
if (NextNonEmptyStatement(stmt))
return m.fail(stmt, "expected single return statement");
}
if (!stmt->isKind(PNK_RETURN))
return m.fail(stmt, "expected return statement");
ParseNode* returnExpr = ReturnExpr(stmt);
if (!returnExpr || !returnExpr->isKind(retval ? PNK_TRUE : PNK_FALSE))
return m.failf(stmt, "expected 'return %s;'", retval ? "true" : "false");
return true;
}
static bool
CheckReassignmentTo(ModuleValidator& m, ParseNode* stmt, PropertyName* lhsName, ParseNode** rhs)
{
if (!stmt->isKind(PNK_SEMI))
return m.fail(stmt, "missing reassignment");
ParseNode* assign = UnaryKid(stmt);
if (!assign || !assign->isKind(PNK_ASSIGN))
return m.fail(stmt, "missing reassignment");
ParseNode* lhs = BinaryLeft(assign);
if (!IsUseOfName(lhs, lhsName))
return m.failName(lhs, "expecting reassignment of %s", lhsName);
*rhs = BinaryRight(assign);
return true;
}
static bool
CheckChangeHeap(ModuleValidator& m, ParseNode* fn, bool* validated)
{
MOZ_ASSERT(fn->isKind(PNK_FUNCTION));
// We don't yet know whether this is a change-heap function.
// The point at which we know we have a change-heap function is once we see
// whether the argument is coerced according to the normal asm.js rules. If
// it is coerced, it's not change-heap and must validate according to normal
// rules; otherwise it must validate as a change-heap function.
*validated = false;
PropertyName* changeHeapName = FunctionName(fn);
if (!CheckModuleLevelName(m, fn, changeHeapName))
return false;
unsigned numFormals;
ParseNode* arg = FunctionArgsList(fn, &numFormals);
if (numFormals != 1)
return true;
PropertyName* newBufferName;
if (!CheckArgument(m, arg, &newBufferName))
return false;
ParseNode* stmtIter = SkipEmptyStatements(ListHead(FunctionStatementList(fn)));
if (!stmtIter || !stmtIter->isKind(PNK_IF))
return true;
// We can now issue validation failures if we see something that isn't a
// valid change-heap function.
*validated = true;
PropertyName* bufferName = m.module().bufferArgumentName();
if (!bufferName)
return m.fail(fn, "to change heaps, the module must have a buffer argument");
ParseNode* cond = TernaryKid1(stmtIter);
ParseNode* thenStmt = TernaryKid2(stmtIter);
if (ParseNode* elseStmt = TernaryKid3(stmtIter))
return m.fail(elseStmt, "unexpected else statement");
uint32_t mask, min = 0, max; // initialize min to silence GCC warning
if (!CheckHeapLengthCondition(m, cond, newBufferName, &mask, &min, &max))
return false;
if (!CheckReturnBoolLiteral(m, thenStmt, false))
return false;
ParseNode* next = NextNonEmptyStatement(stmtIter);
for (unsigned i = 0; i < m.numArrayViews(); i++, next = NextNonEmptyStatement(stmtIter)) {
if (!next)
return m.failOffset(stmtIter->pn_pos.end, "missing reassignment");
stmtIter = next;
const ModuleValidator::ArrayView& view = m.arrayView(i);
ParseNode* rhs;
if (!CheckReassignmentTo(m, stmtIter, view.name, &rhs))
return false;
if (!rhs->isKind(PNK_NEW))
return m.failName(rhs, "expecting assignment of new array view to %s", view.name);
ParseNode* ctorExpr = ListHead(rhs);
if (!ctorExpr->isKind(PNK_NAME))
return m.fail(rhs, "expecting name of imported typed array constructor");
const ModuleValidator::Global* global = m.lookupGlobal(ctorExpr->name());
if (!global || global->which() != ModuleValidator::Global::ArrayViewCtor)
return m.fail(rhs, "expecting name of imported typed array constructor");
if (global->viewType() != view.type)
return m.fail(rhs, "can't change the type of a global view variable");
if (!CheckNewArrayViewArgs(m, ctorExpr, newBufferName))
return false;
}
if (!next)
return m.failOffset(stmtIter->pn_pos.end, "missing reassignment");
stmtIter = next;
ParseNode* rhs;
if (!CheckReassignmentTo(m, stmtIter, bufferName, &rhs))
return false;
if (!IsUseOfName(rhs, newBufferName))
return m.failName(stmtIter, "expecting assignment of new buffer to %s", bufferName);
next = NextNonEmptyStatement(stmtIter);
if (!next)
return m.failOffset(stmtIter->pn_pos.end, "expected return statement");
stmtIter = next;
if (!CheckReturnBoolLiteral(m, stmtIter, true))
return false;
stmtIter = NextNonEmptyStatement(stmtIter);
if (stmtIter)
return m.fail(stmtIter, "expecting end of function");
return m.addChangeHeap(changeHeapName, fn, mask, min, max);
}
static bool
ParseFunction(ModuleValidator& m, ParseNode** fnOut, unsigned* line, unsigned* column)
{
@ -7319,14 +6943,6 @@ CheckFunction(ModuleValidator& m)
if (!CheckFunctionHead(m, fn))
return false;
if (m.tryOnceToValidateChangeHeap()) {
bool validated;
if (!CheckChangeHeap(m, fn, &validated))
return false;
if (validated)
return true;
}
FunctionValidator f(m, fn);
if (!f.init(FunctionName(fn), line, column))
return m.fail(fn, "internal compiler failure (probably out of memory)");
@ -7493,13 +7109,10 @@ CheckModuleExportFunction(ModuleValidator& m, ParseNode* pn, PropertyName* maybe
if (!global)
return m.failName(pn, "exported function name '%s' not found", funcName);
if (global->which() == ModuleValidator::Global::Function)
return m.addExport(pn, m.function(global->funcIndex()), maybeFieldName);
if (global->which() != ModuleValidator::Global::Function)
return m.failName(pn, "'%s' is not a function", funcName);
if (global->which() == ModuleValidator::Global::ChangeHeap)
return m.addChangeHeapExport(funcName, *global, maybeFieldName);
return m.failName(pn, "'%s' is not a function", funcName);
return m.addExport(pn, m.function(global->funcIndex()), maybeFieldName);
}
static bool
@ -7636,13 +7249,13 @@ CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, Hand
}
/*****************************************************************************/
// Runtime calls to asm.js module exports
// Link-time validation
static AsmJSModuleObject&
FunctionToModuleObject(JSFunction* fun)
{
MOZ_ASSERT(IsAsmJSFunction(fun) || IsAsmJSModule(fun));
const Value& v = fun->getExtendedSlot(FunctionExtended::ASM_MODULE_SLOT);
const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_MODULE_SLOT);
return v.toObject().as<AsmJSModuleObject>();
}
@ -7650,44 +7263,10 @@ static unsigned
FunctionToExportIndex(JSFunction* fun)
{
MOZ_ASSERT(IsAsmJSFunction(fun));
const Value& v = fun->getExtendedSlot(FunctionExtended::ASM_EXPORT_INDEX_SLOT);
const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_EXPORT_INDEX_SLOT);
return v.toInt32();
}
static bool
ChangeHeap(JSContext* cx, AsmJSModule& module, const CallArgs& args)
{
HandleValue bufferArg = args.get(0);
if (!IsArrayBuffer(bufferArg)) {
ReportIncompatible(cx, args);
return false;
}
Rooted<ArrayBufferObject*> newBuffer(cx, &bufferArg.toObject().as<ArrayBufferObject>());
uint32_t heapLength = newBuffer->byteLength();
if (heapLength & module.heapLengthMask() ||
heapLength < module.minHeapLength() ||
heapLength > module.maxHeapLength())
{
args.rval().set(BooleanValue(false));
return true;
}
if (!module.hasArrayView()) {
args.rval().set(BooleanValue(true));
return true;
}
MOZ_ASSERT(IsValidAsmJSHeapLength(heapLength));
bool useSignalHandlers = module.wasmModule().compileArgs().useSignalHandlersForOOB;
if (!ArrayBufferObject::prepareForAsmJS(cx, newBuffer, useSignalHandlers))
return false;
args.rval().set(BooleanValue(module.wasmModule().changeHeap(newBuffer, cx)));
return true;
}
static bool
CallAsmJS(JSContext* cx, unsigned argc, Value* vp)
{
@ -7695,18 +7274,11 @@ CallAsmJS(JSContext* cx, unsigned argc, Value* vp)
RootedFunction callee(cx, &args.callee().as<JSFunction>());
AsmJSModule& module = FunctionToModuleObject(callee).module();
const AsmJSModule::Export& exp = module.exports()[FunctionToExportIndex(callee)];
uint32_t exportIndex = FunctionToExportIndex(callee);
// The heap-changing function is a special-case and is implemented by C++.
if (exp.isChangeHeap())
return ChangeHeap(cx, module, args);
return module.wasmModule().callExport(cx, exp.wasmIndex(), args);
return module.wasmModule().callExport(cx, exportIndex, args);
}
/*****************************************************************************/
// Link-time validation
static bool
LinkFail(JSContext* cx, const char* str)
{
@ -7885,30 +7457,6 @@ ValidateArrayView(JSContext* cx, const AsmJSModule::Global& global, HandleValue
return true;
}
static bool
ValidateByteLength(JSContext* cx, HandleValue globalVal)
{
RootedPropertyName field(cx, cx->names().byteLength);
RootedValue v(cx);
if (!GetDataProperty(cx, globalVal, field, &v))
return false;
if (!v.isObject() || !v.toObject().isBoundFunction())
return LinkFail(cx, "byteLength must be a bound function object");
RootedFunction fun(cx, &v.toObject().as<JSFunction>());
RootedValue boundTarget(cx, ObjectValue(*fun->getBoundFunctionTarget()));
if (!IsNativeFunction(boundTarget, fun_call))
return LinkFail(cx, "bound target of byteLength must be Function.prototype.call");
RootedValue boundThis(cx, fun->getBoundFunctionThis());
if (!IsNativeFunction(boundThis, ArrayBufferObject::byteLengthGetter))
return LinkFail(cx, "bound this value must be ArrayBuffer.prototype.byteLength accessor");
return true;
}
static bool
ValidateMathBuiltinFunction(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
@ -8151,20 +7699,12 @@ CheckBuffer(JSContext* cx, AsmJSModule& module, HandleValue bufferVal,
if (heapLength < module.minHeapLength()) {
UniqueChars msg(
JS_smprintf("ArrayBuffer byteLength of 0x%x is less than 0x%x (the size implied "
"by const heap accesses and/or change-heap minimum-length requirements).",
"by const heap accesses).",
heapLength,
module.minHeapLength()));
return LinkFail(cx, msg.get());
}
if (heapLength > module.maxHeapLength()) {
UniqueChars msg(
JS_smprintf("ArrayBuffer byteLength 0x%x is greater than maximum length of 0x%x",
heapLength,
module.maxHeapLength()));
return LinkFail(cx, msg.get());
}
// Shell builtins may have disabled signal handlers since the module we're
// cloning was compiled. LookupAsmJSModuleInCache checks for signal handlers
// as well for the caching case.
@ -8211,10 +7751,6 @@ DynamicallyLinkModule(JSContext* cx, const CallArgs& args, AsmJSModule& module)
if (!ValidateArrayView(cx, global, globalVal))
return false;
break;
case AsmJSModule::Global::ByteLength:
if (!ValidateByteLength(cx, globalVal))
return false;
break;
case AsmJSModule::Global::MathBuiltinFunction:
if (!ValidateMathBuiltinFunction(cx, global, globalVal))
return false;
@ -8251,9 +7787,7 @@ static JSFunction*
NewExportedFunction(JSContext* cx, const AsmJSModule& module, const AsmJSModule::Export& func,
HandleObject moduleObj, unsigned exportIndex)
{
unsigned numArgs = func.isChangeHeap()
? 1
: module.wasmModule().exports()[func.wasmIndex()].sig().args().length();
unsigned numArgs = module.wasmModule().exports()[exportIndex].sig().args().length();
RootedPropertyName name(cx, func.name());
JSFunction* fun =
@ -8263,8 +7797,8 @@ NewExportedFunction(JSContext* cx, const AsmJSModule& module, const AsmJSModule:
if (!fun)
return nullptr;
fun->setExtendedSlot(FunctionExtended::ASM_MODULE_SLOT, ObjectValue(*moduleObj));
fun->setExtendedSlot(FunctionExtended::ASM_EXPORT_INDEX_SLOT, Int32Value(exportIndex));
fun->setExtendedSlot(FunctionExtended::WASM_MODULE_SLOT, ObjectValue(*moduleObj));
fun->setExtendedSlot(FunctionExtended::WASM_EXPORT_INDEX_SLOT, Int32Value(exportIndex));
return fun;
}
@ -8434,7 +7968,7 @@ NewModuleFunction(ExclusiveContext* cx, JSFunction* origFun, HandleObject module
if (!moduleFun)
return nullptr;
moduleFun->setExtendedSlot(FunctionExtended::ASM_MODULE_SLOT, ObjectValue(*moduleObj));
moduleFun->setExtendedSlot(FunctionExtended::WASM_MODULE_SLOT, ObjectValue(*moduleObj));
return moduleFun;
}
@ -9390,14 +8924,3 @@ js::RoundUpToNextValidAsmJSHeapLength(uint32_t length)
MOZ_ASSERT(length <= 0xff000000);
return (length + 0x00ffffff) & ~0x00ffffff;
}
bool
js::OnDetachAsmJSArrayBuffer(JSContext* cx, Handle<ArrayBufferObject*> buffer)
{
for (Module* m = cx->runtime()->linkedWasmModules; m; m = m->nextLinked()) {
if (buffer == m->maybeBuffer() && !m->detachHeap(cx))
return false;
}
return true;
}
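
A minimal standalone sketch of the heap-length rounding visible a few lines above (the tail of RoundUpToNextValidAsmJSHeapLength): lengths in this range are rounded up to the next multiple of 16 MiB (0x1000000). The helper name below is made up for illustration and is not part of the patch.

#include <cassert>
#include <cstdint>

// Mirrors the masked arithmetic shown above: round up to the next 16 MiB boundary.
static uint32_t RoundUpLargeAsmJSHeapLength(uint32_t length) {
    assert(length <= 0xff000000u);
    return (length + 0x00ffffffu) & ~0x00ffffffu;
}

int main() {
    assert(RoundUpLargeAsmJSHeapLength(0x01000000u) == 0x01000000u); // already a 16 MiB multiple
    assert(RoundUpLargeAsmJSHeapLength(0x01000001u) == 0x02000000u); // rounds up to the next one
    return 0;
}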

View file

@ -109,9 +109,6 @@ IsValidAsmJSHeapLength(uint32_t length);
extern uint32_t
RoundUpToNextValidAsmJSHeapLength(uint32_t length);
extern bool
OnDetachAsmJSArrayBuffer(JSContext* cx, Handle<ArrayBufferObject*> buffer);
// The assumed page size; dynamically checked in CompileAsmJS.
#ifdef _MIPS_ARCH_LOONGSON3A
static const size_t AsmJSPageSize = 16384;

View file

@ -312,9 +312,8 @@ ModuleGenerator::defineImport(uint32_t index, ProfilingOffsets interpExit, Profi
}
bool
ModuleGenerator::declareExport(MallocSig&& sig, uint32_t funcIndex, uint32_t* index)
ModuleGenerator::declareExport(MallocSig&& sig, uint32_t funcIndex)
{
*index = exports_.length();
return exports_.emplaceBack(Move(sig), funcIndex);
}
@ -499,8 +498,7 @@ ModuleGenerator::defineOutOfBoundsStub(Offsets offsets)
}
Module*
ModuleGenerator::finish(Module::HeapBool usesHeap,
Module::SharedBool sharedHeap,
ModuleGenerator::finish(HeapUsage heapUsage,
Module::MutedBool mutedErrors,
CacheableChars filename,
CacheableTwoByteChars displayURL,
@ -510,7 +508,7 @@ ModuleGenerator::finish(Module::HeapBool usesHeap,
MOZ_ASSERT(!activeFunc_);
MOZ_ASSERT(finishedFuncs_);
if (!GenerateStubs(*this, usesHeap))
if (!GenerateStubs(*this, UsesHeap(heapUsage)))
return nullptr;
masm_.finish();
@ -616,8 +614,7 @@ ModuleGenerator::finish(Module::HeapBool usesHeap,
funcBytes_,
codeBytes,
globalBytes_,
usesHeap,
sharedHeap,
heapUsage,
mutedErrors,
Move(code),
Move(imports_),

View file

@ -123,7 +123,7 @@ class MOZ_STACK_CLASS ModuleGenerator
bool defineImport(uint32_t index, ProfilingOffsets interpExit, ProfilingOffsets jitExit);
// Exports:
bool declareExport(MallocSig&& sig, uint32_t funcIndex, uint32_t* index);
bool declareExport(MallocSig&& sig, uint32_t funcIndex);
uint32_t numDeclaredExports() const;
uint32_t exportFuncIndex(uint32_t index) const;
const MallocSig& exportSig(uint32_t index) const;
@ -147,8 +147,7 @@ class MOZ_STACK_CLASS ModuleGenerator
// Null return indicates failure. The caller must immediately root a
// non-null return value.
Module* finish(Module::HeapBool usesHeap,
Module::SharedBool sharedHeap,
Module* finish(HeapUsage heapUsage,
Module::MutedBool mutedErrors,
CacheableChars filename,
CacheableTwoByteChars displayURL,

View file

@ -511,8 +511,9 @@ Module::activation()
void
Module::specializeToHeap(ArrayBufferObjectMaybeShared* heap)
{
MOZ_ASSERT(usesHeap());
MOZ_ASSERT_IF(heap->is<ArrayBufferObject>(), heap->as<ArrayBufferObject>().isAsmJS());
MOZ_ASSERT(!maybeHeap_);
MOZ_ASSERT(!heap_);
MOZ_ASSERT(!rawHeapPtr());
uint8_t* ptrBase = heap->dataPointerEither().unwrap(/*safe - protected by Module methods*/);
@ -550,14 +551,17 @@ Module::specializeToHeap(ArrayBufferObjectMaybeShared* heap)
Assembler::UpdateBoundsCheck(heapLength, (Instruction*)(access.insnOffset() + code()));
#endif
maybeHeap_ = heap;
heap_ = heap;
rawHeapPtr() = ptrBase;
}
void
Module::despecializeFromHeap(ArrayBufferObjectMaybeShared* heap)
{
MOZ_ASSERT_IF(maybeHeap_, maybeHeap_ == heap);
// heap_/rawHeapPtr can be null if this module holds cloned code from
// another dynamically-linked module which we are despecializing from that
// module's heap.
MOZ_ASSERT_IF(heap_, heap_ == heap);
MOZ_ASSERT_IF(rawHeapPtr(), rawHeapPtr() == heap->dataPointerEither().unwrap());
#if defined(JS_CODEGEN_X86)
@ -581,7 +585,7 @@ Module::despecializeFromHeap(ArrayBufferObjectMaybeShared* heap)
}
#endif
maybeHeap_ = nullptr;
heap_ = nullptr;
rawHeapPtr() = nullptr;
}
@ -700,7 +704,7 @@ Module::importToExit(const Import& import)
/* static */ Module::CacheablePod
Module::zeroPod()
{
CacheablePod pod = {0, 0, 0, false, false, false, false, false};
CacheablePod pod = {0, 0, 0, HeapUsage::None, false, false, false};
return pod;
}
@ -711,9 +715,6 @@ Module::init()
interrupt_ = nullptr;
outOfBounds_ = nullptr;
dynamicallyLinked_ = false;
prev_ = nullptr;
next_ = nullptr;
interrupted_ = false;
*(double*)(globalData() + NaN64GlobalDataOffset) = GenericNaN();
*(float*)(globalData() + NaN32GlobalDataOffset) = GenericNaN();
@ -757,8 +758,7 @@ Module::Module(CompileArgs args,
uint32_t functionBytes,
uint32_t codeBytes,
uint32_t globalBytes,
HeapBool usesHeap,
SharedBool sharedHeap,
HeapUsage heapUsage,
MutedBool mutedErrors,
UniqueCodePtr code,
ImportVector&& imports,
@ -786,20 +786,16 @@ Module::Module(CompileArgs args,
const_cast<uint32_t&>(pod.functionBytes_) = functionBytes;
const_cast<uint32_t&>(pod.codeBytes_) = codeBytes;
const_cast<uint32_t&>(pod.globalBytes_) = globalBytes;
const_cast<bool&>(pod.usesHeap_) = bool(usesHeap);
const_cast<bool&>(pod.sharedHeap_) = bool(sharedHeap);
const_cast<HeapUsage&>(pod.heapUsage_) = heapUsage;
const_cast<bool&>(pod.mutedErrors_) = bool(mutedErrors);
const_cast<bool&>(pod.usesSignalHandlersForOOB_) = args.useSignalHandlersForOOB;
const_cast<bool&>(pod.usesSignalHandlersForInterrupt_) = args.useSignalHandlersForInterrupt;
MOZ_ASSERT_IF(sharedHeap, usesHeap);
init();
}
Module::~Module()
{
MOZ_ASSERT(!interrupted_);
if (code_) {
for (unsigned i = 0; i < imports_.length(); i++) {
ImportExit& exit = importToExit(imports_[i]);
@ -807,11 +803,6 @@ Module::~Module()
exit.baselineScript->removeDependentWasmModule(*this, i);
}
}
if (prev_)
*prev_ = next_;
if (next_)
next_->prev_ = prev_;
}
void
@ -822,8 +813,8 @@ Module::trace(JSTracer* trc)
TraceEdge(trc, &importToExit(import).fun, "wasm function import");
}
if (maybeHeap_)
TraceEdge(trc, &maybeHeap_, "wasm buffer");
if (heap_)
TraceEdge(trc, &heap_, "wasm buffer");
}
CompileArgs
@ -982,13 +973,6 @@ Module::dynamicallyLink(JSContext* cx, Handle<ArrayBufferObjectMaybeShared*> hea
MOZ_ASSERT(!dynamicallyLinked_);
dynamicallyLinked_ = true;
// Add this module to the JSRuntime-wide list of dynamically-linked modules.
next_ = cx->runtime()->linkedWasmModules;
prev_ = &cx->runtime()->linkedWasmModules;
cx->runtime()->linkedWasmModules = this;
if (next_)
next_->prev_ = &next_;
// Push a JitContext for benefit of IsCompilingAsmJS and flush the ICache.
// We've been inhibiting flushing up to this point so flush it all now.
JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
@ -1007,7 +991,7 @@ Module::dynamicallyLink(JSContext* cx, Handle<ArrayBufferObjectMaybeShared*> hea
}
// Specialize code to the actual heap.
if (heap)
if (usesHeap())
specializeToHeap(heap);
// See AllocateCode comment above.
@ -1017,19 +1001,13 @@ Module::dynamicallyLink(JSContext* cx, Handle<ArrayBufferObjectMaybeShared*> hea
return true;
}
ArrayBufferObjectMaybeShared*
Module::maybeBuffer() const
{
MOZ_ASSERT(dynamicallyLinked_);
return maybeHeap_;
}
SharedMem<uint8_t*>
Module::maybeHeap() const
Module::heap() const
{
MOZ_ASSERT(dynamicallyLinked_);
MOZ_ASSERT_IF(!pod.usesHeap_, rawHeapPtr() == nullptr);
return pod.sharedHeap_
MOZ_ASSERT(usesHeap());
MOZ_ASSERT(rawHeapPtr());
return hasSharedHeap()
? SharedMem<uint8_t*>::shared(rawHeapPtr())
: SharedMem<uint8_t*>::unshared(rawHeapPtr());
}
@ -1038,7 +1016,8 @@ size_t
Module::heapLength() const
{
MOZ_ASSERT(dynamicallyLinked_);
return maybeHeap_ ? maybeHeap_->byteLength() : 0;
MOZ_ASSERT(usesHeap());
return heap_->byteLength();
}
void
@ -1051,65 +1030,6 @@ Module::deoptimizeImportExit(uint32_t importIndex)
exit.baselineScript = nullptr;
}
bool
Module::changeHeap(Handle<ArrayBufferObject*> newHeap, JSContext* cx)
{
MOZ_ASSERT(dynamicallyLinked_);
MOZ_ASSERT(pod.usesHeap_);
// Content JS should not be able to run (and change heap) from within an
// interrupt callback, but in case it does, fail to change heap. Otherwise,
// the heap can change at every single instruction which would prevent
// future optimizations like heap-base hoisting.
if (interrupted_)
return false;
AutoMutateCode amc(cx, *this, "Module::changeHeap");
if (maybeHeap_)
despecializeFromHeap(maybeHeap_);
specializeToHeap(newHeap);
return true;
}
bool
Module::detachHeap(JSContext* cx)
{
MOZ_ASSERT(dynamicallyLinked_);
MOZ_ASSERT(pod.usesHeap_);
// Content JS should not be able to run (and detach heap) from within an
// interrupt callback, but in case it does, fail. Otherwise, the heap can
// change at an arbitrary instruction and break the assumption below.
if (interrupted_) {
JS_ReportError(cx, "attempt to detach from inside interrupt handler");
return false;
}
// Even if this->active(), to reach here, the activation must have called
// out via an import exit stub. FFI stubs check if heapDatum() is null on
// reentry and throw an exception if so.
MOZ_ASSERT_IF(activation(), activation()->exitReason() == ExitReason::ImportJit ||
activation()->exitReason() == ExitReason::ImportInterp);
AutoMutateCode amc(cx, *this, "Module::detachHeap");
despecializeFromHeap(maybeHeap_);
return true;
}
void
Module::setInterrupted(bool interrupted)
{
MOZ_ASSERT(dynamicallyLinked_);
interrupted_ = interrupted;
}
Module*
Module::nextLinked() const
{
MOZ_ASSERT(dynamicallyLinked_);
return next_;
}
bool
Module::callExport(JSContext* cx, uint32_t exportIndex, CallArgs args)
{
@ -1179,17 +1099,6 @@ Module::callExport(JSContext* cx, uint32_t exportIndex, CallArgs args)
}
}
// The correct way to handle this situation would be to allocate a new range
// of PROT_NONE memory and module.changeHeap to this memory. That would
// cause every access to take the out-of-bounds signal-handler path which
// does the right thing. For now, just throw an out-of-memory exception
// since these can technically pop out anywhere and the full fix may
// actually OOM when trying to allocate the PROT_NONE memory.
if (usesHeap() && !maybeHeap_) {
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_OUT_OF_MEMORY);
return false;
}
{
// Push a WasmActivation to describe the wasm frames we're about to push
// when running this module. Additionally, push a JitActivation so that
@ -1518,8 +1427,8 @@ Module::clone(JSContext* cx, const StaticLinkData& linkData) const
// If the copied machine code has been specialized to the heap, it must be
// unspecialized in the copy.
if (maybeHeap_)
out->despecializeFromHeap(maybeHeap_);
if (usesHeap())
out->despecializeFromHeap(heap_);
if (!out->staticallyLink(cx, linkData))
return nullptr;
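
A small sketch of the accessor contract this file now relies on, using made-up names rather than code from the patch: heap() replaces the nullable maybeHeap() and asserts that the module actually uses a heap, so callers such as the fault handlers in the signal-handler hunks below must check usesHeap() first.

#include <cassert>
#include <cstdint>

struct ModuleSketch {
    bool declaresHeap;   // stands in for pod.heapUsage_ != HeapUsage::None
    uint8_t* rawHeap;    // stands in for rawHeapPtr(), set during dynamic linking

    bool usesHeap() const { return declaresHeap; }
    uint8_t* heap() const {
        // Unlike the old maybeHeap(), heap() is never null; it asserts instead.
        assert(usesHeap() && rawHeap);
        return rawHeap;
    }
};

int main() {
    uint8_t buffer[8] = {};
    ModuleSketch m{true, buffer};
    // Call-site pattern mirrored by the signal handlers below:
    // check usesHeap() before comparing addresses against heap().
    if (m.usesHeap())
        assert(m.heap() == buffer);
    return 0;
}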

View file

@ -325,6 +325,22 @@ typedef JS::UniquePtr<uint8_t, CodeDeleter> UniqueCodePtr;
UniqueCodePtr
AllocateCode(ExclusiveContext* cx, size_t bytes);
// A wasm module can use no heap, an unshared heap (ArrayBuffer), or a shared
// heap (SharedArrayBuffer).
enum class HeapUsage
{
None = false,
Unshared = 1,
Shared = 2
};
static inline bool
UsesHeap(HeapUsage heapUsage)
{
return bool(heapUsage);
}
// Module represents a compiled WebAssembly module which lives until the last
// reference to any exported functions is dropped. Modules must be wrapped by a
// rooted JSObject immediately after creation so that Module::trace() is called
@ -375,8 +391,7 @@ class Module
const uint32_t functionBytes_;
const uint32_t codeBytes_;
const uint32_t globalBytes_;
const bool usesHeap_;
const bool sharedHeap_;
const HeapUsage heapUsage_;
const bool mutedErrors_;
const bool usesSignalHandlersForOOB_;
const bool usesSignalHandlersForInterrupt_;
@ -400,14 +415,11 @@ class Module
// Initialized during dynamicallyLink:
bool dynamicallyLinked_;
BufferPtr maybeHeap_;
Module** prev_;
Module* next_;
BufferPtr heap_;
// Mutated after dynamicallyLink:
bool profilingEnabled_;
FuncLabelVector funcLabels_;
bool interrupted_;
class AutoMutateCode;
@ -448,16 +460,13 @@ class Module
static const unsigned OffsetOfImportExitFun = offsetof(ImportExit, fun);
static const unsigned SizeOfEntryArg = sizeof(EntryArg);
enum HeapBool { DoesntUseHeap = false, UsesHeap = true };
enum SharedBool { UnsharedHeap = false, SharedHeap = true };
enum MutedBool { DontMuteErrors = false, MuteErrors = true };
Module(CompileArgs args,
uint32_t functionBytes,
uint32_t codeBytes,
uint32_t globalBytes,
HeapBool usesHeap,
SharedBool sharedHeap,
HeapUsage heapUsage,
MutedBool mutedErrors,
UniqueCodePtr code,
ImportVector&& imports,
@ -474,8 +483,9 @@ class Module
uint8_t* code() const { return code_.get(); }
uint8_t* globalData() const { return code() + pod.codeBytes_; }
uint32_t globalBytes() const { return pod.globalBytes_; }
bool usesHeap() const { return pod.usesHeap_; }
bool sharedHeap() const { return pod.sharedHeap_; }
HeapUsage heapUsage() const { return pod.heapUsage_; }
bool usesHeap() const { return UsesHeap(pod.heapUsage_); }
bool hasSharedHeap() const { return pod.heapUsage_ == HeapUsage::Shared; }
bool mutedErrors() const { return pod.mutedErrors_; }
CompileArgs compileArgs() const;
const ImportVector& imports() const { return imports_; }
@ -515,21 +525,9 @@ class Module
// The wasm heap, established by dynamicallyLink.
ArrayBufferObjectMaybeShared* maybeBuffer() const;
SharedMem<uint8_t*> maybeHeap() const;
SharedMem<uint8_t*> heap() const;
size_t heapLength() const;
// asm.js may detach and change the heap at any time. As an internal detail,
// the heap may not be changed while the module has been asynchronously
// interrupted.
//
// N.B. These methods and asm.js change-heap support will be removed soon.
bool changeHeap(Handle<ArrayBufferObject*> newBuffer, JSContext* cx);
bool detachHeap(JSContext* cx);
void setInterrupted(bool interrupted);
Module* nextLinked() const;
// The exports of a wasm module are called by preparing an array of
// arguments (coerced to the corresponding types of the Export signature)
// and calling the export's entry trampoline.
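
A short sketch of how the removed (usesHeap, sharedHeap) boolean pair maps onto the HeapUsage enum declared above; HeapUsageFromBools is a hypothetical helper for illustration, not part of the patch. The enum also makes the previously asserted invariant MOZ_ASSERT_IF(sharedHeap, usesHeap) unrepresentable by construction.

#include <cassert>

enum class HeapUsage { None = 0, Unshared = 1, Shared = 2 };

static inline bool UsesHeap(HeapUsage u) { return u != HeapUsage::None; }

// Hypothetical mapping from the old boolean pair to the new enum.
static HeapUsage HeapUsageFromBools(bool usesHeap, bool sharedHeap) {
    if (!usesHeap)
        return HeapUsage::None;   // a shared heap without any heap was never a valid state
    return sharedHeap ? HeapUsage::Shared : HeapUsage::Unshared;
}

int main() {
    assert(!UsesHeap(HeapUsageFromBools(false, false)));
    assert(HeapUsageFromBools(true, false) == HeapUsage::Unshared);
    assert(HeapUsageFromBools(true, true) == HeapUsage::Shared);
    return 0;
}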

View file

@ -627,7 +627,7 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
uintptr_t base;
StoreValueFromGPReg(SharedMem<void*>::unshared(&base), sizeof(uintptr_t),
AddressOfGPRegisterSlot(context, address.base()));
MOZ_RELEASE_ASSERT(reinterpret_cast<uint8_t*>(base) == module.maybeHeap());
MOZ_RELEASE_ASSERT(reinterpret_cast<uint8_t*>(base) == module.heap());
}
if (address.hasIndex()) {
uintptr_t index;
@ -645,11 +645,11 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
MOZ_RELEASE_ASSERT(size_t(faultingAddress - accessAddress) < access.size(),
"Given faulting address does not appear to be within computed "
"faulting address range");
MOZ_RELEASE_ASSERT(accessAddress >= module.maybeHeap(),
MOZ_RELEASE_ASSERT(accessAddress >= module.heap(),
"Access begins outside the asm.js heap");
MOZ_RELEASE_ASSERT(accessAddress + access.size() <= module.maybeHeap() + AsmJSMappedSize,
MOZ_RELEASE_ASSERT(accessAddress + access.size() <= module.heap() + AsmJSMappedSize,
"Access extends beyond the asm.js heap guard region");
MOZ_RELEASE_ASSERT(accessAddress + access.size() > module.maybeHeap() + module.heapLength(),
MOZ_RELEASE_ASSERT(accessAddress + access.size() > module.heap() + module.heapLength(),
"Computed access address is not actually out of bounds");
// The basic sandbox model is that all heap accesses are a heap base
@ -666,7 +666,7 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
//
// Taking a signal is really slow, but in theory programs really shouldn't
// be hitting this anyway.
intptr_t unwrappedOffset = accessAddress - module.maybeHeap().unwrap(/*safe - for value*/);
intptr_t unwrappedOffset = accessAddress - module.heap().unwrap(/*safe - for value*/);
uint32_t wrappedOffset = uint32_t(unwrappedOffset);
size_t size = access.size();
MOZ_RELEASE_ASSERT(wrappedOffset + size > wrappedOffset);
@ -684,10 +684,10 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
// We now know that this is an access that is actually in bounds when
// properly wrapped. Complete the load or store with the wrapped
// address.
SharedMem<uint8_t*> wrappedAddress = module.maybeHeap() + wrappedOffset;
MOZ_RELEASE_ASSERT(wrappedAddress >= module.maybeHeap());
SharedMem<uint8_t*> wrappedAddress = module.heap() + wrappedOffset;
MOZ_RELEASE_ASSERT(wrappedAddress >= module.heap());
MOZ_RELEASE_ASSERT(wrappedAddress + size > wrappedAddress);
MOZ_RELEASE_ASSERT(wrappedAddress + size <= module.maybeHeap() + module.heapLength());
MOZ_RELEASE_ASSERT(wrappedAddress + size <= module.heap() + module.heapLength());
switch (access.kind()) {
case Disassembler::HeapAccess::Load:
SetRegisterToLoadedValue(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
@ -762,9 +762,9 @@ HandleFault(PEXCEPTION_POINTERS exception)
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(record->ExceptionInformation[1]);
if (!module.maybeHeap() ||
faultingAddress < module.maybeHeap() ||
faultingAddress >= module.maybeHeap() + AsmJSMappedSize)
if (!module.usesHeap() ||
faultingAddress < module.heap() ||
faultingAddress >= module.heap() + AsmJSMappedSize)
{
return false;
}
@ -907,9 +907,9 @@ HandleMachException(JSRuntime* rt, const ExceptionRequest& request)
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(request.body.code[1]);
if (!module.maybeHeap() ||
faultingAddress < module.maybeHeap() ||
faultingAddress >= module.maybeHeap() + AsmJSMappedSize)
if (!module.usesHeap() ||
faultingAddress < module.heap() ||
faultingAddress >= module.heap() + AsmJSMappedSize)
{
return false;
}
@ -1117,9 +1117,9 @@ HandleFault(int signum, siginfo_t* info, void* ctx)
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(info->si_addr);
if (!module.maybeHeap() ||
faultingAddress < module.maybeHeap() ||
faultingAddress >= module.maybeHeap() + AsmJSMappedSize)
if (!module.usesHeap() ||
faultingAddress < module.heap() ||
faultingAddress >= module.heap() + AsmJSMappedSize)
{
return false;
}

View file

@ -99,7 +99,7 @@ static const unsigned FramePushedForEntrySP = FramePushedAfterSave + sizeof(void
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of CodePtr to the export's signature's ABI.
static bool
GenerateEntry(ModuleGenerator& mg, unsigned exportIndex, Module::HeapBool usesHeap)
GenerateEntry(ModuleGenerator& mg, unsigned exportIndex, bool usesHeap)
{
MacroAssembler& masm = mg.masm();
const MallocSig& sig = mg.exportSig(exportIndex);
@ -332,30 +332,12 @@ FillArgumentArray(MacroAssembler& masm, const MallocSig::ArgVector& args, unsign
}
}
// If an import call detaches its heap (viz., via ArrayBuffer.transfer), it must
// call change-heap to another heap (viz., the new heap returned by transfer)
// before returning to asm.js code. If the application fails to do this (if the
// heap pointer is null), jump to a stub.
static void
CheckForHeapDetachment(MacroAssembler& masm, Register scratch, Label* onDetached)
{
MOZ_ASSERT(int(masm.framePushed()) >= int(ShadowStackSpace));
AssertStackAlignment(masm, ABIStackAlignment);
#if defined(JS_CODEGEN_X86)
CodeOffset offset = masm.movlWithPatch(PatchedAbsoluteAddress(), scratch);
masm.append(AsmJSGlobalAccess(offset, HeapGlobalDataOffset));
masm.branchTestPtr(Assembler::Zero, scratch, scratch, onDetached);
#else
masm.branchTestPtr(Assembler::Zero, HeapReg, HeapReg, onDetached);
#endif
}
// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into an appropriate InvokeImport C++
// function, having boxed all the ABI arguments into a homogeneous Value array.
static bool
GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, Module::HeapBool usesHeap,
Label* throwLabel, Label* onDetached, ProfilingOffsets* offsets)
GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, Label* throwLabel,
ProfilingOffsets* offsets)
{
MacroAssembler& masm = mg.masm();
const MallocSig& sig = mg.importSig(importIndex);
@ -440,13 +422,6 @@ GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, Module::HeapBo
MOZ_CRASH("SIMD types shouldn't be returned from a FFI");
}
// The heap pointer may have changed during the FFI, so reload it and test
// for detachment.
if (usesHeap) {
masm.loadAsmJSHeapRegisterFromGlobalData();
CheckForHeapDetachment(masm, ABIArgGenerator::NonReturn_VolatileReg0, onDetached);
}
GenerateExitEpilogue(masm, framePushed, ExitReason::ImportInterp, offsets);
if (masm.oom())
@ -466,8 +441,8 @@ static const unsigned MaybeSavedGlobalReg = 0;
// signature of the import and calls into a compatible JIT function,
// having boxed all the ABI arguments into the JIT stack frame layout.
static bool
GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, Module::HeapBool usesHeap,
Label* throwLabel, Label* onDetached, ProfilingOffsets* offsets)
GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, bool usesHeap,
Label* throwLabel, ProfilingOffsets* offsets)
{
MacroAssembler& masm = mg.masm();
const MallocSig& sig = mg.importSig(importIndex);
@ -540,8 +515,7 @@ GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, Module::HeapBool
// HeapReg are removed from the general register set for asm.js code, so
// these will not have been saved by the caller like all other registers,
// so they must be explicitly preserved. Only save GlobalReg since
// HeapReg must be reloaded (from global data) after the call since the
// heap may change during the FFI call.
// HeapReg can be reloaded (from global data) after the call.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static_assert(MaybeSavedGlobalReg == sizeof(void*), "stack frame accounting");
masm.storePtr(GlobalReg, Address(masm.getStackPointer(), jitFrameBytes));
@ -698,12 +672,10 @@ GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, Module::HeapBool
Label done;
masm.bind(&done);
// The heap pointer has to be reloaded anyway since JIT code could have
// clobbered it. Additionally, the import may have detached the heap buffer.
if (usesHeap) {
// Ion code does not respect system callee-saved register conventions so
// reload the heap register.
if (usesHeap)
masm.loadAsmJSHeapRegisterFromGlobalData();
CheckForHeapDetachment(masm, ABIArgGenerator::NonReturn_VolatileReg0, onDetached);
}
GenerateExitEpilogue(masm, masm.framePushed(), ExitReason::ImportJit, offsets);
@ -764,32 +736,6 @@ GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, Module::HeapBool
return true;
}
// Generate a stub that is called when returning from an exit where the module's
// buffer has been detached. This stub first calls a C++ function to report an
// exception and then jumps to the generic throw stub to pop everything off the
// stack.
static bool
GenerateOnDetachedStub(ModuleGenerator& mg, Label* onDetached, Label* throwLabel)
{
MacroAssembler& masm = mg.masm();
masm.haltingAlign(CodeAlignment);
Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(onDetached);
// For now, OnDetached always throws (see OnDetached comment).
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::OnDetached);
masm.jump(throwLabel);
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
return mg.defineInlineStub(offsets);
}
// Generate a stub that is called immediately after the prologue when there is a
// stack overflow. This stub calls a C++ function to report the error and then
// jumps to the throw stub to pop the activation.
@ -929,7 +875,7 @@ static const LiveRegisterSet AllRegsExceptSP(
// after restoring all registers. To hack around this, push the resumePC on the
// stack so that it can be popped directly into PC.
static bool
GenerateAsyncInterruptStub(ModuleGenerator& mg, Module::HeapBool usesHeap, Label* throwLabel)
GenerateAsyncInterruptStub(ModuleGenerator& mg, Label* throwLabel)
{
MacroAssembler& masm = mg.masm();
@ -1126,7 +1072,7 @@ GenerateThrowStub(ModuleGenerator& mg, Label* throwLabel)
}
bool
wasm::GenerateStubs(ModuleGenerator& mg, Module::HeapBool usesHeap)
wasm::GenerateStubs(ModuleGenerator& mg, bool usesHeap)
{
for (unsigned i = 0; i < mg.numDeclaredExports(); i++) {
if (!GenerateEntry(mg, i, usesHeap))
@ -1135,26 +1081,17 @@ wasm::GenerateStubs(ModuleGenerator& mg, Module::HeapBool usesHeap)
Label onThrow;
{
Label onDetached;
for (size_t i = 0; i < mg.numDeclaredImports(); i++) {
ProfilingOffsets interp;
if (!GenerateInterpExitStub(mg, i, &onThrow, &interp))
return false;
for (size_t i = 0; i < mg.numDeclaredImports(); i++) {
ProfilingOffsets interp;
if (!GenerateInterpExitStub(mg, i, usesHeap, &onThrow, &onDetached, &interp))
return false;
ProfilingOffsets jit;
if (!GenerateJitExitStub(mg, i, usesHeap, &onThrow, &jit))
return false;
ProfilingOffsets jit;
if (!GenerateJitExitStub(mg, i, usesHeap, &onThrow, &onDetached, &jit))
return false;
if (!mg.defineImport(i, interp, jit))
return false;
}
if (onDetached.used()) {
if (!GenerateOnDetachedStub(mg, &onDetached, &onThrow))
return false;
}
if (!mg.defineImport(i, interp, jit))
return false;
}
if (mg.masm().asmStackOverflowLabel()->used()) {
@ -1178,7 +1115,7 @@ wasm::GenerateStubs(ModuleGenerator& mg, Module::HeapBool usesHeap)
return false;
// Generate unconditionally: the async interrupt may be taken at any time.
if (!GenerateAsyncInterruptStub(mg, usesHeap, &onThrow))
if (!GenerateAsyncInterruptStub(mg, &onThrow))
return false;
if (onThrow.used()) {


@ -25,7 +25,7 @@ namespace js {
namespace wasm {
bool
GenerateStubs(ModuleGenerator& mg, Module::HeapBool usesHeap);
GenerateStubs(ModuleGenerator& mg, bool usesHeap);
} // namespace wasm
} // namespace js


@ -43,34 +43,16 @@ __aeabi_uidivmod(int, int);
}
#endif
namespace js {
namespace wasm {
void
ReportOverRecursed()
{
JSContext* cx = JSRuntime::innermostWasmActivation()->cx();
ReportOverRecursed(cx);
}
bool
HandleExecutionInterrupt()
{
WasmActivation* act = JSRuntime::innermostWasmActivation();
act->module().setInterrupted(true);
bool ret = CheckForInterrupt(act->cx());
act->module().setInterrupted(false);
return ret;
}
} // namespace wasm
} // namespace js
static void
OnDetached()
WasmReportOverRecursed()
{
JSContext* cx = JSRuntime::innermostWasmActivation()->cx();
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_OUT_OF_MEMORY);
ReportOverRecursed(JSRuntime::innermostWasmActivation()->cx());
}
static bool
WasmHandleExecutionInterrupt()
{
return CheckForInterrupt(JSRuntime::innermostWasmActivation()->cx());
}
static void
@ -187,15 +169,13 @@ wasm::AddressOf(SymbolicAddress imm, ExclusiveContext* cx)
case SymbolicAddress::StackLimit:
return cx->stackLimitAddressForJitCode(StackForUntrustedScript);
case SymbolicAddress::ReportOverRecursed:
return FuncCast(wasm::ReportOverRecursed, Args_General0);
case SymbolicAddress::OnDetached:
return FuncCast(OnDetached, Args_General0);
return FuncCast(WasmReportOverRecursed, Args_General0);
case SymbolicAddress::OnOutOfBounds:
return FuncCast(OnOutOfBounds, Args_General0);
case SymbolicAddress::OnImpreciseConversion:
return FuncCast(OnImpreciseConversion, Args_General0);
case SymbolicAddress::HandleExecutionInterrupt:
return FuncCast(wasm::HandleExecutionInterrupt, Args_General0);
return FuncCast(WasmHandleExecutionInterrupt, Args_General0);
case SymbolicAddress::InvokeImport_Void:
return FuncCast(InvokeImport_Void, Args_General3);
case SymbolicAddress::InvokeImport_I32:


@ -557,7 +557,6 @@ enum class SymbolicAddress
RuntimeInterruptUint32,
StackLimit,
ReportOverRecursed,
OnDetached,
OnOutOfBounds,
OnImpreciseConversion,
HandleExecutionInterrupt,


@ -524,7 +524,7 @@ GetCurrentAsmJSHeap(SharedMem<void*>* heap, size_t* length)
{
JSRuntime* rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
wasm::Module& module = rt->wasmActivationStack()->module();
*heap = module.maybeHeap().cast<void*>();
*heap = module.heap().cast<void*>();
*length = module.heapLength();
}


@ -1,33 +0,0 @@
load(libdir + "asm.js");
var byteLength = Function.prototype.call.bind(
Object.getOwnPropertyDescriptor(ArrayBuffer.prototype, "byteLength").get
);
var m = asmCompile("glob", "s", "b", `
"use asm";
var I32 = glob.Int32Array;
var i32 = new I32(b);
var len = glob.byteLength;
function ch(b2) {
if (len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 80000000) {
return false;
}
i32 = new I32(b2);
b = b2;
return true
}
function get(i) {
i = i | 0;
return i32[i >> 2] | 0
}
return {
get: get,
changeHeap: ch
}
`);
var buf1 = new ArrayBuffer(16777216)
var { get, changeHeap } = asmLink(m, this, null, buf1)
assertEq(changeHeap(new ArrayBuffer(33554432)), true)
assertEq(get(), 0)
assertEq(changeHeap(buf1), true);
get();


@ -23,19 +23,8 @@ var buffer = new ArrayBuffer(BUF_MIN);
var {get, set} = asmLink(m, this, null, buffer);
set(4, 42);
assertEq(get(4), 42);
neuter(buffer, "change-data");
neuter(buffer, "same-data");
assertThrowsInstanceOf(() => get(4), InternalError);
var buf1 = new ArrayBuffer(BUF_MIN);
var buf2 = new ArrayBuffer(BUF_MIN);
var {get:get1, set:set1} = asmLink(m, this, null, buf1);
var {get:get2, set:set2} = asmLink(m, this, null, buf2);
set1(0, 13);
set2(0, 42);
neuter(buf1, "change-data");
assertThrowsInstanceOf(() => get1(0), InternalError);
assertEq(get2(0), 42);
assertThrowsInstanceOf(() => neuter(buffer, "change-data"), InternalError);
assertThrowsInstanceOf(() => neuter(buffer, "same-data"), InternalError);
var m = asmCompile('stdlib', 'foreign', 'buffer',
`"use asm";
@ -49,66 +38,5 @@ var m = asmCompile('stdlib', 'foreign', 'buffer',
return inner`);
var buffer = new ArrayBuffer(BUF_MIN);
function ffi1() { neuter(buffer, "change-data"); }
function ffi1() { assertThrowsInstanceOf(() => neuter(buffer, "change-data"), InternalError) }
var inner = asmLink(m, this, {ffi:ffi1}, buffer);
assertThrowsInstanceOf(() => inner(8), InternalError);
var byteLength = Function.prototype.call.bind(Object.getOwnPropertyDescriptor(ArrayBuffer.prototype, 'byteLength').get);
var m = asmCompile('stdlib', 'foreign', 'buffer',
`"use asm";
var ffi = foreign.ffi;
var I32 = stdlib.Int32Array;
var i32 = new I32(buffer);
var len = stdlib.byteLength;
function changeHeap(newBuffer) {
if (len(newBuffer) & 0xffffff || len(newBuffer) <= 0xffffff || len(newBuffer) > 0x80000000)
return false;
i32 = new I32(newBuffer);
buffer = newBuffer;
return true;
}
function get(i) {
i=i|0;
return i32[i>>2]|0;
}
function inner(i) {
i=i|0;
ffi();
return get(i)|0;
}
return {changeHeap:changeHeap, get:get, inner:inner}`);
var buf1 = new ArrayBuffer(BUF_CHANGE_MIN);
var buf2 = new ArrayBuffer(BUF_CHANGE_MIN);
var buf3 = new ArrayBuffer(BUF_CHANGE_MIN);
var buf4 = new ArrayBuffer(BUF_CHANGE_MIN);
new Int32Array(buf2)[13] = 42;
new Int32Array(buf3)[13] = 1024;
new Int32Array(buf4)[13] = 1337;
function ffi2() { neuter(buf1, "change-data"); assertEq(changeHeap(buf2), true); }
var {changeHeap, get:get2, inner} = asmLink(m, this, {ffi:ffi2}, buf1);
assertEq(inner(13*4), 42);
function ffi3() {
assertEq(get2(13*4), 42);
assertEq(get2(BUF_CHANGE_MIN), 0)
assertEq(get3(13*4), 42);
assertEq(get3(BUF_CHANGE_MIN), 0)
neuter(buf2, "change-data");
assertThrowsInstanceOf(()=>get2(13*4), InternalError);
assertThrowsInstanceOf(()=>get2(BUF_CHANGE_MIN), InternalError);
assertThrowsInstanceOf(()=>get3(13*4), InternalError);
assertThrowsInstanceOf(()=>get3(BUF_CHANGE_MIN), InternalError);
assertEq(changeHeap(buf3), true);
assertThrowsInstanceOf(()=>get2(13*4), InternalError);
assertThrowsInstanceOf(()=>get2(BUF_CHANGE_MIN), InternalError);
assertEq(get3(13*4), 1024);
assertEq(get3(BUF_CHANGE_MIN), 0);
assertEq(changeHeap(buf4), true);
}
var {changeHeap, get:get3, inner} = asmLink(m, this, {ffi:ffi3}, buf2);
assertEq(inner(13*4), 1337);
assertThrowsInstanceOf(()=>get2(0), InternalError);
assertEq(get3(BUF_CHANGE_MIN), 0);
assertEq(get3(13*4), 1337);
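The portion of this test that remains after the patch expects detaching a buffer that is linked to an asm.js module to fail outright. A minimal stand-alone sketch of that behavior, reusing the same shell helpers (asmCompile, asmLink, neuter); illustrative only, not taken from this patch:
var sketchBuf = new ArrayBuffer(BUF_MIN);
var sketchFun = asmLink(asmCompile('glob', 'ffis', 'buf', USE_ASM +
    'var i32 = new glob.Int32Array(buf); function f() { return i32[0]|0 } return f'),
    this, null, sketchBuf);
// With change-heap gone, the buffer cannot be swapped or detached out from
// under the module: neuter() now reports an error instead of succeeding.
assertThrowsInstanceOf(() => neuter(sketchBuf, "change-data"), InternalError);
assertEq(sketchFun(), 0); // the module keeps running against the original buffer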


@ -202,16 +202,6 @@ var stacks = disableSingleStepProfiling();
assertStackContainsSeq(stacks, ">,f1,>,<,f1,>,>,<,f1,>,f2,>,<,f1,>,<,f2,>,<,f1,>,f2,>,<,f1,>,>,<,f1,>,<,f1,>,f1,>,>");
// Detachment exit
var buf = new ArrayBuffer(BUF_CHANGE_MIN);
var ffi = function() { neuter(buf, 'change-data') }
var f = asmLink(asmCompile('g','ffis','buf', USE_ASM + 'var ffi = ffis.ffi; var i32 = new g.Int32Array(buf); function f() { ffi() } return f'), this, {ffi:ffi}, buf);
enableSingleStepProfiling();
assertThrowsInstanceOf(f, InternalError);
var stacks = disableSingleStepProfiling();
assertStackContainsSeq(stacks, ">,f,>,<,f,>,inline stub,f,>,<,f,>,inline stub,f,>");
if (isSimdAvailable() && typeof SIMD !== 'undefined') {
// SIMD out-of-bounds exit
var buf = new ArrayBuffer(0x10000);


@ -1,366 +0,0 @@
// |jit-test| test-also-noasmjs
load(libdir + "asm.js");
load(libdir + "asserts.js");
// Tests for importing typed array view constructors
assertAsmTypeFail('glob', USE_ASM + "var I32=glob.Int32Arra; function f() {} return f");
var m = asmCompile('glob', USE_ASM + "var I32=glob.Int32Array; function f() {} return f");
assertAsmLinkFail(m, {});
assertAsmLinkFail(m, {Int32Array:null});
assertAsmLinkFail(m, {Int32Array:{}});
assertAsmLinkFail(m, {Int32Array:Uint32Array});
assertEq(asmLink(m, {Int32Array:Int32Array})(), undefined);
var m = asmCompile('glob', 'ffis', 'buf', USE_ASM + "var I32=glob.Int32Array; function f() {} return f");
assertEq(asmLink(m, this)(), undefined);
assertEq(asmLink(m, this, null, BUF_64KB)(), undefined);
assertAsmTypeFail('glob', 'ffis', 'buf', USE_ASM + 'var I32=glob.Int32Array; var i32=new I3(buf); function f() {} return f');
assertAsmTypeFail('glob', 'ffis', 'buf', USE_ASM + 'var I32=0; var i32=new I32(buf); function f() {} return f');
var m = asmCompile('glob', 'ffis', 'buf', USE_ASM + 'var I32=glob.Int32Array; var i32=new I32(buf); function f() {} return f');
assertAsmLinkFail(m, this, null, {});
assertAsmLinkAlwaysFail(m, this, null, null);
assertAsmLinkFail(m, this, null, new ArrayBuffer(100));
assertEq(asmLink(m, this, null, BUF_64KB)(), undefined);
var m = asmCompile('glob', 'ffis', 'buf', USE_ASM + 'var I32=glob.Int32Array; var i32=new glob.Int32Array(buf); function f() {} return f');
assertAsmLinkFail(m, this, null, {});
assertAsmLinkAlwaysFail(m, this, null, null);
assertAsmLinkFail(m, this, null, new ArrayBuffer(100));
assertEq(asmLink(m, this, null, BUF_64KB)(), undefined);
var m = asmCompile('glob', 'ffis', 'buf', USE_ASM + 'var F32=glob.Float32Array; var i32=new glob.Int32Array(buf); function f() {} return f');
assertAsmLinkFail(m, this, null, {});
assertAsmLinkAlwaysFail(m, this, null, null);
assertAsmLinkFail(m, this, null, new ArrayBuffer(100));
assertEq(asmLink(m, this, null, BUF_64KB)(), undefined);
// Tests for link-time validation of byteLength import
assertAsmTypeFail('glob', 'ffis', 'buf', USE_ASM + 'var byteLength=glob.byteLength; function f() { return byteLength(1)|0 } return f');
var m = asmCompile('glob', 'ffis', 'buf', USE_ASM + 'var byteLength=glob.byteLength; function f() { return 42 } return f');
assertEq('byteLength' in this, false);
assertAsmLinkFail(m, this);
this['byteLength'] = null;
assertAsmLinkFail(m, this);
this['byteLength'] = {};
assertAsmLinkFail(m, this);
this['byteLength'] = function(){}
assertAsmLinkFail(m, this);
this['byteLength'] = (function(){}).bind(null);
assertAsmLinkFail(m, this);
this['byteLength'] = Function.prototype.call.bind();
assertAsmLinkFail(m, this);
this['byteLength'] = Function.prototype.call.bind({});
assertAsmLinkFail(m, this);
this['byteLength'] = Function.prototype.call.bind(function f() {});
assertAsmLinkFail(m, this);
this['byteLength'] = Function.prototype.call.bind(Math.sin);
assertAsmLinkFail(m, this);
this['byteLength'] =
Function.prototype.call.bind(Object.getOwnPropertyDescriptor(ArrayBuffer.prototype, 'byteLength').get);
assertEq(asmLink(m, this)(), 42);
var m = asmCompile('glob', 'ffis', 'buf', USE_ASM + 'var b1=glob.byteLength, b2=glob.byteLength; function f() { return 43 } return f');
assertEq(asmLink(m, this)(), 43);
// Tests for validation of change-heap function
const BYTELENGTH_IMPORT = "var len = glob.byteLength; ";
const IMPORT0 = BYTELENGTH_IMPORT;
const IMPORT1 = "var I8=glob.Int8Array; var i8=new I8(b); " + BYTELENGTH_IMPORT;
const IMPORT2 = "var I8=glob.Int8Array; var i8=new I8(b); var I32=glob.Int32Array; var i32=new I32(b); var II32=glob.Int32Array; " + BYTELENGTH_IMPORT;
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function f() { return 42 } function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function b(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function f(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2=1) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2,xyz) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(...r) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2,...r) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch({b2}) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { ;if((len((b2))) & (0xffffff) || (len((b2)) <= (0xffffff)) || len(b2) > 0x80000000) {;;return false;;} ; i8=new I8(b2);; b=b2;; return true;; } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function ch2(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { 3; if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { b2=b2|0; if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(1) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(1 || 1) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(1 || 1 || 1) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(1 || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(1 & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || 1 || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(i8(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(xyz) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff && len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) | 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) == 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xfffffe || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0x1ffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0x7fffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) < 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xfffffe || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0x1000000 || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || 1) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) < 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || 1 > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0.0) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0xffffff) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x1000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffffff || len(b2) > 0x1000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0x1000000 || len(b2) > 0x1000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0x1000000 || len(b2) > 0x1000001) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000001) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) ; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) {} i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) {return false} i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return true; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT0 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i7=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; b=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=1; b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new 1; b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I7(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new b(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8; b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(1); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2,1); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); xyz=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=1; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; 1; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return 1 } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return false } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; if (0) return true; 1 } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT2 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT2 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i32=new I32(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT2 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i32=new I32(b2); i8=new I8(b2); b=b2; return true } function f() { return 42 } return f');
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT2 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); i32=new I32(b2); b=b2; return true } function f() { return 42 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', USE_ASM + IMPORT2 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I32(b2); i32=new I8(b2); b=b2; return true } function f() { return 42 } return f');
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT2 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); i32=new II32(b2); b=b2; return true } function f() { return 42 } return f');
// Tests for no calls in heap index expressions
const CHANGE_FUN = 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i8=new I8(b2); i32=new I32(b2); b=b2; return true }';
const SETUP = USE_ASM + IMPORT2 + 'var imul=glob.Math.imul; var ffi=ffis.ffi;' + CHANGE_FUN;
asmCompile('glob', 'ffis', 'b', SETUP + 'function f() { i32[0] } return f');
asmCompile('glob', 'ffis', 'b', SETUP + 'function f() { i32[0] = 0 } return f');
asmCompile('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[i >> 2] } return f');
asmCompile('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[i >> 2] = 0 } return f');
asmCompile('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[(imul(i,i)|0) >> 2] = 0 } return f');
asmCompile('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[i >> 2] = (imul(i,i)|0) } return f');
assertAsmTypeFail('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[(ffi()|0) >> 2] } return f');
assertAsmTypeFail('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[(g()|0) >> 2] } function g() { return 0 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[(TBL[i&0]()|0) >> 2] } function g() { return 0 } var TBL=[g]; return f');
assertAsmTypeFail('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[(g()|0) >> 2] = 0 } function g() { return 0 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[i >> 2] = g()|0 } function g() { return 0 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[i32[(g()|0)>>2] >> 2] } function g() { return 0 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[i32[(g()|0)>>2] >> 2] = 0 } function g() { return 0 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[i >> 2] = i32[(g()|0)>>2] } function g() { return 0 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[((i32[i>>2]|0) + (g()|0)) >> 2] } function g() { return 0 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[((i32[i>>2]|0) + (g()|0)) >> 2] = 0 } function g() { return 0 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', SETUP + 'function f() { var i = 0; i32[i >> 2] = (i32[i>>2]|0) + (g()|0) } function g() { return 0 } return f');
if (isSimdAvailable() && typeof SIMD !== 'undefined')
asmCompile('glob', 'ffis', 'b', USE_ASM + IMPORT2 + 'var i4 = glob.SIMD.Int32x4; var ext = i4.extractLane; var add = i4.add;' + CHANGE_FUN + 'function f(i) { i=i|0; i32[ext(i4(i,1,2,i),0) >> 2]; i32[ext(add(i4(0,0,0,0),i4(1,1,1,1)),0) >> 2]; } return f');
// Tests for constant heap accesses when change-heap is used
const HEADER = USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= MIN || len(b2) > 0x80000000) return false; i8=new I8(b2); b=b2; return true } ';
assertAsmTypeFail('glob', 'ffis', 'b', HEADER.replace('MIN', '0xffffff') + 'function f() { i8[0x1000000] = 0 } return f');
asmCompile('glob', 'ffis', 'b', HEADER.replace('MIN', '0xffffff') + 'function f() { i8[0xffffff] = 0 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', HEADER.replace('MIN', '0x1000000') + 'function f() { i8[0x1000001] = 0 } return f');
asmCompile('glob', 'ffis', 'b', HEADER.replace('MIN', '0x1000000') + 'function f() { i8[0x1000000] = 0 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', HEADER.replace('MIN', '0xffffff') + 'function f() { return i8[0x1000000]|0 } return f');
asmCompile('glob', 'ffis', 'b', HEADER.replace('MIN', '0xffffff') + 'function f() { return i8[0xffffff]|0 } return f');
assertAsmTypeFail('glob', 'ffis', 'b', HEADER.replace('MIN', '0x1000000') + 'function f() { return i8[0x1000001]|0 } return f');
asmCompile('glob', 'ffis', 'b', HEADER.replace('MIN', '0x1000000') + 'function f() { return i8[0x1000000]|0 } return f');
// Tests for validation of heap length
var body = USE_ASM + IMPORT1 + 'function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0x1ffffff || len(b2) > 0x4000000) return false; i8=new I8(b2); b=b2; return true } function f() { return 42 } return ch';
var m = asmCompile('glob', 'ffis', 'b', body);
assertAsmLinkFail(m, this, null, new ArrayBuffer(BUF_CHANGE_MIN));
assertAsmLinkFail(m, this, null, new ArrayBuffer(0x1000000));
var changeHeap = asmLink(m, this, null, new ArrayBuffer(0x2000000));
assertEq(changeHeap(new ArrayBuffer(0x1000000)), false);
assertEq(changeHeap(new ArrayBuffer(0x2000000)), true);
assertEq(changeHeap(new ArrayBuffer(0x2000001)), false);
assertEq(changeHeap(new ArrayBuffer(0x4000000)), true);
assertEq(changeHeap(new ArrayBuffer(0x5000000)), false);
assertThrowsInstanceOf(() => changeHeap(null), TypeError);
assertThrowsInstanceOf(() => changeHeap({}), TypeError);
assertThrowsInstanceOf(() => changeHeap(new Int32Array(100)), TypeError);
var detached = new ArrayBuffer(BUF_CHANGE_MIN);
neuter(detached, "change-data");
assertEq(changeHeap(detached), false);
// Tests for runtime changing heap
const CHANGE_HEAP = 'var changeHeap = glob.byteLength;';
var changeHeapSource = `function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i32=new I32(b2); b=b2; return true }`;
var body = `var I32=glob.Int32Array; var i32=new I32(b);
var len=glob.byteLength;` +
changeHeapSource +
`function get(i) { i=i|0; return i32[i>>2]|0 }
function set(i, v) { i=i|0; v=v|0; i32[i>>2] = v }
return {get:get, set:set, changeHeap:ch}`;
var m = asmCompile('glob', 'ffis', 'b', USE_ASM + body);
var buf1 = new ArrayBuffer(BUF_CHANGE_MIN);
var {get, set, changeHeap} = asmLink(m, this, null, buf1);
assertEq(m.toString(), "function anonymous(glob, ffis, b) {\n" + USE_ASM + body + "\n}");
assertEq(m.toSource(), "(function anonymous(glob, ffis, b) {\n" + USE_ASM + body + "\n})");
assertEq(changeHeap.toString(), changeHeapSource);
assertEq(changeHeap.toSource(), changeHeapSource);
set(0, 42);
set(4, 13);
set(4, 13);
assertEq(get(0), 42);
assertEq(get(4), 13);
set(BUF_CHANGE_MIN, 262);
assertEq(get(BUF_CHANGE_MIN), 0);
var buf2 = new ArrayBuffer(2*BUF_CHANGE_MIN);
assertEq(changeHeap(buf2), true);
assertEq(get(0), 0);
assertEq(get(4), 0);
set(BUF_CHANGE_MIN, 262);
assertEq(get(BUF_CHANGE_MIN), 262);
set(2*BUF_CHANGE_MIN, 262);
assertEq(get(2*BUF_CHANGE_MIN), 0);
changeHeap(buf1);
assertEq(get(0), 42);
assertEq(get(4), 13);
set(BUF_CHANGE_MIN, 262);
assertEq(get(BUF_CHANGE_MIN), 0);
if (ArrayBuffer.transfer) {
var buf1 = new ArrayBuffer(BUF_CHANGE_MIN);
var {get, set, changeHeap} = asmLink(m, this, null, buf1);
set(0, 100);
set(BUF_CHANGE_MIN - 4, 101);
set(BUF_CHANGE_MIN, 102);
var buf2 = ArrayBuffer.transfer(buf1);
assertEq(changeHeap(buf2), true);
assertEq(buf1.byteLength, 0);
assertEq(buf2.byteLength, BUF_CHANGE_MIN);
assertEq(get(0), 100);
assertEq(get(BUF_CHANGE_MIN-4), 101);
assertEq(get(BUF_CHANGE_MIN), 0);
assertEq(get(2*BUF_CHANGE_MIN-4), 0);
var buf3 = ArrayBuffer.transfer(buf2, 3*BUF_CHANGE_MIN);
assertEq(changeHeap(buf3), true);
assertEq(buf2.byteLength, 0);
assertEq(buf3.byteLength, 3*BUF_CHANGE_MIN);
assertEq(get(0), 100);
assertEq(get(BUF_CHANGE_MIN-4), 101);
assertEq(get(BUF_CHANGE_MIN), 0);
assertEq(get(2*BUF_CHANGE_MIN), 0);
set(BUF_CHANGE_MIN, 102);
set(2*BUF_CHANGE_MIN, 103);
assertEq(get(BUF_CHANGE_MIN), 102);
assertEq(get(2*BUF_CHANGE_MIN), 103);
var buf4 = ArrayBuffer.transfer(buf3, 2*BUF_CHANGE_MIN);
assertEq(changeHeap(buf4), true);
assertEq(buf3.byteLength, 0);
assertEq(buf4.byteLength, 2*BUF_CHANGE_MIN);
assertEq(get(0), 100);
assertEq(get(BUF_CHANGE_MIN-4), 101);
assertEq(get(BUF_CHANGE_MIN), 102);
assertEq(get(2*BUF_CHANGE_MIN), 0);
var buf5 = ArrayBuffer.transfer(buf4, 3*BUF_CHANGE_MIN);
assertEq(changeHeap(buf5), true);
assertEq(buf4.byteLength, 0);
assertEq(buf5.byteLength, 3*BUF_CHANGE_MIN);
assertEq(get(0), 100);
assertEq(get(BUF_CHANGE_MIN-4), 101);
assertEq(get(BUF_CHANGE_MIN), 102);
assertEq(get(2*BUF_CHANGE_MIN), 0);
var buf6 = ArrayBuffer.transfer(buf5, 0);
assertEq(buf5.byteLength, 0);
assertEq(buf6.byteLength, 0);
assertEq(changeHeap(buf6), false);
}
var buf1 = new ArrayBuffer(BUF_CHANGE_MIN);
var buf2 = new ArrayBuffer(BUF_CHANGE_MIN);
var m = asmCompile('glob', 'ffis', 'b', USE_ASM +
`var len=glob.byteLength;
function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; b=b2; return true }
return ch`);
var changeHeap = asmLink(m, this, null, buf1);
assertEq(changeHeap(buf2), true);
neuter(buf2, "change-data");
assertEq(changeHeap(buf1), true);
neuter(buf1, "change-data");
var buf1 = new ArrayBuffer(BUF_CHANGE_MIN);
new Int32Array(buf1)[0] = 13;
var buf2 = new ArrayBuffer(BUF_CHANGE_MIN);
new Int32Array(buf2)[0] = 42;
// Tests for changing heap during an FFI:
// Set the warmup to '2' so we can hit both interp and ion FFI exits
setJitCompilerOption("ion.warmup.trigger", 2);
setJitCompilerOption("baseline.warmup.trigger", 0);
setJitCompilerOption("offthread-compilation.enable", 0);
var changeToBuf = null;
var m = asmCompile('glob', 'ffis', 'b', USE_ASM +
`var ffi=ffis.ffi;
var I32=glob.Int32Array; var i32=new I32(b);
var len=glob.byteLength;
function ch(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i32=new I32(b2); b=b2; return true }
function test(i) { i=i|0; var sum=0; sum = i32[i>>2]|0; sum = (sum + (ffi()|0))|0; sum = (sum + (i32[i>>2]|0))|0; return sum|0 }
return {test:test, changeHeap:ch}`);
var ffi = function() { changeHeap(changeToBuf); return 1 }
var {test, changeHeap} = asmLink(m, this, {ffi:ffi}, buf1);
changeToBuf = buf1;
assertEq(test(0), 27);
changeToBuf = buf2;
assertEq(test(0), 56);
changeToBuf = buf2;
assertEq(test(0), 85);
changeToBuf = buf1;
assertEq(test(0), 56);
changeToBuf = buf1;
assertEq(test(0), 27);
var ffi = function() { return { valueOf:function() { changeHeap(changeToBuf); return 100 } } };
var {test, changeHeap} = asmLink(m, this, {ffi:ffi}, buf1);
changeToBuf = buf1;
assertEq(test(0), 126);
changeToBuf = buf2;
assertEq(test(0), 155);
changeToBuf = buf2;
assertEq(test(0), 184);
changeToBuf = buf1;
assertEq(test(0), 155);
changeToBuf = buf1;
assertEq(test(0), 126);
if (ArrayBuffer.transfer) {
var buf = new ArrayBuffer(BUF_CHANGE_MIN);
new Int32Array(buf)[0] = 3;
var ffi = function() {
var buf2 = ArrayBuffer.transfer(buf, 2*BUF_CHANGE_MIN);
new Int32Array(buf2)[BUF_CHANGE_MIN/4] = 13;
assertEq(changeHeap(buf2), true);
return 1
}
var {test, changeHeap} = asmLink(m, this, {ffi:ffi}, buf);
assertEq(test(BUF_CHANGE_MIN), 14);
}
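Every test in this removed file exercised the change-heap protocol; once this patch lands, an asm.js module's heap buffer is bound at link time and never rebound. A minimal sketch of the fixed-heap form that stays valid (hypothetical example, not part of the patch):
var fixedBuf = new ArrayBuffer(0x10000); // 64KB, a valid asm.js heap length
var fixedMod = asmCompile('glob', 'ffis', 'buf', USE_ASM +
    `var I32 = glob.Int32Array;
     var i32 = new I32(buf); // the only heap view; never rebound
     function get(i) { i = i|0; return i32[i>>2]|0 }
     function set(i, v) { i = i|0; v = v|0; i32[i>>2] = v }
     return {get: get, set: set}`);
var {get: fixedGet, set: fixedSet} = asmLink(fixedMod, this, null, fixedBuf);
fixedSet(0, 42);
assertEq(fixedGet(0), 42);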


@ -1,29 +0,0 @@
// |jit-test| exitstatus: 6;
load(libdir + "asm.js");
// This test may iloop for valid reasons if not compiled with asm.js (namely,
// inlining may allow the heap load to be hoisted out of the loop).
if (!isAsmJSCompilationAvailable())
quit(6);
setJitCompilerOption("signals.enable", 0);
var byteLength =
Function.prototype.call.bind(Object.getOwnPropertyDescriptor(ArrayBuffer.prototype, 'byteLength').get);
var buf1 = new ArrayBuffer(BUF_CHANGE_MIN);
new Int32Array(buf1)[0] = 13;
var buf2 = new ArrayBuffer(BUF_CHANGE_MIN);
new Int32Array(buf2)[0] = 42;
// Test changeHeap from interrupt (as if that could ever happen...)
var m = asmCompile('glob', 'ffis', 'b', USE_ASM +
`var I32=glob.Int32Array; var i32=new I32(b);
var len=glob.byteLength;
function changeHeap(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i32=new I32(b2); b=b2; return true }
function f() {}
function loop(i) { i=i|0; while((i32[i>>2]|0) == 13) { f() } }
return {loop:loop, changeHeap:changeHeap}`);
var { loop, changeHeap } = asmLink(m, this, null, buf1);
timeout(1, function() { assertEq(changeHeap(buf2), false); return false });
loop(0);


@ -1,27 +0,0 @@
// |jit-test| exitstatus: 6;
load(libdir + "asm.js");
// This test may iloop for valid reasons if not compiled with asm.js (namely,
// inlining may allow the heap load to be hoisted out of the loop).
if (!isAsmJSCompilationAvailable())
quit(6);
var byteLength =
Function.prototype.call.bind(Object.getOwnPropertyDescriptor(ArrayBuffer.prototype, 'byteLength').get);
var buf1 = new ArrayBuffer(BUF_CHANGE_MIN);
new Int32Array(buf1)[0] = 13;
var buf2 = new ArrayBuffer(BUF_CHANGE_MIN);
new Int32Array(buf2)[0] = 42;
// Test changeHeap from interrupt (as if that could ever happen...)
var m = asmCompile('glob', 'ffis', 'b', USE_ASM +
`var I32=glob.Int32Array; var i32=new I32(b);
var len=glob.byteLength;
function changeHeap(b2) { if(len(b2) & 0xffffff || len(b2) <= 0xffffff || len(b2) > 0x80000000) return false; i32=new I32(b2); b=b2; return true }
function f() {}
function loop(i) { i=i|0; while((i32[i>>2]|0) == 13) { f() } }
return {loop:loop, changeHeap:changeHeap}`);
var { loop, changeHeap } = asmLink(m, this, null, buf1);
timeout(1, function() { assertEq(changeHeap(buf2), false); return false });
loop(0);


@ -1,123 +0,0 @@
load(libdir + "asserts.js");
load(libdir + "asm.js");
// Currently, ArrayBuffer.transfer is #ifdef NIGHTLY_BUILD. When
// ArrayBuffer.transfer is enabled on release, this test should be removed.
if (!ArrayBuffer.transfer)
quit();
var XF = ArrayBuffer.transfer;
assertEq(typeof XF, "function");
assertEq(XF.length, 2);
// arg 1 errors
assertThrowsInstanceOf(()=>XF(), Error);
assertThrowsInstanceOf(()=>XF(undefined), Error);
assertThrowsInstanceOf(()=>XF(null), Error);
assertThrowsInstanceOf(()=>XF({}), Error);
assertThrowsInstanceOf(()=>XF(new Int32Array(1)), Error);
var buf = new ArrayBuffer(1);
neuter(buf, 'change-data');
assertThrowsInstanceOf(()=>XF(buf), TypeError);
// arg 2 errors
var buf = new ArrayBuffer(1);
assertThrowsInstanceOf(()=>XF(buf, -1), Error);
assertThrowsInstanceOf(()=>XF(buf, {valueOf() { return -1 }}), Error);
assertThrowsInstanceOf(()=>XF(buf, {toString() { return "-1" }}), Error);
assertThrowsValue(()=>XF(buf, {valueOf() { throw "wee" }}), "wee");
// arg 2 is coerced via ToInt32
var buf = new ArrayBuffer(1);
assertThrowsInstanceOf(()=>XF(buf, Math.pow(2,31)), Error);
buf = XF(buf, Math.pow(2,32));
assertEq(buf.byteLength, 0);
buf = XF(buf, Math.pow(2,32) + 10);
assertEq(buf.byteLength, 10);
assertThrowsInstanceOf(()=>XF(buf, {valueOf() { neuter(buf, "change-data"); return 10; }}), TypeError);
var buf = new ArrayBuffer(100);
assertThrowsInstanceOf(()=>XF(buf, {valueOf() { ArrayBuffer.transfer(buf, 0); return 100; }}), TypeError);
// on undefined second argument, stay the same size:
var buf1 = new ArrayBuffer(0);
var buf2 = XF(buf1);
assertEq(buf1.byteLength, 0);
assertEq(buf2.byteLength, 0);
assertThrowsInstanceOf(()=>XF(buf1), TypeError);
var buf1 = new ArrayBuffer(3);
var buf2 = XF(buf1);
assertEq(buf1.byteLength, 0);
assertEq(buf2.byteLength, 3);
assertThrowsInstanceOf(()=>XF(buf1), TypeError);
var buf1 = new ArrayBuffer(9);
var buf2 = XF(buf1, undefined);
assertEq(buf1.byteLength, 0);
assertEq(buf2.byteLength, 9);
assertThrowsInstanceOf(()=>XF(buf1), TypeError);
// cross-compartment wrapper
var buf3 = newGlobal().eval("new ArrayBuffer(10)");
var buf4 = XF(buf3, 20);
assertEq(buf4.byteLength, 20);
assertThrowsInstanceOf(()=>XF(buf3), TypeError);
// test transferring to/from various sizes
function test(N1, N2) {
var buf1 = new ArrayBuffer(N1);
var i32 = new Int32Array(buf1);
for (var i = 0; i < i32.length; i++)
i32[i] = i;
var buf2 = XF(buf1, N2);
assertEq(buf1.byteLength, 0);
assertEq(i32.length, 0);
assertEq(buf2.byteLength, N2);
var i32 = new Int32Array(buf2);
for (var i = 0; i < Math.min(N1, N2)/4; i++)
assertEq(i32[i], i);
for (var i = Math.min(N1, N2)/4; i < i32.length; i++) {
assertEq(i32[i], 0);
i32[i] = -i;
}
}
test(0, 0);
test(0, 4);
test(4, 0);
test(4, 4);
test(0, 1000);
test(4, 1000);
test(1000, 0);
test(1000, 4);
test(1000, 1000);
// asm.js:
function testAsmJS(N1, N2) {
var buf1 = new ArrayBuffer(N1);
asmLink(asmCompile('stdlib', 'ffis', 'buf', USE_ASM + "var i32=new stdlib.Int32Array(buf); function f() {} return f"), this, null, buf1);
var i32 = new Int32Array(buf1);
for (var i = 0; i < i32.length; i+=100)
i32[i] = i;
var buf2 = XF(buf1, N2);
assertEq(buf1.byteLength, 0);
assertEq(i32.length, 0);
assertEq(buf2.byteLength, N2);
var i32 = new Int32Array(buf2);
var i = 0;
for (; i < Math.min(N1, N2)/4; i+=100)
assertEq(i32[i], i);
for (; i < i32.length; i+=100) {
assertEq(i32[i], 0);
i32[i] = -i;
}
}
testAsmJS(BUF_MIN, 0);
testAsmJS(BUF_MIN, BUF_MIN);
testAsmJS(BUF_MIN, 2*BUF_MIN);
testAsmJS(2*BUF_MIN, BUF_MIN);
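The experimental ArrayBuffer.transfer exercised above is removed along with its implementation (see the ArrayBufferObject changes further down). Code that only relied on the copy-and-resize behavior can approximate it in plain JS; a rough, hypothetical stand-in, not part of this change:
// Copies the overlapping prefix of oldBuf into a fresh buffer of newLen bytes.
// Unlike the removed built-in, it cannot detach oldBuf or recycle its pages,
// so existing views stay usable and the copy costs O(min(old, new)) bytes.
function transferByCopy(oldBuf, newLen) {
  if (newLen === undefined)
    newLen = oldBuf.byteLength;
  var newBuf = new ArrayBuffer(newLen);
  new Uint8Array(newBuf).set(new Uint8Array(oldBuf, 0, Math.min(oldBuf.byteLength, newLen)));
  return newBuf;
}
var a = new ArrayBuffer(8);
new Int32Array(a)[0] = 7;
var b = transferByCopy(a, 16);
assertEq(new Int32Array(b)[0], 7);
assertEq(b.byteLength, 16);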


@ -699,13 +699,13 @@ class FunctionExtended : public JSFunction
* All asm.js/wasm functions store their compiled module (either
* WasmModuleObject or AsmJSModuleObject) in the first extended slot.
*/
static const unsigned ASM_MODULE_SLOT = 0;
static const unsigned WASM_MODULE_SLOT = 0;
/*
* wasm/asm.js exported functions store the index of the export in the
* module's export vector in the second slot.
*/
static const unsigned ASM_EXPORT_INDEX_SLOT = 1;
static const unsigned WASM_EXPORT_INDEX_SLOT = 1;
static inline size_t offsetOfExtendedSlot(unsigned which) {
MOZ_ASSERT(which < NUM_EXTENDED_SLOTS);


@ -3,14 +3,10 @@
// http://creativecommons.org/licenses/publicdomain/
function test() {
// Note: -8 and -200 will trigger asm.js link failures because 8 and 200
// bytes are below the minimum allowed size, and the buffer will not
// actually be converted to an asm.js buffer.
for (var size of [0, 8, 16, 200, 1000, 4096, -8, -200, -8192, -65536]) {
var buffer_ctor = (size < 0) ? AsmJSArrayBuffer : ArrayBuffer;
size = Math.abs(size);
var old = new buffer_ctor(size);
var old = new ArrayBuffer(size);
var copy = deserialize(serialize(old, [old]));
assertEq(old.byteLength, 0);
assertEq(copy.byteLength, size);
@ -29,7 +25,7 @@ function test() {
for (var ctor of constructors) {
var dataview = (ctor === DataView);
var buf = new buffer_ctor(size);
var buf = new ArrayBuffer(size);
var old_arr = new ctor(buf);
assertEq(buf.byteLength, size);
assertEq(buf, old_arr.buffer);
@ -52,7 +48,7 @@ function test() {
for (var ctor of constructors) {
var dataview = (ctor === DataView);
var buf = new buffer_ctor(size);
var buf = new ArrayBuffer(size);
var old_arr = new ctor(buf);
var dv = new DataView(buf); // Second view
var copy_arr = deserialize(serialize(old_arr, [ buf ]));
@ -69,7 +65,7 @@ function test() {
// Mutate the buffer during the clone operation. The modifications should be visible.
if (size >= 4) {
old = new buffer_ctor(size);
old = new ArrayBuffer(size);
var view = new Int32Array(old);
view[0] = 1;
var mutator = { get foo() { view[0] = 2; } };
@ -81,7 +77,7 @@ function test() {
// Neuter the buffer during the clone operation. Should throw an exception.
if (size >= 4) {
old = new buffer_ctor(size);
old = new ArrayBuffer(size);
var mutator = {
get foo() {
deserialize(serialize(old, [old]));


@ -16,17 +16,3 @@ if (typeof version != 'undefined')
{
version(185);
}
// Note that AsmJS ArrayBuffers have a minimum size, currently 4096 bytes. If a
// smaller size is given, a regular ArrayBuffer will be returned instead.
function AsmJSArrayBuffer(size) {
var ab = new ArrayBuffer(size);
(new Function('global', 'foreign', 'buffer', '' +
' "use asm";' +
' var i32 = new global.Int32Array(buffer);' +
' function g() {};' +
' return g;' +
''))(Function("return this")(),null,ab);
return ab;
}


@ -130,9 +130,6 @@ const JSFunctionSpec ArrayBufferObject::jsfuncs[] = {
const JSFunctionSpec ArrayBufferObject::jsstaticfuncs[] = {
JS_FN("isView", ArrayBufferObject::fun_isView, 1, 0),
#ifdef NIGHTLY_BUILD
JS_FN("transfer", ArrayBufferObject::fun_transfer, 2, 0),
#endif
JS_FS_END
};
@ -233,223 +230,6 @@ ArrayBufferObject::fun_isView(JSContext* cx, unsigned argc, Value* vp)
return true;
}
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
static void
ReleaseAsmJSMappedData(void* base)
{
MOZ_ASSERT(uintptr_t(base) % AsmJSPageSize == 0);
# ifdef XP_WIN
VirtualFree(base, 0, MEM_RELEASE);
# else
munmap(base, AsmJSMappedSize);
# if defined(MOZ_VALGRIND) && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
// Tell Valgrind/Memcheck to recommence reporting accesses in the
// previously-inaccessible region.
if (AsmJSMappedSize > 0) {
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(base, AsmJSMappedSize);
}
# endif
# endif
MemProfiler::RemoveNative(base);
}
#else
static void
ReleaseAsmJSMappedData(void* base)
{
MOZ_CRASH("asm.js only uses mapped buffers when using signal-handler OOB checking");
}
#endif
#ifdef NIGHTLY_BUILD
# if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
static bool
TransferAsmJSMappedBuffer(JSContext* cx, const CallArgs& args,
Handle<ArrayBufferObject*> oldBuffer, size_t newByteLength)
{
size_t oldByteLength = oldBuffer->byteLength();
MOZ_ASSERT(oldByteLength % AsmJSPageSize == 0);
MOZ_ASSERT(newByteLength % AsmJSPageSize == 0);
ArrayBufferObject::BufferContents stolen =
ArrayBufferObject::stealContents(cx, oldBuffer, /* hasStealableContents = */ true);
if (!stolen)
return false;
MOZ_ASSERT(stolen.kind() == ArrayBufferObject::ASMJS_MAPPED);
uint8_t* data = stolen.data();
if (newByteLength > oldByteLength) {
void* diffStart = data + oldByteLength;
size_t diffLength = newByteLength - oldByteLength;
# ifdef XP_WIN
if (!VirtualAlloc(diffStart, diffLength, MEM_COMMIT, PAGE_READWRITE)) {
ReleaseAsmJSMappedData(data);
ReportOutOfMemory(cx);
return false;
}
# else
// To avoid memset, use MAP_FIXED to clobber the newly-accessible pages
// with zero pages.
int flags = MAP_FIXED | MAP_PRIVATE | MAP_ANON;
if (mmap(diffStart, diffLength, PROT_READ | PROT_WRITE, flags, -1, 0) == MAP_FAILED) {
ReleaseAsmJSMappedData(data);
ReportOutOfMemory(cx);
return false;
}
# endif
MemProfiler::SampleNative(diffStart, diffLength);
} else if (newByteLength < oldByteLength) {
void* diffStart = data + newByteLength;
size_t diffLength = oldByteLength - newByteLength;
# ifdef XP_WIN
if (!VirtualFree(diffStart, diffLength, MEM_DECOMMIT)) {
ReleaseAsmJSMappedData(data);
ReportOutOfMemory(cx);
return false;
}
# else
if (madvise(diffStart, diffLength, MADV_DONTNEED) ||
mprotect(diffStart, diffLength, PROT_NONE))
{
ReleaseAsmJSMappedData(data);
ReportOutOfMemory(cx);
return false;
}
# endif
}
ArrayBufferObject::BufferContents newContents =
ArrayBufferObject::BufferContents::create<ArrayBufferObject::ASMJS_MAPPED>(data);
RootedObject newBuffer(cx, ArrayBufferObject::create(cx, newByteLength, newContents));
if (!newBuffer) {
ReleaseAsmJSMappedData(data);
return false;
}
args.rval().setObject(*newBuffer);
return true;
}
# endif // defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
/*
* Experimental implementation of ArrayBuffer.transfer:
* https://gist.github.com/andhow/95fb9e49996615764eff
* which is currently in the early stages of proposal for ES7.
*/
bool
ArrayBufferObject::fun_transfer(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
HandleValue oldBufferArg = args.get(0);
HandleValue newByteLengthArg = args.get(1);
if (!oldBufferArg.isObject()) {
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_BAD_ARGS);
return false;
}
RootedObject oldBufferObj(cx, &oldBufferArg.toObject());
ESClassValue cls;
if (!GetBuiltinClass(cx, oldBufferObj, &cls))
return false;
if (cls != ESClass_ArrayBuffer) {
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_BAD_ARGS);
return false;
}
// Beware: oldBuffer can point across compartment boundaries. ArrayBuffer
// contents are not compartment-specific so this is safe.
Rooted<ArrayBufferObject*> oldBuffer(cx);
if (oldBufferObj->is<ArrayBufferObject>()) {
oldBuffer = &oldBufferObj->as<ArrayBufferObject>();
} else {
JSObject* unwrapped = CheckedUnwrap(oldBufferObj);
if (!unwrapped || !unwrapped->is<ArrayBufferObject>()) {
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_BAD_ARGS);
return false;
}
oldBuffer = &unwrapped->as<ArrayBufferObject>();
}
size_t oldByteLength = oldBuffer->byteLength();
size_t newByteLength;
if (newByteLengthArg.isUndefined()) {
newByteLength = oldByteLength;
} else {
int32_t i32;
if (!ToInt32(cx, newByteLengthArg, &i32))
return false;
if (i32 < 0) {
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_BAD_ARRAY_LENGTH);
return false;
}
newByteLength = size_t(i32);
}
if (oldBuffer->isNeutered()) {
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_DETACHED);
return false;
}
UniquePtr<uint8_t, JS::FreePolicy> newData;
if (!newByteLength) {
if (!ArrayBufferObject::neuter(cx, oldBuffer, oldBuffer->contents()))
return false;
} else {
# if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
// With a 4gb mapped asm.js buffer, we can simply enable/disable access
// to the delta as long as the requested length is page-sized.
if (oldBuffer->isAsmJSMapped() && (newByteLength % AsmJSPageSize) == 0)
return TransferAsmJSMappedBuffer(cx, args, oldBuffer, newByteLength);
# endif
// Since we try to realloc below, only allow stealing malloc'd buffers.
// If !hasMallocedContents, stealContents will malloc a copy which we
// can then realloc.
bool steal = oldBuffer->hasMallocedContents();
auto stolenContents = ArrayBufferObject::stealContents(cx, oldBuffer, steal);
if (!stolenContents)
return false;
UniquePtr<uint8_t, JS::FreePolicy> oldData(stolenContents.data());
if (newByteLength > oldByteLength) {
// In theory, realloc+memset(0) can be optimized to avoid touching
// any pages (by using OS page mapping tricks). However, in
// practice, we don't seem to get this optimization in Firefox with
// jemalloc so calloc+memcpy are faster.
newData.reset(cx->runtime()->pod_callocCanGC<uint8_t>(newByteLength));
if (newData) {
memcpy(newData.get(), oldData.get(), oldByteLength);
} else {
// Try realloc before giving up since it might be able to succeed
// by resizing oldData in-place.
newData.reset(cx->pod_realloc(oldData.get(), oldByteLength, newByteLength));
if (!newData)
return false;
oldData.release();
memset(newData.get() + oldByteLength, 0, newByteLength - oldByteLength);
}
} else if (newByteLength < oldByteLength) {
newData.reset(cx->pod_realloc(oldData.get(), oldByteLength, newByteLength));
if (!newData)
return false;
oldData.release();
} else {
newData = Move(oldData);
}
}
RootedObject newBuffer(cx, JS_NewArrayBufferWithContents(cx, newByteLength, newData.get()));
if (!newBuffer)
return false;
newData.release();
args.rval().setObject(*newBuffer);
return true;
}
#endif // defined(NIGHTLY_BUILD)
/*
* new ArrayBuffer(byteLength)
*/
@ -511,8 +291,10 @@ ArrayBufferObject::neuterView(JSContext* cx, ArrayBufferViewObject* view,
ArrayBufferObject::neuter(JSContext* cx, Handle<ArrayBufferObject*> buffer,
BufferContents newContents)
{
if (buffer->isAsmJS() && !OnDetachAsmJSArrayBuffer(cx, buffer))
if (buffer->isAsmJS()) {
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_OUT_OF_MEMORY);
return false;
}
// When neutering buffers where we don't know all views, the new data must
// match the old data. All missing views are typed objects, which do not
@ -742,6 +524,33 @@ ArrayBufferObject::dataPointerShared() const
return SharedMem<uint8_t*>::unshared(getSlot(DATA_SLOT).toPrivate());
}
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
static void
ReleaseAsmJSMappedData(void* base)
{
MOZ_ASSERT(uintptr_t(base) % AsmJSPageSize == 0);
# ifdef XP_WIN
VirtualFree(base, 0, MEM_RELEASE);
# else
munmap(base, AsmJSMappedSize);
# if defined(MOZ_VALGRIND) && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
// Tell Valgrind/Memcheck to recommence reporting accesses in the
// previously-inaccessible region.
if (AsmJSMappedSize > 0) {
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(base, AsmJSMappedSize);
}
# endif
# endif
MemProfiler::RemoveNative(base);
}
#else
static void
ReleaseAsmJSMappedData(void* base)
{
MOZ_CRASH("asm.js only uses mapped buffers when using signal-handler OOB checking");
}
#endif
void
ArrayBufferObject::releaseData(FreeOp* fop)
{


@ -202,7 +202,6 @@ JSRuntime::JSRuntime(JSRuntime* parentRuntime)
destroyPrincipals(nullptr),
readPrincipals(nullptr),
errorReporter(nullptr),
linkedWasmModules(nullptr),
propertyRemovals(0),
#if !EXPOSE_INTL_API
thousandsSeparator(0),


@ -1190,9 +1190,6 @@ struct JSRuntime : public JS::shadow::Runtime,
/* AsmJSCache callbacks are runtime-wide. */
JS::AsmJSCacheOps asmJSCacheOps;
/* Head of the linked list of linked wasm modules. */
js::wasm::Module* linkedWasmModules;
/*
* The propertyRemovals counter is incremented for every JSObject::clear,
* and for each JSObject::remove method call that frees a slot in the given