See bug for analysis.

Differential Revision: https://phabricator.services.mozilla.com/D129010
Lars T Hansen 2021-10-21 15:46:28 +00:00
Parent 6b06ddd335
Commit ca918db2a1
15 changed files with 65 additions and 54 deletions

View file

@@ -1,5 +1,16 @@
// |jit-test| skip-if: !hasDisassembler() || !(wasmCompileMode() == "baseline" || wasmCompileMode() == "ion") || !(getBuildConfiguration().x64 && !getBuildConfiguration()["arm64-simulator"] && !getBuildConfiguration()["mips64-simulator"])
+// We widen i32 results after calls on 64-bit platforms for two reasons:
+//
+// - it's a cheap mitigation for certain spectre problems, and
+// - it makes the high bits of the 64-bit register conform to platform
+//   conventions, which they might not if the call was to C++ code
+//   especially.
+//
+// This is a whitebox test that explicit widening instructions are inserted
+// after calls on x64. The widening is platform-specific; on x64, the upper
+// bits are zeroed.
// What we can't test here is the direct-call-from-JIT path, as the generated
// code is not available to wasmDis.
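(Aside, illustration only and not part of the patch: the "widening" described above is zero-extension on x64 and arm64 but sign-extension on mips64, as the per-platform MacroAssembler::widenInt32 implementations later in this commit show. A minimal standalone C++ sketch of the two conventions:)

// Illustration only: how a 32-bit value ends up in a 64-bit register under
// the two widening conventions this patch uses (zero-extension on x64/arm64,
// sign-extension on mips64).
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  int32_t v = -1;  // bit pattern 0xffffffff in the low 32 bits

  // Zero-extension: the upper 32 bits become zero.  On x64 a 32-bit
  // register-to-itself move such as `mov %eax, %eax` has exactly this effect.
  uint64_t zext = static_cast<uint32_t>(v);

  // Sign-extension: the upper 32 bits replicate bit 31, which is the mips64
  // convention for 32-bit values held in 64-bit registers.
  int64_t sext = v;

  std::printf("zero-extended: 0x%016" PRIx64 "\n", zext);                         // 0x00000000ffffffff
  std::printf("sign-extended: 0x%016" PRIx64 "\n", static_cast<uint64_t>(sext));  // 0xffffffffffffffff
  return 0;
}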

View file

@@ -7878,8 +7878,7 @@ void CodeGenerator::visitGetNextEntryForIterator(
}
// The point of these is to inform Ion of where these values already are; they
-// don't normally generate code. Still, visitWasmRegisterResult is
-// per-platform.
+// don't normally generate (much) code.
void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
@@ -7901,6 +7900,16 @@ void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
  }
}
+void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
+#ifdef JS_64BIT
+  if (MWasmRegisterResult* mir = lir->mir()) {
+    if (mir->type() == MIRType::Int32) {
+      masm.widenInt32(ToRegister(lir->output()));
+    }
+  }
+#endif
+}
void CodeGenerator::visitWasmCall(LWasmCall* lir) {
  MWasmCall* mir = lir->mir();
  bool needsBoundsCheck = lir->needsBoundsCheck();

View file

@@ -3818,6 +3818,16 @@ class MacroAssembler : public MacroAssemblerSpecific {
                              wasm::SymbolicAddress builtin,
                              wasm::FailureMode failureMode);
+  // The System ABI frequently states that the high bits of a 64-bit register
+  // that holds a 32-bit return value are unpredictable, and C++ compilers will
+  // indeed generate code that leaves garbage in the upper bits.
+  //
+  // Adjust the contents of the 64-bit register `r` to conform to our internal
+  // convention, which requires predictable high bits. In practice, this means
+  // that the 32-bit value will be zero-extended or sign-extended to 64 bits as
+  // appropriate for the platform.
+  void widenInt32(Register r) DEFINED_ON(arm64, x64, mips64);
  // As enterFakeExitFrame(), but using register conventions appropriate for
  // wasm stubs.
  void enterFakeExitFrameForWasm(Register cxreg, Register scratch,
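(Aside, illustration only and not part of the patch: the comment above is the heart of the change — code downstream of a call may use the result register at its full 64-bit width, for example as an index, and callee-left garbage in the high bits would then be harmful, which is related to the spectre index-masking concern mentioned in the test comment. A hedged C++ sketch of the failure mode; `lookup`, `table`, and `garbage_high_bits` are made-up names for illustration:)

// Illustration only: a 32-bit call result later consumed at 64-bit width.
#include <cstdint>

static int32_t lookup(const int32_t* table, uint64_t index) {
  // If `index` still carried stale high bits from a callee, this read would
  // land far outside the table; widening the value first prevents that.
  return table[index];
}

int main() {
  int32_t table[16] = {0};

  uint64_t garbage_high_bits = 0x12345678ULL << 32;  // what a C++ callee might leave behind
  uint64_t reg = 3 | garbage_high_bits;              // low 32 bits hold the genuine result

  // Widening (here: zero-extending the low 32 bits) restores the predictable
  // value the internal convention above requires.
  uint64_t widened = static_cast<uint32_t>(reg);
  return static_cast<int>(lookup(table, widened));   // safe: reads table[3]
}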

View file

@@ -1493,9 +1493,6 @@ void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
  emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
}
-// See ../CodeGenerator.cpp for more information.
-void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}
void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  masm.convertUInt32ToDouble(ToRegister(lir->input()),
                             ToFloatRegister(lir->output()));

View file

@@ -1485,9 +1485,6 @@ void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
  emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
}
-// See ../CodeGenerator.cpp for more information.
-void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}
void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  masm.convertUInt32ToDouble(ToRegister(lir->input()),
                             ToFloatRegister(lir->output()));

View file

@@ -2109,6 +2109,10 @@ void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
  Str(tmp2, vixl::MemOperand(tmp, 0));
}
+void MacroAssembler::widenInt32(Register r) {
+  move32To64ZeroExtend(r, Register64(r));
+}
// ========================================================================
// Convert floating point.

View file

@@ -1372,9 +1372,6 @@ void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) {
             lir->ifFalse());
}
-// See ../CodeGenerator.cpp for more information.
-void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}
void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  masm.convertUInt32ToDouble(ToRegister(lir->input()),
                             ToFloatRegister(lir->output()));

View file

@@ -2288,6 +2288,11 @@ void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
  MOZ_CRASH("IMPLEMENTME");
}
+void MacroAssembler::widenInt32(Register r) {
+  // I *think* this is correct. It may be redundant.
+  move32To64SignExtend(r, Register64(r));
+}
void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
                                                Register output,
                                                bool isSaturating,

View file

@@ -465,16 +465,6 @@ void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
  }
}
-void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
-  if (JitOptions.spectreIndexMasking) {
-    if (MWasmRegisterResult* mir = lir->mir()) {
-      if (mir->type() == MIRType::Int32) {
-        masm.movl(ToRegister(lir->output()), ToRegister(lir->output()));
-      }
-    }
-  }
-}
void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);

View file

@@ -1182,6 +1182,10 @@ void MacroAssembler::wasmTruncateFloat32ToUInt64(
  bind(oolRejoin);
}
+void MacroAssembler::widenInt32(Register r) {
+  move32To64ZeroExtend(r, Register64(r));
+}
// ========================================================================
// Convert floating point.

View file

@@ -454,9 +454,6 @@ void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
  masm.pop(edx);
}
-// See ../CodeGenerator.cpp for more information.
-void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}
void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  Register input = ToRegister(lir->input());
  Register temp = ToRegister(lir->temp());

View file

@@ -794,8 +794,8 @@ struct BaseCompiler final {
  // hot enough to warrant inlining at the outermost level.
  inline void needResultRegisters(ResultType type, ResultRegKind which);
-#ifdef JS_CODEGEN_X64
-  inline void maskResultRegisters(ResultType type);
+#ifdef JS_64BIT
+  inline void widenInt32ResultRegisters(ResultType type);
#endif
  inline void freeResultRegisters(ResultType type, ResultRegKind which);
  inline void needIntegerResultRegisters(ResultType type);

View file

@@ -138,10 +138,8 @@ RegI32 BaseCompiler::captureReturnedI32() {
  RegI32 r = RegI32(ReturnReg);
  MOZ_ASSERT(isAvailableI32(r));
  needI32(r);
-#if defined(JS_CODEGEN_X64)
-  if (JitOptions.spectreIndexMasking) {
-    masm.movl(r, r);
-  }
+#if defined(JS_64BIT)
+  masm.widenInt32(r);
#endif
  return r;
}

View file

@@ -365,10 +365,8 @@ void BaseCompiler::needResultRegisters(ResultType type, ResultRegKind which) {
  }
}
-#ifdef JS_CODEGEN_X64
-void BaseCompiler::maskResultRegisters(ResultType type) {
-  MOZ_ASSERT(JitOptions.spectreIndexMasking);
+#ifdef JS_64BIT
+void BaseCompiler::widenInt32ResultRegisters(ResultType type) {
  if (type.empty()) {
    return;
  }
@@ -376,7 +374,7 @@ void BaseCompiler::maskResultRegisters(ResultType type) {
  for (ABIResultIter iter(type); !iter.done(); iter.next()) {
    ABIResult result = iter.cur();
    if (result.inRegister() && result.type().kind() == ValType::I32) {
-      masm.movl(result.gpr(), result.gpr());
+      masm.widenInt32(result.gpr());
    }
  }
}
@@ -451,10 +449,8 @@ void BaseCompiler::captureResultRegisters(ResultType type) {
void BaseCompiler::captureCallResultRegisters(ResultType type) {
  captureResultRegisters(type);
-#ifdef JS_CODEGEN_X64
-  if (JitOptions.spectreIndexMasking) {
-    maskResultRegisters(type);
-  }
+#ifdef JS_64BIT
+  widenInt32ResultRegisters(type);
#endif
}

View file

@@ -841,7 +841,7 @@ static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
  WasmPop(masm, WasmTlsReg);
  // Store the register result, if any, in argv[0].
-  // No spectre.index_masking is required, as the value leaves ReturnReg.
+  // No widening is required, as the value leaves ReturnReg.
  StoreRegisterResult(masm, fe, argv);
  // After the ReturnReg is stored into argv[0] but before fp is clobbered by
@@ -1304,7 +1304,7 @@ static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
  switch (results[0].kind()) {
    case ValType::I32:
      GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
-      // No spectre.index_masking is required, as the value is boxed.
+      // No widening is required, as the value is boxed.
      masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
      break;
    case ValType::F32: {
@@ -1423,8 +1423,7 @@ static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
              SymbolicAddress::CoerceInPlace_JitEntry);
    masm.assertStackAlignment(ABIStackAlignment);
-    // No spectre.index_masking is required, as the return value is used as a
-    // bool.
+    // No widening is required, as the return value is used as a bool.
    masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg,
                      &rejoinBeforeCall);
  }
@@ -1632,10 +1631,8 @@ void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe,
    case wasm::ValType::I32:
      // The return value is in ReturnReg, which is what Ion expects.
      GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
-#if defined(JS_CODEGEN_X64)
-      if (JitOptions.spectreIndexMasking) {
-        masm.movl(ReturnReg, ReturnReg);
-      }
+#ifdef JS_64BIT
+      masm.widenInt32(ReturnReg);
#endif
      break;
    case wasm::ValType::I64:
@@ -2140,8 +2137,7 @@ static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi,
  switch (registerResultType.kind()) {
    case ValType::I32:
      masm.load32(argv, ReturnReg);
-      // No spectre.index_masking is required, as we know the value comes from
-      // an i32 load.
+      // No widening is required, as we know the value comes from an i32 load.
      GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                funcImportIndex);
      GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
@@ -2383,8 +2379,8 @@ static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi,
  MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
  switch (results[0].kind()) {
    case ValType::I32:
-      // No spectre.index_masking required, as the return value does not come
-      // to us in ReturnReg.
+      // No widening is required, as the return value does not come to us in
+      // ReturnReg.
      masm.truncateValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg,
                                &oolConvert);
      GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
@@ -2492,8 +2488,8 @@ static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi,
      masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
      masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv),
                      ReturnReg);
-      // No spectre.index_masking required, as we generate a known-good
-      // value in a safe way here.
+      // No widening is required, as we generate a known-good value in a
+      // safe way here.
      break;
    case ValType::I64: {
      masm.call(SymbolicAddress::CoerceInPlace_ToBigInt);
@@ -2616,7 +2612,7 @@ bool wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType,
  masm.call(ImmPtr(funcPtr, ImmPtr::NoCheckToken()));
#if defined(JS_CODEGEN_X64)
-  // No spectre.index_masking is required, as the caller will mask.
+  // No widening is required, as the caller will widen.
#elif defined(JS_CODEGEN_X86)
  // x86 passes the return value on the x87 FP stack.
  Operand op(esp, 0);