See bug for analysis.

Differential Revision: https://phabricator.services.mozilla.com/D129010
This commit is contained in:
Lars T Hansen 2021-10-21 15:46:28 +00:00
Родитель 6b06ddd335
Коммит ca918db2a1
15 изменённых файлов: 65 добавлений и 54 удалений

Просмотреть файл

@ -1,5 +1,16 @@
// |jit-test| skip-if: !hasDisassembler() || !(wasmCompileMode() == "baseline" || wasmCompileMode() == "ion") || !(getBuildConfiguration().x64 && !getBuildConfiguration()["arm64-simulator"] && !getBuildConfiguration()["mips64-simulator"])
// We widen i32 results after calls on 64-bit platforms for two reasons:
//
// - it's a cheap mitigation for certain spectre problems, and
// - it makes the high bits of the 64-bit register conform to platform
// conventions, which they might not if the call was to C++ code
// especially.
//
// This is a whitebox test that explicit widening instructions are inserted
// after calls on x64. The widening is platform-specific; on x64, the upper
// bits are zeroed.
// What we can't test here is the direct-call-from-JIT path, as the generated
// code is not available to wasmDis.

Просмотреть файл

@ -7878,8 +7878,7 @@ void CodeGenerator::visitGetNextEntryForIterator(
}
// The point of these is to inform Ion of where these values already are; they
// don't normally generate code. Still, visitWasmRegisterResult is
// per-platform.
// don't normally generate (much) code.
// These three visitors emit no code: the values are already in the
// registers / stack slots the LIR describes, and visiting them merely
// informs Ion of where those values live.
void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
@ -7901,6 +7900,16 @@ void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
}
}
// On 64-bit platforms, widen an i32 register result so its high 32 bits
// conform to the platform convention (see MacroAssembler::widenInt32).
// On 32-bit platforms there is nothing to do.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
#ifdef JS_64BIT
  MWasmRegisterResult* mir = lir->mir();
  if (mir && mir->type() == MIRType::Int32) {
    masm.widenInt32(ToRegister(lir->output()));
  }
#endif
}
void CodeGenerator::visitWasmCall(LWasmCall* lir) {
MWasmCall* mir = lir->mir();
bool needsBoundsCheck = lir->needsBoundsCheck();

Просмотреть файл

@ -3818,6 +3818,16 @@ class MacroAssembler : public MacroAssemblerSpecific {
wasm::SymbolicAddress builtin,
wasm::FailureMode failureMode);
// The System ABI frequently states that the high bits of a 64-bit register
// that holds a 32-bit return value are unpredictable, and C++ compilers will
// indeed generate code that leaves garbage in the upper bits.
//
// Adjust the contents of the 64-bit register `r` to conform to our internal
// convention, which requires predictable high bits. In practice, this means
// that the 32-bit value will be zero-extended or sign-extended to 64 bits as
// appropriate for the platform.
void widenInt32(Register r) DEFINED_ON(arm64, x64, mips64);
// As enterFakeExitFrame(), but using register conventions appropriate for
// wasm stubs.
void enterFakeExitFrameForWasm(Register cxreg, Register scratch,

Просмотреть файл

@ -1493,9 +1493,6 @@ void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
}
// See ../CodeGenerator.cpp for more information.
// Emits no code: the result value is already in the register the LIR
// describes.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}
void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
masm.convertUInt32ToDouble(ToRegister(lir->input()),
ToFloatRegister(lir->output()));

Просмотреть файл

@ -1485,9 +1485,6 @@ void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
}
// See ../CodeGenerator.cpp for more information.
// Emits no code: the result value is already in the register the LIR
// describes.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}
void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
masm.convertUInt32ToDouble(ToRegister(lir->input()),
ToFloatRegister(lir->output()));

Просмотреть файл

@ -2109,6 +2109,10 @@ void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
Str(tmp2, vixl::MemOperand(tmp, 0));
}
// Widen the i32 in `r` to the full 64-bit register; the widened form
// chosen here is zero-extension.
void MacroAssembler::widenInt32(Register r) {
  move32To64ZeroExtend(r, Register64(r));
}
// ========================================================================
// Convert floating point.

Просмотреть файл

@ -1372,9 +1372,6 @@ void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) {
lir->ifFalse());
}
// See ../CodeGenerator.cpp for more information.
// Emits no code: the result value is already in the register the LIR
// describes.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}
void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
masm.convertUInt32ToDouble(ToRegister(lir->input()),
ToFloatRegister(lir->output()));

Просмотреть файл

@ -2288,6 +2288,11 @@ void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
MOZ_CRASH("IMPLEMENTME");
}
void MacroAssembler::widenInt32(Register r) {
  // Widen the i32 in `r` by sign-extension, the platform-conforming form
  // chosen here.  NOTE(review): this may be redundant if the value is
  // already in canonical sign-extended form -- confirm.
  move32To64SignExtend(r, Register64(r));
}
void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
Register output,
bool isSaturating,

Просмотреть файл

@ -465,16 +465,6 @@ void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
}
}
// When Spectre index masking is enabled, rewrite an i32 register result to
// itself with a 32-bit move; on x64 this zeroes the upper 32 bits (see the
// whitebox test's comment), giving the value predictable high bits.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
  if (JitOptions.spectreIndexMasking) {
    if (MWasmRegisterResult* mir = lir->mir()) {
      if (mir->type() == MIRType::Int32) {
        // A same-register movl acts as a zero-extending widen on x64.
        masm.movl(ToRegister(lir->output()), ToRegister(lir->output()));
      }
    }
  }
}
void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);

Просмотреть файл

@ -1182,6 +1182,10 @@ void MacroAssembler::wasmTruncateFloat32ToUInt64(
bind(oolRejoin);
}
// Widen the i32 in `r` to the full 64-bit register; the widened form
// chosen here is zero-extension (upper 32 bits cleared).
void MacroAssembler::widenInt32(Register r) {
  move32To64ZeroExtend(r, Register64(r));
}
// ========================================================================
// Convert floating point.

Просмотреть файл

@ -454,9 +454,6 @@ void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
masm.pop(edx);
}
// See ../CodeGenerator.cpp for more information.
// Emits no code: the result value is already in the register the LIR
// describes.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}
void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
Register input = ToRegister(lir->input());
Register temp = ToRegister(lir->temp());

Просмотреть файл

@ -794,8 +794,8 @@ struct BaseCompiler final {
// hot enough to warrant inlining at the outermost level.
inline void needResultRegisters(ResultType type, ResultRegKind which);
#ifdef JS_CODEGEN_X64
inline void maskResultRegisters(ResultType type);
#ifdef JS_64BIT
inline void widenInt32ResultRegisters(ResultType type);
#endif
inline void freeResultRegisters(ResultType type, ResultRegKind which);
inline void needIntegerResultRegisters(ResultType type);

Просмотреть файл

@ -138,10 +138,8 @@ RegI32 BaseCompiler::captureReturnedI32() {
RegI32 r = RegI32(ReturnReg);
MOZ_ASSERT(isAvailableI32(r));
needI32(r);
#if defined(JS_CODEGEN_X64)
if (JitOptions.spectreIndexMasking) {
masm.movl(r, r);
}
#if defined(JS_64BIT)
masm.widenInt32(r);
#endif
return r;
}

Просмотреть файл

@ -365,10 +365,8 @@ void BaseCompiler::needResultRegisters(ResultType type, ResultRegKind which) {
}
}
#ifdef JS_CODEGEN_X64
void BaseCompiler::maskResultRegisters(ResultType type) {
MOZ_ASSERT(JitOptions.spectreIndexMasking);
#ifdef JS_64BIT
void BaseCompiler::widenInt32ResultRegisters(ResultType type) {
if (type.empty()) {
return;
}
@ -376,7 +374,7 @@ void BaseCompiler::maskResultRegisters(ResultType type) {
for (ABIResultIter iter(type); !iter.done(); iter.next()) {
ABIResult result = iter.cur();
if (result.inRegister() && result.type().kind() == ValType::I32) {
masm.movl(result.gpr(), result.gpr());
masm.widenInt32(result.gpr());
}
}
}
@ -451,10 +449,8 @@ void BaseCompiler::captureResultRegisters(ResultType type) {
void BaseCompiler::captureCallResultRegisters(ResultType type) {
captureResultRegisters(type);
#ifdef JS_CODEGEN_X64
if (JitOptions.spectreIndexMasking) {
maskResultRegisters(type);
}
#ifdef JS_64BIT
widenInt32ResultRegisters(type);
#endif
}

Просмотреть файл

@ -841,7 +841,7 @@ static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
WasmPop(masm, WasmTlsReg);
// Store the register result, if any, in argv[0].
// No spectre.index_masking is required, as the value leaves ReturnReg.
// No widening is required, as the value leaves ReturnReg.
StoreRegisterResult(masm, fe, argv);
// After the ReturnReg is stored into argv[0] but before fp is clobbered by
@ -1304,7 +1304,7 @@ static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
switch (results[0].kind()) {
case ValType::I32:
GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
// No spectre.index_masking is required, as the value is boxed.
// No widening is required, as the value is boxed.
masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
break;
case ValType::F32: {
@ -1423,8 +1423,7 @@ static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
SymbolicAddress::CoerceInPlace_JitEntry);
masm.assertStackAlignment(ABIStackAlignment);
// No spectre.index_masking is required, as the return value is used as a
// bool.
// No widening is required, as the return value is used as a bool.
masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg,
&rejoinBeforeCall);
}
@ -1632,10 +1631,8 @@ void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe,
case wasm::ValType::I32:
// The return value is in ReturnReg, which is what Ion expects.
GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
#if defined(JS_CODEGEN_X64)
if (JitOptions.spectreIndexMasking) {
masm.movl(ReturnReg, ReturnReg);
}
#ifdef JS_64BIT
masm.widenInt32(ReturnReg);
#endif
break;
case wasm::ValType::I64:
@ -2140,8 +2137,7 @@ static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi,
switch (registerResultType.kind()) {
case ValType::I32:
masm.load32(argv, ReturnReg);
// No spectre.index_masking is required, as we know the value comes from
// an i32 load.
// No widening is required, as we know the value comes from an i32 load.
GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
funcImportIndex);
GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
@ -2383,8 +2379,8 @@ static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi,
MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
switch (results[0].kind()) {
case ValType::I32:
// No spectre.index_masking required, as the return value does not come
// to us in ReturnReg.
// No widening is required, as the return value does not come to us in
// ReturnReg.
masm.truncateValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg,
&oolConvert);
GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
@ -2492,8 +2488,8 @@ static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi,
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv),
ReturnReg);
// No spectre.index_masking required, as we generate a known-good
// value in a safe way here.
// No widening is required, as we generate a known-good value in a
// safe way here.
break;
case ValType::I64: {
masm.call(SymbolicAddress::CoerceInPlace_ToBigInt);
@ -2616,7 +2612,7 @@ bool wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType,
masm.call(ImmPtr(funcPtr, ImmPtr::NoCheckToken()));
#if defined(JS_CODEGEN_X64)
// No spectre.index_masking is required, as the caller will mask.
// No widening is required, as the caller will widen.
#elif defined(JS_CODEGEN_X86)
// x86 passes the return value on the x87 FP stack.
Operand op(esp, 0);