diff --git a/js/src/jit-test/tests/wasm/spectre-mask.js b/js/src/jit-test/tests/wasm/widening-i32-after-call.js similarity index 81% rename from js/src/jit-test/tests/wasm/spectre-mask.js rename to js/src/jit-test/tests/wasm/widening-i32-after-call.js index 28205a7ca7e2..461fccc83d0d 100644 --- a/js/src/jit-test/tests/wasm/spectre-mask.js +++ b/js/src/jit-test/tests/wasm/widening-i32-after-call.js @@ -1,5 +1,16 @@ // |jit-test| skip-if: !hasDisassembler() || !(wasmCompileMode() == "baseline" || wasmCompileMode() == "ion") || !(getBuildConfiguration().x64 && !getBuildConfiguration()["arm64-simulator"] && !getBuildConfiguration()["mips64-simulator"]) +// We widen i32 results after calls on 64-bit platforms for two reasons: +// +// - it's a cheap mitigation for certain spectre problems, and +// - it makes the high bits of the 64-bit register conform to platform +// conventions, which they might not if the call was to C++ code +// especially. +// +// This is a whitebox test that explicit widening instructions are inserted +// after calls on x64. The widening is platform-specific; on x64, the upper +// bits are zeroed. + // What we can't test here is the direct-call-from-JIT path, as the generated // code is not available to wasmDis. diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp index 7dbc82414720..315457e25ecb 100644 --- a/js/src/jit/CodeGenerator.cpp +++ b/js/src/jit/CodeGenerator.cpp @@ -7878,8 +7878,7 @@ void CodeGenerator::visitGetNextEntryForIterator( } // The point of these is to inform Ion of where these values already are; they -// don't normally generate code. Still, visitWasmRegisterResult is -// per-platform. +// don't normally generate (much) code. 
void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {} void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {} void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {} @@ -7901,6 +7900,16 @@ void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) { } } +void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) { +#ifdef JS_64BIT + if (MWasmRegisterResult* mir = lir->mir()) { + if (mir->type() == MIRType::Int32) { + masm.widenInt32(ToRegister(lir->output())); + } + } +#endif +} + void CodeGenerator::visitWasmCall(LWasmCall* lir) { MWasmCall* mir = lir->mir(); bool needsBoundsCheck = lir->needsBoundsCheck(); diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h index 074ea592e76f..92f9678cd19c 100644 --- a/js/src/jit/MacroAssembler.h +++ b/js/src/jit/MacroAssembler.h @@ -3818,6 +3818,16 @@ class MacroAssembler : public MacroAssemblerSpecific { wasm::SymbolicAddress builtin, wasm::FailureMode failureMode); + // The System ABI frequently states that the high bits of a 64-bit register + // that holds a 32-bit return value are unpredictable, and C++ compilers will + // indeed generate code that leaves garbage in the upper bits. + // + // Adjust the contents of the 64-bit register `r` to conform to our internal + // convention, which requires predictable high bits. In practice, this means + // that the 32-bit value will be zero-extended or sign-extended to 64 bits as + // appropriate for the platform. + void widenInt32(Register r) DEFINED_ON(arm64, x64, mips64); + // As enterFakeExitFrame(), but using register conventions appropriate for + // wasm stubs. 
void enterFakeExitFrameForWasm(Register cxreg, Register scratch, diff --git a/js/src/jit/arm/CodeGenerator-arm.cpp b/js/src/jit/arm/CodeGenerator-arm.cpp index 1a1e9c524bef..d733492c5ef0 100644 --- a/js/src/jit/arm/CodeGenerator-arm.cpp +++ b/js/src/jit/arm/CodeGenerator-arm.cpp @@ -1493,9 +1493,6 @@ void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) { emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse()); } -// See ../CodeGenerator.cpp for more information. -void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {} - void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) { masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output())); diff --git a/js/src/jit/arm64/CodeGenerator-arm64.cpp b/js/src/jit/arm64/CodeGenerator-arm64.cpp index 9cf89a4bae05..0ef7d68e6610 100644 --- a/js/src/jit/arm64/CodeGenerator-arm64.cpp +++ b/js/src/jit/arm64/CodeGenerator-arm64.cpp @@ -1485,9 +1485,6 @@ void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) { emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse()); } -// See ../CodeGenerator.cpp for more information. -void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {} - void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) { masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output())); diff --git a/js/src/jit/arm64/MacroAssembler-arm64.cpp b/js/src/jit/arm64/MacroAssembler-arm64.cpp index 8ac09ebabe38..ae4b9d266bb8 100644 --- a/js/src/jit/arm64/MacroAssembler-arm64.cpp +++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp @@ -2109,6 +2109,10 @@ void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch, Str(tmp2, vixl::MemOperand(tmp, 0)); } +void MacroAssembler::widenInt32(Register r) { + move32To64ZeroExtend(r, Register64(r)); +} + // ======================================================================== // Convert floating point. 
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp index 6846056c4521..37653f476625 100644 --- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp +++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp @@ -1372,9 +1372,6 @@ void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) { lir->ifFalse()); } -// See ../CodeGenerator.cpp for more information. -void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {} - void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) { masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output())); diff --git a/js/src/jit/mips64/MacroAssembler-mips64.cpp b/js/src/jit/mips64/MacroAssembler-mips64.cpp index 0ca7a1445cc8..e3627af40923 100644 --- a/js/src/jit/mips64/MacroAssembler-mips64.cpp +++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp @@ -2288,6 +2288,11 @@ void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index, MOZ_CRASH("IMPLEMENTME"); } +void MacroAssembler::widenInt32(Register r) { + // I *think* this is correct. It may be redundant. 
+ move32To64SignExtend(r, Register64(r)); +} + void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input, Register output, bool isSaturating, diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp index 04b9de8f4734..788c5285f117 100644 --- a/js/src/jit/x64/CodeGenerator-x64.cpp +++ b/js/src/jit/x64/CodeGenerator-x64.cpp @@ -465,16 +465,6 @@ void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64( } } -void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) { - if (JitOptions.spectreIndexMasking) { - if (MWasmRegisterResult* mir = lir->mir()) { - if (mir->type() == MIRType::Int32) { - masm.movl(ToRegister(lir->output()), ToRegister(lir->output())); - } - } - } -} - void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) { MOZ_ASSERT(lir->mir()->type() == MIRType::Int64); diff --git a/js/src/jit/x64/MacroAssembler-x64.cpp b/js/src/jit/x64/MacroAssembler-x64.cpp index ae64417e64f3..1ed7e988640d 100644 --- a/js/src/jit/x64/MacroAssembler-x64.cpp +++ b/js/src/jit/x64/MacroAssembler-x64.cpp @@ -1182,6 +1182,10 @@ void MacroAssembler::wasmTruncateFloat32ToUInt64( bind(oolRejoin); } +void MacroAssembler::widenInt32(Register r) { + move32To64ZeroExtend(r, Register64(r)); +} + // ======================================================================== // Convert floating point. diff --git a/js/src/jit/x86/CodeGenerator-x86.cpp b/js/src/jit/x86/CodeGenerator-x86.cpp index fbed059aef8a..4ab7cf3e1072 100644 --- a/js/src/jit/x86/CodeGenerator-x86.cpp +++ b/js/src/jit/x86/CodeGenerator-x86.cpp @@ -454,9 +454,6 @@ void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64( masm.pop(edx); } -// See ../CodeGenerator.cpp for more information. 
-void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {} - void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) { Register input = ToRegister(lir->input()); Register temp = ToRegister(lir->temp()); diff --git a/js/src/wasm/WasmBCClass.h b/js/src/wasm/WasmBCClass.h index b2f0fb91f96c..1fc48a27c5e5 100644 --- a/js/src/wasm/WasmBCClass.h +++ b/js/src/wasm/WasmBCClass.h @@ -794,8 +794,8 @@ struct BaseCompiler final { // hot enough to warrant inlining at the outermost level. inline void needResultRegisters(ResultType type, ResultRegKind which); -#ifdef JS_CODEGEN_X64 - inline void maskResultRegisters(ResultType type); +#ifdef JS_64BIT + inline void widenInt32ResultRegisters(ResultType type); #endif inline void freeResultRegisters(ResultType type, ResultRegKind which); inline void needIntegerResultRegisters(ResultType type); diff --git a/js/src/wasm/WasmBCCodegen-inl.h b/js/src/wasm/WasmBCCodegen-inl.h index dd6b9632721f..0c516a517d48 100644 --- a/js/src/wasm/WasmBCCodegen-inl.h +++ b/js/src/wasm/WasmBCCodegen-inl.h @@ -138,10 +138,8 @@ RegI32 BaseCompiler::captureReturnedI32() { RegI32 r = RegI32(ReturnReg); MOZ_ASSERT(isAvailableI32(r)); needI32(r); -#if defined(JS_CODEGEN_X64) - if (JitOptions.spectreIndexMasking) { - masm.movl(r, r); - } +#if defined(JS_64BIT) + masm.widenInt32(r); #endif return r; } diff --git a/js/src/wasm/WasmBCRegMgmt-inl.h b/js/src/wasm/WasmBCRegMgmt-inl.h index ebfb932488c2..c223589030ea 100644 --- a/js/src/wasm/WasmBCRegMgmt-inl.h +++ b/js/src/wasm/WasmBCRegMgmt-inl.h @@ -365,10 +365,8 @@ void BaseCompiler::needResultRegisters(ResultType type, ResultRegKind which) { } } -#ifdef JS_CODEGEN_X64 -void BaseCompiler::maskResultRegisters(ResultType type) { - MOZ_ASSERT(JitOptions.spectreIndexMasking); - +#ifdef JS_64BIT +void BaseCompiler::widenInt32ResultRegisters(ResultType type) { if (type.empty()) { return; } @@ -376,7 +374,7 @@ void BaseCompiler::maskResultRegisters(ResultType type) { for (ABIResultIter 
iter(type); !iter.done(); iter.next()) { ABIResult result = iter.cur(); if (result.inRegister() && result.type().kind() == ValType::I32) { - masm.movl(result.gpr(), result.gpr()); + masm.widenInt32(result.gpr()); } } } @@ -451,10 +449,8 @@ void BaseCompiler::captureResultRegisters(ResultType type) { void BaseCompiler::captureCallResultRegisters(ResultType type) { captureResultRegisters(type); -#ifdef JS_CODEGEN_X64 - if (JitOptions.spectreIndexMasking) { - maskResultRegisters(type); - } +#ifdef JS_64BIT + widenInt32ResultRegisters(type); #endif } diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp index 02fabaee717d..2e24dd0a79a4 100644 --- a/js/src/wasm/WasmStubs.cpp +++ b/js/src/wasm/WasmStubs.cpp @@ -841,7 +841,7 @@ static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe, WasmPop(masm, WasmTlsReg); // Store the register result, if any, in argv[0]. - // No spectre.index_masking is required, as the value leaves ReturnReg. + // No widening is required, as the value leaves ReturnReg. StoreRegisterResult(masm, fe, argv); // After the ReturnReg is stored into argv[0] but before fp is clobbered by @@ -1304,7 +1304,7 @@ static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex, switch (results[0].kind()) { case ValType::I32: GenPrintIsize(DebugChannel::Function, masm, ReturnReg); - // No spectre.index_masking is required, as the value is boxed. + // No widening is required, as the value is boxed. masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand); break; case ValType::F32: { @@ -1423,8 +1423,7 @@ static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex, SymbolicAddress::CoerceInPlace_JitEntry); masm.assertStackAlignment(ABIStackAlignment); - // No spectre.index_masking is required, as the return value is used as a - // bool. + // No widening is required, as the return value is used as a bool. 
masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &rejoinBeforeCall); } @@ -1632,10 +1631,8 @@ void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe, case wasm::ValType::I32: // The return value is in ReturnReg, which is what Ion expects. GenPrintIsize(DebugChannel::Function, masm, ReturnReg); -#if defined(JS_CODEGEN_X64) - if (JitOptions.spectreIndexMasking) { - masm.movl(ReturnReg, ReturnReg); - } +#ifdef JS_64BIT + masm.widenInt32(ReturnReg); #endif break; case wasm::ValType::I64: @@ -2140,8 +2137,7 @@ static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, switch (registerResultType.kind()) { case ValType::I32: masm.load32(argv, ReturnReg); - // No spectre.index_masking is required, as we know the value comes from - // an i32 load. + // No widening is required, as we know the value comes from an i32 load. GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ", funcImportIndex); GenPrintIsize(DebugChannel::Import, masm, ReturnReg); @@ -2383,8 +2379,8 @@ static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented"); switch (results[0].kind()) { case ValType::I32: - // No spectre.index_masking required, as the return value does not come - // to us in ReturnReg. + // No widening is required, as the return value does not come to us in + // ReturnReg. masm.truncateValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg, &oolConvert); GenPrintIsize(DebugChannel::Import, masm, ReturnReg); @@ -2492,8 +2488,8 @@ static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel); masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnReg); - // No spectre.index_masking required, as we generate a known-good - // value in a safe way here. 
+ // No widening is required, as we generate a known-good value in a + // safe way here. break; case ValType::I64: { masm.call(SymbolicAddress::CoerceInPlace_ToBigInt); @@ -2616,7 +2612,7 @@ bool wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType, masm.call(ImmPtr(funcPtr, ImmPtr::NoCheckToken())); #if defined(JS_CODEGEN_X64) - // No spectre.index_masking is required, as the caller will mask. + // No widening is required, as the caller will widen. #elif defined(JS_CODEGEN_X86) // x86 passes the return value on the x87 FP stack. Operand op(esp, 0);