diff --git a/js/src/jit/EffectiveAddressAnalysis.cpp b/js/src/jit/EffectiveAddressAnalysis.cpp index 1c1bdcf91f0c..135920c02ee4 100644 --- a/js/src/jit/EffectiveAddressAnalysis.cpp +++ b/js/src/jit/EffectiveAddressAnalysis.cpp @@ -104,20 +104,17 @@ template bool EffectiveAddressAnalysis::tryAddDisplacement(MAsmJSHeapAccessType* ins, int32_t o) { - // Compute the new offset. Check for overflow and negative. In theory it - // ought to be possible to support negative offsets, but it'd require - // more elaborate bounds checking mechanisms than we currently have. - MOZ_ASSERT(ins->offset() >= 0); - int32_t newOffset = uint32_t(ins->offset()) + o; - if (newOffset < 0) + // Compute the new offset. Check for overflow. + uint32_t oldOffset = ins->offset(); + uint32_t newOffset = oldOffset + o; + if (o < 0 ? (newOffset >= oldOffset) : (newOffset < oldOffset)) return false; // Compute the new offset to the end of the access. Check for overflow - // and negative here also. - int32_t newEnd = uint32_t(newOffset) + ins->byteSize(); - if (newEnd < 0) + // here also. + uint32_t newEnd = newOffset + ins->byteSize(); + if (newEnd < newOffset) return false; - MOZ_ASSERT(uint32_t(newEnd) >= uint32_t(newOffset)); // Determine the range of valid offsets which can be folded into this // instruction and check whether our computed offset is within that range. 
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h index 113b4f63b3cc..29fb3dcc44d9 100644 --- a/js/src/jit/MIR.h +++ b/js/src/jit/MIR.h @@ -13882,7 +13882,7 @@ class MAsmJSNeg class MAsmJSHeapAccess { - int32_t offset_; + uint32_t offset_; Scalar::Type accessType_ : 8; bool needsBoundsCheck_; unsigned numSimdElems_; @@ -13903,8 +13903,8 @@ class MAsmJSHeapAccess MOZ_ASSERT(numSimdElems <= ScalarTypeToLength(accessType)); } - int32_t offset() const { return offset_; } - int32_t endOffset() const { return offset() + byteSize(); } + uint32_t offset() const { return offset_; } + uint32_t endOffset() const { return offset() + byteSize(); } Scalar::Type accessType() const { return accessType_; } unsigned byteSize() const { return Scalar::isSimdType(accessType()) @@ -13914,7 +13914,6 @@ class MAsmJSHeapAccess bool needsBoundsCheck() const { return needsBoundsCheck_; } void removeBoundsCheck() { needsBoundsCheck_ = false; } unsigned numSimdElems() const { MOZ_ASSERT(Scalar::isSimdType(accessType_)); return numSimdElems_; } - void setOffset(int32_t o) { - MOZ_ASSERT(o >= 0); + void setOffset(uint32_t o) { offset_ = o; } diff --git a/js/src/jit/x86/CodeGenerator-x86.cpp b/js/src/jit/x86/CodeGenerator-x86.cpp index 005b625daf88..5fbf034fca70 100644 --- a/js/src/jit/x86/CodeGenerator-x86.cpp +++ b/js/src/jit/x86/CodeGenerator-x86.cpp @@ -662,7 +662,7 @@ CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins) void CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg, bool boundsCheck, - int32_t offset, int32_t endOffset) + uint32_t offset, uint32_t endOffset) { uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck; diff --git a/js/src/jit/x86/CodeGenerator-x86.h b/js/src/jit/x86/CodeGenerator-x86.h index a2b83cb083ce..cbee97fd94ba 100644 --- a/js/src/jit/x86/CodeGenerator-x86.h +++ b/js/src/jit/x86/CodeGenerator-x86.h @@ -74,7 +74,7 @@ class CodeGeneratorX86 : public CodeGeneratorX86Shared private: void 
asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg, bool boundsCheck, - int32_t offset, int32_t endOffset); + uint32_t offset, uint32_t endOffset); }; typedef CodeGeneratorX86 CodeGeneratorSpecific;